diff --git "a/3927.jsonl" "b/3927.jsonl" new file mode 100644--- /dev/null +++ "b/3927.jsonl" @@ -0,0 +1,742 @@ +{"seq_id":"339572317","text":"import csv\nimport sys\n\n# Check arguments count\nif len(sys.argv) != 3:\n sys.exit(\"Usage: python dna.py data.csv sequence.txt\")\n\n# Save filenames to variables\ndata = sys.argv[1]\nsequence = sys.argv[2]\npeople = []\n\n# Open database and save to memory\nwith open(data, \"r\") as f:\n reader = csv.DictReader(f)\n for person in reader:\n people.append(person)\n\n# Open dna sequence and save to dna variable\nwith open(sequence, \"r\") as f:\n dna = f.readlines()\n dna = dna[0].strip()\n\n# Create STR list\nSTR = list(people[0].keys())\nSTR.pop(0)\n\n# Count the longest run of consecutive repeats for each STR\ndi = {}\nfor i in STR:\n cur_max = 0\n for j in range(0, len(dna)):\n tmp = 0\n if dna[j: j + len(i)] == i:\n for k in range(j, len(dna), len(i)):\n if dna[k: k + len(i)] != i:\n break\n tmp += 1\n if cur_max < tmp:\n cur_max = tmp\n di[i] = str(cur_max)\n\nis_found = False\n\n# Compare people\nfor i in range(len(people)):\n is_match = True\n for j in STR:\n if people[i][j] != di[j]:\n is_match = False\n break\n if is_match:\n print(people[i][\"name\"])\n is_found = True\n break\n\n# If not match print\nif not is_found:\n print(\"No match\")\n","sub_path":"pset6/dna/dna.py","file_name":"dna.py","file_ext":"py","file_size_in_byte":1295,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"136544956","text":"def lcp(list):\n if len(list) == 0:\n return \"\"\n minlen = len(list[0])\n for i in range(len(list)):\n minlen = min(len(list[i]), minlen)\n print(minlen)\n lcp = \"\"\n i = 0\n while i < minlen:\n char = list[0][i]\n for j in range(1, len(list)):\n print(char,list[j][i])\n if list[j][i] != char:\n return(lcp)\n lcp = lcp+char\n i += 1\n return(lcp)\nlist = [\"flower\", \"flow\", \"flight\",\"fl\"]\nprint(lcp(list))\n\n","sub_path":"python programs/longestCommonPrefix.py","file_name":"longestCommonPrefix.py","file_ext":"py","file_size_in_byte":494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"90498723","text":"from ExtratorMetricas import ExtratorMetricas\nfrom BancoDados import BancoMySQL\nfrom GeradorLaTeX import GeradorLaTeX\nimport csv\n\nbd = BancoMySQL('garruda', 'garruda', '127.0.0.1', 'noticias')\nEM = ExtratorMetricas(bd)\nGL = GeradorLaTeX(EM)\n\n\ndef calcula_metricas(id_entidade):\n EM.calcula_distancia_total(id_entidade, True)\n\n\ndef imprime_metricas(id_entidade, header=False):\n\n f = open('saida.csv', 'ab')\n writer = csv.writer(f)\n\n if header:\n writer.writerow(['entidade', 'perfil', 'selecao', 'cobertura', 'positivo', 'neutro', 'negativo'])\n\n for id_perfil in range(1, 6):\n metricas = EM.contabiliza_metricas(id_entidade, id_perfil, True)\n writer.writerow([id_entidade, id_perfil, metricas[0], metricas[5], metricas[2], metricas[3], metricas[4]])\n\n f.close()\n\nimprime_metricas(4074, True) # Alckmin\nimprime_metricas(4078) # Padilha\nimprime_metricas(4075) # Skaf\nimprime_metricas(3956) # Aecio\nimprime_metricas(4031) # Dilma\nimprime_metricas(4039) # Marina\n\n\nGL.gera_desvio('SELECAO', 'MEDIA')\nGL.gera_desvio('COBERTURA_TWEET', 'MEDIA')\nGL.gera_desvio('POLARIDADE_POSITIVA', 'MEDIA')\nGL.gera_desvio('POLARIDADE_NEUTRA', 'MEDIA')\nGL.gera_desvio('POLARIDADE_NEGATIVA', 
'MEDIA')\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1221,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"574099829","text":"from unittest import TestCase\nfrom pricescrapper import PriceScrapper\n\n__author__ = 'kn1m'\n\n\nclass TestPriceScrapper(TestCase):\n def test_get_urls_from_xml(self):\n s = PriceScrapper('../urls.xml', 'res.xml')\n self.assertEqual(s.get_urls_from_xml(), ['http://koba.ua/product/noutbuk_dell_inspiron_3542_156_intel_i3-4005u_4500dvdintwifibtlin_59256/',\n 'http://elmir.ua/laptops/notebook_dell_inspiron_3542_black_i35345dil-34.html?utm_campaign=%D0%9D%D0%BE%D1%83%D1%82%D0%B1%D1%83%D0%BA%D0%B8%2C%20%D1%83%D0%BB%D1%8C%D1%82%D1%80%D0%B0%D0%B1%D1%83%D0%BA%D0%B8&utm_content=256258&utm_medium=cpc&utm_source=hotline&utm_term=Dell%20Inspiron%203542%20%28I35345DIL-34%29',\n 'http://allo.ua/ru/products/notebooks/dell-inspiron-3542-i35345dil-34.html?utm_medium=price_list&utm_source=hotline&utm_term=dell_inspiron_3542_i35345dil_34&utm_campaign=hotline_noutbuki',\n 'http://allo.ua/ru/products/mobile/sony-xperia-z1-c6902-black.html',\n 'http://all-ok.com.ua/mobilnye-telefony/sony-xperia-z1-c6902-black-detail.html?utm_medium=cpc&utm_source=hotline&utm_campaign=%D1%EC%E0%F0%F2%F4%EE%ED%FB+%E8+%EC%EE%E1%E8%EB%FC%ED%FB%E5+%F2%E5%EB%E5%F4%EE%ED%FB&utm_content=&utm_term=Sony+Xperia+Z1+C6902+%28Black%29',\n 'http://musicmag.com.ua/audioquest-vodka-hdmi-2m.html'])\n\n def test_get_tags_from_xml(self):\n s = PriceScrapper('../urls.xml', 'res.xml')\n self.assertEqual(s.get_tags_from_xml(), [[u'div', u'id', u'product_price_body'],\n [u'span', u'itemprop', u'price'],\n [u'span', u'class', u'sum'],\n [u'span', u'class', u'sum'],\n [u'span', u'class', u'PricesalesPrice'],\n [u'span', u'class', u'price']])\n\n def test_scrapper(self):\n s = PriceScrapper('../urls.xml', 'res.xml')\n self.assertEqual(s.scrapper([]), None)\n\n def test_levenshtein(self):\n s = PriceScrapper('../urls.xml', 'res.xml')\n self.assertEqual(s.levenshtein('asd', 'not'), 3)","sub_path":"unit_tests/test_priceScrapper.py","file_name":"test_priceScrapper.py","file_ext":"py","file_size_in_byte":2347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"255182718","text":"_base_ = [\n '_base_/default_runtime.py', '_base_/schedules/schedule_1x_sgd.py',\n '_base_/models/fusion_4branch.py', '_base_/datasets/base_dataset.py'\n]\n\nmodel = dict(\n mode=1,\n head_config=dict(video=dict(dropout_p=0.5)),\n modal_used=['image'],\n branch_config=dict(image_branch=dict(plugins=[\n dict(cfg=dict(\n type='DropBlock', drop_prob=0.2, block_size=30, postfix='_1'),\n stages=(False, False, True, True),\n position='after_conv1'),\n dict(cfg=dict(\n type='DropBlock', drop_prob=0.15, block_size=10, postfix='_2'),\n stages=(False, False, True, True),\n position='after_conv2'),\n dict(cfg=dict(\n type='DropBlock', drop_prob=0.1, block_size=7, postfix='_3'),\n stages=(False, False, True, True),\n position='after_conv3')\n ])))\n\nimg_norm_cfg = dict(mean=[123.675, 116.28, 103.53],\n std=[58.395, 57.12, 57.375])\ntrain_pipeline = [\n dict(type='LoadAnnotations'),\n dict(type='PhotoMetricDistortion'),\n dict(type='RandomFlip', flip_ratio=0.5),\n dict(type='Tokenize', vocab_root='dataset/vocab_small.txt',\n max_length=256),\n dict(type='Pad', video_pad_size=(300, 1024), audio_pad_size=(300, 128)),\n dict(type='Resize', size=(224, 224)),\n dict(type='Normalize', **img_norm_cfg),\n 
dict(type='DefaultFormatBundle'),\n dict(type='Collect', keys=['video', 'image', 'text', 'audio', 'gt_labels'])\n]\n\ndata = dict(train=dict(pipeline=train_pipeline))\n\noptimizer = dict(_delete_=True, type='SGD', lr=0.02, weight_decay=0.0001)\n","sub_path":"src/utils/configs/archive/0605/mode1_image_aug.py","file_name":"mode1_image_aug.py","file_ext":"py","file_size_in_byte":1609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"178954291","text":"# -*- coding: utf-8 -*-\r\n# 只管解析,\r\nimport re\r\nimport zipfile\r\nfrom io import StringIO\r\nfrom io import BytesIO\r\n\r\nimport docx\r\nimport win32com\r\nimport xlrd\r\nfrom bson import ObjectId\r\nfrom win32com.client import Dispatch\r\nimport os, sys\r\n\r\nfrom tools.req_for_api import req_for_serial_number\r\n\r\ncurPath = os.path.abspath(os.path.dirname(__file__))\r\nsys.path.append(curPath[:-9])\r\nsrc_dir = curPath + \"/Files/\"\r\n\r\nimport requests\r\nimport random\r\nfrom pdfminer.pdfparser import PDFParser, PDFDocument\r\nfrom pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter, process_pdf\r\nfrom pdfminer.converter import PDFPageAggregator, TextConverter\r\nfrom pdfminer.layout import LTTextBoxHorizontal, LAParams\r\nfrom pdfminer.pdfinterp import PDFTextExtractionNotAllowed\r\nfrom PyPDF2 import PdfFileReader, PdfFileWriter\r\nfrom urllib.request import urlopen\r\n\r\n\r\nFILE_PATH = ''\r\n\r\n\r\nfilesList = '''反复读这段代码并没有发现什么问题因为有些网页的附件名称是相同的例如所以我按每个网页的标题在总览页面爬到的分文件夹放置下载的文件所以方法中传了一个参数而如果参数传空则不会报错其实由此已经可以发现所在了但我却没想到又反复折腾了很久才发现原来是文件名太长了在下面单个文件名的长度限制是完整的路径长度如限制是路径最后有一个字符串结束符要占掉一个字符所以完整路径实际限长是'''\r\n\r\n\r\ndef parse_pdf(url=None):\r\n\r\n fp = urlopen(url)\r\n parser = PDFParser(fp) # 用文件对象创建一个PDF文档分析器\r\n doc = PDFDocument() # 创建一个PDF文档\r\n parser.set_document(doc) # 连接分析器,与文档对象\r\n doc.set_parser(parser)\r\n doc.initialize() # 提供初始化密码,如果没有密码,就创建一个空的字符串\r\n # 检测文档是否提供txt转换,不提供就忽略\r\n if not doc.is_extractable:\r\n raise PDFTextExtractionNotAllowed\r\n else:\r\n # 创建PDF,资源管理器,来共享资源\r\n rsrcmgr = PDFResourceManager()\r\n # 创建一个PDF设备对象\r\n laparams = LAParams()\r\n device = PDFPageAggregator(rsrcmgr, laparams=laparams)\r\n # 创建一个PDF解释器对象\r\n interpreter = PDFPageInterpreter(rsrcmgr, device)\r\n\r\n # 循环遍历列表,每次处理一个page内容\r\n # doc.get_pages() 获取page列表\r\n result_list = list()\r\n for page in doc.get_pages():\r\n interpreter.process_page(page)\r\n # 接受该页面的LTPage对象\r\n layout = device.get_result()\r\n # 这里layout是一个LTPage对象 里面存放着 这个page解析出的各种对象\r\n # 一般包括LTTextBox, LTFigure, LTImage, LTTextBoxHorizontal 等等\r\n # 想要获取文本就获得对象的text属性,\r\n text_list = list()\r\n for x in layout:\r\n if isinstance(x, LTTextBoxHorizontal):\r\n # with open(r'2.txt', 'a', encoding=\"utf-8\") as f:\r\n results = x.get_text()\r\n text_list.append(results)\r\n text = \"\\n\".join(text_list)\r\n result_list.append(text)\r\n\r\n return \"\".join(result_list)\r\n\r\n\r\ndef parse_zip(url=None):\r\n '''\r\n 输入 .zip 文件, 解压文件, 返回被压缩文件的内容\r\n :param url: Either the path to the file, or a file-like object.If it is a path, the file will be opened and closed by ZipFile.\r\n :return:\r\n '''\r\n if url:\r\n response = requests.get(url, headers={'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1', })\r\n url = BytesIO(response.content)\r\n\r\n zFile = zipfile.ZipFile(url)\r\n # ZipFile.namelist(): 获取ZIP文档内所有文件的名称列表\r\n str_data = ''\r\n for file_name in zFile.namelist():\r\n data = zFile.read(file_name)\r\n try:\r\n str_data += 
data.decode('gbk')\r\n except:\r\n try:\r\n str_data += str(data)\r\n except:\r\n str_data += ''\r\n zFile.close()\r\n return str_data\r\n\r\n\r\ndef doc2docx(url=None):\r\n '''\r\n\r\n :param url: http://fgw.gz.gov.cn/gzplan/s15713/201902/ffff52619b4d4c40ae400f6ec873d1af/files/22ea53cb1e61499b9b6bb0162e5d8e65.doc\r\n :return: 新的文件名称\r\n '''\r\n # 由url获取 文件 , 写入文件夹\r\n try:\r\n file_name = src_dir + ''.join(random.choices(filesList, k=5)) + '.doc'\r\n response = requests.get(url, headers={'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1',})\r\n with open(file_name, 'wb') as fp:\r\n fp.write(response.content)\r\n\r\n w = win32com.client.Dispatch('Word.Application')\r\n w.Visible = 0\r\n w.DisplayAlerts = 0\r\n\r\n doc = w.Documents.Open(file_name) # 读取 .doc 文件\r\n newpath = os.path.splitext(file_name)[0] + '.docx'\r\n doc.SaveAs(newpath, 12, False, \"\", True, \"\", False, False, False, False)\r\n # doc.Close()\r\n w.Quit()\r\n except:\r\n os.remove(file_name)\r\n else:\r\n # os.remove(file_name) # 删除请求的文件, 若文件使用为关闭,将没有权限删除\r\n return newpath, file_name\r\n\r\n\r\ndef parse_word(url=None):\r\n '''\r\n\r\n :param url: 需要解析的 .doc 文件\r\n :return:\r\n '''\r\n if '.docx' not in url:\r\n newpath, file_name = doc2docx(url)\r\n os.remove(file_name) # 删除请求的文件\r\n else:\r\n response = requests.get(url, headers={'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1', })\r\n newpath = BytesIO(response.content) # 数据读写不一定是文件,也可以在内存中读写. StringIO和BytesIO是在内存中操作str和bytes的方法\r\n\r\n doc = docx.Document(newpath) # 可传入文件路径(字符串)或类似文件的对象(文件游标, 数据流)\r\n text = '\\n'.join([paragraph.text for paragraph in doc.paragraphs])\r\n for table in doc.tables:\r\n for row in table.rows:\r\n for cell in row.cells:\r\n text += cell.text.strip() + '\\n'\r\n # doc.Close()\r\n # doc.Quit() # 好像不需要关闭, 官方文当中没有关闭\r\n\r\n os.remove(newpath) if isinstance(newpath, str) else '' # 删除请求的文件\r\n return text\r\n\r\n\r\ndef parse_excel(url=None):\r\n '''\r\n\r\n :param url: http://fgw.sz.gov.cn/fzggzl/zdxm/201902/P020190220602369247623.xls\r\n :return:\r\n '''\r\n # 由url获取 文件 , 写入文件夹\r\n if '\\.xlsx' in url:\r\n file_name = src_dir + ''.join(random.choices(filesList, k=5)) + '.xlsx'\r\n else:\r\n file_name = src_dir + ''.join(random.choices(filesList, k=5)) + '.xls'\r\n response = requests.get(url, headers={'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/22.0.1207.1 Safari/537.1', })\r\n with open(file_name, 'wb') as fp:\r\n fp.write(response.content) # 文件大多是二进制或者字符串传输的,\r\n\r\n wb = xlrd.open_workbook(filename=file_name) # 打开文件\r\n # wb = xlrd.open_workbook(file_contents=response.text) # 打开文件\r\n sheet_names = wb.sheet_names()[0] if isinstance(wb.sheet_names(), list) else wb.sheet_names()\r\n excel_data = wb.sheet_by_name(sheet_names)\r\n row = excel_data.nrows # 总行数\r\n text = []\r\n for i in range(1, row):\r\n rowdata = excel_data.row_values(i) # i行的list\r\n text.append(' '.join([str(_) for _ in rowdata]))\r\n os.remove(file_name) # 删除请求的文件\r\n return '\\n'.join(text)\r\n\r\n\r\ndef transform_data(url=None, data=None):\r\n type_ = data.get('conten_type')\r\n if type:\r\n if '.pdf' in type_:\r\n return parse_pdf(url)\r\n elif '.doc' in type_ or '.docx' in type_:\r\n return parse_word(url)\r\n elif '.zip' in type_:\r\n return parse_zip(url)\r\n elif '.xls' in type_ or '.xlsx' in type_:\r\n return parse_excel(url)\r\n else:\r\n return ' '\r\n else:\r\n 
return ' '\r\n\r\n\r\ndef find_type(url=None):\r\n if url:\r\n if '.pdf' in url:\r\n return '.pdf'\r\n elif '.docx' in url:\r\n return '.docx'\r\n elif '.doc' in url :\r\n return '.doc'\r\n elif '.zip' in url:\r\n return '.zip'\r\n elif '.xls' in url :\r\n return '.xls'\r\n elif '.xlsx' in url:\r\n return '.xlsx'\r\n else:\r\n return ''\r\n else:\r\n return ''\r\n\r\n\r\nif __name__ == '__main__':\r\n # print(requests.post('http://172.22.69.39:8100/ZHlocation', data={'text': '''粤发改交通[2016]472号|汕头、潮州、揭阳市人民政府,省政府各部门:|经省人民政府同意,现将|《|粤东港口群发展规划(|2016-2030年)|》|印发给你们,请认真组织实施。实施中遇到的问题,可径向省发展改革委、省交通运输厅反映。|附件:粤东港口群发展规划(|2016-2030年)|广东省发展改革委广东省交通运输厅|2016年7月22日|相关附件:|粤东港口群发展规划(2016-2030年)|varleng=$(\".fujianma\").length;|if(leng==0){|$(\".fjlis\").hide()|}|相关导读:|'''.replace('|', '')}).json())\r\n # print(transform_data('http://drc.gd.gov.cn/attachment/0/112/112043/845093.pdf'))\r\n # zip2txt('http://www.gz.gov.cn/publicfiles/business/htmlfiles/gzplanjg/cmsmedia/other/2013/5/other94483.zip')\r\n # print(parse_zip())\r\n # print(parse_excel('http://fgw.sz.gov.cn/fzggzl/zdxm/201902/P020190220602369247623.xls'))\r\n # print(parse_word('http://www.gz.gov.cn/gzgov/s2882/200708/2662796/files/3c23d2cb40894cc78baee8eff5b71489.docx'))\r\n print(parse_pdf('http://drc.gd.gov.cn/attachment/0/112/112043/845093.pdf'))\r\n\r\n # serial_number = req_for_serial_number(code=\"GOV_ZX_GDS\")\r\n # print(str(int(serial_number[5:13]) - 3932), serial_number)\r\n","sub_path":"datashufflepy-zeus/src/tools/req_for_wordExcelZip.py","file_name":"req_for_wordExcelZip.py","file_ext":"py","file_size_in_byte":10032,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"545785192","text":"\"\"\"Vaillant component.\"\"\"\nimport abc\nfrom abc import ABC\n\nimport logging\nfrom datetime import timedelta, datetime\n\nfrom pymultimatic.model import BoilerStatus\nfrom typing import Optional\n\nimport voluptuous as vol\n\nfrom homeassistant.const import (CONF_PASSWORD, CONF_SCAN_INTERVAL,\n CONF_USERNAME)\nfrom homeassistant.helpers.entity import Entity\nfrom homeassistant.util import Throttle, slugify\nimport homeassistant.helpers.config_validation as cv\nfrom homeassistant.helpers import discovery\n\n_LOGGER = logging.getLogger(__name__)\n\nDOMAIN = 'vaillant'\nHUB = '{}_HUB'.format(DOMAIN)\n\nPLATFORMS = [\n 'binary_sensor',\n 'sensor',\n 'climate',\n 'water_heater'\n]\n\nDEFAULT_EMPTY = ''\n\nDEFAULT_SCAN_INTERVAL = timedelta(minutes=5)\nMIN_SCAN_INTERVAL = timedelta(minutes=1)\n\nDEFAULT_QUICK_VETO_DURATION = 3 * 60\nQUICK_VETO_MIN_DURATION = 0.5 * 60\nQUICK_VETO_MAX_DURATION = 24 * 60\nCONF_QUICK_VETO_DURATION = 'quick_veto_duration'\n\nCONF_SMARTPHONE_ID = 'smartphoneid'\nDEFAULT_SMART_PHONE_ID = 'homeassistant'\n\n\nCONFIG_SCHEMA = vol.Schema({\n DOMAIN: vol.Schema({\n vol.Required(CONF_USERNAME): cv.string,\n vol.Required(CONF_PASSWORD): cv.string,\n vol.Optional(CONF_SCAN_INTERVAL, default=DEFAULT_SCAN_INTERVAL): (\n vol.All(cv.time_period, vol.Clamp(min=MIN_SCAN_INTERVAL))),\n vol.Optional(CONF_SMARTPHONE_ID,\n default=DEFAULT_SMART_PHONE_ID): cv.string,\n vol.Optional(CONF_QUICK_VETO_DURATION,\n default=DEFAULT_QUICK_VETO_DURATION):\n (vol.All(cv.positive_int, vol.Clamp(min=QUICK_VETO_MIN_DURATION,\n max=QUICK_VETO_MAX_DURATION))),\n })\n}, extra=vol.ALLOW_EXTRA)\n\nATTR_VAILLANT_MODE = 'vaillant_mode'\nATTR_VAILLANT_SETTING = 'setting'\nATTR_VAILLANT_NEXT_SETTING = 'next_sub_mode'\nATTR_VAILLANT_SETTING_END = 'sub_mode_end'\nATTR_QUICK_VETO_END = 'quick_veto_end'\nATTR_QUICK_MODE 
= 'quick_mode'\nATTR_START_DATE = 'start_date'\nATTR_END_DATE = 'end_date'\nATTR_TEMPERATURE = 'temperature'\n\nQUICK_MODES_LIST = ['QM_HOTWATER_BOOST', 'QM_VENTILATION_BOOST', 'QM_PARTY',\n 'QM_ONE_DAY_AWAY', 'QM_SYSTEM_OFF', 'QM_ONE_DAY_AT_HOME']\n\n# Services\nSERVICE_REMOVE_QUICK_MODE = \"remove_quick_mode\"\nSERVICE_REMOVE_HOLIDAY_MODE = \"remove_holiday_mode\"\nSERVICE_SET_QUICK_MODE = \"set_quick_mode\"\nSERVICE_SET_HOLIDAY_MODE = \"set_holiday_mode\"\n\nSERVICE_REMOVE_QUICK_MODE_SCHEMA = vol.Schema({})\nSERVICE_REMOVE_HOLIDAY_MODE_SCHEMA = vol.Schema({})\nSERVICE_SET_QUICK_MODE_SCHEMA = vol.Schema(\n {\n vol.Required(ATTR_QUICK_MODE): vol.All(\n vol.Coerce(str), vol.In(QUICK_MODES_LIST)\n )\n }\n)\nSERVICE_SET_HOLIDAY_MODE_SCHEMA = vol.Schema(\n {\n vol.Required(ATTR_START_DATE): vol.All(\n vol.Coerce(str)\n ),\n vol.Required(ATTR_END_DATE): vol.All(\n vol.Coerce(str)\n ),\n vol.Required(ATTR_TEMPERATURE): vol.All(\n vol.Coerce(float), vol.Clamp(min=5, max=30)\n )\n }\n)\n\nSERVICE_TO_METHOD = {\n SERVICE_REMOVE_QUICK_MODE: {\n \"method\": SERVICE_REMOVE_QUICK_MODE,\n \"schema\": SERVICE_REMOVE_QUICK_MODE_SCHEMA,\n },\n SERVICE_REMOVE_HOLIDAY_MODE: {\n \"method\": SERVICE_REMOVE_HOLIDAY_MODE,\n \"schema\": SERVICE_REMOVE_HOLIDAY_MODE_SCHEMA,\n },\n SERVICE_SET_QUICK_MODE: {\n \"method\": SERVICE_SET_QUICK_MODE,\n \"schema\": SERVICE_SET_QUICK_MODE_SCHEMA,\n },\n SERVICE_SET_HOLIDAY_MODE: {\n \"method\": SERVICE_SET_HOLIDAY_MODE,\n \"schema\": SERVICE_SET_HOLIDAY_MODE_SCHEMA,\n },\n}\n\n\nasync def async_setup(hass, config):\n \"\"\"Set up vaillant component.\"\"\"\n hub = VaillantHub(config[DOMAIN])\n hass.data[HUB] = hub\n\n service_handler = VaillantServiceHandler(hub)\n\n for platform in PLATFORMS:\n hass.async_create_task(\n discovery.async_load_platform(hass, platform, DOMAIN, {}, config))\n\n for service in SERVICE_TO_METHOD:\n schema = SERVICE_TO_METHOD[service][\"schema\"]\n hass.services.async_register(\n DOMAIN, service, service_handler.async_handle, schema=schema\n )\n\n _LOGGER.info(\"Successfully initialized\")\n\n return True\n\n#\n# async def async_setup_entry(hass, entry):\n# \"\"\"Set vaillant from a config entry.\"\"\"\n# for platform in PLATFORMS:\n# hass.async_create_task(\n# hass.config_entries.async_forward_entry_setup(entry, platform)\n# )\n#\n\n\nclass VaillantHub:\n \"\"\"Vaillant entry point for home-assistant.\"\"\"\n\n def __init__(self, config):\n \"\"\"Initialize hub.\"\"\"\n from pymultimatic.model import System\n from pymultimatic.systemmanager import SystemManager\n\n self.manager = SystemManager(config[CONF_USERNAME],\n config[CONF_PASSWORD],\n config[CONF_SMARTPHONE_ID])\n\n self._listeners = []\n self.system: System = self.manager.get_system()\n self._quick_veto_duration = config[CONF_QUICK_VETO_DURATION]\n self.config = config\n self.update_system = Throttle(\n config[CONF_SCAN_INTERVAL])(self._update_system)\n\n def _update_system(self):\n \"\"\"Fetch vaillant system.\"\"\"\n try:\n self.manager.request_hvac_update()\n self.system = self.manager.get_system()\n _LOGGER.debug(\"update_system successfully fetched\")\n # pylint: disable=broad-except\n except Exception:\n _LOGGER.exception(\"Enable to fetch data from vaillant API\")\n # update_system can is called by all entities, if it fails for\n # one entity, it will certainly fail for others.\n # catching exception so the throttling is occurring\n\n def find_component(self, comp):\n \"\"\"Find a component in the system with the given id, no IO is done.\"\"\"\n from pymultimatic.model import 
Zone, Room, HotWater, Circulation\n\n if isinstance(comp, Zone):\n return [zone for zone in self.system.zones\n if zone.id == comp.id][0]\n if isinstance(comp, Room):\n return [room for room in self.system.rooms\n if room.id == comp.id][0]\n if isinstance(comp, HotWater):\n if self.system.hot_water and self.system.hot_water.id == comp.id:\n return self.system.hot_water\n if isinstance(comp, Circulation):\n if self.system.circulation \\\n and self.system.circulation.id == comp.id:\n return self.system.circulation\n\n return None\n\n def add_listener(self, listener):\n \"\"\"Add an entity in listener list.\"\"\"\n self._listeners.append(listener)\n\n def refresh_listening_entities(self):\n \"\"\"Force refresh of all listening entities and fetch vaillant data.\"\"\"\n self.update_system(no_throttle=True)\n for listener in self._listeners:\n listener.async_schedule_update_ha_state(True)\n\n def set_hot_water_target_temperature(self, entity, hot_water,\n target_temp):\n \"\"\"Set hot water target temperature.\n\n * If there is a quick mode that impact dhw running on, remove it.\n\n * If dhw is ON or AUTO, modify the target temperature\n\n * If dhw is OFF, change to ON and set target temperature\n \"\"\"\n from pymultimatic.model import OperatingModes\n\n touch_quick_mode = False\n if self.system.quick_mode is not None and \\\n self.system.quick_mode.for_dhw:\n self.manager.remove_quick_mode()\n touch_quick_mode = True\n self.system.quick_mode = None\n\n current_mode = self.system.get_active_mode_hot_water(hot_water)\\\n .current_mode\n\n if current_mode == OperatingModes.OFF or touch_quick_mode:\n self.manager.set_hot_water_operating_mode(hot_water.id,\n OperatingModes.ON)\n self.manager\\\n .set_hot_water_setpoint_temperature(hot_water.id, target_temp)\n\n if touch_quick_mode:\n self.refresh_listening_entities()\n else:\n self.system.hot_water = self.manager.get_hot_water(hot_water.id)\n entity.async_schedule_update_ha_state(True)\n\n def set_room_target_temperature(self, entity, room, target_temp):\n \"\"\"Set target temperature for a room.\n\n * If there is a quick mode that impact room running on, remove it.\n\n * If the room is in MANUAL mode, simply modify the target temperature.\n\n * if the room is not in MANUAL mode, create à quick veto.\n\n \"\"\"\n from pymultimatic.model import QuickVeto, OperatingModes\n\n touch_quick_mode = False\n if self.system.quick_mode is not None and \\\n self.system.quick_mode.for_room:\n self.manager.remove_quick_mode()\n touch_quick_mode = True\n self.system.quick_mode = None\n\n current_mode = self.system.get_active_mode_room(room).current_mode\n if room.quick_veto is not None or touch_quick_mode \\\n or current_mode == OperatingModes.OFF \\\n or current_mode == OperatingModes.AUTO:\n if room.quick_veto is not None:\n self.manager.remove_room_quick_veto(room.id)\n veto = QuickVeto(self._quick_veto_duration, target_temp)\n self.manager.set_room_quick_veto(room.id, veto)\n room.quick_veto = veto\n elif current_mode == OperatingModes.MANUAL:\n self.manager.set_room_setpoint_temperature(room.id, target_temp)\n room.target_temperature = target_temp\n\n if touch_quick_mode:\n self.refresh_listening_entities()\n else:\n entity.async_schedule_update_ha_state(True)\n\n def set_zone_target_temperature(self, entity, zone, target_temp):\n \"\"\"Set target temperature for a zone.\n\n * If there is a quick mode related to zone running, remove it\n\n * If quick veto running on, remove it and create a new one with the\n new target temp\n\n * If mode is DAY, change the 
setpoint temperature\n\n * If mode is NIGHT, change the setback temperature\n\n * If mode is OFF, create a quick veto\n \"\"\"\n from pymultimatic.model import QuickVeto, OperatingModes\n\n touch_quick_mode = False\n if self.system.quick_mode is not None and \\\n self.system.quick_mode.for_zone:\n self.manager.remove_quick_mode()\n touch_quick_mode = True\n self.system.quick_mode = None\n\n current_mode = self.system.get_active_mode_zone(zone).current_mode\n if zone.quick_veto is not None or touch_quick_mode \\\n or current_mode == OperatingModes.OFF:\n if zone.quick_veto is not None:\n self.manager.remove_zone_quick_veto(zone.id)\n veto = QuickVeto(None, target_temp)\n self.manager.set_zone_quick_veto(zone.id, veto)\n zone.quick_veto = veto\n elif current_mode == OperatingModes.DAY:\n self.manager.set_zone_setpoint_temperature(zone.id, target_temp)\n zone.target_temperature = target_temp\n elif current_mode == OperatingModes.NIGHT:\n self.manager.set_zone_setback_temperature(zone.id, target_temp)\n zone.target_min_temperature = target_temp\n\n if touch_quick_mode:\n self.refresh_listening_entities()\n else:\n entity.async_schedule_update_ha_state(True)\n\n def set_zone_target_high_temperature(self, entity, zone, temperature):\n \"\"\"Set high target temperature for a zone., create a quick veto.\"\"\"\n self.manager.set_zone_setpoint_temperature(zone.id, temperature)\n zone.target_temperature = temperature\n entity.async_schedule_update_ha_state(True)\n\n def set_zone_target_low_temperature(self, entity, zone, temperature):\n \"\"\"Set low temperature for a zone.\"\"\"\n self.manager.set_zone_setback_temperature(zone.id, temperature)\n zone.target_min_temperature = temperature\n entity.async_schedule_update_ha_state(True)\n\n def set_hot_water_operating_mode(self, entity, hot_water, mode):\n \"\"\"Set hot water operation mode.\"\"\"\n touch_quick_mode = False\n if self.system.quick_mode is not None and \\\n self.system.quick_mode.for_dhw:\n self.manager.remove_quick_mode()\n touch_quick_mode = True\n self.system.quick_mode = None\n\n self.manager.set_hot_water_operating_mode(hot_water.id, mode)\n\n if touch_quick_mode:\n self.refresh_listening_entities()\n else:\n self.system.hot_water = self.manager.get_hot_water(hot_water.id)\n entity.async_schedule_update_ha_state(True)\n\n def set_room_operating_mode(self, entity, room, mode):\n \"\"\"Set room operation mode.\n\n If there is a quick mode that impact room\n running on, remove it.\n \"\"\"\n touch_quick_mode = False\n if self.system.quick_mode is not None and \\\n self.system.quick_mode.for_room:\n self.manager.remove_quick_mode()\n touch_quick_mode = True\n self.system.quick_mode = None\n\n if room.quick_veto is not None:\n self.manager.remove_room_quick_veto(room.id)\n\n self.manager.set_room_operating_mode(room.id, mode)\n\n if touch_quick_mode:\n self.refresh_listening_entities()\n else:\n self.system.set_room(room.id, self.manager.get_room(room.id))\n entity.async_schedule_update_ha_state(True)\n\n def set_zone_operating_mode(self, entity, zone, mode):\n \"\"\"Set zone operation mode.\n\n If there is a quick mode that impact room\n running on, remove it.\n \"\"\"\n touch_quick_mode = False\n if self.system.quick_mode is not None and \\\n self.system.quick_mode.for_zone:\n self.manager.remove_quick_mode()\n touch_quick_mode = True\n self.system.quick_mode = None\n\n if zone.quick_veto is not None:\n self.manager.remove_zone_quick_veto(zone.id)\n\n self.manager.set_zone_operating_mode(zone.id, mode)\n\n if touch_quick_mode:\n 
self.refresh_listening_entities()\n else:\n self.system.set_zone(zone.id, self.manager.get_zone(zone.id))\n entity.async_schedule_update_ha_state(True)\n\n\nclass VaillantServiceHandler:\n \"\"\"Service implementation\"\"\"\n\n def __init__(self, hub) -> None:\n \"\"\"Init.\"\"\"\n self._hub = hub\n\n async def remove_quick_mode(self):\n \"\"\"Remove quick mode. It has an impact on all components.\"\"\"\n self._hub.manager.remove_quick_mode()\n self._hub.refresh_listening_entities()\n\n async def set_holiday_mode(self, start_date, end_date, temperature):\n start = datetime.strptime(start_date, \"%Y-%m-%d\")\n end = datetime.strptime(end_date, \"%Y-%m-%d\")\n self._hub.manager.set_holiday_mode(start, end, temperature)\n self._hub.refresh_listening_entities()\n\n async def remove_holiday_mode(self):\n self._hub.manager.remove_holiday_mode()\n self._hub.refresh_listening_entities()\n\n async def set_quick_mode(self, quick_mode):\n \"\"\"Set quick mode, it may impact the whole system.\"\"\"\n from pymultimatic.model import QuickModes\n\n _LOGGER.debug('Will set quick mode %s', quick_mode)\n self._hub.manager.remove_quick_mode()\n self._hub.manager.set_quick_mode(QuickModes.get(quick_mode))\n self._hub.refresh_listening_entities()\n\n async def async_handle(self, service):\n \"\"\"Dispatch a service call.\"\"\"\n method = SERVICE_TO_METHOD.get(service.service)\n params = service.data.copy()\n await getattr(self, method[\"method\"])(**params)\n\n\nclass BaseVaillantEntity(Entity, ABC):\n \"\"\"Define base class for vaillant.\"\"\"\n\n def __init__(self, domain, device_class, comp_id, comp_name,\n class_in_id=True):\n \"\"\"Initialize entity.\"\"\"\n self._device_class = device_class\n if device_class and class_in_id:\n id_format = domain + '.' + DOMAIN + '_{}_' + device_class\n else:\n id_format = domain + '.' + DOMAIN + '_{}'\n\n self.entity_id = id_format\\\n .format(slugify(comp_id)).replace(' ', '_').lower()\n self._vaillant_name = comp_name\n self.hub = None\n\n @property\n def name(self) -> Optional[str]:\n \"\"\"Return the name of the entity.\"\"\"\n return self._vaillant_name\n\n async def async_update(self):\n \"\"\"Update the entity.\"\"\"\n _LOGGER.debug(\"Time to update %s\", self.entity_id)\n if not self.hub:\n self.hub = self.hass.data[HUB]\n self.hub.update_system()\n\n await self.vaillant_update()\n\n @property\n def device_class(self):\n \"\"\"Return the class of this device, from component DEVICE_CLASSES.\"\"\"\n return self._device_class\n\n @abc.abstractmethod\n async def vaillant_update(self):\n \"\"\"Update specific for vaillant.\"\"\"\n pass\n\n\nclass VaillantBoiler(Entity):\n \"\"\"Base class for boiler device.\"\"\"\n\n def __init__(self, boiler_status: BoilerStatus) -> None:\n \"\"\"Initialize device.\"\"\"\n self.boiler_status = boiler_status\n\n @property\n def device_info(self):\n \"\"\"Return device specific attributes.\"\"\"\n return {\n 'identifiers': {\n (DOMAIN, self.boiler_status.device_name)\n },\n 'name': self.boiler_status.device_name,\n }\n","sub_path":"homeassistant/components/vaillant/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":17652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"141595339","text":"# ---------------------------------------------------------\n# Copyright (c) Microsoft Corporation. 
All rights reserved.\n# ---------------------------------------------------------\n\nfrom azure.ai.ml._utils._experimental import experimental\nfrom azure.ai.ml._restclient.v2023_06_01_preview.models import (\n    MonitorServerlessSparkCompute,\n    AmlTokenComputeIdentity,\n)\n\n\n@experimental\nclass ServerlessSparkCompute:\n    def __init__(\n        self,\n        *,\n        runtime_version: str,\n        instance_type: str,\n    ):\n        self.runtime_version = runtime_version\n        self.instance_type = instance_type\n\n    def _to_rest_object(self) -> MonitorServerlessSparkCompute:\n        return MonitorServerlessSparkCompute(\n            runtime_version=self.runtime_version,\n            instance_type=self.instance_type,\n            compute_identity=AmlTokenComputeIdentity(\n                compute_identity_type=\"AmlToken\",\n            ),\n        )\n\n    @classmethod\n    def _from_rest_object(cls, obj: MonitorServerlessSparkCompute) -> \"ServerlessSparkCompute\":\n        return cls(\n            runtime_version=obj.runtime_version,\n            instance_type=obj.instance_type,\n        )\n","sub_path":"sdk/ml/azure-ai-ml/azure/ai/ml/entities/_monitoring/compute.py","file_name":"compute.py","file_ext":"py","file_size_in_byte":1188,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"}
+{"seq_id":"527109757","text":"import socket, json\nfrom socket import timeout\nfrom uPack import *\nimport Application as app\n\nSEG_SIZE = 100\nprox_id = 0\nsender_port = 4000\ndest_port = 5000\n\nsend_ip = \"10.13.37.191\"\nmy_ip = \"10.13.28.50\"\n\nsend_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\nrecv_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n# bind the receiving socket so the server side can accept packets\nrecv_sock.bind((my_ip, sender_port))\nrecv_sock.settimeout(2)\n\nlast_pkt_id = 0\n\n\n# send a packet to the destination\ndef send_pack(pkt):\n    msg = pkt.toString()\n    msg_bytes = str.encode(msg)\n    return send_sock.sendto(msg_bytes, (send_ip, dest_port))\n\n# send an ack with the given sequence id\ndef sendAck(id_seq):\n    ack = uPack(sender_port, dest_port, id_seq, True, None)\n    send_pack(ack)\n\n# build a packet carrying the given message\ndef make_pack(data):\n    # sender_port, dest_port, id_seq, isAck, checksum, data\n    pack = uPack(sender_port, dest_port, None, False, data)\n    return pack\n\n# turn a parsed json dict into a uPack packet\ndef mount_pack(jsn):\n    send_prt = jsn['send_port']\n    dest_prt = jsn['dest_port']\n    id_seq = jsn['id_seq']\n    ack = jsn['isAck']\n    data = jsn['data']\n\n    pkt = uPack(send_prt, dest_prt, id_seq, ack, data)\n    return pkt\n\n# receive one message and parse it into a packet\ndef receiv():\n    msg_bytes, server = recv_sock.recvfrom(SEG_SIZE)\n    res_pkt = json.loads(msg_bytes.decode())\n\n    pkt = mount_pack(res_pkt)\n\n    return pkt\n\n# send a packet with the given message and manage its confirmation ack\ndef send_msg(msg):\n    global prox_id\n    pkt = make_pack(msg)\n    pkt.setId_req(prox_id)\n    prox_id = 1 - prox_id\n    expected = pkt.id_seq\n\n    acked = False\n\n    while not acked:\n        send_pack(pkt)\n        try:\n            reply = receiv()\n        except timeout:\n            print(\"Timeout\")\n        else:\n            # keep retransmitting until the matching ack arrives\n            if reply.isAck and reply.id_seq == expected:\n                print(\"ACK \" + str(expected) + \" received\")\n                acked = True\n\n    return True\n\ndef receive():\n    global last_pkt_id\n    while True:\n        try:\n            # try to receive a message\n            pkt = receiv()\n        except timeout:\n            # timeout = no message received, nothing to do\n            continue\n        else:\n            # same packet received again: its ack was lost, so resend it\n            if pkt.id_seq == last_pkt_id:\n                sendAck(last_pkt_id)\n            else:\n                # new packet: send its ack and update last_pkt_id\n                sendAck(pkt.id_seq)\n                last_pkt_id = pkt.id_seq\n                return pkt\n\n\neletric_moves = [app.Move(\"Thunderbolt\", 15.0, 100.0, 7), app.Move(\n    \"QuickAttack\", 14.0, 100.0, 7), app.Move(\"ThunderShock\", 20.0, 100.0, 3)]\npikachu = app.Pokemon(\"Pikachu\", 5, eletric_moves)\n\nwhile True:\n\n    pokemon_data = app.prepare_dic(pikachu)\n    pokemon_data = pokemon_data.__str__()\n    send_msg(pokemon_data)\n    msg = receive()\n    pokemon_data = eval(msg.data)\n    moves = pokemon_data['moves']\n    remote_pokemon_move = []\n    for move in moves:\n        remote_pokemon_move.append(\n            app.Move(move[0], move[1], move[2], move[3]))\n    remote_pokemon = app.Pokemon(\n        pokemon_data['name'], pokemon_data['health'], remote_pokemon_move)\n    app.turn(pikachu, remote_pokemon)\n    if pikachu.health < 0:\n        print(pikachu.name + \" has been defeated!\")\n        print(\"END GAME\")\n        break\n","sub_path":"uSender.py","file_name":"uSender.py","file_ext":"py","file_size_in_byte":3388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"}
+{"seq_id":"28718889","text":"#!/usr/bin/env python2.7\n# -*- coding: utf-8 -*-\n# ~\n# Author: Alejandro M. Bernardis\n# Email: alejandro.bernardis@gmail.com\n# Created: 10/Jun/2016 22:19\n# ~\n\nimport os\nfrom importlib import import_module\nfrom sinthius import constants, compat\nfrom sinthius.conf import global_settings\nfrom sinthius.errors import ConfigurationError\nfrom sinthius.utils.generators import random_string\nfrom sinthius.utils.lazy import LazyObject\n\n\nclass LazySettings(LazyObject):\n    def _setup(self, name=None):\n        settings_module = os.environ.get(constants.ENV_SETTINGS_MODULE)\n        if not settings_module:\n            desc = ('setting %s' % name) if name else 'settings'\n            raise ConfigurationError(\n                'Requested %s, but settings are not configured. 
'\n 'You must either define the environment variable %s '\n 'or call settings.configure() before accessing settings'\n % (desc, constants.ENV_SETTINGS_MODULE))\n self._wrapped = Settings(settings_module)\n\n def __getattr__(self, name):\n if self._wrapped is compat.empty:\n self._setup(name)\n return getattr(self._wrapped, name)\n\n def configure(self, default_settings=global_settings, **options):\n if self._wrapped is not compat.empty:\n raise RuntimeError('Settings already configured')\n holder = UserSettingsHolder(default_settings)\n for name, value in options.iteritems():\n setattr(holder, name, value)\n self._wrapped = holder\n\n def options_parse(self, options):\n if self._wrapped is compat.empty:\n raise RuntimeError('Settings are not configured')\n if not isinstance(options, dict):\n raise ConfigurationError('Options must be a dictionary')\n for key, value in options.iteritems():\n setting_key = key.upper()\n if not hasattr(self._wrapped, setting_key) \\\n or getattr(self._wrapped, setting_key) != value:\n setattr(self._wrapped, setting_key, value)\n self.exec_builders()\n setattr(self._wrapped, constants.ENV_OPTIONS_PARSER, True)\n\n def is_options_parsed(self):\n return hasattr(self._wrapped, constants.ENV_OPTIONS_PARSER)\n\n def as_dict(self):\n if self._wrapped is compat.empty:\n raise RuntimeError('Settings are not configured')\n return {item.lower(): getattr(self._wrapped, item)\n for item in dir(self._wrapped)\n if item.isupper() and item != constants.ENV_OPTIONS_PARSER}\n\n def exec_builders(self):\n self._wrapped.exec_builders()\n\n @property\n def configured(self):\n return self._wrapped is not compat.empty\n\n\nclass BaseSettings(object):\n def __setattr__(self, name, value):\n object.__setattr__(self, name, value)\n\n def build_domains(self):\n for key, domain in self.DOMAINS.iteritems():\n if 'port' not in domain or self.PORT != domain['port']:\n domain['port'] = self.PORT\n\n def build_paths(self):\n if self.ROOT_PATH is None:\n self.ROOT_PATH = '/server'\n self.BIN_PATH = '%s/bin' % self.ROOT_PATH\n self.ETC_PATH = '%s/etc' % self.ROOT_PATH\n self.SRC_PATH = '%s/src' % self.ROOT_PATH\n self.TMP_PATH = '%s/tmp' % self.ROOT_PATH\n self.VAR_PATH = '%s/var' % self.ROOT_PATH\n self.STATIC_PATH = '%s/public/static' % self.VAR_PATH\n self.TEMPLATE_PATH = '%s/template' % self.VAR_PATH\n self.LOCALE_PATH = '%s/locale' % self.VAR_PATH\n self.PATHS = {\n 'root': self.ROOT_PATH,\n 'bin': self.BIN_PATH,\n 'etc': self.ETC_PATH,\n 'src': self.SRC_PATH,\n 'tmp': self.TMP_PATH,\n 'var': self.VAR_PATH,\n 'ca': '%s/CA' % self.ETC_PATH,\n 'secret': '%s/secret' % self.ETC_PATH,\n 'backend': '%s/backend' % self.SRC_PATH,\n 'backoffice': '%s/backoffice' % self.SRC_PATH,\n 'frontend': '%s/frontend' % self.SRC_PATH,\n 'services': '%s/services' % self.SRC_PATH,\n 'object': '%s/object' % self.VAR_PATH,\n 'otp': '%s/otp' % self.VAR_PATH,\n 'public': '%s/public' % self.VAR_PATH,\n 'log': '%s/log' % self.VAR_PATH,\n 'locale': self.LOCALE_PATH,\n 'static': self.STATIC_PATH,\n 'template': self.TEMPLATE_PATH\n }\n return self.PATHS\n\n def exec_builders(self):\n self.build_domains()\n self.build_paths()\n\n\nclass Settings(BaseSettings):\n def __init__(self, settings_module):\n for setting in dir(global_settings):\n if setting.isupper():\n setattr(self, setting, getattr(global_settings, setting))\n\n self.SETTINGS_MODULE = settings_module\n module = import_module(self.SETTINGS_MODULE)\n\n list_settings = (\n 'HANDLERS',\n 'TRANSFORMS',\n 'UI_MODULES',\n 'UI_METHODS',\n 'LOCALE_SUPPORTED'\n )\n\n 
dict_settings = (\n 'DOMAINS',\n 'DATABASES',\n 'KEYVALUES',\n 'OBJECTS',\n 'EMAILS',\n )\n\n self._explicit_settings = set()\n\n for setting in dir(module):\n if setting.isupper():\n setting_value = getattr(module, setting)\n if setting in list_settings \\\n and not isinstance(setting_value, compat.list_type):\n raise ConfigurationError('The \"%s\" setting must be a list '\n 'or a tuple. Please fix your '\n 'settings' % setting)\n if setting in dict_settings \\\n and not isinstance(setting_value, dict):\n raise ConfigurationError('The \"%s\" setting must be a dict. '\n 'Please fix your settings'\n % setting)\n setattr(self, setting, setting_value)\n self._explicit_settings.add(setting)\n\n if not self.SECRET_KEY:\n self.SECRET_KEY = random_string(128)\n\n self.exec_builders()\n\n def is_overridden(self, setting):\n return setting in self._explicit_settings\n\n\nclass UserSettingsHolder(BaseSettings):\n SETTINGS_MODULE = None\n\n def __init__(self, default_settings):\n self.__dict__['_deleted'] = set()\n self.default_settings = default_settings\n self.build_paths()\n\n def __getattr__(self, name):\n if name in self._deleted:\n raise AttributeError\n return getattr(self.default_settings, name)\n\n def __setattr__(self, name, value):\n self._deleted.discard(name)\n super(UserSettingsHolder, self).__setattr__(name, value)\n\n def __delattr__(self, name):\n self._deleted.add(name)\n if hasattr(self, name):\n super(UserSettingsHolder, self).__delattr__(name)\n\n def __dir__(self):\n return list(self.__dict__) + dir(self.default_settings)\n\n def is_overridden(self, setting):\n deleted = (setting in self._deleted)\n set_locally = (setting in self.__dict__)\n set_on_default = getattr(\n self.default_settings, 'is_overridden', lambda s: False)(setting)\n return deleted or set_locally or set_on_default\n\n\nsettings = LazySettings()\n","sub_path":"ar/py-sinthius-backend-rc/sinthius/conf/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":7428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"65646065","text":"import pytest\n\nimport httpx\n\n\n@pytest.mark.parametrize(\n \"given,idna,host,raw_host,scheme,port\",\n [\n (\n \"http://中国.icom.museum:80/\",\n \"http://xn--fiqs8s.icom.museum:80/\",\n \"中国.icom.museum\",\n b\"xn--fiqs8s.icom.museum\",\n \"http\",\n None,\n ),\n (\n \"http://Königsgäßchen.de\",\n \"http://xn--knigsgchen-b4a3dun.de\",\n \"königsgäßchen.de\",\n b\"xn--knigsgchen-b4a3dun.de\",\n \"http\",\n None,\n ),\n (\n \"https://faß.de\",\n \"https://xn--fa-hia.de\",\n \"faß.de\",\n b\"xn--fa-hia.de\",\n \"https\",\n None,\n ),\n (\n \"https://βόλος.com:443\",\n \"https://xn--nxasmm1c.com:443\",\n \"βόλος.com\",\n b\"xn--nxasmm1c.com\",\n \"https\",\n None,\n ),\n (\n \"http://ශ්‍රී.com:444\",\n \"http://xn--10cl1a0b660p.com:444\",\n \"ශ්‍රී.com\",\n b\"xn--10cl1a0b660p.com\",\n \"http\",\n 444,\n ),\n (\n \"https://نامه‌ای.com:4433\",\n \"https://xn--mgba3gch31f060k.com:4433\",\n \"نامه‌ای.com\",\n b\"xn--mgba3gch31f060k.com\",\n \"https\",\n 4433,\n ),\n ],\n ids=[\n \"http_with_port\",\n \"unicode_tr46_compat\",\n \"https_without_port\",\n \"https_with_port\",\n \"http_with_custom_port\",\n \"https_with_custom_port\",\n ],\n)\ndef test_idna_url(given, idna, host, raw_host, scheme, port):\n url = httpx.URL(given)\n assert url == httpx.URL(idna)\n assert url.host == host\n assert url.raw_host == raw_host\n assert url.scheme == scheme\n assert url.port == port\n\n\ndef test_url():\n url = 
httpx.URL(\"https://example.org:123/path/to/somewhere?abc=123#anchor\")\n assert url.scheme == \"https\"\n assert url.host == \"example.org\"\n assert url.port == 123\n assert url.path == \"/path/to/somewhere\"\n assert url.query == b\"abc=123\"\n assert url.raw_path == b\"/path/to/somewhere?abc=123\"\n assert url.fragment == \"anchor\"\n assert (\n repr(url) == \"URL('https://example.org:123/path/to/somewhere?abc=123#anchor')\"\n )\n\n new = url.copy_with(scheme=\"http\", port=None)\n assert new == httpx.URL(\"http://example.org/path/to/somewhere?abc=123#anchor\")\n assert new.scheme == \"http\"\n\n\ndef test_url_eq_str():\n url = httpx.URL(\"https://example.org:123/path/to/somewhere?abc=123#anchor\")\n assert url == \"https://example.org:123/path/to/somewhere?abc=123#anchor\"\n assert str(url) == url\n\n\ndef test_url_params():\n url = httpx.URL(\"https://example.org:123/path/to/somewhere\", params={\"a\": \"123\"})\n assert str(url) == \"https://example.org:123/path/to/somewhere?a=123\"\n assert url.params == httpx.QueryParams({\"a\": \"123\"})\n\n url = httpx.URL(\n \"https://example.org:123/path/to/somewhere?b=456\", params={\"a\": \"123\"}\n )\n assert str(url) == \"https://example.org:123/path/to/somewhere?a=123\"\n assert url.params == httpx.QueryParams({\"a\": \"123\"})\n\n\ndef test_url_join():\n \"\"\"\n Some basic URL joining tests.\n \"\"\"\n url = httpx.URL(\"https://example.org:123/path/to/somewhere\")\n assert url.join(\"/somewhere-else\") == \"https://example.org:123/somewhere-else\"\n assert (\n url.join(\"somewhere-else\") == \"https://example.org:123/path/to/somewhere-else\"\n )\n assert (\n url.join(\"../somewhere-else\") == \"https://example.org:123/path/somewhere-else\"\n )\n assert url.join(\"../../somewhere-else\") == \"https://example.org:123/somewhere-else\"\n\n\ndef test_url_set_param_manipulation():\n \"\"\"\n Some basic URL query parameter manipulation.\n \"\"\"\n url = httpx.URL(\"https://example.org:123/?a=123\")\n assert url.copy_set_param(\"a\", \"456\") == \"https://example.org:123/?a=456\"\n\n\ndef test_url_add_param_manipulation():\n \"\"\"\n Some basic URL query parameter manipulation.\n \"\"\"\n url = httpx.URL(\"https://example.org:123/?a=123\")\n assert url.copy_add_param(\"a\", \"456\") == \"https://example.org:123/?a=123&a=456\"\n\n\ndef test_url_remove_param_manipulation():\n \"\"\"\n Some basic URL query parameter manipulation.\n \"\"\"\n url = httpx.URL(\"https://example.org:123/?a=123\")\n assert url.copy_remove_param(\"a\") == \"https://example.org:123/\"\n\n\ndef test_url_merge_params_manipulation():\n \"\"\"\n Some basic URL query parameter manipulation.\n \"\"\"\n url = httpx.URL(\"https://example.org:123/?a=123\")\n assert url.copy_merge_params({\"b\": \"456\"}) == \"https://example.org:123/?a=123&b=456\"\n\n\ndef test_relative_url_join():\n url = httpx.URL(\"/path/to/somewhere\")\n assert url.join(\"/somewhere-else\") == \"/somewhere-else\"\n assert url.join(\"somewhere-else\") == \"/path/to/somewhere-else\"\n assert url.join(\"../somewhere-else\") == \"/path/somewhere-else\"\n assert url.join(\"../../somewhere-else\") == \"/somewhere-else\"\n\n\ndef test_url_join_rfc3986():\n \"\"\"\n URL joining tests, as-per reference examples in RFC 3986.\n\n https://tools.ietf.org/html/rfc3986#section-5.4\n \"\"\"\n\n url = httpx.URL(\"http://example.com/b/c/d;p?q\")\n\n assert url.join(\"g\") == \"http://example.com/b/c/g\"\n assert url.join(\"./g\") == \"http://example.com/b/c/g\"\n assert url.join(\"g/\") == \"http://example.com/b/c/g/\"\n assert 
url.join(\"/g\") == \"http://example.com/g\"\n assert url.join(\"//g\") == \"http://g\"\n assert url.join(\"?y\") == \"http://example.com/b/c/d;p?y\"\n assert url.join(\"g?y\") == \"http://example.com/b/c/g?y\"\n assert url.join(\"#s\") == \"http://example.com/b/c/d;p?q#s\"\n assert url.join(\"g#s\") == \"http://example.com/b/c/g#s\"\n assert url.join(\"g?y#s\") == \"http://example.com/b/c/g?y#s\"\n assert url.join(\";x\") == \"http://example.com/b/c/;x\"\n assert url.join(\"g;x\") == \"http://example.com/b/c/g;x\"\n assert url.join(\"g;x?y#s\") == \"http://example.com/b/c/g;x?y#s\"\n assert url.join(\"\") == \"http://example.com/b/c/d;p?q\"\n assert url.join(\".\") == \"http://example.com/b/c/\"\n assert url.join(\"./\") == \"http://example.com/b/c/\"\n assert url.join(\"..\") == \"http://example.com/b/\"\n assert url.join(\"../\") == \"http://example.com/b/\"\n assert url.join(\"../g\") == \"http://example.com/b/g\"\n assert url.join(\"../..\") == \"http://example.com/\"\n assert url.join(\"../../\") == \"http://example.com/\"\n assert url.join(\"../../g\") == \"http://example.com/g\"\n\n assert url.join(\"../../../g\") == \"http://example.com/g\"\n assert url.join(\"../../../../g\") == \"http://example.com/g\"\n\n assert url.join(\"/./g\") == \"http://example.com/g\"\n assert url.join(\"/../g\") == \"http://example.com/g\"\n assert url.join(\"g.\") == \"http://example.com/b/c/g.\"\n assert url.join(\".g\") == \"http://example.com/b/c/.g\"\n assert url.join(\"g..\") == \"http://example.com/b/c/g..\"\n assert url.join(\"..g\") == \"http://example.com/b/c/..g\"\n\n assert url.join(\"./../g\") == \"http://example.com/b/g\"\n assert url.join(\"./g/.\") == \"http://example.com/b/c/g/\"\n assert url.join(\"g/./h\") == \"http://example.com/b/c/g/h\"\n assert url.join(\"g/../h\") == \"http://example.com/b/c/h\"\n assert url.join(\"g;x=1/./y\") == \"http://example.com/b/c/g;x=1/y\"\n assert url.join(\"g;x=1/../y\") == \"http://example.com/b/c/y\"\n\n assert url.join(\"g?y/./x\") == \"http://example.com/b/c/g?y/./x\"\n assert url.join(\"g?y/../x\") == \"http://example.com/b/c/g?y/../x\"\n assert url.join(\"g#s/./x\") == \"http://example.com/b/c/g#s/./x\"\n assert url.join(\"g#s/../x\") == \"http://example.com/b/c/g#s/../x\"\n\n\ndef test_url_set():\n urls = (\n httpx.URL(\"http://example.org:123/path/to/somewhere\"),\n httpx.URL(\"http://example.org:123/path/to/somewhere/else\"),\n )\n\n url_set = set(urls)\n\n assert all(url in urls for url in url_set)\n\n\ndef test_url_copywith_authority_subcomponents():\n copy_with_kwargs = {\n \"username\": \"username\",\n \"password\": \"password\",\n \"port\": 444,\n \"host\": \"example.net\",\n }\n url = httpx.URL(\"https://example.org\")\n new = url.copy_with(**copy_with_kwargs)\n assert str(new) == \"https://username:password@example.net:444\"\n\n\ndef test_url_copywith_netloc():\n copy_with_kwargs = {\n \"netloc\": b\"example.net:444\",\n }\n url = httpx.URL(\"https://example.org\")\n new = url.copy_with(**copy_with_kwargs)\n assert str(new) == \"https://example.net:444\"\n\n\ndef test_url_copywith_userinfo_subcomponents():\n copy_with_kwargs = {\n \"username\": \"tom@example.org\",\n \"password\": \"abc123@ %\",\n }\n url = httpx.URL(\"https://example.org\")\n new = url.copy_with(**copy_with_kwargs)\n assert str(new) == \"https://tom%40example.org:abc123%40%20%25@example.org\"\n assert new.username == \"tom@example.org\"\n assert new.password == \"abc123@ %\"\n assert new.userinfo == b\"tom%40example.org:abc123%40%20%25\"\n\n\ndef 
test_url_copywith_invalid_component():\n url = httpx.URL(\"https://example.org\")\n with pytest.raises(TypeError):\n url.copy_with(pathh=\"/incorrect-spelling\")\n with pytest.raises(TypeError):\n url.copy_with(userinfo=\"should be bytes\")\n\n\ndef test_url_copywith_urlencoded_path():\n url = httpx.URL(\"https://example.org\")\n url = url.copy_with(path=\"/path to somewhere\")\n assert url.path == \"/path to somewhere\"\n assert url.query == b\"\"\n assert url.raw_path == b\"/path%20to%20somewhere\"\n\n\ndef test_url_copywith_query():\n url = httpx.URL(\"https://example.org\")\n url = url.copy_with(query=b\"a=123\")\n assert url.path == \"/\"\n assert url.query == b\"a=123\"\n assert url.raw_path == b\"/?a=123\"\n\n\ndef test_url_copywith_raw_path():\n url = httpx.URL(\"https://example.org\")\n url = url.copy_with(raw_path=b\"/some/path\")\n assert url.path == \"/some/path\"\n assert url.query == b\"\"\n assert url.raw_path == b\"/some/path\"\n\n url = httpx.URL(\"https://example.org\")\n url = url.copy_with(raw_path=b\"/some/path?\")\n assert url.path == \"/some/path\"\n assert url.query == b\"\"\n assert url.raw_path == b\"/some/path?\"\n\n url = httpx.URL(\"https://example.org\")\n url = url.copy_with(raw_path=b\"/some/path?a=123\")\n assert url.path == \"/some/path\"\n assert url.query == b\"a=123\"\n assert url.raw_path == b\"/some/path?a=123\"\n\n\ndef test_url_copywith_security():\n \"\"\"\n Prevent unexpected changes on URL after calling copy_with (CVE-2021-41945)\n \"\"\"\n with pytest.raises(httpx.InvalidURL):\n httpx.URL(\"https://u:p@[invalid!]//evilHost/path?t=w#tw\")\n\n url = httpx.URL(\"https://example.com/path?t=w#tw\")\n bad = \"https://xxxx:xxxx@xxxxxxx/xxxxx/xxx?x=x#xxxxx\"\n with pytest.raises(httpx.InvalidURL):\n url.copy_with(scheme=bad)\n\n\ndef test_url_invalid():\n with pytest.raises(httpx.InvalidURL):\n httpx.URL(\"https://😇/\")\n\n\ndef test_url_invalid_type():\n class ExternalURLClass: # representing external URL class\n pass\n\n with pytest.raises(TypeError):\n httpx.URL(ExternalURLClass()) # type: ignore\n\n\ndef test_url_with_empty_query():\n \"\"\"\n URLs with and without a trailing `?` but an empty query component\n should preserve the information on the raw path.\n \"\"\"\n url = httpx.URL(\"https://www.example.com/path\")\n assert url.path == \"/path\"\n assert url.query == b\"\"\n assert url.raw_path == b\"/path\"\n\n url = httpx.URL(\"https://www.example.com/path?\")\n assert url.path == \"/path\"\n assert url.query == b\"\"\n assert url.raw_path == b\"/path?\"\n\n\ndef test_url_query_encoding():\n \"\"\"\n URL query parameters should use '%20' to encoding spaces,\n and should treat '/' as a safe character. 
This behaviour differs\n across clients, but we're matching browser behaviour here.\n\n See https://github.com/encode/httpx/issues/2536\n and https://github.com/encode/httpx/discussions/2460\n \"\"\"\n url = httpx.URL(\"https://www.example.com/?a=b c&d=e/f\")\n assert url.raw_path == b\"/?a=b%20c&d=e%2Ff\"\n\n url = httpx.URL(\"https://www.example.com/\", params={\"a\": \"b c\", \"d\": \"e/f\"})\n assert url.raw_path == b\"/?a=b%20c&d=e%2Ff\"\n\n\ndef test_url_with_url_encoded_path():\n url = httpx.URL(\"https://www.example.com/path%20to%20somewhere\")\n assert url.path == \"/path to somewhere\"\n assert url.query == b\"\"\n assert url.raw_path == b\"/path%20to%20somewhere\"\n\n\ndef test_ipv6_url():\n url = httpx.URL(\"http://[::ffff:192.168.0.1]:5678/\")\n\n assert url.host == \"::ffff:192.168.0.1\"\n assert url.netloc == b\"[::ffff:192.168.0.1]:5678\"\n\n\n@pytest.mark.parametrize(\n \"url_str\",\n [\n \"http://127.0.0.1:1234\",\n \"http://example.com:1234\",\n \"http://[::ffff:127.0.0.1]:1234\",\n ],\n)\n@pytest.mark.parametrize(\"new_host\", [\"[::ffff:192.168.0.1]\", \"::ffff:192.168.0.1\"])\ndef test_ipv6_url_copy_with_host(url_str, new_host):\n url = httpx.URL(url_str).copy_with(host=new_host)\n\n assert url.host == \"::ffff:192.168.0.1\"\n assert url.netloc == b\"[::ffff:192.168.0.1]:1234\"\n assert str(url) == \"http://[::ffff:192.168.0.1]:1234\"\n\n\n@pytest.mark.parametrize(\"host\", [\"[::ffff:192.168.0.1]\", \"::ffff:192.168.0.1\"])\ndef test_ipv6_url_from_raw_url(host):\n url = httpx.URL(scheme=\"https\", host=host, port=443, path=\"/\")\n\n assert url.host == \"::ffff:192.168.0.1\"\n assert url.netloc == b\"[::ffff:192.168.0.1]\"\n assert str(url) == \"https://[::ffff:192.168.0.1]/\"\n\n\ndef test_resolution_error_1833():\n \"\"\"\n See https://github.com/encode/httpx/issues/1833\n \"\"\"\n url = httpx.URL(\"https://example.com/?[]\")\n assert url.join(\"/\") == \"https://example.com/\"\n\n\ndef test_url_raw_compatibility():\n url = httpx.URL(\"https://www.example.com/path\")\n scheme, host, port, raw_path = url.raw\n\n assert scheme == b\"https\"\n assert host == b\"www.example.com\"\n assert port is None\n assert raw_path == b\"/path\"\n","sub_path":"tests/models/test_url.py","file_name":"test_url.py","file_ext":"py","file_size_in_byte":13776,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"448087142","text":"\"\"\"\nDefinition of urls for DjangoWebProject.\n\"\"\"\n\nfrom datetime import datetime\nfrom django.conf.urls import patterns, url, include\n\n# Uncomment the next lines to enable the admin:\n# from django.conf.urls import include\n# from django.contrib import admin\n# admin.autodiscover()\n\nurlpatterns = patterns('',\n url('', include('social.apps.django_app.urls', namespace='social')),\n url(r'^logout/$', 'directory.views.logout', name='logout'),\n url(r'^home/(?P.*)/', 'directory.views.home', name='home'),\n url(r'^home/', 'directory.views.home', name='home'),\n url(r'^createuser/', 'directory.views.createuser', name='createuser'),\n url(r'^edituser/', 'directory.views.edituser', name='edituser'),\n url(r'^profile/(?P.+)/$', 'directory.views.profile', name='profile'),\n url(r'^profile/', 'directory.views.profile', name='profile'),\n url(r'^search/$', 'directory.views.search', name='search'), \n url(r'', 'directory.views.login', name='login'),\n\n #url(r'^logout/$', 'directory.views.logout', name='logout'),\n # Uncomment the admin/doc line below to enable admin documentation:\n # url(r'^admin/doc/', 
include('django.contrib.admindocs.urls')),\n\n # Uncomment the next line to enable the admin:\n # url(r'^admin/', include(admin.site.urls)),\n)\n","sub_path":"DjangoWebProject/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1297,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"130487489","text":"##===============================================\n## Jiadong Mai (20557203)\n## CS 116 Winter 2018\n## Assignment 06, Question 2\n##===============================================\nimport check\n# Question 2\n# find_bigger(ints) consumes a (listof Int), ints, and returns a (listof Int)\n# which values in the list that are bigger than all values that came before \n# in the list\n# find_bigger: (listof Int) -> (listof Int)\n# Examples:\n# find_bigger([0, 4, 5, 4]) => [0, 4, 5]\n# find_bigger([1, 2, 4, 4]) => [1, 2, 4]\n# find_bigger([-2, -4, -4, -1]) => [-2, -1]\n# find_bigger([]) => []\n\ndef find_bigger(ints):\n if ints==[]:\n return []\n else:\n num = -1\n z = [ints[0]]\n max_num = ints[0]\n for k in ints:\n if max_num < k:\n max_num = max(max_num, k)\n z.append(k)\n num +=1\n else:\n num +=1\n return z\n\n# Test:\n# Test1: empty list\ncheck.expect('empty', find_bigger([]), [])\n# Test2: increasing order\ncheck.expect('Increase Order1', find_bigger([1,2,3,4,5,6,7]), [1,2,3,4,5,6,7])\ncheck.expect('Increase Order2', find_bigger([1,3,3,4,6,6,8]), [1,3,4,6,8])\ncheck.expect('Increase Order3', find_bigger([1,2,4,5,8,10]), [1,2,4,5,8,10])\ncheck.expect('Increase Order4', find_bigger([1,5,8,12,33,99]), [1,5,8,12,33,99])\ncheck.expect('Increase Order5', find_bigger([1,15,15,15,90]), [1,15,90])\ncheck.expect('Increase Order6', find_bigger([1,5,6,7]), [1,5,6,7])\n# Test3: decreasing order\ncheck.expect('Decreasing Order1', find_bigger([9,8,7,6,5,4,3]), [9])\ncheck.expect('Decreasing Order2', find_bigger([19,7,4,2]), [19])\ncheck.expect('Decreasing Order3', find_bigger([99,55,32,32,12,0]), [99])\ncheck.expect('Decreasing Order4', find_bigger([87,4,3,67,3]), [87])\ncheck.expect('Decreasing Order5', find_bigger([9,8,8,8,8,8,8]), [9])\ncheck.expect('Decreasing Order6', find_bigger([9,8,2,9,4]), [9])\n# Test4: up-down-up order\ncheck.expect('Up-Down-UP Order1', find_bigger([2,6,9,8,7,7,5,8,10]), [2,6,9,10])\ncheck.expect('Up-Down-UP Order2', find_bigger([2,19,7,5,100,101]), [2,19,100,101])\ncheck.expect('Up-Down-UP Order3', find_bigger([2,20,21,20,21,22]), [2,20,21,22])\n# Test5: All Same\ncheck.expect('All Same', find_bigger([2,2,2,2,2]), [2])\ncheck.expect('All Same', find_bigger([5,5,5]), [5])\n# Test6: Sample Question\ncheck.expect('Sample Question1', find_bigger([0, 4, 5, 4]), [0,4,5])\ncheck.expect('Sample Question2', find_bigger([1,2,4,4]), [1,2,4])\ncheck.expect('Sample Question3', find_bigger([-2,-4,-4,-1]), [-2,-1])\n\n","sub_path":"CS116/a06-j4mai/a06-j4mai/a06q2.py","file_name":"a06q2.py","file_ext":"py","file_size_in_byte":2539,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"127649039","text":"#class--- plan,design,blueprint\n#object--real world entity\n#reference--perform opn over object\n\n#syntax\n#class Classname:\n#methods\n\nclass Person:\n\n def setValues(self,ag,nam):\n self.age=ag\n self.name=nam\n\n def printValues(self):\n print(\"age=\",self.age)\n print(\"name=\",self.name)\n\n#object syntax 
referencename=ClassName()\nobj=Person()\nobj1=Person()\nobj1.setValues(26,\"vijay\")\nobj.setValues(27,\"aijay\")","sub_path":"objectorientedprogramming/objectorientedprogramming.py","file_name":"objectorientedprogramming.py","file_ext":"py","file_size_in_byte":434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"487359201","text":"import os\nimport sys\nimport tempfile\nfrom pathlib import Path\nfrom shutil import rmtree\n\nimport pytest\nfrom teletype.io import strip_format\n\nfrom mnamer.__main__ import run\nfrom mnamer.target import Target\nfrom tests import *\n\n\n@pytest.fixture(autouse=True)\ndef reset_args():\n \"\"\"Clears argv before and after running test.\"\"\"\n del sys.argv[:]\n sys.argv.append(\"mnamer\")\n\n\n@pytest.fixture()\ndef setup_test_path():\n \"\"\"Creates mixed media file types for testing in a temporary directory.\"\"\"\n orig_dir = os.getcwd()\n tmp_dir = tempfile.mkdtemp()\n for test_file in TEST_FILES.values():\n path = Path(tmp_dir, test_file)\n path.parent.mkdir(parents=True, exist_ok=True)\n path.open(\"w\").close()\n os.chdir(tmp_dir)\n yield\n os.chdir(orig_dir)\n rmtree(tmp_dir)\n\n\n@pytest.fixture\ndef e2e_run(capsys, setup_test_path):\n \"\"\"Runs main with provided arguments and returns stdout.\"\"\"\n\n def fn(*args):\n Target.reset_providers()\n for arg in args:\n sys.argv.append(arg)\n try:\n run()\n except SystemExit as e:\n code = e.code\n else:\n code = 0\n\n return E2EResult(\n code,\n strip_format(capsys.readouterr().out.strip())\n + strip_format(capsys.readouterr().err.strip()),\n )\n\n return fn\n","sub_path":"tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":1348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"284673511","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport argparse\nimport itertools\n\nimport psycopg2\nfrom sdfpy import load_sdf\n\n\ndef parse_args():\n \"\"\"\n Parse command line arguments.\n \"\"\"\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '-f', '--file',\n dest=\"filename\",\n required=True,\n help=\"input file (sdf)\")\n\n parser.add_argument(\n '-s', '--snapshot-id',\n type=int,\n dest=\"sid\",\n required=True,\n help=\"snapshot id\")\n\n parser.add_argument(\n '--db-host',\n default=\"127.0.0.1\",\n help=\"database host\")\n\n parser.add_argument(\n '--db-port',\n type=int,\n default=5432,\n help=\"database port\")\n\n parser.add_argument(\n '--db-name',\n default=\"scivis\",\n help=\"database name\")\n\n parser.add_argument(\n '--db-user',\n default=\"scivis\",\n help=\"database user\")\n\n parser.add_argument(\n '--db-pass',\n required=True,\n help=\"database password\")\n\n parser.add_argument(\n '--batch',\n type=int,\n default=100,\n help=\"insert batch size\")\n\n return parser.parse_args()\n\n\ndef connectdb(args):\n \"\"\"\n Connect to the database with the given arguments and return\n the connection.\n \"\"\"\n\n return psycopg2.connect(\n host=args.db_host,\n port=args.db_port,\n database=args.db_name,\n user=args.db_user,\n password=args.db_pass)\n\n\ndef insert_snapshot(args, snapshot):\n \"\"\"\n Insert a snapshot into the database.\n \"\"\"\n\n # connect to the database\n with connectdb(args) as conn:\n with conn.cursor() as cursor:\n # prepare values\n values = [\n args.sid,\n snapshot.parameters['h_100'],\n snapshot.parameters['L0'],\n snapshot.parameters['a'],\n snapshot.parameters['length_unit'],\n 
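# NOTE (comment added for clarity): these positional values must line up with\n            # the column order of the snapshots table, because the INSERT statement\n            # below does not name its columns.\n            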
snapshot.parameters['mass_unit'],\n snapshot.parameters['time_unit'],\n snapshot.parameters['velocity_unit']]\n\n # create the snapshot\n sql = cursor.mogrify(\"(%s, %s, %s, %s, %s, %s, %s, %s)\", values)\n cursor.execute(\"INSERT INTO snapshots VALUES {0};\".format(sql))\n conn.commit()\n\n\ndef chunks(l, n):\n \"\"\"\n Yield successive n-sized chunks from l.\n \"\"\"\n\n for i in xrange(0, len(l), n):\n yield l[i:i+n]\n\n\ndef insert_points(args, points):\n \"\"\"\n Insert points into the database.\n \"\"\"\n\n # connect to the database\n with connectdb(args) as conn:\n with conn.cursor() as cursor:\n # iterate through chunks of points\n for chunk in chunks(points, args.batch):\n # determine query parameters\n params = [\n \"%s\",\n \"%s\",\n \"ST_GeometryFromText('POINT(%s %s %s)')\",\n \"ST_GeometryFromText('POINT(%s %s %s)')\",\n \"ST_GeometryFromText('POINT(%s %s %s)')\",\n \"%s\"]\n\n # prepare query values\n values = map(\n lambda p: [p[0], args.sid] + map(float, p[1:]),\n chunk)\n\n # create the points\n sql = ','.join(\n cursor.mogrify(\"({0})\".format(\",\".join(params)), values)\n for values in values)\n\n cursor.execute(\"INSERT INTO points VALUES {0};\".format(sql))\n conn.commit()\n\n\nif __name__ == \"__main__\":\n # parse arugments\n args = parse_args()\n\n # load the snapshot\n snapshot = load_sdf(args.filename)\n\n # insert the snapshot\n insert_snapshot(args, snapshot)\n\n # insert the points\n points = zip(*[\n snapshot[e] for e in\n ['ident', 'x', 'y', 'z', 'vx', 'vy', 'vz', 'ax', 'ay', 'az', 'phi']])\n\n insert_points(args, points)\n","sub_path":"scivis2015/postgis/load-snapshot.py","file_name":"load-snapshot.py","file_ext":"py","file_size_in_byte":3931,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"575174144","text":"#!/usr/bin/python3\n# python 3.6\n\nimport random\nimport time\nimport statistics\n\nfrom paho.mqtt import client as mqtt_client\n\n# def average var\nLoopCounter = -1\nNumbers = [[],[],[],[]]\n\n# MQTT settings\nbroker = '127.0.0.1'\nport = 1883\n#topic = \"python/mqtt\"\ntopic = \"domoticz/in\"\n# generate client ID with pub prefix randomly\nclient_id = f'python-mqtt-{random.randint(0, 1000)}'\nusername = 'your-username'\npassword = 'your-password'\n\n# Sensors settings\n# temp min and max value incase of a sensor error\nmin = 10.0\nmax = 60.0\n# list of sensors to process, loopcounter gives position value\n# old id_list = ['28-0315902e73ff', '28-0315a87126ff', '28-0315a88e3bff']\nid_list = ['28-0317303a5bff', '28-0315a87126ff', '28-0315a88e3bff', '28-0517608afdff']\nid_name = ['Temp Aquarium : ', 'Temp Aquarium koeler warm : ', 'Temp1 : ', 'Temp-licht : ']\nidx_list = ['333', '332', '331', '7391']\n\ndef average(x, i):\n global LoopCounter\n # max memory positions\n max = 4\n # number of digits to return\n digits = 2\n if LoopCounter >= 0 and LoopCounter < max-1:\n LoopCounter +=1\n elif LoopCounter >= max-1 or LoopCounter == -1:\n LoopCounter = 0\n #fill list\n if len(Numbers[i]) < max:\n Numbers[i].insert(LoopCounter, x)\n elif len(Numbers[i]) == max:\n Numbers[i][LoopCounter] = x\n result = round(statistics.mean(Numbers[i]), digits)\n #print(i)\n #print(Numbers[i])\n #print(result)\n return result\n\ndef connect_mqtt():\n def on_connect(client, userdata, flags, rc):\n if rc == 0:\n print(\"Connected to MQTT Broker!\")\n else:\n print(\"Failed to connect, return code %d\\n\", rc)\n\n client = mqtt_client.Client(client_id)\n client.username_pw_set(username, password)\n client.on_connect = 
on_connect\n client.connect(broker, port)\n return client\n\n\ndef gettemp(id):\n try:\n mytemp = ''\n filename = 'w1_slave'\n f = open('/sys/bus/w1/devices/' + id + '/' + filename, 'r')\n line = f.readline() # read 1st line\n crc = line.rsplit(' ',1)\n crc = crc[1].replace('\\n', '')\n if crc=='YES':\n line = f.readline() # read 2nd line\n mytemp = line.rsplit('t=',1)\n else:\n mytemp = 99999\n f.close()\n\n return int(mytemp[1])\n\n except:\n return 99999\n\n\ndef publish(client):\n while True:\n loopcounter = 0\n for id in id_list:\n #print id_name[loopcounter] + '{:.3f}'.format(gettemp(id)/float(1000))\n temp = gettemp(id)/float(1000)\n print('temp : ', temp)\n # filter temp min and max value incase of a sensor error\n if temp >= min and temp <= max:\n #msg = f\"messages: {msg_count}\"\n begin_sl_char = \"{\"\n end_sl_char = \"}\"\n msg = f\"{begin_sl_char}\\\"idx\\\" : {idx_list[loopcounter]}, \\\"nvalue\\\" : 0, \\\"svalue\\\" : \\\"{str(average(temp, loopcounter))}\\\"{end_sl_char}\"\n result = client.publish(topic, msg)\n # result: [0, 1]\n status = result[0]\n if status == 0:\n print(f\"Send `{msg}` to topic `{topic}`\")\n else:\n print(f\"Failed to send message to topic {topic}\")\n loopcounter += 1\n time.sleep(1)\n\n\ndef run():\n client = connect_mqtt()\n client.loop_start()\n publish(client)\n\n\nif __name__ == '__main__':\n run()\n","sub_path":"ds18b20-mqtt-post.py","file_name":"ds18b20-mqtt-post.py","file_ext":"py","file_size_in_byte":3384,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"561429841","text":"# Copyright 2014 Diamond Light Source Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\n.. module:: display_formatter\n :platform: Unix\n :synopsis: Classes for formatting plugin list output in the configurator.\n.. 
moduleauthor:: Nicola Wadeson \n\"\"\"\n\nimport textwrap\n\nfrom colorama import Fore, Back, Style\n\nWIDTH = 85\n\n\nclass DisplayFormatter(object):\n\n def __init__(self, plugin_list):\n self.plugin_list_inst = plugin_list\n self.plugin_list = plugin_list.plugin_list\n\n def _get_string(self, **kwargs):\n out_string = []\n verbosity = kwargs.get('verbose', False)\n level = kwargs.get('level', 'user')\n\n start = kwargs.get('start', 0)\n stop = kwargs.get('stop', len(self.plugin_list))\n if stop == -1:\n stop = len(self.plugin_list)\n\n count = start\n plugin_list = self.plugin_list[start:stop]\n\n line_break = ('%s' % ('-'*WIDTH))\n out_string.append(line_break)\n for p_dict in plugin_list:\n count += 1\n description = \\\n self._get_description(WIDTH, level, p_dict, count, verbosity)\n out_string.append(description)\n out_string.append(line_break)\n return '\\n'.join(out_string)\n\n def _get_description(self, width, level, p_dict, count, verbose):\n if verbose == '-q':\n return self._get_quiet(p_dict, count, width)\n if not verbose:\n return self._get_default(level, p_dict, count, width)\n if verbose == '-v':\n return self._get_verbose(level, p_dict, count, width)\n if verbose == '-vv':\n return self._get_verbose_verbose(level, p_dict, count, width)\n\n def _get_plugin_title(self, p_dict, width, fore_colour, back_colour,\n active=\"\", quiet=False, pos=None):\n pos = \"%2s\" % (str(pos) + \")\") if pos else \"\"\n title = \"%s %s %s\" % (active, pos, p_dict['name'])\n title = title if quiet else title+\"(%s)\" % p_dict['id']\n width -= len(title)\n return back_colour + fore_colour + title + \" \"*width + Style.RESET_ALL\n\n def _get_synopsis(self, plugin_name, width, colour_on, colour_off):\n doc_str = \\\n self.plugin_list_inst._get_docstring_info(plugin_name)['synopsis']\n synopsis = \\\n self._get_equal_lines(doc_str, width, colour_on, colour_off, \" \"*2)\n if not synopsis:\n return ''\n return \"\\n\" + colour_on + synopsis + colour_off\n\n def _get_param_details(self, level, p_dict, width, desc=False):\n margin = 4\n keycount = 0\n joiner = \"\\n\" + \" \"*margin\n params = ''\n\n dev_keys = [k for k in p_dict['data'].keys() if k not in\n p_dict['user'] + p_dict['hide']]\n keys = p_dict['user'] if level == 'user' else p_dict['user'] + dev_keys\n\n for key in keys:\n keycount += 1\n temp = \"\\n %2i) %20s : %s\"\n # keycount = all_keys.index(key)+1\n params += temp % (keycount, key, p_dict['data'][key])\n if desc:\n pdesc = \" \".join(desc[key].split())\n pdesc = joiner.join(textwrap.wrap(pdesc, width=width-margin))\n temp = '\\n' + Fore.CYAN + ' '*margin + \"%s\" + Fore.RESET\n params += temp % pdesc\n return params\n\n def _get_extra_info(self, p_dict, width, colour_off, info_colour,\n warn_colour):\n extra_info = self.plugin_list_inst._get_docstring_info(p_dict['name'])\n info = self._get_equal_lines(extra_info['info'], width, info_colour,\n colour_off, \" \"*2)\n warn = self._get_equal_lines(extra_info['warn'], width, warn_colour,\n colour_off, \" \"*2)\n info = \"\\n\"+info if info else ''\n warn = \"\\n\"+warn if warn else ''\n return info, warn\n\n def _get_equal_lines(self, string, width, colour_on, colour_off, offset):\n if not string or not colour_on:\n return ''\n str_list = textwrap.wrap(string, width=width-len(offset))\n new_str_list = []\n for line in str_list:\n lwidth = width - len(line) - len(offset)\n new_str_list.append(\n colour_on + offset + line + \" \"*lwidth + colour_off)\n return \"\\n\".join(new_str_list)\n\n\nclass DispDisplay(DisplayFormatter):\n\n def 
__init__(self, plugin_list):\n super(DispDisplay, self).__init__(plugin_list)\n\n def _get_quiet(self, p_dict, count, width, quiet=True):\n active = \\\n '***OFF***' if 'active' in p_dict and not p_dict['active'] else ''\n pos = p_dict['pos'].strip() if 'pos' in p_dict.keys() else count\n fore = Fore.RED + Style.DIM if active else Fore.LIGHTWHITE_EX\n back = Back.LIGHTBLACK_EX\n return self._get_plugin_title(p_dict, width, fore, back,\n active=active, quiet=quiet, pos=pos)\n\n def _get_default(self, level, p_dict, count, width):\n title = self._get_quiet(p_dict, count, width)\n params = self._get_param_details(level, p_dict, width)\n return title + params\n\n def _get_verbose(self, level, p_dict, count, width, breakdown=False):\n title = self._get_quiet(p_dict, count, width, quiet=False)\n colour_on = Back.LIGHTBLACK_EX + Fore.LIGHTWHITE_EX\n colour_off = Back.RESET + Fore.RESET\n synopsis = \\\n self._get_synopsis(p_dict['name'], width, colour_on, colour_off)\n params = \\\n self._get_param_details(level, p_dict, width, desc=p_dict['desc'])\n if breakdown:\n return title, synopsis, params\n return title + synopsis + params\n\n def _get_verbose_verbose(self, level, p_dict, count, width):\n title, synopsis, param_details = \\\n self._get_verbose(level, p_dict, count, width, breakdown=True)\n info_c = Back.CYAN + Fore.LIGHTWHITE_EX\n warn_c = Back.WHITE + Fore.RED\n c_off = Back.RESET + Fore.RESET\n info, warn = self._get_extra_info(p_dict, width, c_off, info_c, warn_c)\n return title + synopsis + info + warn + param_details\n\n def _notices(self):\n width = 86\n warnings = self.get_warnings(width)\n if warnings:\n notice = Back.RED + Fore.WHITE + \"IMPORTANT PLUGIN NOTICES\" +\\\n Back.RESET + Fore.RESET + \"\\n\"\n border = \"*\"*width + '\\n'\n print (border + notice + warnings + '\\n'+border)\n\n def get_warnings(self, width):\n # remove display styling outside of this class\n colour = Back.RESET + Fore.RESET\n warnings = []\n names = []\n for plugin in self.plugin_list:\n if plugin['name'] not in names:\n names.append(plugin['name'])\n warn = self.plugin_list_inst._get_docstring_info(\n plugin['name'])['warn']\n if warn:\n for w in warn.split('\\n'):\n string = plugin['name'] + \": \" + w + '.'\n warnings.append(self._get_equal_lines(\n string, width-1, colour, colour, \" \"*2))\n return \"\\n\".join(\n [\"*\" + \"\\n \".join(w.split('\\n')) for w in warnings if w])\n\n\nclass ListDisplay(DisplayFormatter):\n\n def __init__(self, plugin_list):\n super(ListDisplay, self).__init__(plugin_list)\n\n def _get_quiet(self, p_dict, count, width):\n return self._get_plugin_title(p_dict, width, Fore.RESET, Back.RESET,\n quiet=True)\n\n def _get_default(self, level, p_dict, count, width):\n title = self._get_quiet(p_dict, count, width)\n synopsis = \\\n self._get_synopsis(p_dict['name'], width, Fore.CYAN, Fore.RESET)\n return title + synopsis\n\n def _get_verbose(self, level, p_dict, count, width, breakdown=False):\n default_str = self._get_default(level, p_dict, count, width)\n info_c = Fore.CYAN\n c_off = Back.RESET + Fore.RESET\n info, warn = self._get_extra_info(p_dict, width, c_off, info_c, info_c)\n return default_str + info\n\n def _get_verbose_verbose(self, level, p_dict, count, width):\n all_params = self._get_param_details('all', p_dict, 100)\n default_str = self._get_default(level, p_dict, count, width)\n info_c = Fore.CYAN\n warn_c = Fore.RED\n c_off = Back.RESET + Fore.RESET\n info, warn = self._get_extra_info(p_dict, width, c_off, info_c, warn_c)\n return default_str + info + warn + 
all_params","sub_path":"qml/i14testgui/display_formatter.py","file_name":"display_formatter.py","file_ext":"py","file_size_in_byte":9125,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"435079014","text":"#!/usr/bin/env python\n# pylint: disable=too-many-locals,compare-to-zero\nfrom datetime import datetime\nfrom itertools import chain\nfrom os import chmod, environ, listdir, makedirs\nfrom os.path import expanduser, realpath\nfrom pathlib import Path\nfrom shlex import quote\nfrom typing import AsyncIterator, List, Tuple, cast\nimport argparse\nimport asyncio\nimport asyncio.subprocess as sp\nimport plistlib\nimport sys\n\nfrom .constants import GLOBAL_DOMAIN_ARG, MAX_CONCURRENT_EXPORT_TASKS\nfrom .filters import BAD_DOMAINS\nfrom .mp_typing import PlistRoot\nfrom .plist2defaults import plist_to_defaults_commands\nfrom .processing import remove_data_fields\nfrom .shell import git\nfrom .utils import setup_logging_stderr\n\n__all__ = ('main', )\n\n\nasync def _has_git() -> bool:\n p = await sp.create_subprocess_shell('bash -c \"command -v git\"',\n stdout=sp.PIPE)\n await p.wait()\n return p.returncode == 0\n\n\nasync def _generate_domains() -> AsyncIterator[str]:\n for plist in (x for x in (\n Path.home().joinpath('Library/Preferences').glob('*.plist'))\n if x.stem and x.stem != '$(PRODUCT_BUNDLE_IDENTIFIER)'\n and not x.name.startswith('.')):\n yield plist.stem\n yield GLOBAL_DOMAIN_ARG\n\n\nasync def _defaults_export(domain: str,\n repo_prefs_dir: Path,\n debug: bool = False) -> Tuple[str, PlistRoot]:\n command = f'defaults export {quote(domain)}'\n out_domain = 'globalDomain' if domain == GLOBAL_DOMAIN_ARG else domain\n plist_out = repo_prefs_dir.joinpath(f'{out_domain}.plist')\n path_quoted = quote(str(plist_out))\n command += f' {path_quoted}'\n log = setup_logging_stderr(verbose=debug)\n log.debug('Running: %s', command)\n p = await asyncio.create_subprocess_shell(command,\n stdout=sp.PIPE,\n stderr=sp.PIPE)\n await p.wait()\n if p.returncode != 0:\n assert p.stderr is not None\n err = (await p.stderr.read()).decode('utf-8')\n raise RuntimeError(\n f'Non-zero exit status from defaults. 
STDERR: {err}')\n    with plist_out.open('rb') as f:\n        return domain, await remove_data_fields(plistlib.load(f))\n\n\nasync def _setup_out_dir(out_dir: str) -> Tuple[str, Path]:\n    out_dir = realpath(out_dir)\n    repo_prefs_dir = Path(out_dir).joinpath('Preferences')\n    # pylint: disable=too-many-try-statements\n    try:\n        makedirs(out_dir)\n        makedirs(str(repo_prefs_dir))\n    except FileExistsError:\n        pass\n    # pylint: enable=too-many-try-statements\n    return out_dir, repo_prefs_dir\n\n\nasync def _main(out_dir: str,\n                debug: bool = False,\n                commit: bool = False) -> int:\n    log = setup_logging_stderr(verbose=debug)\n    has_git = await _has_git()\n\n    out_dir, repo_prefs_dir = await _setup_out_dir(out_dir)\n    export_tasks = []\n    all_data: List[Tuple[str, PlistRoot]] = []\n    async for domain in _generate_domains():\n        # spell-checker: disable\n        if domain in ('com.apple.Music', 'com.apple.TV',\n                      'com.apple.identityservices.idstatuscache',\n                      'com.apple.security.KCN'):\n            # spell-checker: enable\n            continue\n        export_tasks.append(_defaults_export(domain, repo_prefs_dir, debug))\n        if len(export_tasks) == MAX_CONCURRENT_EXPORT_TASKS:\n            all_data.extend(await asyncio.gather(*export_tasks))\n            export_tasks = []\n    all_data.extend(await asyncio.gather(*export_tasks))\n\n    exec_defaults = Path(out_dir).joinpath('exec-defaults.sh')\n    tasks = []\n    known_domains = []\n    with exec_defaults.open('w+') as f:\n        f.write('#!/usr/bin/env bash\\n')\n        f.write('# shellcheck disable=SC1112,SC2088,SC1010,SC2016,SC1003\\n')\n        f.write('# This file is generated, but is versioned.\\n\\n')\n        for domain, root in sorted(all_data, key=lambda x: x[0]):\n            if not root:\n                continue\n            async for line in plist_to_defaults_commands(domain, root, debug):\n                f.write(line + '\\n')\n            out_domain = ('globalDomain'\n                          if domain == GLOBAL_DOMAIN_ARG else domain)\n            known_domains.append(out_domain)\n            plist_path = repo_prefs_dir.joinpath(f'{out_domain}.plist')\n            cmd = f'plutil -convert xml1 {quote(str(plist_path))}'\n            log.debug('Executing: %s', cmd)\n            p = await asyncio.create_subprocess_shell(cmd)\n            tasks.append(p.wait())\n    chmod(str(exec_defaults), 0o755)\n    await asyncio.wait(tasks)\n\n    if has_git:\n        # Clean up very old plists\n        delete_with_git = [\n            str(j[1])\n            for j in ((file, file_)\n                      for file, file_ in ((x, repo_prefs_dir.joinpath(x))\n                                          for x in listdir(str(repo_prefs_dir))\n                                          if x != '.gitignore')\n                      if file[:-6] not in known_domains and file_.exists()\n                      if not file_.is_dir())\n        ]\n        await git(chain(('rm', '-f', '--ignore-unmatch', '--'),\n                        delete_with_git),\n                  check=True,\n                  work_tree=out_dir,\n                  debug=debug)\n        all_files = ' '.join(map(quote, delete_with_git))\n        cmd = f'rm -f -- {all_files}'\n        log.debug('Executing: %s', cmd)\n        p = await asyncio.create_subprocess_shell(cmd)\n        await p.wait()\n\n    delete_with_git_ = []\n    delete_with_rm = []\n    for x in BAD_DOMAINS:\n        if x == 'MobileMeAccounts':\n            continue\n        plist = repo_prefs_dir.joinpath(f'{x}.plist')\n        delete_with_git_.append(str(plist))\n        delete_with_rm.append(quote(str(plist)))\n    if has_git:\n        await git(chain(('rm', '-f', '--ignore-unmatch', '--'),\n                        delete_with_git_),\n                  check=True,\n                  debug=debug,\n                  work_tree=out_dir)\n    deletions = ' '.join(delete_with_rm)\n    cmd = f'rm -f -- {deletions}'\n    log.debug('Executing: %s', cmd)\n    p = await asyncio.create_subprocess_shell(cmd)\n    await p.wait()\n\n    if has_git and commit:\n        log.debug('Committing changes')\n        await git(('add', '.'), work_tree=out_dir, check=True)\n        await git(('commit', '--no-gpg-sign', '--quiet', '--no-verify',\n                   '--author=macprefs ', '-m',\n                   
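# (comment added for clarity) --no-gpg-sign and --no-verify keep this\n                   # unattended commit from blocking on a signing key or on commit hooks,\n                   # and --quiet suppresses the usual commit summary.\n                   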
f'Automatic commit @ {datetime.now().strftime(\"%c\")}'),\n                  work_tree=out_dir,\n                  check=True)\n\n    return 0\n\n\nclass Namespace(argparse.Namespace):  # pylint: disable=too-few-public-methods\n    \"\"\"Arguments to main().\"\"\"\n    output_directory: str\n    debug: bool\n    commit: bool\n\n\ndef main() -> int:\n    \"\"\"Entry point.\"\"\"\n    parser = argparse.ArgumentParser()\n    parser.add_argument('-o',\n                        '--output-directory',\n                        default='.',\n                        help='Where to store the exported data')\n    parser.add_argument('-d', '--debug', action='store_true')\n    parser.add_argument('-c',\n                        '--commit',\n                        action='store_true',\n                        help='Commit the changes with Git')\n    args = cast(Namespace, parser.parse_args())\n    loop = asyncio.get_event_loop()\n    ret = loop.run_until_complete(\n        _main(args.output_directory, debug=args.debug, commit=args.commit))\n    loop.close()\n    return ret\n\n\nasync def _install_job(output_dir: str) -> int:\n    p = await sp.create_subprocess_shell('bash -c \"command -v prefs-export\"',\n                                         stdout=sp.PIPE)\n    assert p.stdout is not None\n    prefs_export_path = (await p.stdout.read()).decode().strip()\n    home = environ['HOME']\n    plist_path = expanduser('~/Library/LaunchAgents/sh.tat.macprefs.plist')\n    with open(plist_path, 'w+') as f:\n        f.write(f'''<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n<!DOCTYPE plist PUBLIC \"-//Apple//DTD PLIST 1.0//EN\" \"http://www.apple.com/DTDs/PropertyList-1.0.dtd\">\n<plist version=\"1.0\">\n<dict>\n    <key>Label</key>\n    <string>sh.tat.macprefs</string>\n    <key>ProgramArguments</key>\n    <array>\n        <string>{prefs_export_path}</string>\n        <string>--output-directory</string>\n        <string>{realpath(output_dir)}</string>\n        <string>--commit</string>\n    </array>\n    <key>StartCalendarInterval</key>\n    <dict>\n        <key>Hour</key>\n        <integer>0</integer>\n        <key>Minute</key>\n        <integer>0</integer>\n    </dict>\n    <key>StandardOutPath</key>\n    <string>{home}/Library/Logs/macprefs.log</string>\n    <key>StandardErrorPath</key>\n    <string>{home}/Library/Logs/macprefs.log</string>\n    <key>RunAtLoad</key>\n    <true/>\n</dict>\n</plist>\n''')\n    await (await sp.create_subprocess_exec('launchctl',\n                                           'stop',\n                                           plist_path,\n                                           stderr=sp.PIPE,\n                                           stdout=sp.PIPE)).wait()\n    await (await sp.create_subprocess_exec('launchctl',\n                                           'unload',\n                                           '-w',\n                                           plist_path,\n                                           stderr=sp.PIPE,\n                                           stdout=sp.PIPE)).wait()\n    proc1 = await sp.create_subprocess_exec('launchctl', 'load', '-w',\n                                            plist_path)\n    await proc1.wait()\n    proc2 = await sp.create_subprocess_exec('launchctl', 'start', plist_path)\n    await proc2.wait()\n    return 0 if proc1.returncode == 0 and proc2.returncode == 0 else 1\n\n\ndef install_job() -> int:\n    \"\"\"Job installer command entry point.\"\"\"\n    parser = argparse.ArgumentParser()\n    parser.add_argument('-o',\n                        '--output-directory',\n                        default=expanduser('~/.config/defaults'),\n                        help='Where to store the exported data')\n    args = cast(Namespace, parser.parse_args())\n    loop = asyncio.get_event_loop()\n    ret = loop.run_until_complete(_install_job(args.output_directory))\n    loop.close()\n    return ret\n\n\nif __name__ == \"__main__\":\n    sys.exit(main())\n","sub_path":"macprefs/command.py","file_name":"command.py","file_ext":"py","file_size_in_byte":10377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}{"seq_id":"62673217","text":"import requests\nimport socket\nimport psutil\nimport struct\nfrom sys import platform\nfrom time import sleep\nfrom requests.models import PreparedRequest\nfrom mycloud.logger import log, add_request_count, save_files\nfrom mycloud.mycloudapi.auth import MyCloudAuthenticator, AuthMode\nfrom mycloud.mycloudapi import MyCloudRequest\nfrom mycloud.mycloudapi.request import ContentType\nfrom mycloud.mycloudapi.request import Method\nfrom mycloud.constants import WAIT_TIME_MULTIPLIER, RESET_SESSION_EVERY\n\n\nclass MyCloudRequestExecutor:\n    def __init__(self, authenticator: MyCloudAuthenticator):\n        self._request_count_for_current_session = 0\n        self.authenticator = authenticator\n        self.session = requests.Session()\n        self._reset_wait_time()\n\n    def 
execute_request(self, request: MyCloudRequest):\n # TODO: also use aiohttp instead of requests\n content_type = request.get_content_type()\n token = self.authenticator.get_token()\n headers = self._get_headers(content_type, token)\n request_url = request.get_request_url()\n request_method = request.get_method()\n data_generator = request.get_data_generator()\n if request.is_query_parameter_access_token():\n req = PreparedRequest()\n req.prepare_url(request_url, {'access_token': token})\n request_url = req.url\n\n if request_method == Method.GET:\n if data_generator:\n raise ValueError('Cannot have a data generator for HTTP GET')\n response = self.session.get(request_url, headers=headers)\n elif request_method == Method.PUT:\n response = self.session.put(request_url, headers=headers) if not data_generator else requests.put(\n request_url, headers=headers, data=data_generator)\n else:\n raise ValueError('Invalid request method')\n if self._request_count_for_current_session % RESET_SESSION_EVERY == 0:\n self.reset_session()\n save_files()\n self._request_count_for_current_session += 1\n add_request_count(type(request).__name__)\n ignored = request.ignored_error_status_codes()\n retry = self._check_validity(response, ignored, request_url)\n if retry:\n return self.execute_request(request)\n return response\n\n def reset_session(self):\n self.session.close()\n del self.session\n self.session = requests.Session()\n\n @staticmethod\n def _get_ip_address(ifname):\n network_ifs = psutil.net_if_addrs()\n if ifname not in network_ifs:\n raise ValueError('Could not find network if {}'.format(ifname))\n\n selected = network_ifs[ifname]\n addr = list(filter(lambda x: x.family ==\n socket.AddressFamily.AF_INET, selected))[0]\n return addr.address\n\n def _reset_wait_time(self):\n self.wait_time = 10\n\n def _get_headers(self, content_type: ContentType, bearer_token: str):\n headers = requests.utils.default_headers()\n headers['Content-Type'] = content_type\n headers['Authorization'] = 'Bearer ' + bearer_token\n headers['User-Agent'] = 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/56.0.2924.87 Safari/537.36'\n return headers\n\n def _check_validity(self, response, ignored, request_url: str):\n separately_handled = [401, 500, 404, 400, 409, 502]\n\n retry = False\n if response.status_code == 401:\n if self.authenticator.auth_mode == AuthMode.Token:\n raise ValueError('Bearer token is invalid')\n else:\n self.authenticator.invalidate_token()\n retry = True\n\n if (response.status_code == 500 and 500 not in ignored) or (response.status_code == 502 and 502 not in ignored):\n log(f'HTTP {response.status_code} returned from server', error=True)\n log('ERR: {}'.format(str(response.content)), error=True)\n log('Waiting {} seconds until retry...'.format(self.wait_time))\n sleep(self.wait_time)\n retry = True\n # TODO: make logarithmic instead of exponential?\n self.wait_time = int(self.wait_time * WAIT_TIME_MULTIPLIER)\n else:\n self._reset_wait_time()\n\n log('Checking status code {} (Status {})...'.format(\n request_url, str(response.status_code)))\n if response.status_code == 404 and 404 not in ignored:\n raise ValueError('File not found in myCloud')\n\n if response.status_code == 400 and 400 not in ignored:\n raise ValueError('Bad Request: {}'.format(response.text))\n\n if response.status_code == 409 and 409 not in ignored:\n raise ValueError('Conflict: {}'.format(response.text))\n\n if not str(response.status_code).startswith('2') and response.status_code not in 
separately_handled:\n log('ERR: Status code {}!'.format(str(response.status_code)))\n log('ERR: {}'.format(str(response.content)))\n raise ValueError('Error while performing myCloud request')\n return retry\n","sub_path":"mycloud/mycloudapi/request_executor.py","file_name":"request_executor.py","file_ext":"py","file_size_in_byte":5144,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"545927742","text":"# -*- coding:utf-8 -*-\n\nimport io\nimport time\nimport copy\nimport random\n\nfrom .utils import *\nfrom corgi.singleton import Singleton\n\n\nclass FileBuffer(object):\n def __init__(self, path, ttl):\n self.path = path\n self.ttl = ttl\n self.hits = 0\n self._buffer = None\n self.last_refresh_time = None\n\n @property\n def buffer(self):\n buffer = self._buffer\n if self.last_refresh_time is None or self.is_expired:\n fd = open(self.path)\n buffer = io.StringIO(fd.read())\n fd.close()\n self._buffer = buffer\n self.hits += 1\n return copy.deepcopy(buffer)\n\n @property\n def is_expired(self):\n assert self.last_refresh_time is not None\n return (time.time() - self.last_refresh_time) > self.ttl\n\n\nclass FileBufferManager(object):\n __meta_class__ = Singleton\n\n def __init__(self):\n self.ttl = 5\n self.max_items = 100\n self.file_buffers = {}\n\n def open(self, path):\n # buffer 数量超过限制之后,随机移除一个 item\n if len(self.file_buffers) > self.max_items:\n self.file_buffers.pop(random.choice(list(self.file_buffers.keys())))\n assert_path_exists(path)\n assert_file_type(path, FSType.FILE)\n fb = self.file_buffers.get(path, FileBuffer(path, self.ttl))\n if path not in self.file_buffers:\n self.file_buffers[path] = fb\n return fb.buffer\n\n\n","sub_path":"fs/file_buffer.py","file_name":"file_buffer.py","file_ext":"py","file_size_in_byte":1466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"489356857","text":"import os\nimport numpy as np\nimport pandas as pd\nfrom sklearn.decomposition import PCA\nimport matplotlib\nimport loompy\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\n\nMAX_COMPONENTS = 100\n\n\nloom_file = snakemake.input['loom']\npca_out = snakemake.output['latent']\n\nout_dir = os.path.dirname(pca_out)\n\n\nwith loompy.connect(loom_file, 'r') as ds:\n expression = ds.layers['scaled'][:]\n ens_ids = ds.ra['EnsID'][:]\n barcodes = ds.ca['Barcode'][:]\n\nexpression = pd.DataFrame(expression, index=ens_ids, columns=barcodes)\nexpression = np.log2(expression + 1)\n\n# Gene filtering\ngenes = snakemake.input['genes']\ngenes = pd.read_table(genes, header=None).iloc[:, 0].tolist()\nexpression = expression.loc[genes]\n\n\nmodel = PCA(n_components=MAX_COMPONENTS, random_state=0)\nmodel.fit(expression.values.T)\n\ngene_components = model.components_.T\ngene_components = pd.DataFrame(gene_components, index=expression.index)\n\n# Now, look at null data\nnull = expression.values.copy()\nfor i in range(null.shape[0]):\n np.random.shuffle(null[i, :])\n\nnull_model = PCA(n_components=MAX_COMPONENTS, random_state=0)\nnull_model.fit(null.T)\n\n\n# First value less than = num PCs\n# E.g., if index 21 is first value less than null, then 0-20\n# are greater than null, and num_pcs = 21\n# num_pcs = np.nonzero(\n# (model.explained_variance_ < null_model.explained_variance_)\n# )[0][0]\n\ntry:\n if snakemake.params['doJackstraw']:\n # First value less than = num PCs\n # E.g., if index 21 is first value less than null, then 0-20\n # are greater than null, and num_pcs = 21\n num_pcs = 
np.nonzero(\n            (model.explained_variance_ < null_model.explained_variance_)\n        )[0][0]\n    else:\n        num_pcs = 20\nexcept AttributeError:\n    num_pcs = 20\n\n# Diagnostic plot\nxmin = 0\nxmax = min(num_pcs*2, 200)\nymin = 0\nymax = max(model.explained_variance_.max(),\n           null_model.explained_variance_.max())*1.2\n\nplt.figure()\nplt.plot(np.arange(len(model.explained_variance_))+1,\n         model.explained_variance_, 'o-', label='Expression')\nplt.plot(np.arange(len(null_model.explained_variance_))+1,\n         null_model.explained_variance_, 'o-', label='Null')\n\nplt.xlim(xmin, xmax)\nplt.ylim(ymin, ymax)\n\nplt.vlines(num_pcs, ymin, ymax, linestyle='dashed', color='red')\nplt.legend(loc='best')\nplt.title('Significant PCs: {}'.format(num_pcs))\nplt.savefig(os.path.join(out_dir, 'PermutationPA.png'))\n\npcs = model.fit_transform(expression.values.T)\npcs = pcs[:, 0:num_pcs]\n\npcs = pd.DataFrame(pcs, index=expression.columns)\n\npcs.to_csv(pca_out, sep='\\t')\n\ntry:\n    components_file = snakemake.output['components']\n    gene_components.to_csv(components_file, sep=\"\\t\")\nexcept AttributeError:\n    pass\n","sub_path":"pipelineScripts/pca/pca.py","file_name":"pca.py","file_ext":"py","file_size_in_byte":2709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}{"seq_id":"287271918","text":"#!/usr/bin/python3\n\nimport serial\nimport subprocess\nimport socket\nimport os\nimport shutil\nimport time\nfrom time import gmtime, strftime\nfrom PIL import Image, ImageDraw, ImageFont, ExifTags\nimport glob\nimport re\nfrom collections import deque\nimport syslog\nfrom datetime import datetime\n\nfrom picamera import PiCamera\nfrom time import sleep\nfrom fractions import Fraction\n\n# shutter speed (exposure time) in microseconds\nraspistill_ss = 6000000 \n\ntag_raspistill_ss = str(round(raspistill_ss/1000000, 2))\n\n#How many images do you want?\n#number_of_images = 3\n\n# Sensitivity (ISO)\nISO = 800\n\n#Dynamic Range Compression (DRC) options: off,low,med,high\ndrc = 'off'\n\n#White Balance: off,auto,sun,cloud,shade,tungsten,fluorescent,incandescent,flash,horizon\nawb = 'off'\n\n# Manually set white balance gains\nwhite_balance_gains = '1.03125, 1.8086'\n\n#Brightness\nbr = 50\n\n\n#interval between images in milliseconds\nraspistill_tl = 0\n\n\n\n# total time of the run in milliseconds (controls how many photos you take)\n# Exposure time converted to milliseconds + interval between images X number of images + an extra interval\n#raspistill_t = (((raspistill_ss/1000) + raspistill_tl) * (number_of_images +1)) \n#raspistill_t = (((raspistill_ss/1000) + raspistill_tl) * (number_of_images)) \nraspistill_t = 65000\n\nrun_start_time = time.time()\nprint (\"run_start_time = \" + str(run_start_time))\n\n\nlogfile = open(\"/home/pi/Tools/Camera/gonet.log\",\"a+\")\nlogfile.write(\"run_start_time = \" + str(run_start_time) + \"\\n\")\nnow = datetime.now()\nlogfile.write(\"run_start_time = \" + now.strftime(\"%m/%d/%Y, %H:%M:%S\") + \"\\n\")\n\nprint (\"run_start_time = \" + now.strftime(\"%m/%d/%Y, %H:%M:%S\"))\n\nscratch_dir = \"/home/pi/Tools/Camera/scratch/\"\nif not os.path.exists(scratch_dir):\n    os.makedirs(scratch_dir)\n\nimage_dir = \"/home/pi/images/\"\nif not os.path.exists(image_dir):\n    os.makedirs(image_dir)\n\nthumbs_dir = \"/home/pi/_sfpg_data/thumb/\"\nif not os.path.exists(thumbs_dir):\n    os.makedirs(thumbs_dir)\n\n# remove any zero length file from scratch dir\n\nfor filename in os.listdir(scratch_dir):\n    if os.path.getsize(scratch_dir + filename) == 0:\n        print(\"Deleting zero length file \" + scratch_dir + filename)\n        logfile.write((\"Deleting zero length file \" + scratch_dir + filename) + \"\\n\")\n        os.remove(scratch_dir + filename)\n\n\n# move any remaining jpg files to image dir\n\nfor filename in os.listdir(scratch_dir):\n    if filename.endswith(\".jpg\"):\n        print(\"Moving \" + scratch_dir + filename + \" to \" + image_dir + filename)\n        logfile.write((\"Moving \" + scratch_dir + filename + \" to \" + image_dir + filename) + \"\\n\")\n        os.rename(scratch_dir + filename, image_dir + filename)\n\n# Here date and time are captured from GPS $GPRMC if available.\n# The time service from the network is shut down, and system time is set from the GPS value.\n# Then the network time service is restarted in case the device is being tested indoors\n# and network is accessible and GPS is not. \n#\n# This is in case the program is executed without access to network.\n# It is run every time the program is executed in case GPS is not valid when the GONet\n# device is first turned on. \n#\nport = \"/dev/serial0\"\nser = serial.Serial(port, baudrate = 9600, timeout = 0.5)\n\nprint (\"The sysdate at startup is: \")\nsubprocess.call('date')\n\n\ni = 0\nwhile i < 5:\n    time.sleep(1.0)\n    data = ser.read_until().decode('utf_8') \n    sdata = data.split(\",\")\n    if sdata[0] == '$GPRMC' and sdata[2] == 'V':\n        print (sdata)\n    elif sdata[0] == '$GPRMC' and sdata[2] != 'V':\n\n        sdate = sdata[9]\n        stime = sdata[1]\n        \n        if sdate[2:4] == '01':\n        \tsmonth = \"JAN\"\n        elif sdate[2:4] == '02':\n        \tsmonth = \"FEB\"\n        elif sdate[2:4] == '03':\n        \tsmonth = \"MAR\"\n        elif sdate[2:4] == '04':\n        \tsmonth = \"APR\"\n        elif sdate[2:4] == '05':\n        \tsmonth = \"MAY\"\n        elif sdate[2:4] == '06':\n        \tsmonth = \"JUN\"\n        elif sdate[2:4] == '07':\n        \tsmonth = \"JUL\"\n        elif sdate[2:4] == '08':\n        \tsmonth = \"AUG\"\n        elif sdate[2:4] == '09':\n        \tsmonth = \"SEP\"\n        elif sdate[2:4] == '10':\n        \tsmonth = \"OCT\"\n        elif sdate[2:4] == '11':\n        \tsmonth = \"NOV\"\n        elif sdate[2:4] == '12':\n        \tsmonth = \"DEC\"\n        \n        print (sdate[0:2] + \" \" + smonth + \" \" +\"20\"+ sdate[4:7] + \" \" + stime[0:2] + \":\" + stime[2:4] + \":\" + stime[4:6])\n        date_time = sdate[0:2] + \" \" + smonth + \" \" +\"20\"+ sdate[4:7] + \" \" + stime[0:2] + \":\" + stime[2:4] + \":\" + stime[4:6]\n        \n        print (\"Shutting down network time service\")\n        command = ['sudo', 'systemctl', 'stop', 'systemd-timesyncd.service']\n        subprocess.call(command)\n        \n        print(\"The sysdate before setting with GPS is:\")\n        subprocess.call('date')\n\n        # Uncomment below for testing while on network.\n        # This \"holds\" the GPS time for 5 seconds. \n        # If on a network the time will be set by the network; the GPS will then set the time, but 5 seconds slow, and when the network time service is\n        # restarted, the time will be properly set. 
\n        #time.sleep(5.0)\n\n        print (\"Setting time via GPS.\")\n        syslog.syslog(\"Setting time via GPS\") \n        command = ['sudo', 'date', '-s', date_time]\n        subprocess.call(command)\n        \n        print(\"The sysdate after setting with GPS is:\")\n        subprocess.call('date')\n\n        break\n\n    i += 1\n\nprint (\"Restarting network time service\")\ncommand = ['sudo', 'systemctl', 'start', 'systemd-timesyncd.service']\nsubprocess.call(command)\nsubprocess.call('date')\n\n##### End of setting sysdate #####\n\nrun_start_time = time.time()\nprint (\"run_start_time = \" + str(run_start_time))\n\n\n#logfile = open(\"/home/pi/Tools/Camera/gonet.log\",\"a+\")\n#logfile.write(\"run_start_time = \" + str(run_start_time) + \"\\n\")\n#now = datetime.now()\n#logfile.write(\"run_start_time = \" + now.strftime(\"%m/%d/%Y, %H:%M:%S\") + \"\\n\")\n#print (\"run_start_time = \" + now.strftime(\"%m/%d/%Y, %H:%M:%S\"))\n\n#logfile.write(\"images = \" + str(number_of_images) + \" interval = \" + str(raspistill_tl) + \" shutter = \" + str(raspistill_ss) + \"\\n\")\nlogfile.write(\"interval = \" + str(raspistill_tl) + \" shutter = \" + str(raspistill_ss) + \"\\n\")\n\n\n\n###############################\n##### Start of functions #####\n###############################\n\ndef disk_stat(path):\n    disk = os.statvfs(path)\n    #percent = (disk.f_blocks - disk.f_bfree) * 100 / (disk.f_blocks -disk.f_bfree + disk.f_bavail) + 1\n    percent = (disk.f_bavail * 100.0) / disk.f_blocks\n    return percent\n\n##### end of disk_stat #####\n\ndef parse_gga(sdata):\n    lat = sdata[2]\n    lat_dir = sdata[3]\n    long = sdata[4]\n    long_dir = sdata[5]\n    alt = sdata[9]\n\n    return lat + \" \" + lat_dir + \" \" + long + \" \" + long_dir + \" \" + alt + \" M\"\n\n##### end of parse gga #####\n\ndef convert_to_dd(x):\n    \n    x=float(x)\n    degrees = int(x) // 100\n    minutes = (x - 100*degrees) / 60\n    dd = str(round((degrees + minutes),6))\n    \n    return dd\n\n##### end of convert_to_dd #####\n\n#def lat_long_decode(coord):\n#    #Converts DDDMM.MMMMM > DD MM' SS.SSS\" \n#    x = coord.split(\".\")\n#    head = x[0]\n#    tail = x[1]\n#    deg = head[0:-2]\n#    min = head[-2:]\n#    #sec = str((float(coord[-6:]) * 60.0))\n#    sec = str((float(\".\" + tail) * 60.0))\n#\n#    #return deg + \" \" + min + \" \" + sec + \" \"\n#    return deg + u\"\\u00b0 \" + min + \"\\' \" + sec + \"\\\"\"\n#\n###### end of lat_long_decode #####\n\n\n\ndef convert_raw_gps_fix_to_image_gps_fix(raw_gps_fix):\n    #4203.4338X N 08748.7831X W 215.3 M\n    print(\"here is the raw gps fix, printed for debugging\")\n    print(raw_gps_fix)\n    sraw_gps_fix = raw_gps_fix.split(\" \")\n    #lat = lat_long_decode(sraw_gps_fix[0])\n    lat = convert_to_dd(sraw_gps_fix[0])\n    lat_dir = sraw_gps_fix[1]\n    #long = lat_long_decode(sraw_gps_fix[2])\n    long = convert_to_dd(sraw_gps_fix[2])\n    long_dir = sraw_gps_fix[3]\n    alt = sraw_gps_fix[4]\n\n    return lat + \" \" + lat_dir + \" \" + long + \" \" + long_dir + \" \" + alt + \" M\"\n\n##### end of convert_raw_gps_fix_to_image_gps_fix #####\n\ndef convert_raw_gps_fix_to_exif_lat(raw_gps_fix):\n    raw_lat = (raw_gps_fix.split(\" \"))[0]\n    deg = raw_lat[0:2]\n    min = raw_lat[2:4]\n    sec = str(int(float(raw_lat[4:9]) * 60.0))\n    #sec = str(float(raw_lat[5:9]) * 60.0 / 10000)\n    return deg + \"/1,\" + min + \"/1,\" + sec + \"/1\"\n    #return deg + \"/1,\" + min + \"/1,\" + sec + \"/1000\"\n\n##### end of convert_raw_gps_fix_to_exif_lat #####\n\ndef convert_raw_gps_fix_to_exif_lat_dir(raw_gps_fix):\n    print(\"I'm in convert_raw_gps_fix_to_exif_lat_dir\")\n    return (raw_gps_fix.split(\" \"))[1]\n\n##### end of convert_raw_gps_fix_to_exif_lat_dir #####
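\n# Worked example (comment added for clarity; value hypothetical):\n# convert_to_dd(\"4203.4338\") -> degrees = 4203 // 100 = 42,\n# minutes = (4203.4338 - 4200) / 60 = 0.05723, so it returns \"42.05723\".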
\n\ndef convert_raw_gps_fix_to_exif_long(raw_gps_fix):\n    raw_lat = (raw_gps_fix.split(\" \"))[2]\n    deg = raw_lat[0:3]\n    min = raw_lat[3:5]\n    sec = str(int((float(raw_lat[5:10]) * 60.0)))\n    return deg + \"/1,\" + min + \"/1,\" + sec + \"/1\"\n    #return deg + \"/1,\" + min + \"/1,\" + sec + \"/1000\"\n\n##### end of convert_raw_gps_fix_to_exif_long #####\n\n\ndef convert_raw_gps_fix_to_exif_long_dir(raw_gps_fix):\n    return (raw_gps_fix.split(\" \"))[3]\n\n##### end of convert_raw_gps_fix_to_exif_long_dir #####\n\n\n\ndef convert_raw_gps_fix_to_exif_alt(raw_gps_fix):\n    return (raw_gps_fix.split(\" \"))[4]\n\n##### end of convert_raw_gps_fix_to_exif_alt #####\n\ndef nmea_cksum(data):\n    \n    # http://doschman.blogspot.com/2013/01/calculating-nmea-sentence-checksums.html\n    # This is a string; we will need to convert it to hex for\n    # proper comparison below\n    \n\n    ck_sum = data[len(data) - 4:] \n\n    # String slicing: Grabs all the characters\n    # between '$' and '*' and nukes any lingering\n    # newline or CRLF\n    chksumdata = re.sub(\"(\\n|\\r\\n)\",\"\", data[data.find(\"$\")+1:data.find(\"*\")])\n    \n    # Initializing our first XOR value\n    csum = 0\n    \n    # For each char in chksumdata, XOR against the previous\n    # XOR'd char. The final XOR of the last char will be our\n    # checksum to verify against the checksum we sliced off\n    # the NMEA sentence\n    \n    for c in chksumdata:\n        # XOR'ing value of csum against the next char in line\n        # and storing the new XOR value in csum\n        csum ^= ord(c)\n    \n    # Do we have a validated sentence?\n    if hex(csum) == hex(int(ck_sum, 16)):\n        return True\n    \n    return False\n\n##### end of nmea_cksum #####\n\n#################################\n##### Start of main program #####\n#################################\n\nprint(\"free disk space = \" + str(round(disk_stat('/'),2)) + \"%\")\nif (disk_stat('/')) < 10:\n    print(\"exiting due to full disk\")\n    os.system(\"(rm -rf /home/pi/Tools/Status/*; touch /home/pi/Tools/Status/Disk_Full; crontab -r) &\")\n    exit()\n\n\nstart_looking_for_GPS_time = time.time()\nsetup_time = str(start_looking_for_GPS_time - run_start_time)\n\nprint (\"setup_time = \" + setup_time)\nlogfile.write(\"setup_time = \" + setup_time + \"\\n\")\n\nos.system(\"(rm -rf /home/pi/Tools/Status/*; touch /home/pi/Tools/Status/Imaging...) &\")\n\nprint (\"Looking for GPS Data\")\nlogfile.write(\"Looking for GPS Data\" + \"\\n\")\n\n#while True:\ngps_time_out = 0\nwhile gps_time_out < 35:\n    time.sleep(1.0)\n    data = ser.readline().decode('utf_8') \n    #data = ser.read_until()\n    sdata = data.split(',')\n\n    if sdata[0] == \"$GPGGA\" and sdata[6] in (\"1\",\"2\"):\n        if nmea_cksum(data):\n\n            print (\"GPS Checksum Passed\")\n            logfile.write(\"GPS Checksum Passed\" + \"\\n\")\n\n            for fl in glob.glob(\"/home/pi/Tools/Camera/GPS/GPGGA*\"):\n                os.remove(fl)\n            \n            filename = \"/home/pi/Tools/Camera/GPS/\" + data[1:]\n            mode = 0o644\n            os.mknod(filename, mode)\n            \n            print (filename,)\n            logfile.write(filename)\n            \n            raw_gps_fix = parse_gga(sdata)\n            gps_flag = 2\n            break \n        else:\n            print (\"GPS Checksum Failed\")\n        # end of if sdata[0] == \"$GPGGA\" and sdata[6] in (\"1\",\"2\")\n\n    elif sdata[0] == \"$GPGGA\" and sdata[6] == \"0\":\n        if nmea_cksum(data):\n\n            print (\"GPS Checksum Passed\")\n            logfile.write(\"GPS Checksum Passed\\n\")\n\n            for fl in glob.glob(\"/home/pi/Tools/Camera/GPS/GPGGA*\"):\n                os.remove(fl)\n            \n            filename = \"/home/pi/Tools/Camera/GPS/\" + data[1:]\n            mode = 0o644\n            os.mknod(filename, mode)\n            \n            print (filename,)\n            logfile.write(filename)\n            \n            print (gps_time_out)\n            logfile.write(str(gps_time_out) + \"\\n\")\n            \n            if gps_time_out>= 25:\n                # We are receiving data from the GPS, but can't get a fix.\n                # Giving up, we must be in the basement.\n                gps_flag = 1\n                break\n        else:\n            print (\"GPS Checksum Failed\")\n            logfile.write(\"GPS Checksum Failed\" + \"\\n\")\n    \n    gps_flag = 0\n    gps_time_out += 1\n\n##### end of gps_time_out while #####\n\nser.close()\n\nprint (\"gps_flag = \" + str(gps_flag))\nlogfile.write(\"gps_flag = \" + str(gps_flag) + \"\\n\")\n\n##### done with gps #####\n\n\n\n##### manipulate gps strings to make them useful #####\n\n\nstart_GPS_string_manipulation_time = time.time()\ngps_aquisition = str(start_GPS_string_manipulation_time - start_looking_for_GPS_time)\nprint (\"gps_aquisition = \" + gps_aquisition)\nlogfile.write(\"gps_aquisition = \" + gps_aquisition + \"\\n\")\n\nif gps_flag == 0:\n\n    image_gps_fix = \"GPS Not Available\"\n\n    print (\"GPS Not Responsive, proceeding to collect images.\")\n    logfile.write(\"GPS Not Responsive, proceeding to collect images.\")\n\n    exif_lat = '00/1,00/1,00.00/1'\n    exif_lat_dir = 'X'\n    exif_long = '000/1,00/1,00.0000/1'\n    exif_long_dir = 'X' \n    exif_alt = '0'\n\n    for fl in glob.glob(\"/home/pi/Tools/Camera/GPS/GPGGA*\"):\n        os.remove(fl)\n\n    filename = \"/home/pi/Tools/Camera/GPS/GPGGA,ERROR\" \n    mode = 0o644\n    os.mknod(filename, mode)\n\nif gps_flag == 1:\n\n    image_gps_fix = \"GPS Fix Not Available\"\n\n    print (\"GPS Fix Not Available, proceeding to collect images.\")\n    logfile.write(\"GPS Fix Not Available, proceeding to collect images.\")\n\n    exif_lat = '00/1,00/1,00.00/1'\n    exif_lat_dir = 'X'\n    exif_long = '000/1,00/1,00.0000/1'\n    exif_long_dir = 'X'\n    exif_alt = '0'\n\n\n\nif gps_flag == 2:\n\n    image_gps_fix = convert_raw_gps_fix_to_image_gps_fix(raw_gps_fix)\n    print (\"Raw GPS Data = \" + raw_gps_fix)\n    logfile.write(\"Raw GPS Data = \" + raw_gps_fix + \"\\n\")\n#    print (\"Processed GPS Data = \" + image_gps_fix)\n#    logfile.write(\"Processed GPS Data = \" + image_gps_fix + \"\\n\")\n    \n    exif_lat = convert_raw_gps_fix_to_exif_lat(raw_gps_fix)\n    exif_lat_dir = convert_raw_gps_fix_to_exif_lat_dir(raw_gps_fix)\n    exif_long = convert_raw_gps_fix_to_exif_long(raw_gps_fix)\n    exif_long_dir = convert_raw_gps_fix_to_exif_long_dir(raw_gps_fix)\n    exif_alt = 
convert_raw_gps_fix_to_exif_alt(raw_gps_fix)\n\n##### done with gps string manipulation #####\n\n\n\n##### Imaging begins here! #####\n\nstart_create_image_tag_time = time.time()\ngps_string_manipulation = str(start_create_image_tag_time - start_GPS_string_manipulation_time)\nprint (\"gps_string_manipulation = \" + gps_string_manipulation)\nlogfile.write(\"gps_string_manipulation = \" + gps_string_manipulation + \"\\n\")\n\n#Create image of a rectangle for text background\n#img = Image.new('RGB', (1944, 120), color=(255,255,255))\nimg = Image.new('RGB', (1944, 120), color=(0,0,0))\n \n \n# place text on image, rotate and save as foreground.jpeg\n \nfont = ImageFont.truetype(\"/home/pi/Tools/Camera/dejavu/DejaVuSans-Bold.ttf\",40)\nd = ImageDraw.Draw(img)\n\n# Black Text\n#d.text((20,10), \"Adler / Far Horizons GONet hostname: \" + socket.gethostname(), font=font, fill=(0,0,0)) \n\n# Old version check remove after further testing \n##version = ''.join(os.listdir('/home/pi/Tools/Version'))\n#version = ''.join(glob.glob(os.path.join('/home/pi/Tools/Version', '*'))).split(\"/\")[5]\n# end of old version check\n\nversion_dir = os.listdir(\"/home/pi/Tools/Version\")\n#print(version_dir)\n#print (len(version_dir))\nif len(version_dir) == 0: \n print(\"Empty directory using UNK\")\n version = 'UNK'\n #os.system('touch {}'.format(\"/home/pi/Tools/Version/UNK\"))\nelse:\n version = ''.join(glob.glob(os.path.join('/home/pi/Tools/Version', '*'))).split(\"/\")[5]\nprint (version)\n\n# White Text\nd.text((20,10), \"Adler / Far Horizons \" + socket.gethostname() + \" \" + version + \" Exp: \" + tag_raspistill_ss + \" S\"\\\n+ \" ISO: \" + str(ISO) + \" WB: \" + white_balance_gains, font=font, fill=(255,255,255))\n# Next Line \nd.text((20,70), strftime(\"%y%m%d %H:%M:%S\", gmtime()) + \" UTC \" + image_gps_fix , font=font, fill=(255,255,255))\nimg.rotate(90,expand = True).save(scratch_dir + 'foreground.jpeg', 'JPEG')\n \n# take a picture with pi cam!\n\n\nstart_imaging_time = time.time()\ncreate_image_tag = str(start_imaging_time - start_create_image_tag_time)\nprint (\"create_image_tag = \" + create_image_tag)\nlogfile.write(\"create_image_tag = \" + create_image_tag + \"\\n\")\n\n########### start of raspistill ##############\n\n#image_file_name = socket.gethostname()[-3:] + \"_\" + (strftime(\"%y%m%d_%H%M%S\", gmtime())) + \"_%03d\"\nimage_file_name = \"R_\" + socket.gethostname()[-3:] + \"_\" + (strftime(\"%y%m%d_%H%M%S\", gmtime())) + \"_%d\"\nprint (\"image_file_name = \" + image_file_name)\nlogfile.write(\"image_file_name = \" + image_file_name + \"\\n\")\n\n##command = ['/usr/bin/raspistill', '-v',\n# '-t', str(raspistill_t),\n# '-ss', str(raspistill_ss),\n# '-tl', str(raspistill_tl),\n# '-ISO', str(ISO),\n# '-drc', str(drc),\n# '-awb', awb,\n# #'-awbg', '1.03125, 1.8086',\n# '-awbg', white_balance_gains,\n# '-br', str(br),\n# '-r',\n# '-ts',\n# #'--timeout', '1',\n# '-st',\n# #'-set',\n# '-x', 'GPS.GPSLongitude=' + exif_long, \n# '-x', 'GPS.GPSLongitudeRef=' + exif_long_dir,\n# '-x', 'GPS.GPSLatitude=' + exif_lat,\n# '-x', 'GPS.GPSLatitudeRef=' + exif_lat_dir,\n# '-x', 'GPS.GPSAltitude=' + exif_alt,\n# #'-x', 'IFD0.Artist=GONet ' + white_balance_gains,\n# '-x', 'IFD0.Software=' + socket.gethostname() + ' ' + version + ' WB: ' + white_balance_gains, \n# #'-x', 'IDF0.HostComputer= ' + socket.gethostname(),\n# '-o', scratch_dir + image_file_name + '.jpg']\n#subprocess.call(command)\n\n########## end of raspistill ##############\n\n\n\n########### Start of picamera ##############\n\ncamera = 
PiCamera()\nsleep(2)\n# Set a framerate of 1/6fps, then set shutter\n# speed to 6s and ISO to 800\ncamera.framerate = Fraction(1, 6)\ncamera.shutter_speed = raspistill_ss\ncamera.iso = ISO\ncamera.drc_strength=drc\ncamera.awb_mode = awb\ncamera.awb_gains = (1.03125, 1.8086)\ncamera.brightness = br\ncamera.still_stats = True\ncamera.resolution = (2592, 1944)\n\n\ncamera.exif_tags['GPS.GPSLongitude'] = exif_long\ncamera.exif_tags['GPS.GPSLongitudeRef'] = exif_long_dir\ncamera.exif_tags['GPS.GPSLatitude'] = exif_lat\ncamera.exif_tags['GPS.GPSLatitudeRef'] = exif_lat_dir\ncamera.exif_tags['GPS.GPSAltitude'] = exif_alt\n\n\ncamera.exif_tags['IFD0.Software'] = socket.gethostname() + ' ' + version + ' WB: ' + str(white_balance_gains)\ncamera.exif_tags['IFD0.ImageDescription'] = 'some number'\n\n\n\ncamera.exif_tags['EXIF.UserComment'] = 'some other number'\ncamera.exif_tags['EXIF.HostComputer'] = 'some number'\ncamera.exif_tags['IFD0.CameraSerialNumber'] = 'some number'\ncamera.exif_tags['EXIF.BodySerialNumber'] = 'some number'\ncamera.exif_tags['EXIF.SerialNumber'] = 'some number'\n\n\nfor x in range(5):\n filename = \"P_\" + socket.gethostname()[-3:] + \"_\" + (strftime(\"%y%m%d_%H%M%S_%s\", gmtime())) + \".jpg\"\n print(scratch_dir + filename)\n camera.capture(scratch_dir +filename, bayer=True)\n\n\n# The images collected with picamera yields a file about 2m small than raspisill\n\nprint(\"Closing Camera\")\ncamera.close()\n\n######### End of picamera ###########\n\nstart_post_processing_time = time.time()\nimaging_time = str(start_post_processing_time - start_imaging_time)\nprint (\"imaging_time = \" + imaging_time)\nlogfile.write(\"imaging_time = \" + imaging_time + \"\\n\")\n\nphoto_count = 0 \n#post processing\n#for filename in os.listdir(\"/home/pi/gonet3/scratch/\"):\nfor filename in os.listdir(scratch_dir):\n if filename.endswith(\".jpg\"):\n sfilename = filename.split(\"_\")\n print (scratch_dir + filename)\n logfile.write(scratch_dir + filename + \"\\n\")\n #logfile.write(scratch_dir + filename + \"\\n\")\n\n # open the the image from pi cam\n background = Image.open(scratch_dir + filename).convert(\"RGB\")\n\n # save its exif - does not include raw (bayer) data\n exif = background.info['exif']\n\n #print(exif)\n\n # open foreground.jpg and paste it to pi cam image\n foreground = Image.open(scratch_dir + \"foreground.jpeg\")\n background.paste(foreground, (0, 0)) #, foreground)\n\n #save the new composite image with pi cam photo's exif\n background.save(image_dir + filename, 'JPEG', exif=exif)\n\n # open the composited file for append. 
then tail the raw Bayer data from the original onto the end of the composited file.\n composite_file = open(image_dir + filename, 'ab') \n subprocess.call(['tail', '-c', '10237440', scratch_dir + filename], stdout=composite_file)\n composite_file.close()\n\n\n # create thumbnail here\n MAX_SIZE = (160, 120) \n background.thumbnail(MAX_SIZE) \n background.save(thumbs_dir + filename, 'JPEG')\n\n # clean up the scratch directory\n os.remove(scratch_dir + filename)\n photo_count += 1\n ##### End of .jpg if\n##### End of filename in directory for\nprint (\"photo_count = \" + str(photo_count))\nlogfile.write(\"photo_count = \" + str(photo_count) + \"\\n\")\n\nos.system(\"(rm -rf /home/pi/Tools/Status/*; touch /home/pi/Tools/Status/Ready) &\")\n\n\n\nfinish_time = time.time()\npost_processing_time = str(finish_time - start_post_processing_time)\nprint (\"post_processing_time = \" + post_processing_time)\nlogfile.write(\"post_processing_time = \" + post_processing_time + \"\\n\")\n\n\n\ntotal_run_time = str(finish_time - run_start_time)\nprint (\"total_time = \" + total_run_time)\nlogfile.write(\"total_time = \" + total_run_time + \"\\n\")\n\nprint (\"perf,\" + str(run_start_time) + \",\" + setup_time + \",\" + gps_aquisition + \",\" + gps_string_manipulation + \",\" + create_image_tag + \",\" + imaging_time + \",\" + post_processing_time + \",\" + total_run_time + \",\" + str(gps_flag) + \",\" + str(photo_count))\nprint (\"\")\n\nlogfile.write(\"perf,\" + str(run_start_time) + \",\" + setup_time + \",\" + gps_aquisition + \",\" + gps_string_manipulation + \",\" + create_image_tag + \",\" + imaging_time + \",\" + post_processing_time + \",\" + total_run_time + \",\" + str(gps_flag) + \",\" + str(photo_count) + \"\\n\")\nlogfile.write(\"\\n\")\n\nlogfile.flush()\nlogfile.close()\n\nwith open('/home/pi/Tools/Camera/gonet.log') as fin, open('/home/pi/Tools/Camera/temp_gonet.log', 'w') as fout:\n fout.writelines(deque(fin, 10000))\nos.remove(\"/home/pi/Tools/Camera/gonet.log\")\nos.rename(\"/home/pi/Tools/Camera/temp_gonet.log\",\"/home/pi/Tools/Camera/gonet.log\")\n","sub_path":"gonet4.py","file_name":"gonet4.py","file_ext":"py","file_size_in_byte":23368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"249679043","text":"import time\nfrom datetime import datetime as dt\n\nhosts_temp=r\"c:\\Dropbox\\pp\\block_websites\\Demo\\hosts\"\nhosts_path=\"/etc/hosts\"\nredirect=\"127.0.0.1\"\nwebsite_list=[\"www.facebook.com\",\"facebook.com\",\"dub119.mail.live.com\",\"www.dub119.mail.live.com\"]\n\nwhile True:\n if dt(dt.now().year,dt.now().month,dt.now().day,8) < dt.now() < dt(dt.now().year,dt.now().month,dt.now().day,16):\n print(\"Working hours...\")\n else:\n print(\"Fun hours...\")\n time.sleep(5)\n","sub_path":"26may2020/26.3.py","file_name":"26.3.py","file_ext":"py","file_size_in_byte":470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"267334850","text":"import os\nimport sys\nimport confit\nimport textwrap\nfrom . 
import unittest, _root\n\n\nclass PrettyDumpTest(unittest.TestCase):\n def test_dump_null(self):\n config = confit.Configuration('myapp', read=False)\n config.add({'foo': None})\n yaml = config.dump().strip()\n self.assertEqual(yaml, 'foo:')\n\n def test_dump_true(self):\n config = confit.Configuration('myapp', read=False)\n config.add({'foo': True})\n yaml = config.dump().strip()\n self.assertEqual(yaml, 'foo: true')\n\n def test_dump_false(self):\n config = confit.Configuration('myapp', read=False)\n config.add({'foo': False})\n yaml = config.dump().strip()\n self.assertEqual(yaml, 'foo: false')\n\n def test_dump_short_list(self):\n config = confit.Configuration('myapp', read=False)\n config.add({'foo': ['bar', 'baz']})\n yaml = config.dump().strip()\n self.assertEqual(yaml, 'foo: [bar, baz]')\n\n def test_dump_ordered_dict(self):\n odict = confit.OrderedDict()\n odict['foo'] = 'bar'\n odict['bar'] = 'baz'\n odict['baz'] = 'qux'\n\n config = confit.Configuration('myapp', read=False)\n config.add({'key': odict})\n yaml = config.dump().strip()\n self.assertEqual(yaml, textwrap.dedent(\"\"\"\n key:\n foo: bar\n bar: baz\n baz: qux\n \"\"\").strip())\n\n def test_dump_sans_defaults(self):\n config = confit.Configuration('myapp', read=False)\n config.add({'foo': 'bar'})\n config.sources[0].default = True\n config.add({'baz': 'qux'})\n\n yaml = config.dump().strip()\n self.assertEqual(yaml, \"foo: bar\\nbaz: qux\")\n\n yaml = config.dump(full=False).strip()\n self.assertEqual(yaml, \"baz: qux\")\n\n\nclass RedactTest(unittest.TestCase):\n def test_no_redaction(self):\n config = _root({'foo': 'bar'})\n data = config.flatten(redact=True)\n self.assertEqual(data, {'foo': 'bar'})\n\n def test_redact_key(self):\n config = _root({'foo': 'bar'})\n config['foo'].redact = True\n data = config.flatten(redact=True)\n self.assertEqual(data, {'foo': 'REDACTED'})\n\n def test_unredact(self):\n config = _root({'foo': 'bar'})\n config['foo'].redact = True\n config['foo'].redact = False\n data = config.flatten(redact=True)\n self.assertEqual(data, {'foo': 'bar'})\n\n def test_dump_redacted(self):\n config = confit.Configuration('myapp', read=False)\n config.add({'foo': 'bar'})\n config['foo'].redact = True\n yaml = config.dump(redact=True).strip()\n self.assertEqual(yaml, 'foo: REDACTED')\n\n def test_dump_unredacted(self):\n config = confit.Configuration('myapp', read=False)\n config.add({'foo': 'bar'})\n config['foo'].redact = True\n yaml = config.dump(redact=False).strip()\n self.assertEqual(yaml, 'foo: bar')\n\n def test_dump_redacted_sans_defaults(self):\n config = confit.Configuration('myapp', read=False)\n config.add({'foo': 'bar'})\n config.sources[0].default = True\n config.add({'baz': 'qux'})\n config['baz'].redact = True\n\n yaml = config.dump(redact=True, full=False).strip()\n self.assertEqual(yaml, \"baz: REDACTED\")\n\n @unittest.skipIf(sys.version_info <= (2, 7), reason=\"Unsupported Python version!\")\n def test_dump_comments(self):\n # self.maxDiff = None\n config = confit.Configuration(\"myapp\",\n file=\"%s\" % os.path.join(confit.get_parent_folder(__file__), 'comment-yaml.yml'))\n\n self.assertEqual(config.dump(default_flow_style=False).strip(), textwrap.dedent(\"\"\"\n############################################################\n# +------------------------------------------------------+ #\n# | Essentials (Global) | #\n# +------------------------------------------------------+ #\n############################################################\n\n# A color code between 0-9 or a-f. 
Set to 'none' to disable.\nops-name-color: '4'\n \"\"\").strip())\n\n notes_yaml = os.path.join(confit.get_parent_folder(__file__), 'notes.yml')\n # print(\"Notes-Yaml: \", notes_yaml)\n config.add(confit.ConfigSource.from_file(notes_yaml))\n self.assertEqual(config.dump(default_flow_style=False).strip(), textwrap.dedent(\"\"\"\n ############################################################\n# +------------------------------------------------------+ #\n# | Essentials (Global) | #\n# +------------------------------------------------------+ #\n############################################################\n\n# A color code between 0-9 or a-f. Set to 'none' to disable.\nops-name-color: '4'\n############################################################\n# +------------------------------------------------------+ #\n# | Notes | #\n# +------------------------------------------------------+ #\n############################################################\nalways-finish-her-first: true\"\"\").strip())\n","sub_path":"test/test_dump.py","file_name":"test_dump.py","file_ext":"py","file_size_in_byte":5179,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"576357316","text":"'''\nCreated on Aug 23, 2015\n\n@author: rasca1\n'''\nimport os\nimport xml.dom.minidom\nimport scanlist\nimport multiprocessing\nimport time\nclass nmaph:\n def __init__(self,cmd):\n self.cmd=cmd\n self.pool = multiprocessing.Pool(processes=4)\n \n \n #read n-result.xml \n def readxml(self):\n dom = xml.dom.minidom.parse('save/n-result.xml')\n root = dom.documentElement\n i=0\n j=root.getElementsByTagName(\"host\").length \n while i < j: \n break\n #print '111111111111'\n self.pool.close()\n self.pool.join() \n \n def main(self):\n #nmap scanning\n os.system(\"nmap --webxml -oX save/n-result.xml \"+self.cmd)\n self.readxml()\n\n ","sub_path":"Port_Scan/nmaph.py","file_name":"nmaph.py","file_ext":"py","file_size_in_byte":2162,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"617981049","text":"from django.urls import path\r\nfrom apps.index import views\r\n\r\napp_name = 'index'\r\n\r\nurlpatterns = [\r\n path('login/', views.login_view, name='login'),\r\n path('register/', views.register_view, name='register')\r\n # path('transfer/', views.TransferView.as_view(), name='transfer'),\r\n]","sub_path":"logistics/apps/index/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"302239642","text":"# -*- coding: utf-8 -*-\nfrom .errors import *\n\nimport aiohttp\n\nclass Client:\n '''API Wrapper for Naeg.in\n\n - Description:\n Creates a client instance\n\n - Parameters:\n token : str\n (Token provided to use the API)\n '''\n def __init__(self, token):\n if not isinstance(token, str):\n raise ArgumentoInvalido('Tokens must be strings')\n\n self.url = 'https://api.naeg.in/img?token=' + token\n\n async def get_all_tags(self):\n '''API Wrapper for Naeg.in\n\n - Description:\n Returns all available tags in a list\n '''\n async with aiohttp.ClientSession() as session:\n async with session.get(self.url) as r:\n if r.status != 200:\n raise RespostaInvalida('Request failed: invalid status code returned')\n \n data = await r.json()\n if data['erro'] and data['mensagem'] != 'Tag Invalida':\n raise ArgumentoInvalido(data['mensagem'])\n \n return data['tags']\n\n async def get_random(self, tag, nsfw = False):\n '''API Wrapper 
for Naeg.in\n\n - Description:\n Fetches and returns the link of an image with the given tag\n\n - Parameters:\n tag : str\n (Used to search for images)\n\n nsfw : bool (optional)\n (NOTE: NSFW tags will only work if you set \"nsfw\" to True)\n '''\n if not isinstance(tag, str):\n raise ArgumentoInvalido('Tags must be strings')\n \n if not isinstance(nsfw, bool):\n raise ArgumentoInvalido('NSFW value must be a boolean')\n\n async with aiohttp.ClientSession() as session:\n async with session.get(self.url + '&tag=' + tag + '&nsfw=' + str(nsfw)) as r:\n if r.status != 200:\n raise RespostaInvalida('Request failed: invalid status code returned')\n \n data = await r.json()\n if data['erro']:\n raise ArgumentoInvalido(data['mensagem'])\n \n return data['url']","sub_path":"naegin/wrapper.py","file_name":"wrapper.py","file_ext":"py","file_size_in_byte":2180,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"337188454","text":"import random,time\nfrom autocorrect import spell\n# from __future__ import division\n# from __future__ import print_function\n\nimport json\nimport sys\nimport argparse\nimport cv2,os\nimport editdistance\nimport numpy as np\nimport pandas as pd\nimport tensorflow as tf\nfrom flask import jsonify \nfrom keras.models import Sequential,Model\nfrom keras.layers import LSTM,Bidirectional,Dense,Activation,Lambda,Input\nimport keras.backend as K\nfrom keras.optimizers import Adam, SGD, RMSprop,Adadelta\nfrom keras.preprocessing.sequence import pad_sequences\nfrom keras.callbacks import ModelCheckpoint\ncharset = u' !@#><~%&\\$^*+,-./0123456789:;?ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'\nmaxstrokeslen = 500\noutputtextlen = len(charset)+1\ninput_layer = Input((maxstrokeslen, 3))\nX = Bidirectional( LSTM(units = 512,return_sequences = True) ) (input_layer)\nX = Bidirectional( LSTM(units = 512,return_sequences = True) ) (X)\nX = Dense(outputtextlen)(X)\nX = Activation('softmax', name='softmax')(X)\ntest_model = Model(input_layer,X)#.summary()\ntest_model.load_weights('Modell00000010.hdf5')\ngraph = tf.get_default_graph()\n\ndef labels_to_text(labels):\n ret = []\n for c in labels:\n if c == len(charset): # CTC Blank\n ret.append(\"\")\n else:\n ret.append(charset[c])\n return \"\".join(ret)\n\nimport itertools\n#charset = u' !@#><~%&\\$^*+,-./0123456789:;?ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'\ndef decode_label(out):\n # out : (1, 32, 42)\n out_best = list(np.argmax(out[0, :], axis=-1)) # get max index -> len = 32\n #print(out_best)\n out_best = [k for k, g in itertools.groupby(out_best)] # remove overlap value\n #print(out_best)\n outstr = ''\n for i in out_best:\n if i < len(charset) and i > 0:\n outstr += charset[i]\n return outstr\n\ndef levenshteinDistance(s1, s2):\n if len(s1) > len(s2):\n s1, s2 = s2, s1\n\n distances = range(len(s1) + 1)\n for i2, c2 in enumerate(s2):\n distances_ = [i2+1]\n for i1, c1 in enumerate(s1):\n if c1 == c2:\n distances_.append(distances[i1])\n else:\n distances_.append(1 + min((distances[i1], distances[i1 + 1], distances_[-1])))\n distances = distances_\n return distances[-1]\n\n\ndef ctc_lambda_func(args):\n y_pred, labels, input_length, label_length = args\n # the 2 is critical here since the first couple outputs of the RNN\n # tend to be garbage:\n y_pred = y_pred[:, 2:, :]\n return K.ctc_batch_cost(labels, y_pred, input_length, label_length)\n\ndef text_to_labels(text):\n ret = []\n for char in text:\n 
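# charset.find() returns -1 for characters missing from charset; inputs are assumed to contain only covered characters\n 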
ret.append(charset.find(char))\n return ret\n\nimport math\n\ndef cubicBezierPoint(a0, a1, a2, a3, t):\n return math.pow(1 - t, 3) * a0 + 3 * math.pow(1 - t, 2) * t * a1 + 3 * (1 - t) * math.pow(t, 2) * a2+ math.pow(t, 3) * a3\n\ndef bezier_curve(points):\n\n controlpoints = []\n renderpoints = []\n\n for i in range(1,len(points)-1,2):\n cp = ((points[i-1][0]+points[i][0])/2,(points[i-1][1]+points[i][1])/2,(points[i-1][2]+points[i][2])/2)\n controlpoints.append(cp)\n controlpoints.append(points[i])\n controlpoints.append(points[i+1])\n\n if (i + 2 ) < (len(points) - 1) :\n cp1 = ((points[i+1][0]+points[i+2][0])/2,(points[i+1][1]+points[i+2][1])/2,(points[i+1][2]+points[i+2][2])/2)\n controlpoints.append(cp1)\n\n for i in range(0, len(controlpoints) - 3,4) :\n a0 = controlpoints[i]\n a1 = controlpoints[i+1]\n a2 = controlpoints[i+2]\n a3 = controlpoints[i+3]\n # note: only t=0 is evaluated here, so each cubic segment contributes just its start point\n op = (cubicBezierPoint(a0[0], a1[0], a2[0], a3[0], 0),cubicBezierPoint(a0[1], a1[1], a2[1], a3[1], 0),cubicBezierPoint(a0[2], a1[2], a2[2], a3[2], 0))\n renderpoints.append(op)\n #print(renderpoints)\n return renderpoints\n\ndef beizerprocess(data):\n builder = []\n for i in range(0,len(data)):\n x_cord = []\n y_cord = []\n for j in range(0,len(data[i][0])):\n x_cord.append(data[i][0][j])\n y_cord.append(data[i][1][j])\n minX = min(x_cord)\n maxX = max(x_cord)\n Xvalue = maxX - minX\n x_cord_mean = [(elt - minX)/Xvalue for elt in x_cord]\n\n minY = min(y_cord)\n maxY = max(y_cord)\n Yvalue = maxY - minY\n y_cord_mean = [(elt - minY)/Yvalue for elt in y_cord]\n\n temptuple = []\n for i in range(0,len(x_cord_mean)):\n temptuple.append( (x_cord_mean[i],y_cord_mean[i],0) )\n\n bezpoints = bezier_curve(temptuple)\n for l in range(0,len(bezpoints)):\n if (l == 0):\n templ = []\n templ.append(bezpoints[l][0])\n templ.append(bezpoints[l][1])\n templ.append(1)\n else:\n templ = []\n templ.append(bezpoints[l][0])\n templ.append(bezpoints[l][1])\n templ.append(0)\n builder.append(templ)\n return builder\ndef HWRmodel(text_data):\n\n data = text_data\n\n # truncate long sequences, then pad short ones, so the model always sees exactly maxstrokeslen steps\n if len(data) > maxstrokeslen:\n data = data[:maxstrokeslen]\n if len(data) < maxstrokeslen:\n c = len(data)\n for j in range(c, maxstrokeslen):\n data.append([-1,-1,-1])\n c+=1\n t = np.expand_dims(data,axis = 0)\n st = time.time()\n with graph.as_default():\n prediction = test_model.predict(t)\n et = time.time()\n # print(et - st)\n de = decode_label(prediction)\n sde = spell(de)\n print(\"prediction and spellprediction\",de,sde)\n return sde\n\nfrom flask import Flask, url_for, request\napp = Flask(__name__)\n\n# @app.route('/apitest')\n# def apitest():\n# return 'API working'\n\n#@app.route('/hwrrecog', methods=['POST'])\ndef hwrrecog(data):\n \n \n\n beizerdata = beizerprocess(data)\n text_out = HWRmodel(beizerdata)\n op1 = {'output':text_out}\n\n return jsonify(op1)\n\n\n","sub_path":"flaskpython/app/restapi.py","file_name":"restapi.py","file_ext":"py","file_size_in_byte":5826,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"343656909","text":"\"\"\"Description: returns a string that, when printed, will list the options vertically with numbers preceding each option\nPrecondition: optionList must be a list of strings, menuTitle must be a string\n\"\"\"\n\"\"\"\nDescription: Creates a menu format with the Parameters.\n\"\"\"\n\n__author__=\"Ethan Storm\"\n__date__=\"February 7, 2017\"\n\ndef CreateMenu(optionList, menuTitle):\n \"\"\"\n Description: Creates a menu using the parameters given\n Precondition: optionList must be a list of strings\n menuTitle must be a string.\n \"\"\"\n 
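# (the bare triple-quoted string below is a no-op expression statement, left over from a git exercise)\n 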
\"\"\"\n This is an addition for git lab 2\n \"\"\"\n if not isinstance(optionList, list):\n return None\n \n ct=0\n St=menuTitle+\"\\n\"\n \n for el in optionList:\n ct+=1\n St+=\"\\t\"+str(ct)+\". \"+ el+\"\\n\"\n return St\n\ndef getValidChoice(menuString:str, menuTitle:str, numOptions:int):\n \"\"\"\n Description:\n prints the menuTitle and the menuString and\n continues to print the menuTitle and menuString and ask the user\n to enter a choice until they enter a valid numeric choice\n that is in the proper range.\n Returns an integer.\n\n PreConditions:\n menuString must be a string that contains the\n numbered options for the choices\n menuTitle must be a string\n numOptions is an integer indicating the number of options for that menu\n \"\"\"\n \n czech=True\n while czech:\n xList=input(\"\\n\"+menuString+\"\\nPlease enter your choice: \")\n test=True\n try:\n yList=eval(xList) # note: eval() on raw user input is risky; int(xList) would be safer here\n except:\n test=False\n\n if test and type(yList)==int and yList > 0 and yList <=numOptions:\n czech=False\n\n else:\n print(\"Invalid choice - try again\")\n\n return yList\n\ndef getValidChoices(menuString:str, start:int, end:int):\n \"\"\"\n Description:\n prints the menuString and\n continues to print the menuTitle and menuString and ask the user\n to enter a choice until they enter a valid numeric choice\n that is in the proper range.\n Returns an integer.\n\n PreConditions:\n menuString must be a string that contains the\n numbered options for the choices\n \n start and end are integers indicating the valid range of options for that menu\n \"\"\"\n \n czech=True\n while czech:\n xList=input(\"\\n\"+menuString+\"\\nPlease enter your choice: \")\n test=True\n try:\n yList=eval(xList) # note: eval() on raw user input is risky; int(xList) would be safer here\n except:\n test=False\n\n if test and type(yList)==int and yList>=start and yList <=end:\n czech=False\n\n else:\n print(\"Invalid choice - try again\")\n\n return yList\n\ndef main():\n x = CreateMenu([\"item1\",\"item2\"], \"Title\")\n if x is None:\n return \"Not a Valid Menu\"\n print(x) \n choice=input(\"Enter the number of the option you wish to use: \")\n return choice\n\n\n \nif __name__==\"__main__\":\n main()\n","sub_path":"CreateMenu.py","file_name":"CreateMenu.py","file_ext":"py","file_size_in_byte":2936,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"567480228","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Mar 7 10:01:43 2019\n\n@author: Home\n\"\"\"\n\"\"\"\nReading a gmsh file and filling a mesh object\n\"\"\"\nfrom base_FE_Q import Mesh, Node, Element, Triangle, Segment\nimport numpy as np\n\ndef read_file(filename):\n Nodes = np.empty((100000, 4), dtype = float)\n MeshFormat = np.empty(3, dtype = int) #[None] * 3\n Number0fNodes = 0\n NumberOfTr = 0\n NumberOfSeg = 0\n Number0fElems = 0\n #NumberOfBorders =0\n cnt_1 = cnt_2 = cnt_3 = cnt_4 = 0 # segment counters for each border\n \n with open(filename) as f:\n content = f.readlines()\n\n for i in range(0, len(content)):\n line = content[i]\n\n if line[0] == '$': # reading a property\n \n property = line[1:-1]\n if property ==\"PhysicalNames\":\n i+=1\n line = content[i]\n \n #NumberOfBorders = (int)(content[i][0:-1].split(\" \")[0]) -1\n #print(\"nb bords : {}\".format(NumberOfBorders))\n\n i += 1;\n\n elif property == \"Nodes\":\n i+=1\n line = content[i]\n \n Number0fNodes = (int)(content[i][0:-1].split(\" \")[0])\n\n i += 1;\n \n for j in range(i, i + Number0fNodes):\n Nodes[j - i] = np.asarray([content[j][0:-1].split(\" \")[:4]] )\n\n elif property == \"Elements\":\n i+=1\n line = content[i]\n\n 
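# the line right after the $Elements tag holds the total element count\n 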
Number0fElems = (int)(content[i][0:-1].split(\" \")[0])\n\n i += 1;\n #lent = content[i][0:-1].split(\" \")\n Elems = list() #np.empty((Number0fElems ,0 ), dtype = list)\n\n cnt = 0;\n for j in range(i, i + Number0fElems): # TODO: i doesn't change\n\n type = (int)(content[j][0:-1].split(\" \")[1])\n bntg = (int)(content[j][0:-1].split(\" \")[2])\n\n vertice_n = 4;\n\n if type == 2: # triangle\n vertice_n = 3;\n NumberOfTr += 1\n \n elif type == 1:\n vertice_n = 2;\n NumberOfSeg +=1\n\n Elems.append(np.asarray([content[j][0:-1].split(\" \")[1: ( vertice_n + bntg + 3)]])) #[cnt] = np.asarray([content[j][0:-1].split(\" \")[1:(vertice_n * 2 + 2)]])\n\n if Elems[-1][0][0] == '1' and Elems[-1][0][2] == '1':\n cnt_1 += 1\n \n if Elems[-1][0][0] == '1' and Elems[-1][0][2] == '2':\n cnt_2 += 1\n \n if Elems[-1][0][0] == '1' and Elems[-1][0][2] == '3':\n cnt_3 += 1\n \n if Elems[-1][0][0] == '1' and Elems[-1][0][2] == '4':\n cnt_4 += 1\n cnt = cnt +1\n else:\n a = 2\n\n Nodes_ = np.empty(Number0fNodes, dtype = Node) #[Node]* (Number0fNodes+1);\n Elems_ = np.empty(Number0fElems, dtype = Element)\n Trs_ = np.empty(NumberOfTr, dtype = Triangle)\n \n\n segs_1 = np.empty(cnt_1, dtype = Segment)\n segs_2 = np.empty(cnt_2, dtype = Segment)\n segs_3 = np.empty(cnt_3, dtype = Segment)\n segs_4 = np.empty(cnt_4, dtype = Segment)\n\n cnt = 0\n for i in range(0, Number0fNodes):\n Nodes_[ i ] = Node(i,Nodes[i][1], Nodes[i][2], Nodes[i][3])\n\n\n ide_1 = ide_2 = ide_3 = ide_4 = 0\n cntT = 0\n for i in range(0, Number0fElems):\n\n Elems_i = Elems[i][0].astype(int)\n\n nbTag = int(Elems[i][0][1])\n Elems[i] = Elems[i].astype( int)\n \n Elems_[ i ] = Element(i, Elems_i[0], Elems_i[2:2+nbTag], Elems_i[2+nbTag:] ) #id type tags vertices\n\n\n if Elems_i[0] == 2:\n Trs_[cntT] = Triangle(i, Elems_i[2:2+nbTag], Elems_i[2+nbTag:])\n cntT +=1\n\n\n elif Elems_i[0] == 1:\n\n if int(Elems_i[2]) == 1: # Wall or exterior \n segs_1[ide_1] = Segment(i, Elems_i[2+nbTag:]) #seg_s;\n ide_1 += 1\n \n elif Elems_i[2] == 2: # Interior or left\n segs_2[ide_2] = Segment(i, Elems_i[2+nbTag:])#seg_s\n ide_2 += 1\n elif Elems_i[2] == 3: # Right\n segs_3[ide_3] = Segment(i, Elems_i[2+nbTag:])#seg_s\n ide_3 += 1\n elif Elems_i[2] == 4: # Border 4\n segs_4[ide_4] = Segment(i, Elems_i[2+nbTag:])#seg_s\n ide_4 += 1\n \n Segs=[segs_1,segs_2,segs_3,segs_4]\n Cnt_bord=[cnt_1,cnt_2,cnt_3,cnt_4]\n princeMesh = Mesh(Number0fNodes, Nodes_, NumberOfTr , Trs_,Segs,Cnt_bord) #def __init__(this, Format_, Ns,Nodes , Nt, Triangles):\n return princeMesh","sub_path":"coagulation/avec_correction/V1/read_file.py","file_name":"read_file.py","file_ext":"py","file_size_in_byte":4826,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"320153219","text":"#!/usr/bin/python3\n\nfrom bluepy import btle\nimport argparse\nimport os\nimport re\nfrom dataclasses import dataclass\nfrom collections import deque\nimport threading\nimport time\nimport signal\nimport traceback\nimport logging\n\n\n@dataclass\nclass Measurement:\n temperature: float\n humidity: int\n voltage: float\n battery: int = 0\n timestamp: int = 0\n\n def __eq__(self, other):\n # timestamp is deliberately left out of the comparison\n if self.temperature == other.temperature and self.humidity == other.humidity and self.battery == other.battery and self.voltage == other.voltage:\n return True\n else:\n return False\n\n\nmeasurements = deque()\n\n\ndef signal_handler(sig, frame):\n os._exit(0)\n\n\ndef watchDog_Thread():\n global unconnectedTime\n global connected\n global pid\n while True:\n 
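# runs every 5 s: once we have been disconnected for over a minute, kill our own bluepy-helper so a reconnect can succeed\n 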
logging.debug(\"watchdog_Thread\")\n logging.debug(\"unconnectedTime : \" + str(unconnectedTime))\n logging.debug(\"connected : \" + str(connected))\n logging.debug(\"pid : \" + str(pid))\n now = int(time.time())\n if (unconnectedTime is not None) and ((now - unconnectedTime) > 60): #could also check connected is False, but this is more fault proof\n pstree=os.popen(\"pstree -p \" + str(pid)).read() #we want to kill only bluepy from our own process tree, because other python scripts have their own bluepy-helper process\n logging.debug(\"PSTree: \" + pstree)\n try:\n bluepypid=re.findall(r'bluepy-helper\\((.*)\\)',pstree)[0] #Store the bluepypid, to kill it later\n except IndexError: #Should not happen since we're now connected\n logging.debug(\"Couldn't find pid of bluepy-helper\")\n else:\n os.system(\"kill \" + bluepypid)\n logging.debug(\"Killed bluepy with pid: \" + str(bluepypid))\n unconnectedTime = now #reset unconnectedTime to prevent multiple killings in a row\n time.sleep(5)\n\n\n\nclass MyDelegate(btle.DefaultDelegate):\n def __init__(self, params):\n btle.DefaultDelegate.__init__(self)\n # ... initialise here\n\n def handleNotification(self, cHandle, data):\n global measurements\n try:\n measurement = Measurement(0, 0, 0, 0, 0)\n measurement.timestamp = int(time.time())\n temp=int.from_bytes(data[0:2],byteorder='little',signed=True)/100\n print(\"Temperature: \" + str(temp))\n\n humidity=int.from_bytes(data[2:3],byteorder='little')\n print(\"Humidity: \" + str(humidity))\n\n voltage=int.from_bytes(data[3:5],byteorder='little') / 1000.\n print(\"Battery voltage:\",voltage)\n measurement.temperature = temp\n measurement.humidity = humidity\n measurement.voltage = voltage\n if args.battery:\n batteryLevel = min(int(round((voltage - 2.1),2) * 100), 100) #3.1 or above --> 100% 2.1 --> 0 %\n measurement.battery = batteryLevel\n print(\"Battery level:\",batteryLevel)\n\n measurements.append(measurement)\n\n except Exception as e:\n print(\"Error\")\n print(e)\n print(traceback.format_exc())\n\n# Initialisation -------\n\ndef connect():\n p = btle.Peripheral(adress) \n val=b'\\x01\\x00'\n p.writeCharacteristic(0x0038,val,True) #enable notifications of Temperature, Humidity and Battery voltage\n p.writeCharacteristic(0x0046,b'\\xf4\\x01\\x00',True)\n p.withDelegate(MyDelegate(\"abc\"))\n return p\n\n# roll around to the next device address \ndef set_address():\n global address_ctr, addresses, adress\n address_ctr += 1\n if address_ctr >= len(addresses):\n address_ctr = 0 \n adress = addresses[address_ctr]\n\n# Main loop --------\nparser=argparse.ArgumentParser()\nparser.add_argument(\"--device\",\"-d\", help=\"Set the device MAC-Address in format AA:BB:CC:DD:EE:FF\",metavar='AA:BB:CC:DD:EE:FF')\nparser.add_argument(\"--battery\",\"-b\", help=\"Get estimated battery level\", metavar='', type=int, nargs='?', const=1)\nparser.add_argument(\"--count\",\"-c\", help=\"Read/Receive N measurements and then exit script\", metavar='N', type=int)\nparser.add_argument(\"--delay\",\"-del\", help=\"Delay between taking readings from each device\", metavar='N', type=int)\n\n\nargs=parser.parse_args()\nif args.device:\n print('args.device', args.device)\n addresses = args.device.split(',')\n print(addresses, len(addresses))\n for address in addresses:\n if not re.match(\"[0-9a-fA-F]{2}([:]?)[0-9a-fA-F]{2}(\\\\1[0-9a-fA-F]{2}){4}$\",address):\n print(\"Please specify device MAC-Address in format AA:BB:CC:DD:EE:FF\")\n os._exit(1)\n address_ctr = 1000\n set_address()\n\nelse:\n parser.print_help()\n 
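# example invocation (hypothetical MAC addresses):\n # python3 LYWSD03MMC.py --device A4:C1:38:00:00:01,A4:C1:38:00:00:02 --battery 1 --delay 60\n 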
os._exit(1)\n\n\nif args.delay:\n delay = args.delay\n print ('Delay set to {} seconds'.format(delay))\nelse:\n delay = 30\n print ('No delay set. Defaulting to 30 seconds')\n\n\np=btle.Peripheral()\ncnt=0\n\nsignal.signal(signal.SIGINT, signal_handler)\nconnected=False\n#logging.basicConfig(level=logging.DEBUG)\nlogging.basicConfig(level=logging.ERROR)\nlogging.debug(\"Debug: Starting script...\")\npid=os.getpid() \nbluepypid=None\nunconnectedTime=None\n\nwatchdogThread = threading.Thread(target=watchDog_Thread)\nwatchdogThread.start()\nlogging.debug(\"watchdogThread started\")\n\n\nwhile len(measurements) < len(addresses):\n try:\n if not connected:\n print(\"Trying to connect to \" + adress)\n p=connect()\n connected=True\n unconnectedTime=None\n\n if p.waitForNotifications(2000):\n cnt += 1\n if args.count is not None and cnt >= args.count:\n print(str(args.count) + \" measurements collected. Exiting in a moment.\")\n p.disconnect()\n time.sleep(5)\n #It seems that sometimes bluepy-helper remains and thus prevents a reconnection, so we try killing our own bluepy-helper\n pstree=os.popen(\"pstree -p \" + str(pid)).read() #we want to kill only bluepy from our own process tree, because other python scripts have their own bluepy-helper process\n bluepypid=0\n try:\n bluepypid=re.findall(r'bluepy-helper\\((.*)\\)',pstree)[0] #Store the bluepypid, to kill it later\n except IndexError: #Should normally occur because we're disconnected\n logging.debug(\"Couldn't find pid of bluepy-helper\")\n if bluepypid != 0:\n os.system(\"kill \" + bluepypid)\n logging.debug(\"Killed bluepy with pid: \" + str(bluepypid))\n cnt = 0 # reset the counter - do not exit\n # measurements.clear() # clear the measurements array or it will continue to grow\n set_address() # roll round to the next address in the array\n time.sleep(delay) # delay between reading each device\n print(\"\")\n continue\n except Exception as e:\n print(\"Connection lost\")\n if connected is True: #First connection abort after connected\n unconnectedTime=int(time.time())\n connected=False\n time.sleep(1)\n logging.debug(e)\n logging.debug(traceback.format_exc()) \n \n print (\"Waiting...\")\n # Perhaps do something else here\n","sub_path":"LYWSD03MMC.py","file_name":"LYWSD03MMC.py","file_ext":"py","file_size_in_byte":7334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"30622933","text":"import json\n\nfrom django.http import Http404\n\nfrom core.api.serializers import PatientUpdateSerializer\nfrom core.api.util.helper import KakaoResponseAPI\n\n\nclass PatientMeasurementEntrance(KakaoResponseAPI):\n serializer_class = PatientUpdateSerializer\n model_class = serializer_class.Meta.model\n queryset = model_class.objects.all()\n\n def post(self, request, format='json', *args, **kwargs):\n self.preprocess(request)\n try:\n patient = self.get_object_by_kakao_user_id()\n except Http404:\n return self.build_response_fallback_404()\n\n response = self.build_response(response_type=KakaoResponseAPI.RESPONSE_SKILL)\n if patient.measurement_manage_flag:\n response.add_simple_text(text='산소포화도 관리를 설정한 적이 있습니다. 
다시 설정할까요?')\n response.set_quick_replies_yes_or_no(\n block_id_for_yes='5dc708cdffa74800014107a8', # (블록) 02 건강재설정_알림횟수 확인\n block_id_for_no='5dd10b3392690d000194ba29', # (블록) 00 대화 종료 여부_산소포화도관리\n message_text_for_yes='네, 설정할게요!', message_text_for_no='아니요, 괜찮아요!'\n )\n response.add_context(name='건강관리재설정', params={'daily_measurement_count': patient.daily_measurement_count})\n else:\n response.add_simple_text(text='안녕하세요 콜로크만 박사입니다. 저와 함께 산소포화도 관리를 시작해보시겠습니까?☁︎')\n response.set_quick_replies_yes_or_no(\n block_id_for_yes='5dbfa982b617ea000165eeee', # (블록) 01-1 건강관리_횟수\n block_id_for_no='5dd10b3392690d000194ba29', # (블록) 00 대화 종료 여부_산소포화도관리\n message_text_for_yes='네, 시작할께요', message_text_for_no='아니요, 괜찮아요!'\n )\n\n return response.get_response_200()\n\n\nclass PatientMeasurementNotiTimeQuestion(KakaoResponseAPI):\n serializer_class = PatientUpdateSerializer\n model_class = serializer_class.Meta.model\n queryset = model_class.objects.all()\n\n def post(self, request, format='json', *args, **kwargs):\n self.preprocess(request)\n try:\n patient = self.get_object_by_kakao_user_id()\n except Http404:\n return self.build_response_fallback_404()\n\n response = self.build_response(response_type=KakaoResponseAPI.RESPONSE_SKILL)\n patient.measurement_noti_flag = True\n patient.save()\n if self.data.get('reset_measurement_noti_time') == 'true':\n patient.reset_measurement_noti_time()\n\n if not patient.need_measurement_noti_time_set() and len(patient.measurement_noti_time_list()) != 0:\n time_list = ','.join([x.strftime('%H시 %M분') for x in patient.measurement_noti_time_list()])\n\n message = f\"이미 모든 회차 알림 설정을 마쳤습니다.\\n[설정한 시간]\\n{time_list}\"\n response.add_simple_text(text=message)\n response.add_simple_text(text='산소포화도 확인 알림을 모두 재설정하시겠어요?')\n\n response.set_quick_replies_yes_or_no(block_id_for_yes='5dc708cdffa74800014107a8') # (블록) 02 건강재설정_알림횟수 확인\n else:\n next_undefined_number = patient.next_undefined_measurement_noti_time_number()\n message = f'{next_undefined_number:d}회차 산소포화도 확인 알림을 설정할까요?'\n response.add_simple_text(text=message)\n if self.request.query_params.get('restart') == 'true':\n response.set_quick_replies_yes_or_no(\n block_id_for_yes='5dc709c38192ac0001c5d9cb') # (블록) 05 건강재설정_알림 시간대\n else:\n response.set_quick_replies_yes_or_no(\n block_id_for_yes='5dbfaec792690d0001e8805d') # (블록) 02-2 건강관리_산소포화도 알림 시간대\n\n return response.get_response_200()\n\n\nclass PatientMeasurementNotiSetTime(KakaoResponseAPI):\n serializer_class = PatientUpdateSerializer\n model_class = serializer_class.Meta.model\n queryset = model_class.objects.all()\n\n def post(self, request, format='json', *args, **kwargs):\n self.preprocess(request)\n try:\n patient = self.get_object_by_kakao_user_id()\n except Http404:\n return self.build_response_fallback_404()\n response = self.build_response(response_type=KakaoResponseAPI.RESPONSE_SKILL)\n patient.measurement_manage_flag = True\n patient.measurement_noti_flag = True\n\n data = dict()\n measurement_noti_time = self.data.get('noti_time')\n if measurement_noti_time:\n time = json.loads(measurement_noti_time)['value']\n next_undefined_number = patient.next_undefined_measurement_noti_time_number()\n\n if next_undefined_number:\n field_name = 'measurement_noti_time_%d' % next_undefined_number\n data[field_name] = time\n\n serializer = self.get_serializer(patient, data=data, partial=True)\n if not serializer.is_valid():\n response.add_simple_text(text='알 수 없는 오류가 발생했습니다. 
입력값이 잘못 되었습니다.')\n return response.get_response_200()\n\n if not request.query_params.get('test'):\n serializer.save()\n\n patient.refresh_from_db()\n\n # check patient doesn't need measurement noti time set.\n if not patient.need_measurement_noti_time_set() and len(patient.measurement_noti_time_list()) != 0:\n time_list = ', '.join([x.strftime('%H시 %M분') for x in patient.measurement_noti_time_list()])\n response.add_simple_text(text='모든 회차 알림 설정을 마쳤습니다.\\n[설정한 시간]\\n%s' % time_list)\n response.add_simple_text(text='이대로 산소포화도 확인 알림을 설정할까요?')\n if self.request.query_params.get('restart') == 'true':\n response.set_quick_replies_yes_or_no(\n block_id_for_yes='5dc709d48192ac0001c5d9cd', # (블록) 06 건강재설정_알림 설정 완료\n block_id_for_no='5dc72e60ffa74800014107c6', # (블록) 건강관리_정보 리셋\n message_text_for_no=\"아니요, 취소할게요\")\n else:\n response.set_quick_replies_yes_or_no(\n block_id_for_yes='5dbfb1ee8192ac00016aa32b', # (블록) 03 건강관리_알람 설정 완료\n block_id_for_no='5dc72e60ffa74800014107c6', # (블록) 건강관리_정보 리셋\n message_text_for_no=\"아니요, 취소할게요\")\n\n return response.get_response_200()\n else:\n time = time.split(':')[0] + '시 ' + time.split(':')[1] + '분'\n response.add_simple_text(text=f'{time}을 입력하셨어요.\\n다음 회차를 설정하시려면 \\'예\\'를 눌러주세요.')\n if self.request.query_params.get('restart') == 'true':\n response.set_quick_replies_yes_or_no(\n block_id_for_yes='5dc7097affa74800014107ac') # (블록) 04 건강재설정_알림 설정 질문\n else:\n response.set_quick_replies_yes_or_no(\n block_id_for_yes='5dbfaeaf92690d0001e8805b') # (블록) 02-1 건강관리_산소포화도 알림 시작\n\n return response.get_response_200()\n\n\nclass PatientMeasurementRestart(KakaoResponseAPI):\n serializer_class = PatientUpdateSerializer\n model_class = serializer_class.Meta.model\n queryset = model_class.objects.all()\n\n def post(self, request, format='json', *args, **kwargs):\n self.preprocess(request)\n try:\n patient = self.get_object_by_kakao_user_id()\n except Http404:\n return self.build_response_fallback_404()\n response = self.build_response(response_type=self.RESPONSE_SKILL)\n\n response.add_simple_text('산소포화도 관리를 설정한 적이 있습니다.\\n다시 설정할까요?')\n response.set_quick_replies_yes_or_no(\n block_id_for_yes='5dc708cdffa74800014107a8', # (블록) 02 건강재설정_알림횟수 확인\n block_id_for_no='5dd10b3392690d000194ba29', # (블록) 00 대화 종료 여부_산소포화도관리\n )\n\n return response.get_response_200()\n\n\nclass PatientMeasurementNotiReset(KakaoResponseAPI):\n serializer_class = PatientUpdateSerializer\n model_class = serializer_class.Meta.model\n queryset = model_class.objects.all()\n\n def post(self, request, format='json', *args, **kwargs):\n self.preprocess(request)\n try:\n patient = self.get_object_by_kakao_user_id()\n except Http404:\n return self.build_response_fallback_404()\n patient.reset_measurement()\n response = self.build_response(response_type=KakaoResponseAPI.RESPONSE_SKILL)\n return response.get_response_200()\n","sub_path":"core/api/views/measurements.py","file_name":"measurements.py","file_ext":"py","file_size_in_byte":9110,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"342690155","text":"from __future__ import print_function, division\n\nimport os\nimport pickle\nfrom pathlib import Path\nfrom typing import List, Tuple\nimport torch\nimport numpy as np\nimport torchvision\nfrom torch.utils.data import Dataset\nfrom torchvision import transforms\n\nfrom coord2vec.feature_extraction.features_builders import FeaturesBuilder\n\n\ndef get_files_from_path(pathstring) -> List[Tuple[str, str]]:\n \"\"\"\n Retrives file names from the folder and 
returns paired image/feature file paths (in tuples)\n\n Args:\n pathstring: The folder path\n\n Returns:\n All paired file paths\n \"\"\"\n\n files_paths = []\n for file in Path(pathstring).glob(\"**/*_img.npy\"):\n fname = os.path.basename(file)\n file_number = fname[:-8] # minus _img.npy\n features_file = f\"{pathstring}/{file_number}_features.npy\"\n if os.path.exists(features_file):\n files_paths.append((str(file), features_file))\n\n return files_paths\n\n\nclass TileFeaturesDataset(Dataset):\n \"\"\"Tile Features Dataset \"\"\"\n\n def __init__(self, root_dir, feature_builder, image_transforms=None, inf2value: float = 1e3):\n \"\"\"\n Args:\n feature_builder:\n root_dir (string): Directory with all the images.\n image_transforms (callable, optional): Optional transform to be applied\n on an image.\n inf2value : number to replace all NaN/inf values with\n \"\"\"\n self.files_paths = get_files_from_path(root_dir)\n if image_transforms is None:\n image_transforms = torchvision.transforms.Compose(\n [transforms.ToPILImage(),\n transforms.RandomHorizontalFlip(),\n transforms.RandomVerticalFlip(),\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])\n self.image_transforms = image_transforms\n self.feature_builder = feature_builder\n self.inf2value = inf2value\n\n def __len__(self):\n return len(self.files_paths)\n\n def __getitem__(self, idx):\n img_path, feats_paths = self.files_paths[idx]\n image_arr = np.load(img_path)\n features_arr = np.load(feats_paths)\n features_arr = features_arr[0] if len(features_arr.shape) > 1 else features_arr\n\n if len(features_arr) > len(self.feature_builder.features_names): # read more from cache (both norm and not)\n features_arr = features_arr[self.feature_builder.relevant_feature_idxs]\n\n features_arr[~np.isfinite(features_arr)] = self.inf2value # replace NaN/inf values, matching the docstring\n\n image_torch = self.image_transforms(image_arr.swapaxes(0, 2)) # make it (X, X, 3)\n features_torch = torch.tensor(features_arr).float()\n\n return image_torch, features_torch\n\n\nclass SingleTileFeaturesDataset(TileFeaturesDataset):\n def __init__(self, root_dir, feature_builder: FeaturesBuilder, feature_index: int = None, image_transforms=None):\n \"\"\"\n Args:\n feature_builder:\n root_dir (string): Directory with all the images.\n feature_index: the index of the feature to be used\n image_transforms (callable, optional): Optional transform to be applied\n on a sample.\n \"\"\"\n super().__init__(root_dir, feature_builder, image_transforms)\n self.feature_index = feature_index\n\n def __getitem__(self, idx):\n image_torch, features_torch = super().__getitem__(idx)\n return image_torch, features_torch[self.feature_index:self.feature_index + 1]\n","sub_path":"geo_embedding/models/data_loading/tile_features_loader.py","file_name":"tile_features_loader.py","file_ext":"py","file_size_in_byte":3517,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"515869880","text":"from django.utils.translation import ugettext_lazy as _\n\nimport horizon\n\n\nclass TelemetryPanelGroup(horizon.PanelGroup):\n slug = \"telemetry_panel_group\"\n name = _(\"Telemetry Panel\")\n panels = ('graphs', 'recommendations', 'alarms', 'availability', 'benchmark', )\n\n\nclass MessagesPanelGroup(horizon.PanelGroup):\n slug = \"messages_panel_group\"\n name = _(\"Notification Board\")\n panels = ('messages', 'sent_messages', )\n\n\nclass InboxMessagesPanelGroup(horizon.PanelGroup):\n slug = \"inbox_panel_group\" \n name = _(\"Notification Board - Inbox\")\n panels = 
('messages_user', )\n \n\nclass TelemetryDashboard(horizon.Dashboard):\n name = _(\"Telemetry\")\n slug = \"telemetry_dashboard\"\n panels = (TelemetryPanelGroup, MessagesPanelGroup, InboxMessagesPanelGroup, )\n default_panel = 'graphs'\n\n\nhorizon.register(TelemetryDashboard)\n","sub_path":"telemetry_dashboard/dashboard.py","file_name":"dashboard.py","file_ext":"py","file_size_in_byte":855,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"500390890","text":"#!/bin/python\n\nimport json\nimport os\n\ndef create_chip_file(chip_obj, file_name):\n \"\"\"\n returns nothing.\n \n parameters:\n chip_obj (Chip): chip that contains only and/not gates,\n file_name (str): the name of the file to be created\n \"\"\"\n \n if os.path.exists(file_name):\n print(\"Please use a filepath that does not exist to export your chip save to.\")\n quit()\n\n print(\n \"\"\"\n Instructions:\n If numbers are asked for, input them as numbers, not words.\n If it asks for a number, give a decimal number (e.g. 0-255 in the case of RGB), not a hexadecimal number (e.g. 00-FF) for RGB.\n \"\"\"\n )\n\n try:\n create_chip = open(file_name, \"w\")\n try:\n json.dump({\n \"name\": (chip_obj.chipName),\n \"creationIndex\": 0,\n \"colour\": {\n \"r\": float(input(\"The R part of the RGB: \"))+1/256,\n \"g\": float(input(\"The G part of the RGB: \"))+1/256,\n \"b\": float(input(\"The B part of the RGB: \"))+1/256,\n \"a\": 1.0\n },\n \"nameColour\": {\n \"r\": 1.0,\n \"g\": 1.0,\n \"b\": 1.0,\n \"a\": 1.0\n },\n \"componentNameList\": chip_obj.chipData['chipComponents'],\n \"savedComponentChips\": chip_obj.chipData['componentData']\n }, create_chip, indent = 4)\n except:\n print(f\"Could not dump chip! Printable version: {repr(chip_obj)}. This is most likely an internal error. Go to [NOT YET MERGED, ISSUES TAB WILL NOT EXIST] and submit a Issue in the Issues tab.\") # If you do fork this, just put the link https://github.com/sk8terkyd/digital-logic-sim-chip-compiler/issues there.\n quit(1)\n except:\n print(f\"Could not create/open output file! Requested file: {file_name}. Be sure to include the file extension. This is most likely an internal error. 
Go to [NOT YET MERGED, ISSUES TAB WILL NOT EXIST] and submit a Issue in the Issues tab.\") # If you do fork this, just put the link https://github.com/sk8terkyd/digital-logic-sim-chip-compiler/issues there.\")\n quit(1)\n","sub_path":"create_file.py","file_name":"create_file.py","file_ext":"py","file_size_in_byte":2277,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"607777882","text":"from flask.ext.restful import Resource, abort, marshal_with, marshal\r\nfrom app import rest_api\r\nfrom flask import request\r\nfrom flask.ext.login import current_user\r\nfrom .fields import *\r\nfrom .exceptions import *\r\nfrom .services import *\r\nimport logging\r\n\r\nlog = logging.getLogger(__name__)\r\n\r\n\r\nclass WorkPermitsResource(Resource):\r\n \"\"\"\r\n Resource for getting all Work Permits\r\n \"\"\"\r\n\r\n @marshal_with(work_permit_fields)\r\n def get(self):\r\n \"\"\" GET /api/work_permits \"\"\"\r\n return get_work_permits()\r\n\r\n def post(self):\r\n \"\"\" POST /api/work_permits \"\"\"\r\n form_data = request.json\r\n log.debug('New Work Permit application request: {0}'.format(form_data))\r\n work_permit = new_work_permit_request(form_data)\r\n result = dict(status=200, message='OK', work_permit=work_permit)\r\n return marshal(result, work_permit_create_fields)\r\n\r\n\r\nclass WorkPermitDetailsResource(Resource):\r\n \"\"\"\r\n Resource for getting Work Permit Details\r\n \"\"\"\r\n\r\n @marshal_with(work_permit_complete_fields)\r\n def get(self, work_permit_id):\r\n \"\"\" GET /api/work_permits/ \"\"\"\r\n try:\r\n return get_work_permit_details(work_permit_id)\r\n except WorkPermitNotFoundError as err:\r\n abort(404, message=err.message)\r\n\r\n def put(self, work_permit_id):\r\n \"\"\" PUT /api/work_permits/ \"\"\"\r\n form_data = request.json\r\n log.debug('Update Work Permit id={0} , {1}'.format(work_permit_id, form_data))\r\n try:\r\n work_permit = update_work_permit(work_permit_id, form_data)\r\n result = dict(status=200, message='OK', work_permit=work_permit)\r\n return marshal(result, work_permit_create_fields)\r\n except WorkPermitNotFoundError as err:\r\n abort(404, message=err.message)\r\n\r\n def delete(self, work_permit_id):\r\n try:\r\n delete_work_permit(work_permit_id)\r\n result = dict(status=200, message=\"OK\")\r\n return marshal(result, success_fields)\r\n except WorkPermitNotFoundError as err:\r\n abort(404, message=err.message)\r\n\r\n\r\nclass WorkPermitStepsResource(Resource):\r\n \"\"\"\r\n Resource for getting Work Permit Steps\r\n \"\"\"\r\n\r\n def delete(self, work_permit_id):\r\n step_id = request.args.get('id')\r\n log.debug('Delete Work Step id={0} , step id={1}'.format(work_permit_id, step_id))\r\n try:\r\n delete_work_step(work_permit_id, step_id)\r\n result = dict(status=200, message=\"OK\")\r\n return marshal(result, success_fields)\r\n except WorkPermitNotFoundError as err:\r\n abort(404, message=err.message)\r\n\r\n\r\nclass WorkPermitHazardsResource(Resource):\r\n \"\"\"\r\n Resource for getting Work Permit hazards\r\n \"\"\"\r\n\r\n def delete(self, work_permit_id):\r\n haz_id = request.args.get('id')\r\n log.debug('Delete Work Hazard id={0} , work hazard id={1}'.format(work_permit_id, haz_id))\r\n try:\r\n delete_work_hazard(work_permit_id, haz_id)\r\n result = dict(status=200, message=\"OK\")\r\n return marshal(result, success_fields)\r\n except WorkPermitNotFoundError as err:\r\n abort(404, message=err.message)\r\n\r\n\r\nrest_api.add_resource(WorkPermitsResource, 
'/api/work_permits')\r\nrest_api.add_resource(WorkPermitDetailsResource, '/api/work_permits/<int:work_permit_id>')\r\nrest_api.add_resource(WorkPermitStepsResource, '/api/work_permits/<int:work_permit_id>/steps')\r\nrest_api.add_resource(WorkPermitHazardsResource, '/api/work_permits/<int:work_permit_id>/hazards')\r\n","sub_path":"app/maintenance_and_permits_module/resources.py","file_name":"resources.py","file_ext":"py","file_size_in_byte":3628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"459652081","text":"import tensorflow as tf\n\n# Global parameters\nDATA_FILE = \"boston_housing.csv\"\nBATCH_SIZE = 10 # batch: a group of samples processed at once\nNUM_FEATURES = 14\n\n\n# ----------------------------- data preprocess ---------------------------------\ndef data_generator(filename):\n \"\"\"\n\n generates tensor in batches of size BATCH_SIZE.\n args: string tensor\n\n :param filename: from which data is to be read\n :return: tensors feature_batch and label_batch\n \"\"\"\n f_queue = tf.train.string_input_producer(filename)\n reader = tf.TextLineReader(skip_header_lines=1) # skip the first\n _, value = reader.read(f_queue)\n\n record_defaults = [[0.0] for _ in range(NUM_FEATURES)]\n\n data = tf.decode_csv(value, record_defaults=record_defaults)\n features = tf.stack(tf.gather_nd(data, [[5], [10], [12]])) # choose feature RM, PTRATIO, LSTAT\n label = data[-1]\n\n # minimum number of elements in the queue after a dequeue\n min_after_dequeue = 10 * BATCH_SIZE\n\n # the maximum number of elements in the queue\n capacity = 20 * BATCH_SIZE\n\n # shuffle the data to generate BATCH_SIZE sample pairs\n feature_batch, label_batch = tf.train.shuffle_batch([features, label], batch_size=BATCH_SIZE, capacity=capacity,\n min_after_dequeue=min_after_dequeue)\n\n return feature_batch, label_batch\n\n\ndef generate_data(feature_batch, label_batch):\n \"\"\"\n\n :param feature_batch:\n :param label_batch:\n :return:\n \"\"\"\n with tf.Session() as sess:\n # initialize the queue threads\n coord = tf.train.Coordinator() # Coordinator: coordinates the queue-runner threads\n threads = tf.train.start_queue_runners(coord=coord)\n for _ in range(5): # generate 5 batches\n features, labels = sess.run([feature_batch, label_batch])\n print(features, 'HI')\n\n coord.request_stop()\n coord.join(threads)\n\n# -------------------------------- end of data preprocess ------------------------------------\n\n\nif __name__ == '__main__':\n _feature_batch, _label_batch = data_generator([DATA_FILE])\n generate_data(_feature_batch, _label_batch)\n\n \"\"\"\n there are 16 lines of data whose MEDV equals 50.0 (a capped value) and they are not useful,\n so we can use an option like this to delete them:\n \n condition = tf.equal(data[13], tf.constant(50.0))\n data = tf.where(condition, tf.zeros(NUM_FEATURES), data[:]) # to change the data into zeros\n \"\"\"\n","sub_path":"tf_learning_boston.py","file_name":"tf_learning_boston.py","file_ext":"py","file_size_in_byte":2558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"185621050","text":"import ray\nimport sys\nimport time\nimport torch\nimport numpy as np\n\nsys.path.insert(0, \"../malib\")\nfrom malib.utils import Logger, default_league_config, default_learner_config\nfrom malib.utils import regist, register_init\nfrom malib.learn import League, Learner\nimport example\nfrom example.smac_example.agent.scplayer import SCPlayer\nfrom example.smac_example.env.smac_env import SmacEnv\nfrom example.smac_example.env.smac_env import (\n SmacEnv,\n SmacWrapper,\n 
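# wrappers are applied in the order listed in env_config below\n 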
SmacObsWrapper,\n SmacActionWrapper,\n)\n\nfrom malib.utils import regist, register_init\nfrom malib.utils import default_league_config, default_learner_config\nfrom malib.game import Game\nfrom malib.action import ProbSampleAction, RandomAction, GreedyAction\nfrom malib.feature import DictFeature\n\nif torch.cuda.is_available():\n ray.init(address=\"auto\")\nelse:\n ray.init()\n\nregister_handle = register_init()\nregist(\"smacenv\", SmacEnv)\nregist(\"smacwrapper\", SmacWrapper)\nregist(\"smacobswrapper\", SmacObsWrapper)\nregist(\"smacactionwrapper\", SmacActionWrapper)\nregist(\"prob_action\", ProbSampleAction)\nregist(\"random_action\", RandomAction)\nregist(\"greedy_action\", GreedyAction)\nregist(\"dict_feature\", DictFeature)\nregist(\"scplayer\", SCPlayer)\n\nenv_config = dict(\n config_name=\"env_config\",\n smac_env=dict(\n raw_env=\"smacenv\",\n wrapper=[\"smacwrapper\", \"smacobswrapper\", \"smacactionwrapper\"],\n env_params=dict(map_name=\"8m\"),\n ),\n)\ndata_config = dict(\n config_name=\"data_config\",\n data_to_save=dict(\n player_data=[\"feature\", \"obs\", \"model_out\", \"action\"],\n other_data=[\"game_data\", \"reward\"],\n ),\n train_data_num=256,\n tra_len=1,\n batch_size=128,\n data_async=False,\n data_capacity=2000,\n data_sample_mode=\"USWR\",\n)\neval_config = dict(\n config_name=\"eval_config\",\n eval_game_number=5,\n total_episode_number=10,\n ray_mode=\"sync\",\n eval_mode=\"env\", # env: test a single player in the env, dynamic: pick an opponent, opponent_id: play the specified opponent\n env_name=\"smac_env\",\n players=[\"p0\"],\n evaluator_num=2,\n)\nif torch.cuda.is_available():\n game_number = 5\nelse:\n game_number = 2\n\nlearner_config = dict(\n config_name=\"learner_config\",\n game_number=game_number,\n env_name=\"smac_env\",\n player_id=\"p0\",\n local_data=True,\n learn_model_id=[\"m0\"],\n ray_mode=\"sync\", # sync: ray.get, async: ray.wait\n)\n\n\ntrainer_config = dict(\n config_name=\"trainer_config\",\n use_gpu=torch.cuda.is_available(),\n gpu_num=1,\n trainer_mode=\"local\",\n m0=dict(\n trainer_number=1,\n trainer_name=\"trainer:DQNTrainer\",\n lr=0.0001,\n target_model_update_iter=30,\n EPSILON=0.9,\n GAMMA=0.9,\n # training_procedure= train_on_batch,\n ),\n)\nplayer_config = dict(\n config_name=\"player_config\",\n players=[\"p0\"],\n p0=dict(\n # player_name=\"agent:MAPlayer\",\n player_name=\"scplayer\",\n agents=[\"a0\", \"a1\", \"a2\", \"a3\", \"a4\", \"a5\", \"a6\", \"a7\"],\n action_config=dict(\n action_name=\"greedy_action\",\n epsilon=1.0,\n episode_count=10000000,\n ),\n feature_config=\"dict_feature\",\n ),\n agent_config=dict(\n a0=dict(agent=\"agent:BaseAgent\", model_id=\"m0\"),\n a1=dict(agent=\"agent:BaseAgent\", model_id=\"m0\"),\n a2=dict(agent=\"agent:BaseAgent\", model_id=\"m0\"),\n a3=dict(agent=\"agent:BaseAgent\", model_id=\"m0\"),\n a4=dict(agent=\"agent:BaseAgent\", model_id=\"m0\"),\n a5=dict(agent=\"agent:BaseAgent\", model_id=\"m0\"),\n a6=dict(agent=\"agent:BaseAgent\", model_id=\"m0\"),\n a7=dict(agent=\"agent:BaseAgent\", model_id=\"m0\"),\n ),\n model_config=dict(\n m0=dict(\n model_name=\"model:MLP\",\n model_params=dict(\n in_dim=(80),\n out_dim=(14),\n hidden_dim=512,\n ),\n ),\n ),\n)\n\nleague_config_dict = dict(\n config_name=\"league_config\",\n eval_players=[\"p0\"],\n eval_auto=True,\n auto_save=False,\n standings_mode=[\n \"reward\",\n \"winrate\",\n ], # reward:compute the reward,score:compute the score, winrate:compute the winrate\n env_name=\"cartpole_v0\",\n workdir=\"logs/league\",\n)\n\nconfig = 
default_learner_config()\nconfig.update(learner_config)\nconfig.update(player_config)\nconfig.update(data_config)\nconfig.update(env_config)\nconfig.update(trainer_config)\nconfig.update(eval_config)\n\nleague_config = default_league_config()\nleague_config.update(eval_config)\nleague_config.update(league_config_dict)\nleague_config.update(player_config)\nleague_config.update(env_config)\n\nconfig.save()\nleague_config.save()\n\n\nlogger = Logger(config.log_config)\nlogger.config_info([config, league_config])\n\n\nclass MyLearner(Learner):\n def __init__(self, config, register_handle):\n super(MyLearner, self).__init__(config=config, register_handle=register_handle)\n self.build_games()\n self.build_trainers()\n self.init_games_weights()\n # self.start_data_thread()\n\n def learning_procedure(self, learner=None):\n t0 = time.time()\n data = self.ask_for_data(min_episode_count=3)\n t1 = time.time()\n result = self.learn_on_data(data)\n t2 = time.time()\n\n self.sync_weights(result)\n t3 = time.time()\n game_last_info = self.get_game_info()\n game_reward = [g_data[\"info\"][\"episode_reward\"] for g_data in game_last_info]\n mean_reward1 = np.mean(game_reward)\n self.logger.add_scalar(\"p0/reward\", mean_reward1, self.learn_step_number)\n t4 = time.time()\n\n logger.info(\n [\n \"learner step number:{},train reward:{}\".format(\n self.learn_step_number, mean_reward1\n )\n ]\n )\n\n return result\n\n\nif __name__ == \"__main__\":\n print('start ----------------------------------------------')\n league_cls = League.as_remote().remote\n league = league_cls(league_config, register_handle=register_handle)\n\n learner = MyLearner(config, register_handle=register_handle)\n for i in range(5000):\n learner.step()\n\n if i % 3 == 0:\n p = learner.get_training_player()\n league.add_player.remote(p)\n time.sleep(100000)\n","sub_path":"FTG/malib/example/smac_example/dqn.py","file_name":"dqn.py","file_ext":"py","file_size_in_byte":6247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"550990822","text":"# -*- coding: utf-8 -*-\nimport os\nimport dj_database_url\nfrom .base import *\n\nDEBUG = False\nSTATIC_ROOT = os.path.join(BASE_DIR, \"staticfiles\")\nSTATICFILES_DIRS = (\n os.path.join(BASE_DIR, 'static'),\n)\nDATABASES['default'] = dj_database_url.config()\nSECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')\nALLOWED_HOSTS = ['*']\n","sub_path":"Credit/settings/production.py","file_name":"production.py","file_ext":"py","file_size_in_byte":337,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"508238077","text":"from assignment3.task import task\nfrom assignment3.discretization_task import discretization_task\nimport numpy as np\nfrom hmmlearn import hmm\n\n\nclass profiling_task(task):\n dataframe = None # The preproccessed dataset\n infected_hosts = None # IP of all infected hosts minus the chosen infected host (147.32.84.165)\n normal_hosts = None # IP of all normal hosts\n\n def __init__(self, dataframe, infected_hosts, normal_hosts):\n self.dataframe = dataframe\n self.infected_hosts = infected_hosts\n self.normal_hosts = normal_hosts\n\n # Returns array with sliding windows of size window_size\n def sliding_windows(self, ip, window_size):\n new_data = []\n # Obtain sequence data from the netflows from the given ip\n data = self.dataframe[(self.dataframe['src_ip'] == ip) | (self.dataframe['dst_ip'] == ip)]\n data = data['encoding'].tolist()\n\n if len(data) < window_size:\n return 
new_data\n\n        # + 1 so the final window of the sequence is also kept\n        for i in range(len(data) - window_size + 1):\n            new_data.append(data[i:i + window_size])\n        new_data = np.array(new_data)\n\n        return new_data\n\n    # Returns the log probability of all hosts\n    def hmm_model(self, data):\n        # Learn hmm from the data of infected host 147.32.84.165\n        model = hmm.GaussianHMM(n_components=4)\n        model.fit(data)\n        # Save the log probability\n        logprob_infected = model.score(data)\n\n        # Get log probability of the other infected and normal hosts,\n        # using the model learned from the data from the chosen infected host\n        logprob_others = []\n\n        # Get log probability of the other infected hosts\n        for infected in self.infected_hosts:\n            new_data = self.sliding_windows(infected, 10)\n            if len(new_data) == 0:\n                logprob_others.append((infected, 0))\n            else:\n                logprob_others.append((infected, model.score(new_data)))\n        # Get log probability of the normal hosts\n        for normal in self.normal_hosts:\n            new_data = self.sliding_windows(normal, 10)\n            if len(new_data) == 0:\n                logprob_others.append((normal, 0))\n            else:\n                logprob_others.append((normal, model.score(new_data)))\n\n        # Prints the log probability of the infected host and all other hosts\n        # This is used to determine the threshold\n        print(\"logprob_infected: \", logprob_infected)\n        print(\"logprob_others: \", logprob_others)\n\n        return logprob_infected, logprob_others\n\n    # Returns a list with ips which are classified as infected and another list,\n    # with ips which are classified as normal\n    def classification(self, logprob_infected, logprob_others):\n        classified_infected = []\n        classified_normal = []\n\n        for tup in logprob_others:\n            ip, logprob = tup\n            # Check whether the difference is below the threshold logprob_infected / 2\n            # and classify accordingly\n            if abs(logprob - logprob_infected) < (logprob_infected / 2):\n                classified_infected.append(ip)\n            else:\n                classified_normal.append(ip)\n\n        return classified_infected, classified_normal\n\n    # Compute true negatives, false positives, false negatives, true positives,\n    # and precision and recall\n    def evaluation(self, classified_infected, classified_normal):\n        tn = 0\n        fp = 0\n        fn = 0\n        tp = 0\n\n        for ip in classified_infected:\n            if ip in self.infected_hosts:\n                tp = tp + 1\n            else:\n                fp = fp + 1\n\n        for ip in classified_normal:\n            if ip in self.normal_hosts:\n                tn = tn + 1\n            else:\n                fn = fn + 1\n\n        print(\"tp: \", tp)\n        print(\"tn: \", tn)\n        print(\"fp: \", fp)\n        print(\"fn: \", fn)\n        print(\"precision: \", tp / (tp + fp))\n        print(\"recall: \", tp / (tp + fn))\n\n    @staticmethod\n    def run_task(preprocessing=False):\n        if preprocessing:\n            print(\"Preprocessing. 
Wait until it says it is done.\")\n            profiling_task.preprocess(input=\"datasets/capture20110818.pcap.netflow.labeled\",\n                                      output=\"datasets/preprocessed_task_3_4.csv\",\n                                      list_of_ips=[\"147.32.84.205\", \"147.32.84.170\", \"147.32.84.134\", \"147.32.84.164\",\n                                                   \"147.32.87.36\", \"147.32.80.9\", \"147.32.87.11\"], task_name=\"profiling\")\n            print(\"Done.\")\n\n        discretization = discretization_task(\"datasets/preprocessed_task_3_4.csv\",\n                                             bins=3,\n                                             protocol=True,\n                                             packets=True,\n                                             duration=False,\n                                             bytes=False)\n        discretization.add_netflow_encoding_column()\n\n        profiling = profiling_task(discretization.df, [\"147.32.84.191\", \"147.32.84.192\", \"147.32.84.193\",\n                                                       \"147.32.84.204\", \"147.32.84.205\", \"147.32.84.206\",\n                                                       \"147.32.84.207\", \"147.32.84.208\", \"147.32.84.209\"],\n                                   [\"147.32.84.170\", \"147.32.84.134\", \"147.32.84.164\",\n                                    \"147.32.87.36\", \"147.32.80.9\", \"147.32.87.11\"])\n        data = profiling.sliding_windows(\"147.32.84.165\", 10)\n        logprob_infected, logprob_others = profiling.hmm_model(data)\n        classified_infected, classified_normal = profiling.classification(logprob_infected, logprob_others)\n        profiling.evaluation(classified_infected, classified_normal)\n\n\nif __name__ == \"__main__\":\n    # Set 'preprocessing' to True if you want to create the dataset, set to False to use the provided dataset.\n    profiling_task.run_task(preprocessing=False)\n","sub_path":"assignment3/profiling_task.py","file_name":"profiling_task.py","file_ext":"py","file_size_in_byte":6003,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"170053770","text":"import os\nimport sys\nsys.path.append(os.path.join(os.path.dirname(__file__), '../../'))\nimport itertools\nimport torch\nimport numpy as np\n#import torch.multiprocessing as mp\nimport multiprocessing as mp\n\nimport pommerman\nfrom pommerman.agents import nn_agent\nfrom pommerman import constants\nfrom pommerman.agents import input_util2\nfrom pommerman import utility\nimport random\nimport copy\n\nimport psutil\nimport pickle\nfrom collections import deque\n\nfrom pommerman.agents import worker\nfrom pommerman.agents import optimize_nn\nfrom pommerman.agents import game_buffer\n\ndef main(params):\n    \"\"\"\n    The training loop:\n    1. play a set of games using multiple workers\n    2. load the games and optimize the nn\n    3. 
go back to step 1\n    \"\"\"\n    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n    print('Device:', device)\n    mp.set_start_method('spawn')\n    learning_rate = params['learning_rate']\n\n    print('objective (algo):', params['objective'])\n    print('games dir:', params['game_dir'], 'nn model save at:', params['nn_model_dir'])\n    loop_step = 0\n    nn_model_dir = params['nn_model_dir']\n\n    input_shape = params['input_shape']\n    n_actions = params['n_actions']\n    n_filters_per_layer = params['n_filters_per_layer']\n    n_cnn_layers = params['n_cnn_layers']\n    nn_model = worker.load_newest_nn(nn_model_dir, input_shape, n_actions, n_filters_per_layer, n_cnn_layers, 0, params['nn_type'])\n    optimizer = torch.optim.Adam(nn_model.parameters(), lr=learning_rate, eps=1e-5) if params['optimizer'] == 'adam' else torch.optim.SGD(nn_model.parameters(), lr=learning_rate, momentum=0.9)\n    while loop_step < params['max_loop_step']:\n        pool = mp.Pool(processes=params['n_workers'])\n        process = psutil.Process(os.getpid())\n        print('Main RSS(GB):', process.memory_info().rss / (10 ** 9))\n        print('playing games')\n        results = []\n        gpu_count = torch.cuda.device_count()\n\n        gameBuffer = game_buffer.GameBuffer(capacity=params['buffer_size'])\n        # assign workers to each GPU evenly\n        for i in range(params['n_workers']):\n            device_id = i % gpu_count if gpu_count > 0 else 0  # guard against CPU-only machines\n            params2 = dict(params)\n            print('worker %d on device_id %d' % (i, device_id))\n            params2['device_id'] = device_id\n            results.append(pool.apply_async(worker.produce_games, [params2, False]))\n\n        for i in range(params['n_workers']):\n            game_list = results[i].get()\n            for g in game_list:\n                gameBuffer.push(g)\n            del game_list\n        print('All workers finished!')\n        print('optimize nn')\n        optimize_nn.optimize_nn(params, loop_step, nn_model, optimizer, gameBuffer)\n        del gameBuffer\n        loop_step += 1\n        pool.close()\n        pool.join()\n        del pool\n\nif __name__ == \"__main__\":\n    import argparse\n    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n    parser.add_argument('--n_workers', type=int, default=2, help='number of worker threads')\n    parser.add_argument('--n_games_per_worker', type=int, default=10, help='number of games per worker')\n    parser.add_argument('--batch_seq_size', type=int, default=4, help='number of games per training mini-batch')\n    parser.add_argument('--buffer_size', type=int, default=20, help='number of games in the buffer')\n    parser.add_argument('--env_id', type=str, default='PommeTeamCompetition-v0', help=\"environment id string. 
Possible values: [PommeFFACompetition-v0, PommeFFACompetitionFast-v0, PommeFFAFast-v0, PommeFFA-v1, PommeRadio-v2, PommeTeamCompetition-v0, PommeTeam-v0, PommeTeamFast-v0] \")\n    parser.add_argument('--objective', type=str, default='ppo', help='should be one of {maximum_likelihood, a2c, ppo}')\n    parser.add_argument('--max_loop_step', type=int, default=10, help='maximum number of training loop steps')\n    parser.add_argument('--game_dir', type=str, default=\"GAMES\", help='directory where played games are stored')\n    parser.add_argument('--nn_model_dir', type=str, default=\"NN_MODELS\", help='directory where nn models are saved')\n\n    parser.add_argument('--opponent', type=str, default='static', help='must be one of {static, random, smart_random, smart_random_no_bomb}')\n\n    parser.add_argument('--chunk_size', type=int, default=12, help='chunk size when splitting a long sequence into chunks')\n\n    parser.add_argument('--n_epochs', type=int, default=1, help='how many epochs to train given a set of games in game_memory')\n    parser.add_argument('--optimizer', type=str, default='adam', help='which optimizer to use {momentum, adam}')\n    parser.add_argument('--gae_discount', type=float, default=0.998, help='reward discount rate')\n    parser.add_argument('--gae_lambda', type=float, default=0.95, help='lambda for Generalized Advantage Estimate (weighting factor, 0 then one step TD, 1 T-step)')\n    parser.add_argument('--gae_n_step', type=int, default=2560, help='for Generalized Advantage Estimate, how many steps lookahead')\n    parser.add_argument('--entropy_loss_coeff', type=float, default=0.01, help='entropy loss weight')\n    parser.add_argument('--policy_loss_coeff', type=float, default=1.0, help='policy loss weight')\n    parser.add_argument('--value_loss_coeff', type=float, default=0.5, help='value loss weight')\n\n    parser.add_argument('--learning_rate', type=float, default=0.0003, help='learning rate')\n    # parse \"14,11,11\" into a tuple of ints (type=tuple would split the string into characters)\n    parser.add_argument('--input_shape', type=lambda s: tuple(int(x) for x in s.split(',')), default=(14, 11, 11), help='input shape for the neural net')\n    parser.add_argument('--n_actions', type=int, default=6, help='there are 6 actions for pommerman')\n    parser.add_argument('--n_cnn_layers', type=int, default=4, help='how deep is the CNN')\n    parser.add_argument('--n_filters_per_layer', type=int, default=64, help='how many 3x3 filters per layer')\n    parser.add_argument('--device_id', type=int, default=None, help='on which device, cpu or cuda:0 or cuda:1 or ..')\n    # store_true avoids the type=bool pitfall where any non-empty string parses as True\n    parser.add_argument('--random_start', action='store_true', help='random start state')\n    parser.add_argument('--nn_type', type=str, default='CNN', help='CNN or CNN_LSTM')\n\n    args = parser.parse_args()\n    params = vars(args)\n    main(params)\n","sub_path":"pommerman/agents/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":6295,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"31933216","text":"class Solution(object):\n    def calculate(self, s):\n        \"\"\"\n        :type s: str\n        :rtype: int\n        \"\"\"\n        from operator import mul, add, sub\n        # '/' must truncate toward zero; // floors, so negative results need the sign guard\n        m = {'+': add, '-': sub, '*': mul, '/': lambda x, y: x // y if x * y >= 0 else -(abs(x) // abs(y))}\n        nums = []\n        num, op = '0', '+'\n        for ch in s + '+':\n            if ch.isdigit():\n                num += ch\n            elif ch != ' ':\n                if op in '+-':\n                    nums.append(m[op](0, int(num)))\n                else:\n                    nums[-1] = m[op](nums[-1], int(num))\n                num = '0'\n                op = ch\n        return sum(nums)\n","sub_path":"Basic Calculator II.py","file_name":"Basic Calculator II.py","file_ext":"py","file_size_in_byte":664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"256158798","text":"#日付と日時を取得するライブラリ(標準機能)\nimport datetime as dt\n\ndef function(thisyear, today, oprn):\n newyear = dt.datetime(thisyear, 1, 1)\n print('---------------------------------------------')\n if oprn==1:\n #timedeltaはdatetimeライブラリのうるう年を考慮した便利な奴\n print(today + dt.timedelta(days=7))\n \n elif oprn==2:\n print(today + dt.timedelta(days=30))\n\n elif oprn==3:\n print(today + dt.timedelta(days=365))\n\n elif oprn==4:\n calc = today - newyear + dt.timedelta(days=1)\n #.をつけて抜き取りたい要素を書くと抜き取れる\n print(calc.days)\n \n else:\n print('無効な識別コードです。')\n\n######################################################\n#実行した時点の日時を取得\ntoday = dt.datetime.today()\nprint('=============================================')\nprint('実行年月日時分秒:',today)\nprint('=============================================')\n\n#todayから年の部分を抜き取り\nthisyear = today.year\n\nprint('''知りたい項目の識別コードを入力\n 1週間後の日付: 1\n 30日後の日付: 2\n 1年後(365日後)の日付: 3\n''',str(thisyear) + '年1月1日から何日目か: 4')\noprn = input('>>')\noprn = int(oprn)\n\n#ユーザ関数functionに値を投げ込み\n#ちなみにclassの中のdefはメソッドという\n#ユーザ関数に出力\nfunction(thisyear, today, oprn)","sub_path":"various_day_func.py","file_name":"various_day_func.py","file_ext":"py","file_size_in_byte":1461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"596539105","text":"#\n# [420] Strong Password Checker\n#\n# https://leetcode.com/problems/strong-password-checker/description/\n#\n# algorithms\n# Hard (19.17%)\n# Total Accepted: 5K\n# Total Submissions: 26K\n# Testcase Example: '\"\"'\n#\n# A password is considered strong if below conditions are all met:\n#\n#\n# ⁠It has at least 6 characters and at most 20 characters.\n# ⁠It must contain at least one lowercase letter, at least one uppercase\n# letter, and at least one digit.\n# ⁠It must NOT contain three repeating characters in a row (\"...aaa...\" is\n# weak, but \"...aa...a...\" is strong, assuming other conditions are met).\n#\n#\n# Write a function strongPasswordChecker(s), that takes a string s as input,\n# and return the MINIMUM change required to make s a strong password. 
If s is\n# already strong, return 0.\n#\n# Insertion, deletion, or replacement of any one character is considered one\n# change.\n#\n\n\n# REVIEW:\n# - len < 6\n# - 6 <= len <= 20\n# - len > 20\n\n\nclass Solution:\n    def strongPasswordChecker(self, s):\n        \"\"\"\n        :type s: str\n        :rtype: int\n        \"\"\"\n\n        missing_type_count = 0\n        if not any('a' <= c <= 'z' for c in s):\n            missing_type_count += 1\n        if not any('A' <= c <= 'Z' for c in s):\n            missing_type_count += 1\n        if not any(c.isdigit() for c in s):\n            missing_type_count += 1\n\n        repeat_insert = 0\n        remain_zero = 0\n        remain_one = 0\n\n        idx = 2\n        while idx < len(s):\n            if s[idx] == s[idx - 1] == s[idx - 2]:\n                length = 2\n                while idx < len(s) and s[idx] == s[idx - 1]:\n                    length += 1\n                    idx += 1\n\n                repeat_insert += length // 3\n                if length % 3 == 0:\n                    remain_zero += 1\n                elif length % 3 == 1:\n                    remain_one += 1\n            else:\n                idx += 1\n\n        if len(s) < 6:\n            return max(missing_type_count, 6 - len(s))\n        elif len(s) <= 20:\n            return max(missing_type_count, repeat_insert)\n        else:\n            delete = len(s) - 20\n\n            repeat_insert -= min(delete, remain_zero)\n            repeat_insert -= min(max(delete - remain_zero, 0), remain_one * 2) // 2\n            repeat_insert -= max(delete - remain_zero - 2 * remain_one, 0) // 3\n\n            return delete + max(missing_type_count, repeat_insert)\n","sub_path":"src/420.strong-password-checker.python3.py","file_name":"420.strong-password-checker.python3.py","file_ext":"py","file_size_in_byte":2404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"195804622","text":"salario = float(input('What is your salary? '))\r\n\r\ndef main():\r\n    vp = salario * tabela_atual() / 100\r\n    vc = salario * tabela_corrigida() / 100\r\n\r\n    print(f'Amount paid: R${vp} \\nAmount that should have been paid under the corrected table: R${vc}')\r\n\r\n\r\ndef tabela_atual():\r\n    if salario <= 1903.98:\r\n        return 0\r\n    elif salario <= 2826.65:\r\n        return 7.5\r\n    elif salario <= 3751.05:\r\n        return 15\r\n    elif salario <= 4664.68:\r\n        return 22.5\r\n    else:\r\n        return 27.5\r\n\r\n\r\ndef tabela_corrigida():\r\n    if salario <= 3881.65:\r\n        return 0\r\n    elif salario <= 5714.11:\r\n        return 7.5\r\n    elif salario <= 7654.67:\r\n        return 15\r\n    elif salario <= 9564.42:\r\n        return 22.5\r\n    else:\r\n        return 27.5\r\n\r\n\r\nmain()","sub_path":"desafio/impostoderenda.py","file_name":"impostoderenda.py","file_ext":"py","file_size_in_byte":764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"91433487","text":"import sys\nimport os\nimport mujoco_py as mujoco\n\nDIRNAME = os.path.dirname(__file__)\nsys.path.append(os.path.join(DIRNAME, '..'))\n\n\nfrom src.constrainedChasingEscapingEnv.envMujoco import IsTerminal, TransitionFunction\nfrom src.constrainedChasingEscapingEnv.state import GetAgentPosFromState\n\n\ndef main():\n    # transition function\n    dirName = os.path.dirname(__file__)\n    physicsDynamicsPath = os.path.join(dirName, '..', 'env', 'xmls', 'twoAgents.xml')\n    agentsBodyMassIndex = [6, 7]\n    physicsSmallMassModel = mujoco.load_model_from_path(physicsDynamicsPath)\n    physicsSmallMassModel.body_mass[agentsBodyMassIndex] = [4, 5]\n    physicsLargeMassModel = mujoco.load_model_from_path(physicsDynamicsPath)\n    physicsLargeMassModel.body_mass[agentsBodyMassIndex] = [8, 10]\n    physicsSmallMassSimulation = mujoco.MjSim(physicsSmallMassModel)\n    physicsLargeMassSimulation = mujoco.MjSim(physicsLargeMassModel)\n    # set_constants() exists in mujoco_py >= 2.0, but not in 1.50\n    
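# presumably this re-derives the model's constant fields so the edited body masses take effect\n    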
physicsSmallMassSimulation.set_constants()\n    physicsLargeMassSimulation.set_constants()\n\n    sheepId = 0\n    wolfId = 1\n    xPosIndex = [2, 3]\n    getSheepXPos = GetAgentPosFromState(sheepId, xPosIndex)\n    getWolfXPos = GetAgentPosFromState(wolfId, xPosIndex)\n    killzoneRadius = 2\n    isTerminal = IsTerminal(killzoneRadius, getSheepXPos, getWolfXPos)\n\n    numSimulationFrames = 20\n    transitSmallMassAgents = TransitionFunction(physicsSmallMassSimulation, isTerminal, numSimulationFrames)\n    transitLargeMassAgents = TransitionFunction(physicsLargeMassSimulation, isTerminal, numSimulationFrames)\n\nif __name__ == '__main__':\n    main()\n","sub_path":"exec/exampleModifyPhysicsBodyMass.py","file_name":"exampleModifyPhysicsBodyMass.py","file_ext":"py","file_size_in_byte":1640,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"561101530","text":"\"\"\"Make intron regions: the transcript minus every other feature\n   (CDS, exons, UTRs, start/stop codons)\n\"\"\"\nimport argparse\nfrom interval import Interval, IntervalSet\n\ndef getIntrons(transcriptLs, otherLs):\n    ls = []\n    for anInterval in IntervalSet(transcriptLs) - IntervalSet(otherLs):\n        ls.append([str(anInterval.lower_bound),\n                   str(anInterval.upper_bound)])\n    return ls\n\ndef write_gene(fout, foutLen, sp, current_gene, geneType,\n               transcriptLs, otherLs, transcriptInfoLine):\n    if current_gene:\n        ls0 = [sp[0], geneType, 'intron']\n        intron_length = 0\n        for intron in getIntrons(transcriptLs, otherLs):\n            intron_length += int(intron[1]) - int(intron[0]) + 1\n            ls = ls0 + intron + transcriptInfoLine\n            print('\\t'.join(ls), file=fout)\n        print(current_gene + '\\t' + str(intron_length), file=foutLen)\n\ndef mkIntronGff(f, fout, foutLen):\n    currentGene, geneType, transcriptInfoLine = '', '', ''\n    transcriptLs = []\n    otherLs = []\n\n    print('gene\\tintronLen', file=foutLen)\n    for line in f:\n        if line[0] == '#':\n            print(line.strip(), file=fout)\n        else:\n            sp = line.strip().split('\\t')\n            gene = sp[8].split(';')[0].split('\"')[1].split('\"')[0]\n\n            if gene != currentGene:\n                write_gene(fout, foutLen, sp, currentGene, geneType,\n                           transcriptLs, otherLs, transcriptInfoLine)\n                currentGene = gene\n                transcriptLs, otherLs = [], []\n\n            feature = sp[2]\n            if feature == 'transcript':\n                geneType = sp[1]\n                transcriptInfoLine = sp[5:]\n            if feature != 'gene':\n                i = Interval(int(sp[3]), int(sp[4]))\n                if feature == 'transcript':\n                    transcriptLs.append(i)\n                else:\n                    otherLs.append(i)\n\n    # catch last one\n    write_gene(fout, foutLen, sp, currentGene, geneType,\n               transcriptLs, otherLs, transcriptInfoLine)\n\ndef main(args):\n    with open(args.gffIn) as f, open(args.gffOut, 'w') as fout, open(args.geneLenOut, 'w') as foutLen:\n        mkIntronGff(f, fout, foutLen)\n\nif __name__ == \"__main__\":\n    desc = 'Make intron region gff'\n    parser = argparse.ArgumentParser(description=desc)\n    argLs = ('gffIn', 'gffOut', 'geneLenOut')\n    for param in argLs:\n        parser.add_argument(param)\n    args = parser.parse_args()\n    main(args)\n","sub_path":"code/scripts/mkIntronGtf.py","file_name":"mkIntronGtf.py","file_ext":"py","file_size_in_byte":2567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"415039747","text":"import praw\nfrom natsort import natsorted\nfrom bs4 import BeautifulSoup\nfrom requests import get\nimport pickledb\nimport schedule\nfrom time import sleep\nimport configparser\n\n\nconfig = configparser.ConfigParser()\nconfig.read('conf.ini')\nclient_id = 
config['REDDIT']['client_id']\nclient_secret = config['REDDIT']['client_secret']\nreddit_user = config['REDDIT']['reddit_user']\nreddit_pass = config['REDDIT']['reddit_pass']\ntarget_subreddit = config['SETTINGS']['target_subreddit']\nslug = config['SETTINGS']['slug']\ncheck_time = config['SETTINGS']['check_time']\ncatch_up_mode = int(config['SETTINGS']['catch_up_mode'])\n\ndb = pickledb.load(f'{slug}.db', False)\n\nreddit = praw.Reddit(client_id=client_id,\n client_secret=client_secret,\n user_agent=\"Jaimini's Box New Chapter Poster (by /u/impshum)\",\n username=reddit_user,\n password=reddit_pass)\n\n\ndef lovely_soup(u):\n r = get(u, headers={\n 'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:15.0) Gecko/20100101 Firefox/15.0.1'})\n return BeautifulSoup(r.content, 'lxml')\n\n\ndef do_db(x, y):\n if not db.exists(x):\n db.set(x, y)\n db.dump()\n return True\n else:\n return False\n\n\ndef read_db():\n for x in db.getall():\n print(x, db.get(x))\n\n\ndef set_sticky(id):\n submission = reddit.submission(id=id)\n submission.mod.sticky()\n\n\ndef remove_sticky(post_title, post_url, pre):\n for submission in reddit.subreddit(target_subreddit).new(limit=None):\n titled = f'{pre} | Chapter'\n if submission.stickied and titled in submission.title:\n submission = reddit.submission(id=submission.id)\n submission.mod.sticky(state=False)\n\n\ndef post(post_title, post_url, pre):\n remove_sticky(post_title, post_url, pre)\n id = reddit.subreddit(target_subreddit).submit(\n title=post_title, selftext=post_url)\n set_sticky(id)\n\n\ndef runner():\n pre = slug.replace('-', ' ').title()\n soup = lovely_soup(f'https://jaiminisbox.com/reader/series/{slug}')\n all = []\n\n for item in soup.findAll('a'):\n if \"Chapter\" in item.text:\n all.append(item)\n\n for item in natsorted(all):\n text = item.text\n title = f'{pre} | {text}'\n link = item['href']\n if catch_up_mode:\n do_db(title, link)\n print(f'CATCH UP | {title}')\n elif do_db(title, link):\n print(f'NEW | {title}')\n post(title, link, pre)\n else:\n print(f'OLD | {title}')\n\n\ndef main():\n if catch_up_mode:\n runner()\n else:\n runner()\n schedule.every().day.at(check_time).do(runner)\n while True:\n schedule.run_pending()\n sleep(1)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":2809,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"284115145","text":"#!/usr/bin/env python\n\nimport sys\nimport rospy\nfrom cv2 import cv2 # When using VS Code got error by only using import cv2\nfrom cv_bridge import CvBridge, CvBridgeError\nimport numpy as np \nfrom sensor_msgs.msg import Image\nfrom tennis_ball_tracking import HSV, selectContours, Contours # Importing functions from first script to avoid repetition\n\ndef main():\n rospy.Subscriber('/usb_cam/image_raw', Image, callback)\n rospy.spin()\n\ndef callback(img):\n try:\n image = bridge.imgmsg_to_cv2(img, \"bgr8\")\n except CvBridgeError as e:\n print(e)\n\n binary_image = HSV(image, (60, 100, 20), (90, 255, 255)) # Filtered image\n contours = Contours(binary_image) # Found contours on image\n tracking_image = selectContours(image, contours, 3000) # Draw on selected contours\n\n cv2.imshow(\"Tracking Image\", tracking_image) # Showing output\n\n if cv2.waitKey(1) & 0xFF == ord('q'):\n rospy.signal_shutdown(\"User requested\")\n \n\n\nif __name__ == \"__main__\":\n try:\n rospy.init_node('usb_camera_tracker', disable_signals=True)\n bridge = CvBridge()\n main()\n 
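# Ctrl-C while the node is spinning raises KeyboardInterrupt; exit quietly\n    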
except KeyboardInterrupt:\n        pass","sub_path":"tennis_ball_usb_camera_tracker.py","file_name":"tennis_ball_usb_camera_tracker.py","file_ext":"py","file_size_in_byte":1175,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"}
+{"seq_id":"472489126","text":"from rest_framework import views, response\n\nfrom . import client\n\nobserver_client = client.QueryObserverClient()\n\n\nclass QueryObserverUnsubscribeView(views.APIView):\n    def post(self, request):\n        \"\"\"\n        Handles a query observer unsubscription request.\n        \"\"\"\n\n        try:\n            observer = request.query_params['observer']\n            subscriber = request.query_params['subscriber']\n        except KeyError:\n            return response.Response(status=400)\n\n        observer_client.unsubscribe_observer(observer, subscriber)\n        return response.Response()\n","sub_path":"rest_framework_reactive/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":583,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"}
+{"seq_id":"179532598","text":"import re\r\nimport codecs\r\n\r\nimport time\r\nimport sys\r\nimport os\r\nimport shutil\r\n\r\nimport itertools\r\nimport threading\r\nimport colr\r\nimport colorama\r\n\r\ncolorama.init()\r\n\r\n# if os.name == \"posix\":\r\n# if os.name in (\"nt\",\"dos\",\"ce\",\"rt\"):\r\n\r\nclass Colors:\r\n\tcolors = {\r\n\t\t\"fred\" : colorama.Fore.RED,\r\n\t\t\"fgreen\" : colorama.Fore.GREEN,\r\n\t\t\"fblue\" : colorama.Fore.BLUE,\r\n\t\t\"fwhite\" : colorama.Fore.WHITE,\r\n\t\t\"bred\" : colorama.Back.RED,\r\n\t\t\"bgreen\" : colorama.Back.GREEN,\r\n\t\t\"bblue\" : colorama.Back.BLUE,\r\n\t\t\"bwhite\" : colorama.Back.WHITE,\r\n\t\t\"bright\" : colorama.Style.BRIGHT,\r\n\t\t\"dim\" : colorama.Style.DIM,\r\n\t\t\"endc\" : colorama.Style.RESET_ALL,\r\n\t}\r\n\r\nclass Slice(list):\r\n    def __call__(self, a, b):\r\n        return self[a:b]\r\n\r\nclass Parse:\r\n\tdef __init__(self, file):\r\n\t\tself.file = file\r\n\r\n\t# returning sections with the lists (key = value)\r\n\tdef get_sections(self):\r\n\t\tfind = re.compile(r\"^\\s*\\[\\s*(.*?)\\s*\\]\\s*$\", re.I)\r\n\t\tsections = {}\r\n\r\n\t\twith open(self.file, \"r\", encoding=\"utf-8\") as data:\r\n\t\t\twhile True:\r\n\t\t\t\tline = data.readline()\r\n\t\t\t\tif line:\r\n\t\t\t\t\tm = find.match(line)\r\n\t\t\t\t\tif m:\r\n\t\t\t\t\t\tsection_name = m.groups()[0]\r\n\t\t\t\t\t\tsections[section_name] = []\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\tsections[section_name].append(line)\r\n\r\n\t\t\t\telse:\r\n\t\t\t\t\tbreak\r\n\r\n\t\treturn sections\r\n\r\n\t# returning text line by index\r\n\tdef get_text(self, sec, bkey, ekey):\r\n\t\tsections = self.get_sections()\r\n\t\ttry:\r\n\t\t\treturn sections[sec][bkey:ekey]\r\n\t\texcept KeyError:\r\n\t\t\traise KeyError(\"Section \\\"{}\\\" doesn't exist! 
Shutting down.\".format(sec))\r\n\r\n\t# returning dictionary from the section\r\n\tdef get_key(self, sec):\r\n\t\ttdict = {}\r\n\t\ttarget = self.get_sections()\r\n\t\ttarget = target[sec]\r\n\t\ttarget = [(re.split(r\"(.*) +=\", i)[1], re.split(r\"= +(.*)\", i)[1]) for i in target]\r\n\t\ttdict[sec] = {i[0] : i[1] for i in target}\r\n\r\n\t\treturn tdict[sec]\r\n\r\n\tdef colored_text(self, lines, endc=Colors.colors[\"endc\"]):\r\n\t\tcollect = []\r\n\t\tfor i in lines:\r\n\t\t\tpattern = re.compile(r\"\\<\\s*(.*?)\\s*\\|\\s*(.*?)\\s*\\>\")\r\n\t\t\tmatch = re.search(pattern, i)\r\n\t\t\tif match:\r\n\t\t\t\tcolors = match.group(1)\r\n\t\t\t\tstring = match.group(2)\r\n\t\t\t\tcolors = \"\".join([Colors.colors[i] for i in re.split(r\"\\s\", colors)])\r\n\t\t\t\tstring = colors + string + endc\r\n\t\t\t\tstring = re.sub(pattern, string, i)\r\n\t\t\t\tcollect.append(string)\r\n\t\t\telse:\r\n\t\t\t\traise re.error(\"Invalid color pattern\")\r\n\r\n\t\treturn collect\r\n\r\n\tdef find_variables(self, lines, scope={}):\r\n\t\tscope = {i : str(j) for i,j in scope.items()}\r\n\t\tcollect = []\r\n\t\tfor i in lines:\r\n\t\t\tvars_p = re.compile(r\"\\{\\{([A-Za-z0-9_]+)\\}\\}\")\r\n\t\t\tvars_m = re.search(vars_p, i)\r\n\r\n\t\t\tif vars_m:\r\n\t\t\t\tvar = vars_m.group(1)\r\n\t\t\t\ttry:\r\n\t\t\t\t\tvar = re.sub(vars_p, scope[var], i)\r\n\t\t\t\t\tcollect.append(var)\r\n\t\t\t\texcept KeyError:\r\n\t\t\t\t\tcollect.append(i)\r\n\t\t\telse:\r\n\t\t\t\tcollect.append(i)\r\n\r\n\t\treturn collect\r\n\r\n\r\nclass Spells:\r\n\tdef center(lines):\r\n\t\tswidth = shutil.get_terminal_size().columns\r\n\t\tsheight = shutil.get_terminal_size().lines\r\n\t\tminwh = len(min(lines, key=len))\r\n\t\thalfh = sheight // 2 - 2\r\n\t\thalfw = swidth // 2 - minwh // 2 + 7\r\n\r\n\t\treturn (halfh, halfw)\r\n\r\n\t# ['clear','cls'][os.name=='nt']\r\n\r\n\tdef clrscr(scr):\r\n\t\tif scr is True:\r\n\t\t\tif os.name == \"posix\":\r\n\t\t\t\tos.system(\"clear\")\r\n\t\t\telse:\r\n\t\t\t\tos.system(\"cls\")\r\n\t\telse:\r\n\t\t\tpass\r\n\r\n\r\nclass Show:\r\n\tdef __init__(self, file, numerate=False):\r\n\t\tself.parser = Parse(file)\r\n\t\tself.numerate = numerate\r\n\r\n\tdef set_scope(self, scope):\r\n\t\tself.scope = scope\r\n\r\n\t# show in custom place (coordinates: x,y - row,column)\r\n\tdef text(self, sec, bkey=None, ekey=None, clr=False, h=0, w=0, ts=0, te=0, scope={}):\r\n\t\tlines = self.parser.get_text(sec, bkey, ekey)\r\n\t\tlines = self.parser.find_variables(lines, scope=scope)\r\n\t\tlines = self.parser.colored_text(lines)\r\n\t\tlines = \"\".join(lines)\r\n\t\tSpells.clrscr(clr)\r\n\t\ttime.sleep(ts)\r\n\t\tprint(\"\\n\" * h)\r\n\r\n\t\tprint(\" \" * w + lines, end=\"\")\r\n\t\ttime.sleep(te)\r\n\r\n\t# show in middle\r\n\tdef text_mid(self, sec, bkey=None, ekey=None, halfh=True, clr=False, ts=0, te=0, scope={}):\r\n\t\tcollect = []\r\n\t\tlines = self.parser.get_text(sec, bkey, ekey)\r\n\t\tlines = self.parser.find_variables(lines, scope=scope)\r\n\t\thalfh, halfw = Spells.center(lines)\r\n\t\tSpells.clrscr(clr)\r\n\t\ttime.sleep(ts)\r\n\t\tsys.stdout.write(\"\\n\" * halfh)\r\n\r\n\t\tfor i,j in enumerate(lines):\r\n\t\t\tcollect.append(self.parser.colored_text([j]))\r\n\r\n\t\tfor i,j in enumerate(collect):\r\n\t\t\tprint(\" \" * halfw + \"\".join(j), end=\"\")\r\n\t\t\t# sys.stdout.write(j)\r\n\t\t\t# sys.stdout.flush()\r\n\t\t\r\n\t\ttime.sleep(te)\r\n\t\t\r\n\r\n\t# because of some \"input function\" bugs in Windows version\r\n\t# i can't use it with ANSI escape symbols\r\n\r\n\tdef input(self, 
sec, key, sep=\" \", h=0, w=0, clr=False, scope={}):\r\n\t\tlines = self.parser.get_key(sec)[key]\r\n\t\tlines = self.parser.find_variables([lines], scope=scope)\r\n\t\tlines = self.parser.colored_text(lines, endc=\" \")\r\n\t\tlines = \"\".join(lines)\r\n\r\n\t\tSpells.clrscr(clr)\r\n\t\tprint(\"\\n\" * h)\r\n\r\n\t\tprint(lines, end=\"\")\r\n\t\tinp = input()\r\n\t\treturn inp\r\n\r\n\tdef input_mid(self, sec, key, clr=False, scope={}):\r\n\t\tlines = self.parser.get_key(sec)[key]\r\n\t\thalfh, halfw = Spells.center(lines)\r\n\t\tlines = self.parser.find_variables([lines], scope=scope)\r\n\t\tlines = self.parser.colored_text(lines, endc=\" \")\r\n\t\tlines = \"\".join(lines)\r\n\t\tSpells.clrscr(clr)\r\n\t\tprint(\"\\n\" * halfh)\r\n\r\n\t\tprint(\" \" * halfw + lines, end=\"\")\r\n\t\tinp = input()\r\n\t\treturn inp\r\n\r\n\tdef top_bar(self, sec, key, sep=\",\", clr=True, scope={}):\r\n\t\tlines = self.parser.get_key(sec)[key]\r\n\t\tlines = self.parser.find_variables([lines], scope=scope)\r\n\t\tlines = self.parser.colored_text(lines, endc=\" \")\r\n\t\tlines = \"\".join(lines)\r\n\t\tsymbol = lines.split(*sep)[-1]\r\n\t\tlines = lines.replace(sep, \"\")\r\n\t\twidth = shutil.get_terminal_size().columns - len(lines) - len(symbol) + 11\r\n\r\n\t\tSpells.clrscr(clr)\r\n\t\tprint(lines + symbol * width + Colors.colors[\"endc\"])\r\n\r\n\r\n\t#here is the animation\r\n\tdef animate(self, sec, key, sep=\",\", clr=False, h=0, w=0, cycles=10, ts=0.1, te=0):\r\n\t\t# frame1, frame2, frameN ... start frame sign, end frame sign\r\n\t\tcd = 0\r\n\t\tlines = self.parser.get_key(sec)[key]\r\n\t\tlines = lines.split(*sep)\r\n\t\t# lines = self.parser.colored_text(lines)\r\n\r\n\t\tSpells.clrscr(clr)\r\n\t\tprint(\"\\n\"*h)\r\n\r\n\t\tfor i in itertools.cycle(lines[:-2]):\r\n\t\t\tsys.stdout.write('\\r{} '.format(\" \" * w + lines[-2]) + i)\r\n\t\t\tsys.stdout.flush()\r\n\t\t\ttime.sleep(ts)\r\n\t\t\tcd += 1\r\n\r\n\t\t\tif cd == cycles * len(lines[:-2]) + 1:\r\n\t\t\t\tbreak\r\n\r\n\t\ttime.sleep(te)\r\n\r\n\t\t# sys.stdout.write(\"\\r{}\".format(lines[-1]))\r\n","sub_path":"gears/gear.py","file_name":"gear.py","file_ext":"py","file_size_in_byte":6362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"44606545","text":"import requests\nimport json\n\nwebsiteliste = json.loads(open('website.json','r').read())\ndef no_html(text):\n\t\twhile text.find('<') != -1 and text.find('>') != -1 :\n\t\t\t\t\n\t\t\t\tif text.find('<') != -1 and text.find('>') != -1 :\n\t\t\t\t\t\tdebut = text.find('<')\n\t\t\t\t\t\tfin = text.find('>')+1\n\t\t\t\t\t\tselection = text[debut:fin]\n\t\t\t\t\t\ttext = text.replace(selection,\"\")\n\t\t\t\telse : break\n\t\treturn text\n\ndef init(url):\n\twebsite = None\n\tfor websitefor in websiteliste :\n\t\tif websitefor['url'] in url or websitefor['name'] in url :\n\t\t\twebsite = websitefor['name']\n\t\t\tcontenthead = websitefor['contenthead']\n\t\t\tcontentfooter = websitefor['contentfooter']\n\t\t\ttry :\n\t\t\t\t\n\t\t\t\tpubtagheader = websitefor['pubtagheader']\n\t\t\t\tpub = True\n\t\t\texcept :\n\t\t\t\tpub = None\n\tif not website :\n\t\twebsite = \"Inconu\"\n\t\tcontenthead = \"',contentheadindex)+1\n\t\t\tcontenu = contentpage[fermeturebalise1:contentfooterindex]\n\t\t\tif pub == True:\n\t\t\t\tpubtagheaderindex = contentpage.find(pubtagheader)\n\t\t\t\tcontenu = contentpage[fermeturebalise1:pubtagheaderindex]\n\t\t\ttoreturn = 
{'contenu':{'html':contenu,'nohtml':no_html(contenu)},'url':url,'website':website}\n\telse:\n\t\ttoreturn = {'Message':'error'}\n\t\n\treturn toreturn\n\nhello = init('http://www.numerama.com/politique/148221-apple-veut-aller-encore-plus-loin-pour-garantir-le-chiffrement.html')\nprint(hello['contenu']['nohtml'])\n","sub_path":"__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2212,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"}
+{"seq_id":"231000911","text":"'''\nQuestion: Determine whether the entered number is prime.\n** A prime number is a number whose only exact divisors\n   are 1 and itself.\n'''\n\n# x = int(input(\"Number: \"))\n# asalMı = True\n\n# if x == 1:\n#     print(\"The number is not prime.\")\n\n\n# for i in range(2, x):\n#     if (x % i == 0):\n#         asalMı = False\n#         break\n\n\n# if asalMı:\n#     print(\"The number is prime.\")\n# else:\n#     print(\"The number is not prime.\")\n\ny = int(input(\"Up to what number should we check: \"))\nx = 0\nasallar = [2,]\nasalMı = True\n\n\n\n\n# test 12--323123","sub_path":"deneme/asal-sayi.py","file_name":"asal-sayi.py","file_ext":"py","file_size_in_byte":531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"}
+{"seq_id":"359712868","text":"from django.shortcuts import render, get_object_or_404\nfrom .models import Airport, Flight, Flight_Leg, Seat, Customer, Leg_Instance, FFC\nimport datetime\nfrom django.utils import timezone\nfrom django.db.models.functions import TruncMonth\nfrom django.db.models import Count\n\ndef all_customers(request):\n    customers = Customer.objects.all()\n    return render(request, 'mainapp/all_customers.html', {'customers':customers})\n\ndef all_flights(request):\n    leg_instances = Leg_Instance.objects.all()\n    return render(request, 'mainapp/all_flights.html', {'leg_instances':leg_instances})\n\ndef home(request):\n    airports = Airport.objects.all()\n    leg_instances = Leg_Instance.objects.all()\n    most_airports = (Leg_Instance.objects.values('departure_airport_code').annotate(dcount=Count('departure_airport_code')).order_by('-dcount'))\n    most_airports_names = [i['departure_airport_code'] for i in most_airports]\n    most_airports_counts = [i['dcount'] for i in most_airports]\n    months_counts = []\n    for i in range(12):\n        months_counts.append(Leg_Instance.objects.filter(date__month=str(i + 1)).count())\n    customers = Customer.objects.all()\n    return render(request, 'mainapp/home.html', {'airports':airports, 'leg_instances':leg_instances, 'customers':customers, 'months':months_counts, 'most_airports_counts':most_airports_counts, 'most_airports_names':most_airports_names})\n\ndef airport_list(request):\n    airports = Airport.objects.order_by('name')\n    return render(request, 'mainapp/airport_list.html', {'airports':airports})\n\ndef flight_list(request, pk):\n    airport = get_object_or_404(Airport, pk=pk)\n    flight = Flight.objects.all()\n    leg_instance = Leg_Instance.objects.all().order_by('date', 'departue_time')\n    return render(request, 'mainapp/flight_list.html', {'flight':flight, 'airport':airport, 'leg_instance':leg_instance})\n\ndef seats(request, id):\n    leg_instance = get_object_or_404(Leg_Instance, id=id)\n    seats = Seat.objects.all().order_by('seat_number')\n    customer = Customer.objects.all()\n    return render(request, 'mainapp/flight_seats.html', {'leg_instance':leg_instance, 'seats':seats, 'customer':customer})\n\ndef customers(request, passport_number, country):\n    customers = get_object_or_404(Customer, passport_number=passport_number, country=country)\n    
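# related records (FFCs, seats, leg instances) rendered on the customer detail page\n    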
ffcs = FFC.objects.all()\n seats = Seat.objects.all()\n leg_instances = Leg_Instance.objects.all().order_by('date')\n return render(request, 'mainapp/customers.html', {'customers':customers, 'ffcs':ffcs, 'seats':seats, 'leg_instances':leg_instances})\n","sub_path":"mainapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"126246556","text":"\"\"\"Test for tmuxp workspacebuilder.\"\"\"\nimport os\n\nimport pytest\n\nimport kaptan\n\nfrom libtmux import Window\nfrom libtmux.common import has_gte_version\nfrom libtmux.test import retry, temp_session\nfrom tmuxp import config, exc\nfrom tmuxp.cli import load_plugins\nfrom tmuxp.workspacebuilder import WorkspaceBuilder\n\nfrom . import example_dir, fixtures_dir\nfrom .fixtures._util import loadfixture\n\n\ndef test_split_windows(session):\n yaml_config = loadfixture(\"workspacebuilder/two_pane.yaml\")\n s = session\n sconfig = kaptan.Kaptan(handler='yaml')\n sconfig = sconfig.import_config(yaml_config).get()\n\n builder = WorkspaceBuilder(sconf=sconfig)\n\n window_count = len(session._windows) # current window count\n assert len(s._windows) == window_count\n for w, wconf in builder.iter_create_windows(s):\n for p in builder.iter_create_panes(w, wconf):\n w.select_layout('tiled') # fix glitch with pane size\n p = p\n assert len(s._windows) == window_count\n assert isinstance(w, Window)\n\n assert len(s._windows) == window_count\n window_count += 1\n\n\ndef test_split_windows_three_pane(session):\n yaml_config = loadfixture(\"workspacebuilder/three_pane.yaml\")\n\n s = session\n sconfig = kaptan.Kaptan(handler='yaml')\n sconfig = sconfig.import_config(yaml_config).get()\n\n builder = WorkspaceBuilder(sconf=sconfig)\n\n window_count = len(s._windows) # current window count\n assert len(s._windows) == window_count\n for w, wconf in builder.iter_create_windows(s):\n for p in builder.iter_create_panes(w, wconf):\n w.select_layout('tiled') # fix glitch with pane size\n p = p\n assert len(s._windows) == window_count\n assert isinstance(w, Window)\n\n assert len(s._windows) == window_count\n window_count += 1\n w.set_window_option('main-pane-height', 50)\n w.select_layout(wconf['layout'])\n\n\ndef test_focus_pane_index(session):\n yaml_config = loadfixture('workspacebuilder/focus_and_pane.yaml')\n\n sconfig = kaptan.Kaptan(handler='yaml')\n sconfig = sconfig.import_config(yaml_config).get()\n sconfig = config.expand(sconfig)\n sconfig = config.trickle(sconfig)\n\n builder = WorkspaceBuilder(sconf=sconfig)\n\n builder.build(session=session)\n\n assert session.attached_window.name == 'focused window'\n\n pane_base_index = int(\n session.attached_window.show_window_option('pane-base-index', g=True)\n )\n\n if not pane_base_index:\n pane_base_index = 0\n else:\n pane_base_index = int(pane_base_index)\n\n # get the pane index for each pane\n pane_base_indexes = []\n for pane in session.attached_window.panes:\n pane_base_indexes.append(int(pane.index))\n\n pane_indexes_should_be = [pane_base_index + x for x in range(0, 3)]\n assert pane_indexes_should_be == pane_base_indexes\n\n w = session.attached_window\n\n assert w.name != 'man'\n\n pane_path = '/usr'\n\n while retry():\n p = w.attached_pane\n p.server._update_panes()\n if p.current_path == pane_path:\n break\n\n assert p.current_path == pane_path\n\n proc = session.cmd('show-option', '-gv', 'base-index')\n base_index = int(proc.stdout[0])\n session.server._update_windows()\n\n 
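# the third window created from the config sits at window index base_index + 2\n    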
window3 = session.find_where({'window_index': str(base_index + 2)})\n assert isinstance(window3, Window)\n\n p = None\n pane_path = '/'\n\n while retry():\n p = window3.attached_pane\n p.server._update_panes()\n if p.current_path == pane_path:\n break\n\n assert p.current_path == pane_path\n\n\n@pytest.mark.skip(\n reason='''\nTest needs to be rewritten, assertion not reliable across platforms\nand CI. See https://github.com/tmux-python/tmuxp/issues/310.\n '''.strip()\n)\ndef test_suppress_history(session):\n yaml_config = loadfixture(\"workspacebuilder/suppress_history.yaml\")\n sconfig = kaptan.Kaptan(handler='yaml')\n sconfig = sconfig.import_config(yaml_config).get()\n sconfig = config.expand(sconfig)\n sconfig = config.trickle(sconfig)\n\n builder = WorkspaceBuilder(sconf=sconfig)\n builder.build(session=session)\n\n inHistoryWindow = session.find_where({'window_name': 'inHistory'})\n isMissingWindow = session.find_where({'window_name': 'isMissing'})\n\n def assertHistory(cmd, hist):\n return 'inHistory' in cmd and cmd.endswith(hist)\n\n def assertIsMissing(cmd, hist):\n return 'isMissing' in cmd and not cmd.endswith(hist)\n\n for w, window_name, assertCase in [\n (inHistoryWindow, 'inHistory', assertHistory),\n (isMissingWindow, 'isMissing', assertIsMissing),\n ]:\n assert w.name == window_name\n correct = False\n w.select_window()\n p = w.attached_pane\n p.select_pane()\n\n # Print the last-in-history command in the pane\n p.cmd('send-keys', ' fc -ln -1')\n p.cmd('send-keys', 'Enter')\n\n buffer_name = 'test'\n while retry():\n # from v0.7.4 libtmux session.cmd adds target -t self.id by default\n # show-buffer doesn't accept -t, use global cmd.\n\n # Get the contents of the pane\n p.cmd('capture-pane', '-b', buffer_name)\n\n captured_pane = session.server.cmd('show-buffer', '-b', buffer_name)\n session.server.cmd('delete-buffer', '-b', buffer_name)\n\n # Parse the sent and last-in-history commands\n sent_cmd = captured_pane.stdout[0].strip()\n history_cmd = captured_pane.stdout[-2].strip()\n\n if assertCase(sent_cmd, history_cmd):\n correct = True\n break\n assert correct, \"Unknown sent command: [%s] in %s\" % (sent_cmd, assertCase)\n\n\ndef test_session_options(session):\n yaml_config = loadfixture(\"workspacebuilder/session_options.yaml\")\n s = session\n sconfig = kaptan.Kaptan(handler='yaml')\n sconfig = sconfig.import_config(yaml_config).get()\n sconfig = config.expand(sconfig)\n\n builder = WorkspaceBuilder(sconf=sconfig)\n builder.build(session=session)\n\n assert \"/bin/sh\" in s.show_option('default-shell')\n assert \"/bin/sh\" in s.show_option('default-command')\n\n\ndef test_global_options(session):\n yaml_config = loadfixture(\"workspacebuilder/global_options.yaml\")\n s = session\n sconfig = kaptan.Kaptan(handler='yaml')\n sconfig = sconfig.import_config(yaml_config).get()\n sconfig = config.expand(sconfig)\n\n builder = WorkspaceBuilder(sconf=sconfig)\n builder.build(session=session)\n\n assert \"top\" in s.show_option('status-position', _global=True)\n assert 493 == s.show_option('repeat-time', _global=True)\n\n\ndef test_global_session_env_options(session, monkeypatch):\n visual_silence = 'on'\n monkeypatch.setenv(str('VISUAL_SILENCE'), str(visual_silence))\n repeat_time = 738\n monkeypatch.setenv(str('REPEAT_TIME'), str(repeat_time))\n main_pane_height = 8\n monkeypatch.setenv(str('MAIN_PANE_HEIGHT'), str(main_pane_height))\n\n yaml_config = loadfixture(\"workspacebuilder/env_var_options.yaml\")\n s = session\n sconfig = kaptan.Kaptan(handler='yaml')\n sconfig = 
sconfig.import_config(yaml_config).get()\n sconfig = config.expand(sconfig)\n\n builder = WorkspaceBuilder(sconf=sconfig)\n builder.build(session=session)\n\n assert visual_silence in s.show_option('visual-silence', _global=True)\n assert repeat_time == s.show_option('repeat-time')\n assert main_pane_height == s.attached_window.show_window_option('main-pane-height')\n\n\ndef test_window_options(session):\n yaml_config = loadfixture(\"workspacebuilder/window_options.yaml\")\n s = session\n sconfig = kaptan.Kaptan(handler='yaml')\n sconfig = sconfig.import_config(yaml_config).get()\n sconfig = config.expand(sconfig)\n\n if has_gte_version('2.3'):\n sconfig['windows'][0]['options']['pane-border-format'] = ' #P '\n\n builder = WorkspaceBuilder(sconf=sconfig)\n\n window_count = len(session._windows) # current window count\n assert len(s._windows) == window_count\n for w, wconf in builder.iter_create_windows(s):\n for p in builder.iter_create_panes(w, wconf):\n w.select_layout('tiled') # fix glitch with pane size\n p = p\n assert len(s._windows) == window_count\n assert isinstance(w, Window)\n assert w.show_window_option('main-pane-height') == 5\n if has_gte_version('2.3'):\n assert w.show_window_option('pane-border-format') == ' #P '\n\n assert len(s._windows) == window_count\n window_count += 1\n w.select_layout(wconf['layout'])\n\n\n@pytest.mark.flaky(reruns=5)\ndef test_window_options_after(session):\n yaml_config = loadfixture(\"workspacebuilder/window_options_after.yaml\")\n sconfig = kaptan.Kaptan(handler='yaml')\n sconfig = sconfig.import_config(yaml_config).get()\n sconfig = config.expand(sconfig)\n\n builder = WorkspaceBuilder(sconf=sconfig)\n builder.build(session=session)\n\n def assert_last_line(p, s):\n correct = False\n\n while retry():\n pane_out = p.cmd('capture-pane', '-p', '-J').stdout\n while not pane_out[-1].strip(): # delete trailing lines tmux 1.8\n pane_out.pop()\n if len(pane_out) > 1 and pane_out[-2].strip() == s:\n correct = True\n break\n\n # Print output for easier debugging if assertion fails\n if not correct:\n print('\\n'.join(pane_out))\n\n return correct\n\n for i, pane in enumerate(session.attached_window.panes):\n assert assert_last_line(\n pane, str(i)\n ), \"Initial command did not execute properly/\" + str(i)\n pane.cmd('send-keys', 'Up') # Will repeat echo\n pane.enter() # in each iteration\n assert assert_last_line(\n pane, str(i)\n ), \"Repeated command did not execute properly/\" + str(i)\n\n session.cmd('send-keys', ' echo moo')\n session.cmd('send-keys', 'Enter')\n\n for pane in session.attached_window.panes:\n assert assert_last_line(\n pane, 'moo'\n ), \"Synchronized command did not execute properly\"\n\n\ndef test_window_shell(session):\n yaml_config = loadfixture(\"workspacebuilder/window_shell.yaml\")\n s = session\n sconfig = kaptan.Kaptan(handler='yaml')\n sconfig = sconfig.import_config(yaml_config).get()\n sconfig = config.expand(sconfig)\n\n builder = WorkspaceBuilder(sconf=sconfig)\n\n for w, wconf in builder.iter_create_windows(s):\n if 'window_shell' in wconf:\n assert wconf['window_shell'] == str('top')\n\n while retry():\n session.server._update_windows()\n if w['window_name'] != 'top':\n break\n\n assert w.name != str('top')\n\n\ndef test_environment_variables(session):\n yaml_config = loadfixture(\"workspacebuilder/environment_vars.yaml\")\n sconfig = kaptan.Kaptan(handler='yaml')\n sconfig = sconfig.import_config(yaml_config).get()\n sconfig = config.expand(sconfig)\n\n builder = WorkspaceBuilder(sconf=sconfig)\n 
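# build() should apply the config to the live session, including its environment block\n    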
builder.build(session)\n\n assert session.show_environment('FOO') == 'BAR'\n assert session.show_environment('PATH') == '/tmp'\n\n\ndef test_automatic_rename_option(session):\n \"\"\"With option automatic-rename: on.\"\"\"\n yaml_config = loadfixture(\"workspacebuilder/window_automatic_rename.yaml\")\n s = session\n sconfig = kaptan.Kaptan(handler='yaml')\n sconfig = sconfig.import_config(yaml_config).get()\n\n builder = WorkspaceBuilder(sconf=sconfig)\n\n window_count = len(session._windows) # current window count\n assert len(s._windows) == window_count\n for w, wconf in builder.iter_create_windows(s):\n\n for p in builder.iter_create_panes(w, wconf):\n w.select_layout('tiled') # fix glitch with pane size\n p = p\n assert len(s._windows), window_count\n assert isinstance(w, Window)\n assert w.show_window_option('automatic-rename') == 'on'\n\n assert len(s._windows) == window_count\n\n window_count += 1\n w.select_layout(wconf['layout'])\n\n assert s.name != 'tmuxp'\n w = s.windows[0]\n\n while retry():\n session.server._update_windows()\n if w.name != 'sh':\n break\n\n assert w.name != 'sh'\n\n pane_base_index = w.show_window_option('pane-base-index', g=True)\n w.select_pane(pane_base_index)\n\n while retry():\n session.server._update_windows()\n if w.name == 'sh':\n break\n\n assert w.name == 'sh'\n\n w.select_pane('-D')\n\n while retry():\n session.server._update_windows()\n if w['window_name'] != 'sh':\n break\n\n assert w.name != 'sh'\n\n\ndef test_blank_pane_count(session):\n \"\"\":todo: Verify blank panes of various types build into workspaces.\"\"\"\n yaml_config_file = os.path.join(example_dir, 'blank-panes.yaml')\n test_config = kaptan.Kaptan().import_config(yaml_config_file).get()\n test_config = config.expand(test_config)\n builder = WorkspaceBuilder(sconf=test_config)\n builder.build(session=session)\n\n assert session == builder.session\n\n window1 = session.find_where({'window_name': 'Blank pane test'})\n assert len(window1._panes) == 3\n\n window2 = session.find_where({'window_name': 'More blank panes'})\n assert len(window2._panes) == 3\n\n window3 = session.find_where({'window_name': 'Empty string (return)'})\n assert len(window3._panes) == 3\n\n window4 = session.find_where({'window_name': 'Blank with options'})\n assert len(window4._panes) == 2\n\n\ndef test_start_directory(session, tmpdir):\n yaml_config = loadfixture(\"workspacebuilder/start_directory.yaml\")\n test_dir = str(tmpdir.mkdir('foo bar'))\n test_config = yaml_config.format(TMP_DIR=str(tmpdir), TEST_DIR=test_dir)\n\n sconfig = kaptan.Kaptan(handler='yaml')\n sconfig = sconfig.import_config(test_config).get()\n sconfig = config.expand(sconfig)\n sconfig = config.trickle(sconfig)\n\n builder = WorkspaceBuilder(sconf=sconfig)\n builder.build(session=session)\n\n assert session == builder.session\n dirs = ['/usr/bin', '/dev', test_dir, '/usr', '/usr']\n\n for path, window in zip(dirs, session.windows):\n for p in window.panes:\n while retry():\n p.server._update_panes()\n pane_path = p.current_path\n if pane_path is None:\n pass\n elif path in pane_path or pane_path == path:\n result = path == pane_path or path in pane_path\n break\n\n # handle case with OS X adding /private/ to /tmp/ paths\n assert result\n\n\ndef test_start_directory_relative(session, tmpdir):\n \"\"\"Same as above test, but with relative start directory, mimicking\n loading it from a location of project file. 
Like::\n\n $ tmuxp load ~/workspace/myproject/.tmuxp.yaml\n\n instead of::\n\n $ cd ~/workspace/myproject/.tmuxp.yaml\n $ tmuxp load .\n\n \"\"\"\n yaml_config = loadfixture(\"workspacebuilder/start_directory_relative.yaml\")\n\n test_dir = str(tmpdir.mkdir('foo bar'))\n config_dir = str(tmpdir.mkdir('testRelConfigDir'))\n test_config = yaml_config.format(TEST_DIR=test_dir)\n\n sconfig = kaptan.Kaptan(handler='yaml')\n sconfig = sconfig.import_config(test_config).get()\n # the second argument of os.getcwd() mimics the behavior\n # the CLI loader will do, but it passes in the config file's location.\n sconfig = config.expand(sconfig, config_dir)\n\n sconfig = config.trickle(sconfig)\n\n assert os.path.exists(config_dir)\n assert os.path.exists(test_dir)\n builder = WorkspaceBuilder(sconf=sconfig)\n builder.build(session=session)\n\n assert session == builder.session\n\n dirs = ['/usr/bin', '/dev', test_dir, config_dir, config_dir]\n\n for path, window in zip(dirs, session.windows):\n for p in window.panes:\n while retry():\n p.server._update_panes()\n # Handle case where directories resolve to /private/ in OSX\n pane_path = p.current_path\n if pane_path is None:\n pass\n elif path in pane_path or pane_path == path:\n result = path == pane_path or path in pane_path\n break\n\n assert result\n\n\ndef test_pane_order(session):\n \"\"\"Pane ordering based on position in config and ``pane_index``.\n\n Regression test for https://github.com/tmux-python/tmuxp/issues/15.\n\n \"\"\"\n\n yaml_config = loadfixture(\"workspacebuilder/pane_ordering.yaml\").format(\n HOME=os.path.realpath(os.path.expanduser('~'))\n )\n\n # test order of `panes` (and pane_index) above aganist pane_dirs\n pane_paths = [\n '/usr/bin',\n '/usr',\n '/usr/sbin',\n os.path.realpath(os.path.expanduser('~')),\n ]\n\n s = session\n sconfig = kaptan.Kaptan(handler='yaml')\n sconfig = sconfig.import_config(yaml_config).get()\n sconfig = config.expand(sconfig)\n sconfig = config.trickle(sconfig)\n\n builder = WorkspaceBuilder(sconf=sconfig)\n\n window_count = len(session._windows) # current window count\n assert len(s._windows) == window_count\n for w, wconf in builder.iter_create_windows(s):\n for p in builder.iter_create_panes(w, wconf):\n w.select_layout('tiled') # fix glitch with pane size\n p = p\n assert len(s._windows) == window_count\n\n assert isinstance(w, Window)\n\n assert len(s._windows) == window_count\n window_count += 1\n\n for w in session.windows:\n pane_base_index = w.show_window_option('pane-base-index', g=True)\n for p_index, p in enumerate(w.list_panes(), start=pane_base_index):\n assert int(p_index) == int(p.index)\n\n # pane-base-index start at base-index, pane_paths always start\n # at 0 since python list.\n pane_path = pane_paths[p_index - pane_base_index]\n\n while retry():\n p.server._update_panes()\n if p.current_path == pane_path:\n break\n\n assert p.current_path, pane_path\n\n\ndef test_window_index(session):\n yaml_config = loadfixture(\"workspacebuilder/window_index.yaml\")\n proc = session.cmd('show-option', '-gv', 'base-index')\n base_index = int(proc.stdout[0])\n name_index_map = {'zero': 0 + base_index, 'one': 1 + base_index, 'five': 5}\n\n sconfig = kaptan.Kaptan(handler='yaml')\n sconfig = sconfig.import_config(yaml_config).get()\n sconfig = config.expand(sconfig)\n sconfig = config.trickle(sconfig)\n\n builder = WorkspaceBuilder(sconf=sconfig)\n\n for window, _ in builder.iter_create_windows(session):\n expected_index = name_index_map[window['window_name']]\n assert int(window['window_index']) 
== expected_index\n\n\ndef test_before_load_throw_error_if_retcode_error(server):\n config_script_fails = loadfixture(\"workspacebuilder/config_script_fails.yaml\")\n sconfig = kaptan.Kaptan(handler='yaml')\n yaml = config_script_fails.format(\n fixtures_dir=fixtures_dir,\n script_failed=os.path.join(fixtures_dir, 'script_failed.sh'),\n )\n\n sconfig = sconfig.import_config(yaml).get()\n sconfig = config.expand(sconfig)\n sconfig = config.trickle(sconfig)\n\n builder = WorkspaceBuilder(sconf=sconfig)\n\n with temp_session(server) as sess:\n session_name = sess.name\n\n with pytest.raises(exc.BeforeLoadScriptError):\n builder.build(session=sess)\n\n result = server.has_session(session_name)\n assert not result, \"Kills session if before_script exits with errcode\"\n\n\ndef test_before_load_throw_error_if_file_not_exists(server):\n config_script_not_exists = loadfixture(\n \"workspacebuilder/config_script_not_exists.yaml\"\n )\n sconfig = kaptan.Kaptan(handler='yaml')\n yaml = config_script_not_exists.format(\n fixtures_dir=fixtures_dir,\n script_not_exists=os.path.join(fixtures_dir, 'script_not_exists.sh'),\n )\n sconfig = sconfig.import_config(yaml).get()\n sconfig = config.expand(sconfig)\n sconfig = config.trickle(sconfig)\n\n builder = WorkspaceBuilder(sconf=sconfig)\n\n with temp_session(server) as sess:\n session_name = sess.name\n temp_session_exists = server.has_session(sess.name)\n assert temp_session_exists\n with pytest.raises((exc.BeforeLoadScriptNotExists, OSError)) as excinfo:\n builder.build(session=sess)\n excinfo.match(r'No such file or directory')\n result = server.has_session(session_name)\n assert not result, \"Kills session if before_script doesn't exist\"\n\n\ndef test_before_load_true_if_test_passes(server):\n config_script_completes = loadfixture(\n \"workspacebuilder/config_script_completes.yaml\"\n )\n assert os.path.exists(os.path.join(fixtures_dir, 'script_complete.sh'))\n sconfig = kaptan.Kaptan(handler='yaml')\n yaml = config_script_completes.format(\n fixtures_dir=fixtures_dir,\n script_complete=os.path.join(fixtures_dir, 'script_complete.sh'),\n )\n\n sconfig = sconfig.import_config(yaml).get()\n sconfig = config.expand(sconfig)\n sconfig = config.trickle(sconfig)\n\n builder = WorkspaceBuilder(sconf=sconfig)\n\n with temp_session(server) as session:\n builder.build(session=session)\n\n\ndef test_before_load_true_if_test_passes_with_args(server):\n config_script_completes = loadfixture(\n \"workspacebuilder/config_script_completes.yaml\"\n )\n\n assert os.path.exists(os.path.join(fixtures_dir, 'script_complete.sh'))\n sconfig = kaptan.Kaptan(handler='yaml')\n yaml = config_script_completes.format(\n fixtures_dir=fixtures_dir,\n script_complete=os.path.join(fixtures_dir, 'script_complete.sh') + ' -v',\n )\n\n sconfig = sconfig.import_config(yaml).get()\n sconfig = config.expand(sconfig)\n sconfig = config.trickle(sconfig)\n\n builder = WorkspaceBuilder(sconf=sconfig)\n\n with temp_session(server) as session:\n builder.build(session=session)\n\n\ndef test_plugin_system_before_workspace_builder(\n monkeypatch_plugin_test_packages, session\n):\n config_plugins = loadfixture(\"workspacebuilder/plugin_bwb.yaml\")\n\n sconfig = kaptan.Kaptan(handler='yaml')\n sconfig = sconfig.import_config(config_plugins).get()\n sconfig = config.expand(sconfig)\n\n builder = WorkspaceBuilder(sconf=sconfig, plugins=load_plugins(sconfig))\n assert len(builder.plugins) > 0\n\n builder.build(session=session)\n\n proc = session.cmd('display-message', '-p', \"'#S'\")\n assert 
proc.stdout[0] == \"'plugin_test_bwb'\"\n\n\ndef test_plugin_system_on_window_create(monkeypatch_plugin_test_packages, session):\n config_plugins = loadfixture(\"workspacebuilder/plugin_owc.yaml\")\n\n sconfig = kaptan.Kaptan(handler='yaml')\n sconfig = sconfig.import_config(config_plugins).get()\n sconfig = config.expand(sconfig)\n\n builder = WorkspaceBuilder(sconf=sconfig, plugins=load_plugins(sconfig))\n assert len(builder.plugins) > 0\n\n builder.build(session=session)\n\n proc = session.cmd('display-message', '-p', \"'#W'\")\n assert proc.stdout[0] == \"'plugin_test_owc'\"\n\n\ndef test_plugin_system_after_window_finished(monkeypatch_plugin_test_packages, session):\n config_plugins = loadfixture(\"workspacebuilder/plugin_awf.yaml\")\n\n sconfig = kaptan.Kaptan(handler='yaml')\n sconfig = sconfig.import_config(config_plugins).get()\n sconfig = config.expand(sconfig)\n\n builder = WorkspaceBuilder(sconf=sconfig, plugins=load_plugins(sconfig))\n assert len(builder.plugins) > 0\n\n builder.build(session=session)\n\n proc = session.cmd('display-message', '-p', \"'#W'\")\n assert proc.stdout[0] == \"'plugin_test_awf'\"\n\n\ndef test_plugin_system_on_window_create_multiple_windows(session):\n config_plugins = loadfixture(\"workspacebuilder/plugin_owc_multiple_windows.yaml\")\n\n sconfig = kaptan.Kaptan(handler='yaml')\n sconfig = sconfig.import_config(config_plugins).get()\n sconfig = config.expand(sconfig)\n\n builder = WorkspaceBuilder(sconf=sconfig, plugins=load_plugins(sconfig))\n assert len(builder.plugins) > 0\n\n builder.build(session=session)\n\n proc = session.cmd('list-windows', '-F', \"'#W'\")\n assert \"'plugin_test_owc_mw'\" in proc.stdout\n assert \"'plugin_test_owc_mw_2'\" in proc.stdout\n\n\ndef test_plugin_system_after_window_finished_multiple_windows(\n monkeypatch_plugin_test_packages, session\n):\n config_plugins = loadfixture(\"workspacebuilder/plugin_awf_multiple_windows.yaml\")\n\n sconfig = kaptan.Kaptan(handler='yaml')\n sconfig = sconfig.import_config(config_plugins).get()\n sconfig = config.expand(sconfig)\n\n builder = WorkspaceBuilder(sconf=sconfig, plugins=load_plugins(sconfig))\n assert len(builder.plugins) > 0\n\n builder.build(session=session)\n\n proc = session.cmd('list-windows', '-F', \"'#W'\")\n assert \"'plugin_test_awf_mw'\" in proc.stdout\n assert \"'plugin_test_awf_mw_2'\" in proc.stdout\n\n\ndef test_plugin_system_multiple_plugins(monkeypatch_plugin_test_packages, session):\n config_plugins = loadfixture(\"workspacebuilder/plugin_multiple_plugins.yaml\")\n\n sconfig = kaptan.Kaptan(handler='yaml')\n sconfig = sconfig.import_config(config_plugins).get()\n sconfig = config.expand(sconfig)\n\n builder = WorkspaceBuilder(sconf=sconfig, plugins=load_plugins(sconfig))\n assert len(builder.plugins) > 0\n\n builder.build(session=session)\n\n # Drop through to the before_script plugin hook\n proc = session.cmd('display-message', '-p', \"'#S'\")\n assert proc.stdout[0] == \"'plugin_test_bwb'\"\n\n # Drop through to the after_window_finished. 
This won't succeed\n # unless on_window_create succeeds because of how the test plugin\n # override methods are currently written\n proc = session.cmd('display-message', '-p', \"'#W'\")\n assert proc.stdout[0] == \"'mp_test_awf'\"\n\n\ndef test_load_configs_same_session(server):\n yaml_config = loadfixture(\"workspacebuilder/three_windows.yaml\")\n sconfig = kaptan.Kaptan(handler='yaml')\n sconfig = sconfig.import_config(yaml_config).get()\n\n builder = WorkspaceBuilder(sconf=sconfig, server=server)\n builder.build()\n\n assert len(server.sessions) == 1\n assert len(server.sessions[0]._windows) == 3\n\n yaml_config = loadfixture(\"workspacebuilder/two_windows.yaml\")\n\n sconfig = kaptan.Kaptan(handler='yaml')\n sconfig = sconfig.import_config(yaml_config).get()\n\n builder = WorkspaceBuilder(sconf=sconfig, server=server)\n builder.build()\n assert len(server.sessions) == 2\n assert len(server.sessions[1]._windows) == 2\n\n yaml_config = loadfixture(\"workspacebuilder/two_windows.yaml\")\n\n sconfig = kaptan.Kaptan(handler='yaml')\n sconfig = sconfig.import_config(yaml_config).get()\n\n builder = WorkspaceBuilder(sconf=sconfig, server=server)\n builder.build(server.sessions[1], True)\n\n assert len(server.sessions) == 2\n assert len(server.sessions[1]._windows) == 4\n\n\ndef test_load_configs_separate_sessions(server):\n yaml_config = loadfixture(\"workspacebuilder/three_windows.yaml\")\n sconfig = kaptan.Kaptan(handler='yaml')\n sconfig = sconfig.import_config(yaml_config).get()\n\n builder = WorkspaceBuilder(sconf=sconfig, server=server)\n builder.build()\n\n assert len(server.sessions) == 1\n assert len(server.sessions[0]._windows) == 3\n\n yaml_config = loadfixture(\"workspacebuilder/two_windows.yaml\")\n\n sconfig = kaptan.Kaptan(handler='yaml')\n sconfig = sconfig.import_config(yaml_config).get()\n\n builder = WorkspaceBuilder(sconf=sconfig, server=server)\n builder.build()\n\n assert len(server.sessions) == 2\n assert len(server.sessions[0]._windows) == 3\n assert len(server.sessions[1]._windows) == 2\n\n\ndef test_find_current_active_pane(server, monkeypatch):\n yaml_config = loadfixture(\"workspacebuilder/three_windows.yaml\")\n sconfig = kaptan.Kaptan(handler='yaml')\n sconfig = sconfig.import_config(yaml_config).get()\n\n builder = WorkspaceBuilder(sconf=sconfig, server=server)\n builder.build()\n\n yaml_config = loadfixture(\"workspacebuilder/two_windows.yaml\")\n\n sconfig = kaptan.Kaptan(handler='yaml')\n sconfig = sconfig.import_config(yaml_config).get()\n\n builder = WorkspaceBuilder(sconf=sconfig, server=server)\n builder.build()\n\n assert len(server.list_sessions()) == 2\n\n # Assign an active pane to the session\n second_session = server.list_sessions()[1]\n first_pane_on_second_session_id = second_session.list_windows()[0].list_panes()[0][\n \"pane_id\"\n ]\n monkeypatch.setenv(\"TMUX_PANE\", first_pane_on_second_session_id)\n\n builder = WorkspaceBuilder(sconf=sconfig, server=server)\n\n assert builder.find_current_attached_session() == second_session\n","sub_path":"tests/test_workspacebuilder.py","file_name":"test_workspacebuilder.py","file_ext":"py","file_size_in_byte":28675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"619488200","text":"import argparse\nimport time\nimport json\nimport os\nfrom datetime import datetime\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport concurrent.futures\n\nfrom PIL import Image\n# pip install resize-and-crop\nfrom resize_and_crop import resize_and_crop\n\nfrom 
modules.file_utils import FileUtils\nfrom modules.logging_utils import LoggingUtils\n\nparser = argparse.ArgumentParser(description='Process VGG to memmap for dataset')\n\n# /vggface2/test/n009291/0002_01.jpg\nparser.add_argument('-path_input', default='/Users/evalds/Downloads/vggface2/test', type=str)\n\n# /vggface2/test.mmap\n# /vggface2/test.json\nparser.add_argument('-path_output', default='/Users/evalds/Downloads/vggface2/', type=str)\n\nparser.add_argument('-size_img', default=128, type=int)\nparser.add_argument('-thread_max', default=200, type=int)\n\nargs, args_other = parser.parse_known_args()\n\nbase_name = os.path.basename(args.path_input)\nFileUtils.createDir(args.path_output)\nlogging_utils = LoggingUtils(f\"{args.path_output}/{base_name}-{datetime.now().strftime('%y-%m-%d_%H-%M-%S')}.log\")\n\nclass_names = []\nsamples_by_class_idxes = []\nsamples_by_paths = []\nlast_class_name = None\n\nmmap_shape = [0, 3, args.size_img, args.size_img]\n\nlogging_utils.info(f'samples started to gather')\n\ndir_person_ids = os.listdir(args.path_input)\nfor person_id in dir_person_ids:\n path_person_id = f'{args.path_input}/{person_id}'\n if os.path.isdir(path_person_id):\n\n dir_images = os.listdir(path_person_id)\n for path_image_each in dir_images:\n path_image = f'{path_person_id}/{path_image_each}'\n\n if os.path.isfile(path_image):\n if last_class_name != person_id:\n last_class_name = person_id\n class_names.append(person_id)\n\n class_idx = len(class_names) - 1\n samples_by_class_idxes.append(class_idx)\n samples_by_paths.append((len(samples_by_paths), path_image))\n\nlogging_utils.info(f'samples gathered: {len(samples_by_class_idxes)}')\n\n\nmmap_shape[0] = len(samples_by_paths)\nmem = np.memmap(\n f'{args.path_output}/{base_name}.mmap',\n mode='w+',\n dtype=np.float16,\n shape=tuple(mmap_shape))\n\n\ndef thread_processing(sample):\n idx_sample, path_image = sample\n if idx_sample % 1000 == 0:\n logging_utils.info(f'idx_sample: {idx_sample}/{mmap_shape[0]}')\n #image = Image.open(path_image)\n image = resize_and_crop(path_image, (args.size_img, args.size_img), \"middle\")\n np_image = np.array(image.getdata()).reshape(args.size_img, args.size_img, 3)\n np_image = np.swapaxes(np_image, 1, 2)\n np_image = np.swapaxes(np_image, 0, 1)\n mem[idx_sample] = np_image\n\ntime_start = time.time()\nwith concurrent.futures.ThreadPoolExecutor(max_workers=args.thread_max) as executor:\n executor.map(thread_processing, samples_by_paths)\nlogging_utils.info(f'done in: {(time.time() - time_start)/60} min')\n\nmem.flush()\n\nlogging_utils.info('finished processing')\n\nwith open(f'{args.path_output}/{base_name}.json', 'w') as fp:\n json.dump({\n 'class_names': class_names,\n 'mmap_shape': mmap_shape,\n 'samples_by_class_idxes': samples_by_class_idxes\n }, fp, indent=4)\n\nlogging_utils.info('finished json')\n\n","sub_path":"process_data_vggface2.py","file_name":"process_data_vggface2.py","file_ext":"py","file_size_in_byte":3230,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"135066105","text":"# Enter your code here. Read input from STDIN. 
Print output to STDOUT\nimport math\n\nclass ComplexNumber: \n def __init__(self, real = 0.0, imaginary = 0.0):\n self.real = real\n self.imaginary = imaginary\n \n @classmethod\n def from_string(cls, string_input):\n parts = string_input.split(' ') \n real = float(parts[0]) \n imaginary = 0.0\n if len(parts) >= 2:\n imaginary = float(parts[1])\n return ComplexNumber(real, imaginary)\n \n def __str__(self):\n result = \"\" \n \n if self.real != 0:\n result += \"{0:.2f}\".format(self.real) \n \n if self.imaginary != 0:\n imaginary = self.imaginary\n \n if len(result) > 0:\n imaginary = abs(imaginary)\n if self.imaginary < 0:\n result += \" - \"\n else:\n result += \" + \"\n \n result += \"{0:.2f}\".format(imaginary) + \"i\" \n \n if len(result) == 0:\n result = \"0.00\"\n \n return result.strip()\n \n def __add__(self, other):\n return ComplexNumber((self.real + other.real), (self.imaginary + other.imaginary))\n \n def __sub__(self, other):\n return ComplexNumber(self.real - other.real, self.imaginary - other.imaginary)\n \n def __mul__(self, other):\n return ComplexNumber((self.real * other.real) - (self.imaginary * other.imaginary), \n (self.real * other.imaginary) + (self.imaginary * other.real))\n \n def __div__(self, other): # Python 2 division hook; this script targets Python 2 (raw_input below)\n conj = other.conjugate() \n top = self * conj\n bottom = other * conj \n return ComplexNumber(top.real / bottom.real, top.imaginary / bottom.real)\n \n def norm(self):\n return math.sqrt(self.real * self.real + self.imaginary * self.imaginary)\n \n def conjugate(self):\n return ComplexNumber(self.real, -self.imaginary)\n \na = ComplexNumber.from_string(raw_input())\nb = ComplexNumber.from_string(raw_input())\nprint(a + b)\nprint(a - b)\nprint(a * b)\nprint(a / b)\nprint(\"{0:.2f}\".format(a.norm()))\nprint(\"{0:.2f}\".format(b.norm()))","sub_path":"python/classes/complex_numbers/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":2288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"411585277","text":"\nimport scrapy\nimport re\nimport os\n\n\nclass Info(scrapy.Item):\n Id = scrapy.Field()\n # every key assigned in parse() must be declared as a Field on the Item\n Aangebodensinds = scrapy.Field()\n Status = scrapy.Field()\n Verkoopdatum = scrapy.Field()\n Looptijd = scrapy.Field()\n Sold = scrapy.Field()\n Url = scrapy.Field()\n\n\nclass Funda(scrapy.Spider):\n name = \"funda_sold\"\n path = \"file:///home/crawler1/house/\"\n start_urls = []\n for filename in os.listdir(\"/home/crawler1/house\"):\n if filename.endswith(\".html\"):\n start_urls.append(path + filename)\n\n def parse(self, response):\n res = response.xpath(\n \"//div[@class='object-kenmerken-body']//text()[not(ancestor::h3)][not(ancestor::div[@class='' or @class='kadaster-title'])][not(ancestor::a)]\").getall()\n res = [re.sub(r\"\\r\\n\", \"\", str) for str in res]\n res = [str.strip() for str in res]\n res = list(filter(None, res))\n\n res[0::2] = [re.sub(r\" |-\", \"\", str) for str in res[0::2]]\n res[1::2] = [re.sub(r\" januari \", \"-01-\", str) for str in res[1::2]]\n res[1::2] = [re.sub(r\" februari \", \"-02-\", str) for str in res[1::2]]\n res[1::2] = [re.sub(r\" maart \", \"-03-\", str) for str in res[1::2]]\n res[1::2] = [re.sub(r\" april \", \"-04-\", str) for str in res[1::2]]\n res[1::2] = [re.sub(r\" mei \", \"-05-\", str) for str in res[1::2]]\n res[1::2] = [re.sub(r\" juni \", \"-06-\", str) for str in res[1::2]]\n res[1::2] = [re.sub(r\" juli \", \"-07-\", str) for str in res[1::2]]\n res[1::2] = [re.sub(r\" augustus \", \"-08-\", str) for 
str in res[1::2]]\n res[1::2] = [re.sub(r\" september \", \"-09-\", str) for str in res[1::2]]\n res[1::2] = [re.sub(r\" oktober \", \"-10-\", str) for str in res[1::2]]\n res[1::2] = [re.sub(r\" november \", \"-11-\", str) for str in res[1::2]]\n res[1::2] = [re.sub(r\" december \", \"-12-\", str) for str in res[1::2]]\n info = Info()\n # added 12/07/2019\n info['Aangebodensinds'] = \"\"\n info['Status'] = \"\"\n info['Verkoopdatum'] = \"\"\n info['Looptijd'] = \"\"\n info['Sold'] = \"True\"\n info['Url'] = response.xpath(\"//link[@rel='canonical']/@href\").get().replace('verkocht/','') \n for dt_name, dd_value in zip(res[0::2], res[1::2]):\n dt_name = dt_name.translate(\n {ord(c): \"\" for c in \"!@#$%^&*()[]{};:,./<>?\\|`~-=_+\"}).replace(\"\\n\", \"\").strip(\"\\t\").strip().capitalize()\n if dt_name in info.keys():\n info[dt_name] = dd_value.replace(\" m²\", \"\")\n\n yield info\n","sub_path":"funda-sold/funda_sold/spiders/funda_sold.py","file_name":"funda_sold.py","file_ext":"py","file_size_in_byte":2551,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"461013749","text":"import json\nimport ssl\nimport requests\n\n\nclass CollectTestFiles:\n # Added below line to resolve SSL exceptions\n ssl._create_default_https_context = ssl._create_unverified_context\n\n def __init__(self):\n self.test_detection_dir = \"tests\"\n self.git_api_endpoint = f\"https://api.github.com/repos/splunk/security_content/git/trees/develop\"\n self.security_content_test_files = \"\"\n\n @staticmethod\n def fetch_file_info(endpoint, is_recursive=False):\n \"\"\"\n fetch file info using API call\n \"\"\"\n headers = {\n \"accept\": \"application/vnd.github.v3+json\"\n }\n params = {\"recursive\": 1} if is_recursive else {}\n response = requests.get(f\"{endpoint}\", headers=headers, params=params)\n if response.status_code == 200:\n return json.loads(response.content)\n raise Exception(f\"Error occur while fetching data from github, {json.loads(response.content)}\")\n\n def collect_all_files(self, detection_types: list):\n \"\"\"\n Collect test files from the github repository\n \"\"\"\n\n test_files_list = []\n\n # Fetch first set of folder structure\n fetch_first_set_from_repo = self.fetch_file_info(self.git_api_endpoint)\n\n # Traverse through all files and dir in main tree from first set\n for path in fetch_first_set_from_repo.get(\"tree\"):\n print(path)\n # Fetch the tests dir\n if path.get(\"path\") == self.test_detection_dir:\n print(path.get(\"path\"))\n tests_dir_response = self.fetch_file_info(path.get(\"url\"))\n # Traverse through all files and dir in tests dir\n for sub_dir in tests_dir_response.get(\"tree\"):\n\n # Check sub dir name in our detection sub dir list\n if sub_dir.get(\"path\") in detection_types:\n # Fetch all the test detections in sub dir\n fetch_detection_in_sub_dir = self.fetch_file_info(sub_dir.get(\"url\"))\n test_files = fetch_detection_in_sub_dir.get(\"tree\")\n\n test_files_list = [\n f\"../security_content/{self.test_detection_dir}/{sub_dir.get('path')}/{file_data.get('path')}\"\n for file_data in test_files if file_data.get(\"path\").endswith(\"test.yml\")]\n\n self.security_content_test_files = \",\".join(test_files_list)\n print(self.security_content_test_files)\n return self.security_content_test_files\n","sub_path":"collect_test_files.py","file_name":"collect_test_files.py","file_ext":"py","file_size_in_byte":2559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} 
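# The CollectTestFiles record above walks the GitHub tree with one API request per
# directory level. The same git/trees endpoint accepts recursive=1 (the unused
# is_recursive branch of fetch_file_info already hints at this) and returns every
# path in a single request. A minimal sketch, assuming only the `requests` package
# and the splunk/security_content repo URL already used by that class; the helper
# name and filtering are illustrative, not part of the original record.
import requests

def list_security_content_test_files(detection_types):
    url = "https://api.github.com/repos/splunk/security_content/git/trees/develop"
    response = requests.get(
        url,
        headers={"accept": "application/vnd.github.v3+json"},
        params={"recursive": 1},
    )
    response.raise_for_status()
    # GitHub sets "truncated": true instead of failing on very large repos
    tree = response.json()["tree"]
    # keep blobs shaped like "tests/<sub_dir>/<name>.test.yml" for wanted sub dirs
    return [
        node["path"]
        for node in tree
        if node["path"].startswith("tests/")
        and node["path"].split("/")[1] in detection_types
        and node["path"].endswith("test.yml")
    ]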
+{"seq_id":"11316446","text":"# coding=utf-8 \nfrom action import Action\nfrom species import Species\nfrom action_type import ActionType\nfrom group import Group\nimport utils\n\n\n\n\ndef enumerate_possible_actions(state, group, specie, number_my_groups, max_split_rate):\n groups_human = state.getMembers(Species.human)\n groups_enemy = state.getMembers(specie.inverse())\n actions_total = []\n len_group_me = group.eff\n actions_simple_per_group = []\n actions_split_per_group = []\n doublets = []\n groups_targets = []\n\n #on elague les groupes d'humains\n humanDistances = []\n for humangroup in groups_human:\n humanDistances.append(utils.getDistance(group, humangroup))\n groups_human.sort(key=dict(zip(groups_human, humanDistances)).get, reverse=False)\n groups_human=groups_human[:len_group_me+1]\n\n #de même pour les ennemis\n enemyDistances = []\n for enemy in groups_enemy:\n enemyDistances.append(utils.getDistance(group, enemy))\n groups_enemy.sort(key=dict(zip(groups_enemy, enemyDistances)).get, reverse=False)\n groups_enemy=groups_human[:len_group_me+1]\n\n # actions sans split\n for group_human in groups_human:\n action = Action(ActionType.attackHuman, group_human, group)\n action.calc_mark(state)\n actions_simple_per_group.append(action)\n groups_targets.append(group_human)\n\n for group_enemy in groups_enemy:\n action = Action(ActionType.attackEnemy, group_enemy, group)\n action.calc_mark(state)\n actions_simple_per_group.append(action)\n groups_targets.append(group_enemy)\n\n # actions avec splits\n if number_my_groups <= max_split_rate: #on évite de trop se splitter\n for i in range(1, int(len_group_me/2)+1):\n doublets.append([i, len_group_me-i])\n\n for doublet in doublets:\n group1 = Group(group.x, group.y, doublet[0], specie)\n group2 = Group(group.x, group.y, doublet[1], specie)\n for target_group_1 in groups_targets:\n action_type_1 = specie.determine_action_type(target_group_1.species)\n for target_group_2 in groups_targets:\n action_type_2 = specie.determine_action_type(target_group_2.species)\n # si les deux targets sont différentes :\n if (target_group_1.x != target_group_2.x) or (target_group_1.y != target_group_2.y):\n action1 = Action(action_type_1, target_group_1, group1)\n action2 = Action(action_type_2, target_group_2, group2)\n action1.calc_mark(state)\n action2.calc_mark(state)\n action1.parent_group = group\n action2.parent_group = group\n actions_split_per_group.append([action1, action2])\n actions_total.append(actions_simple_per_group)\n actions_total.append(actions_split_per_group)\n\n return actions_total\n","sub_path":"Code/actions_generator.py","file_name":"actions_generator.py","file_ext":"py","file_size_in_byte":3102,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"149963143","text":"# -*- coding: utf-8 -*-\nimport os\nimport io\nimport datetime\nimport pytz\nimport six\nimport glob\nimport pytest\n\nimport pvl\nfrom pvl import (\n Label,\n LabelGroup,\n LabelObject,\n Units,\n decoder\n)\n\n\nDATA_DIR = os.path.join(os.path.dirname(__file__), 'data/')\nPDS_DATA_DIR = os.path.join(os.path.dirname(__file__), 'data', 'pds3')\nPDS_LABELS = glob.glob(os.path.join(PDS_DATA_DIR, \"*.lbl\"))\nBROKEN_DIR = os.path.join('tests', 'data', 'pds3', 'broken')\nBAD_PDS_LABELS = glob.glob(os.path.join(BROKEN_DIR, \"*.lbl\"))\n\n\ndef test_assignment():\n label = pvl.loads('foo=bar')\n assert isinstance(label, Label)\n assert label['foo'] == 'bar'\n\n label = pvl.loads('Group_Foo=bar')\n assert isinstance(label, 
Label)\n assert label['Group_Foo'] == 'bar'\n\n label = pvl.loads('foo=bar-')\n assert isinstance(label, Label)\n assert label['foo'] == 'bar-'\n\n label = pvl.loads('foo=bar-\\n')\n assert isinstance(label, Label)\n assert label['foo'] == 'bar'\n\n label = pvl.loads('foo=bro-\\nken')\n assert isinstance(label, Label)\n assert label['foo'] == 'broken'\n\n label = pvl.loads('foo=bro-\\n ken')\n assert isinstance(label, Label)\n assert label['foo'] == 'broken'\n\n\ndef test_spacing():\n label = pvl.loads(\"\"\"\n foo = bar\n nospace=good\n lots_of_spacing = alsogood\n same = line no = problem; like=aboss\n End\n \"\"\")\n\n assert isinstance(label, Label)\n assert label['foo'] == 'bar'\n assert label['nospace'] == 'good'\n assert label['lots_of_spacing'] == 'alsogood'\n assert label['same'] == 'line'\n assert label['no'] == 'problem'\n assert label['like'] == 'aboss'\n\n\ndef test_linewrap():\n label = pvl.loads(\"\"\"\n foo = bar-\n baz\n End\n \"\"\")\n\n assert label['foo'] == 'barbaz'\n\n\ndef test_special():\n label = pvl.loads(\"\"\"\n none1 = NULL\n none2 = Null\n true1 = TRUE\n true2 = True\n true3 = true\n false1 = FALSE\n false2 = False\n false3 = false\n End\n \"\"\")\n\n assert label['none1'] is None\n assert label['none2'] is None\n\n assert label['true1'] is True\n assert label['true2'] is True\n assert label['true3'] is True\n\n assert label['false1'] is False\n assert label['false2'] is False\n assert label['false3'] is False\n\n\ndef test_integers():\n label = pvl.loads(\"\"\"\n integer = 42\n positive_integer = +123\n negitive_integer = -1\n invalid_integer = 1a2\n End\n \"\"\")\n\n assert isinstance(label['integer'], int)\n assert label['integer'] == 42\n\n assert isinstance(label['integer'], int)\n assert label['positive_integer'] == 123\n\n assert isinstance(label['negitive_integer'], int)\n assert label['negitive_integer'] == -1\n\n assert isinstance(label['invalid_integer'], six.text_type)\n assert label['invalid_integer'] == '1a2'\n\n\ndef test_floats():\n label = pvl.loads(\"\"\"\n float = 1.0\n float_no_decimal = 2.\n float_no_whole = .3\n float_leading_zero = 0.5\n positive_float = +2.0\n negative_float = -1.0\n invalid_float = 1.2.3\n End\n \"\"\")\n assert isinstance(label['float'], float)\n assert label['float'] == 1.0\n\n assert isinstance(label['float_no_decimal'], float)\n assert label['float_no_decimal'] == 2.0\n\n assert isinstance(label['float_no_whole'], float)\n assert label['float_no_whole'] == 0.3\n\n assert isinstance(label['float_leading_zero'], float)\n assert label['float_leading_zero'] == 0.5\n\n assert isinstance(label['positive_float'], float)\n assert label['positive_float'] == 2.0\n\n assert isinstance(label['negative_float'], float)\n assert label['negative_float'] == -1.0\n\n assert isinstance(label['invalid_float'], six.text_type)\n assert label['invalid_float'] == '1.2.3'\n\n\ndef test_exponents():\n label = pvl.loads(\"\"\"\n capital = -1.E-3\n lower = -1.e-3\n small = -0.45e6\n int = 31459e1\n invalid = 1e\n End\n \"\"\")\n\n assert isinstance(label['capital'], float)\n assert label['capital'] == -1.0E-3\n\n assert isinstance(label['lower'], float)\n assert label['lower'] == -1.0E-3\n\n assert isinstance(label['small'], float)\n assert label['small'] == -0.45E6\n\n assert isinstance(label['int'], float)\n assert label['int'] == 31459e1\n\n assert isinstance(label['invalid'], six.text_type)\n assert label['invalid'] == '1e'\n\n\ndef test_objects():\n label = pvl.loads(\"\"\"\n Object = test_object\n foo = bar\n\n Object = 
embedded_object\n foo = bar\n End_Object\n\n Group = embedded_group\n foo = bar\n End_Group\n End_Object\n End\n \"\"\")\n test_object = label['test_object']\n assert isinstance(test_object, LabelObject)\n assert test_object['foo'] == 'bar'\n\n embedded_object = test_object['embedded_object']\n assert isinstance(embedded_object, LabelObject)\n assert embedded_object['foo'] == 'bar'\n\n embedded_group = test_object['embedded_group']\n assert isinstance(embedded_group, LabelGroup)\n assert embedded_group['foo'] == 'bar'\n\n with pytest.raises(pvl.decoder.ParseError):\n pvl.loads(\"\"\"\n BEGIN_OBJECT = foo\n END_OBJECT = bar\n \"\"\")\n\n\ndef test_groups():\n label = pvl.loads(\"\"\"\n Group = test_group\n foo = bar\n Object = embedded_object\n foo = bar\n End_Object\n\n Group = embedded_group\n foo = bar\n End_Group\n End_Group\n End\n \"\"\")\n test_group = label['test_group']\n assert isinstance(test_group, LabelGroup)\n assert test_group['foo'] == 'bar'\n\n embedded_object = test_group['embedded_object']\n assert isinstance(embedded_object, LabelObject)\n assert embedded_object['foo'] == 'bar'\n\n embedded_group = test_group['embedded_group']\n assert isinstance(embedded_group, LabelGroup)\n assert embedded_group['foo'] == 'bar'\n\n with pytest.raises(pvl.decoder.ParseError):\n pvl.loads(\"\"\"\n BEGIN_GROUP = foo\n END_GROUP = bar\n \"\"\")\n\n\ndef test_alt_group_style():\n label = pvl.loads(\"\"\"\n OBJECT = TEST1\n FOO = BAR\n END_OBJECT = TEST1\n\n GROUP = TEST2\n FOO = BAR\n END_GROUP = TEST2\n\n END\n \"\"\")\n test_group = label['TEST1']\n assert isinstance(test_group, LabelObject)\n assert test_group['FOO'] == 'BAR'\n\n embedded_object = label['TEST2']\n assert isinstance(embedded_object, LabelGroup)\n assert embedded_object['FOO'] == 'BAR'\n\n\ndef test_binary():\n label = pvl.loads(\"\"\"\n binary_number = 2#0101#\n positive_binary_number = +2#0101#\n negative_binary_number = -2#0101#\n End\n \"\"\")\n\n assert isinstance(label['binary_number'], int)\n assert label['binary_number'] == 5\n\n assert isinstance(label['positive_binary_number'], int)\n assert label['positive_binary_number'] == 5\n\n assert isinstance(label['negative_binary_number'], int)\n assert label['negative_binary_number'] == -5\n\n with pytest.raises(pvl.decoder.ParseError):\n pvl.loads('empty = 2##')\n\n with pytest.raises(pvl.decoder.ParseError):\n pvl.loads('binary_number = 2#0101')\n\n with pytest.raises(pvl.decoder.ParseError):\n pvl.loads('binary_number = 2#01014201#')\n\n\ndef test_octal():\n label = pvl.loads(\"\"\"\n octal_number = 8#0107#\n positive_octal_number = +8#0107#\n negative_octal_number = -8#0107#\n End\n \"\"\")\n\n assert isinstance(label['octal_number'], int)\n assert label['octal_number'] == 71\n\n assert isinstance(label['positive_octal_number'], int)\n assert label['positive_octal_number'] == 71\n\n assert isinstance(label['negative_octal_number'], int)\n assert label['negative_octal_number'] == -71\n\n with pytest.raises(pvl.decoder.ParseError):\n pvl.loads('empty = 8##')\n\n with pytest.raises(pvl.decoder.ParseError):\n pvl.loads('octal_number = 8#0107')\n\n with pytest.raises(pvl.decoder.ParseError):\n pvl.loads('octal_number = 8#01079#')\n\n\ndef test_hex():\n label = pvl.loads(\"\"\"\n hex_number_upper = 16#100A#\n hex_number_lower = 16#100b#\n positive_hex_number = +16#100A#\n negative_hex_number = -16#100A#\n End\n \"\"\")\n\n assert isinstance(label['hex_number_upper'], int)\n assert label['hex_number_upper'] == 4106\n\n assert isinstance(label['hex_number_lower'], int)\n 
assert label['hex_number_lower'] == 4107\n\n assert isinstance(label['positive_hex_number'], int)\n assert label['positive_hex_number'] == 4106\n\n assert isinstance(label['negative_hex_number'], int)\n assert label['negative_hex_number'] == -4106\n\n with pytest.raises(pvl.decoder.ParseError):\n pvl.loads('empty = 16##')\n\n with pytest.raises(pvl.decoder.ParseError):\n pvl.loads('hex_number_upper = 16#100A')\n\n with pytest.raises(pvl.decoder.ParseError):\n pvl.loads('hex_number_upper = 16#100AZ#')\n\n\ndef test_quotes():\n label = pvl.loads(\"\"\"\n foo = 'bar'\n empty = ''\n space = ' test '\n double = \"double'quotes\"\n single = 'single\"quotes'\n mixed = 'mixed\"\\\\'quotes'\n number = '123'\n date = '1918-05-11'\n multiline = 'this is a\n multi-line string'\n continuation = \"The planet Jupi-\n ter is very big\"\n formating = \"\\\\n\\\\t\\\\f\\\\v\\\\\\\\\\\\n\\\\t\\\\f\\\\v\\\\\\\\\"\n End\n \"\"\")\n\n assert isinstance(label['foo'], six.text_type)\n assert label['foo'] == 'bar'\n\n assert isinstance(label['empty'], six.text_type)\n assert label['empty'] == ''\n\n assert isinstance(label['space'], six.text_type)\n assert label['space'] == 'test'\n\n assert isinstance(label['double'], six.text_type)\n assert label['double'] == \"double'quotes\"\n\n assert isinstance(label['single'], six.text_type)\n assert label['single'] == 'single\"quotes'\n\n assert isinstance(label['single'], six.text_type)\n assert label['mixed'] == 'mixed\"\\'quotes'\n\n assert isinstance(label['number'], six.text_type)\n assert label['number'] == '123'\n\n assert isinstance(label['date'], six.text_type)\n assert label['date'] == '1918-05-11'\n\n assert isinstance(label['multiline'], six.text_type)\n assert label['multiline'] == 'this is a multi-line string'\n\n assert isinstance(label['continuation'], six.text_type)\n assert label['continuation'] == 'The planet Jupiter is very big'\n\n assert isinstance(label['formating'], six.text_type)\n assert label['formating'] == '\\n\\t\\f\\v\\\\\\n\\t\\f\\v\\\\'\n\n with pytest.raises(pvl.decoder.ParseError):\n pvl.loads('foo = \"bar')\n\n with pytest.raises(pvl.decoder.ParseError):\n pvl.loads(\"foo = 'bar\")\n\n with pytest.raises(pvl.decoder.ParseError):\n pvl.loads(\"foo = '\\\\bar'\")\n\n\ndef test_comments():\n label = pvl.loads(\"\"\"\n /* comment on line */\n # here is a line comment\n /* here is a multi-\n line comment */\n foo = bar /* comment at end of line */\n weird/* in the */=/*middle*/comments\n baz = bang # end line comment\n End\n \"\"\")\n\n assert len(label) == 3\n\n assert isinstance(label['foo'], six.text_type)\n assert label['foo'] == 'bar'\n\n assert isinstance(label['foo'], six.text_type)\n assert label['weird'] == 'comments'\n\n assert isinstance(label['foo'], six.text_type)\n assert label['baz'] == 'bang'\n\n with pytest.raises(pvl.decoder.ParseError):\n pvl.loads(b'/*')\n\n\ndef test_dates():\n label = pvl.loads(\"\"\"\n date1 = 1990-07-04\n date2 = 1990-158\n date3 = 2001-001\n time1 = 12:00\n time_s = 12:00:45\n time_s_float = 12:00:45.4571\n time_tz1 = 15:24:12Z\n time_tz2 = 01:12:22+07\n time_tz3 = 01:12:22+7\n time_tz4 = 01:10:39.4575+07\n datetime1 = 1990-07-04T12:00\n datetime2 = 1990-158T15:24:12Z\n datetime3 = 2001-001T01:10:39+7\n datetime4 = 2001-001T01:10:39.457591+7\n End\n \"\"\")\n\n assert isinstance(label['date1'], datetime.date)\n assert label['date1'] == datetime.date(1990, 7, 4)\n\n assert isinstance(label['date2'], datetime.date)\n assert label['date2'] == datetime.date(1990, 6, 7)\n\n assert 
isinstance(label['date3'], datetime.date)\n assert label['date3'] == datetime.date(2001, 1, 1)\n\n assert isinstance(label['time1'], datetime.time)\n assert label['time1'] == datetime.time(12)\n\n assert isinstance(label['time_s'], datetime.time)\n assert label['time_s'] == datetime.time(12, 0, 45)\n\n assert isinstance(label['time_s_float'], datetime.time)\n assert label['time_s_float'] == datetime.time(12, 0, 45, 457100)\n\n assert isinstance(label['time_tz1'], datetime.time)\n assert label['time_tz1'] == datetime.time(15, 24, 12, tzinfo=pytz.utc)\n\n assert isinstance(label['time_tz2'], datetime.time)\n assert label['time_tz2'] == datetime.time(1, 12, 22, tzinfo=pytz.FixedOffset(420)) # noqa\n\n assert isinstance(label['time_tz3'], datetime.time)\n assert label['time_tz3'] == datetime.time(1, 12, 22, tzinfo=pytz.FixedOffset(420)) # noqa\n\n assert isinstance(label['time_tz4'], datetime.time)\n assert label['time_tz4'] == datetime.time(1, 10, 39, 457500, pytz.FixedOffset(420)) # noqa\n\n assert isinstance(label['datetime1'], datetime.datetime)\n assert label['datetime1'] == datetime.datetime(1990, 7, 4, 12)\n\n assert isinstance(label['datetime2'], datetime.datetime)\n assert label['datetime2'] == datetime.datetime(1990, 6, 7, 15, 24, 12, tzinfo=pytz.utc) # noqa\n\n assert isinstance(label['datetime3'], datetime.datetime)\n assert label['datetime3'] == datetime.datetime(2001, 1, 1, 1, 10, 39, tzinfo=pytz.FixedOffset(420)) # noqa\n\n assert isinstance(label['datetime4'], datetime.datetime)\n assert label['datetime4'] == datetime.datetime(2001, 1, 1, 1, 10, 39, 457591, pytz.FixedOffset(420)) # noqa\n\n\ndef test_set():\n label = pvl.loads(\"\"\"\n strings = {a, b, c}\n nospace={a,b,c}\n numbers = {1, 2, 3}\n mixed = {a, 1, 2.5}\n multiline = {a,\n b,\n c}\n empty = {}\n End\n \"\"\")\n\n assert isinstance(label['strings'], set)\n assert len(label['strings']) == 3\n assert 'a' in label['strings']\n assert 'b' in label['strings']\n assert 'c' in label['strings']\n\n assert isinstance(label['nospace'], set)\n assert len(label['nospace']) == 3\n assert 'a' in label['nospace']\n assert 'b' in label['nospace']\n assert 'c' in label['nospace']\n\n assert isinstance(label['numbers'], set)\n assert len(label['numbers']) == 3\n assert 1 in label['numbers']\n assert 2 in label['numbers']\n assert 3 in label['numbers']\n\n assert isinstance(label['mixed'], set)\n assert len(label['mixed']) == 3\n assert 'a' in label['mixed']\n assert 1 in label['mixed']\n assert 2.5 in label['mixed']\n\n assert isinstance(label['multiline'], set)\n assert len(label['multiline']) == 3\n assert 'a' in label['multiline']\n assert 'b' in label['multiline']\n assert 'c' in label['multiline']\n\n assert isinstance(label['empty'], set)\n assert len(label['empty']) == 0\n\n\ndef test_sequence():\n label = pvl.loads(\"\"\"\n strings = (a, b, c)\n nospace=(a,b,c)\n numbers = (1, 2, 3)\n mixed = (a, 1, 2.5)\n empty = ()\n multiline = (a,\n b,\n c)\n linewrap = (1.234,1.2-\n 34,1.234-\n ,1.234)\n End\n \"\"\")\n\n assert isinstance(label['strings'], list)\n assert len(label['strings']) == 3\n assert label['strings'][0] == 'a'\n assert label['strings'][1] == 'b'\n assert label['strings'][2] == 'c'\n\n assert isinstance(label['nospace'], list)\n assert len(label['nospace']) == 3\n assert label['nospace'][0] == 'a'\n assert label['nospace'][1] == 'b'\n assert label['nospace'][2] == 'c'\n\n assert isinstance(label['numbers'], list)\n assert len(label['numbers']) == 3\n assert label['numbers'][0] == 1\n assert label['numbers'][1] == 
2\n assert label['numbers'][2] == 3\n\n assert isinstance(label['mixed'], list)\n assert len(label['mixed']) == 3\n assert label['mixed'][0] == 'a'\n assert label['mixed'][1] == 1\n assert label['mixed'][2] == 2.5\n\n assert isinstance(label['empty'], list)\n assert len(label['empty']) == 0\n\n assert isinstance(label['multiline'], list)\n assert len(label['multiline']) == 3\n assert label['multiline'][0] == 'a'\n assert label['multiline'][1] == 'b'\n assert label['multiline'][2] == 'c'\n\n assert isinstance(label['linewrap'], list)\n assert len(label['linewrap']) == 4\n assert label['linewrap'][0] == 1.234\n assert label['linewrap'][1] == 1.234\n assert label['linewrap'][2] == 1.234\n assert label['linewrap'][3] == 1.234\n\n\ndef test_units():\n label = pvl.loads(\"\"\"\n foo = 42 <beards>\n g = 9.8 <m/s>\n list = (1, 2, 3) <numbers>\n cool = (1 <number>)\n End\n \"\"\")\n assert isinstance(label['foo'], Units)\n assert label['foo'].value == 42\n assert label['foo'].units == 'beards'\n\n assert isinstance(label['g'], Units)\n assert label['g'].value == 9.8\n assert label['g'].units == 'm/s'\n\n assert isinstance(label['list'], Units)\n assert isinstance(label['list'].value, list)\n assert label['list'].units == 'numbers'\n\n assert isinstance(label['cool'], list)\n assert isinstance(label['cool'][0], Units)\n assert label['cool'][0].value == 1\n assert label['cool'][0].units == 'number'\n\n with pytest.raises(pvl.decoder.ParseError):\n pvl.loads(b'foo = bar <')\n\n\ndef test_delimiters():\n label = pvl.loads(\"\"\"\n foo = 1;\n Object = embedded_object;\n foo = bar;\n End_Object;\n bar = 2;\n Group = embedded_group;\n foo = bar;\n End_Group;\n End;\n \"\"\")\n\n assert isinstance(label, Label)\n assert label['foo'] == 1\n assert label['bar'] == 2\n\n assert isinstance(label['embedded_object'], LabelObject)\n assert label['embedded_object']['foo'] == 'bar'\n\n assert isinstance(label['embedded_group'], LabelGroup)\n assert label['embedded_group']['foo'] == 'bar'\n\n\ndef test_cube_label():\n with open(os.path.join(DATA_DIR, 'pattern.cub'), 'rb') as fp:\n label = pvl.load(fp)\n\n assert isinstance(label['Label'], dict)\n assert label['Label']['Bytes'] == 65536\n\n assert isinstance(label['IsisCube'], dict)\n assert isinstance(label['IsisCube']['Core'], dict)\n assert label['IsisCube']['Core']['StartByte'] == 65537\n assert label['IsisCube']['Core']['Format'] == 'Tile'\n assert label['IsisCube']['Core']['TileSamples'] == 128\n assert label['IsisCube']['Core']['TileLines'] == 128\n\n assert isinstance(label['IsisCube']['Core']['Dimensions'], dict)\n assert label['IsisCube']['Core']['Dimensions']['Samples'] == 90\n assert label['IsisCube']['Core']['Dimensions']['Lines'] == 90\n assert label['IsisCube']['Core']['Dimensions']['Bands'] == 1\n\n assert isinstance(label['IsisCube']['Core']['Pixels'], dict)\n assert label['IsisCube']['Core']['Pixels']['Type'] == 'Real'\n assert label['IsisCube']['Core']['Pixels']['ByteOrder'] == 'Lsb'\n assert label['IsisCube']['Core']['Pixels']['Base'] == 0.0\n assert label['IsisCube']['Core']['Pixels']['Multiplier'] == 1.0\n\n\ndef test_pds3_sample_image():\n infile = os.path.join(PDS_DATA_DIR, \"simple_image_1.lbl\")\n label = pvl.load(infile)\n assert label['RECORD_TYPE'] == 'FIXED_LENGTH'\n assert label['RECORD_BYTES'] == 824\n assert label['LABEL_RECORDS'] == 1\n assert label['FILE_RECORDS'] == 601\n assert label['IMAGE']['LINES'] == 600\n assert label['IMAGE']['LINE_SAMPLES'] == 824\n image_group = label['IMAGE']\n assert image_group['SAMPLE_TYPE'] == 'MSB_INTEGER'\n assert 
image_group['SAMPLE_BITS'] == 8\n assert abs(image_group['MEAN'] - 51.6778539644) <= 0.00001\n assert image_group['MEDIAN'] == 50.0\n assert image_group['MINIMUM'] == 0\n assert image_group['MAXIMUM'] == 255\n assert image_group['STANDARD_DEVIATION'] == 16.97019\n assert image_group['CHECKSUM'] == 25549531\n\n\ndef test_load_all_sample_labels():\n for filename in PDS_LABELS:\n label = pvl.load(filename)\n assert isinstance(label, Label)\n\n\ndef test_unicode():\n label = pvl.loads(u'foo=bar')\n assert isinstance(label, Label)\n assert label['foo'] == 'bar'\n\n\ndef test_bytes():\n label = pvl.loads(b'foo=bar')\n assert isinstance(label, Label)\n assert label['foo'] == 'bar'\n\n\ndef test_end_comment():\n label = pvl.loads(b'END/* commnet */')\n assert isinstance(label, Label)\n assert len(label) == 0\n\n\ndef test_parse_error():\n with pytest.raises(pvl.decoder.ParseError):\n pvl.loads(b'foo=')\n\n with pytest.raises(pvl.decoder.ParseError):\n pvl.loads(b'=')\n\n with pytest.raises(pvl.decoder.ParseError):\n pvl.loads(b'(}')\n\n with pytest.raises(pvl.decoder.ParseError):\n pvl.loads(b'foo=')\n\n with pytest.raises(pvl.decoder.ParseError):\n pvl.loads(b'foo=!')\n\n with pytest.raises(pvl.decoder.ParseError):\n pvl.loads(b'foo')\n\n with pytest.raises(pvl.decoder.ParseError):\n pvl.load(io.BytesIO(b'foo'))\n\n\nEV = decoder.EmptyValueAtLine\n\n\n@pytest.mark.parametrize(\n 'label, expected, expected_errors',\n [\n (\n 'broken1.lbl',\n [('foo', 'bar'), ('life', EV(2)), ('monty', 'python')],\n [2]\n ),\n (\n 'broken2.lbl',\n [('foo', 'bar'), ('life', EV(2))],\n [2]\n ),\n (\n 'broken3.lbl',\n [('foo', EV(1)), ('life', 42)],\n [1]\n ),\n (\n 'broken4.lbl',\n [('foo', 'bar'), ('life', EV(2)), ('monty', EV(3))],\n [2, 3]\n ),\n (\n 'broken5.lbl',\n [('foo', EV(1)), ('life', EV(2)), ('monty', 'python')],\n [1, 2]\n ),\n (\n 'broken6.lbl',\n [('foo', EV(1)), ('life', EV(1)), ('monty', EV(1))],\n [1, 2, 3]\n ),\n (\n 'broken7.lbl',\n [\n ('foo', 1),\n ('embedded_object', pvl.PVLObject(\n [('foo', 'bar'), ('life', EV(1))]))\n ],\n [4]\n ),\n (\n 'broken8.lbl',\n [\n ('foo', 1),\n ('embedded_group', pvl.PVLGroup(\n [('foo', 'bar'), ('life', EV(1))]))\n ],\n [4]\n ),\n (\n 'broken9.lbl',\n [('foo', 42), ('bar', EV(1))],\n [2]\n ),\n (\n 'broken10.lbl',\n [('foo', Units(42, 'beards')), ('cool', EV(1))],\n [2]\n ),\n (\n 'broken11.lbl',\n [('foo', EV(1)), ('cool', [Units(1, 'beards')])],\n [1]\n ),\n (\n 'broken12.lbl',\n [\n ('strs', ['a', 'b']),\n ('empty', EV(2)), ('multiline', ['a', 'b'])\n ],\n [2]\n\n ),\n (\n 'broken13.lbl',\n [\n ('same', 'line'),\n ('no', 'problem'),\n ('foo', EV(1)), ('bar', EV(2))\n ],\n [1, 2]\n ),\n (\n 'broken14.lbl',\n [('foo', 'bar'), ('weird', EV(3)), ('baz', 'bang')],\n [3]\n ),\n (\n 'broken15.lbl',\n [('foo', 'bar'), ('weird', 'comment'), ('baz', EV(4))],\n [4]\n ),\n (\n 'broken16.lbl',\n [('foo', EV(2)), ('weird', 'comment'), ('baz', 'bang')],\n [2]\n ),\n ])\ndef test_broken_labels(label, expected, expected_errors):\n with open(os.path.join(BROKEN_DIR, label), 'rb') as stream:\n module = pvl.load(stream, strict=False)\n expected = pvl.PVLModule(expected)\n\n assert module == expected\n assert module.errors == expected_errors\n assert not module.valid\n\n with open(os.path.join(BROKEN_DIR, label), 'rb') as stream:\n with pytest.raises(pvl.decoder.ParseError):\n pvl.load(stream, strict=True)\n\n\ndef test_EmptyValueAtLine():\n test_ev = decoder.EmptyValueAtLine(1)\n assert test_ev == ''\n assert 'foo' + test_ev == 'foo'\n assert isinstance(test_ev, str)\n assert 
test_ev.lineno == 1\n assert int(test_ev) == 0\n assert float(test_ev) == 0.0\n trep = (\n 'EmptyValueAtLine(1 does not have a value. Treat as an empty string)'\n )\n assert repr(test_ev) == trep\n\n\ndef test_load_all_bad_sample_labels():\n for filename in BAD_PDS_LABELS:\n label = pvl.load(filename, strict=False)\n assert isinstance(label, Label)\n","sub_path":"tests/test_decoder.py","file_name":"test_decoder.py","file_ext":"py","file_size_in_byte":25050,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"36282180","text":"import unittest\n\n\nclass TestUtils(unittest.TestCase):\n\n def setUp(self):\n self.test = 'Yes'\n\n def test_simple_conversion(self):\n result = 1\n expect = 1\n\n self.assertEqual(result, expect)\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"metaform/tests/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"248747024","text":"#!/usr/bin/python\n# tvp-dl is used to download .wmv files from tvp.pl\n# good idea if you're not about to play with silverlight!\n#\n#This is free and unencumbered software released into the public domain.\n#\n#Anyone is free to copy, modify, publish, use, compile, sell, or\n#distribute this software, either in source code form or as a compiled\n#binary, for any purpose, commercial or non-commercial, and by any\n#means.\n#\n#In jurisdictions that recognize copyright laws, the author or authors\n#of this software dedicate any and all copyright interest in the\n#software to the public domain. We make this dedication for the benefit\n#of the public at large and to the detriment of our heirs and\n#successors. We intend this dedication to be an overt act of\n#relinquishment in perpetuity of all present and future rights to this\n#software under copyright law.\n#\n#THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n#EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\n#MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.\n#IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR\n#OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,\n#ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR\n#OTHER DEALINGS IN THE SOFTWARE.\n#\n#For more information, please refer to \n\nimport os, sys, urllib, json, re\n\ndef quit(mess=\"\"):\n print (\"Something is wrong... 
\" + mess)\n sys.exit()\n\nif len(sys.argv) < 2:\n print(\"Welcome to tvp-dl!\")\n print(\"\\tUsage: %s url\" % sys.argv[0])\n print(\"\\tExample: %s http://www.tvp.pl/warszawa/magazyny/campusnews/wideo/31102013/12878238\" % sys.argv[0])\n sys.exit()\n\nprint (\"Let's start!\")\n\ntry:\n sock = urllib.urlopen(sys.argv[1])\n html = sock.read()\nexcept:\n quit(\"tvp.pl is not working\")\nelse:\n sock.close()\n\nk = re.compile('object_id:\\'[0-9]+\\'').search(html)\n\nif k.group() == None:\n quit(\"tvp-dl is outdated or url is not correct\")\n \nnum = k.group().split('\\'')[1]\n\ntry:\n sock = urllib.urlopen('http://www.tvp.pl/pub/stat/videofileinfo?video_id=%s' % num)\n html = sock.read()\nexcept:\n quit(\"tvp.pl is not working\")\nelse:\n sock.close()\n\ntry:\n d = json.loads(html)\nexcept:\n quit(\"can't load json...\")\n\nif not d['file_name']:\n quit(\"json does not contain video's url\")\n\nprint (\"Downloading...\")\n\n# yes, I know, it's not the best way, but wget is cool\n# if you don't like it, fork it and commit new version!\nos.system(\"wget %s\" % d['video_url'])\n\nprint (\"Done! Saved to %s\" % d['file_name'])\n\n","sub_path":"tvp-dl.py","file_name":"tvp-dl.py","file_ext":"py","file_size_in_byte":2570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"428319279","text":"from rest_framework import status\nfrom rest_framework.response import Response\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.views import APIView\n\nfrom myapp.serializers import EventSerializer, UserSerializer\nfrom myapp.models.EventModel import Event\nfrom myapp.models.UserModel import User\n\n\nclass JoinEventView(APIView):\n permission_classes = (IsAuthenticated,)\n\n def post(self, request, pk):\n\n if request.data[\"join\"]:\n event_model = Event.objects.get(id=pk)\n event_model.members.add(request.user.id)\n\n serializer = EventSerializer(event_model,\n data={\"members\": [member.id for member in event_model.members.all()]},\n partial=True)\n serializer.is_valid(raise_exception=True)\n serializer.save()\n\n user_model = User.objects.get(id=request.user.id)\n user_model.events.add(event_model.id)\n\n serializer = UserSerializer(user_model,\n data={\"events\": [event for event in user_model.events.all()]},\n partial=True)\n serializer.is_valid(raise_exception=True)\n serializer.save()\n\n return Response(status=status.HTTP_200_OK)\n\n return Response(status=status.HTTP_400_BAD_REQUEST)\n","sub_path":"myapp/views/JoinEventView.py","file_name":"JoinEventView.py","file_ext":"py","file_size_in_byte":1398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"204353173","text":"from remi import App, start\n# from remigui import GUI\nimport os\nimport remi.gui as tk\nfrom creators import C\nimport datetime\nfrom axis_bank_statement_analyzer_trainer_v2 import load_test_data, ingest_test_pdf\nfrom getbankdata import GetBankData\nimport matplotlib.pyplot as plt\nfrom PIL import Image\nimport pandas as pd\nfrom user import User\nfrom threading import Thread\n\n\nclass BankStatementAnalyzer(App):\n\n def __init__(self, *args):\n\n super(BankStatementAnalyzer, self).__init__(*args, static_file_path={'path': './resx/'})\n\n self.bank_list = ['Select Bank', 'Axis Bank', 'HDFC Bank', 'Kotak Mahindra Bank', 'ICICI Bank']\n self.date = datetime.date.today().strftime('%d-%m-%Y')\n self.time = datetime.datetime.now().time()\n self.model_name = 'model_ann_98.h5'\n self.cv_name = 
'vectorizer.sav'\n self.le_name = 'target_label_encoder.sav'\n self.frame_header_color = 'deepskyblue'\n self.frame_left_color = 'ivory'\n self.frame_footer_left_color = 'honeydew'\n self.frame_right_color = 'whitesmoke'\n self.frame_right_2_color = 'seashell'\n self.frame_login_register_color = 'azure'\n\n def idle(self):\n pass\n\n def main(self):\n self.date = datetime.date.today().strftime('%d-%m-%Y')\n self.time = datetime.datetime.now().time()\n self.dr = pd.DataFrame()\n self.listview = tk.ListView()\n self.frame_left = tk.Container()\n self.frame_right = tk.Container()\n self.frame_header = tk.Container()\n self.frame_right_2 = tk.Container()\n self.master_user = pd.DataFrame()\n self.window = tk.Container()\n self.window.css_background_color = \"azure\"\n self.window.css_height = \"100%\"\n self.window.css_width = \"100%\"\n self.window.css_left = \"0.0px\"\n self.window.css_top = \"0.0px\"\n self.window.css_position = \"absolute\"\n self.frame_header_color = 'deepskyblue'\n self.frame_left_color = 'ivory'\n self.frame_footer_left_color = 'honeydew'\n self.frame_right_color = 'whitesmoke'\n self.frame_right_2_color = 'seashell'\n self.frame_login_register_color = 'azure'\n self.selected_bank = []\n self.registry_info = {}\n self.login_info = {}\n self.frame_header = C.create_container(self.window, 10, 90, 0, 0)\n self.frame_header.css_background_color = self.frame_header_color\n self.frame_header.css_top = \"0%\"\n\n self.frame_footer_left = C.create_container(self.window, 12, 20, 0, 87)\n self.frame_footer_left.css_background_color = self.frame_footer_left_color\n\n self.progress = C.create_progress(self.window, 1, 100, 0, 99, a=0, b=100)\n\n self.frame_left = C.create_container(self.window, 75, 20, 0, 10)\n self.frame_left.css_background_color = self.frame_left_color\n self.frame_right = C.create_container(self.window, 75, 35, 21, 10)\n self.frame_right.css_background_color = self.frame_right_color\n self.frame_right_2 = C.create_container(self.window, 75, 33, 57, 10)\n self.frame_right_2.css_background_color = self.frame_right_2_color\n self.frame_login_register = C.create_container(self.window, 30, 10, 90, 0)\n self.frame_login_register.css_background_color = self.frame_login_register_color\n\n # --------------------- LABELS ---------------------------------------------------------- ]\n\n lbl_header = C.create_label(self.frame_header, 20, 25, 10, 30, text='BANK STATEMENT ANALYZER',\n bg='deepskyblue', fg='white')\n lbl_header.css_font_size = '18px'\n\n lbl_subheader = C.create_label(self.frame_header, 10, 20, 13.35, 60,\n text='-- Aru Raghuvanshi build 07042021',\n bg='deepskyblue', fg='white')\n lbl_subheader.css_font_size = '12px'\n\n lbl_datetime = C.create_label(self.frame_header, 20, 7, 93, 40, text=f'Date: {self.date}',\n bg='deepskyblue', fg='white', align='right')\n lbl_datetime.css_font_size = '14px'\n\n self.notif_1 = C.create_label(self.frame_footer_left, 6, 100, 0, 10, text='')\n self.notif_2 = C.create_label(self.frame_footer_left, 6, 100, 0, 60, text='')\n\n # --------------------- APPENDS --------------------------------------------------------- ]\n # self.window.append(self.frame_right)\n # self.window.append(self.frame_right_2)\n # self.window.append(self.frame_left)\n # self.window.append(self.frame_header)\n # self.window.append(self.frame_footer_left)\n\n self.login_btn = C.create_button(self.window, 3, 7, 92, 1, text='Login',\n command=lambda x: self.login_clicked())\n self.register_btn = C.create_button(self.window, 3, 7, 92, 6, text='Register',\n 
command=lambda x: self.register_clicked())\n\n return self.window\n\n # ====================== FUNCTIONS ============================================================ ]\n\n def login_clicked(self):\n self.frame_left.empty()\n print(f'Login Button pressed')\n self.frame_login_register.empty()\n self.lbl_username = C.create_label(self.frame_login_register, 7, 40, 5, 40, text='Username:', bg='azure')\n self.lbl_pw = C.create_label(self.frame_login_register, 7, 40, 5, 50, text='Password:', bg='azure')\n self.username = C.create_entry(self.frame_login_register, 7, 52, 40, 40, fg='black', input_type='regular',\n command=self.log_on_enter_username)\n self.pw = C.create_entry(self.frame_login_register, 7, 52, 40, 50, fg='black',\n command=self.on_password, input_type='password')\n self.login_ok = C.create_button(self.frame_login_register, 10, 15, 75, 65, text='OK',\n command=lambda x: self.login_ok_clicked())\n\n def on_password(self, w, val):\n self.login_info['pw1'] = val\n\n\n def login_ok_clicked(self):\n print(f'Ok clicked on Login Button')\n self.frame_login_register.empty()\n\n # Username and password must match on the same row of the records file\n df = pd.read_csv('user_registration_info.csv')\n df.drop('Unnamed: 0', inplace=True, axis=1)\n\n print(f\"Login username: {self.login_info['username']}\")\n\n match = df.loc[(df.username == self.login_info['username']) & (df.pw1 == self.login_info['pw1'])]\n print(f'Match\n{match}')\n if match.empty:\n C.create_label(self.frame_login_register, 10, 75, 20, 35,\n text='No Match.', bg='azure')\n else:\n C.create_label(self.frame_login_register, 10, 75, 20, 35,\n text=f\"Logged In.\", bg='azure')\n user = self.login_info['username']\n U = User(user)\n self.lgt = C.create_label(self.frame_login_register, 10, 75, 20, 35,\n text=f\"Session: {U.get_name()}\", bg='azure')\n self.logout_btn = C.create_button(self.window, 3, 7, 92, 1, text='Logout',\n command=lambda x: self.logout_clicked(),\n bg='lightgreen')\n\n # --------------------- FILE UPLOADER & SELECTOR -------------------------------------- ]\n upl = C.create_uploader(self.frame_left, 7, 30, 2, 4, filename='./files/')\n upl.onsuccess.do(self.fileupload_successful)\n upl.onfailed.do(self.fileupload_failed)\n\n # --------------------- BUTTONS --------------------------------------------------------- ]\n self.btn_analyze = C.create_button(self.frame_left, 7, 30, 2, 13, bg='cornflowerblue',\n command=lambda x: self.run_analyzer(), text='ANALYZE')\n\n # --------------------- DROPDOWNS --------------------------------------------------------- ]\n self.dropdn = C.create_dropdown(self.frame_left, self.bank_list, 7, 65, 35, 4,\n bg='powderblue', fg='white', command=self.drop_down_changed)\n\n\n def logout_clicked(self):\n\n self.frame_left.empty()\n self.frame_right.empty()\n self.frame_right_2.empty()\n self.frame_footer_left.empty()\n self.login_btn = C.create_button(self.window, 3, 7, 92, 1, text='Login',\n command=lambda x: self.login_clicked())\n self.register_btn = C.create_button(self.window, 3, 7, 92, 6, text='Register',\n command=lambda x: self.register_clicked())\n with self.update_lock:\n self.lgt.set_text(\"\")\n\n\n def register_clicked(self):\n print(f'Register Clicked')\n self.frame_login_register.empty()\n self.lbl_reg_username = C.create_label(self.frame_login_register, 7, 40, 5, 40, text='Username:', bg='azure')\n self.lbl_reg_pw = C.create_label(self.frame_login_register, 7, 40, 5, 50, text='Password:', 
bg='azure')\n self.lbl_cnf_pw = C.create_label(self.frame_login_register, 7, 40, 5, 60, text='Confirm Password:', bg='azure')\n self.username = C.create_entry(self.frame_login_register, 7, 52, 40, 40, fg='black',\n command=self.reg_on_enter_username)\n self.pw1 = C.create_entry(self.frame_login_register, 7, 52, 40, 50, fg='black',\n command=self.reg_on_enter_pw1, input_type='password')\n self.pw2 = C.create_entry(self.frame_login_register, 7, 52, 40, 60, fg='black',\n command=self.reg_on_enter_pw2, input_type='password')\n self.login_ok = C.create_button(self.frame_login_register, 10, 15, 75, 75, text='OK',\n command=lambda x: self.register_ok_clicked())\n\n\n def reg_on_enter_username(self, w, val):\n self.registry_info['username'] = val\n print(val)\n\n def reg_on_enter_pw1(self, w, val):\n self.registry_info['pw1'] = val\n\n def reg_on_enter_pw2(self, w, val):\n self.registry_info['pw2'] = val\n\n def log_on_enter_username(self, w, val):\n self.login_info['username'] = val\n\n def log_on_enter_pw(self, w, val):\n self.login_info['pw1'] = val\n\n\n\n def register_ok_clicked(self):\n print(f'Ok clicked on Register Button')\n self.frame_login_register.empty()\n\n # Check for an exact username match in the records, if the file exists\n try:\n df = pd.read_csv('user_registration_info.csv') # Read the User records file\n df.drop('Unnamed: 0', inplace=True, axis=1)\n\n if (df.username == self.registry_info['username']).any():\n lbl = C.create_label(self.frame_login_register, 20, 75, 20, 35,\n text='User Record Exists.', bg='azure')\n return\n except:\n print(f'User Record File not existing.')\n\n # Validate the two password entries even when no records file exists yet\n if self.registry_info['pw1'] != self.registry_info['pw2']:\n lbl1 = C.create_label(self.frame_login_register, 20, 75, 20, 35,\n text='Passwords do not match.', bg='azure')\n return\n\n C.create_label(self.frame_login_register, 10, 75, 20, 35,\n text='Registration Successful.', bg='azure')\n\n # Get and store the user registration information to a text file for now, later pickle it.\n ftwo = {k: self.registry_info[k] for k in list(self.registry_info)[:2]}\n self.master_user = self.master_user.append(ftwo, ignore_index=True)\n\n if not os.path.isfile('user_registration_info.csv'):\n self.master_user.to_csv('user_registration_info.csv', mode='a', header='column_names')\n else:\n self.master_user.to_csv('user_registration_info.csv', mode='a', header=False)\n\n\n def drop_down_changed(self, w, drpvalue):\n self.notif_1.set_text('Bank: ' + drpvalue)\n self.selected_bank.append(drpvalue)\n\n\n\n def fileupload_successful(self, w, filename):\n lbl_succ = C.create_label(self.frame_left, 7, 98, 2, 15, text=f'{filename} uploaded.',\n display='flex-end', align='center', justify='space-around')\n lbl_succ.css_background_color = 'lightgreen'\n return filename\n\n\n def fileupload_failed(self, w, filename):\n lbl_fail = C.create_label(self.frame_left, 5,60,40,1, text=f'{filename} Upload Failed')\n lbl_fail.css_background_color = self.frame_left_color\n\n\n def set_notification(self, text, bar=1):\n if bar == 1:\n self.notif_1.set_text(text)\n if bar == 2:\n self.notif_2.set_text(text)\n\n\n def compute_progress(self, val):\n self.progress.set_value(val)\n\n\n def run_analyzer(self):\n self.T = Thread(target=self.run_analysis, daemon=False)\n self.T.start()\n # self.run_analysis()\n\n def run_analysis(self):\n\n try:\n if self.selected_bank[-1] == 'Axis Bank':\n\n with self.update_lock:\n self.progress.set_value(20)\n self.set_notification('Analyzing. 
Please wait...', bar=2)\n import time\n time.sleep(2)\n\n testpdf = 'files\\\\Aru Axis 1st Apr 2020 - 31st Mar 2021.pdf'\n # testpdf = 'files\\\\AruAxis_17_18.pdf'\n model_name = 'models\\\\model_ann_98.h5'\n cv_name = 'models\\\\vectorizer.sav'\n le_name = 'models\\\\label_encoder.sav'\n\n with self.update_lock:\n self.progress.set_value(50)\n self.set_notification('Running Neural Network...', bar=2)\n df, testcsv = ingest_test_pdf(testpdf) # df from pdf-csv conversion\n\n with self.update_lock:\n self.progress.set_value(80)\n self.set_notification('Rendering results...', bar=2)\n dt, self.df = load_test_data(testcsv, model_name, cv_name, le_name)\n\n self.table = C.create_table(self.frame_right, dt, 91, 97, 2, 4,\n align='left', justify='left',\n display='block')\n\n self.btn_graph = C.create_button(self.frame_left, 7, 30, 35, 13, bg='yellowgreen',\n command=lambda x: self.create_graph(), text='VIEW EXPENSES')\n with self.update_lock:\n self.progress.set_value(0)\n self.set_notification('DONE', bar=2)\n\n self.btn_analyze = C.create_button(self.frame_left, 7, 30, 68, 13, bg='cornflowerblue',\n text='ANALYTICS', command=lambda x: self.clicked_analytics())\n\n else:\n self.set_notification('Sorry. This Bank is currently not supported.')\n\n except IndexError:\n self.set_notification('Please Select Bank from the Dropdown list.')\n\n\n def create_graph(self):\n print('In create Graph')\n def expense_by_category(df, cat='PRED_CAT', exception=False,\n exceptvalue='Woodstock', exceptvalue2='Credit'):\n\n print(f'self.df:\\n{self.df}')\n if exception:\n print('in exception to filter exception values')\n self.dr = df[(df[cat] != exceptvalue) & (df[cat] != exceptvalue2)]\n else:\n self.dr = df[df.DR > 0]\n print('in exception else (normal)')\n\n catdf = self.dr.groupby(cat).sum()['DR'].plot(kind='bar', figsize=(15, 10),\n color='yellowgreen', fontsize=14,\n title='Expenses by Category')\n catdf.set_xlabel('Category of Expense', fontsize=20)\n catdf.set_ylabel('Amount in Rupees', fontsize=20)\n catdf.set_title('Expenses by Category', fontsize=20)\n print(f'catdf: \\n {catdf}')\n\n # with self.update_lock:\n # plt.show() #ValueError: signal only works in main thread\n plt.savefig('resx/expenses.png')\n\n\n items = self.df.PRED_CAT.unique().tolist()\n print(f'df.PRED_CAT items: {items}')\n\n lblv = C.create_label(self.frame_left, 4, 100, 0, 36, text='>> Filter by:', bg='khaki')\n\n self.listview = C.create_listview(self.frame_left, items, 57, 60, 2, 39,\n display='')\n self.listview.onselection.do(self.list_view_on_selected)\n\n expense_by_category(self.df, cat='PRED_CAT', exception=False,\n exceptvalue='Woodstock', exceptvalue2='Credit')\n print('Running expense_by_category, image should open')\n\n\n\n def list_view_on_selected(self, w, selected_item_key):\n \"\"\" The selection event of the listView, returns a key of the clicked event.\n You can retrieve the item rapidly\n \"\"\"\n\n self.listsel = self.listview.children[selected_item_key].get_text()\n print(f'Selected Item in listview: {self.listsel}, type: {type(self.listsel)}')\n\n ct = self.dr[self.dr.PRED_CAT == self.listsel]\n\n # Creates dataframe of the selected entity from the list\n xt = ct.copy()\n ct.DR = ct.DR.astype(str)\n ct.CR = ct.CR.astype(str)\n ct = ct.T\n\n dr_sum, cr_sum = sum(xt.DR), sum(xt.CR)\n\n lr = C.create_label(self.frame_right_2, 5, 95, 2, 95, text=f'Total DR: {dr_sum}',\n bg='lightpink', align='right', justify='right')\n print(f'ct dataframe from list selection:\\n{xt}')\n res = []\n for column in ct.columns:\n li = 
ct[column].tolist()\n res.append(li)\n print(f'res list: {res}')\n res.insert(0, ['DATE', 'PARTICULARS', 'DR', 'CR', 'TYPE', 'PREDICTED CATEGORY'])\n self.table2 = C.create_table(self.frame_right_2, res, 80, 97, 2, 4,\n align='center', justify='center', display='block')\n self.table2.style['overflow'] = 'overflow'\n self.frame_right_2.append(self.table2)\n\n\n def clicked_analytics(self):\n self.frame_right_2.empty()\n self.img = C.create_image(self.frame_right_2, '/path:expenses.png', 50, 96, 2, 2)\n\n\n\nconfiguration = {'config_project_name': 'MainScreen',\n 'config_address': '127.0.0.1',\n 'config_port': 8084, 'config_multiple_instance': True,\n 'config_enable_file_cache': True,\n 'config_start_browser': True,\n 'config_resourcepath': './resx/'}\n\n\nstart(BankStatementAnalyzer, address=configuration['config_address'], port=configuration['config_port'],\n multiple_instance=configuration['config_multiple_instance'],\n enable_file_cache=configuration['config_enable_file_cache'],\n start_browser=configuration['config_start_browser'])\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":19429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"346635547","text":"import RPi.GPIO as GPIO\nimport time\n\n\nclass TaxiLantern:\n\n def __init__(self, pin):\n GPIO.setmode(GPIO.BOARD)\n GPIO.setup(pin, GPIO.OUT)\n self.pin = pin\n\n def on(self):\n print('Lamp is on')\n GPIO.output(self.pin, GPIO.HIGH)\n\n def off(self):\n GPIO.output(self.pin, GPIO.LOW)\n\n\nif __name__ == \"__main__\":\n\n lantern = TaxiLantern(40)\n\n try:\n\n while True:\n print(\"low\")\n lantern.off()\n time.sleep(0.5)\n\n print(\"high\")\n lantern.on()\n time.sleep(1)\n\n finally:\n GPIO.cleanup()","sub_path":"TaxiLantern.py","file_name":"TaxiLantern.py","file_ext":"py","file_size_in_byte":609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"64353572","text":"from __future__ import unicode_literals\n\nfrom django.db import models\nfrom components.db import Procedure, ProcedureMixin, procedure_defaults\n# from tansy.lookupModels.mdlLkpSchExamGradingSystem import mdlClsLkpSchExamGradingSystem\nfrom tansy.lookupModels.mdlLkpSchSubjectType import mdlClsLkpSchSubjectType\nfrom tansy.lookupModels.mdlLkpOrgFacility import mdlClsLkpOrgFacility\n\nclass mdlClsSubjectGetData(models.Manager, ProcedureMixin):\n\n def mdlFnSprocSubjectGrid(self):\n procedure = 'sproc_sch_subject_grid'\n Procedure(self, procedure, [], [], [], False)\n return self.get_result()\n\n objIstLkpOrgFacility = mdlClsLkpOrgFacility()\n objIstLkpSchSubjectType = mdlClsLkpSchSubjectType()\n\nclass mdlClsSubjectDML(models.Model, ProcedureMixin):\n\n objIstSubjectGridData = mdlClsSubjectGetData()\n\n subject_entity_id = models.BigAutoField(primary_key=True, default=0)\n subject_name = models.CharField(max_length=128)\n subject = models.CharField(max_length=128)\n subject_short_code = models.CharField(max_length=128)\n short_code = models.CharField(max_length=128)\n subject_type_id = models.BigIntegerField(default=0)\n reporting_order = models.BigIntegerField(default=0)\n facility_entity_id = models.BigIntegerField(default=0)\n facility_ids = models.IntegerField(default=0)\n entity_id = models.IntegerField(default=0)\n active = models.IntegerField(default=0)\n\n def mdlFnSprocOrgEntityScope(self):\n procedure = 'sproc_org_entity_scope'\n\n iparams = [\n ':iparam_entity_id'\n ]\n idata = self.get_input_params(iparams)\n 
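# note: get_input_params is assumed (per the ProcedureMixin helper from components.db) to map each ':iparam_*' placeholder to the matching model attribute; Procedure then executes the named stored procedure with those values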
Procedure(self, procedure, iparams, idata, [], False)\n res = self.get_result()\n\n def mdlFnSprocSubjectDetail(self):\n procedure = 'sproc_sch_subject_detail'\n\n iparams = [\n ':iparam_subject_entity_id'\n ]\n idata = self.get_input_params(iparams)\n Procedure(self, procedure, iparams, idata, [], False)\n res = self.get_result()\n\n def mdlFnSprocSubjectInsert(self, request):\n procedure = 'sproc_sch_subject_dml_ins'\n iparams = [\n '-iparam_subject_name',\n '-iparam_subject_short_code',\n ':iparam_subject_type_id',\n '-iparam_reporting_order',\n ':iparam_facility_ids',\n ]\n idata = self.get_input_params(iparams)\n iparams, idata = procedure_defaults(request, iparams, idata)\n oparams = [\n '@oparam_subject_entity_id'\n ]\n Procedure(self, procedure, iparams, idata, oparams)\n\n def mdlFnSprocSubjectUpdate(self, request):\n procedure = 'sproc_sch_subject_dml_upd'\n iparams = [\n ':iparam_subject_entity_id',\n '-iparam_subject_name',\n '-iparam_subject_short_code',\n ':iparam_subject_type_id',\n '-iparam_reporting_order',\n ':iparam_active',\n ':iparam_facility_ids',\n ]\n idata = self.get_input_params(iparams)\n iparams, idata = procedure_defaults(request, iparams, idata)\n Procedure(self, procedure, iparams, idata)\n\n def mdlFnSprocSubjectDelete(self, request):\n procedure = 'sproc_sch_subject_dml_del'\n iparams = [\n ':iparam_subject_entity_id'\n ]\n idata = [self.subject_entity_id]\n iparams, idata = procedure_defaults(request, iparams, idata)\n Procedure(self, procedure, iparams, idata)\n","sub_path":"appSchool/models/modelSubject.py","file_name":"modelSubject.py","file_ext":"py","file_size_in_byte":3455,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"514164953","text":"\"\"\"\nCHEN pumping test analysis\nLinear system solvers to be used in other methods\n\nFunctions:\n L1MLR > L1 Multi-linear regression\n MLR > Multi-linear regression\n\"\"\"\n\nimport numpy as _np\nimport data_validation as _data_validation\n\n\n\"\"\"______________________ LINEAR SOLVER METHODS ____________________________\"\"\"\n\n\n# L1 Multi-linear regression\n# Input\n# x [ndarray] independent variable nxm\n# y [ndarray] dependent variable nx1\n# Output\n# B [ndarray] coefficients of fitted model, B[0] intercept, B[1] slope\ndef L1MLR(x, y, error=1e-6):\n # Check input data\n x = _data_validation.to_ndarray(x)\n y = _data_validation.to_ndarray(y)\n\n if x.ndim == 1: # from row vector to columns vector\n x = x.reshape((len(x), 1))\n n, m = x.shape # size\n\n # Create matrix system\n A = _np.ones((n, m + 1))\n A[:, 1:] = x[:] # avoid origin interception\n B1 = MLR(A, y, True) # first approximation\n B = B1 + 9999.0\n\n # Main loop\n while _np.max(_np.abs(B - B1)) > error:\n B1 = B.copy() # update coefficients\n W = _np.abs(B1[0] + _np.dot(x, B1[1:]) - y)\n W[W < error] = error # replace small values\n W = (1.0 / W) ** 0.5 # observation weights (based on residuals)\n A1 = W[:, None] * A\n B = MLR(A1, W * y, True) # new coefficients\n\n return(B) # End of function\n\n\ndef MLR(x, y, origin=False):\n \"\"\"\n Multi-linear regression\n\n INPUTS:\n x [ndarray] independent variable nxm\n y [ndarray] dependent variable nx1\n origin [boolean] if true then intersection is fixed with origin (0,0)\n OUTPUTS:\n coef [ndarray] coefficients of fitted model, coef[0] intercept, coef[1] slope\n \"\"\"\n\n # Check input data\n x = _data_validation.to_ndarray(x)\n y = _data_validation.to_ndarray(y)\n\n if x.ndim == 1: # from row vector to columns vector\n x = 
x.reshape((len(x), 1))\n    n, m = x.shape  # size\n\n    if origin:  # origin intersection\n        A = x.copy()\n    else:  # optimize intersection\n        A = _np.ones((n, m + 1))\n        A[:, 1:] = x[:]\n\n    # Solve system\n    coef = _np.dot(_np.linalg.inv(_np.dot(A.transpose(), A)),\n                   _np.dot(A.transpose(), y))\n    return(coef)  # End Function\n\n","sub_path":"build/lib/chen/utilities/linear_regression.py","file_name":"linear_regression.py","file_ext":"py","file_size_in_byte":2301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"510072559","text":"\n\n#class header\nclass _PARLAY():\n\tdef __init__(self,): \n\t\tself.name = \"PARLAY\"\n\t\tself.definitions = [u'to use or develop money, skills, etc. in a way that makes more money or leads to success: ']\n\n\t\tself.parents = []\n\t\tself.children = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'verbs'\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/verbs/_parlay.py","file_name":"_parlay.py","file_ext":"py","file_size_in_byte":369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"13600903","text":"from pprint import pprint\nimport requests\nimport sys\nfrom io import BytesIO\nfrom hashlib import md5 as _md\nimport json\nfrom GUI import *\n\nclock = pygame.time.Clock()\nFPS = 60\n\n\ndef no_color(string):\n    res = ''\n    fill = True\n    for i in string:\n        if fill and i == '\\x1b':\n            fill = False\n            continue\n        if not fill and i == 'm':\n            fill = True\n            continue\n        res += i\n    return res\n\n\nERROR_STRING = '\\x1b[31;1mError\\x1b[0m \\x1b[1m(\\x1b[36;1m%s\\x1b[1m): \\x1b[4;1m%s\\x1b[0m'\nif 'win' in sys.platform:\n    try:\n        import colorama\n        colorama.init()\n    except ImportError:\n        ERROR_STRING = no_color(ERROR_STRING)\n\n\nURLS = {\n    \"geocode\": \"http://geocode-maps.yandex.ru/1.x/\",\n    \"static\": \"https://static-maps.yandex.ru/1.x/\",\n    \"search\": \"https://search-maps.yandex.ru/v1/\"\n}\n\nGEOCODE = 'geocode'\nSTATIC = 'static'\nSEARCH = 'search'\nAPI_KEY = open('api_key', 'r').read()\nSIZE = 600, 450\nSIZE_RECT = pygame.Rect((0, 0), SIZE)\n\nRADIUS = 50 / 111144\n\n\ndef md5(string):\n    return str(_md(str(string).encode('utf-8')).hexdigest())\n\n\ndef get_coord(location, sco='longlat'):\n    params = {\n        \"geocode\": location,\n        \"sco\": sco\n    }\n    response = get_request(GEOCODE, params)\n    if response:\n        res = response.json()['response']['GeoObjectCollection']['featureMember'][0]['GeoObject']['Point']['pos']\n        return tuple(map(float, res.split()))\n\n\ndef convert_bytes(bytes_string):\n    im = Image.open(BytesIO(bytes_string)).convert(\"RGBA\")\n    return pygame.image.fromstring(im.tobytes(), im.size, im.mode)\n\n\ndef get_request(url, params=None, **kwargs):\n    try:\n        if url in URLS:\n            if url == SEARCH:\n                if params is None:\n                    url += \"&apikey=\" + API_KEY + \"&lang=ru_RU\"\n                else:\n                    params['apikey'] = API_KEY\n                    params['lang'] = 'ru_RU'\n\n            if url == GEOCODE:\n                if params is None:\n                    url += \"&format=json\"\n                else:\n                    params['format'] = 'json'\n\n            url = URLS[url]\n        res = requests.get(url, params=params)\n        if not res:\n            print(ERROR_STRING % (res.status_code, res.reason), \"\\n\\nURL:\", res.url)\n            sys.exit(res.status_code)\n        else:\n            return res\n    except Exception as err:\n        print(ERROR_STRING % (type(err).__name__, err))\n        sys.exit(1)\n\n\ndef get_geo_object(locate):\n    locate = locate if type(locate) in [tuple, list] else [locate]\n    params = {\n        \"geocode\": str_param(*locate)\n    }\n\n    res = 
get_request(GEOCODE, params).json()\n    try:\n        geo_object = res[\"response\"][\"GeoObjectCollection\"][\"featureMember\"][0][\"GeoObject\"]\n        return geo_object\n    except IndexError:\n        return get_geo_object(\"Украина, село Криворучка\")\n\n\ndef str_param(*params):\n    return ','.join(map(str, params))\n\n\ndef get_address(coords, postal_code=False):\n    geo_object = get_geo_object(coords)[\"metaDataProperty\"][\"GeocoderMetaData\"]  # type: dict\n    json.dump(geo_object, open('geo.json', 'w'), ensure_ascii=False, indent=2)\n    address = geo_object[\"text\"]\n    if postal_code and get_postal_code(coords):\n        address += \", \" + geo_object[\"Address\"]['postal_code']\n    return address\n\n\ndef get_postal_code(coords):\n    geo_object = get_geo_object(coords)[\"metaDataProperty\"][\"GeocoderMetaData\"]  # type: dict\n    return geo_object.get(\"Address\", {'postal_code': None}).get('postal_code', None)\n\n\n# POINTS #\npoints = []\n\n\nclass Point:\n    def __init__(self, long, lat, style='pm2', color='wt', size='m', content=''):\n        \"\"\"\n        https://tech.yandex.ru/maps/doc/staticapi/1.x/dg/concepts/markers-docpage/\n        :return: point data\n        \"\"\"\n        self.pos = long, lat\n        self.style = style\n        self.color = color\n        self.size = size\n        self.content = content\n\n    def __str__(self):\n        return str_param(str_param(*self.pos), ''.join(filter(lambda x: x,\n                                                              (self.style, self.color, self.size, self.content))))\n\n\ndef create_point(long, lat, style='pm2', color='wt', size='m', content=''):\n    p = Point(long, lat, style, color, size, content)\n    if len(points) <= 100:\n        points.append(p)\n    return p\n\n\ndef render_points():\n    return '~'.join([str(points[i]) + (str(i + 1) if i < 99 else '') for i in range(len(points))])\n\n\ndef geo_search(coord):\n    params = {\n        \"text\": str_param(*coord),\n        \"ll\": str_param(*coord),\n        \"spn\": str_param(RADIUS, RADIUS),\n        \"type\": \"biz\",\n        \"rspn\": 1\n    }\n    return get_request(SEARCH, params).json()['features']\n\n\n# def screen_biz(coord, screen: pygame.Surface):\n#     res = \"Organization: %s\\n\\n\" \\\n#           \"Website: %s\\n\" \\\n#           \"Address: %s\\n\" \\\n#           \"Phone(s): %s\\n\" \\\n#           \"Postal Address: %s\\n\" \\\n#           \"Categories: %s\"\n#\n#     t = geo_search(coord)\n#     if not t:\n#         return\n#\n#     biz = t[0]  # type: dict\n#     json.dump(biz, open(\"biz.json\", \"w\"), indent=2, ensure_ascii=False)\n#     company = biz['properties']['CompanyMetaData']  # type: dict\n#     name = company['name']  # type: str\n#\n#     postal_code = company.get('postalCode', 'Not found')\n#     address = company.get('address', 'Address not found')\n#     url = company.get('url', 'Website not found')\n#\n#     hour = company.get('Hours', {}).get(\"text\", \"\")\n#     name += \" (%s)\" % hour if hour else ''\n#\n#     categories = ', '.join(map(lambda x: x['name'], company.get('Categories', [])))\n#     categories = categories if categories else \"No categories\"\n#\n#     phones = ', '.join(map(lambda x: x['formatted'], company.get('Phones', [])))\n#     phones = phones if phones else \"Phones not found\"\n#\n#     res = res % (name, url, address, phones, postal_code, categories)\n#\n#     bg = pygame.Surface(screen.get_size(), pygame.SRCALPHA)\n#     bg.fill(to_color(\"#FFFFFF99\"))\n#     screen.blit(bg, (0, 0))\n#     rect = screen.get_rect()  # type: pygame.Rect\n#     rect.topleft = 5, 5\n#     rect.bottom -= 5\n#     rect.right -= 5\n#     l = Label(rect, res, \"gray25\", auto_line_break=True)\n#     while True:\n#         for event in pygame.event.get():\n#             if event.type == pygame.QUIT:\n#                 sys.exit(0)\n#             if event.type == pygame.MOUSEBUTTONDOWN and event.button in (1, 3):\n#                 return True\n#\n#         l.render(screen)\n#         
pygame.display.flip()\n# clock.tick(FPS)\n\n\nif __name__ == '__main__':\n res = geo_search((45.0200828, 53.12381011515711))\n json.dump(geo_search((45.0200828, 53.12381011515711)), open('test.json', 'w'), ensure_ascii=False, indent=2)\n json.dump(geo_search((37.764662, 55.719081)), open('test2.json', 'w'), ensure_ascii=False, indent=2)\n","sub_path":"multitool.py","file_name":"multitool.py","file_ext":"py","file_size_in_byte":6966,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"319206140","text":"\"\"\" Circle views \"\"\"\n\n# Django REST\nfrom rest_framework import mixins, viewsets\nfrom rest_framework.permissions import IsAuthenticated\n\n# Serializers\nfrom cride.circles.serializers.circles import CircleModelSerializer\n\n# Models\nfrom cride.circles.models import Circle, Membership\n\n# Permissions\nfrom cride.circles.permissions import IsCircleAdmin\n\nclass CircleViewSet(mixins.CreateModelMixin,\n\t\t\t\t\tmixins.RetrieveModelMixin,\n\t\t\t\t\tmixins.UpdateModelMixin,\n\t\t\t\t\tmixins.ListModelMixin,\n\t\t\t\t\tviewsets.GenericViewSet):\n\tserializer_class = CircleModelSerializer\n\tlookup_field = 'slug_name'\n\n\tdef get_permissions(self):\n\t\t\"\"\" Assign permissions based on circle action \"\"\"\n\t\tpermissions = [IsAuthenticated]\n\t\tif self.action in ['update', 'partial_update']:\n\t\t\tpermissions.append(IsCircleAdmin)\n\t\treturn [permission() for permission in permissions]\n\n\tdef get_queryset(self):\n\t\t\"\"\" Restrict list method to public circles \"\"\"\n\t\tqueryset = Circle.objects.all()\n\t\tif self.action == 'list':\n\t\t\treturn queryset.filter(is_public=True)\n\t\treturn queryset\n\t\n\tdef perform_create(self, serializer):\n\t\t\"\"\" Assign circle admin \"\"\"\n\t\tcircle = serializer.save()\n\t\tuser = self.request.user\n\t\tprofile = user.profile\n\t\tMembership.objects.create(\n\t\t\tuser=user,\n\t\t\tprofile=profile,\n\t\t\tcircle=circle,\n\t\t\tis_admin=True,\n\t\t\tremaining_invitations=10\n\t\t)\n","sub_path":"cride/circles/views/circles.py","file_name":"circles.py","file_ext":"py","file_size_in_byte":1320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"481536582","text":"#Exercise 12\n\n#Write a program that takes a list of numbers (for example, a = [5, 10, 15, 20, 25]) and makes a new list of only the first and last elements of the given list. 
For practice, write this code inside a function.\n\nimport random\n\ndef create_list(length) :\n\tlistname = random.sample(range(1000), length)\n\treturn listname\n\t\ndef filter_list(listname) :\n\tlistresult = []\n\tlistresult.append(listname[0])\n\tlistresult.append(listname[-1])\n\treturn listresult\n\t\ndef print_result(listname) :\n\tresult = filter_list(listname)\t\n\tprint(\"Random List : \\n\")\t\n\tprint(listname)\n\tprint(\"\\nFirst & Last Elements of List : \\n\")\n\tprint(result)\n\nprint_result((create_list(10)))\n","sub_path":"exercise_12.py","file_name":"exercise_12.py","file_ext":"py","file_size_in_byte":665,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"492915842","text":"\nimport os\nimport unittest\nimport vtk, qt, ctk, slicer\nfrom slicer.ScriptedLoadableModule import *\nimport logging\nimport fnmatch\nimport numpy as np\nimport random\nimport math\n\n\n#\n# MergeCurves\n#\n\nclass MergeCurves(ScriptedLoadableModule):\n \"\"\"Uses ScriptedLoadableModule base class, available at:\n https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py\n \"\"\"\n \n def __init__(self, parent):\n ScriptedLoadableModule.__init__(self, parent)\n self.parent.title = \"MergeCurves\" # TODO make this more human readable by adding spaces\n self.parent.categories = [\"SlicerMorph.SlicerMorph Utilities\"]\n self.parent.dependencies = []\n self.parent.contributors = [\"Sara Rolfe (UW), Murat Maga (UW)\"] # replace with \"Firstname Lastname (Organization)\"\n self.parent.helpText = \"\"\"\n This module interactively merges markups nodes.\n

For more information see the online documentation.
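To use: highlight two or more curve nodes in the module's markups list and press "Merge highlighted nodes"; checking "Continuous curves" drops the duplicated junction point where one curve ends and the next begins.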

\n \"\"\"\n #self.parent.helpText += self.getDefaultModuleDocumentationLink()\n self.parent.acknowledgementText = \"\"\"\n This module was developed by Sara Rolfe, and Murat Maga for SlicerMorph. SlicerMorph was originally supported by an NSF/DBI grant, \"An Integrated Platform for Retrieval, Visualization and Analysis of 3D Morphology From Digital Biological Collections\" \n awarded to Murat Maga (1759883), Adam Summers (1759637), and Douglas Boyer (1759839). \n https://nsf.gov/awardsearch/showAward?AWD_ID=1759883&HistoricalAwards=false\n \"\"\" # replace with organization, grant and thanks.\n\n#\n# MergeCurvesWidget\n#\n\nclass MergeCurvesWidget(ScriptedLoadableModuleWidget):\n \"\"\"Uses ScriptedLoadableModuleWidget base class, available at:\n https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py\n \"\"\"\n def setup(self):\n ScriptedLoadableModuleWidget.setup(self)\n \n # Instantiate and connect widgets ...\n \n #\n # Parameters Area\n #\n parametersCollapsibleButton = ctk.ctkCollapsibleButton()\n parametersCollapsibleButton.text = \"Parameters\"\n self.layout.addWidget(parametersCollapsibleButton)\n \n # Layout within the dummy collapsible button\n parametersFormLayout = qt.QFormLayout(parametersCollapsibleButton)\n \n #\n # check box to trigger taking screen shots for later use in tutorials\n #\n self.continuousCurvesCheckBox = qt.QCheckBox()\n self.continuousCurvesCheckBox.checked = 0\n self.continuousCurvesCheckBox.setToolTip(\"If checked, redundant points will be removed on merging.\")\n parametersFormLayout.addRow(\"Contiuous curves\", self.continuousCurvesCheckBox)\n \n #\n # Apply Button\n #\n self.applyButton = qt.QPushButton(\"Apply\")\n self.applyButton.toolTip = \"Generate semilandmarks.\"\n self.applyButton.enabled = False\n parametersFormLayout.addRow(self.applyButton)\n \n #\n # markups view\n #\n self.markupsView = slicer.qMRMLSubjectHierarchyTreeView()\n self.markupsView.setMRMLScene(slicer.mrmlScene)\n self.markupsView.setMultiSelection(True)\n self.markupsView.setAlternatingRowColors(True)\n self.markupsView.setDragDropMode(qt.QAbstractItemView().DragDrop)\n self.markupsView.setColumnHidden(self.markupsView.model().transformColumn, True)\n self.markupsView.sortFilterProxyModel().setNodeTypes([\"vtkMRMLMarkupsCurveNode\"])\n parametersFormLayout.addRow(self.markupsView)\n \n #\n # Merge Button\n #\n self.mergeButton = qt.QPushButton(\"Merge highlighted nodes\")\n self.mergeButton.toolTip = \"Generate a single merged markup file from the selected nodes\"\n self.mergeButton.enabled = False\n parametersFormLayout.addRow(self.mergeButton)\n \n # connections\n self.mergeButton.connect('clicked(bool)', self.onMergeButton)\n self.markupsView.connect('currentItemChanged(vtkIdType)', self.updateMergeButton)\n \n # Add vertical spacer\n self.layout.addStretch(1)\n \n def cleanup(self):\n pass\n \n def onMergeButton(self):\n logic = MergeCurvesLogic()\n logic.run(self.markupsView, self.continuousCurvesCheckBox.checked)\n \n def updateMergeButton(self):\n nodes=self.markupsView.selectedIndexes()\n self.mergeButton.enabled = bool(nodes)\n\n#\n# MergeCurvesLogic\n#\n\nclass MergeCurvesLogic(ScriptedLoadableModuleLogic):\n \"\"\"This class should implement all the actual\n computation done by your module. 
The interface\n should be such that other python code can import\n this class and make use of the functionality without\n requiring an instance of the Widget.\n Uses ScriptedLoadableModuleLogic base class, available at:\n https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py\n \"\"\"\n def run(self, markupsTreeView, continuousCurveOption):\n nodeIDs=markupsTreeView.selectedIndexes()\n nodeList = vtk.vtkCollection()\n for id in nodeIDs:\n if id.column() == 0:\n currentNode = slicer.util.getNode(id.data())\n nodeList.AddItem(currentNode)\n mergedNodeName = \"mergedMarkupsNode\"\n mergedNode = slicer.mrmlScene.AddNewNodeByClass('vtkMRMLMarkupsCurveNode', mergedNodeName)\n purple=[1,0,1]\n mergedNode.GetDisplayNode().SetSelectedColor(purple)\n self.mergeList(nodeList, mergedNode, continuousCurveOption)\n return True\n \n def mergeList(self, nodeList,mergedNode, continuousCurveOption):\n pointList=[] \n connectingNode=False\n # Add semi-landmark points within triangle patches\n for currentNode in nodeList:\n for index in range(currentNode.GetNumberOfControlPoints()):\n if not(index==0 and continuousCurveOption and connectingNode):\n pt = currentNode.GetNthControlPointPositionVector(index)\n pt_array = [pt.GetX(), pt.GetY(), pt.GetZ()]\n if pt_array not in pointList:\n pointList.append(pt_array)\n fiducialLabel = currentNode.GetNthControlPointLabel(index)\n fiducialDescription = currentNode.GetNthControlPointDescription(index)\n #fiducialMeasurement = currentNode.GetNthMeasurement(index)\n mergedNode.AddControlPoint(pt,fiducialLabel)\n mergedIndex = mergedNode.GetNumberOfControlPoints()-1\n mergedNode.SetNthControlPointDescription(mergedIndex,fiducialDescription)\n #mergedNode.SetNthMeasurement(mergedIndex, fiducialMeasurement)\n connectingNode=True \n return True\n \nclass MergeCurvesTest(ScriptedLoadableModuleTest):\n \"\"\"\n This is the test case for your scripted module.\n Uses ScriptedLoadableModuleTest base class, available at:\n https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py\n \"\"\"\n \n def setUp(self):\n \"\"\" Do whatever is needed to reset the state - typically a scene clear will be enough.\n \"\"\"\n slicer.mrmlScene.Clear(0)\n \n def runTest(self):\n \"\"\"Run as few or as many tests as needed here.\n \"\"\"\n self.setUp()\n self.test_MergeCurves1()\n \n def test_MergeCurves1(self):\n \"\"\" Ideally you should have several levels of tests. At the lowest level\n tests should exercise the functionality of the logic with different inputs\n (both valid and invalid). At higher levels your tests should emulate the\n way the user would interact with your code and confirm that it still works\n the way you intended.\n One of the most important features of the tests is that it should alert other\n developers when their changes will have an impact on the behavior of your\n module. 
For example, if a developer removes a feature that you depend on,\n    your test should break so they know that the feature is needed.\n    \"\"\"\n    \n    self.delayDisplay(\"Starting the test\")\n    #\n    # first, get some data\n    #\n    import urllib.request\n    downloads = (\n        ('http://slicer.kitware.com/midas3/download?items=5767', 'FA.nrrd', slicer.util.loadVolume),\n        )\n    for url,name,loader in downloads:\n      filePath = slicer.app.temporaryPath + '/' + name\n      if not os.path.exists(filePath) or os.stat(filePath).st_size == 0:\n        logging.info('Requesting download %s from %s...\\n' % (name, url))\n        urllib.request.urlretrieve(url, filePath)\n      if loader:\n        logging.info('Loading %s...' % (name,))\n        loader(filePath)\n    self.delayDisplay('Finished with download and loading')\n    \n    volumeNode = slicer.util.getNode(pattern=\"FA\")\n    logic = MergeCurvesLogic()\n    self.assertIsNotNone( logic.hasImageData(volumeNode) )\n    self.delayDisplay('Test passed!')\n","sub_path":"MergeCurves/MergeCurves.py","file_name":"MergeCurves.py","file_ext":"py","file_size_in_byte":8585,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"274036574","text":"import maya.cmds as cmds\nimport os as os\nimport os.path\nfrom os.path import isfile, join\nimport shutil\n\nrootPath = 'A:\\\\VEJFESTEN\\\\sequences'\n\nfileStructure = \"\"\"FileName : \nCreated :\nLastEdit :\"\"\"\n\n\n#### ----------------------------- Functions ------------------------------ ####\n\ndef listAll(dirPath):\n    print(dirPath)\n    allFiles = [str(f) for f in os.listdir(dirPath)]\n    allFiles = sorted(allFiles, key=str.lower)\n    print(allFiles)\n    return allFiles\n\n\ndef listFiles(dirPath):\n    print(dirPath)\n    onlyFiles = [str(f) for f in os.listdir(dirPath) if isfile(join(dirPath, f))]\n    onlyFiles = sorted(onlyFiles, key=str.lower)\n    print(onlyFiles)\n    return onlyFiles\n\n\ndef listFolders(dirPath):\n    print(dirPath)\n    onlyFolders = [str(f) for f in os.listdir(dirPath) if not isfile(join(dirPath, f))]\n    onlyFolders = sorted(onlyFolders, key=str.lower)\n    print(onlyFolders)\n    return onlyFolders\n\n\ndef compliantFile(fileList, fileStruct):\n    compliantFiles = []\n    for f in fileList:\n        if str(fileStruct) in str(f):\n            compliantFiles.append(f)\n    print(fileList)\n    print(compliantFiles)\n    return compliantFiles\n\n\n### -------------------------------------------------------------------------------####\n### ------------------------- UI Col Management -----------------------------###\n#######################################################################################\n\n\n# Transfer Brightness from B to A\ndef brightnessCol(A, b):\n    import colorsys\n\n    hsvA = colorsys.rgb_to_hsv(A[0], A[1], A[2])\n    rgbOut = colorsys.hsv_to_rgb(hsvA[0], hsvA[1], (hsvA[2] + b))\n    return rgbOut\n\n\n# Add b in saturation to A color\ndef saturationCol(A, b):\n    import colorsys\n\n    hsvA = colorsys.rgb_to_hsv(A[0], A[1], A[2])\n    rgbOut = colorsys.hsv_to_rgb(hsvA[0], (hsvA[1] + b), hsvA[2])\n    return rgbOut\n\n\n### ----------------------------- UI Functions --------------------------------###\n#######################################################################################\n\ndef getSequence():\n    global selectedSeq\n    global shotFolders\n\n    cmds.textScrollList(shotList, edit=True, removeAll=True)\n    cmds.textScrollList(fileList, edit=True, removeAll=True)\n\n    selectedSeq = cmds.textScrollList(seqList, query=True, selectItem=True)\n    shotDir = rootPath + '\\\\' + selectedSeq[0]\n    shotFolders = listFolders(shotDir)\n    cmds.textScrollList(shotList, edit=True, append=shotFolders)\n\n\ndef getShot():\n    
global selectedSeq\n    global selectedShot\n    global fileFolders\n\n    cmds.textScrollList(fileList, edit=True, removeAll=True)\n\n    selectedShot = cmds.textScrollList(shotList, query=True, selectItem=True)\n    fileDir = rootPath + '\\\\' + selectedSeq[0] + '\\\\' + selectedShot[0] + '\\\\' + 'anim' + '\\\\' + 'work'\n    try:\n        fileFolders = listFiles(fileDir)\n    except:\n        print('No animation work folder inside this directory')\n    cmds.textScrollList(fileList, edit=True, append=fileFolders)\n\n\ndef createShotFile():\n    global selectedSeq\n    global selectedShot\n\n    print(selectedSeq)\n    print(selectedShot)\n\n    sourceLayout = rootPath + '\\\\' + selectedSeq[0] + '\\\\' + selectedShot[\n        0] + '\\\\' + 'layout' + '\\\\' + 'publish' + '\\\\' + selectedSeq[0] + '_' + selectedShot[0] + '_LAY.ma'\n    animPathFolder = rootPath + '\\\\' + selectedSeq[0] + '\\\\' + selectedShot[0] + '\\\\' + 'anim' + '\\\\' + 'work'\n    fileNameBase = str(selectedSeq[0]) + '_' + str(selectedShot[0]) + '_ANM_000.ma'\n    selectedFile = cmds.textScrollList(fileList, query=True, selectItem=True)\n    # check if there is no file selected\n    if selectedFile == None:\n        # there is no shot selected\n        if selectedShot == '':\n            cmds.confirmDialog(title='Warning no shot selected', message='Try to select a shot first', button=['OK'],\n                               defaultButton='OK')\n\n        # there is a shot selected\n        else:\n            # check if there is already an 'anim\\work' folder for the selected shot\n            if os.path.isdir(animPathFolder):\n                if os.path.isfile(animPathFolder + '\\\\' + fileNameBase):\n                    cmds.confirmDialog(title='Warning', message='There is already a file created for this shot',\n                                       button=['OK'], defaultButton='OK')\n                else:\n                    createConfirm = cmds.confirmDialog(title='Create a new animation file', ma='center',\n                                                       message='Create a file for shot: \\r' + fileNameBase + '?',\n                                                       button=['Yes', 'No'], defaultButton='Yes', cancelButton='No',\n                                                       dismissString='No')\n                    if createConfirm == 'Yes':\n                        try:\n                            # copy the published layout into the work folder, then rename it to the ANM naming convention\n                            shutil.copy(sourceLayout, animPathFolder)\n                            os.rename(animPathFolder + '\\\\' + selectedSeq[0] + '_' + selectedShot[0] + '_LAY.ma',\n                                      animPathFolder + '\\\\' + fileNameBase)\n                        except:\n                            print('cannot create the new file')\n                    if not os.path.isfile(sourceLayout):\n                        createConfirm = cmds.confirmDialog(title='No Layout file', ma='center',\n                                                           message='Cannot create file for \\r' + fileNameBase + '\\r Because there is no layout file associated',\n                                                           button=['Ok'], defaultButton='Ok', cancelButton='Ok',\n                                                           dismissString='Ok')\n\n            else:\n                os.makedirs(animPathFolder)\n                createConfirm = cmds.confirmDialog(title='Create a new animation file', ma='center',\n                                                   message='Create a file for shot: \\r' + fileNameBase + '?',\n                                                   button=['Yes', 'No'], defaultButton='Yes', cancelButton='No',\n                                                   dismissString='No')\n                if createConfirm == 'Yes':\n                    try:\n                        # copy the published layout into the work folder, then rename it to the ANM naming convention\n                        shutil.copy(sourceLayout, animPathFolder)\n                        os.rename(animPathFolder + '\\\\' + selectedSeq[0] + '_' + selectedShot[0] + '_LAY.ma',\n                                  animPathFolder + '\\\\' + fileNameBase)\n                    except:\n                        print('cannot create the new file')\n\n\n### --------------------------------- SCRIPT INIT --------------------------------- ###\n#######################################################################################\n\nrootFolders = listFolders(rootPath)\nselectedSeq = ''\nselectedShot = ''\nselectedFile = ''\nshotFolders = []\nfileFolders = []\n\n\"\"\"\n# ---- UiStartingPoint\n\"\"\"\n\nwindowID = 'Animation Scene Loader'\nwindowW = 600\nwindowH = 600\ncolorBase = [0.1, 0.4, 0.2]\ncontrastVal = 0.2\ncontrastSatVal = -0.1\ncolorLevelA = brightnessCol(saturationCol(colorBase, contrastSatVal), contrastVal)\ncolorLevelB = 
brightnessCol(saturationCol(colorLevelA, contrastSatVal), contrastVal)\ncolorLevelC = brightnessCol(saturationCol(colorLevelB, contrastSatVal), contrastVal)\n\ntry:\n    if cmds.window(ASLWindow, exists=True):\n        cmds.deleteUI(ASLWindow)\nexcept:\n    print('first iteration of ASLWindow')\n\nASLWindow = cmds.window(windowID, title=windowID, resizeToFitChildren=True, sizeable=True, w=windowW, h=windowH)\ncmds.columnLayout()\ncmds.text(label=windowID, w=windowW, h=25, bgc=colorBase)\n\ncmds.text(label='', w=windowW, h=5)\ncmds.rowLayout(numberOfColumns=4)\nseqList = cmds.textScrollList(allowMultiSelection=False, append=rootFolders, w=windowW / 4, h=windowH,\n                              sc='getSequence()')\nshotList = cmds.textScrollList(allowMultiSelection=False, append=shotFolders, w=windowW / 4, h=windowH, sc='getShot()')\nfileList = cmds.textScrollList(allowMultiSelection=False, append=fileFolders, w=windowW / 4, h=windowH)\n\ncmds.rowLayout(numberOfColumns=4, cw3=[5, windowW / 4 - 10, 5])\ncmds.text(label='', w=5)\ncmds.columnLayout(w=windowW / 4 - 10)\n\ncmds.button(label='Increment', w=windowW / 4 - 20, h=15)\ncmds.text(label='', w=5, h=5)\ncmds.text(align='left', label=fileStructure)\n\ncmds.text(label='-------------------------------', h=24, enable=False)\n\ncmds.button(label='Create', w=windowW / 4 - 20, h=15, c='createShotFile()')\ncmds.button(label='Save Into', w=windowW / 4 - 20, h=15)\n\ncmds.text(label='-------------------------------', h=24, enable=False)\n\ncmds.textField(w=windowW / 4 - 20, h=15)\ncmds.text(label='', w=5, h=2)\ncmds.button(label='New Root Version', w=windowW / 4 - 20, h=15)\n\ncmds.setParent('..')\n\ncmds.showWindow(ASLWindow)\n","sub_path":"Maya/ProjectRelated/Vejfesten/Pipeline/animLoader/old/animLoader_v0.1.py","file_name":"animLoader_v0.1.py","file_ext":"py","file_size_in_byte":8802,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"545102139","text":"import time\nimport argparse\nimport os\n\nimport networkx as nx\nimport numpy as np\nimport torch\nimport torch.optim as optim\n\nfrom torch_geometric.datasets import TUDataset\nfrom torch_geometric.datasets import Planetoid\nfrom torch_geometric.data import DataLoader\n\nimport torch_geometric.nn as pyg_nn\n\nfrom models import GNNStack, EGNNStack\nfrom utils import build_optimizer, objectview\n\ndef train(dataset, args):\n    log_path = './Data/uci/'+args.log_dir+'/'\n    os.mkdir(log_path)\n\n    # build model\n    if args.gnn_type == 'GNN':\n        model = GNNStack(dataset.num_node_features, args.hidden_dim, args.embed_dim, \n                            args)\n    elif args.gnn_type == 'EGNN':\n        model = EGNNStack(dataset.num_node_features, args.hidden_dim, args.embed_dim, \n                            args)\n    scheduler, opt = build_optimizer(args, model.parameters())\n\n    loader = DataLoader(dataset, batch_size=args.batch_size, shuffle=True)\n\n    # train\n    Train_loss = []\n    Valid_mse = []\n    Valid_l1 = []\n\n    mask_defined = False\n    for epoch in range(args.epochs):\n        train_loss = 0.\n        valid_mse = 0.\n        valid_l1 = 0.\n        for data in loader:\n            model.train()\n            if (not mask_defined) or (args.fix_train_mask == 0):\n                if args.load_train_mask == 1:\n                    print('loading train validation mask')\n                    train_mask = np.load(args.train_mask_dir)\n                    train_mask = torch.BoolTensor(train_mask).view(-1)\n                else:\n                    print('defining train validation mask')\n                    train_mask = (torch.FloatTensor(data.edge_attr.shape[0], 1).uniform_() < (1-args.valid)).view(-1)\n                valid_mask = ~train_mask\n                mask_defined = True\n\n            known_mask = train_mask.clone().detach()\n            known_mask[train_mask] = 
(torch.FloatTensor(torch.sum(train_mask).item()).uniform_() < args.known)\n # known mask is a mask that masks train mask\n \n x = torch.FloatTensor(np.copy(data.x))\n edge_attr = data.edge_attr.clone().detach()\n edge_index = torch.tensor(np.copy(data.edge_index),dtype=int)\n\n\n if args.remove_unknown_edge == 1:\n known_edge_index = edge_index[:,known_mask]\n known_edge_attr = edge_attr[known_mask]\n train_edge_index = edge_index[:,train_mask]\n train_edge_attr = edge_attr[train_mask]\n else:\n train_edge_index = edge_index\n train_edge_attr = edge_attr.clone().detach()\n train_edge_attr[valid_mask] = 0.\n known_edge_index = edge_index\n known_edge_attr = edge_attr.clone().detach()\n known_edge_attr[~known_mask] = 0.\n\n\n opt.zero_grad()\n pred = model(x, known_edge_attr, known_edge_index, edge_index)\n label = edge_attr\n\n pred_train = pred[train_mask]\n label_train = label[train_mask]\n loss = model.loss(pred_train, label_train)\n loss.backward()\n opt.step()\n train_loss += loss.item()\n\n model.eval()\n pred = model(x, train_edge_attr, train_edge_index, edge_index)\n pred_valid = pred[valid_mask]\n label_valid = label[valid_mask]\n mse = model.metric(pred_valid, label_valid, 'mse')\n valid_mse += mse.item()\n l1 = model.metric(pred_valid, label_valid, 'l1')\n valid_l1 += l1.item()\n\n train_loss /= len(dataset)\n\n Train_loss.append(train_loss)\n Valid_mse.append(valid_mse)\n Valid_l1.append(valid_l1)\n print('epoch: ',epoch)\n print('loss: ',train_loss)\n print('valid mse: ',valid_mse)\n print('valid l1: ',valid_l1)\n\n pred_train = pred_train.detach().numpy()\n label_train = label_train.detach().numpy()\n pred_valid = pred_valid.detach().numpy()\n label_valid = label_valid.detach().numpy()\n\n import pickle\n obj = dict()\n obj['args'] = args\n obj['train_loss'] = Train_loss\n obj['valid_mse'] = Valid_mse\n obj['valid_l1'] = Valid_l1\n obj['pred_train'] = pred_train\n obj['label_train'] = label_train\n obj['pred_valid'] = pred_valid\n obj['label_valid'] = label_valid\n pickle.dump(obj, open(log_path+'result.pkl', \"wb\" ))\n\n torch.save(model.state_dict(), log_path+'model.pt')\n import matplotlib.pyplot as plt\n plt.figure()\n plt.subplot(3,1,1)\n plt.plot(Train_loss,linewidth=1.)\n plt.title('train mse')\n plt.subplot(3,1,2)\n plt.plot(Valid_mse,linewidth=1.)\n plt.title('valid mse')\n plt.subplot(3,1,3)\n plt.plot(Valid_l1,linewidth=1.)\n plt.title('valid mae')\n plt.savefig(log_path+'curve.png')\n plt.close()\n\n plt.figure()\n plot1, = plt.plot(pred_train[::100],linewidth=1.)\n plot2, = plt.plot(label_train[::100],linewidth=1.)\n plt.legend([plot1,plot2],['pred','label'])\n plt.title('final train result')\n plt.savefig(log_path+'final_train.png')\n plt.close()\n plt.figure()\n plot1, = plt.plot(pred_valid[::25],linewidth=1.) 
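# note: the train/valid overlays here subsample the series ([::100] and [::25]) purely to keep the plots legible; the logged mse/l1 metrics above use every point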
\n plot2, = plt.plot(label_valid[::25],linewidth=1.)\n plt.legend([plot1,plot2],['pred','label'])\n plt.title('final valid result')\n plt.savefig(log_path+'final_valid.png')\n plt.close()\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('--gnn_type', type=str, default='GNN')\n parser.add_argument('--model_types', type=str, default='EGCN_EGCN_EGCN')\n parser.add_argument('--hidden_dim', type=int, default=6)\n parser.add_argument('--embed_dim', type=int, default=6)\n parser.add_argument('--batch_size', type=int, default=32) # doesn't matter here\n parser.add_argument('--epochs', type=int, default=5000)\n parser.add_argument('--opt', type=str, default='adam')\n parser.add_argument('--opt_scheduler', type=str, default='none')\n parser.add_argument('--opt_restart', type=int, default=0)\n parser.add_argument('--opt_decay_step', type=int, default=0)\n parser.add_argument('--opt_decay_rate', type=float, default=0)\n parser.add_argument('--dropout', type=float, default=0.)\n parser.add_argument('--weight_decay', type=float, default=0.)\n parser.add_argument('--lr', type=float, default=0.001)\n parser.add_argument('--valid', type=float, default=0.3)\n parser.add_argument('--known', type=float, default=1.1)\n parser.add_argument('--fix_train_mask', type=int, default=1) # 1: yes, 0: no\n parser.add_argument('--load_train_mask', type=int, default=1)\n parser.add_argument('--train_mask_dir', type=str, default='./Data/uci/len6336rate0.7seed0.npy')\n parser.add_argument('--remove_unknown_edge', type=int, default=1)\n parser.add_argument('--seed', type=int, default=4)\n parser.add_argument('--log_dir', type=str, default='1')\n args = parser.parse_args()\n args.model_types = args.model_types.split('_')\n\n seed = args.seed\n np.random.seed(seed)\n torch.manual_seed(seed)\n\n \n from uci import get_dataset, UCIDataset\n dataset = UCIDataset(root='/tmp/UCI')\n # dataset = dataset.shuffle() # add this line!\n train(dataset, args) \n\nif __name__ == '__main__':\n main()\n\n","sub_path":"train_uci.py","file_name":"train_uci.py","file_ext":"py","file_size_in_byte":7243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"509923774","text":"\nimport avalon.api\n\n\nclass UnpackLoadedSubset(avalon.api.InventoryAction):\n \"\"\"Unpack loaded subset into scene\n \"\"\"\n\n label = \"Unpack Subset\"\n icon = \"warning\"\n color = \"#ff6666\"\n order = 200\n\n @staticmethod\n def is_compatible(container):\n from maya import cmds\n from avalon.maya.pipeline import AVALON_CONTAINERS\n\n if not container:\n return False\n\n if container[\"loader\"] not in [\n \"CameraLoader\",\n \"LightSetLoader\",\n \"LookLoader\",\n \"MayaShareLoader\",\n \"ModelLoader\",\n \"PointCacheReferenceLoader\",\n \"RigLoader\",\n \"SetDressLoader\",\n ]:\n return False\n\n containers = AVALON_CONTAINERS[1:] # Remove root namespace\n parents = cmds.listSets(object=container[\"objectName\"]) or []\n # Must be a root container\n if containers in parents:\n return True\n return False\n\n def consent(self):\n from reveries.plugins import message_box_warning\n\n title = \"Unpack Subset\"\n msg = (\"Subset will not be able to update nor managed after \"\n \"this action.\\nAre you sure ?\")\n\n return message_box_warning(title, msg, optional=True)\n\n def _get_reference_node(self, members):\n \"\"\"Get the reference node from the container members\n Args:\n members: list of node names\n\n Returns:\n str: Reference node name.\n\n \"\"\"\n from reveries.maya import lib\n\n # 
Collect the references without .placeHolderList[] attributes as\n # unique entries (objects only) and skipping the sharedReferenceNode.\n references = lib.get_reference_node(members)\n\n if not references:\n return None\n\n # Get highest reference node (least parents)\n highest = lib.get_highest_reference_node(references)\n\n return highest\n\n def process(self, containers):\n from maya import cmds\n from avalon.maya.pipeline import AVALON_CONTAINERS\n from avalon.tools import cbsceneinventory\n from reveries.maya import hierarchy, pipeline\n from reveries.maya.vendor import sticker\n from reveries import REVERIES_ICONS\n\n if not self.consent():\n return\n\n dimmed_icon = REVERIES_ICONS + \"/package-01-dimmed.png\"\n\n for container in containers:\n if not self.is_compatible(container):\n continue\n\n node = container[\"objectName\"]\n members = cmds.sets(node, query=True) or []\n\n reference_node = self._get_reference_node(members)\n if reference_node is not None:\n # Import Reference\n cmds.file(importReference=True, referenceNode=reference_node)\n\n namespace = container[\"namespace\"]\n\n for child in hierarchy.get_sub_container_nodes(container):\n # Update sub-containers' namespace entry\n child_ns = cmds.getAttr(child + \".namespace\")\n new_ns = child_ns[len(namespace):]\n cmds.setAttr(child + \".namespace\", new_ns, type=\"string\")\n # Add to root container\n cmds.sets(child, forceElement=AVALON_CONTAINERS)\n\n # Merge namespace to root\n cmds.namespace(removeNamespace=namespace,\n mergeNamespaceWithRoot=True)\n\n # Update subset group icon\n group = pipeline.get_group_from_container(node)\n if group is not None:\n sticker.put(group, dimmed_icon)\n\n # Delete container\n cmds.delete(node)\n\n # Refresh GUI\n cbsceneinventory.app.window.refresh()\n\n # Update Icon\n sticker.reveal()\n","sub_path":"plugins/maya/inventory/action_unpack_loaded_subset.py","file_name":"action_unpack_loaded_subset.py","file_ext":"py","file_size_in_byte":3780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"200453876","text":"import scrapy\n\n\nclass OlxSpider(scrapy.Spider):\n name = \"olx-spider\"\n allowed_domains = [\"olx.co.id\"]\n start_urls = ['https://www.olx.co.id/bandung-kota_g4000018/disewakan-rumah-apartemen_c5160?filter=type_eq_rumah']\n\n def parse(self,response):\n self.log(response.url+' Visited')\n for ad in response.css('li.EIR5N'):\n item = {\n 'link': ad.css('a::attr(href)').extract_first(),\n 'harga': ad.css('span._89yzn::text').extract_first(),\n 'room': ad.css('span._2TVI3::text').extract_first(),\n 'lokasi': ad.css('span.tjgMj::text').extract_first(),\n 'date': ad.css('span.zLvFQ > span::text').extract_first()\n }\n yield(item)\n\n","sub_path":"olx_scraper/spiders/spider.py","file_name":"spider.py","file_ext":"py","file_size_in_byte":751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"125350291","text":"def rev_comp(pattern):\n\n rev_comp = []\n\n for x in pattern:\n if x=='A':\n rev_comp.append('T')\n elif x=='T':\n rev_comp.append('A')\n elif x=='G':\n rev_comp.append('C')\n elif x=='C':\n rev_comp.append('G')\n\n return ''.join(rev_comp)[::-1]\n\n\"\"\"===============MAIN===============\"\"\"\n\n\"\"\"\nFirst we must obtain the input pattern\n\"\"\"\n\nf = open('rosalind_revp.txt')\nraw = f.read()\nf.close()\n\npattern = ''.join(raw.strip().split()[1:])\n\n\"\"\"\nNow that we have the input pattern we can work on finding these\n'reverse palindrome' sequences between 
lengths 4 and 12\n\"\"\"\n\nsite_list = []\n\nfor l in range(4,12+1): \n for i in range(len(pattern)-l+1):\n cur_pattern = pattern[i:i+l]\n if(cur_pattern == rev_comp(cur_pattern)):\n site_list.append((i, l))\n\n\"\"\"\nsort the site list by index \n\"\"\"\n\nsite_list.sort(key=lambda tup: tup[0])\n\nfor tup in site_list:\n print(tup[0]+1, tup[1])\n","sub_path":"files/locating_restriction_sites.py","file_name":"locating_restriction_sites.py","file_ext":"py","file_size_in_byte":967,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"578796283","text":"import numpy as np\nfrom scipy.stats import norm,multivariate_normal\n\n#math\nimport math\n\nK_value=49\n\ndef Speaker_model(b_train,ubm_weights,ubm_means,ubm_var,T_value,gamma_UBM):\n\tprob_set=density_func(b_train,ubm_means,ubm_var,T_value,K_value)\n\tubm_value_set=naive_G_U(prob_set,ubm_weights)\n\tposteri_prob=post_prob_model(prob_set,ubm_weights,ubm_value_set)\n\tnew_mu=mu_model(posteri_prob,b_train)\n\tnew_cov=cov_model(posteri_prob,b_train,new_mu,T_value)\n\tnew_weight=weight_model(posteri_prob,T_value)\n\tnew_mu_adapted,new_cov_adapted,new_weight_adapted=adapted_mode(posteri_prob,new_mu,ubm_means,new_cov,ubm_var,new_weight,ubm_weights,gamma_UBM)\n\treturn new_mu_adapted,new_cov_adapted,new_weight_adapted\n\n\n\n\n\n# probability density function\ndef density_func(b_train,ubm_means,ubm_var,T_value,K_value):\n prob_set=np.zeros((K_value,T_value))\n for k in range(K_value):\n for t in range(T_value):\n prob_set[k,t]=multivariate_normal.pdf(b_train[:,t],ubm_means[k],ubm_var[k,:,:])\n return prob_set\n\n\n#calculate the naive GMM-UBM\ndef naive_G_U(prob_set,ubm_weights): \n ubm_value_set=np.dot(ubm_weights,prob_set)\n return ubm_value_set\n\n#calculate the posteri_prob\ndef post_prob_model(prob_set,ubm_weights,ubm_value_set):\n prob=np.multiply(ubm_weights.reshape(49,1),prob_set)\n posteri_prob=prob/ubm_value_set\n return posteri_prob\n\n\ndef mu_model(posteri_prob,b_train):\n #b_train: features*frames\n #posteri_prob:models*frames\n value_temp=np.dot(posteri_prob,b_train.T)\n new_mu=np.multiply((1/np.sum(posteri_prob,axis=1)),value_temp.T)\n return new_mu.T\n\ndef cov_model(posteri_prob,b_train,new_mu,T_value):\n #new_mu: models*features\n #b_train: features*frames\n #posteri_prob:models*frames\n cov_set =[]\n #calculate mu*mu.T\n for k in range(K_value):\n mu_temp=np.dot(new_mu[k,:].reshape(-1,1),new_mu[k,:].reshape(1,-1))\n #print(mu_temp.shape)\n value_temp=1/np.sum(posteri_prob[k,:])\n sum_temp=0\n for t in range(T_value):\n b_temp=np.dot(b_train[:,t].reshape(-1,1),b_train[:,t].reshape(1,-1))\n #print(b_temp.shape)\n sum_temp+=posteri_prob[k,t]*b_temp\n #print(np.diag(value_temp*sum_temp-mu_temp).shape)\n cov_set.append(np.diag(np.diag(value_temp*sum_temp-mu_temp)))\n cov_set=np.array(cov_set)\n return cov_set\n\ndef weight_model(posteri_prob,T_value):\n return (1/T_value)*np.sum(posteri_prob,axis=1)\n\ndef adapted_mode(posteri_prob,new_mu,ubm_means,new_cov,ubm_var,new_weight,ubm_weights,gamma_UBM):\n \n #caculate alpha\n alpha = np.sum(posteri_prob,axis=1)/(gamma_UBM+np.sum(posteri_prob,axis=1))\n\n #caculate the adapted mean\n new_mu_adapted=np.multiply(alpha,new_mu.T)+np.multiply((1-alpha),ubm_means.T)\n\n #calculate adapted variance \n new_cov_adapted=np.multiply(alpha,new_cov.T)+np.multiply((1-alpha),ubm_var.T)\n #calculate adapted mean\n new_weight_adapted=np.multiply(alpha,new_weight)+np.multiply((1-alpha),ubm_weights.ravel())\n return 
new_mu_adapted.T,new_cov_adapted.T,new_weight_adapted.ravel()\n\n","sub_path":"task_2_version_2/Speaker_model.py","file_name":"Speaker_model.py","file_ext":"py","file_size_in_byte":3124,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"233945790","text":"import sqlite3\nfrom urllib.request import pathname2url\nfrom datetime import datetime, timezone\n\n\nclass Database:\n\n \"\"\"\n check if the database exsits already\n \"\"\"\n @staticmethod\n def check_db_connection():\n try:\n con = sqlite3.connect('file:sensehat.db?mode=rw', uri=True)\n return con\n except sqlite3.OperationalError:\n Database.create_tables()\n con = sqlite3.connect('sensehat.db')\n return con\n\n \"\"\"\n creates tables if the database doesn't exist\n \"\"\"\n @staticmethod\n def create_tables():\n con = sqlite3.connect('sensehat.db')\n with con:\n cur = con.cursor()\n cur.execute(\"\"\"CREATE TABLE SENSEHAT_data (\n timestamp DATETIME,\n temp NUMERIC,\n humidity NUMERIC\n )\"\"\")\n\n cur.execute(\"\"\"CREATE TABLE NOTIFICATION_data(\n timestamp DATETIME\n )\"\"\")\n print('Missing Tables, tables created!')\n\n \"\"\"\n logs temperature and humidity data\n \"\"\"\n @staticmethod\n def log_temp_hum_data(timestamp, temp, humidity):\n conn = Database.check_db_connection()\n curs = conn.cursor()\n curs.execute(\"\"\"INSERT INTO SENSEHAT_data\n values((?), (?), (?))\"\"\", (timestamp, temp, humidity,))\n conn.commit()\n conn.close()\n print('Logged data in DB')\n\n \"\"\"\n logs notification data\n \"\"\"\n @staticmethod\n def log_notification_data(timestamp):\n conn = Database.check_db_connection()\n curs = conn.cursor()\n curs.execute(\"\"\"INSERT INTO NOTIFICATION_data\n values((?))\"\"\", (timestamp,))\n conn.commit()\n conn.close()\n print('Logged notification in DB')\n\n \"\"\"\n Gets all the temperature and humidity data from database\n \"\"\"\n @staticmethod\n def get_all_sensehat_data():\n conn = Database.check_db_connection()\n curs = conn.cursor()\n curs.execute(\"\"\"SELECT * FROM SENSEHAT_data\n ORDER BY timestamp ASC\"\"\")\n sensehat_data = curs.fetchall()\n conn.close()\n print('Retrieved all Data')\n return sensehat_data\n\n \"\"\"\n Gets all the temperature data from database\n \"\"\"\n @staticmethod\n def get_all_temperature_data():\n conn = Database.check_db_connection()\n curs = conn.cursor()\n curs.execute(\"\"\"SELECT temp FROM SENSEHAT_data\n ORDER BY timestamp ASC\"\"\")\n data = curs.fetchall()\n conn.close()\n print('Retrieved all Temperature Data')\n return data\n\n \"\"\"\n Gets all the timestamp data from database\n \"\"\"\n @staticmethod\n def get_all_timestamp_data():\n conn = Database.check_db_connection()\n curs = conn.cursor()\n curs.execute(\"\"\"SELECT timestamp FROM SENSEHAT_data\n ORDER BY timestamp ASC\"\"\")\n data = curs.fetchall()\n conn.close()\n print('Retrieved all timestamp Data')\n return data\n\n \"\"\"\n Gets all the humidity data from database\n \"\"\"\n @staticmethod\n def get_all_humidity_data():\n conn = Database.check_db_connection()\n curs = conn.cursor()\n curs.execute(\"\"\"SELECT humidity FROM SENSEHAT_data\n ORDER BY timestamp ASC\"\"\")\n data = curs.fetchall()\n conn.close()\n print('Retrieved all Humidity Data')\n return data\n\n \"\"\"\n checks if the notification has been sent already for a given date\n (date format '2019-03-25')\n \"\"\"\n @staticmethod\n def has_notified(time):\n # gets the last notification sent from database\n last_notify = Database.get_last_notification()\n if last_notify is None:\n 
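# no notification row has ever been logged, so today counts as not-yet-notified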
print(\"Has not notified today!\")\n return False\n else:\n # converts utc time to local time\n local_timestamp = Database.get_local_time(last_notify)\n # convert local timestamp to local date\n local_date = Database.get_date_from_timestamp(local_timestamp[0])\n current_date = Database.get_date_from_timestamp(str(time))\n if local_date == current_date:\n print(\"Has notified today!\")\n return True\n else:\n print(\"Has not notified today!\")\n return False\n\n @staticmethod\n def get_last_notification():\n if not Database.is_notification_db_empty():\n conn = Database.check_db_connection()\n curs = conn.cursor()\n curs.execute(\"\"\"SELECT * FROM NOTIFICATION_data\n ORDER BY timestamp DESC LIMIT 1\n \"\"\")\n last = curs.fetchone()\n conn.close()\n return last\n else:\n return None\n\n @staticmethod\n def is_notification_db_empty():\n conn = Database.check_db_connection()\n curs = conn.cursor()\n curs.execute(\"\"\"SELECT * FROM NOTIFICATION_data\"\"\")\n data = curs.fetchone()\n conn.close()\n if data is None:\n return True\n else:\n return False\n\n @staticmethod\n def get_date_from_timestamp(timestamp):\n return timestamp.split(' ', 1)[0]\n\n @staticmethod\n def get_local_time(timestamp):\n return timestamp\n","sub_path":"database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":5384,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"473156885","text":"import numpy as np\nfrom sklearn.ensemble import ExtraTreesRegressor as SKExtraTreesRegressor\n\nfrom evalml.model_family import ModelFamily\nfrom evalml.pipelines import ExtraTreesRegressor\nfrom evalml.problem_types import ProblemTypes\n\n\ndef test_model_family():\n assert ExtraTreesRegressor.model_family == ModelFamily.EXTRA_TREES\n\n\ndef test_problem_types():\n assert ProblemTypes.REGRESSION in ExtraTreesRegressor.supported_problem_types\n assert len(ExtraTreesRegressor.supported_problem_types) == 1\n\n\ndef test_et_parameters():\n clf = ExtraTreesRegressor(n_estimators=20, max_features=\"auto\", max_depth=5, random_state=2)\n expected_parameters = {\n \"n_estimators\": 20,\n \"max_features\": \"auto\",\n \"max_depth\": 5,\n \"min_samples_split\": 2,\n \"min_weight_fraction_leaf\": 0.0,\n \"n_jobs\": -1\n }\n assert clf.parameters == expected_parameters\n\n\ndef test_fit_predict(X_y_regression):\n X, y = X_y_regression\n\n sk_clf = SKExtraTreesRegressor(max_depth=6, random_state=0)\n sk_clf.fit(X, y)\n y_pred_sk = sk_clf.predict(X)\n\n clf = ExtraTreesRegressor()\n clf.fit(X, y)\n y_pred = clf.predict(X)\n\n np.testing.assert_almost_equal(y_pred, y_pred_sk, decimal=5)\n\n\ndef test_feature_importance(X_y_regression):\n X, y = X_y_regression\n\n clf = ExtraTreesRegressor()\n sk_clf = SKExtraTreesRegressor(max_depth=6, random_state=0)\n sk_clf.fit(X, y)\n sk_feature_importance = sk_clf.feature_importances_\n\n clf.fit(X, y)\n feature_importance = clf.feature_importance\n\n np.testing.assert_almost_equal(sk_feature_importance, feature_importance, decimal=5)\n","sub_path":"evalml/tests/component_tests/test_et_regressor.py","file_name":"test_et_regressor.py","file_ext":"py","file_size_in_byte":1627,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"356966548","text":"import re\n\nglobalVarDict = {}\nintPattern = re.compile('int')\nfloatPattern = re.compile('float')\n\n# Разбивает текст программы на список слов\ndef clearText(text):\n result = list(re.findall(r'\\w+|\\W+', text))\n space = result.count(\" \")\n for 
time in range(0, space):\n string = \" \"\n result.remove(string)\n for elem in range(0,len(result)):\n result[elem] = result[elem].replace(\" \", \"\")\n return result\n\n# Находит переменный в тексте. Создается словарь Название: Тип, Значение (если есть)\n# Распознаваемые типы - int и float\n# Ошибка при попытке задать значение перменной без типа\ndef newVar(name,type, value = None):\n if value:\n print (\"Новая перемнная \" + name + \" типа \" + type + \" значения \" + str(value))\n else:\n print (\"Новая перемнная \" + name + \" типа \" + type)\n global globalVarDict\n varInfo = [type]\n if value:\n checkType(type, value, name)\n varInfo.append(value)\n globalVarDict[name] = varInfo\n\n# Перезапись уже существующей переменной\n# Ошибка при попытке задать значение не соответствующее типу переменной\ndef rewriteVar(name, value):\n global globalVarDict\n if list(dict.keys(globalVarDict)).count(name) == 1:\n varInfo = globalVarDict[name]\n if varInfo[1] != value:\n print(\"Старая перемнная \" + name + \" с новым зачение \" + value)\n checkType(varInfo[0], value, name)\n if len(varInfo)>1:\n varInfo[1] = value\n else:\n varInfo.append(value)\n globalVarDict[name] = varInfo\n else:\n print(\"Ошибка типа: Попытка задать значение несуществующей переменной \" + name + \"\\n\")\n exit(0)\n\n# Проверка соответсвия типа переменной и типа вводимых значений\ndef checkType(type, value, name):\n if type == \"int\":\n if re.search(\"_\", value):\n print(\"Ошибка типа: Попытка присвоить переменной \" + name + \" типа int значение типа flout \"+ value + \"\\n\")\n exit(0)","sub_path":"baseTextHandler.py","file_name":"baseTextHandler.py","file_ext":"py","file_size_in_byte":2452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"73162270","text":"import time\nimport signal\nimport sys\nimport atexit\nimport numpy as np\nfrom gym import Env as openAIGym, spaces\nfrom tqdm import tqdm\n\n\nfrom iroko_traffic import TrafficGen\nfrom iroko_state import StateManager\nfrom factories import TopoFactory\n\n\nclass BaseEnv(openAIGym):\n WAIT = 0.0 # amount of seconds the agent waits per iteration\n ACTION_MIN = 0.01\n ACTION_MAX = 1.0\n __slots__ = [\"conf\", \"topo\", \"traffic_gen\", \"state_man\", \"steps\",\n \"reward\", \"progress_bar\", \"killed\",\n \"input_file\", \"output_dir\", \"start_time\"]\n\n def __init__(self, conf):\n self.conf = conf\n # initialize the topology\n self.topo = self._create_topo(conf)\n # initialize the traffic generator and state manager\n self.traffic_gen = TrafficGen(self.topo, conf[\"transport\"])\n self.state_man = StateManager(self.topo, conf)\n self._set_gym_spaces()\n\n # set up variables for the progress bar\n self.steps = 0\n self.reward = 0\n self.reward_2 = 0\n # self.progress_bar = tqdm(total=self.conf[\"iterations\"], leave=False)\n # self.progress_bar.clear()\n\n # Finally, initialize traffic\n self.input_file = None\n self.output_dir = None\n self.set_traffic_matrix(conf[\"tf_index\"])\n self.start_traffic()\n self.start_time = time.time()\n\n # handle unexpected exits scenarios gracefully\n print(\"Registering signal handler.\")\n self.killed = False\n signal.signal(signal.SIGINT, self._handle_interrupt)\n signal.signal(signal.SIGTERM, self._handle_interrupt)\n atexit.register(self.kill_env)\n\n def _create_topo(self, conf):\n topo_options = []\n if \"parallel_envs\" in conf.keys():\n topo_options.append(\"parallel_envs\")\n topo_options.append(conf[\"agent\"].lower())\n return TopoFactory.create(conf[\"topo\"], 
topo_options)\n\n def _set_gym_spaces(self):\n # set configuration for the gym environment\n num_ports = len(self.topo.get_sw_ports())\n num_features = self.state_man.get_feature_length()\n num_actions = len(self.topo.host_ctrl_map)\n print('-----', num_ports, num_features, num_actions)\n self.action_space = spaces.Box(\n low=self.ACTION_MIN, high=self.ACTION_MAX,\n dtype=np.float32, shape=(num_actions,))\n self.observation_space = spaces.Box(\n low=-np.inf, high=np.inf, dtype=np.int64,\n shape=(num_ports * num_features,))\n\n def set_traffic_matrix(self, index):\n traffic_files = self.topo.TRAFFIC_FILES\n traffic_file = traffic_files[index]\n self.input_file = '%s/%s/%s' % (\n self.conf[\"input_dir\"], self.conf[\"topo\"], traffic_file)\n self.output_dir = '%s/%s' % (self.conf[\"output_dir\"], traffic_file)\n\n def step(self, action):\n self.steps = self.steps + 1\n # self.progress_bar.set_postfix_str(s=\"%.3f reward\" % self.reward)\n # self.progress_bar.update(1)\n\n def reset(self):\n print(\"Resetting environment...\")\n if self.is_traffic_proc_alive():\n self.state_man.reset()\n self.traffic_gen.stop_traffic()\n\n self.traffic_gen.start_traffic(self.input_file, self.output_dir)\n print('-----------------shape', self.observation_space.shape)\n return np.zeros(self.observation_space.shape)\n\n def render(self, mode='human'):\n raise NotImplementedError(\"Method render not implemented!\")\n\n def _handle_interrupt(self, signum, frame):\n print(\"\\nEnvironment: Caught interrupt\")\n self.kill_env()\n sys.exit(1)\n\n def kill_env(self):\n if self.killed:\n print(\"Chill, I am already cleaning up...\")\n return\n self.killed = True\n # self.progress_bar.close()\n if hasattr(self, 'state_man'):\n self.state_man.terminate()\n if hasattr(self, 'traffic_gen'):\n print(\"Stopping traffic\")\n self.traffic_gen.stop_traffic()\n if hasattr(self, 'topo'):\n self.topo.delete_topo()\n print(\"Done with destroying myself.\")\n\n def get_topo(self):\n return self.topo\n\n def is_traffic_proc_alive(self):\n return self.traffic_gen.traffic_is_active()\n\n def start_traffic(self):\n self.traffic_gen.start_traffic(self.input_file, self.output_dir)\n","sub_path":"dc_gym/env_base.py","file_name":"env_base.py","file_ext":"py","file_size_in_byte":4403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"504027122","text":"import cv2\nfrom matplotlib import pyplot as plt\nimport numpy as np\nfrom scipy.spatial import distance as dist\nfrom imutils import perspective\nfrom imutils import contours\nimport imutils\n\n\npath = \"/home/rgukt/Desktop/criotam/friut/shape/WhatsApp Image 2019-03-23 at 2.40.01 PM.jpeg\"\noriginal = cv2.imread(path)\n\ngray = cv2.imread(path,0)\n\npixels_per_inch = None\n\n \ndef midpoint(ptA, ptB):\n\treturn ((ptA[0] + ptB[0]) * 0.5, (ptA[1] + ptB[1]) * 0.5)\n\n\n\nh,w = gray.shape\n\nthresh,bin_image = cv2.threshold(gray,0,255,cv2.THRESH_OTSU)\nkernel = np.ones((5,5))\n\nbin_image = bin_image-255\nerotion = cv2.erode(bin_image,kernel)\n\ndilation = cv2.dilate(erotion,kernel)\n\n\n\n_,cnts,_= cv2.findContours(dilation,cv2.RETR_TREE,cv2.CHAIN_APPROX_NONE)\n\n#cnts = imutils.grab_contours(cnts)\n\n(cnts, _) = contours.sort_contours(cnts)\n\nwidth = 2.2 ## in inches\n\nprint(len(cnts))\nfor c in cnts:\n\tif cv2.contourArea(c)<20000:\n\t\tcontinue\n\n\tbox = cv2.minAreaRect(c) ## will get the center,width,height,angle of rotations\n\tbox = cv2.boxPoints(box) ## will get the corners\n\tbox = np.array(box, dtype=\"int\")\n\tbox = 
perspective.order_points(box)\n\n\n\tcv2.drawContours(original,[box.astype(\"int\")],-1,(0,255,0),10)\n\tfor (x, y) in box:\n\t\tcv2.circle(original, (int(x), int(y)), 5, (0, 0, 255), -1) ## draw the corners\n\t(topleft, topright, bottomright, bottomleft) = box ## top left, top right, bottom right, bottom left\n\t\n\t(topleft_toprightX, topleft_toprightY) = midpoint(topleft, topright) ## midpoint of top row of rectangle\n\t(bottomleft_bottomrightX, bottomleft_bottomrightY) = midpoint(bottomleft, bottomright) ## midpoint of bottom row of rectangle\n\n\t(topleft_bottomleftX, topleft_bottomleftY) = midpoint(topleft, bottomleft) ## mid point of left column of rectangle\n\t(topright_bottomrightX, topright_bottomrightY) = midpoint(topright, bottomright) ## mid point of right column of rectangle\n\n\tcv2.line(original, (int(topleft_toprightX), int(topleft_toprightY)), (int(bottomleft_bottomrightX), int(bottomleft_bottomrightY)), ## line from top mid to bottom mid\n\t\t(255, 0, 255), 2)\n\tcv2.line(original, (int(topleft_bottomleftX), int(topleft_bottomleftY)), (int(topright_bottomrightX), int(topright_bottomrightY)), ## line from left mid to right mid\n\t\t(255, 0, 255), 2)\n\tdA = dist.euclidean((topleft_toprightX, topleft_toprightY), (bottomleft_bottomrightX, bottomleft_bottomrightY)) ## height of the rectangle\n\tdB = dist.euclidean((topleft_bottomleftX, topleft_bottomleftY), (topright_bottomrightX, topright_bottomrightY)) ## width of the rectangle\n\n\tif pixels_per_inch is None:\n\t\tpixels_per_inch = dB / width ## reference size\n\n\tdimA = dA / pixels_per_inch ## height\n\tdimB = dB / pixels_per_inch ## width\n\n\tcv2.putText(original, \"{:.1f}in\".format(dimA),\n\t\t(int(topleft_toprightX - 15), int(topleft_toprightY - 10)), cv2.FONT_HERSHEY_SIMPLEX,\n\t\t0.65, (255, 255, 255), 3)\n\tcv2.putText(original, \"{:.1f}in\".format(dimB),\n\t\t(int(topright_bottomrightX ), int(topright_bottomrightY)), cv2.FONT_HERSHEY_SIMPLEX,\n\t\t0.65, (255, 0, 0), 3)\n\nplt.imshow(original)\nplt.show()\n\n\n\n\n","sub_path":"object_size/size.py","file_name":"size.py","file_ext":"py","file_size_in_byte":3182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"540235050","text":"\n#!/usr/bin/env python\n'''\nJob submission for quest\n'''\n#This import is necessary so that we can modify the permissions on the\n#submission script\nimport os\nimport numpy as np\nimport subprocess\nfrom itertools import product\n# Current path\ncurrent_path = os.path.abspath(os.path.join(''))\n\n\nbase_path = os.path.abspath(os.path.join( '/projects', 'p30657','movie_propagation_storage', 'result', 'seed_sampling', 'test', 'contagion'))\nspecific_path = os.path.join('projection', 'seq')\n# Path to save bash file\nbash_path = os.path.abspath(os.path.join(base_path, 'bash_save'))\nif not os.path.exists(bash_path ):\n    os.makedirs( bash_path)\n\n# Path to save output file for bash job\no_path = os.path.abspath(os.path.join( base_path, 'out', specific_path))\nif not os.path.exists(o_path ):\n    os.makedirs( o_path)\n\n\n# Path to save error file for bash job\ne_path = os.path.abspath(os.path.join(base_path, 'error', specific_path))\nif not os.path.exists(e_path ):\n    os.makedirs( e_path)\n\n# Path to data file \nd_path = os.path.abspath(os.path.join('/projects', 'p30657','movie_propagation_storage', 'data', 'raw_data'))\nd_path = os.path.join(d_path, 'movies.json')\n\nif not os.path.exists(d_path ):\n    raise ValueError\n\n# Path to save output file for result\nr_path = os.path.abspath(os.path.join(base_path, specific_path))
\n\nif not os.path.exists(r_path ):\n    os.makedirs(r_path)\n\n\n\nprogramname = 'sequential_contagion.py'\nbelief_type = 'empirical'\n# pdtna = [np.arange(0.1,1.1,0.01), [1.0], [1.0], list(range(10)), [1, 2, 5]]\npdtna = [np.arange(0.11, 0.20, 0.01), [1.0], [1.0], [0], [1,2,5,10]]\npdtna_list = list(product(*pdtna))\n\nfor p, d, t, n, a in pdtna_list:\n    date = subprocess.Popen('date', stdout=subprocess.PIPE, shell=True)\n    (datetime, err) = date.communicate()\n    print ('Time process ran', datetime)\n    print ('\\t Parameter p is equal to {:.2f}'.format(p))\n    print ('\\t Parameter d is equal to {:.2f}'.format(d))\n    print ('\\t Parameter t is equal to {:.2f}'.format(t))\n    print ('\\t Parameter n is equal to {}'.format(n))\n    print ('\\t Parameter a is equal to {}'.format(a))\n    with open(bash_path + \"/job_script_proj_p{}d{}t{}n{}a{}.sh\".format(int(100*p), int(100*d), int(100*t), int(n), int(a)), 'w') as queue_out:\n        queue_out.write(\n\"\"\"\n#!/bin/bash\n#MSUB -N contagion_jobscript_projection\n#MSUB -A b1022\n\n# resource list \n#MSUB -l nodes=1:ppn=1\n#MSUB -l walltime=168:00:00\n#MSUB -q buyin\n\n\n# resource list \n#MSUB -o {}\n#MSUB -e {}\n\npwd\nmodule load python/anaconda3\nsource activate movie_network\ncd\ncd {}\npython {} {} {} --belief_type {} -p {} -d {} -t {} -n {} -a {}\n\"\"\".format(o_path, e_path, current_path, programname, d_path, r_path, belief_type, p, d, t, n, a))\n\n    queue_out.close()\n    os.system(\"msub {}/job_script_proj_p{}d{}t{}n{}a{}.sh\".format(bash_path, int(100*p), int(100*d), int(100*t), int(n), int(a)))\n","sub_path":"script/model/msub_job_projected_sequential.py","file_name":"msub_job_projected_sequential.py","file_ext":"py","file_size_in_byte":2916,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"264294599","text":"import requests\nimport json\nimport shutil\nfrom krtc import KerberosTicket\nimport argparse\nfrom pathlib import Path\nimport os\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--experiment', help='experiment name', type=str, default=os.environ.get('EXPERIMENT', ''))\n# NOTE: argparse's type=bool treats any non-empty string (even '0') as True, so parse the 0/1 flags as ints\nparser.add_argument('--psana', help='Setup psana job', type=int, default=0)\nparser.add_argument('--ffb', help='Setup ffb job', type=int, default=0)\nparser.add_argument('--queue', help='Queue on the ffb', type=str, default='ffbh3q')\nargs = parser.parse_args()\n\nexp = args.experiment\nhutch = exp[:3].lower()\nFFB_BASE = Path(\"/cds/data/drpsrcf/{}/{}/scratch\".format(hutch, exp))\nPSANA_BASE = Path(\"/cds/data/psdm/{}/{}\".format(hutch, exp))\n\nif args.psana and not args.ffb:\n    job_def = {\n        'name': 'smd',\n        'executable': str(PSANA_BASE / 'results/smalldata_tools/arp_scripts/submit_smd.sh'),\n        'trigger': 'MANUAL',\n        'location': 'SLAC',\n        'parameters': '--queue psanaq --norecorder --postRuntable --cores 12 --wait' \n    }\nelif args.ffb:\n    job_def = {\n        'name': 'smd',\n        'executable': str(FFB_BASE / 'smalldata_tools/arp_scripts/submit_smd.sh'),\n        'trigger': 'START_OF_RUN',\n        'location': 'SRCF_FFB',\n        'parameters': '--queue {} --norecorder --postRuntable --cores 60 --wait'.format(args.queue)\n    }\nelse:\n    job_def = None\n\nif job_def is not None:\n    krbheaders = KerberosTicket('HTTP@pswww.slac.stanford.edu').getAuthHeaders()\n    ws_url = 'https://pswww.slac.stanford.edu/ws-kerb/lgbk/lgbk/{}/ws/create_update_workflow_def'.format(exp)\n    r = requests.post(ws_url, headers=krbheaders, json=job_def)\n    r.raise_for_status()\n    print('\\nJOB CREATION LOG: {}'.format(r.json()))
","sub_path":"examples/setup_scripts/make_arp_jobs.py","file_name":"make_arp_jobs.py","file_ext":"py","file_size_in_byte":1726,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"22816643","text":"#!/usr/bin/python3\n'''\ntry:\n    nota1 = float (input('enter the first grade: '))\n    nota2 = float (input('enter the second grade: '))\nexcept Exception as e:\n    print('Error: %s' % e)\n    exit()\n'''\nqtdNotas = int(input('Enter the number of grades to evaluate'))\n\nmedia = 0\n\nfor x in range(qtdNotas):\n    media += float(input('enter a grade:'))\n    \n\nmedia = media / qtdNotas\n\nif media >= 7:\n    result = 'passed'\nelif media > 3:\n    result = 'make-up exam'\nelse:\n    result = 'failed'\n\nprint ('result: {} average: {}'.format(result, media))","sub_path":"aula02/mediaTry.py","file_name":"mediaTry.py","file_ext":"py","file_size_in_byte":555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"75584993","text":"import os\nimport sys\nfrom sqlalchemy import Column, ForeignKey, Integer, String\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.orm import relationship\nfrom sqlalchemy import create_engine\nfrom eralchemy import render_er\n\nBase = declarative_base()\n\nclass Follower(Base):\n    __tablename__=\"follower\"\n    ID=Column(Integer,primary_key=True)\n    user_from_id= Column (Integer,nullable=False)\n    user_to_id= Column(Integer,ForeignKey(\"user.id\"),nullable=False)\n    user=relationship(\"User\")\n\nclass User(Base):\n    __tablename__ = \"user\"\n    # Here we define columns for the table person\n    # Notice that each column is also a normal Python instance attribute.\n    id = Column(Integer, primary_key=True)\n    username = Column(String, nullable=False)\n    firstname= Column (String,nullable=False)\n    lastname= Column(String,nullable=False)\n    email= Column(String,nullable=False)\n    follower=relationship(\"Follower\")\n    post=relationship(\"Post\")\n    comment=relationship(\"Comment\")\n\nclass Post(Base):\n    __tablename__=\"post\"\n    id= Column(Integer,primary_key=True)\n    user_id=Column(Integer,ForeignKey(\"user.id\"),nullable=False)\n    user=relationship(\"User\")\n    media=relationship(\"Media\")\n\nclass Media(Base):\n    __tablename__=\"media\"\n    id = Column(Integer, primary_key=True)\n    type=Column(String,nullable=False)\n    url= Column(String,nullable=False)\n    post_id=Column(Integer, ForeignKey(\"post.id\"),nullable=False)\n    post=relationship(\"Post\")\n\nclass Comment(Base):\n    __tablename__=\"comment\"\n    id = Column(Integer, primary_key=True)\n    comment_text=Column(String, nullable=False)\n    author_id=Column(Integer, ForeignKey(\"user.id\"),nullable=False)\n    post_id=Column(Integer,ForeignKey(\"post.id\") ,nullable=False)\n    user=relationship(\"User\")\n\n\n\n# class Address(Base):\n#     __tablename__ = 'address'\n#     # Here we define columns for the table address.\n#     # Notice that each column is also a normal Python instance attribute.\n#     id = Column(Integer, primary_key=True)\n#     street_name = Column(String(250))\n#     street_number = Column(String(250))\n#     post_code = Column(String(250), nullable=False)\n#     person_id = Column(Integer, ForeignKey('person.id'))\n#     person = relationship(Person)\n\ndef to_dict(self):\n    return {}\n\n## Draw from SQLAlchemy base\nrender_er(Base, 'diagram.png')","sub_path":"src/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"55486405","text":"import numpy as np\nimport logging\nimport tensorflow as tf\n\n\nfrom ai4med.common.constants import ImageProperty\nfrom ai4med.common.medical_image import MedicalImage\nfrom ai4med.common.shape_format import ShapeFormat\nfrom ai4med.common.transform_ctx import TransformContext\nfrom ai4med.components.transforms.multi_field_transformer import MultiFieldTransformer\n\n\nclass NumpyReader(object):\n \"\"\"Reads Numpy files.\n\n Args:\n dtype: Type for data to be loaded.\n \"\"\"\n def __init__(self, dtype='f4'):\n self._logger = logging.getLogger(self.__class__.__name__)\n self._dtype = np.dtype(dtype)\n\n def read(self, file_name, shape: ShapeFormat):\n # , *args, **kwargs\n # print(args)\n # print(kwargs)\n assert shape, \"Please provide a valid shape.\"\n assert file_name, \"Please provide a filename.\"\n\n if isinstance(file_name, (bytes, bytearray)):\n file_name = file_name.decode('UTF-8')\n data = np.load(file_name, allow_pickle=True).astype(self._dtype)\n\n assert len(data.shape) == shape.get_number_of_dims(), \\\n \"Dims of loaded data and provided shape don't match.\"\n\n img = MedicalImage(data, shape)\n img.set_property(ImageProperty.ORIGINAL_SHAPE, data.shape)\n img.set_property(ImageProperty.FILENAME, file_name)\n return img\n\n\nclass NumpyLoader(MultiFieldTransformer):\n \"\"\"Load Image from Numpy files.\n\n Args:\n shape (ShapeFormat): Shape of output image.\n dtype : Type for output data.\n \"\"\"\n\n def __init__(self, fields, shape, dtype='f4'):\n MultiFieldTransformer.__init__(self, fields=fields)\n self._dtype = np.dtype(dtype)\n self._shape = ShapeFormat(shape)\n self._reader = NumpyReader(self._dtype)\n\n def transform(self, transform_ctx: TransformContext):\n for field in self.fields:\n file_name = transform_ctx[field]\n transform_ctx.set_image(field, self._reader.read(file_name, self._shape))\n\n return transform_ctx\n\n\nclass NumpyTransformation(MultiFieldTransformer):\n\n def __init__(self, fields, dtype='f4'):\n # fields specifies the names of the image fields in the data dict that you want to do operations\n MultiFieldTransformer.__init__(self, fields)\n self.dtype = np.dtype(dtype)\n\n def transform(self, transform_ctx):\n for field in self.fields:\n\n # get the MedicalImage using field\n img = transform_ctx.get_image(field)\n\n # get_data give us a numpy array of data\n img_np = img.get_data()\n\n # do operations on img_np, which is the image\n img_np = np.clip(img_np / 255 , 0, 1)\n\n # create a new MedicalImage use new_image() method\n # which will carry over the properties of the original image\n result_img = img.new_image(img_np, img.get_shape_format())\n\n # set the image back in transform_ctx\n transform_ctx.set_image(field, result_img)\n \n # print('shape afterpre-txm:', result_img.shape)\n\n return transform_ctx\n\n def is_deterministic(self):\n \"\"\" This is not a deterministic transform.\n\n Returns:\n False (bool)\n \"\"\"\n return False\n","sub_path":"mmars/knee2d/CustomTransformations.py","file_name":"CustomTransformations.py","file_ext":"py","file_size_in_byte":3258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"547755771","text":"from django.shortcuts import render, redirect\nfrom django.views.decorators.http import require_http_methods\nfrom django.core.urlresolvers import reverse\nfrom ims_lti_py.tool_config import ToolConfig\nfrom django.http import HttpResponse\nfrom django.contrib.auth.decorators import login_required\nfrom django.views.decorators.csrf 
import csrf_exempt\nfrom canvas_oauth.oauth import get_oauth_token\n\nfrom canvas_sdk.methods import accounts\nfrom canvas_sdk import RequestContext, client\n\nimport logging\nimport urllib\n\nlogger = logging.getLogger(__name__)\n\nTERMS = {\n '2014-1': 'Fall 2014-2015',\n '2014-2': 'Spring 2014-2015',\n '2014-5': 'Winter 2014-2015',\n '2014-4': 'Full Year 2014-2015',\n '2014-0': 'Summer 2014-2015',\n '2015-1': 'Fall 2015-2016',\n '2015-2': 'Spring 2015-2016',\n '2015-5': 'Winter 2015-2016',\n '2015-4': 'Full Year 2015-2016',\n '2015-0': 'Summer 2015-2016',\n}\n\n# Create your views here.\n\n\n@require_http_methods(['GET'])\ndef index(request):\n logger.info(\"request to index.\")\n return render(request, 'account_courses/index.html')\n\n\n@login_required\n@csrf_exempt\n@require_http_methods(['POST'])\ndef lti_launch(request):\n if request.user.is_authenticated():\n return redirect('ac:main')\n else:\n return render(request, 'account_courses/error.html', {'message': 'Error: user is not authenticated!'}) \n\n\n@login_required\n@require_http_methods(['GET'])\ndef main(request):\n\n # the current account ID is in custom_canvas_account_id\n canvas_api_token = get_oauth_token(request)\n account_id = request.session['LTI_LAUNCH'].get('custom_canvas_account_id')\n\n canvas_api_url = 'https://%s/api' % request.session['LTI_LAUNCH'].get('custom_canvas_api_domain')\n rc = RequestContext(canvas_api_token, canvas_api_url, per_page=15)\n\n search_term = request.GET.get('search_term')\n term_id = request.GET.get('term_id')\n published = request.GET.get('published')\n page_link = request.GET.get('page_link')\n\n logger.debug(\"search term is %s\" % search_term)\n if search_term == '':\n search_term = None\n\n logger.debug(\"term id is %s\" % term_id)\n if term_id:\n if term_id == 'all':\n term_id = None\n else:\n term_id = 'sis_term_id:%s' % term_id\n\n if page_link:\n api_response = client.get(rc, request.GET.get('page_link'))\n else:\n logger.debug('searching for \"%s\" and term \"%s\"' % (search_term, term_id))\n api_response = accounts.list_active_courses_in_account(\n rc, account_id, search_term=search_term, enrollment_term_id=term_id,\n published=published, per_page=12)\n\n logger.debug(api_response.text)\n account_courses = api_response.json()\n page_links = api_response.links\n logger.debug(page_links)\n\n query_params = request.GET.copy()\n\n query_params.pop('page_link', None)\n query_string = urllib.urlencode(query_params)\n\n self_link = reverse('ac:main') + '?' 
+ query_string\n\n    canvas_hostname = request.session['LTI_LAUNCH'].get('custom_canvas_api_domain')\n\n    return render(request, 'account_courses/main.html', {\n        'request': request,\n        'account_courses': account_courses,\n        'page_links': page_links,\n        'search_term': search_term,\n        'terms': TERMS,\n        'term_id': term_id,\n        'published': published,\n        'self_link': self_link,\n        'canvas_hostname': canvas_hostname,\n    })\n\n\n@require_http_methods(['GET'])\ndef tool_config(request):\n\n    if request.is_secure():\n        host = 'https://' + request.get_host()\n    else:\n        host = 'http://' + request.get_host()\n\n    url = host + reverse('ac:lti_launch')\n\n    lti_tool_config = ToolConfig(\n        title='Account Courses Report',\n        launch_url=url,\n        secure_launch_url=url,\n    )\n    # this is how to tell Canvas that this tool provides a course navigation link:\n    account_nav_params = {\n        'enabled': 'true',\n        # optionally, supply a different URL for the link:\n        # 'url': 'http://library.harvard.edu',\n        'text': 'Courses in this account',\n    }\n    lti_tool_config.set_ext_param('canvas.instructure.com', 'account_navigation', account_nav_params)\n    lti_tool_config.set_ext_param('canvas.instructure.com', 'privacy_level', 'public')\n    lti_tool_config.set_ext_param('canvas.instructure.com', 'tool_id', __name__)\n    lti_tool_config.description = 'This LTI tool displays information about the courses in this account.'\n\n    resp = HttpResponse(lti_tool_config.to_xml(), content_type='text/xml', status=200)\n    return resp\n","sub_path":"account_courses/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"249636093","text":"def rec(a, b):\r\n    if a > b:\r\n        print('Square ',b,' by ', b)\r\n        return rec(a-b,b) + rec(a,a)\r\n    elif a bool:\n    \"\"\"Randomly is either true or false.\"\"\"\n    if res.has_children():\n        chance = res['chance', '100']\n        seed = 'a' + res['seed', '']\n    else:\n        chance = res.value\n        seed = 'a'\n\n    # Allow ending with '%' sign\n    chance = srctools.conv_int(chance.rstrip('%'), 100)\n\n    set_random_seed(inst, seed)\n    return random.randrange(100) < chance\n\n\n@make_result_setup('random')\ndef res_random_setup(vmf: VMF, res: Property) -> object:\n    weight = ''\n    results = []\n    chance = 100\n    seed = 'b'\n    for prop in res:\n        if prop.name == 'chance':\n            # Allow ending with '%' sign\n            chance = srctools.conv_int(\n                prop.value.rstrip('%'),\n                chance,\n            )\n        elif prop.name == 'weights':\n            weight = prop.value\n        elif prop.name == 'seed':\n            seed = 'b' + prop.value\n        else:\n            results.append(prop)\n\n    if not results:\n        return None # Invalid!\n\n    weight = conditions.weighted_random(len(results), weight)\n\n    # We also need to execute result setups on all child properties!\n    for prop in results[:]:\n        if prop.name == 'group':\n            for sub_prop in list(prop):\n                Condition.setup_result(vmf, prop.value, sub_prop)\n        else:\n            Condition.setup_result(vmf, results, prop)\n\n    return seed, chance, weight, results\n\n\n@make_result('random')\ndef res_random(inst: Entity, res: Property) -> None:\n    \"\"\"Randomly choose one of the sub-results to execute.\n\n    The `chance` value defines the percentage chance for any result to be\n    chosen. `weights` defines the weighting for each result. Both are\n    comma-separated, matching up with the results following. 
Wrap a set of\n results in a `group` property block to treat them as a single result to be\n executed in order.\n \"\"\"\n # Note: 'global' results like \"Has\" won't delete themselves!\n # Instead they're replaced by 'dummy' results that don't execute.\n # Otherwise the chances would be messed up.\n seed, chance, weight, results = res.value # type: str, float, List[int], List[Property]\n\n set_random_seed(inst, seed)\n if random.randrange(100) > chance:\n return\n\n ind = random.choice(weight)\n choice = results[ind]\n if choice.name == 'nop':\n pass\n elif choice.name == 'group':\n for sub_res in choice:\n should_del = Condition.test_result(\n inst,\n sub_res,\n )\n if should_del is RES_EXHAUSTED:\n # This Result doesn't do anything!\n sub_res.name = 'nop'\n sub_res.value = None\n else:\n should_del = Condition.test_result(\n inst,\n choice,\n )\n if should_del is RES_EXHAUSTED:\n choice.name = 'nop'\n choice.value = None\n\n\n@make_result_setup('variant')\ndef res_add_variant_setup(res: Property) -> object:\n if res.has_children():\n count = srctools.conv_int(res['Number', ''], None)\n if count:\n return conditions.weighted_random(\n count,\n res['weights', ''],\n )\n else:\n return None\n else:\n count = srctools.conv_int(res.value, None)\n if count:\n return list(range(count))\n else:\n return None\n\n\n@make_result('variant')\ndef res_add_variant(inst: Entity, res: Property) -> None:\n \"\"\"This allows using a random instance from a weighted group.\n\n A suffix will be added in the form `_var4`.\n Two properties should be given:\n\n - `Number`: The number of random instances.\n - `Weights`: A comma-separated list of weights for each instance.\n\n Any variant has a chance of weight/sum(weights) of being chosen:\n A weight of `2, 1, 1` means the first instance has a 2/4 chance of\n being chosen, and the other 2 have a 1/4 chance of being chosen. \n The chosen variant depends on the position, direction and name of\n the instance.\n\n Alternatively, you can use `\"variant\" \"number\"` to choose from equally-weighted\n options.\n \"\"\"\n set_random_seed(inst, 'variant')\n conditions.add_suffix(inst, \"_var\" + str(random.choice(res.value) + 1))\n\n\n@make_result('RandomNum')\ndef res_rand_num(inst: Entity, res: Property) -> None:\n \"\"\"Generate a random number and save in a fixup value.\n\n If 'decimal' is true, the value will contain decimals. 'max' and 'min' are\n inclusive. 'ResultVar' is the variable the result will be saved in.\n If 'seed' is set, it will be used to keep the value constant across\n map recompiles. This should be unique.\n \"\"\"\n is_float = srctools.conv_bool(res['decimal'])\n max_val = srctools.conv_float(res['max', 1.0])\n min_val = srctools.conv_float(res['min', 0.0])\n var = res['resultvar', '$random']\n seed = 'd' + res['seed', 'random']\n\n set_random_seed(inst, seed)\n\n if is_float:\n func = random.uniform\n else:\n func = random.randint\n\n inst.fixup[var] = str(func(min_val, max_val))\n\n\n@make_result('RandomVec')\ndef res_rand_vec(inst: Entity, res: Property) -> None:\n \"\"\"A modification to RandomNum which generates a random vector instead.\n\n `decimal`, `seed` and `ResultVar` work like RandomNum. `min_x`, `max_y` etc\n are used to define the boundaries. 
If the min and max are equal that number\n    will always be used instead.\n    \"\"\"\n    is_float = srctools.conv_bool(res['decimal'])\n    var = res['resultvar', '$random']\n\n    set_random_seed(inst, 'e' + res['seed', 'random'])\n\n    if is_float:\n        func = random.uniform\n    else:\n        func = random.randint\n\n    value = Vec()\n\n    for axis in 'xyz':\n        max_val = srctools.conv_float(res['max_' + axis, 0.0])\n        min_val = srctools.conv_float(res['min_' + axis, 0.0])\n        if min_val == max_val:\n            value[axis] = min_val\n        else:\n            value[axis] = func(min_val, max_val)\n\n    inst.fixup[var] = value.join(' ')\n\n\n@make_result_setup('randomShift')\ndef res_rand_inst_shift_setup(res: Property) -> tuple:\n    min_x = res.float('min_x')\n    max_x = res.float('max_x')\n    min_y = res.float('min_y')\n    max_y = res.float('max_y')\n    min_z = res.float('min_z')\n    max_z = res.float('max_z')\n\n    return (\n        min_x, max_x,\n        min_y, max_y,\n        min_z, max_z,\n        'f' + res['seed', 'randomshift']\n    )\n\n\n@make_result('randomShift')\ndef res_rand_inst_shift(inst: Entity, res: Property) -> None:\n    \"\"\"Randomly shift an instance by the given amounts.\n\n    The positions are local to the instance.\n    \"\"\"\n    (\n        min_x, max_x,\n        min_y, max_y,\n        min_z, max_z,\n        seed,\n    ) = res.value  # type: float, float, float, float, float, float, str\n\n    set_random_seed(inst, seed)\n\n    offset = Vec(\n        random.uniform(min_x, max_x),\n        random.uniform(min_y, max_y),\n        random.uniform(min_z, max_z),\n    ).rotate_by_str(inst['angles'])\n\n    origin = Vec.from_str(inst['origin'])\n    origin += offset\n    inst['origin'] = origin\n","sub_path":"src/precomp/conditions/randomise.py","file_name":"randomise.py","file_ext":"py","file_size_in_byte":7459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"196926801","text":"\"\"\"Provides functionality for processing a cursor as part of a DB script\"\"\"\nimport logging\nimport os\n\ndef get_monotonic_time():\n    \"\"\"Get a monotonically increasing timestamp.\n\n    See: http://stackoverflow.com/a/7424304\n\n    Return:\n        A relative timestamp, in seconds.\n    \"\"\"\n    return os.times()[4]\n\ndef process_cursor(cursor, closure, *args, **kwargs):\n    \"\"\"\n    Given an iterable (say, a mongo cursor) and a closure, call that closure on every item in the iterable.\n    The parallel worker-pool version is commented out below, so items are currently processed serially, in cursor order.\n\n    Useful for upgrades that need to touch each document in a database, and don't need an iteration order.\n\n    Your closure MUST return True on success. Anything else is logged and treated as a failure.\n    A closure that throws an exception will fail the upgrade immediately.\n    \"\"\"\n\n    begin = get_monotonic_time()\n\n    # cores = multiprocessing.cpu_count()\n    # pool = multiprocessing.Pool(cores)\n    # logging.info('Iterating over cursor with ' + str(cores) + ' workers')\n\n    # # Launch all work, iterating over the cursor\n    # # Note that this creates an array of n multiprocessing.pool.AsyncResults, where N is table size.\n    # # Memory usage concern in the future? Doesn't seem to be an issue with ~120K records.
\n    # # Could be upgraded later with some yield trickery.\n    # results = [pool.apply_async(closure, (document,)) for document in cursor]\n\n    # # Read the results back, presumably in order!\n    # failed = False\n    # for res in results:\n    # \tresult = res.get()\n    # \tif result != True:\n    # \t\tfailed = True\n    # \t\tlogging.info('Upgrade failed: ' + str(result))\n\n    # logging.info('Waiting for workers to complete')\n    # pool.close()\n    # pool.join()\n\n    logging.info('Processing {} items in cursor ...'.format(cursor.count()))\n\n    failed = False\n    cursor_size = cursor.count()\n    cursor_index = 0.0\n    next_percent = 5.0\n    percent_increment = 5\n    if(cursor_size < 20):\n        next_percent = 25.0\n        percent_increment = 25\n    if(cursor_size < 4):\n        next_percent = 50.0\n        percent_increment = 50\n    for document in cursor:\n        if 100 * (cursor_index / cursor_size) >= next_percent:\n            logging.info('{} percent complete ...'.format(next_percent))\n            next_percent = next_percent + percent_increment\n        result = closure(document, *args, **kwargs)\n        cursor_index = cursor_index + 1\n        if result != True:\n            failed = True\n            logging.info('Upgrade failed: ' + str(result))\n\n    if failed is True:\n        msg = 'One or more closures failed. See above logs.'\n        logging.info(msg)\n        raise Exception(msg)\n\n    end = get_monotonic_time()\n    elapsed = end - begin\n    logging.info('Cursor iteration took ' + ('%.2f' % elapsed))\n\n","sub_path":"bin/process_cursor.py","file_name":"process_cursor.py","file_ext":"py","file_size_in_byte":2869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"573101199","text":"from flask_restful import Resource, reqparse\nfrom flask_jwt import jwt_required\nimport sqlite3\nfrom models.item import ItemModel\n\n\nclass ItemList(Resource):\n\n    def get(self):\n        return {'items': list(map(lambda x: x.json(), ItemModel.query.all()))}\n\n\nclass Item(Resource):\n\n    parser = reqparse.RequestParser()\n    parser.add_argument('price', required=True, type=float, help=\"this can't be blank\")\n    # parser.add_argument('name', required=True, type=str, help=\"this field can't be blank\")\n    parser.add_argument('store_id', required=True, type=int, help=\"Every item needs a store id\")\n\n\n    @jwt_required()\n    def get(self, name):\n        item = ItemModel.find_by_name(name)\n        if item:\n            return item.json()\n        return {'message': 'Item not found'}, 404\n\n    def post(self, name):\n        item = ItemModel.find_by_name(name)\n        if item:\n            return {\"message\": \"An item with this name '{}' already exists\".format(name)}, 400\n\n        req_data = Item.parser.parse_args()\n        item = ItemModel(name, **req_data)\n        try:\n            item.save_to_db()\n        except:\n            return {\"message\": \"An error occurred while inserting the item\"}, 500\n\n        return {'message': 'Item created'}, 201\n\n    def delete(self, name):\n        item = ItemModel.find_by_name(name)\n        if item:\n            item.delete_from_db()\n            return {\"message\": \"Item deleted\"}, 202\n        return {\"message\": \"item with the name '{}' does not exist\".format(name)}, 404\n\n    def put(self, name):\n        data = Item.parser.parse_args()\n        item = ItemModel.find_by_name(name)\n        if item is None:\n            item = ItemModel(name, data['price'], data['store_id'])\n        else:\n            item.price = data['price']\n\n        item.save_to_db()\n\n        return item.json()\n","sub_path":"resources/item.py","file_name":"item.py","file_ext":"py","file_size_in_byte":1824,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"7097830","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import MinMaxScaler\nimport tensorflow as tf\n\nimport plot_utils\n\ntf.reset_default_graph()\n\n# read the data\n# df = pd.read_csv('data/mcelroy_dataclean.csv') # read data set using pandas\n# df = pd.read_csv('data/wilkerson_dataclean.csv') # read data set using pandas\ndf = pd.read_csv('data/ShieldsJHRData.csv') # read data set using pandas\ndf = df.dropna(inplace = False) # Remove all nan entries.\n\nprint('Data summary:\\n')\nprint(df.describe(), '\\n\\n') # Overview of dataset\n\n# subset for train and test and rescale all values\ndf_train, df_test = train_test_split(df, test_size=0.30)\n\n# we want to predict the H, B, and S given Qbf, D50\n# this only works because HBS are highly correlated.\n# y is output and x is input features\n\n# do some normalization\nscaler = MinMaxScaler() # For normalizing dataset\n\n# min max normalization\n# X_train = scaler.fit_transform(df_train.drop(['Bbf.m', 'Hbf.m', 'S'], axis=1).values)\n# y_train = scaler.fit_transform(df_train[['Bbf.m', 'Hbf.m', 'S']].values)\n# X_test = scaler.fit_transform(df_test.drop(['Bbf.m', 'Hbf.m', 'S'], axis=1).values)\n# y_test = scaler.fit_transform(df_test[['Bbf.m', 'Hbf.m', 'S']].values)\n\n# min max log(x) normalization\n# X_train = scaler.fit_transform(np.log10(df_train.drop(['Bbf.m', 'Hbf.m', 'S'], axis=1).values))\n# y_train = scaler.fit_transform(np.log10(df_train[['Bbf.m', 'Hbf.m', 'S']].values))\n# X_test = scaler.fit_transform(np.log10(df_test.drop(['Bbf.m', 'Hbf.m', 'S'], axis=1).values))\n# y_test = scaler.fit_transform(np.log10(df_test[['Bbf.m', 'Hbf.m', 'S']].values))\n# logged = True\n# normed = True\n\n# log(x) normalization\nX_train = (np.log10(df_train.drop(['Bbf.m', 'Hbf.m', 'S'], axis=1).values))\ny_train = (np.log10(df_train[['Bbf.m', 'Hbf.m', 'S']].values))\nX_test = (np.log10(df_test.drop(['Bbf.m', 'Hbf.m', 'S'], axis=1).values))\ny_test = (np.log10(df_test[['Bbf.m', 'Hbf.m', 'S']].values))\nlogged = True\nnormed = False\n\n# no normalization (be sure to turn off below for plotting)\n# X_train = (df_train.drop(['Bbf.m', 'Hbf.m', 'S'], axis=1).values)\n# y_train = (df_train[['Bbf.m', 'Hbf.m', 'S']].values)\n# X_test = (df_test.drop(['Bbf.m', 'Hbf.m', 'S'], axis=1).values)\n# y_test = (df_test[['Bbf.m', 'Hbf.m', 'S']].values)\n# logged = False\n# normed = False\n\n# set up data for mini-batching during training\nbatch_size = 1\nbuffer_size = 15\nds_train = tf.data.Dataset.from_tensor_slices((X_train, y_train)).repeat().batch(batch_size).shuffle(buffer_size)\nit_train = ds_train.make_one_shot_iterator()\nxs, ys = it_train.get_next()\n\n\ndef denormalize(df, norm_data):\n \"\"\"\n Above written function for denormalization of data after normalizing\n this function will give original scale of values.\n \"\"\"\n\n if logged:\n df = np.log10(df[['Bbf.m', 'Hbf.m', 'S']].values)\n else:\n df = df[['Bbf.m', 'Hbf.m', 'S']].values\n\n if normed:\n scl = MinMaxScaler()\n a = scl.fit_transform(df)\n new = scl.inverse_transform(norm_data)\n else:\n new = norm_data\n \n if logged:\n expt = np.power(10, new)\n return expt\n else:\n return new\n\n\ndef nn_model(X_data, input_dim):\n \"\"\"\n nn_model constructs the neural network model. 
\n It can be a 1 layer or 2 layer model, with n_nodes.\n Weights and biases are abberviated as W_1, W_2 and b_1, b_2 \n \"\"\"\n\n n_nodes = 1\n\n # layer 1 multiplying and adding bias then activation function\n W_1 = tf.Variable(tf.random_uniform([input_dim, n_nodes], dtype='float64'))\n b_1 = tf.Variable(tf.zeros([n_nodes], dtype = 'float64'))\n layer_1 = tf.add(tf.matmul(X_data, W_1), b_1)\n layer_1 = tf.nn.relu(layer_1)\n\n # layer 2 multiplying and adding bias then activation function \n # W_2 = tf.Variable(tf.random_uniform([n_nodes, n_nodes], dtype='float64'))\n # b_2 = tf.Variable(tf.zeros([n_nodes], dtype = 'float64'))\n # layer_2 = tf.add(tf.matmul(layer_1, W_2), b_2)\n # layer_2 = tf.nn.relu(layer_2)\n\n # output layer multiplying and adding bias then activation function\n W_O = tf.Variable(tf.random_uniform([n_nodes, 3], dtype = 'float64')) # 3 because there are two outputs\n b_O = tf.Variable(tf.zeros([3], dtype = 'float64'))\n output = tf.add(tf.matmul(layer_1, W_O), b_O)\n # output = tf.add(tf.matmul(layer_2, W_O), b_O)\n\n return output, W_O\n\n\n# the model\noutput, W_O = nn_model(xs, X_train.shape[1])\n\n# mean squared error cost function\n# loss = tf.reduce_sum(tf.square(output - ys))\n# loss = tf.reduce_mean(tf.square(output - ys))\nloss = tf.losses.mean_squared_error(output, ys)\n\n# Gradinent Descent optimiztion just discussed above for updating weights and biases\nlearning_rate = 0.01\ntrain = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)\n# train = tf.train.AdamOptimizer(learning_rate).minimize(loss)\n\n# some other initializations\n_loss_summary = tf.summary.scalar(name='loss summary', tensor=loss)\n# correct_pred = tf.argmax(output, 1)\n# accuracy = tf.losses.mean_squared_error(tf.cast(correct_pred, tf.float32), ys)\n# saver = tf.train.Saver()\n\nc_train = []\nc_test = []\n\nsave_training = False\nwith tf.Session() as sess:\n # Initiate session and initialize all vaiables\n sess.run(tf.global_variables_initializer())\n\n writer = tf.summary.FileWriter(\"log/\", sess.graph)\n\n it = 0\n n_epoch = 10\n n_batch_per_epoch = int( np.floor(X_train.shape[0] / batch_size) )\n for i in range(n_epoch):\n ds_train.shuffle(buffer_size)\n for j in range(n_batch_per_epoch):\n # Run loss and train with each batch\n sess.run([loss, train])\n\n c_train.append(sess.run(loss, feed_dict = {xs:X_train, ys:y_train}))\n c_test.append(sess.run(loss, feed_dict = {xs:X_test, ys:y_test}))\n\n loss_summary = sess.run(_loss_summary)\n writer.add_summary(loss_summary, it)\n\n if save_training:\n \n\n intrain_pred_test = sess.run(output, feed_dict={xs:X_test})\n intrain_pred_train = sess.run(output, feed_dict={xs:X_train})\n\n intrain_y_test = denormalize(df_test, y_test)\n intrain_pred_test = denormalize(df_test, intrain_pred_test)\n intrain_y_train = denormalize(df_train, y_train)\n intrain_pred_train = denormalize(df_train, intrain_pred_train)\n figN = plot_utils.compare_plot(df, df_train, df_test, intrain_pred_train, intrain_pred_test)\n figN.savefig('figures/training/{:04d}.png'.format(it))\n plt.close(figN)\n print('Epoch:', i, ', train loss:', c_train[i*n_batch_per_epoch], ', test loss:', c_test[i*n_batch_per_epoch])\n\n it += 1\n \n print('Epoch:', i, ', train loss:', c_train[i*n_batch_per_epoch], ', test loss:', c_test[i*n_batch_per_epoch])\n\n # finished training\n print('\\nTraining complete.')\n print('Total iterations: ', it)\n print('test loss :', sess.run(loss, feed_dict={xs:X_test, ys:y_test}), '\\n')\n writer.close()\n\n # save the model\n # save_path = 
saver.save(sess, \"log/channel_geom_nn_QDtoHBS.ckpt\")\n\n # predict output of test data after training\n pred_test = sess.run(output, feed_dict={xs:X_test})\n pred_train = sess.run(output, feed_dict={xs:X_train})\n\n # predict for some range\n qlist = np.array([2000, 3000])\n dlist = np.linspace(1, 20, num=19) # np.array([1.0, 5.0, 10, 100])\n bhs = np.empty((qlist.shape[0]*dlist.shape[0], 3))\n dep = 0\n for q in iter(qlist):\n for d in iter(dlist):\n invect = np.log10( np.array([q, d]).reshape(-1, 2) )\n pred_rng = sess.run(output, feed_dict={xs:invect})\n pred_dn = denormalize(df_test, pred_rng)\n bhs[dep, :] = pred_dn\n dep += 1\n bhs = bhs.reshape((dlist.shape[0], 3, -1)) \n\n# denormalize data\ny_test = denormalize(df_test, y_test)\npred_test = denormalize(df_test, pred_test)\ny_train = denormalize(df_train, y_train)\npred_train = denormalize(df_train, pred_train)\n\n\n# plots\nfig1, axes1 = plt.subplots(nrows=1, ncols=2, figsize=(6,4))\naxes1[0].hist([df_train['Qbf.m3s'], df_test['Qbf.m3s']], histtype = 'bar', density = True)\naxes1[0].set_xlabel('Qbf (m3/s)')\naxes1[1].hist([df_train['D50.mm'], df_test['D50.mm']], histtype = 'bar', density = True)\naxes1[1].set_xlabel('D50 (mm)')\nplt.legend(['train', 'test'], loc = 'best')\nfig1.savefig('figures/split.png')\n\nfig2 = plot_utils.compare_plot(df, df_train, df_test, pred_train, pred_test)\nfig2.savefig('figures/compare.png')\n\nfig3, ax3 = plt.subplots(figsize=(6,4))\nax3.plot(np.arange(len(c_train)) / n_batch_per_epoch, np.array(c_train))\nax3.plot(np.arange(len(c_test)) / n_batch_per_epoch, np.array(c_test))\nax3.set_xlabel('epoch')\nax3.set_ylabel('loss')\nplt.legend(['train', 'test'], loc = 'best')\nfig3.savefig('figures/train.png')\n\nfig4, ax4 = plt.subplots(figsize=(8,6))\npd.plotting.scatter_matrix(np.log10(df), ax=ax4)\nfig4.savefig('figures/scatter.png')\n\nfig5, ax5 = plt.subplots()\nax5.matshow(np.log10(df).corr())\nfig5.savefig('figures/corr_mat.png')\n\nfig6, ax6 = plt.subplots(nrows=3, ncols=1, figsize=(6,10))\nax6[0].scatter(df['D50.mm'], df['Hbf.m'])\nfor p in np.arange(bhs.shape[2]):\n ax6[0].plot(dlist, bhs[:, 1, p])\n ax6[0].set_ylabel('depth (m)')\nax6[1].scatter(df['D50.mm'], df['Bbf.m'])\nfor p in np.arange(bhs.shape[2]):\n ax6[1].plot(dlist, bhs[:, 0, p])\n ax6[1].set_ylabel('width (m)')\nax6[2].scatter(df['D50.mm'], df['S'])\nfor p in np.arange(bhs.shape[2]):\n ax6[2].plot(dlist, bhs[:, 2, p])\n ax6[2].set_ylabel('slope (1)')\nax6[2].set_yscale('log')\n# ax6[0].set_xscale('log')\nax6[0].legend(qlist)\n# plt.show()\n\nfig6.savefig('figures/input_test.png')","sub_path":"channel_geom_nn_QDtoHBS.py","file_name":"channel_geom_nn_QDtoHBS.py","file_ext":"py","file_size_in_byte":9770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"496318153","text":"# -*- coding: utf-8 -*-\n##############################################################################\n#\n# NCTR, Nile Center for Technology Research\n# Copyright (C) 2011-2012 NCTR ().\n#\n##############################################################################\n\nfrom openerp.addons.account_voucher.account_voucher import resolve_o2m_operations\nfrom openerp.osv import fields, osv, orm\nfrom openerp.tools.translate import _\nimport netsvc\n\nclass res_company(osv.Model):\n \"\"\" Inherit company model to add field auto_budget to be used in the\n\tcreate confirmation as a condition when it is true to automatically\n\tcheck budget.\n\t\"\"\"\n _inherit = \"res.company\"\n\n _columns = {\n 'auto_budget': 
fields.boolean('Automatic Budget Check for vouchers.'), \n    }\n    _defaults = {\n        'auto_budget': True,\n    }\n\nclass account_voucher(osv.Model):\n\n    _inherit = 'account.voucher'\n\n    _columns = {\n        'state':fields.selection(\n            [('draft','Draft'),\n             ('cancel','Cancelled'),\n             ('proforma','Pro-forma'),\n             ('no_approve','Budget Not Approved'),\n             ('reversed','Reversed'),\n             ('posted','Posted')\n            ], 'Status', readonly=True, size=32, track_visibility='onchange',\n            help=' * The \\'Draft\\' status is used when a user is encoding a new and unconfirmed Voucher. \\\n                        \\n* The \\'Pro-forma\\' when the voucher is in Pro-forma status; the voucher does not have a voucher number yet. \\\n                        \\n* The \\'Budget Not Approved\\' when at least one of the budget confirmations related to this voucher was not approved. \\\n                        \\n* The \\'Posted\\' status is used when the user creates the voucher: a voucher number is generated and voucher entries are created in account \\\n                        \\n* The \\'Reversed\\' when the voucher\\'s move is reversed, which automatically reverses its voucher. \\\n                        \\n* The \\'Cancelled\\' status is used when the user cancels the voucher.'),\n    }\n\n    def _check_analytic_account(self, cr, uid, ids, context=None):\n        \"\"\"\n        Check state of voucher and user_type of account_id\n        \n        @return: boolean\n        \"\"\"\n        for voucher in self.browse(cr, uid, ids, context=context):\n            if voucher.state != \"draft\":\n                for voucher_line in voucher.line_ids:\n                    if voucher.state != \"draft\" and voucher_line.account_id.user_type.analytic_required and not voucher_line.account_analytic_id and voucher_line.amount > 0.0:\n                        return False\n        return True\n\n    def _total_amount_check(self, cr, uid, ids, context=None):\n        \"\"\"\n        Constraint method that doesn't allow the voucher's amount to be zero when the state is not draft, cancel or no_approve\n        \n        @return: boolean\n        \"\"\"\n        for voucher in self.browse(cr, uid, ids, context=context):\n            if voucher.state not in ['draft','cancel','no_approve' ] and voucher.amount==0.0:\n                return False\n        return True\n\n    _constraints = [\n        (_check_analytic_account, _('Some accounts require an analytic account!'), ['account_id','account_analytic_id','amount']),\n        (_total_amount_check, \"Operation is not complete, the total amount shouldn't be zero!\", []), \n    ]\n\n    def unlink(self, cr, uid, ids, context=None):\n        \"\"\"\n        Inherit unlink method to delete all confirmations that belong to the deleted voucher lines\n        \n        @return: super unlink\n        \"\"\"\n        confirmation_ids = self.approved_line(cr, uid, ids, context=context)\n        res = super(account_voucher, self).unlink(cr, uid, ids, context=context)\n        if confirmation_ids:\n            self.pool.get('account.budget.confirmation').unlink(cr, uid, confirmation_ids, context=context)\n        return res\n    \n    def onchange_price(self, cr, uid, ids, line_ids, tax_id, partner_id=False, context=None):\n        \"\"\"\n        Compute the amount from all voucher lines and return it in the voucher amount.\n\n        @param line_ids: list of voucher line ids\n        @param tax_id: list of tax_ids for voucher\n        @param partner_id: set partner_id =False as default\n        @return: result of the super onchange_price, i.e. the total price of the voucher lines\n                 including the tax amount\n        \"\"\"\n        line_pool = self.pool.get('account.voucher.line')\n        line_ids = resolve_o2m_operations(cr, uid, line_pool, line_ids, [\"amount\",\"state\"], context)\n        approve_line_ids = [(0,0,l) for l in line_ids if l.get(\"state\",\"complete\") != \"cancel\"]\n        return super(account_voucher,self).onchange_price(cr, uid, ids, approve_line_ids, tax_id, partner_id, context=context)\n    \n    def create_budget_confirmation(self, cr, uid, ids, context=None):
\n        \"\"\" \n        This method creates a Budget Confirmation for each Voucher Line with an analytic account\n\n        @return: boolean True if any confirmation was created, otherwise False\n        \"\"\"\n        context = context or {}\n        wf_service = netsvc.LocalService(\"workflow\")\n        confirmation_pool = self.pool.get('account.budget.confirmation')\n        currency_pool = self.pool.get('res.currency')\n        new_confirm_id = False\n        flag = False\n        for voucher in self.browse(cr, uid, ids, context=context):\n            if voucher.type in ('purchase','sale'): super(account_voucher,self).compute_tax(cr, uid, [voucher.id], context=context)\n            if voucher.journal_id.type == 'purchase' or 'purchase' in context:\n                for voucher_line in voucher.line_ids:\n                    if voucher_line.account_id and voucher_line.account_id.user_type.analytic_wk:\n                        company_currency = voucher.company_id.currency_id.id\n                        current_currency = voucher.currency_id.id\n                        context_multi_currency = context.copy()\n                        context_multi_currency.update({'date': voucher.date})\n                        total_amount = currency_pool.compute(cr, uid, current_currency, company_currency, voucher_line.total_amount, context=context_multi_currency)\n                        amount = currency_pool.compute(cr, uid, current_currency, company_currency, voucher_line.amount, context=context_multi_currency)\n                        val = {\n                            'reference': voucher.number,\n                            'partner_id': voucher.partner_id.id,\n                            'period_id': voucher.period_id.id,\n                            'general_account_id': voucher_line.account_id.id,\n                            'date': voucher.date,\n                            'analytic_account_id': voucher_line.account_analytic_id and voucher_line.account_analytic_id.id,\n                            'amount': total_amount or amount,\n                            'residual_amount': total_amount or amount,\n                            'type':context.get('type','other'),\n                            'note':voucher_line.name or '/',\n                        }\n                        new_confirm_id = False\n                        if voucher_line.budget_confirm_id:\n                            flag = True\n                            confirmation_pool.write(cr, uid, [voucher_line.budget_confirm_id.id], val, context=context)\n                            new_confirm_id = voucher_line.budget_confirm_id.id\n                        elif not voucher_line.budget_confirm_id:\n                            flag = True\n                            confirm = confirmation_pool.create(cr, uid, val, context=context)\n                            new_confirm_id = int(confirm)\n                            self.pool.get('account.voucher.line').write(cr, uid, [voucher_line.id], {'budget_confirm_id':confirm}, context=context)\n                        if new_confirm_id and voucher.company_id.auto_budget:\n                            confirmation_pool.action_cancel_draft(cr, uid, new_confirm_id, context=context)\n                            wf_service.trg_validate(uid, 'account.budget.confirmation', new_confirm_id, 'complete', cr)\n                            wf_service.trg_validate(uid, 'account.budget.confirmation', new_confirm_id, 'check', cr)\n        return flag\n\n    def approved_line(self, cr, uid, ids, context=None):\n        \"\"\"\n        This method returns the budget confirmations of all voucher lines that have one.\n        \n        @return: list of all budget_confirm_ids for voucher lines \n        \"\"\"\n        return [voucher_line.budget_confirm_id.id for voucher in self.browse(cr, uid, ids, context=context) for voucher_line in voucher.line_ids if voucher_line.budget_confirm_id]\n\n    def cancel_voucher(self, cr, uid, ids, context=None):\n        \"\"\"\n        Object button method which cancels all budget confirmations\n        and changes the voucher state to \"cancel\"\n        \n        @return: super cancel_voucher\n        \"\"\"\n        wf_service = netsvc.LocalService(\"workflow\")\n        for confirmation_id in self.approved_line(cr, uid, ids, context=context):\n            wf_service.trg_validate(uid, 'account.budget.confirmation', confirmation_id, 'cancel', cr)\n        return super(account_voucher, self).cancel_voucher(cr, uid, ids, context=context)\n\n    def confirmation_get(self, cr, uid, ids, context=None):\n        \"\"\"\n        This method gets all budget confirmation ids of the voucher.
\n\n        @return: list of budget confirmation id\n        \"\"\"\n        res = []\n        for voucher in self.browse(cr, uid, ids, context=context):\n            for line in voucher.line_ids:\n                if line.budget_confirm_id:\n                    res.append(line.budget_confirm_id.id)\n        return res\n\n    def test_state(self, cr, uid, ids, mode, context=None):\n        \"\"\"\n        Check voucher line and budget_confirmation state \n        and write the state in the voucher line (approved, not approved, cancelled)\n        depending on the budget confirmation for this line\n\n        @param mode: either 'finished' or 'canceled'\n        @return: Boolean True or False\n        \"\"\"\n        assert mode in ('finished', 'canceled'), _(\"invalid mode for test_state\")\n        finished = True\n        canceled = False\n        notcanceled = False\n        write_approve_ids = []\n        write_cancel_ids = []\n        write_no_approve_ids = []\n        ids = isinstance(ids, list) and ids[0] or ids\n        voucher = self.browse(cr, uid, ids, context=context)\n        if voucher.type in('payment','receipt'):\n            return True\n        for line in voucher.line_ids:\n            if (not line.budget_confirm_id) or (line.budget_confirm_id.state in ['valid','cancel']):\n                if (not line.budget_confirm_id) or (line.budget_confirm_id.state == 'valid' and line.state != 'approve'):\n                    write_approve_ids.append(line.id)\n                elif line.budget_confirm_id.state == 'cancel' and line.state != 'cancel':\n                    write_cancel_ids.append(line.id)\n            else:\n                finished = False\n                if line.budget_confirm_id:\n                    if (line.budget_confirm_id.state == 'unvalid'):\n                        if line.state != 'no_approve':\n                            write_no_approve_ids.append(line.id)\n                    else:\n                        notcanceled = True\n                else:\n                    write_approve_ids.append(line.id)\n        if write_approve_ids:\n            self.pool.get('account.voucher.line').write(cr, uid, list(set(write_approve_ids)) , {'state': 'approve'}, context=context)\n        if write_cancel_ids:\n            self.pool.get('account.voucher.line').write(cr, uid, write_cancel_ids, {'state': 'cancel'},context=context)\n        if write_no_approve_ids:\n            self.pool.get('account.voucher.line').write(cr, uid, write_no_approve_ids, {'state': 'no_approve'}, context=context)\n        if not voucher.operation_type:\n            res = self.onchange_price(cr, uid, [voucher.id], [(4,l.id) for l in voucher.line_ids], voucher.tax_id and [t.id for t in voucher.tax_id] or [], voucher.partner_id, context).get(\"value\",{})\n            #Can't call orm write because of the recursion\n            cr.execute(\"UPDATE account_voucher \\\n                        SET amount=%s, tax_amount=%s WHERE id=%s \",\n                       (res.get(\"amount\"),res.get(\"tax_amount\"), voucher.id))\n        \n        canceled=self.test_cancel(cr, uid, [voucher.id],write_cancel_ids, context=context)\n        if mode == 'finished':\n            return finished\n        elif mode == 'canceled':\n            return canceled\n        if notcanceled:\n            return False\n        return canceled\n\n    def test_cancel(self, cr, uid, ids,cancel_ids, context=None):\n        for voucher in self.browse(cr, uid, ids, context=context):\n            for line in voucher.line_ids:\n                if line.state != 'cancel' and line.id not in cancel_ids:\n                    return False\n        return True\n\nclass account_voucher_line(osv.Model):\n\n    _inherit = 'account.voucher.line'\n\n    def unlink(self, cr, uid, ids, context=None):\n        \"\"\"\n        Inherit unlink method to delete the budget confirmations that belong to the deleted voucher lines.\n        \n        @return: result of the super unlink\n        \"\"\"\n        confirmation_ids = [voucher_line.budget_confirm_id.id for voucher_line in self.browse(cr, uid, ids, context=context) if voucher_line.budget_confirm_id]\n        line = super(account_voucher_line, self).unlink(cr, uid, ids, context=context)\n        if confirmation_ids:\n            self.pool.get('account.budget.confirmation').unlink(cr, uid, confirmation_ids, context=context)\n        return line
return line\n\n _columns = {\n 'name':fields.char('Description', size=256, required=True),\n 'budget_confirm_id': fields.many2one('account.budget.confirmation', 'Confirmation', select=2, ondelete=\"restrict\"),\n 'state':fields.selection([('complete','Waiting for Approve'),('approve','Approved'),('no_approve','Budget Not Approved'),\n ('cancel','Canceled')], 'State', required=True, readonly=True),\n 'total_amount': fields.float('Total Amount'),\n }\n\n _defaults = {\n 'state': 'complete',\n 'name': '/',\n }\n\n def copy(self, cr, uid, ids, default={}, context=None):\n \"\"\"\n Inherit copy method for voucher line \n \n @param default: dictionary of the values of record to be created,\n @return: super method of copy \n \"\"\"\n return super(account_voucher_line, self).copy(cr, uid, ids, default=default, context=context)\n\n def create(self, cr, uid, vals, context=None):\n \"\"\"\n Inherited - create method to be sure that account and voucher company\n are the same.\n\n @return: list creating voucher lines\n \"\"\"\n vals.update({'budget_confirm_id':False})\n if vals.get('account_id',False) and vals.get('voucher_id',False):\n account_company = self.pool.get('account.account').read(cr, uid, vals['account_id'], ['company_id'])['company_id'][0]\n voucher_company = self.pool.get('account.voucher').read(cr, uid, vals['voucher_id'], ['company_id'])['company_id'][0]\n if account_company != voucher_company:\n raise orm.except_orm(_('Entry Error!'), _('The account company is not like the voucher company!'))\n return super(account_voucher_line, self).create(cr, uid, vals, context=context)\n\n\n# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:\n","sub_path":"v_7/Dongola/common/account_voucher_confirmation/account_voucher.py","file_name":"account_voucher.py","file_ext":"py","file_size_in_byte":15462,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"82618842","text":"# grouphandler.py\n# \n# Copyright 2015 Wangolo Joel \n# \n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n# \n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# GNU General Public License for more details.\n# \n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,\n# MA 02110-1301, USA.\n# \n# \n\nfrom keystoneclient import v3\nfrom keystoneclient.v3.client import exceptions \nfrom keystoneclient.v3 import client\n\nclass S8UserGroupManager(v3.groups.GroupManager):\n \"\"\"\n Inherits from v3.groups.GroupManager which is from the keystoneclientv3 or version three.\n Objects extends according to personal needs.\n \"\"\"\n def __init__(self, client=None, description=\"S8 Default Group Description\"):\n \"\"\"\n @param: client --> Authenticated user, or admin.\n \"\"\"\n super(S8UserGroupManager, self).__init__(client=client)\n self.description = description\n \n def list_allgroups(self, in_dict=False):\n \"\"\"\n List and return all groups.\n \"\"\"\n try:\n for group in self.list():\n if(in_dict):\n yield group.to_dict()\n else:\n yield group\n except:\n raise \n \n \n def create_new_group(self, name, description):\n \"\"\"\n Creates a new group based on given name, and\n description.\n \"\"\"\n try:\n self.new = self.create(name=name, description=description)\n return self.new \n except exceptions.Conflict as e:\n # Normally project with that name already exists.\n # attempt a search\n if(self.search_group_byid_or_name(name, by_id=False)):\n return {\"GroupAlreadyExists\":True, \"ResponseCode\":e.http_status, \"ResponseURL\":e.url, \"ProjectName\":name}\n else:\n # Looks like it is a different conflict error which we don't know.\n return e \n #raise OurUnknownError\n \n def search_group_byid_or_name(self, _id_or_name, by_id=True):\n \"\"\"\n Search a given group by either id or name.\n NOTE: We cannot search group by there name,\n only by user id.\n Since there can't be duplicate group name in keystone.\n Then we can attempt to search for, by name.\n NOTE: While searching by name won't say self.get(group), but we shall list all domains\n and then test by name\n \"\"\"\n try:\n if(by_id):\n assert(_id_or_name)\n try:\n return self.get(_id_or_name)\n except exceptions.NotFound as e:\n return e\n else:\n assert(_id_or_name), \"Required domain got nothing instead....\"\n for all_ in self.list_allgroups():\n if all_.name == _id_or_name:\n return all_\n except:\n raise \n \n def purge_group(self, group):\n \"\"\"\n \"\"\"\n pass \n #delete\n \n def retrieve_group(self, group):\n \"\"\"\n \"\"\"\n pass \n #get update\nif __name__==\"__main__\":\n keystone = client.Client(endpoint=\"http://192.168.1.2:5000/v3\", token=\"S8@Wangolo\")\n s8 = S8UserGroupManager(keystone)\n #print s8.create_new_group(\"S8DefaultGroup\", \"This is s8 default user group\")\n \n","sub_path":"Public/devs/s8website/authorizationapi/subsystems/grouphandler.py","file_name":"grouphandler.py","file_ext":"py","file_size_in_byte":3901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"630650581","text":"#!/usr/bin/env python\n# encoding: utf-8\n\"\"\"\nvariant_printer.py\n\nPrint the variants of a results queue to a file.\n\nCreated by Måns Magnusson on 2013-01-17.\nCopyright (c) 2013 __MyCompanyName__. 
All rights reserved.\n\"\"\"\n\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport sys\nimport os\nimport multiprocessing\n\nfrom codecs import open\nfrom tempfile import NamedTemporaryFile\nfrom pprint import pprint as pp\nfrom genmod import warning\n\nclass VariantPrinter(multiprocessing.Process):\n \"\"\"docstring for VariantPrinter\"\"\"\n def __init__(self, task_queue, temp_dir, head, verbosity):\n multiprocessing.Process.__init__(self)\n self.task_queue = task_queue\n self.verbosity = verbosity\n self.file_handles = {}\n self.temp_dir = temp_dir\n self.header = head.header\n \n def run(self):\n \"\"\"Starts the printing\"\"\"\n # Print the results to a temporary file:\n number_of_finished = 0\n proc_name = self.name\n if self.verbosity:\n print(('%s: starting!' % proc_name))\n while True:\n next_result = self.task_queue.get()\n if self.verbosity:\n if self.task_queue.full():\n warning('Printing queue full')\n if next_result is None:\n if self.verbosity:\n print('All variants printed!')\n for chromosome in self.file_handles:\n self.file_handles[chromosome].close()\n break\n \n else:\n for variant_id in next_result:\n variant_chrom = next_result[variant_id]['CHROM']\n print_line = [next_result[variant_id].get(entry, '-') for entry in self.header]\n if variant_chrom in self.file_handles:\n self.file_handles[variant_chrom].write('\\t'.join(print_line) + '\\n')\n else:\n temp_file = NamedTemporaryFile(prefix=variant_chrom+'_', dir=self.temp_dir, delete=False)\n temp_file.close()\n self.file_handles[variant_chrom] = open(temp_file.name, mode='w', encoding='utf-8', errors='replace')\n self.file_handles[variant_chrom].write('\\t'.join(print_line) + '\\n')\n return\n \ndef main():\n pass\n\nif __name__ == '__main__':\n main()\n","sub_path":"genmod/variant_printer.py","file_name":"variant_printer.py","file_ext":"py","file_size_in_byte":2410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"166403428","text":"from matplotlib import pyplot as plt\nfrom matplotlib.patches import Rectangle\n\n\nclass HyperPipesPlot:\n\n def __init__(self, points):\n self.classes_len = len(points)\n self.points = points\n self.colors = ['b', 'g', 'c', 'm', 'k', 'y']\n self.figure = plt.figure()\n\n def __getVisualHyperPipe__(self, x, y, color):\n min_x, max_x = min(x), max(x)\n min_y, max_y = min(y), max(y)\n return Rectangle((min_x, min_y), (max_x - min_x), (max_y - min_y), fill=None, alpha=1, edgecolor=color)\n\n def plot(self):\n currentAxis = plt.gca()\n\n for i in range(self.classes_len):\n x = self.points[i][0]\n y = self.points[i][1]\n rect = self.__getVisualHyperPipe__(x, y, self.colors[i])\n currentAxis.add_patch(rect)\n class_plot, = plt.plot(\n x, y, self.colors[i] + 's', label=\"Class \" + str(i + 1))\n\n plt.title('HyperPipes Model')\n plt.ylabel('Y Axis')\n plt.xlabel('X Axis')\n plt.legend()\n plt.show()\n\n def add_plot_point(self, point):\n plt.plot(point[0], point[1], 'rs', label=\"Validation\")","sub_path":"hyperpipes/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":1150,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"166740510","text":"class Carbase:\n def __init__(self, car_type, brand, photo_le_name, carrying):\n self.car_type = car_type\n self.photo_le_name = photo_le_name\n self.brand = brand\n self.carrying = carrying\n\n def get_photo_le_ext(self):\n if self.photo_le_name.count(\".\") == 1:\n return '.' 
+ self.photo_le_name.split('.')[-1]\n        else:\n            return ''\n\n\nclass Car(Carbase):\n    def __init__(self, car_type, brand, passenger_seats_count, photo_le_name, carrying):\n        super().__init__(car_type, brand, photo_le_name, carrying)\n        self.passenger_seats_count = passenger_seats_count\n\n    def __repr__(self):\n        return '{} {} {} {} {}'.format(str(self.car_type), str(self.brand), str(self.passenger_seats_count), str(self.photo_le_name), str(self.carrying))\n\n\nclass Truck(Carbase):\n    def __init__(self, car_type, brand, photo_le_name, carrying, body_length=0.0, body_width=0.0, body_height=0.0):\n        super().__init__(car_type, brand, photo_le_name, carrying)\n        self.body_length = body_length\n        self.body_width = body_width\n        self.body_height = body_height\n\n    def get_body_volume(self):\n        return self.body_length * self.body_width * self.body_height\n\n    def __repr__(self):\n        if self.body_length == 0.0:\n            return '{} {} {} {}'.format(str(self.car_type), str(self.brand), str(self.photo_le_name), str(self.carrying))\n        else:\n            return '{} {} {} {} {}'.format(str(self.car_type), str(self.brand), str(self.photo_le_name), str(self.carrying), str(self.body_length)+\"x\"+str(self.body_width)+\"x\"+str(self.body_height))\n\n\nclass Specmachine(Carbase):\n    def __init__(self, car_type, brand, photo_le_name, carrying, extra):\n        super().__init__(car_type, brand, photo_le_name, carrying)\n        self.extra = extra\n\n    def __repr__(self):\n        return '{} {} {} {} {}'.format(str(self.car_type), str(self.brand), str(self.photo_le_name), str(self.carrying), str(self.extra))\n\n\ndef get_car_list(filename):\n    car_list = []\n    with open(filename, 'r', encoding='utf-8') as f:\n        txt = f.readlines()\n    for i in txt:\n        if i.count(';') == 6:\n            i = i.split(';')\n\n            if i[0] == 'car':\n                a = Car(i[0], i[1], i[2], i[3], i[5])\n                car_list.append(a)\n\n            if i[0] == 'truck':\n                if i[4] != '':\n                    b = i[4].split('x')\n                    a = Truck(i[0], i[1], i[3], i[5], float(b[0]), float(b[1]), float(b[2]))\n                    car_list.append(a)\n                else:\n                    a = Truck(i[0], i[1], i[3], i[5])\n                    car_list.append(a)\n\n            if i[0] == 'spec_machine':\n                a = Specmachine(i[0], i[1], i[3], i[5], i[6][:-2])\n                car_list.append(a)\n    return car_list\n\n\ndef main():\n    return get_car_list('solution.txt')\n\n\nif __name__ == '__main__':\n    print(main())\n","sub_path":"solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":2995,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"343537768","text":"class Node(object):\n    def __init__(self, value=None, next=None, previous=None):\n        self.value, self.next, self.previous = value, next, previous\n\n\nclass CircularDoubleLinkedNode(object):\n    def __init__(self, maxsize=None):\n        self.maxsize = maxsize\n        self.root = Node()\n        self.root.next = self.root\n        self.root.previous = self.root\n        self.length = 0\n\n    def __len__(self):\n        return self.length\n\n    def headnode(self):\n        return self.root.next\n\n    def tailnode(self):\n        return self.root.previous\n\n    def append(self, value):\n        if self.maxsize is not None and self.length >= self.maxsize:\n            raise Exception('Full')\n        node = Node(value)\n        tailnode = self.tailnode()\n        tailnode.next = node\n        node.next = self.root\n        node.previous = tailnode\n        self.length += 1\n        self.root.previous = node\n\n    def appendleft(self, value):\n        if self.maxsize is not None and self.length >= self.maxsize:\n            raise Exception('Full')\n        node = Node(value)\n        headnode = self.headnode()\n        self.root.next = node\n        node.next = headnode\n        headnode.previous = node\n        node.previous = self.root\n        
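# keep the cached length in sync with the node count; __len__() reads it\n        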
self.length += 1\n\n def pop(self):\n if self.root.next is self.root:\n return\n tailnode = self.tailnode()\n prevnode = tailnode.previous\n prevnode.next = self.root\n self.root.previous = prevnode\n value = tailnode.value\n del tailnode\n self.length -= 1\n return value\n\n def popleft(self):\n if self.root.next is self.root:\n return\n headnode = self.headnode()\n self.root.next = headnode.next\n headnode.next.previous = self.root\n value = headnode.value\n del headnode\n self.length -= 1\n return value\n\n def __iter__(self):\n for node in self._iter_node():\n yield node.value\n\n def _iter_node(self):\n curnode = self.root.next\n while curnode is not self.root:\n yield curnode\n curnode = curnode.next\n\n def remove(self, node):\n if self.root.next is self.root:\n raise Exception('remove empty CircularDoubleLinkedNode')\n prevnode = node.previous\n nextnode = node.next\n prevnode.next = nextnode\n nextnode.previous = prevnode\n del node\n self.length -= 1\n\n def reverse_node(self):\n curnode = self.root.previous\n while curnode is not self.root:\n yield curnode\n curnode = curnode.previous\n\n\nclass Queue(object):\n def __init__(self):\n self._items = CircularDoubleLinkedNode()\n\n def __len__(self):\n return len(self._items)\n\n def __iter__(self):\n for item in self._items:\n yield item\n\n def push(self, value):\n self._items.append(value)\n\n def pop(self):\n return self._items.popleft()\n\n def is_empty(self):\n return len(self._items) == 0\n\n\ndef test():\n q = Queue()\n for i in range(5):\n q.push(i)\n assert list(q) == [i for i in range(5)]\n assert len(q) == 5\n for i in range(5):\n assert q.pop() == i\n assert len(q) == 0\n\n\nif __name__ == \"__main__\":\n test()","sub_path":"Introduction to algorithms/Queue_Circular.py","file_name":"Queue_Circular.py","file_ext":"py","file_size_in_byte":3271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"321685258","text":"import os\nimport errno\nimport json\nfrom pprint import pprint\nimport random\n\n\nNUM_CLASSES = 100\n\n\ndef select(n, num_samples):\n if (num_samples > n):\n raise Exception(\"n is smaller than number of samples to select!\")\n return random.sample(range(n), num_samples)\n\n\ndef main():\n _map = {}\n\n with open(\"ontology.json\") as f:\n data = json.load(f)\n\n selected_classes = select(len(data), NUM_CLASSES)\n\n for _id in selected_classes:\n category_name = data[_id][\"name\"]\n category_id = data[_id][\"id\"]\n _map[category_name] = category_id\n\n src = 'original.csv'\n\n with open(src, 'r') as _in:\n content = _in.readlines()\n\n for category_name in _map.keys():\n dst = \"../raw/{}/links.csv\".format(category_name)\n category_id = _map[category_name]\n if not os.path.exists(os.path.dirname(dst)):\n try:\n os.makedirs(os.path.dirname(dst))\n except OSError as exc: # Guard against race condition\n if exc.errno != errno.EEXIST:\n raise\n\n with open(dst, 'w') as out:\n for row in content:\n if (row.split()[3].startswith(\"\\\"\" + category_id)):\n out.write(row)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"download/links/crop.py","file_name":"crop.py","file_ext":"py","file_size_in_byte":1333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"527302550","text":"import re, copy\nfrom pathlib import Path\n\nSTITCH_VARS = ['file','correlation','posX','posY','gridX','gridY'] # image stitching values\nVARIABLES = 'rtczyxp'\n\ndef val_variables(variables):\n \"\"\" Validate file pattern 
variables\n \n Variables for a file pattern should only contain the values in filepattern.VARIABLES.\n In addition to this, only a linear positioning variable (p) or an x,y positioning\n variable should be present, but not both.\n\n There is no return value for this function. It throws an error if an invalid variable\n is present.\n\n Inputs:\n variables - a string of variables, e.g. 'rtxy'\n Outputs:\n None\n \"\"\"\n\n for v in variables:\n assert v in VARIABLES, \"File pattern variables must be one of {}\".format(VARIABLES)\n\n if 'p' in variables:\n assert 'x' not in variables and 'y' not in variables, \"Either x and/or y may be defined or p may be defined, but not both.\"\n\ndef get_regex(pattern):\n \"\"\" Parse a filename pattern into a regular expression\n \n The filename pattern used here mimics that used by MIST, where variables and\n positions are encoded into the string. For example, file_c000.ome.tif that\n indicates channel using the _c, the filename pattern would be file_c{ccc}.ome.tif.\n The only possible variables that can be passed into the filename pattern are\n p, x, y, z, c, t, and r. In the case of p, x, and y, both x&y must be specified\n or p must be specified, but if all three are specified then an error is thrown.\n\n If no filepattern is provided, then a universal expression is returned.\n\n Inputs:\n pattern - Filename pattern\n Outputs:\n regex - Regex used to parse filenames\n variables - Variables found in the filename pattern\n \"\"\"\n\n # Initialize the regular expression\n regex = pattern\n\n # If no regex was supplied, return universal matching regex\n if pattern == None or pattern == '' :\n return '.*', []\n \n # Parse variables\n expr = []\n variables = []\n for g in re.finditer(\"{{[{}]+}}\".format(VARIABLES),pattern):\n expr.append(g.group(0))\n variables.append(expr[-1][1])\n \n # Validate variable choices\n val_variables(variables)\n \n # Generate the regular expression pattern\n for e in expr:\n regex = regex.replace(e,\"([0-9]{\"+str(len(e)-2)+\"})\")\n \n return regex, variables\n\ndef output_name(pattern,files,ind):\n \"\"\" Returns an output name for a single file resulting from multiple images\n\n This function returns a file output name for the image volume\n based on the name of multiple files used to generate it.\n All variables are kept the same as in the original filename,\n but variables in the file name pattern that are not present in ind\n are transformed into a range surrounded by <>.\n For example, if the following files are processed:\n \n image_c000_z000.ome.tif\n image_c000_z001.ome.tif\n image_c000_z002.ome.tif\n image_c001_z000.ome.tif\n image_c001_z001.ome.tif\n image_c001_z002.ome.tif\n \n then if ind = {'c': 0}, the output filename will be:\n image_c000_z<000-002>.ome.tif\n\n Inputs:\n fpattern - A filename pattern indicating variables in filenames\n files - A list of file names\n ind - A dictionary containing the indices for the file name (i.e. 
{'r':1,'t':1})\n Outputs:\n\n fname - an output file name\n \"\"\"\n\n # Determine the variables that shouldn't change in the filename pattern\n STATICS = [key for key in ind.keys()]\n # If no pattern was supplied, return default image name\n if pattern==None or pattern=='':\n return 'image.ome.tif'\n \n for key in ind.keys():\n assert key in VARIABLES, \"Input dictionary key not a valid variable: {}\".format(key)\n \n # Parse variables\n expr = []\n variables = []\n for g in re.finditer(\"{{[{}]+}}\".format(VARIABLES),pattern):\n expr.append(g.group(0))\n variables.append(expr[-1][1])\n # Generate the output filename\n fname = pattern\n for e,v in zip(expr,variables):\n if v not in STATICS:\n minval = min([int(b) for i in files for a,b in i.items() if a==v])\n maxval = max([int(b) for i in files for a,b in i.items() if a==v])\n fname = fname.replace(e,'<' + str(minval).zfill(len(e)-2) +\n '-' + str(maxval).zfill(len(e)-2) + '>')\n elif v not in ind.keys():\n fname = fname.replace(e,str(0).zfill(len(e)-2))\n else:\n fname = fname.replace(e,str(ind[v]).zfill(len(e)-2))\n \n return fname\n\ndef parse_filename(file_name,pattern=None,regex=None,variables=None,return_empty=True):\n \"\"\" Get the x, y, p, z, c, t, and r indices from a file name\n \n Extract the variable values from a file name. Return as a dictionary.\n\n For example, if a file name and file pattern are:\n file_x000_y000_c000.ome.tif\n file_x{xxx}_y{yyy}_c{ccc}.ome.tif\n\n This function will return:\n {\n 'x': 0,\n 'y': 0,\n 'c': 0\n }\n\n Inputs:\n file_name - List of values parsed from a filename using a filename pattern\n pattern - A file name pattern. Either this or regex must be defined (not both).\n regex - A regular expression used to parse the filename.\n return_empty - Returns undefined variables as -1\n Outputs:\n index - The value of the dimension\n \"\"\"\n # Get the regex if not defined, and validate inputs\n if pattern != None:\n regex,variables = get_regex(pattern)\n elif regex == None:\n ValueError('Either pattern or regex must be specified.')\n elif variables == None:\n ValueError('If regex is an input, then variables must be an input.')\n else:\n val_variables(variables)\n\n # Get variable values from the filename\n groups = re.match(regex,file_name)\n if groups == None: # Don't return anything if the filename doesn't match the regex\n return None\n\n r = {} # Initialize the output\n\n # Initialize variable iterator, include undefined variables\n iter_vars = VARIABLES\n if 'p' in variables:\n iter_vars = iter_vars.replace('x','')\n iter_vars = iter_vars.replace('y','')\n else:\n iter_vars = iter_vars.replace('p','')\n\n # Generate the output\n for v in iter_vars:\n if v not in variables:\n if return_empty:\n r[v] = -1\n else:\n r[v] = int(groups.groups()[[ind for ind,i in zip(range(0,len(variables)),variables) if i==v][0]])\n\n return r\n\ndef parse_vector_line(vector_line,pattern=None,regex=None,variables=None,return_empty=True):\n \"\"\" Get the file, corr, posX, posY, gridX, and gridY information from a vector\n \n This function parses a single line from a stitching vector. It uses parse_filename\n to extract variable values from the file name. It returns a dictionary similar\n to what is returned by parse_filename, except it includes stitching variables\n in the dictionary.\n\n Inputs:\n vector_line - A single line from a stitching vector\n pattern - A file name pattern. 
Either this or regex must be defined (not both).\n regex - A regular expression used to parse the filename.\n return_empty - Returns undefined variables as -1\n Outputs:\n index - The value of the dimension\n \"\"\"\n\n # regular expression used to parse the vector information\n line_regex = r\"file: (.*); corr: (.*); position: \\((.*), (.*)\\); grid: \\((.*), (.*)\\);\"\n \n # parse the information from the stitching vector line\n stitch_groups = list(re.match(line_regex,vector_line).groups())\n stitch_info = {key:value for key,value in zip(STITCH_VARS,stitch_groups)}\n \n # parse the filename (this does all the sanity checks as well)\n r = parse_filename(stitch_info['file'],pattern,regex,variables,return_empty)\n if r == None:\n return None\n r.update(stitch_info)\n\n return r\n\ndef parse_directory(file_path,pattern,var_order='rtczyx'):\n \"\"\" Parse files in a directory\n \n This function extracts the variables value from each filename in a directory and places\n them in a dictionary that allows retrieval using variable values. For example, if there\n is a folder with filenames using the pattern file_x{xxx}_y{yyy}_c{ccc}.ome.tif, then\n the output will be a dictionary with the following structure:\n output_dictionary[r][t][c][z][y][x]\n\n To access the filename with values x=2, y=3, and c=1:\n output_dictionary[-1][-1][1][-1][3][2]\n\n The -1 values are placeholders for variables that were undefined by the pattern. The value\n stored in the deepest layer of the dictionary is a list of all files that match the variable\n values. For a well formed filename pattern, the length of the list at each set of\n coordinates should be one, but there are some use cases which makes it beneficial to\n store many filenames at each set of coordinates (see below).\n\n A custom variable order can be returned using the var_order keyword argument. When set,\n this changes the structure of the output dictionary. Using the previous example,\n if the var_order value was set to `xyc`, then to access the filename matching x=2, y=3,\n and c=1:\n output_dictionary[2][3][1]\n\n The variables in var_order do not need to match the variables in the pattern, but this\n will cause overloaded lists to be returned. Again using the same example as before,\n if the var_order was set to 'xy', then accessing the file associated with x=2 and y=3\n will return a list of all filenames that match x=2 and y=3, but each filename will have\n a different c value. This may be useful in applications where filenames want to be grouped\n by a particular attribute (channel, replicate, etc).\n\n NOTE: The uvals return value is a list of unique values for each variable index, but not\n all combinations of variables are valid in the dictionary. 
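For example, files may exist\n    for c=0,z=3 while no file has c=1,z=3. 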
It is possible that one level\n of the dictionary has different child values.\n\n Inputs:\n file_path - path to a folder containing files to parse\n pattern - A file name pattern.\n var_order - A string indicating the order of variables in a nested output dictionary\n Outputs:\n file_ind - The output dictionary containing all files matching the file pattern, sorted\n by variable value\n uvals - Unique variables for each \n \"\"\"\n\n # validate the variable order\n val_variables(var_order)\n\n # get regular expression from file pattern\n regex, variables = get_regex(pattern)\n\n # initialize the output\n if len(variables) == 0:\n file_ind = []\n else:\n file_ind = {}\n files = [f.name for f in Path(file_path).iterdir() if f.is_file()]\n files.sort()\n\n # Unique values for each variable\n uvals = {key:[] for key in var_order}\n\n # Build the output dictionary\n for f in files:\n \n # Parse filename values\n variables = parse_filename(f,pattern)\n\n # If the filename doesn't match the pattern, don't include it\n if variables == None:\n continue\n \n # Generate the layered dictionary using the specified ordering\n temp_dict = file_ind\n if isinstance(file_ind,dict):\n for key in var_order:\n if variables[key] not in temp_dict.keys():\n if variables[key] not in uvals[key]:\n uvals[key].append(variables[key])\n if var_order[-1] != key:\n temp_dict[variables[key]] = {}\n else:\n temp_dict[variables[key]] = []\n temp_dict = temp_dict[variables[key]]\n \n # Add the file information at the deepest layer\n new_entry = {}\n new_entry['file'] = str(Path(file_path).joinpath(f).absolute())\n if variables != None:\n for key, value in variables.items():\n new_entry[key] = value\n temp_dict.append(new_entry)\n\n for key in uvals.keys():\n uvals[key].sort()\n \n return file_ind, uvals\n\ndef parse_vector(file_path,pattern,var_order='rtczyx'):\n \"\"\" Parse files in a stitching vector\n \n This function works exactly as parse_directory, except it parses files in a stitching\n vector. In addition to the variable values contained in the file dictionary returned\n by this function, the values associated with the file are also contained in the\n dictionary.\n \n The format for a line in the stitching vector is as follows:\n file: (filename); corr: (correlation)); position: (posX, posY); grid: (gridX, gridY);\n \n posX and posY are the pixel positions of an image within a larger stitched image, and\n gridX and gridY are the grid positions for each image.\n \n NOTE: A key difference between this function and parse_directory is the value stored\n under the 'file' key. 
This function returns only the name of an image parsed\n from the stitching vector, while the value returned by parse_dictionary is a\n full path to an image.\n\n Inputs:\n file_path - path to a folder containing files to parse\n pattern - A file name pattern.\n var_order - A string indicating the order of variables in a nested output dictionary\n Outputs:\n file_ind - The output dictionary containing all files matching the file pattern, sorted\n by variable value\n uvals - Unique variables for each \n \"\"\"\n\n # validate the variable order\n val_variables(var_order)\n\n # get regular expression from file pattern\n regex, variables = get_regex(pattern)\n\n # initialize the output\n if len(variables) == 0:\n file_ind = []\n else:\n file_ind = {}\n\n # Unique values for each variable\n uvals = {key:[] for key in var_order}\n\n # Build the output dictionary\n with open(file_path,'r') as fr:\n for f in fr:\n \n # Parse filename values\n variables = parse_vector_line(f,pattern)\n\n # If the filename doesn't match the patter, don't include it\n if variables == None:\n continue\n \n # Generate the layered dictionary using the specified ordering\n temp_dict = file_ind\n if isinstance(file_ind,dict):\n for key in var_order:\n if variables[key] not in temp_dict.keys():\n if variables[key] not in uvals[key]:\n uvals[key].append(variables[key])\n if var_order[-1] != key:\n temp_dict[variables[key]] = {}\n else:\n temp_dict[variables[key]] = []\n temp_dict = temp_dict[variables[key]]\n \n # Add the file information at the deepest layer\n temp_dict.append(variables)\n\n for key in uvals.keys():\n uvals[key].sort()\n \n return file_ind, uvals\n\ndef get_matching(files,var_order,out_var=None,**kwargs):\n \"\"\" Get filenames that have defined variable values\n \n This gets all filenames that match a set of variable values. Variables must be one of\n filename.VARIABLES, and the inputs must be uppercase. The following example code would\n return all files that have c=0:\n pattern = \"file_x{xxx}_y{yyy}_c{ccc}.ome.tif\"\n file_path = \"./path/to/files\"\n files = parse_directory(file_path,pattern,var_order='cyx')\n channel_zero = get_matching(files,'cyx',C=0)\n\n Multiple coordinates can be used simultaneously, so in addition to C=0 in the above example,\n it is also possible to include Y=0. 
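A call such as get_matching(files,'cyx',C=0,Y=0)\n    would then return only the filenames with c=0 and y=0. 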
Further, each variable can be a list of values, and the\n returned output will contain filenames matching any of the input values.\n\n Inputs:\n files - A file dictionary (see parse_directory)\n var_order - A string indicating the order of variables in a nested output dictionary\n out_var - Variable to store results, used for recursion\n kwargs - One of filepattern.VARIABLES, must be uppercase, can be single value or a list of values\n Outputs:\n out_var - A list of all files matching the input values\n \"\"\"\n # Initialize the output variable if needed\n if out_var == None:\n out_var = []\n \n # If there is no var_order, then files should be a list of files.\n if len(var_order)==0:\n if not isinstance(files,list):\n TypeError('Expected files to be a list since var_order is empty.')\n out_var.extend(files)\n return\n\n for arg in kwargs.keys():\n assert arg==arg.upper() and arg.lower() in VARIABLES, \"Input keyword arguments must be uppercase variables (one of R, T, C, Z, Y, X, P)\"\n \n if var_order[0].upper() in kwargs.keys():\n if isinstance(kwargs[var_order[0].upper()],list): # If input was already a list\n v_iter = kwargs[var_order[0].upper()]\n else: # If input was not a list, make it a list\n v_iter = [kwargs[var_order[0].upper()]]\n else:\n v_iter = [i for i in files.keys()]\n v_iter.sort()\n \n for v_i in v_iter:\n if v_i not in files.keys():\n continue\n get_matching(files[v_i],var_order[1:],out_var,**kwargs)\n return out_var\n\nclass FilePattern():\n \"\"\" Main class for handling filename patterns\n \n Most of the functions in filepattern.py return complicated variable structures that might\n be difficult to use in an abstract way. This class provides tools to use the above functions\n in a simpler way. In particular, the iterate function is an iterable that permits simple\n iteration over filenames with specific values and grouped by any desired variable.\n\n \"\"\"\n var_order = 'rtczyx'\n files = {}\n uniques = {}\n\n def __init__(self,file_path,pattern,var_order=None):\n self.pattern, self.variables = get_regex(pattern)\n self.path = file_path\n\n if var_order:\n val_variables(var_order)\n self.var_order = var_order\n\n self.files, self.uniques = parse_directory(file_path,pattern,var_order=self.var_order)\n\n # Get filenames matching values for specified variables\n def get_matching(self,**kwargs):\n \"\"\" Get all filenames matching specific values\n \n This function runs the get_matching function using the objects file dictionary.\n\n Inputs:\n kwargs - One of filepatter.VARIABLES, must be uppercase, can be single values or a list of values\n Outputs:\n files - A list of all files matching the input values\n \"\"\"\n # get matching files\n files = get_matching(self.files,self.var_order,out_var=None,**kwargs)\n return files\n\n def iterate(self,group_by=[],**kwargs):\n \"\"\" Iterate through filenames\n \n This function is an iterable. On each call, it returns a list of filenames that matches a set of\n variable values. It iterates through every combination of variable values.\n\n Variables designated in the group_by input argument are grouped together. 
So, if group_by='zc', \n then each iteration will return all filenames that have constant values for each variable except z\n and c.\n\n\n In addition to the group_by variable, specific variable arguments can also be included as with the\n get_matching function.\n\n Inputs:\n group_by - String of variables by which the output filenames will be grouped\n kwargs - One of filepatter.VARIABLES, must be uppercase, can be single values or a list of values\n Outputs:\n iter_files - A list of all files matching the input values\n \"\"\"\n # If self.files is a list, no parsing took place so just loop through the files\n if isinstance(self.files,list):\n for f in self.files:\n yield f\n return\n\n # Generate the values to iterate through\n iter_vars = {}\n for v in self.var_order:\n if v in group_by:\n continue\n elif v.upper() in kwargs.keys():\n if isinstance(kwargs[v.upper()],list):\n iter_vars[v] = copy.deepcopy(kwargs[v.upper()])\n else:\n iter_vars[v] = [kwargs[v.upper()]]\n else:\n iter_vars[v] = copy.deepcopy(self.uniques[v])\n \n # Find the shallowest variable in the dictionary structure\n shallowest = None\n for v in iter_vars.keys():\n if -1 in iter_vars[v] and len(iter_vars[v]):\n continue\n else:\n shallowest = v\n break\n\n # If shallowest is undefined, return all file names\n if shallowest == None:\n yield get_matching(self.files,self.var_order,**{key.upper():iter_vars[key][0] for key in iter_vars.keys()})\n return\n\n # Loop through every combination of files\n while len(iter_vars[shallowest])>0:\n # Get list of filenames and return as iterator\n iter_files = []\n iter_files = get_matching(self.files,self.var_order,**{key.upper():iter_vars[key][0] for key in iter_vars.keys()})\n if len(iter_files)>0:\n yield iter_files\n\n # Delete last iteration indices\n for v in reversed(self.var_order):\n if v in group_by:\n continue\n del iter_vars[v][0]\n if len(iter_vars[v])>0:\n break\n elif v == shallowest:\n break\n iter_vars[v] = copy.deepcopy(self.uniques[v])\n \nclass VectorPattern(FilePattern):\n \"\"\" Main class for handling stitching vectors\n \n This class works nearly identically to FilePattern, except it works with lines\n inside of a stitching vector. 
As with FilePattern, the iterate method will iterate\n through values, which in the case of VectorPattern are parsed lines of a stitching\n vector.\n\n \"\"\"\n \n var_order = 'rtczyx'\n files = {}\n uniques = {}\n \n def __init__(self,file_path,pattern,var_order=None):\n self.pattern, self.variables = get_regex(pattern)\n self.path = file_path\n\n if var_order:\n val_variables(var_order)\n self.var_order = var_order\n\n self.files, self.uniques = parse_vector(file_path,pattern,var_order=self.var_order)","sub_path":"polus-apply-flatfield-plugin/src/filepattern.py","file_name":"filepattern.py","file_ext":"py","file_size_in_byte":22569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"636590252","text":"# ------------------------------------------------------------------------------\n# Copyright (c) 2010-2013, EVEthing team\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without modification,\n# are permitted provided that the following conditions are met:\n#\n# Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n# Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND\n# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.\n# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,\n# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT\n# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR\n# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,\n# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY\n# OF SUCH DAMAGE.\n# ------------------------------------------------------------------------------\n\nfrom django.db import models\n\nfrom thing.models.character import Character\nfrom thing.models.corpwallet import CorpWallet\nfrom thing.models.item import Item\nfrom thing.models.station import Station\n\n\nclass MarketOrder(models.Model):\n \"\"\"Market orders\"\"\"\n order_id = models.BigIntegerField(primary_key=True)\n\n station = models.ForeignKey(Station, on_delete=models.DO_NOTHING)\n item = models.ForeignKey(Item, on_delete=models.DO_NOTHING)\n character = models.ForeignKey(Character, on_delete=models.DO_NOTHING)\n corp_wallet = models.ForeignKey(CorpWallet, null=True, blank=True, on_delete=models.DO_NOTHING)\n\n creator_character_id = models.IntegerField(db_index=True)\n\n escrow = models.DecimalField(max_digits=14, decimal_places=2)\n price = models.DecimalField(max_digits=14, decimal_places=2)\n total_price = models.DecimalField(max_digits=17, decimal_places=2)\n\n buy_order = models.BooleanField(default=False)\n volume_entered = models.IntegerField()\n volume_remaining = models.IntegerField()\n minimum_volume = models.IntegerField()\n issued = models.DateTimeField(db_index=True)\n expires = models.DateTimeField(db_index=True)\n\n def check_undercut(self):\n from thing.models.stationorder import StationOrder\n\n owned_orders_query = 
MarketOrder.objects.filter(creator_character_id=self.creator_character_id)\n owned_order_ids = [o.order_id for o in owned_orders_query]\n\n orders_query = StationOrder.objects.filter(\n item_id=self.item.id,\n buy_order=self.buy_order,\n ).exclude(order_id__in=owned_order_ids)\n\n if self.buy_order:\n next_order_info = orders_query.filter(price__gte=self.price, station__system__constellation__region_id=self.station.system.constellation.region_id).aggregate(price=models.Max('price'), volume=models.Sum('volume_remaining'))\n next_order_price = next_order_info['price']\n next_order_volume = next_order_info['volume']\n\n else:\n next_order_info = orders_query.filter(price__lte=self.price, station_id=self.station.id).aggregate(price=models.Min('price'), volume=models.Sum('volume_remaining'))\n next_order_price = next_order_info['price']\n next_order_volume = next_order_info['volume']\n\n\n\n if next_order_price is not None\\\n and next_order_price > 0:\n outbid = True\n outbid_price = next_order_price\n outbid_volume = next_order_volume\n else:\n outbid = False\n outbid_price = 0\n outbid_volume = 0\n\n return outbid, outbid_price, outbid_volume\n\n class Meta:\n app_label = 'thing'\n ordering = ('buy_order', 'item__name')\n","sub_path":"thing/models/marketorder.py","file_name":"marketorder.py","file_ext":"py","file_size_in_byte":4259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"337237511","text":"#!/usr/bin/python\n\nimport sys, getopt\nfrom progress.bar import Bar\n\nimport xml.etree.ElementTree\nimport csv\nfrom textstat.textstat import textstat\nimport enchant\n\ndef length (text):\n return len(text.split())\n\ndef reading_level (text):\n try:\n level = textstat.flesch_kincaid_grade(text)\n except ZeroDivisionError:\n level = -1\n return level\n\nd = enchant.Dict(\"en_US\")\n\ndef typos (text):\n num = 0\n for word in text.split():\n num += (d.check(word) == False)\n return num\n\ndef process_xml_to_csv (input_file_name, output_file_name):\n try:\n tree = xml.etree.ElementTree.parse(input_file_name)\n except IOError:\n print(\"Could not open input file\")\n return \n\n e = tree.getroot()\n with open(output_file_name, 'wb') as csv_file:\n csv_writer = csv.writer(csv_file)\n\n bar = Bar('Processing', max = len(e))\n\n for q in e:\n question = ''\n\n q_length = 0\n q_level = 0\n q_typos = 0\n num_answer = 0\n avg_a_length = 0\n avg_a_level = 0 \n avg_a_typos = 0\n avg_a_up = 0\n avg_a_down = 0\n best_a_length = 0\n best_a_level = 0\n best_a_typos = 0\n best_a_up = 0\n best_a_down = 0\n\n for elem in q[0]:\n if elem.tag == 'subject':\n question += elem.text\n if elem.tag == 'content':\n question += elem.text\n elif elem.tag == 'bestanswer':\n num_answer += 1\n avg_a_length += length(elem.text)\n avg_a_level += reading_level(elem.text)\n avg_a_typos += typos(elem.text)\n best_a_length = length(elem.text)\n best_a_level = reading_level(elem.text)\n best_a_typos = typos(elem.text)\n elif elem.tag == 'nbestanswers':\n for other_answer in elem:\n num_answer += 1\n avg_a_length += length(other_answer.text)\n avg_a_level += reading_level(other_answer.text)\n avg_a_typos += typos(other_answer.text)\n\n q_length = length(question)\n q_level = reading_level(question)\n q_typos = typos(question)\n\n avg_a_length /= float(num_answer)\n avg_a_level /= float(num_answer)\n avg_a_typos /= float(num_answer)\n avg_a_up /= float(num_answer)\n avg_a_down /= float(num_answer)\n\n row = [q_length, q_level, q_typos, num_answer,\n avg_a_length, avg_a_level, 
avg_a_typos, avg_a_up, avg_a_down,\n best_a_length, best_a_level, best_a_typos, best_a_up, best_a_down]\n\n csv_writer.writerow(row)\n bar.next()\n bar.finish()\n\nif __name__ == \"__main__\":\n if len(sys.argv) == 3:\n input_file_name = sys.argv[1]\n output_file_name = sys.argv[2]\n process_xml_to_csv(input_file_name, output_file_name)\n else:\n print(\"usage: run.py \")","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":2733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"359787986","text":"from unittest import mock\n\nimport kerastuner\n\nfrom autokeras import oracle as oracle_module\nfrom tests import common\n\n\ndef test_random_oracle_state():\n hyper_graph = common.build_hyper_graph()\n oracle = oracle_module.GreedyOracle(\n objective='val_loss',\n )\n oracle.hyper_graph = hyper_graph\n oracle.set_state(oracle.get_state())\n assert oracle.hyper_graph is hyper_graph\n\n\n@mock.patch('autokeras.oracle.GreedyOracle.get_best_trials')\ndef test_random_oracle(fn):\n hyper_graph = common.build_hyper_graph()\n oracle = oracle_module.GreedyOracle(\n objective='val_loss',\n )\n hp = kerastuner.HyperParameters()\n preprocess_graph, keras_graph = hyper_graph.build_graphs(hp)\n preprocess_graph.build(hp)\n keras_graph.inputs[0].shape = hyper_graph.inputs[0].shape\n keras_graph.build(hp)\n oracle.hyper_graph = hyper_graph\n trial = mock.Mock()\n trial.hyperparameters = hp\n fn.return_value = [trial]\n\n oracle.update_space(hp)\n for i in range(2000):\n oracle._populate_space(str(i))\n\n assert 'optimizer' in oracle._hp_names[oracle_module.GreedyOracle.OPT]\n assert 'classification_head_1/dropout_rate' in oracle._hp_names[\n oracle_module.GreedyOracle.ARCH]\n assert 'image_block_1/block_type' in oracle._hp_names[\n oracle_module.GreedyOracle.HYPER]\n","sub_path":"tests/autokeras/oracle_test.py","file_name":"oracle_test.py","file_ext":"py","file_size_in_byte":1334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"357388286","text":"\"\"\"Classes for magnetic field coils.\"\"\"\n\nfrom abc import ABC\nfrom collections.abc import MutableSequence\n\nimport numpy as np\n\nfrom desc.backend import jnp\nfrom desc.geometry import FourierPlanarCurve, FourierRZCurve, FourierXYZCurve\nfrom desc.geometry.utils import rpz2xyz, xyz2rpz_vec\nfrom desc.grid import Grid\nfrom desc.magnetic_fields import MagneticField, biot_savart\n\n\nclass Coil(MagneticField, ABC):\n \"\"\"Base class representing a magnetic field coil.\n\n Represents coils as a combination of a Curve and current\n\n Subclasses for a particular parameterization of a coil should inherit\n from Coil and the appropriate Curve type, eg MyCoil(Coil, MyCurve)\n - note that Coil must be the first parent for correct inheritance.\n\n Subclasses based on curves that follow the Curve API should only have\n to implement a new __init__ method, all others will be handled by default\n\n Parameters\n ----------\n current : float\n current passing through the coil, in Amperes\n \"\"\"\n\n _io_attrs_ = MagneticField._io_attrs_ + [\"_current\"]\n\n def __init__(self, current, *args, **kwargs):\n self._current = current\n super().__init__(*args, **kwargs)\n\n @property\n def current(self):\n \"\"\"float: Current passing through the coil, in Amperes.\"\"\"\n return self._current\n\n @current.setter\n def current(self, new):\n assert jnp.isscalar(new) or new.size == 1\n self._current = new\n\n def compute_magnetic_field(self, coords, 
params={}, basis=\"rpz\"):\n \"\"\"Compute magnetic field at a set of points.\n\n The coil is discretized into a series of straight line segments, using\n the coil ``grid`` attribute. To override this, include 'grid' as a key\n in the `params` dictionary with the desired grid resolution.\n\n Similarly, the coil current may be overridden by including `current`\n in the `params` dictionary.\n\n Parameters\n ----------\n coords : array-like shape(n,3) or Grid\n coordinates to evaluate field at [R,phi,Z] or [x,y,z]\n params : dict, optional\n parameters to pass to curve\n basis : {\"rpz\", \"xyz\"}\n basis for input coordinates and returned magnetic field\n\n Returns\n -------\n field : ndarray, shape(n,3)\n magnetic field at specified points, in either rpz or xyz coordinates\n \"\"\"\n assert basis.lower() in [\"rpz\", \"xyz\"]\n if isinstance(coords, Grid):\n coords = coords.nodes\n coords = jnp.atleast_2d(coords)\n if basis == \"rpz\":\n coords = rpz2xyz(coords)\n current = params.pop(\"current\", self.current)\n coil_coords = self.compute_coordinates(**params, basis=\"xyz\")\n B = biot_savart(coords, coil_coords, current)\n if basis == \"rpz\":\n B = xyz2rpz_vec(B, x=coords[:, 0], y=coords[:, 1])\n return B\n\n def __repr__(self):\n \"\"\"Get the string form of the object.\"\"\"\n return (\n type(self).__name__\n + \" at \"\n + str(hex(id(self)))\n + \" (name={}, current={})\".format(self.name, self.current)\n )\n\n\nclass FourierRZCoil(Coil, FourierRZCurve):\n \"\"\"Coil parameterized by fourier series for R,Z in terms of toroidal angle phi.\n\n Parameters\n ----------\n current : float\n current through coil, in Amperes\n R_n, Z_n: array-like\n fourier coefficients for R, Z\n modes_R : array-like\n mode numbers associated with R_n. If not given defaults to [-n:n]\n modes_Z : array-like\n mode numbers associated with Z_n, defaults to modes_R\n NFP : int\n number of field periods\n sym : bool\n whether to enforce stellarator symmetry\n grid : Grid\n default grid for computation\n name : str\n name for this coil\n \"\"\"\n\n _io_attrs_ = Coil._io_attrs_ + FourierRZCurve._io_attrs_\n\n def __init__(\n self,\n current=1,\n R_n=10,\n Z_n=0,\n modes_R=None,\n modes_Z=None,\n NFP=1,\n sym=\"auto\",\n grid=None,\n name=\"\",\n ):\n super().__init__(current, R_n, Z_n, modes_R, modes_Z, NFP, sym, grid, name)\n\n\nclass FourierXYZCoil(Coil, FourierXYZCurve):\n \"\"\"Coil parameterized by fourier series for X,Y,Z in terms of arbitrary angle phi.\n\n Parameters\n ----------\n current : float\n current through coil, in Amperes\n X_n, Y_n, Z_n: array-like\n fourier coefficients for X, Y, Z\n modes : array-like\n mode numbers associated with X_n etc.\n grid : Grid\n default grid or computation\n name : str\n name for this coil\n\n \"\"\"\n\n _io_attrs_ = Coil._io_attrs_ + FourierXYZCurve._io_attrs_\n\n def __init__(\n self,\n current=1,\n X_n=[0, 10, 2],\n Y_n=[0, 0, 0],\n Z_n=[-2, 0, 0],\n modes=None,\n grid=None,\n name=\"\",\n ):\n super().__init__(current, X_n, Y_n, Z_n, modes, grid, name)\n\n\nclass FourierPlanarCoil(Coil, FourierPlanarCurve):\n \"\"\"Coil that lines in a plane.\n\n Parameterized by a point (the center of the coil), a vector (normal to the plane),\n and a fourier series defining the radius from the center as a function of a polar\n angle theta.\n\n Parameters\n ----------\n current : float\n current through the coil, in Amperes\n center : array-like, shape(3,)\n x,y,z coordinates of center of coil\n normal : array-like, shape(3,)\n x,y,z components of normal vector to planar 
surface\n r_n : array-like\n fourier coefficients for radius from center as function of polar angle\n modes : array-like\n mode numbers associated with r_n\n grid : Grid\n default grid for computation\n name : str\n name for this coil\n\n \"\"\"\n\n _io_attrs_ = Coil._io_attrs_ + FourierPlanarCurve._io_attrs_\n\n def __init__(\n self,\n current=1,\n center=[10, 0, 0],\n normal=[0, 1, 0],\n r_n=2,\n modes=None,\n grid=None,\n name=\"\",\n ):\n super().__init__(current, center, normal, r_n, modes, grid, name)\n\n\nclass CoilSet(Coil, MutableSequence):\n \"\"\"Set of coils of different geometry.\n\n Parameters\n ----------\n coils : Coil or array-like of Coils\n collection of coils\n currents : float or array-like of float\n currents in each coil, or a single current shared by all coils in the set\n \"\"\"\n\n _io_attrs_ = Coil._io_attrs_ + [\"_coils\"]\n\n def __init__(self, *coils, name=\"\"):\n assert all([isinstance(coil, (Coil)) for coil in coils])\n self._coils = list(coils)\n self._name = str(name)\n\n @property\n def name(self):\n \"\"\"str: Name of the curve.\"\"\"\n return self._name\n\n @name.setter\n def name(self, new):\n self._name = str(new)\n\n @property\n def coils(self):\n \"\"\"list: coils in the coilset.\"\"\"\n return self._coils\n\n @property\n def current(self):\n \"\"\"list: currents in each coil.\"\"\"\n return [coil.current for coil in self.coils]\n\n @current.setter\n def current(self, new):\n if jnp.isscalar(new):\n new = [new] * len(self)\n for coil, cur in zip(self.coils, new):\n coil.current = cur\n\n @property\n def grid(self):\n \"\"\"Grid: nodes for computation.\"\"\"\n return self.coils[0].grid\n\n @grid.setter\n def grid(self, new):\n for coil in self.coils:\n coil.grid = new\n\n def compute_coordinates(self, *args, **kwargs):\n \"\"\"Compute real space coordinates using underlying curve method.\"\"\"\n return [coil.compute_coordinates(*args, **kwargs) for coil in self.coils]\n\n def compute_frenet_frame(self, *args, **kwargs):\n \"\"\"Compute Frenet frame using underlying curve method.\"\"\"\n return [coil.compute_frenet_frame(*args, **kwargs) for coil in self.coils]\n\n def compute_curvature(self, *args, **kwargs):\n \"\"\"Compute curvature using underlying curve method.\"\"\"\n return [coil.compute_curvature(*args, **kwargs) for coil in self.coils]\n\n def compute_torsion(self, *args, **kwargs):\n \"\"\"Compute torsion using underlying curve method.\"\"\"\n return [coil.compute_torsion(*args, **kwargs) for coil in self.coils]\n\n def compute_length(self, *args, **kwargs):\n \"\"\"Compute the length of the curve using underlying curve method.\"\"\"\n return [coil.compute_length(*args, **kwargs) for coil in self.coils]\n\n def translate(self, *args, **kwargs):\n \"\"\"Translate the coils along an axis.\"\"\"\n [coil.translate(*args, **kwargs) for coil in self.coils]\n\n def rotate(self, *args, **kwargs):\n \"\"\"Rotate the coils about an axis.\"\"\"\n [coil.rotate(*args, **kwargs) for coil in self.coils]\n\n def flip(self, *args, **kwargs):\n \"\"\"Flip the coils across a plane.\"\"\"\n [coil.flip(*args, **kwargs) for coil in self.coils]\n\n def compute_magnetic_field(self, coords, params={}, basis=\"rpz\"):\n \"\"\"Compute magnetic field at a set of points.\n\n Parameters\n ----------\n coords : array-like shape(n,3) or Grid\n coordinates to evaluate field at [R,phi,Z] or [x,y,z]\n params : dict or array-like of dict, optional\n parameters to pass to curves, either the same for all curves,\n or one for each member\n basis : {\"rpz\", \"xyz\"}\n basis for 
input coordinates and returned magnetic field\n\n Returns\n -------\n field : ndarray, shape(n,3)\n magnetic field at specified points, in either rpz or xyz coordinates\n \"\"\"\n if isinstance(params, dict):\n params = [params] * len(self)\n assert len(params) == len(self)\n B = 0\n for coil, par in zip(self.coils, params):\n B += coil.compute_magnetic_field(coords, par, basis)\n\n return B\n\n @classmethod\n def linspaced_angular(\n cls, coil, current=None, axis=[0, 0, 1], angle=2 * np.pi, n=10, endpoint=False\n ):\n \"\"\"Create a coil set by repeating a coil n times rotationally.\n\n Parameters\n ----------\n coil : Coil\n base coil to repeat\n current : float or array-like, shape(n,)\n current in (each) coil, overrides coil.current\n axis : array-like, shape(3,)\n axis to rotate about\n angle : float\n total rotational extend of coil set.\n n : int\n number of copies of original coil\n endpoint : bool\n whether to include a coil at final angle\n \"\"\"\n assert isinstance(coil, Coil)\n if current is None:\n current = coil.current\n currents = jnp.broadcast_to(current, (n,))\n coils = []\n phis = jnp.linspace(0, angle, n, endpoint=endpoint)\n for i in range(n):\n coili = coil.copy()\n coili.rotate(axis, angle=phis[i])\n coili.current = currents[i]\n coils.append(coili)\n return cls(*coils)\n\n @classmethod\n def linspaced_linear(\n cls, coil, current=None, displacement=[2, 0, 0], n=4, endpoint=False\n ):\n \"\"\"Create a coil group by repeating a coil n times in a straight line.\n\n Parameters\n ----------\n coil : Coil\n base coil to repeat\n current : float or array-like, shape(n,)\n current in (each) coil\n displacement : array-like, shape(3,)\n total displacement of the final coil\n n : int\n number of copies of original coil\n endpoint : bool\n whether to include a coil at final point\n \"\"\"\n assert isinstance(coil, Coil)\n if current is None:\n current = coil.current\n currents = jnp.broadcast_to(current, (n,))\n displacement = jnp.asarray(displacement)\n coils = []\n a = jnp.linspace(0, 1, n, endpoint=endpoint)\n for i in range(n):\n coili = coil.copy()\n coili.translate(a[i] * displacement)\n coili.current = currents[i]\n coils.append(coili)\n return cls(*coils)\n\n @classmethod\n def from_symmetry(cls, coils, NFP, sym=False):\n \"\"\"Create a coil group by reflection and symmetry.\n\n Given coils over one field period, repeat coils NFP times between\n 0 and 2pi to form full coil set.\n\n Or, give coils over 1/2 of a field period, repeat coils 2*NFP times\n between 0 and 2pi to form full stellarator symmetric coil set.\n\n Parameters\n ----------\n coils : Coil, CoilGroup, Coilset\n base coil or collection of coils to repeat\n NFP : int\n number of field periods\n sym : bool\n whether coils should be stellarator symmetric\n \"\"\"\n if not isinstance(coils, CoilSet):\n coils = CoilSet(coils)\n coilset = []\n if sym:\n # first reflect/flip original coilset\n # ie, given coils [1,2,3] at angles [0, pi/6, 2pi/6]\n # we want a new set like [1,2,3,flip(3),flip(2),flip(1)]\n # at [0, pi/6, 2pi/6, 3pi/6, 4pi/6, 5pi/6]\n flipped_coils = []\n normal = jnp.array([-jnp.sin(jnp.pi / NFP), jnp.cos(jnp.pi / NFP), 0])\n for coil in coils[::-1]:\n fcoil = coil.copy()\n fcoil.flip(normal)\n fcoil.flip([0, 0, 1])\n fcoil.current = -1 * coil.current\n flipped_coils.append(fcoil)\n coils = coils + flipped_coils\n for k in range(0, NFP):\n coil = coils.copy()\n coil.rotate(axis=[0, 0, 1], angle=2 * jnp.pi * k / NFP)\n coilset.append(coil)\n\n return cls(*coilset)\n\n def __add__(self, other):\n 
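# supports CoilSet + CoilSet as well as CoilSet + list/tuple of coils\n        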
if isinstance(other, (CoilSet)):\n return CoilSet(*self.coils, *other.coils)\n if isinstance(other, (list, tuple)):\n return CoilSet(*self.coils, *other)\n raise TypeError\n\n # dunder methods required by MutableSequence\n def __getitem__(self, i):\n return self.coils[i]\n\n def __setitem__(self, i, new_item):\n if not isinstance(new_item, Coil):\n raise TypeError(\"Members of CoilSet must be of type Coil.\")\n self._coils[i] = new_item\n\n def __delitem__(self, i):\n del self._coils[i]\n\n def __len__(self):\n return len(self._coils)\n\n def insert(self, i, new_item):\n \"\"\"Insert a new coil into the coilset at position i.\"\"\"\n if not isinstance(new_item, Coil):\n raise TypeError(\"Members of CoilSet must be of type Coil.\")\n self._coils.insert(i, new_item)\n\n def __repr__(self):\n \"\"\"Get the string form of the object.\"\"\"\n return (\n type(self).__name__\n + \" at \"\n + str(hex(id(self)))\n + \" (name={}, with {} submembers)\".format(self.name, len(self))\n )\n","sub_path":"desc/coils.py","file_name":"coils.py","file_ext":"py","file_size_in_byte":14924,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"586542397","text":"from typing import *\nimport random\nimport copy\nfrom pathlib import Path\n\nimport yaml\nfrom seutil import LoggingUtils, IOUtils, BashUtils\n\nfrom csevo.Environment import Environment\nfrom csevo.Macros import Macros\nfrom csevo.ml.TACCRunner import TACCRunnerConsts\n\n\nclass BiLSTMRunner:\n\n logger = LoggingUtils.get_logger(__name__, LoggingUtils.DEBUG if Environment.is_debug else LoggingUtils.INFO)\n\n def __init__(self, work_dir: Path, year: int, eval_setting: str):\n self.year = year\n self.eval_setting = eval_setting\n self.work_dir: Path = work_dir / f\"{self.eval_setting}-{self.year}\"\n self.model_data_dir: Path = Macros.data_dir / \"models-data\" / \"Bi-LSTM\"\n self.base_config_file: Path = Macros.python_dir / \"configs\" / \"Bi-LSTM.yaml\"\n self.code_dir: Path = self.work_dir / \"code\"\n self.data_dir = self.work_dir / \"data\"\n\n return\n\n REPO_URL = \"https://github.com/JiyangZhang/OpenNMT-py.git\"\n REPO_SHA = \"60125c807d1cb18099a69dbfba699bcdf30560b1\"\n CONDA_ENV = \"csevo\"\n\n def prepare(self):\n self.prepare_code()\n self.prepare_data()\n self.prepare_configs_and_scripts(list(range(Macros.trials)))\n return\n\n def prepare_code(self):\n IOUtils.rm_dir(self.code_dir)\n IOUtils.mk_dir(self.code_dir.parent)\n with IOUtils.cd(self.code_dir.parent):\n BashUtils.run(f\"git clone {self.REPO_URL} {self.code_dir.name}\", expected_return_code=0)\n # end with\n\n with IOUtils.cd(self.code_dir):\n BashUtils.run(f\"git checkout {self.REPO_SHA}\", expected_return_code=0)\n # end with\n\n # copy eval code\n BashUtils.run(f\"cp {Macros.this_dir}/eval/eval_utils.py {self.code_dir}/\")\n return\n\n def prepare_data(self):\n data_prefix = f\"{self.eval_setting}-{self.year}\"\n IOUtils.rm_dir(self.data_dir)\n IOUtils.mk_dir(self.data_dir)\n\n # build dataset used by Open-NMT\n BashUtils.run(f\"cp {self.model_data_dir}/{data_prefix}-{Macros.train}/biLSTM* {self.data_dir}/\",\n expected_return_code=0)\n\n BashUtils.run(f\"cp {self.model_data_dir}/{data_prefix}-{Macros.test_common}/src-test.txt {self.data_dir}/src-{Macros.test_common}.txt\", expected_return_code=0)\n BashUtils.run(f\"cp {self.model_data_dir}/{data_prefix}-{Macros.test_common}/tgt-test.txt {self.data_dir}/tgt-{Macros.test_common}.txt\", expected_return_code=0)\n\n BashUtils.run(f\"cp 
{self.model_data_dir}/{data_prefix}-{Macros.test_standard}/src-test.txt {self.data_dir}/src-{Macros.test_standard}.txt\", expected_return_code=0)\n BashUtils.run(f\"cp {self.model_data_dir}/{data_prefix}-{Macros.test_standard}/tgt-test.txt {self.data_dir}/tgt-{Macros.test_standard}.txt\", expected_return_code=0)\n\n return\n\n def prepare_configs_and_scripts(self, trials: List[int]):\n with open(self.base_config_file, \"r\") as f:\n base_config = yaml.safe_load(f)\n exp_dir = self.work_dir\n for trial in trials:\n seed = random.randint(0,9)\n trial_dir = exp_dir/f\"trial-{trial}\"\n IOUtils.mk_dir(trial_dir)\n\n config = copy.copy(base_config)\n config[\"data\"] = str(self.data_dir/\"biLSTM\")\n config[\"save_model\"] = str(trial_dir/\"bestLSTM\")\n config_file = trial_dir/\"config.yaml\"\n with open(config_file, \"w+\") as f:\n yaml.dump(config, f)\n\n train_script_file = trial_dir/\"train.sh\"\n train_script = f\"#!/bin/bash\\n\" \\\n f\"source {TACCRunnerConsts.conda_init_path[TACCRunnerConsts.get_cur_cluster()]}\\n\" \\\n f\"module load cuda/10.1 cudnn/7.6.2\\n\" \\\n f\"conda activate {self.CONDA_ENV}\\n\" \\\n f\"cd {self.code_dir}\\n\" \\\n f\"export MKL_SERVICE_FORCE_INTEL=1\\n\"\\\n f\"python3 train.py --config {config_file} --world_size 1 --gpu_ranks 0 -keep_checkpoint 1 \" \\\n f\"-seed {seed} &> {trial_dir}/train-log.txt\\n\"\n IOUtils.dump(train_script_file, train_script, IOUtils.Format.txt)\n BashUtils.run(f\"chmod +x {train_script_file}\", expected_return_code=0)\n\n for test_type in [Macros.test_common, Macros.test_standard]:\n\n test_script_file = trial_dir/f\"{test_type}.sh\"\n output_file = trial_dir / f\"output_{test_type}.txt\"\n test_script = f\"#!/bin/bash\\n\" \\\n f\"source {TACCRunnerConsts.conda_init_path[TACCRunnerConsts.get_cur_cluster()]}\\n\" \\\n f\"module load cuda/10.1 cudnn/7.6.2\\n\" \\\n f\"conda activate {self.CONDA_ENV}\\n\" \\\n f\"cd {self.code_dir}\\n\" \\\n f\"export MKL_SERVICE_FORCE_INTEL=1\\n\"\\\n f\"python3 translate.py \"\\\n f\"--model {trial_dir}/*.pt --output {output_file} --src {self.data_dir}/src-{test_type}.txt \"\\\n f\"&> {trial_dir}/{test_type}-log.txt\\n\" \\\n f\"python3 eval_utils.py \" \\\n f\"{self.data_dir}/tgt-{test_type}.txt {output_file} {trial_dir}/results_{test_type}.json\\n\"\n IOUtils.dump(test_script_file, test_script, IOUtils.Format.txt)\n BashUtils.run(f\"chmod +x {test_script_file}\", expected_return_code=0)\n\n # end for\n\n return\n","sub_path":"python/csevo/ml/BiLSTMRunner.py","file_name":"BiLSTMRunner.py","file_ext":"py","file_size_in_byte":5533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"245692460","text":"import random\n\n\ndef random_value_printer():\n while True:\n # get the value from self.send(value)\n value = yield\n print(str.format('[~] random_value_printer() : {}', value))\n\n\ndef random_value_generator(c):\n while True:\n # create a random value in the selected range\n value = random.randint(0, 10)\n\n print(str.format(\n '[~] random_value_generator() generated random : {}', value\n ))\n\n # send the random value into the provided generator\n c.send(value)\n yield # same as `yield None`, so the caller's next() gets None\n\n\ndef main():\n # create unbound generator object\n rg = random_value_printer()\n\n # prime it to the first yield expression\n rg.send(None)\n\n # create yet another generator object and send into it the first generator\n # object\n pg = random_value_generator(rg)\n\n # iterate the second generator in a for loop 
10 times\n for i in range(1, 11):\n print(str.format('[*] {} iteration', i))\n next(pg)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"core/builtin_features/generators/subgenerator/app3.py","file_name":"app3.py","file_ext":"py","file_size_in_byte":1110,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"640926860","text":"from django.test import TestCase\nfrom datetime import date\nfrom hello.forms import BioForm, RequestsPriorityForm, MessagingForm\nfrom django.forms.fields import Field\nfrom dateutil.relativedelta import relativedelta\nfrom hello.models import Requests\nfrom model_mommy import mommy\nfrom django.contrib.auth.models import User\n\n\nclass BioFormTest(TestCase):\n\n def test_bio_form_valid_data(self):\n \"\"\"form is valid when correct data is in form\"\"\"\n form = BioForm({\n 'name': 'Artem',\n 'last_name': 'Test_Lastname',\n 'birthdate': date(1992, 11, 12),\n 'email': 'Test@test.com',\n 'jabber': 'TestJ@ja.com',\n 'skype': 'Test_skype',\n 'other_contacts': 'Test_contacts',\n 'bio': 'Testbio',\n })\n\n self.assertTrue(form.is_valid())\n\n saved_form = form.save()\n\n self.assertEqual(saved_form.name, 'Artem')\n self.assertEqual(saved_form.last_name, 'Test_Lastname')\n self.assertEqual(saved_form.birthdate, date(1992, 11, 12))\n self.assertEqual(saved_form.email, 'Test@test.com')\n self.assertEqual(saved_form.jabber, 'TestJ@ja.com')\n self.assertEqual(saved_form.skype, 'Test_skype')\n self.assertEqual(saved_form.other_contacts, 'Test_contacts')\n self.assertEqual(saved_form.bio, 'Testbio')\n\n def test_bio_form_blank_data(self):\n \"\"\"form is not valid with incorrect data\"\"\"\n form = BioForm({})\n self.assertFalse(form.is_valid())\n\n error_message = Field.default_error_messages['required'].capitalize()\n\n self.assertEqual(form.errors, {\n 'name': [error_message],\n 'last_name': [error_message],\n 'birthdate': [error_message],\n 'email': [error_message],\n })\n\n def test_bio_form_min_date(self):\n \"\"\"form is not valid when birthdate is more than 100 years ago\"\"\"\n form = BioForm({\n 'name': 'Artem',\n 'last_name': 'Test_Lastname',\n 'birthdate': date.today() - relativedelta(years=101),\n 'email': 'Test@test.com',\n })\n\n self.assertFalse(form.is_valid())\n\n def test_bio_form_max_date(self):\n \"\"\"form is not valid when birthdate is later than today\"\"\"\n form = BioForm({\n 'name': 'Artem',\n 'last_name': 'Test_Lastname',\n 'birthdate': date.today() + relativedelta(days=1),\n 'email': 'Test@test.com',\n })\n\n self.assertFalse(form.is_valid())\n\n\nclass RequestsPriorityFormTest(TestCase):\n\n def create_test_requests(self):\n requests_data = mommy.make(Requests, _quantity=10)\n requests_objects_user = [request.user for request in requests_data]\n return {'requests_data': requests_data,\n 'requests_objects_user': requests_objects_user}\n\n def test_requests_priority_form_valid_data(self):\n \"\"\"form is valid when correct data is in form\"\"\"\n self.create_test_requests()\n\n instance = Requests.objects.get(id=1)\n\n form = RequestsPriorityForm({\n 'priority': 2\n }, instance=instance)\n\n self.assertTrue(form.is_valid())\n\n saved_form = form.save()\n\n self.assertEqual(saved_form.priority, 2)\n\n def test_requests_priority_form_blank_data(self):\n \"\"\"form is not valid with incorrect data\"\"\"\n form = RequestsPriorityForm({})\n\n self.assertFalse(form.is_valid())\n\n error_message = Field.default_error_messages['required'].capitalize()\n\n 
self.assertEqual(form.errors, {\n 'priority': [error_message],\n })\n\n\nclass MessagingFormTest(TestCase):\n\n def create_test_user(self):\n objects = mommy.make(User)\n return objects\n\n def test_messaging_form_valid_data(self):\n \"\"\"form is valid when correct data is in form\"\"\"\n self.create_test_user()\n\n message = 'GoodMorning'\n channel = '1_2'\n form = MessagingForm({\n 'text': message,\n 'channel': channel\n })\n\n self.assertTrue(form.is_valid())\n\n user = User.objects.get(pk=1)\n saved_form = form.save(commit=False)\n saved_form.sender = user\n saved_form.save()\n\n self.assertEqual(saved_form.text, message)\n self.assertEqual(saved_form.sender, user)\n\n def test_messaging_form_blank_data(self):\n \"\"\"form is not valid with incorrect data\"\"\"\n form = MessagingForm({})\n\n self.assertFalse(form.is_valid())\n\n error_message = Field.default_error_messages['required'].capitalize()\n\n self.assertEqual(form.errors, {\n 'text': [error_message],\n 'channel': [error_message]\n })\n","sub_path":"apps/hello/tests/test_hello_forms.py","file_name":"test_hello_forms.py","file_ext":"py","file_size_in_byte":4720,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"496483509","text":"from django.urls import path\n\nfrom . import views\n\napp_name = 'polls' # namespacing: https://docs.djangoproject.com/en/2.0/intro/tutorial03/#namespacing-url-names \nurlpatterns = [\n #---url patterns without using \"generic views\"\n # ex: /polls/\n #path('', views.index, name='index'),\n # ex: /polls/5/\n #path('specifics/<int:question_id>/', views.detail, name='detail'),\n # ex: /polls/5/results/\n #path('<int:question_id>/results/', views.results, name='results'),\n # ex: /polls/5/vote/\n #path('<int:question_id>/vote/', views.vote, name='vote'),\n #URL for bootstrap testing\n #path('bootstrap', views.bt4, name='bt4'),\n\n #---this code uses \"generic views\"\n path('jinja/', views.Jinja2TestsView.as_view(), name='jinja'),\n path('', views.IndexView.as_view(), name='index'),\n path('<int:pk>/', views.DetailView.as_view(), name='detail'),\n path('<int:pk>/results/', views.ResultsView.as_view(), name='results'),\n path('<int:question_id>/vote/', views.vote, name='vote'), \n path('base/',views.BaseView.as_view(),name='base'),\n path('register/', views.UserFormView.as_view(), name='register'),\n path('question/add', views.QuestionCreate.as_view(), name='question_add'),\n path('choice/add', views.ChoiceCreate.as_view(), name='choice_add'),\n path('author/add', views.AuthorCreate.as_view(), name='author-create'),\n\n]","sub_path":"polls/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"181692229","text":"import FWCore.ParameterSet.Config as cms\n\nimport random\nimport math\n\nfrom Configuration.StandardSequences.Eras import eras\nprocess = cms.Process('SIM',eras.Run2_2016)\n\n# import of standard 
configurations\nprocess.load(\"CondCore.CondDB.CondDB_cfi\")\nprocess.load('Configuration.StandardSequences.Services_cff')\nprocess.load('SimGeneral.HepPDTESSource.pythiapdt_cfi')\nprocess.load('FWCore.MessageService.MessageLogger_cfi')\nprocess.load('Configuration.EventContent.EventContent_cff')\nprocess.load('Configuration.StandardSequences.MagneticField_cff')\nprocess.load('Configuration.StandardSequences.Generator_cff')\nprocess.load('IOMC.EventVertexGenerators.VtxSmearedRealistic25ns13TeV2016Collision_cfi')\nprocess.load('GeneratorInterface.Core.genFilterSummary_cff')\nprocess.load('Configuration.StandardSequences.SimIdeal_cff')\nprocess.load('Configuration.StandardSequences.EndOfProcess_cff')\nprocess.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')\n\nprocess.load('Configuration.Geometry.GeometryExtended2016_CTPPS_cff')\n\nprocess.RandomNumberGeneratorService.generator.initialSeed = cms.untracked.uint32(random.randint(0,900000000))\n\nnEvent_ = 1000\nprocess.maxEvents = cms.untracked.PSet(\n input = cms.untracked.int32(nEvent_)\n )\n\nprocess.source = cms.Source(\"EmptySource\")\n\"\"\"\nprocess.source = cms.Source(\"EmptySource\",\n #firstRun = cms.untracked.uint32(306572), # 2016H data\n #firstTime = cms.untracked.uint64(6487615523004612608) # this is needed because it lacks the MC tag, run based\n #firstRun = cms.untracked.uint32(273730), # 2016H data\n #firstTime = cms.untracked.uint64(6286859745043152896) # this is needed because it lacks the MC tag, run based\n firstRun = cms.untracked.uint32(282730), # 2016H data\n firstTime = cms.untracked.uint64(6339435345951588352) # this is needed because it lacks the MC tag, run based\n)\n\"\"\"\n\nprocess.options = cms.untracked.PSet()\n\n\nfrom Configuration.AlCa.GlobalTag import GlobalTag\nprocess.GlobalTag = GlobalTag(process.GlobalTag, 'auto:run2_mc', '')\n#process.GlobalTag = GlobalTag(process.GlobalTag, \"106X_dataRun2_v26\")\n\n# beam optics\n\n# generator\n\nphi_min = -math.pi\nphi_max = math.pi\nt_min = 0.\nt_max = 2.\nxi_min = 0.02\nxi_max = 0.2\necms = 13000.\n\nprocess.generator = cms.EDProducer(\"RandomtXiGunProducer\",\n PGunParameters = cms.PSet(\n PartID = cms.vint32(2212),\n MinPhi = cms.double(phi_min),\n MaxPhi = cms.double(phi_max),\n ECMS = cms.double(ecms),\n Mint = cms.double(t_min),\n Maxt = cms.double(t_max),\n MinXi = cms.double(xi_min),\n MaxXi = cms.double(xi_max)\n ),\n Verbosity = cms.untracked.int32(0),\n psethack = cms.string('single protons'),\n FireBackward = cms.bool(True),\n FireForward = cms.bool(True),\n firstRun = cms.untracked.uint32(1),\n )\n\n\nprocess.ProductionFilterSequence = cms.Sequence(process.generator)\n\n############\nprocess.o1 = cms.OutputModule(\"PoolOutputModule\",\n outputCommands = cms.untracked.vstring('keep *'),\n fileName = cms.untracked.string('step1_SIM2016.root')\n )\n\nprocess.generation_step = cms.Path(process.pgen)\nprocess.simulation_step = cms.Path(process.psim)\n\n\nprocess.genfiltersummary_step = cms.EndPath(process.genFilterSummary)\nprocess.outpath = cms.EndPath(process.o1)\nprocess.schedule = cms.Schedule(process.generation_step,process.genfiltersummary_step,process.simulation_step,process.outpath)\n\n# filter all path with the production filter sequence\nfor path in process.paths:\n getattr(process,path)._seq = process.ProductionFilterSequence * 
getattr(process,path)._seq\n\n","sub_path":"SimPPS/Configuration/test/pg_step1_GEN_SIM_2016.py","file_name":"pg_step1_GEN_SIM_2016.py","file_ext":"py","file_size_in_byte":3758,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"294445434","text":"import pandas as pd\n\n\nclass Event(object):\n r\"\"\"\n Event base class\n \"\"\"\n def __init__(self, timestamp, ticker):\n self.timestamp = timestamp\n self.ticker = ticker\n\n @property\n def type(self):\n return self.__class__\n\n def __repr__(self):\n return '<' + self.__class__.__name__ + ': ' + self.__str__() + '>'\n\n def __str__(self):\n return 'Timestamp: %s, Ticker: %s' % (str(self.timestamp), str(self.ticker))\n\n\nclass MarketEvent(Event):\n r\"\"\"\n Market Event update the ticker.last_timestamp value\n \"\"\"\n def __init__(self, timestamp, ticker):\n super(MarketEvent, self).__init__(timestamp, ticker)\n ticker.last_timestamp = timestamp\n\n\nclass TickEvent(MarketEvent):\n r\"\"\"\n Tick Event is a Market event with data: volume_bid, bid, ask, volume_ask\n\n It updates the ticker.last_value value with (bid + ask) / 2\n\n Data_handler -> Strategy\n \"\"\"\n def __init__(self, timestamp, ticker, volume_bid, bid, ask, volume_ask):\n super(TickEvent, self).__init__(timestamp, ticker)\n self.volume_bid = volume_bid\n self.bid = bid\n self.ask = ask\n self.volume_ask = volume_ask\n self.ticker.last_value = (bid + ask) / 2\n\n def to_frame(self):\n df = pd.DataFrame([{'timestamp': self.timestamp, 'ticker': self.ticker.symbol,\n 'volume_bid': self.volume_bid, 'bid': self.bid,\n 'ask': self.ask, 'volume_ask': self.volume_ask}]\n ).set_index(['timestamp', 'ticker'])\n return df\n\n def __str__(self):\n return super(TickEvent, self).__str__() + ', Volume Bid: %s, Bid: %s, Ask: %s, Volume Ask: %s' % \\\n (str(self.volume_bid), str(self.bid), str(self.ask), str(self.volume_ask))\n\n\nclass BarEvent(MarketEvent):\n r\"\"\"\n Bar Event is a Market event with data:\n period, open_price, high_price, low_price, close_price,\n volume, and adj_close_price (None, if not povided)\n\n It updates the ticker.last_value value with adj_close_price or close_price\n\n Data_handler -> Strategy\n \"\"\"\n def __init__(self, timestamp, ticker, period, open_price, high_price, low_price, close_price,\n volume=None, adj_close_price=None):\n super(BarEvent, self).__init__(timestamp, ticker)\n self.period = period\n self.open_price = open_price\n self.high_price = high_price\n self.low_price = low_price\n self.close_price = close_price\n self.volume = volume\n self.adj_close_price = adj_close_price\n self.ticker.last_value = adj_close_price if adj_close_price else close_price\n\n def to_frame(self):\n df = pd.DataFrame([{'timestamp': self.timestamp, 'ticker': self.ticker.symbol,\n 'open': self.open_price, 'high': self.high_price, 'low': self.low_price,\n 'close': self.close_price, 'volume': self.volume, 'adj_close': self.adj_close_price}]\n ).set_index(['timestamp', 'ticker'])\n return df\n\n def __str__(self):\n return super(BarEvent, self).__str__() + \\\n ', Period: %s, Open: %s, High: %s, Low: %s, Close: %s, Adj Close: %s, Volume: %s' %\\\n (str(self.period), str(self.open_price), str(self.high_price), str(self.low_price),\n str(self.close_price), str(self.adj_close_price), str(self.volume))\n\n\nclass HistoricalDataEvent(MarketEvent):\n r\"\"\"\n Historical Data Event is a Market event with data as a panda dataframe\n It updates the ticker.last_timestamp value with the last value of the index\n Base class 
for HistoricalTickEvent and HistoricalBarEvent, do not instantiate!\n \"\"\"\n def __init__(self, ticker, data_frame, period=86400):\n super(HistoricalDataEvent, self).__init__(data_frame.index[-1][0], ticker)\n self.df = data_frame\n self.period = period\n\n @property\n def start(self):\n return self.df.index[0][0]\n\n @property\n def end(self):\n return self.df.index[-1][0]\n\n @property\n def length(self):\n return len(self.df.index)\n\n def upsert(self, market_event):\n \"\"\"update or insert a row in the data frame from a market event\"\"\"\n if self.ticker != market_event.ticker:\n raise TypeError\n df = market_event.to_frame()\n self.df = pd.concat([self.df[~self.df.index.isin(df.index)], df]).sort_index()\n\n\nclass HistoricalTickEvent(HistoricalDataEvent):\n r\"\"\"\n Historical Data Event is a Market event with data as a panda dataframe\n It updates the ticker.last_timestamp value with the last value of the index\n\n Data_handler -> Strategy\n \"\"\"\n def __init__(self, ticker, data_frame):\n super(HistoricalTickEvent, self).__init__(ticker, data_frame)\n self.ticker.last_value = (self.df.iloc[-1]['bid'] + self.df.iloc[-1]['ask']) / 2\n self.ticker.last_timestamp = self.df.index[-1][0]\n\n def __str__(self):\n s = super(HistoricalTickEvent, self).__str__() + '\\n'\n row = self.df.iloc[0]\n s += 'First row: Timestamp: %s, Volume Bid: %s, Bid: %s, Ask: %s, Volume Ask: %s \\n' %\\\n (str(self.df.index[0][0]), str(row['volume_bid']), str(row['bid']), str(row['ask']), str(row['volume_ask']))\n row = self.df.iloc[-1]\n s += 'Last row: Timestamp: %s, Volume Bid: %s, Bid: %s, Ask: %s, Volume Ask: %s' %\\\n (str(self.df.index[-1][0]), str(row['volume_bid']), str(row['bid']), str(row['ask']), str(row['volume_ask']))\n return s\n\n\nclass HistoricalBarEvent(HistoricalDataEvent):\n r\"\"\"\n Historical Data Event is a Market event with data as a panda dataframe\n It updates the ticker.last_timestamp value with the last value of the index\n\n Data_handler -> Strategy\n \"\"\"\n def __init__(self, ticker, data_frame):\n super(HistoricalBarEvent, self).__init__(ticker, data_frame)\n if self.df.iloc[-1]['adj_close']:\n self.ticker.last_value = self.df.iloc[-1]['adj_close']\n else:\n self.ticker.last_value = self.df.iloc[-1]['close']\n self.ticker.last_timestamp = self.df.index[-1][0]\n\n def __str__(self):\n s = super(HistoricalBarEvent, self).__str__() + '\\n'\n row = self.df.iloc[0]\n s += 'First row: Timestamp: %s, Open: %s, High: %s, Low: %s, Close: %s, Adj Close: %s, Volume: %s \\n' %\\\n (str(self.df.index[0][0]), str(row['open']), str(row['high']), str(row['low']),\n str(row['close']), str(row['adj_close']), str(row['volume']))\n row = self.df.iloc[-1]\n s += 'Last row: Timestamp: %s, Open: %s, High: %s, Low: %s, Close: %s, Adj Close: %s, Volume: %s' %\\\n (str(self.df.index[-1][0]), str(row['open']), str(row['high']), str(row['low']),\n str(row['close']), str(row['adj_close']), str(row['volume']))\n return s\n\n\nclass ActionEvent(Event):\n r\"\"\"\n Action base class\n It has a timestamp, a ticker and an action\n \"\"\"\n def __init__(self, timestamp, ticker, action):\n super(ActionEvent, self).__init__(timestamp, ticker)\n self.action = action\n\n def __str__(self):\n return super(ActionEvent, self).__str__() + ', Action: %s' % (str(self.action))\n\n\nclass SignalEvent(ActionEvent):\n r\"\"\"\n Signal Event is an action event with a suggested quantity\n Strategy -> Portfolio\n \"\"\"\n def __init__(self, timestamp, ticker, action, suggested_quantity=None):\n super(SignalEvent, 
self).__init__(timestamp, ticker, action)\n self.suggested_quantity = suggested_quantity\n\n def __str__(self):\n return super(SignalEvent, self).__str__() + ', Suggested Quantity: %s' % (str(self.suggested_quantity))\n\n\nclass OrderEvent(ActionEvent):\n r\"\"\"\n Order Event is an action event with a quantity:\n Portfolio -> Execution\n \"\"\"\n def __init__(self, timestamp, ticker, action, quantity):\n super(OrderEvent, self).__init__(timestamp, ticker, action)\n self.quantity = quantity\n\n def __str__(self):\n return super(OrderEvent, self).__str__() + ', Quantity: %s' % (str(self.quantity))\n\n\nclass FillEvent(OrderEvent):\n r\"\"\"\n Fill Event: Execution -> Portfolio\n \"\"\"\n def __init__(self, timestamp, ticker, action, quantity, price, exchange, commission):\n super(FillEvent, self).__init__(timestamp, ticker, action, quantity)\n self.price = price\n self.exchange = exchange\n self.commission = commission\n\n def __str__(self):\n return super(FillEvent, self).__str__() + ', Price: %s, Exchange: %s, Commission: %s' % \\\n (str(self.price), str(self.exchange), str(self.commission))\n","sub_path":"yatt/event.py","file_name":"event.py","file_ext":"py","file_size_in_byte":8686,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"592926769","text":"class StackNode:\n '''\n Class of listNode is a data type which holds a value and a link to the next\n node in a list. This object is used in conjunction with linked list\n '''\n\n def __init__(self, value):\n self._value = value\n self._link = None\n\n def value(self):\n return self._value\n\n def link(self):\n return self._link\n\n def newlink(self, node):\n self._link = node\n\n def __str__(self):\n return self._value\n\n\nclass Stack:\n\n def __init__(self):\n self._length = 0\n self._top = None\n \n def top(self):\n return self._top\n\n def length(self):\n return self._length\n \n def empty(self):\n return self._top == None\n \n def push(self, value):\n newNode = StackNode(value)\n\n p = self._top\n newNode.newlink(p)\n\n self._top = newNode\n self._length += 1\n\n def pop(self):\n returnVal = self.peek()\n \n if self._top != None:\n\n if self._top.link() == None:\n self._top = None\n else:\n p = self._top.link()\n self._top = p\n\n self._length -= 1\n \n return returnVal\n\n def peek(self):\n if self._top == None:\n return None\n else:\n return self._top\n\n def __str__(self):\n s = \"\"\n n = self._top\n\n # iterate through list one item at a time until last item reached\n while (n != None):\n # check if last item in list has been reached\n # if so, do not place a comma after the item\n if n.link() != None:\n\n # check if item is type string. 
If so, place it in quotations\n if type(n.value()) == str:\n s += \"'\" + str(n.value()) + \"', \"\n else:\n s += str(n.value()) + \", \"\n else:\n s += str(n.value())\n\n # advance to next item in list\n n = n.link()\n\n return (\"[\" + s + \"]\")\n\n\n \nsomeStack = Stack() \nsomeStack.push('a')\nsomeStack.pop( )\nsomeStack.push('b')\nsomeStack.push('c')\nsomeStack.push('d')\nsomeStack.push('e')\nsomeStack.pop( )\nd = someStack.pop( )\n\nprint(d)\nprint(someStack)\n\n# b, d, c, a, e","sub_path":"Data Structures/Day 3/stack_class.py","file_name":"stack_class.py","file_ext":"py","file_size_in_byte":2275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"411616961","text":"# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import\nimport os\nimport sys\n\n# add up one level dir into sys path\nsys.path.append(os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))))\nos.environ['DJANGO_SETTINGS_MODULE'] = 'luckyplatform.settings'\n\nimport logging\n\nfrom luckyapi.logic.crowdfunding import start_next_activity\n\nfrom luckycommon.db import goods as goods_db\nfrom luckycommon.db import activity as activity_db\n\n\n_LOGGER = logging.getLogger('worker')\n\n\nCARD_CONF = (\n ( # 10:00 AM \n {'tid': 506, 'goods_id': 520, 'stock': 500},\n {'tid': 460, 'goods_id': 474, 'stock': 300},\n {'tid': 461, 'goods_id': 475, 'stock': 250},\n {'tid': 462, 'goods_id': 476, 'stock': 200},\n {'tid': 463, 'goods_id': 477, 'stock': 150},\n ),\n ( # 15:00 AM\n {'tid': 506, 'goods_id': 520, 'stock': 500},\n {'tid': 460, 'goods_id': 474, 'stock': 300},\n {'tid': 461, 'goods_id': 475, 'stock': 250},\n {'tid': 462, 'goods_id': 476, 'stock': 200},\n {'tid': 463, 'goods_id': 477, 'stock': 150},\n ),\n ( # 20:00 AM\n {'tid': 506, 'goods_id': 520, 'stock': 500},\n {'tid': 460, 'goods_id': 474, 'stock': 300},\n {'tid': 461, 'goods_id': 475, 'stock': 250},\n {'tid': 462, 'goods_id': 476, 'stock': 200},\n {'tid': 463, 'goods_id': 477, 'stock': 150},\n ),\n)\n\n\ndef add_stock(goods_id, stock):\n goods = goods_db.get_goods(goods_id)\n total = goods.total + stock\n goods_db.upsert_goods({'total': total}, int(goods_id))\n _LOGGER.info('auto card add stock %s to %s', goods_id, total)\n\n\ndef start_activity(template_id):\n template = activity_db.get_template(template_id)\n if template.status != 1:\n query_dct = {\n 'status': 1,\n }\n need_start = activity_db.update_template(template_id, query_dct)\n _LOGGER.info('auto card update template %s', template_id)\n activity = start_next_activity(0, template_id)\n _LOGGER.info('auto card start activity, id:%s', activity.id)\n\n\ndef start(index):\n card_list = CARD_CONF[index]\n for card_conf in card_list:\n template_id = card_conf['tid']\n goods_id = card_conf['goods_id']\n stock = card_conf['stock']\n add_stock(goods_id, stock)\n start_activity(template_id)\n\n\nif __name__ == \"__main__\":\n index = int(sys.argv[1])\n start(index)\n","sub_path":"luckycommon/zero/start_card.py","file_name":"start_card.py","file_ext":"py","file_size_in_byte":2395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"382298738","text":"import logging\nfrom typing import List\n\nfrom tqdm import tqdm\n\nfrom books.problem import Problem, Solution\nfrom books.solvers.scan_choice import scan_choice\n\nlogger = logging.getLogger(__name__)\n\n\nclass ScheduledLibrary:\n def __init__(self, library_id: int, starting_day: int):\n self.library_id = library_id\n self.starting_day = 
starting_day\n\n def starting_today(self, day):\n return day == self.starting_day\n\n\ndef solve(problem: Problem) -> Solution:\n\n # sort books by value\n for library in problem.libraries:\n library.sort_books_by_value(problem.books)\n\n solution = Solution()\n scheduled_library_ids: List[ScheduledLibrary] = []\n\n # how many signup rounds we want to do\n signup_rounds = 4\n signup_round = 0\n\n for day in tqdm(range(problem.number_of_days)):\n logger.debug(\"Starting day %s\", day)\n if not scheduled_library_ids:\n new_signup_day = day\n signup_round += 1\n goal_day = signup_round * (problem.number_of_days / signup_rounds)\n logger.debug(\"Scheduling libraries until day %s\", goal_day)\n\n ranked_library_ids = rank_libraries(\n problem, solution.scanning_library_ids, problem.number_of_days - day\n )\n while new_signup_day < goal_day and ranked_library_ids:\n new_signup_id = ranked_library_ids.pop(0)\n new_signup_day += problem.libraries[new_signup_id].signup_days\n scheduled_library_ids.append(\n ScheduledLibrary(new_signup_id, new_signup_day)\n )\n logger.debug(\n \"Scheduled new library %s to start at day %s\",\n new_signup_id,\n new_signup_day,\n )\n\n if scheduled_library_ids and scheduled_library_ids[0].starting_today(day):\n new_scheduled_id = scheduled_library_ids.pop(0).library_id\n solution.queue_library(new_scheduled_id)\n logger.debug(\"Signed up new library %s\", new_scheduled_id)\n\n planned_books = scan_choice(problem, solution.scanning_library_ids)\n plan_books(problem, solution, planned_books)\n\n return solution\n\n\ndef plan_books(problem: Problem, solution: Solution, planned_books: dict):\n for place in solution.scanning_queue:\n book_ids_for_library = planned_books.get(place.library_id, [])\n place.book_ids.extend(book_ids_for_library)\n for book_id in book_ids_for_library:\n problem.remove_book(book_id)\n\n\ndef rank_libraries(\n problem: Problem, signed_libraries: List[int], days_left: int\n) -> List[int]:\n ranked_libraries = []\n\n for library_id, _ in enumerate(problem.libraries):\n if library_id not in signed_libraries:\n value = library_value(library_id, problem, signed_libraries, days_left)\n ranked_libraries.append((value, library_id))\n\n ranked_libraries.sort(reverse=True)\n return [library_id for _, library_id in ranked_libraries]\n\n\ndef library_value(\n library_id: int, problem: Problem, signed_libraries: List[int], days_left: int\n) -> float:\n library = problem.libraries[library_id]\n\n # how many books it can scan in the remaining days\n book_capacity = (days_left - library.signup_days) * library.capacity\n\n # the most valuable books should already be at the front of the library list;\n # divide each book's value by the number of libraries already signed up with that book\n total_value = 0.0\n for book_id in library.book_ids[:book_capacity]:\n amount_of_libraries = 0\n for signed_library_id in signed_libraries:\n signed_library = problem.libraries[signed_library_id]\n if book_id in signed_library.book_ids:\n amount_of_libraries += 1\n total_value += float(problem.books[book_id].value / (amount_of_libraries + 1))\n\n logger.debug(\"Found value %s for library %s\", total_value, library_id)\n return total_value\n","sub_path":"books/solvers/bookworm.py","file_name":"bookworm.py","file_ext":"py","file_size_in_byte":3948,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"570546259","text":"# Copyright 2018 Sebastien Alix \n# License LGPL-3.0 or later (http://www.gnu.org/licenses/lgpl).\n\"\"\"Provides the :class:`ProxyJSON` class for 
JSON-RPC requests.\"\"\"\nimport json\nimport random\nimport io\n\nimport aiohttp\n\n\ndef encode_data(data):\n try:\n return bytes(data, 'utf-8')\n except TypeError:\n # data is already bytes\n return bytes(data)\n\n\ndef decode_data(data):\n return io.StringIO(data.decode('utf-8'))\n\n\nasync def ProxyJSON(host, port, timeout=120, ssl=False,\n deserialize=True, client_session=None):\n proxy = ProxyJSONAIO(host, port, timeout, ssl, deserialize)\n await proxy._init(client_session)\n return proxy\n\n\nasync def ProxyHTTP(host, port, timeout=120, ssl=False, client_session=None):\n proxy = ProxyHTTPAIO(host, port, timeout, ssl)\n await proxy._init(client_session)\n return proxy\n\n\nclass Proxy(object):\n \"\"\"Base class to implement a proxy to perform requests.\"\"\"\n def __init__(self, host, port, timeout=120, ssl=False):\n self._root_url = \"{http}{host}:{port}\".format(\n http=(ssl and \"https://\" or \"http://\"), host=host, port=port)\n self._timeout = timeout\n self._builder = URLBuilder(self)\n\n async def _init(self, client_session):\n if client_session is None:\n client_session = aiohttp.ClientSession()\n self._client_session = client_session\n\n def __getattr__(self, name):\n return getattr(self._builder, name)\n\n def __getitem__(self, url):\n return self._builder[url]\n\n\nclass ProxyJSONAIO(Proxy):\n \"\"\"The :class:`ProxyJSONAIO` class provides dynamic access\n to all JSON methods.\n \"\"\"\n def __init__(self, host, port, timeout=120, ssl=False, deserialize=True):\n Proxy.__init__(self, host, port, timeout, ssl)\n self._deserialize = deserialize\n\n async def __call__(self, url, params):\n data = json.dumps({\n \"jsonrpc\": \"2.0\",\n \"method\": \"call\",\n \"params\": params,\n \"id\": random.randint(0, 1000000000),\n })\n if url.startswith('/'):\n url = url[1:]\n full_url = '/'.join([self._root_url, url])\n headers = {'Content-Type': 'application/json'}\n response = await self._client_session.post(\n full_url, data=encode_data(data), headers=headers)\n resp_data = await response.read()\n if not self._deserialize:\n return resp_data\n return json.load(decode_data(resp_data))\n\n\nclass ProxyHTTPAIO(Proxy):\n \"\"\"The :class:`ProxyHTTPAIO` class provides dynamic access\n to all HTTP methods.\n \"\"\"\n async def __call__(self, url, data=None, headers=None):\n full_url = '/'.join([self._root_url, url])\n encoded_data = encode_data(data) if data else None\n return await self._client_session.post(\n full_url, data=encoded_data,\n headers=headers, timeout=self._timeout)\n\n\nclass URLBuilder(object):\n \"\"\"Auto-builds a URL while getting its attributes.\n Used by the :class:`ProxyJSONAIO` and :class:`ProxyHTTPAIO` classes.\n \"\"\"\n def __init__(self, rpc, url=None):\n self._rpc = rpc\n self._url = url\n\n def __getattr__(self, path):\n new_url = self._url and '/'.join([self._url, path]) or path\n return URLBuilder(self._rpc, new_url)\n\n def __getitem__(self, path):\n if path and path[0] == '/':\n path = path[1:]\n if path and path[-1] == '/':\n path = path[:-1]\n return getattr(self, path)\n\n def __call__(self, **kwargs):\n return self._rpc(self._url, kwargs)\n\n def __str__(self):\n return self._url\n","sub_path":"odoorpc_aio/rpc/jsonrpclib.py","file_name":"jsonrpclib.py","file_ext":"py","file_size_in_byte":3631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"521013097","text":"\"\"\"\nCopyright 2017 Deepgram\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the 
License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\n\nimport colorsys\nimport os\n\nimport itertools\nfrom collections import OrderedDict\n\nimport numpy\nimport tempfile\n\nfrom . import TrainingHook\nfrom ...loggers import PersistentLogger, Statistic\n\nimport logging\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport math\nlogger = logging.getLogger(__name__)\nfrom ...utils import DisableLogging, idx\n\n###############################################################################\nclass PlotWeightsHook(TrainingHook):\n\t\"\"\" Hook for creating plots of loss.\n\t\"\"\"\n\n\t###########################################################################\n\t@classmethod\n\tdef get_name(cls):\n\t\t\"\"\" Returns the name of the hook.\n\t\t\"\"\"\n\t\treturn 'plot_weights'\n\n\t###########################################################################\n\tdef __init__(self, plot_directory, weight_file, with_weights, plot_every_n_epochs, *args, **kwargs):\n\t\t\"\"\" Creates a new plot hook for plotting weights of layers\n\t\t\"\"\"\n\n\t\tsuper().__init__(*args, **kwargs)\n\n\t\tself.directory = plot_directory\n\t\tif not os.path.exists(self.directory):\n\t\t\tos.makedirs(self.directory)\n\n\t\tself.plot_every_n_epochs = plot_every_n_epochs\n\n\t\tif weight_file is None:\n\t\t\tself.weight_file = None\n\t\telse:\n\t\t\tself.weight_file = weight_file\n\n\t\tself.with_weights = with_weights\n\n\t\ttry:\n\t\t\timport matplotlib\t\t\t\t\t# pylint: disable=import-error\n\t\texcept:\n\t\t\tlogger.exception('Failed to import \"matplotlib\". 
Make sure it is '\n\t\t\t\t'installed, and if you have continued trouble, please check '\n\t\t\t\t'out our troubleshooting page: https://kur.deepgram.com/'\n\t\t\t\t'troubleshooting.html#plotting')\n\t\t\traise\n\n\t\t# Set the matplotlib backend to one of the known backends.\n\t\tmatplotlib.use('Agg')\n\n\t###########################################################################\n\tdef notify(self, status, log=None, info=None, model=None):\n\t\t\"\"\" Creates the plot.\n\t\t\"\"\"\n\n\t\tfrom matplotlib import pyplot as plt\t# pylint: disable=import-error\n\n\n\n\t\tif status not in (\n\t\t\t# the plotting is allowed only at end of epoch\n\t\t\tTrainingHook.EPOCH_END,\n\t\t):\n\n\t\t\treturn\n\n\n\t\tweight_path = None\n\t\ttempdir = tempfile.mkdtemp()\n\t\tweight_path = os.path.join(tempdir, 'current_epoch_model')\n\t\tmodel.save(weight_path)\n\n\t\tdef plot_weights(kernel_filename):\n\n\t\t\tfilename_cut_dir = kernel_filename[kernel_filename.find(\"dense\") :]\n\n\t\t\tw = idx.load(kernel_filename)\n\n\t\t\tw_min = np.min(w)\n\t\t\tw_max = np.max(w)\n\n\n\t\t\ts1, s2 = w.shape\n\t\t\tif s1 < s2:\n\t\t\t\tw = w.reshape((s2, s1))\n\n\t\t\tflattend_pixels, num_classes = w.shape\n\t\t\tnum_grids = math.ceil(math.sqrt(num_classes))\n\t\t\twidth_pixels = math.ceil(math.sqrt(flattend_pixels))\n\n\t\t\tfig, axes = plt.subplots(num_grids, num_grids)\n\t\t\tfig.subplots_adjust(hspace=0.3, wspace=0.3)\n\n\t\t\tfor i, ax in enumerate(axes.flat):\n\t\t\t\tif i s4:\n\t\t\t\tw = w.reshape((s3, s4, s2, s1))\n\n\t\t\tnum_filters = w.shape[3]\n\t\t\tnum_grids = math.ceil(math.sqrt(num_filters))\n\n\t\t\tfig, axes = plt.subplots(num_grids, num_grids)\n\t\t\tfor i, ax in enumerate(axes.flat):\n\t\t\t\tif i -1 and this_file.find(weight_keywords[1]) > -1:\n\n\t\t\t\t\t\tif weight_keywords[0].find(\"recurrent\") > -1 or weight_keywords[1].find(\"recurrent\") > -1:\n\t\t\t\t\t\t\tplot_rnn_weights(this_file)\n\n\t\t\t\t\t\tif weight_keywords[0].find(\"convol\") > -1 or weight_keywords[1].find(\"convol\") > -1:\n\t\t\t\t\t\t\tplot_conv_weights(this_file)\n\n\t\t\t\t\t\tif weight_keywords[0].find(\"dense\") > -1 or weight_keywords[1].find(\"dense\") > -1:\n\t\t\t\t\t\t\tplot_weights(this_file)\n","sub_path":"kur/model/hooks/plot_weights_hook.py","file_name":"plot_weights_hook.py","file_ext":"py","file_size_in_byte":6238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"384475050","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\nPytest conftest file for RHV tests\n\"\"\"\n\nimport pytest\n\n\n@pytest.fixture(scope=\"session\", autouse=True)\ndef prepare_env(request):\n \"\"\"\n Since we need ART to initialized imports are done here\n \"\"\"\n import helpers\n import config\n from art.rhevm_api import resources\n from art.rhevm_api.tests_lib.low_level import hosts as ll_hosts\n\n def finalizer():\n \"\"\"\n Teardown after all tests\n \"\"\"\n # Check unfinished jobs after all tests\n helpers.get_unfinished_jobs_list()\n\n # Clean up all storage domains which are not in GE yaml\n helpers.storage_cleanup()\n request.addfinalizer(finalizer)\n\n \"\"\" Set unfinished jobs to FINISHED status before run tests \"\"\"\n helpers.clean_unfinished_jobs_on_engine()\n\n hosts_list = ll_hosts.get_host_list()\n assert hosts_list, \"No hosts in setup\"\n for host in hosts_list:\n config.HOSTS.append(host.name)\n config.HOSTS_IP.append(host.address)\n config.VDS_HOSTS.append(resources.VDS(host.address, config.HOSTS_PW))\n\n if ll_hosts.is_hosted_engine_configured(\n 
host_name=hosts_list[0].get_name()\n ):\n config.VM_NAME.append(config.HE_VM)\n config.SD_LIST.append(config.HE_STORAGE_DOMAIN)\n\n helpers.storage_cleanup()\n\n\n@pytest.fixture(autouse=True)\ndef append_captured_log_to_item_stdout(request, caplog):\n \"\"\"\n This fixture will add captured report sections for each item,\n which will be parsed by the junitxml pytest plugin, to produce\n the xml file.\n \"\"\"\n yield\n for when in ('setup', 'call', 'teardown'):\n records = caplog.get_records(when)\n for record in records:\n request.node.add_report_section(\n when, 'stdout',\n record.message.decode('utf-8', errors='replace') + '\\n')\n","sub_path":"art/tests/rhevmtests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":1838,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"156336342","text":"from jinja2 import Environment, PackageLoader\n\nenv = Environment(loader=PackageLoader('templating', 'templates'))\n\ndef render_main_nav(main_nav):\n template = env.get_template('main_nav.html') \n return template.render(main_nav=main_nav)\n\n\ndef render_section_nav(section_nav):\n template = env.get_template('interior_nav.html')\n return template.render(section_nav=section_nav)\n\n\ndef render_main_template(content):\n template = env.get_template('interior_base.html')\n return template.render(\n title=content[\"title\"],\n breadcrumb=content[\"breadcrumb\"],\n body_classes=content[\"body_classes\"]\n )\n","sub_path":"template_render.py","file_name":"template_render.py","file_ext":"py","file_size_in_byte":630,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"529971117","text":"import os\r\nimport cv2\r\nimport numpy as np\r\nimport random\r\nfrom munkres import Munkres\r\nfrom skimage import io, color\r\nfrom skimage import data, img_as_float\r\nfrom skimage.transform import resize\r\nfrom skimage.color import deltaE_cie76\r\nimport multiprocessing as mp\r\nimport math\r\n\r\n\r\nivdis=[]\r\nM=[]\r\n\r\ndef _dir_is_image(i):\r\n suffix=os.path.basename(i).split('.')[-1]\r\n return suffix in ['jpg','jpeg','png']\r\n\r\n\r\ndef _video_dynamic(path):\r\n print('######',path)\r\n cap = cv2.VideoCapture(path)\r\n if cap.isOpened() == False:\r\n print(\"######Error opening video stream or file\")\r\n ret, frame1 = cap.read()\r\n\r\n prvs = cv2.cvtColor(frame1,cv2.COLOR_BGR2GRAY)\r\n prvs = cv2.resize(prvs, (270, 480), interpolation=cv2.INTER_CUBIC)\r\n\r\n all_his = []\r\n while(1):\r\n ret, frame2 = cap.read()\r\n\r\n if not ret:\r\n break\r\n\r\n next = cv2.cvtColor(frame2,cv2.COLOR_BGR2GRAY)\r\n next = cv2.resize(next, (270, 480), interpolation=cv2.INTER_CUBIC)\r\n flow_next = cv2.calcOpticalFlowFarneback(prvs,next, None, 0.5, 3, 15, 3, 5, 1.2, 0)\r\n all_his.append(np.sqrt(np.sum(flow_next**2)))\r\n prvs = next\r\n\r\n cap.release()\r\n cv2.destroyAllWindows()\r\n return np.sum(all_his)/len(all_his)\r\n\r\ndef cal(error):\r\n print(error)\r\n\r\n'''\r\ndef _image_image_dissim(i,j):\r\n img0=data.load(i)\r\n img1=data.load(j)\r\n img0 = resize(img0, (224, 224))\r\n img1 = resize(img1, (224, 224))\r\n img0 = img_as_float(img0)\r\n img1 = img_as_float(img1)\r\n similarity = ssim(img0, img1, data_range=max(img0.max(),img1.max()) - min(img0.min(),img1.min()),multichannel=True)\r\n return 1-similarity\r\n\r\n'''\r\ndef wtf(error):\r\n print(error)\r\n\r\ndef _image_image_dissim(i,j):\r\n\r\n img0 = io.imread(i)\r\n img1 = io.imread(j)\r\n img0 = resize(img0, (224, 224))\r\n 
img0 = color.rgb2lab(img0)\r\n img1 = resize(img1, (224, 224))\r\n img1 = color.rgb2lab(img1)\r\n diff = deltaE_cie76(img0, img1)\r\n res = sum(sum(diff**2))/(224*224)\r\n return res\r\n\r\ndef _image_video_dissim_par(img0,j):\r\n\r\n #img1 = io.imread(j)\r\n img1 = resize(j, (224, 224))\r\n img1 = color.rgb2lab(img1)\r\n diff = deltaE_cie76(img0, img1)\r\n res = sum(sum(diff**2))/(224*224)\r\n return res\r\n\r\n\r\ndef collect1(result):\r\n global ivdis\r\n ivdis.append(result)\r\n\r\n\r\ndef _video_video_dissim_par(img0,lis):\r\n k=[]\r\n for j in list(range(len(lis))):\r\n diff = deltaE_cie76(img0, lis[j])\r\n res = sum(sum(diff ** 2)) / (224 * 224)\r\n k.append(res)\r\n return k\r\n\r\n\r\ndef collect2(result):\r\n global M\r\n M.append(result)\r\n\r\n'''\r\ndef _image_video_dissim(i,j):\r\n global ivdis\r\n\r\n img0=io.imread(i)\r\n ldiff=100000000\r\n img0 = resize(img0, (224, 224))\r\n img0 = color.rgb2lab(img0)\r\n \r\n ivdis=[]\r\n\r\n pool = mp.Pool()\r\n \r\n # Step 2: parallel by row\r\n for frame in _video_frames(j):\r\n pool.apply_async( _image_video_dissim_par, args=(img0,frame), callback=collect1,error_callback=wtf)\r\n\r\n # Step 3: Don't forget to close\r\n pool.close()\r\n pool.join()\r\n\r\n return min(ivdis)\r\n '''\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef _image_video_dissim(i, j):\r\n img0 = io.imread(i)\r\n ldiff = math.inf\r\n img0 = resize(img0, (224, 224))\r\n img0 = color.rgb2lab(img0)\r\n\r\n for frame in _video_frames(j):\r\n img1 = resize(frame, (224, 224))\r\n img1 = color.rgb2lab(img1)\r\n\r\n diff = deltaE_cie76(img0, img1)\r\n res = sum(sum(diff ** 2)) / (224 * 224)\r\n\r\n if ldiff > res:\r\n ldiff = res\r\n\r\n return ldiff\r\n\r\n\r\n\r\n'''\r\ndef _image_video_dissim(i,j):\r\n # img0=data.load(i)\r\n # mst_sim=0\r\n # for frame in _video_frames(j):\r\n # img1= frame\r\n # img0 = resize(img0, (224, 224))\r\n # img1 = resize(img1, (224, 224))\r\n # img0 = img_as_float(img0)\r\n # img1 = img_as_float(img1)\r\n # similarity = ssim(img0, img1, data_range=max(img0.max(),img1.max()) - min(img0.min(),img1.min()),multichannel=True)\r\n # if mst_sim len(fr_list2):\r\n list1 = [fr_list1[i] for i in random.sample(range(len(fr_list1)), len(fr_list2))] # list to seq?\r\n fr_list1 = list1\r\n elif len(fr_list1) < len(fr_list2):\r\n list2 = [fr_list2[i] for i in random.sample(range(len(fr_list2)), len(fr_list1))] # list to seq?\r\n fr_list2 = list2\r\n\r\n length=len(fr_list1)\r\n M = []\r\n\r\n # tranform them\r\n for i in list(range(length)):\r\n img1=resize(fr_list1[i],(224,224))\r\n fr_list1[i]=color.rgb2lab(img1)\r\n img0 = resize(fr_list2[i], (224, 224))\r\n fr_list2[i] = color.rgb2lab(img0)\r\n\r\n\r\n pool = mp.Pool(8)\r\n\r\n # Step 2: parallel by row\r\n for i in list(range(length)):\r\n pool.apply_async(_video_video_dissim_par, args=(fr_list1[i], fr_list2), callback=collect2,error_callback=cal)\r\n # reordering of rows don't change the result of Hungarian algorithm\r\n pool.close()\r\n pool.join()\r\n\r\n m = Munkres()\r\n indexes = m.compute(M)\r\n total = 0\r\n for row, column in indexes:\r\n value = M[row][column]\r\n total += value\r\n\r\n return total/length\r\n\r\n'''\r\ndef _video_frames(video_dir):\r\n vidcap = cv2.VideoCapture(video_dir)\r\n \r\n success, image = vidcap.read()\r\n # cv2.cvtColor(image,image, cv2.COLOR_BGR2Lab)\r\n while success:\r\n yield image\r\n success, image = vidcap.read()\r\n'''\r\n\r\n\r\ndef _video_frames(video_dir):\r\n vidcap = cv2.VideoCapture(video_dir)\r\n\r\n success, image = vidcap.read()\r\n # cv2.cvtColor(image,image, 
cv2.COLOR_BGR2Lab)\r\n while success:\r\n su,im=vidcap.read()\r\n if(su):\r\n yield im\r\n success, image = vidcap.read()\r\n","sub_path":"videos/AIalgo/SeqAlgo/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":5983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"360866435","text":"import os, sys, getopt\nimport json\nimport datetime\nfrom shutil import rmtree\nimport numpy as np\nfrom dateutil.relativedelta import *\nfrom geotiff import *\nfrom lmoments3 import distr\nimport scipy.stats as stat\n\nimport matplotlib.pylab as plt\nimport matplotlib as mpl\n\ndef simple_plot_index(a,title=\"\", save_img=False,save_dir='.'):\n fig, ax = plt.subplots()\n #cmap='RdBu'\n cmap = mpl.colors.LinearSegmentedColormap.from_list(\"\", [\"red\",\"orange\",\"yellow\",\"white\",\"pink\",\"violet\", \"#0f0f0f\"])\n plt.title(title)\n im = plt.imshow(a, interpolation='none',cmap=cmap)\n plt.colormaps()\n # cbar = fig.colorbar(cax,ticks=[-1, 0, 1, 2, 10],orientation='vertical')\n cbar = fig.colorbar(im, orientation='vertical')\n #plt.clim(-1,1)\n # cbar.ax.set_yticklabels(['< -1', '0', 1, 2,'> 10'])# vertically oriented\n # colorbar\n #ax.format_coord = Formatter(im)\n if save_img:\n plt.savefig(os.path.join(save_dir,title+'.png'))\n else:\n plt.show()\n\n\ndef get_all_dates(foldAdd, nameIndex):\n # remove path separator from end of folder address\n if foldAdd[-1] == os.path.sep:\n foldAdd = foldAdd[:-1]\n\n # get list of all files\n allFiles = [x[2] for x in os.walk(foldAdd)]\n\n\n # get dates from file names\n count = len(nameIndex)\n result = []\n for str in allFiles:\n if len(str) > 0:\n for stf in str:\n if nameIndex in stf:\n date=stf.split('_')[1].split(\".\")[0]\n #date = stf[count+1:count+9]\n result.append(date)\n\n result.sort()\n return result\n\n\ndef get_date(dateStr):\n year = int(dateStr[0:4])\n month = int(dateStr[4:6])\n day = int(dateStr[6:8])\n return year, month, day\n\ndef diff_date_str(curDate, refDate):\n dateVal = datetime.datetime.strptime(curDate, \"%Y%m%d\").date()\n refDateVal = datetime.datetime.strptime(refDate, \"%Y%m%d\").date()\n return (dateVal - refDateVal).days\n\n\ndef convert_to_monthly_data (VARfold, VARprefix, indexFold, monthCount):\n\n dateList = get_all_dates(VARfold, VARprefix)\n dateList.sort()\n\n dateListMonths = [i[0:6] for i in dateList ]\n\n oDateFrom = datetime.datetime.strptime(dateList[0],\"%Y%m%d\")\n oDateTo = datetime.datetime.strptime(dateList[-1],\"%Y%m%d\")\n\n #in case month was not completed\n oDateTo = oDateTo.replace(day=1) + relativedelta(months=+1) + relativedelta(days=-1)\n #oDateTo = oDateTo.replace(day=1) + relativedelta(days=-1)\n\n print(oDateFrom.strftime(\"%Y%m%d\"))\n print(oDateTo.strftime(\"%Y%m%d\"))\n\n #oDateFrom = oDateFrom + relativedelta(months=monthCount)\n oDate = oDateFrom\n while (oDate <= oDateTo):\n\n if oDate < oDateFrom + relativedelta(months=monthCount-1):\n # remove the first monthCount from dateList\n #dateList = [s for s in dateList if oDate.strftime(\"%Y%m\") not in s]\n oDate = oDate +relativedelta(months=+1)\n continue\n year = oDate.strftime(\"%Y\")\n month = oDate.strftime(\"%m\")\n\n if oDate.strftime(\"%Y%m\") in dateListMonths:\n\n dateInMonth = []\n\n for m in range(monthCount):\n\n for date in dateList:\n\n oDateAggr=oDate + relativedelta(months = -m)\n\n year = oDateAggr.strftime(\"%Y\")\n month = oDateAggr.strftime(\"%m\")\n\n if year+month in date[0:6]:\n\n dateInMonth.append(date)\n\n if len(dateInMonth)>0:\n\n 
calculate_monthly_average(indexFold, VARfold, VARprefix, dateInMonth, oDate.strftime(\"%Y\"), oDate.strftime(\"%m\"), monthCount)\n\n #dateListMonths.append(oDate.strftime(\"%Y%m\"))\n else:\n print (\"Missing data for : \"+ oDate.strftime(\"%Y%m\"))\n\n oDate = oDate +relativedelta(months=+1)\n\n\ndef calculate_monthly_average(indexFold, VARfold, VARprefix, dateInMonth, year, month, monthCount):\n\n maskSnow , col, row, geoTrans, geoproj = readGeotiff (\"mask_bolivia.tif\")\n\n #maskSnow [maskSnow==0] =np.nan\n\n data3d = np.zeros((row, col, len (dateInMonth)))\n\n for i, key in enumerate(dateInMonth):\n #daFold = os.path.join(foldAdd, key[0:4], key[4:6], key[6:8])\n fileName = os.path.join(VARfold, key[0:4], key[4:6], key[6:8],VARprefix + \"_\" + key + \".tif\")\n data , col, row, geoTrans, geoproj = readGeotiff (fileName)\n #simple_plot_index(data,key)\n data3d [:,:,i] = data\n\n if VARprefix == \"MODIS-PET\":\n data_mean = np.nanmean(data3d,axis=2)/8\n BandName=\"Potential_Evap\"\n else:\n data3d[data3d<1]=0 ### to remove noise from PERSIANN\n data_mean = np.nanmean(data3d,axis=2)\n BandName=\"Prec_accumulation\"\n\n os.system(\"mkdir -p \"+os.path.join(indexFold,str(monthCount)+\"-Month-Files\"))\n\n outFileName= os.path.join(indexFold,str(monthCount)+\"-Month-Files\", VARprefix + \"_\" +year+month+\".tif\".format(year, month))\n\n #simple_plot_index(data_mean)\n print (outFileName)\n\n writeGeotiffSingleBand(outFileName, geoTrans, geoproj, data_mean,nodata=np.nan, BandName=BandName)\n\n\ndef create_statistics(mainFoldAdd, newFold, nameIndex, monthCount):\n dateList = get_all_dates(mainFoldAdd, nameIndex)\n dateList.sort()\n# newFold = os.path.join(mainFoldAdd, \"Normal PDF\")\n if not(os.path.isdir(newFold)):\n os.system (\"mkdir -p \"+newFold)\n\n for month in range(1, 13):\n fileNames = []\n for str in dateList:\n yy = int(str[0:4])\n mm = int(str[4:6])\n if mm == month:\n fileNames.append(nameIndex + \"_\" + str + \".tif\")\n compute_statistics (mainFoldAdd, fileNames, newFold, monthCount, month)\n\n\ndef compute_statistics(foldAdd, fileNames, newFold, monthCount, monthStart):\n print(\"Create statistics GeoTiff for {:d} months from month {:02d}\".\n format(monthCount, monthStart))\n\n mask, col, row, geoTrans, geoproj= readGeotiff(\"mask_bolivia.tif\")\n\n data3d = np.zeros((row,col,len(fileNames)))*np.nan\n\n for i, f in enumerate(fileNames):\n\n data , col, row, geoTrans, geoproj = readGeotiff (os.path.join(foldAdd,f))\n data3d [:,:,i]= data\n\n distr_param =np.zeros((row,col,3))*np.nan\n\n invalid_pixels = 0\n\n for i in range(row):\n for j in range(col):\n\n array = data3d[i,j,:]\n array = array[np.logical_not(np.isnan(array))]\n #fit = stat.genextreme.fit(data3d[i,j,:], loc=0, scale = 1) #loc initial guess\n if len(array) < 4:\n continue\n fit_dict = distr.gev.lmom_fit(data3d[i,j,:])\n fit = (fit_dict['c'],fit_dict['loc'],fit_dict['scale'])\n #print (fit)\n max_distance, p_value = stat.kstest(array,\"genextreme\",args=fit)\n #print(\"Kolmogorov-Smirnov test for goodness of fit: \"+str(round(p_value*100))+\"%, max distance: \"+str(max_distance))\n if p_value < 0.3:\n invalid_pixels += 1\n continue\n#\n distr_param [i,j,0] = fit[0]\n distr_param [i,j,1] = fit[1]\n distr_param [i,j,2] = fit[2]\n\n print (\"Invalid pixels: \" + str(round(invalid_pixels/(row*col)*100))+\"%\")\n\n name = \"Statistics-Prec-PET-{:02d}months-Month{:02d}.tif\".format(monthCount, monthStart)\n name = \"Statistics-Prec-PET-{:02d}months-Month{:02d}.tif\".format(monthCount, monthStart)\n fileStatsOut = 
os.path.join(newFold, name)\n\n writeGeotiff(fileStatsOut,geoTrans, geoproj,distr_param, nodata=np.nan, BandNames=list(fit_dict.keys()),globalDescr = \"SPEI_distr_param_c_loc_scale\")\n\n\n\ndef get_same_values(first, second):\n same = [f for f in first if f in second]\n return same\n\ndef convertPrec_to_monthly_data (PrecFold, PrecNameIndex, PrecmonthlyFold, nmonths):\n\n dateListPrec = get_all_dates(PrecFold, PrecNameIndex)\n\n oDateFrom = datetime.datetime.strptime(dateListPrec[0],\"%Y%m%d\")\n oDateTo = datetime.datetime.strptime(dateListPrec[-1],\"%Y%m%d\")\n\n #oDateFrom = oDateFrom + relativedelta(months=monthCount)\n oDate = oDateFrom\n while (oDate <= oDateTo):\n\n if oDate < oDateFrom + relativedelta(months=monthCount-1):\n # remove the first monthCount from dateList\n #dateList = [s for s in dateList if oDate.strftime(\"%Y%m\") not in s]\n oDate = oDate +relativedelta(months=+1)\n continue\n year = int(oDate.strftime(\"%Y\"))\n month = int(oDate.strftime(\"%m\"))\n\n if oDate.strftime(\"%Y%m\") in dateListMonths:\n convert_to_month_average_data(foldAdd, nameIndex, dateList, newFold, month, year, monthCount)\n #dateListMonths.append(oDate.strftime(\"%Y%m\"))\n else:\n print (\"Missing data for : \"+ oDate.strftime(\"%Y%m\"))\n\n oDate = oDate +relativedelta(months=+1)\n\n return dateListMonths\n\n return dateListPrec\n\ndef computePrec_PET (dateListSPEI, Prec_PETmonthlyFold , PETmonthlyFold, PrecmonthlyFold, PETNameIndex, PrecNameIndex, nmonths):\n\n for date in dateListSPEI:\n filePET = os.path.join (PETmonthlyFold, \"{:d}-Month-Files\".format(nmonths), PETNameIndex+ \"_\" + date + \".tif\")\n filePrec = os.path.join (PrecmonthlyFold, \"{:d}-Month-Files\".format(nmonths), PrecNameIndex+ \"_\" + date + \".tif\")\n\n PET , col, row, geoTrans, geoproj = readGeotiff (filePET)\n\n Prec , col, row, geoTrans, geoproj = readGeotiff (filePrec)\n\n Prec_PET = Prec - PET\n\n months_file_dir = os.path.join (Prec_PETmonthlyFold, \"{:d}-Month-Files\".format(nmonths))\n\n os.system(\"mkdir -p \"+months_file_dir)\n\n filePrec_PET = os.path.join (months_file_dir, \"Prec-PET\"+ \"_\" + date + \".tif\")\n\n writeGeotiffSingleBand (filePrec_PET, geoTrans, geoproj, Prec_PET,nodata=np.nan, BandName=\"Prec_accumulation\")\n print(\"saving: \"+filePrec_PET)\n\n return\n\n\ndef main(argv):\n with open('SPEI_config.json') as jf:\n params = json.load(jf)\n\n # get FAPARFold tif files\n PETFold = params[\"PET_folder\"]\n if PETFold[-1] == os.path.sep:\n PETFold = PETFold[:-1]\n\n PrecFold = params[\"Prec_folder\"]\n if PrecFold[-1] == os.path.sep:\n PrecFold = PrecFold[:-1]\n\n IndexFold = params[\"Index_folder\"]\n if PrecFold[-1] == os.path.sep:\n PrecFold = PrecFold[:-1]\n\n SPEINameIndex = params[\"Index_prefix\"]\n PETNameIndex = params[\"PET_prefix\"]\n PrecNameIndex = params[\"Prec_prefix\"]\n\n PrecPETNameIndex = params[\"Prec-PET_prefix\"]\n\n statsFold = params[\"Stats_folder\"]\n\n aggMonths = params[\"Agg_months\"]\n\n stats = True\n\n opts, a1Args = getopt.getopt(argv,\"hn\",[\"help\",\"nostats\"])\n\n for opt, arg in opts:\n if opt in (\"-n\", \"--nostats\"):\n stats = False\n\n PETmonthlyFold = os.path.join(IndexFold,PETNameIndex+\"-Monthly-Files\")\n PrecmonthlyFold = os.path.join(IndexFold,PrecNameIndex+\"-Monthly-Files\")\n PrecPETmonthlyFold = os.path.join (IndexFold,PrecPETNameIndex+\"-Monthly-Files\")\n\n for nmonths in aggMonths:\n\n # monthly averages\n\n convert_to_monthly_data(PETFold, PETNameIndex, PETmonthlyFold, nmonths)\n\n convert_to_monthly_data(PrecFold, PrecNameIndex, 
PrecmonthlyFold, nmonths)\n\n dateListPET = get_all_dates(os.path.join(PETmonthlyFold,str(nmonths)+\"-Month-Files\"),PETNameIndex)\n\n dateListPrec = get_all_dates(os.path.join(PrecmonthlyFold,str(nmonths)+\"-Month-Files\"),PrecNameIndex)\n\n dateListSPEI = get_same_values(dateListPET,dateListPrec)\n\n computePrec_PET (dateListSPEI, PrecPETmonthlyFold , PETmonthlyFold, PrecmonthlyFold, PETNameIndex, PrecNameIndex, nmonths)\n\n monthsFold = os.path.join(PrecPETmonthlyFold, \"{:d}-Month-Files\".format(nmonths))\n\n if stats:\n\n \tcreate_statistics(monthsFold, statsFold, PrecPETNameIndex, nmonths)\n\nif __name__ == '__main__':\n argv=sys.argv[1:]\n main(argv)\nelse:\n pass\n","sub_path":"indexes/SPEI/SPEI_stat_base.py","file_name":"SPEI_stat_base.py","file_ext":"py","file_size_in_byte":11953,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"88409414","text":"# source : opencv-python-tutroals\r\n# modify : jogilsang\r\n\r\nimport cv2\r\nimport pyautogui\r\nimport os\r\nimport datetime\r\nimport numpy as np\r\n\r\nos.chdir('C:\\\\Users\\\\user\\\\Desktop\\\\opencv')\r\n# C:\\opencv-master\\opencv-master\\data\\haarcascades\r\nface_cascade = cv2.CascadeClassifier('C:\\\\opencv-master\\opencv-master\\\\data\\\\haarcascades\\\\haarcascade_frontalface_default.xml')\r\neye_cascade = cv2.CascadeClassifier('C:\\\\opencv-master\\opencv-master\\\\data\\\\haarcascades\\\\haarcascade_eye.xml')\r\n# fullbody_cascade = cv2.CascadeClassifier('C:\\\\opencv-master\\opencv-master\\\\data\\\\haarcascades\\\\haarcascade_smile.xml')\r\n\r\nimg = cv2.imread('sul.jpg')\r\ngray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\r\nfaces = face_cascade.detectMultiScale(gray, 1.3, 5)\r\n# bodys = fullbody_cascade.detectMultiScale(gray, 1.3, 5)\r\n# faces = face_cascade.detectMultiScale(gray, 1.3)\r\n# faces = face_cascade.detectMultiScale(gray)\r\nfor (x,y,w,h) in faces:\r\n img = cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)\r\n roi_gray = gray[y:y+h, x:x+w]\r\n roi_color = img[y:y+h, x:x+w]\r\n eyes = eye_cascade.detectMultiScale(roi_gray)\r\n for (ex,ey,ew,eh) in eyes:\r\n cv2.rectangle(roi_color,(ex,ey),(ex+ew,ey+eh),(0,255,0),2)\r\n\r\n\r\ncv2.imshow('img',img)\r\ncv2.waitKey(0)\r\ncv2.destroyAllWindows()\r\n","sub_path":"opencv-tutorial-study-master/taskCode/facedetection.py","file_name":"facedetection.py","file_ext":"py","file_size_in_byte":1262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"37877585","text":"#!/usr/bin/env python\n\n\nimport sys\nimport argparse\nfrom pdftools import pdf_add\nfrom pdftools.parseutil import parentparser\n\n\ndef process_arguments(args):\n parser = argparse.ArgumentParser(\n parents=[parentparser],\n description=\"Add pages from a source file to an output PDF file. \"\n \"If the output file does not exist a new file will be \"\n \"created.\")\n\n # destination\n parser.add_argument('dest',\n type=str,\n help='destination pdf file')\n\n # source\n parser.add_argument('source',\n type=str,\n default=None,\n help='pdf source file')\n\n # pages\n parser.add_argument('-p',\n '--pages',\n nargs='+',\n help='list of pages to add to the output. 
'\n                             'Examples: 5 1-9 1- -9')\n    # output\n    parser.add_argument('-o',\n                        '--output',\n                        type=str,\n                        default=None,\n                        help='name of the output file, if None the destination'\n                             'file will be overwritten')\n\n    return parser.parse_args(args)\n\n\nif __name__ == \"__main__\":\n    args = process_arguments(sys.argv[1:])\n    pdf_add(args.dest, args.source, args.pages, args.output)\n","sub_path":"pdfadd.py","file_name":"pdfadd.py","file_ext":"py","file_size_in_byte":1406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"557580824","text":"#-*- coding:utf-8 -*-\n__author__ = \"dongd\"\n__datetime__ = r\"2018\\2\\26 0026\"\n__software__ = \"PyCharm\"\n\nimport unittest\n\nfrom src.commonClass.httpprotocol import MyHttp\n\nfrom src.commonClass.log import logger\n\n\nclass LoginCase(unittest.TestCase):\n    def setUp(self):\n        logger.info(\"login test starting...\")\n        self.my = MyHttp(\"http\",\"yx_jg_web_server\")\n        self.url = \"/smartcommunity/login/toLogin\"\n\n    def tearDown(self):\n        logger.info(\"login test finished...\")\n\n    def test_01(self):\n        data = {\n            \"loginName\":\"admin\",\n            \"password\":\"Adminmint\"\n        }\n        result = self.my.post(url=self.url,data=data)\n        json = result.json()\n        errorCode =json[\"errorCode\"]\n        print(errorCode)\n        self.assertEqual(errorCode,0)\n\nif __name__ == '__main__':\n    unittest.main()","sub_path":"src/testCases/yx_jg_web/test_logincase.py","file_name":"test_logincase.py","file_ext":"py","file_size_in_byte":843,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"594177170","text":"### Fit a sine curve\n### \n\nimport numpy as np\nfrom numpy.random import seed\nimport matplotlib.pyplot as plt\n\nseed(7)#7, 8\n\n# Sample the source data with added noise\nx_max, x_min = 8, 0\nn_samples = 6\nx = (x_max - x_min) * np.random.rand(n_samples) + x_min\nmu, sigma = 0, 0.25\nepsiron = np.random.normal(mu, sigma**2, len(x))\ny = np.sin(x) + epsiron\n\n\n# Display\nplt.scatter(x, y)\nname_fig = \"sample_{}\".format(n_samples)\nplt.title(name_fig)\nplt.xlim([-2, 8])\nplt.ylim([-2, +2])\n\n# Save\nnp.savez(\"samples.npz\", x, y)\npath_fig = \"./img/sample_{}.png\".format(n_samples)\nplt.savefig(path_fig)\n\n\n\n","sub_path":"StatisticalLearning/Linearmodels/Regularization/generate_data.py","file_name":"generate_data.py","file_ext":"py","file_size_in_byte":645,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"264168460","text":"\"\"\"\r\n26/01/2020\r\nFirst version of Tekisuto, literally 'text'.\r\nDevs: Mahli, Cloe\r\nDesigned initially for the general architecture; this function manages ttf file resources and takes very little CPU and GPU.\r\nIts purpose is to display text progressively using a ttf file.\r\n\"\"\"\r\ndef Fpx2601Tekisuto(string,name,integrated,scale):\r\n    text = ''\r\n    for i in range(len(string)):\r\n        text += string[i]\r\n        choose_font = pygame.font.Font(str(name)+\".ttf\",scale)\r\n        text_surface = choose_font.render(text, True, (0,0,0))\r\n        Wind.blit(text_surface,(0,0+integrated))\r\n        pygame.display.flip()\r\n        pygame.time.wait(100)\r\n\"\"\"\r\n26/01/2020\r\nFirst version of TekisutoArial, an Arial version of its parent function.\r\nDevs: Mahli, Cloe\r\nDesigned initially for the architecture; it takes very little CPU and GPU.\r\nIts purpose is to display text progressively using a ttf file.\r\n\"\"\"\r\ndef Fpx2601TekisutoArial(string,integrated,scale):\r\n    text = 
''\r\n    for i in range(len(string)):\r\n        text += string[i]\r\n        arial_font = pygame.font.Font(\"GeneralsRessources/arial.ttf\",scale)\r\n        text_surface = arial_font.render(text, True, (0,0,0))\r\n        Wind.blit(text_surface,(0,0+integrated))\r\n        pygame.display.flip()\r\n        pygame.time.wait(100)\r\n","sub_path":"F1204Tekisuto10.py","file_name":"F1204Tekisuto10.py","file_ext":"py","file_size_in_byte":1376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"581012904","text":"\"\"\"B - Addition and Multiplication\nhttps://atcoder.jp/contests/abc076/tasks/abc076_b\nN\nK\n\n>>> main(4, 3)\n10\n>>> main(10, 10)\n76\n\n\"\"\"\n\n\ndef main(n, k):\n    ans = 2\n\n    for _ in range(n - 1):\n        if (ans * 2) < ans + k:\n            ans *= 2  # A\n        else:\n            ans += k  # B\n\n    print(ans)\n\n\nif __name__ == \"__main__\":\n    n = int(input())\n    k = int(input())\n\n    main(n, k)\n","sub_path":"abc/abc076/abc076_b.py","file_name":"abc076_b.py","file_ext":"py","file_size_in_byte":392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"495717372","text":"import requests\nfrom bs4 import BeautifulSoup\nimport re\nfrom pymongo import MongoClient\n\nclient = MongoClient('localhost', 27017)\ndb = client.dbsparta\n\nheaders = {\n    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64)AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36'}\ndata = requests.get('https://www.genie.co.kr/chart/top200?ditc=D&rtm=N&ymd=20190908', headers=headers)\n\n# Parse the HTML with the BeautifulSoup library so it is easy to search\nsoup = BeautifulSoup(data.text, 'html.parser')\n\n# Use select to load the tbody rows\nsongs = soup.select('#body-content > div.newest-list > div > table > tbody > tr')\n\n# Loop over the movies (the tr elements)\n\nfor song in songs:\n    # If there is an a tag inside the movie,\n    number_tag = song.select_one('tr.list > td.number')\n    if number_tag is not None:\n        number = number_tag.text\n        numbers = re.findall(\"\\d+\",number)\n    title_tag = song.select_one('tr.list > td.info > a.title')\n    if title_tag is not None:\n        title = title_tag.text\n    artist_tag = song.select_one('tr.list > td.info > a.artist')\n    if artist_tag is not None:\n        artist = artist_tag.text\n    print((numbers[0]).strip(), title, artist)\n\n","sub_path":"3w_HW1.py","file_name":"3w_HW1.py","file_ext":"py","file_size_in_byte":1251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"180227428","text":"# https://django-authtools.readthedocs.io/en/latest/how-to/invitation-email.html\n# allows a manual user creation by an admin, without setting a password\n\nfrom django.contrib import admin\nfrom .models import DoubleCountingAgreement, DoubleCountingDocFile, DoubleCountingSourcing, DoubleCountingProduction\n\n\n@admin.register(DoubleCountingAgreement)\nclass DoubleCountingAgreementAdmin(admin.ModelAdmin):\n    list_display = (\n        \"producer\",\n        \"production_site\",\n        \"period_start\",\n        \"period_end\",\n        \"dgec_validated\",\n        \"dgddi_validated\",\n        \"dgpe_validated\",\n    )\n    list_filter = (\n        \"producer\",\n        \"period_start\",\n    )\n\n\n@admin.register(DoubleCountingSourcing)\nclass DoubleCountingSourcingAdmin(admin.ModelAdmin):\n    list_display = (\n        \"dca\",\n        \"year\",\n        \"feedstock\",\n        \"origin_country\",\n        \"metric_tonnes\",\n    )\n    list_filter = (\"year\", \"feedstock\", \"origin_country\")\n\n\n@admin.register(DoubleCountingProduction)\nclass DoubleCountingProductionAdmin(admin.ModelAdmin):\n    list_display = (\n        \"dca\",\n        \"year\",\n        \"biofuel\",\n        \"feedstock\",\n        
\"max_production_capacity\",\n \"estimated_production\",\n \"requested_quota\",\n \"approved_quota\",\n )\n list_filter = (\"year\", \"biofuel\", \"feedstock\")\n\n\n@admin.register(DoubleCountingDocFile)\nclass DoubleCountingDocFileAdmin(admin.ModelAdmin):\n list_display = (\"dca\", \"url\", \"file_name\")\n","sub_path":"web/doublecount/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"164561124","text":"# The MIT License\n#\n# Copyright 2019 Chris Piker\n# \n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell \n# copies of the Software, and to permit persons to whom the Software is \n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in \n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, \n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\"\"\"Looking up data sources and sub catalogs of das catalogs\"\"\"\n\nfrom . node import *\nfrom . source import *\nfrom . streamsrc import *\n\ntry:\n basestring\nexcept NameError:\n basestring = str\n\n# ########################################################################### #\nclass Catalog(Node):\n\t\"\"\"\n\tCatalog objects. 
May contain other Catalogs, Data Source Collections or\n\tSpaseRec objects.\n\t\"\"\"\n\tdef __init__(self, dDef, sSubKey, bStub, bGlobal):\n\t\tsuper(Catalog, self).__init__(dDef, bStub, bGlobal)\n\t\tself.sSubKey = sSubKey\n\n\t\t# If I'm a fully realized node then I'll need to initialize minimal child\n\t\t# objects\n\t\tself.subs = None\n\t\tif not self.bStub:\n\t\t\tself._load_stubs()\n\n\tdef _load_stubs(self):\n\t\t# Load stub versions of sub items\n\n\t\tself.subs = {}\n\t\tfor sKey in self.props[self.sSubKey]:\n\n\t\t\tdDef = self.props[self.sSubKey][sKey]\n\n\t\t\t# Assign a name based on how this appeared in the subitems\n\t\t\tsSep = '/'\n\t\t\tif 'separator' in self.props:\n\t\t\t\tif self.props['separator'] == None: sSep = ''\n\t\t\t\telse: sSep = self.props['separator']\n\n\t\t\tdDef['_path'] = \"%s%s%s\"%(self.props['_path'], sSep, sKey)\n\n\t\t\tif 'type' not in dDef:\n\t\t\t\traise CatalogError(\"'type' missing in sources sub-item %s from %s\"%(\n\t\t\t\t\t\t \tsKey, self.url))\n\n\t\t\t# Factory time:\n\t\t\tif dDef['type'] == 'Catalog':\n\t\t\t\tself.subs[sKey] = Catalog(dDef, 'catalog', STUB, self.bGlobal)\n\t\t\telif dDef['type'] == 'HttpStreamSrc':\n\t\t\t\tself.subs[sKey] = HttpStreamSrc(dDef, STUB, self.bGlobal)\n\t\t\telif dDef['type'] == 'Collection':\n\t\t\t\tself.subs[sKey] = Collection(dDef, STUB, self.bGlobal)\n\t\t\telif dDef['type'] == 'FileAggSrc':\n\t\t\t\tself.subs[sKey] = FileAggSrc(dDef, STUB, self.bGlobal)\n\n\t\t\t# TODO: Add other source types here...\n\n\t\t\telse:\n\t\t\t\traise NotImplementedError(\n\t\t\t\t\t\"Handling of sub-source type %s has not been implemented\"%dDef['type']\n\t\t\t\t)\n\n\tdef load(self):\n\t\tsuper(Catalog, self).load()\n\t\tself._load_stubs()\n\n\n\t# Providing the dictionary interface, taken from the page:\n\t# https://docs.python.org/3/reference/datamodel.html?emulating-container-types#emulating-container-types\n\n\tdef __len__(self):\n\t\tif self.bStub: self.load()\n\t\treturn len(self.subs)\n\n\tdef __getitem__(self, key):\n\t\tif not isinstance(key, basestring):\n\t\t\traise TypeError(\"Expected a string type for sub item key\")\n\t\tif self.bStub: self.load()\n\t\treturn self.subs[key]\n\n\n\t# Not settable, skipping __setitem__, __delitem__\n\tdef __iter__(self):\n\t\tif self.bStub: self.load()\n\t\treturn self.subs.__iter__()\n\n\tdef __contains__(self,key):\n\t\tif not isinstance(key, basestring):\n\t\t\traise TypeError(\"Expected a string type for sub item key\")\n\t\tif self.bStub:\tself.load()\n\t\treturn self.subs.__contains__(key)\n\n\tdef keys(self):\n\t\tif self.bStub: self.load()\n\t\treturn self.subs.keys()\n\n# ########################################################################### #\nclass Collection(Catalog):\n\t\"\"\"\n\tData Source Catalog objects. 
May only contain data source definitions\n\t\"\"\"\n\n\tdef __init__(self, dDef, bStub, bGlobal):\n\n\t\tif ('type' not in dDef) or (dDef['type'] != 'Collection'):\n\t\t\traise CatalogError(\"PyClass, data mismatch, expected 'type' to be \"+\\\n\t\t\t \"'Collection' not '%s'\"%dDef)\n\n\t\tsuper(Collection, self).__init__(dDef, 'sources', bStub, bGlobal)\n\n\tdef _load_stubs(self):\n\t\t# Overload _load_stub so that sub-items can't be catalog types\n\t\tself.subs = {}\n\t\tfor sKey in self.props[self.sSubKey]:\n\n\t\t\tdDef = self.props[self.sSubKey][sKey]\n\t\t\tif 'type' not in dDef:\n\t\t\t\traise CatalogError(\"'type' missing in sources sub-item %s from %s\"%(\n\t\t\t\t\t\t \tsKey, self.url))\n\n\t\t\t# Assign a name based on how this appeared in the subitems\n\t\t\tsSep = '/'\n\t\t\tif 'separator' in self.props:\n\t\t\t\tif self.props['separator'] == None: sSep = ''\n\t\t\t\telse: sSep = self.props['separator']\n\n\t\t\tdDef['_path'] = \"%s%s%s\"%(self.props['_path'], sSep, sKey)\n\n\t\t\tif dDef['type'] == 'HttpStreamSrc':\n\t\t\t\tself.subs[sKey] = HttpStreamSrc(dDef, STUB, self.bGlobal)\n\t\t\telif dDef['type'] == 'FileAggSrc':\n\t\t\t\tself.subs[sKey] = FileAggSrc(dDef, STUB, self.bGlobal)\n\t\t\telse:\n\t\t\t\traise NotImplementedError(\n\t\t\t\t\t\"Illegal sub item type %s in container from %s\"%(dDef['type'],\n\t\t\t\t\tself.props['_url'])\n\t\t\t\t)\n\n\tdef source(self, sPurpose=\"primary\", sPrefType=\"HttpStreamSrc\", sPrefConv=\"das2\"):\n\t\t\"\"\"Get a query interfaces for a data collection\n\n\t\tReturns:\n\t\t\t(dict) A dictionary describing the interfaces available for this\n\t\t\t data collection.\n\t\t\"\"\"\n\n\t\tself.load()\n\n\t\tdGroups = {}\n\n\n\n\n# Group Name Units Default Range Sources\n# ----- ------- ------- ---------- ----------------- -------\n# Time: maximum isotime 2016-05-02 2011-08-05 to now das2\n# minimum isotime 2016-05-01 2011-08-05 to now das2\n# interval seconds 300 das2\n#\n# Efield: units V m**-1 raw, das2\n# V m**-1,\n# V**2 m**2 Hz**-1,\n# V m**-2 H**-1\n#\n# Format: das2_text false boolean das2\n#\n# Reader: negative true boolean das2\n# noise true boolean das2\n# pls true boolean das2\n# threshold true boolean das2\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"das2/container.py","file_name":"container.py","file_ext":"py","file_size_in_byte":6596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"168698294","text":"import pygame, os\n\nclass Plate(pygame.sprite.Sprite):\n WALL_SPRITE = pygame.image.load(os.path.join(\"sprite\",\"rock.png\"))\n PATH_SPRITE = pygame.image.load(os.path.join(\"sprite\",\"yellow.png\"))\n SPRITE_SIZE = (30,30)\n\n def __init__(self,position,block_type):\n self.block_type = block_type\n\n if self.block_type == Plate.WALL_SPRITE:\n self.image = block_type\n elif self.block_type == Plate.PATH_SPRITE:\n self.image = block_type\n\n\n self.rect = self.image.get_rect()\n self.rect_x, self.rect_y = position\n\n\n pygame.sprite.Sprite.__init__(self)\n\n\n def draw(self,surface):\n surface.blit(self.image,(self.rect_x,self.rect_y))\n","sub_path":"plate.py","file_name":"plate.py","file_ext":"py","file_size_in_byte":702,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"577624567","text":"import json\nimport os\n\n__author__ = 'cjiang'\n\ndef data_split(input_dir, seeds_output, testing_output):\n seeds_by_type = {}\n tests_by_type = {}\n for subdir, dirs, files in os.walk(input_dir):\n for file in files:\n if 
file.startswith('.'):\n continue\n with open(subdir + '/' + file) as data_file:\n content = data_file.readlines()\n seeds = []\n tests = []\n type = file.title().split('.')[0]\n for i in range(0, len(content)):\n # if i < len(content)/10:\n # seeds.append(content[i].strip())\n # else:\n tests.append(content[i].strip())\n seeds_by_type[type] = seeds\n tests_by_type[type] = tests\n # with open(seeds_output, 'w+') as output_file:\n # json.dump(seeds_by_type, output_file, sort_keys=True, indent=4, separators=(',', ': '))\n with open(testing_output, 'w+') as output_file:\n json.dump(tests_by_type, output_file, sort_keys=True, indent=4, separators=(',', ': '))\n # return seeds_by_type\n\ndata_split('/Users/cjiang/Downloads/w2v/test/', '/Users/cjiang/Downloads/w2v/qup_data/seeds.txt', '/Users/cjiang/Downloads/w2v/testing.txt')\n# with open('/Users/cjiang/Downloads/w2v/qup_data/cuisines.txt') as f:\n# for i in f.readlines():\n# print i","sub_path":"training_data_generator.py","file_name":"training_data_generator.py","file_ext":"py","file_size_in_byte":1410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"271763561","text":"import json\n\nfrom app.views import QUALITY_METHODS\nfrom app.files.file_work import read_file_if_exists\n\n\nOCRS = ['finereader', 'tesseract']\n\n\ndef append_string_to_file(file_path, string_to_write):\n with open(file_path, \"a\", encoding='utf8') as text_file:\n text_file.write(string_to_write)\n\n\ndef the_main():\n json_results = read_file_if_exists('hocr/results_of_hocr_analysis')\n results_dict = json.loads(json_results)\n for filename, results_by_ocr in results_dict.items():\n method_to_value_list = {method_name: [] for method_name in QUALITY_METHODS.keys()}\n for method_results in map(lambda x: results_by_ocr[x], OCRS):\n for method, value in method_results.items():\n method_to_value_list[method].append(value)\n for method_name, value_list in method_to_value_list.items():\n append_string_to_file(method_name.replace(' ', '_'), ' '.join(map(lambda x: str(x), value_list)) + '\\r\\n')\n\n\nif __name__ == '__main__':\n the_main()\n\n\n\n\n\n\n#\n# from app.text.text_work import count_grams\n#\n# three_grams_result = {}\n#\n# with open('1grams-3.txt', encoding='utf8') as grams_file:\n# for line in grams_file.readlines():\n# freq, word = line.split()\n# word_grams_count = count_grams([word], 3)\n# for three_gram in word_grams_count.keys():\n# if three_gram not in three_grams_result:\n# three_grams_result[three_gram] = 0\n# three_grams_result[three_gram] += word_grams_count[three_gram] * int(freq)\n#\n#\n# def result_grams(grams_result):\n# counts_and_grams = [(-count, gram) for gram, count in grams_result.items()]\n# counts_and_grams.sort()\n# for count, gram in counts_and_grams:\n# yield \"{0}\\t{1}\\r\\n\".format(-count, gram)\n#\n#\n# with open('grams-by3-letters.txt', 'w', encoding='utf8') as output_grams_file:\n# output_grams_file.writelines(result_grams(three_grams_result))","sub_path":"hocr_analysis_results_divider.py","file_name":"hocr_analysis_results_divider.py","file_ext":"py","file_size_in_byte":1921,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"302028524","text":"# -*- coding: utf-8 -*-\n##############################################################################\n#\n# OpenERP, Open Source Management Solution\n# Copyright (C) 2004-2009 Tiny SPRL ().\n# Copyright (C) 2010-2014 OpenERP s.a. 
().\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. If not, see .\n# Terminal view queue: sudo rabbitmqctl list_queues\n# \n##############################################################################\n# ------------- \n#!/usr/bin/env python\n\nimport xmlrpclib\nimport pika\nimport time\n\ndef rpc_getPrice(addr='http://127.0.0.1', port=8069, dbname='mycompany', data={}):\n #time.sleep(1)\n ids = xmlrpclib.ServerProxy('%s:%s/xmlrpc/object' % (addr, port)).execute(\n dbname, 1, '123', 'general.rpc.queue', 'write', [1], data )\n if ids and len(ids) > 1:\n #time.sleep(1)\n return ids\n return {}\n \n\nconnection = pika.BlockingConnection(pika.ConnectionParameters(\n host='localhost'))\n\nchannel = connection.channel()\n\nchannel.queue_declare(queue='rpc_price_queue')\n\ndef on_request(ch, method, props, body):\n data = {'product_ids': eval(body),\n 'type':'price'}\n print(\" [.] price for (%s)\" % data)\n response = rpc_getPrice(addr='http://localhost',port=8069, dbname='lama', data=data)\n\n ch.basic_publish(exchange='',\n routing_key=props.reply_to,\n properties=pika.BasicProperties(correlation_id = \\\n props.correlation_id),\n body=str(response))\n ch.basic_ack(delivery_tag = method.delivery_tag)\n\nchannel.basic_qos(prefetch_count=100)\nchannel.basic_consume(on_request, queue='rpc_price_queue')\nprint(\" [x] Awaiting RPC requests about price\")\nchannel.start_consuming()\n\n \n\n\n# send message\n","sub_path":"backend/custom/odoo_queue/rpc_price_server.py","file_name":"rpc_price_server.py","file_ext":"py","file_size_in_byte":2460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"588300909","text":"import sys\nsys.stdin = open('input.txt', 'r')\n\ndef rec(i, j, cnt):\n global ans\n if cnt >= ans:\n return\n if i == N-1 and j == N-1:\n if cnt < ans:\n ans = cnt\n return\n if i+1 < N:\n rec(i+1, j, cnt+arr[i+1][j])\n if j+1 < N:\n rec(i, j+1, cnt+arr[i][j+1])\n\n \nfor tc in range(1, int(input())+1):\n N = int(input())\n arr = [list(map(int, input().split())) for _ in range(N)]\n ans = 1690\n rec(0, 0, arr[0][0])\n print('#{} {}'.format(tc, ans))","sub_path":"class/hw/apply_2/5188.py","file_name":"5188.py","file_ext":"py","file_size_in_byte":515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"639616735","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('mks', '__first__'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Person',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(max_length=64)),\n ('img_url', models.URLField(null=True, blank=True)),\n ('phone', models.CharField(max_length=20, null=True, blank=True)),\n ('fax', models.CharField(max_length=20, null=True, blank=True)),\n 
('email', models.EmailField(max_length=254, null=True, blank=True)),\n ('family_status', models.CharField(max_length=10, null=True, blank=True)),\n ('number_of_children', models.IntegerField(null=True, blank=True)),\n ('date_of_birth', models.DateField(null=True, blank=True)),\n ('place_of_birth', models.CharField(max_length=100, null=True, blank=True)),\n ('date_of_death', models.DateField(null=True, blank=True)),\n ('year_of_aliyah', models.IntegerField(null=True, blank=True)),\n ('place_of_residence', models.CharField(help_text='an accurate place of residence (for example, an address', max_length=100, null=True, blank=True)),\n ('area_of_residence', models.CharField(help_text='a general area of residence (for example, \"the negev\"', max_length=100, null=True, blank=True)),\n ('place_of_residence_lat', models.CharField(max_length=16, null=True, blank=True)),\n ('place_of_residence_lon', models.CharField(max_length=16, null=True, blank=True)),\n ('residence_centrality', models.IntegerField(null=True, blank=True)),\n ('residence_economy', models.IntegerField(null=True, blank=True)),\n ('gender', models.CharField(blank=True, max_length=1, null=True, choices=[('M', 'Male'), ('F', 'Female')])),\n ('mk', models.ForeignKey(related_name='person', blank=True, to='mks.Member', null=True)),\n ],\n options={\n 'ordering': ('name',),\n 'verbose_name': 'Person',\n 'verbose_name_plural': 'Persons',\n },\n ),\n migrations.CreateModel(\n name='PersonAltname',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(max_length=64)),\n ('person', models.ForeignKey(to='persons.Person')),\n ],\n ),\n migrations.CreateModel(\n name='ProcessedProtocolPart',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('protocol_part_id', models.IntegerField()),\n ],\n ),\n migrations.CreateModel(\n name='Role',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('start_date', models.DateField(null=True)),\n ('end_date', models.DateField(null=True, blank=True)),\n ('text', models.CharField(max_length=1024, null=True, blank=True)),\n ('org', models.TextField(null=True, blank=True)),\n ('person', models.ForeignKey(related_name='roles', to='persons.Person')),\n ],\n ),\n migrations.CreateModel(\n name='Title',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(max_length=64)),\n ],\n ),\n migrations.AddField(\n model_name='person',\n name='titles',\n field=models.ManyToManyField(related_name='persons', to='persons.Title', blank=True),\n ),\n ]\n","sub_path":"kikar_hamedina/persons/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":4146,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"350052001","text":"from random import *\nimport numpy as np\nimport collections\nfrom minimax import *\nfrom tablero import *\nfrom nodos import *\nfrom convertidor import *\n\nclass Tiros:\n def __init__(self):\n currentState = Mat([], 11, 11)\n currentState.Initiate()\n self.State = Thing(currentState)\n self.Ply_num = 2\n self.Score = 0\n self.tabX = 0\n self.tabY = 0\n self.tirosPosiHor = [\n (1, 0),(3,0),(5,0),(7,0),(9,0),(1,2),(3,2),(5,2),(7,2),(9,2),\n (1,4),(3,4),(5,4),(7,4),(9,4),(1,6),(3,6),(5,6),(7,6),(9,6),\n (1,8),(3,8),(5,8),(7,8),(9,8),(1, 
10),(3,10),(5,10),(7,10),(9,10)\n ]\n self.tirosPosiVer = [\n (0,1),(0,3),(0,5),(0, 7),(0,9),(2,1),(2,3),(2,5),(2,7),(2,9),\n (4,1),(4,3),(4,5),(4,7),(4,9),(6,1),(6,3),(6,5),(6,7),(6,9),\n (8,1),(8,3),(8,5),(8,7),(8,9),(10, 1),(10,3),(10,5),(10,7),(10,9)\n ]\n\n def Hum(self):\n horizontal = True\n vertical = True\n tocoHor = False\n tocoVer = False\n HumanX = 0\n HumanY = 0\n\n if (len(self.tirosPosiHor) == 0):\n horizontal = False\n\n elif (len(self.tirosPosiVer) == 0):\n vertical = False \n\n\n if (horizontal == True):\n tocoHor = True\n posi = np.random.randint(0,len(self.tirosPosiHor))\n tiro = str(self.tirosPosiHor[posi])\n del self.tirosPosiHor[posi]\n\n elif (vertical == True):\n tocoVer = True\n posi = np.random.randint(0,len(self.tirosPosiVer))\n tiro = str(self.tirosPosiVer[posi])\n del self.tirosPosiVer[posi]\n\n\n if (len(tiro) == 6):\n tiro1 = tiro[1]\n HumanX = int(tiro1)\n\n tiro2 = tiro[4]\n HumanY = int(tiro2)\n\n\n elif (len(tiro) == 7 and tocoHor):\n tiro1 = tiro[1]\n HumanX = int(tiro1)\n\n tiro2 = tiro[4:6]\n HumanY = int(tiro2)\n\n elif (len(tiro) == 7 and tocoVer):\n tiro1 = tiro[1:3]\n HumanX = int(tiro1)\n\n tiro2 = tiro[5]\n HumanY = int(tiro2)\n\n if (HumanX, HumanY) not in self.State.children:\n self.State.Make(HumanX, HumanY, False)\n self.State = self.State.children[(HumanX, HumanY)]\n else:\n self.State = self.State.children[(HumanX, HumanY)]\n\n self.Inte()\n\n def Inte(self):\n move = MiniMax.miniMax(self.State, self.Ply_num)\n self.State = self.State.children[(move[0], move[1])]\n\n (self.tabX, self.tabY) = Convertidor.coorTab(move[0], move[1])\n\n def start(self):\n self.Hum()\n","sub_path":"AI.py","file_name":"AI.py","file_ext":"py","file_size_in_byte":2670,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"652063988","text":"import hlt\nfrom hlt import NORTH, EAST, SOUTH, WEST, STILL, Move, Square\nimport random\nimport logging\nlogging.basicConfig(filename='example.log',level=logging.INFO)\nlogging.info('Start log ---------------------')\nmyID, game_map = hlt.get_init()\nhlt.send_init(\"attack weakest\")\n\ndef populate_lists(game_map):\n logging.info('****populate_lists****')\n my_squares = []\n empty_squares =[]\n enemies =[]\n enemy_size = []\n enemy_squares = []\n for square in game_map:\n if(square.owner == 0):\n #logging.info('adding empty')\n empty_squares.append(square)\n elif(square.owner == myID):\n #logging.info('adding owned')\n my_squares.append(square)\n else:\n if square.owner not in enemies:\n #logging.info('adding enemy')\n enemies.append(square.owner)\n enemy_squares.append([])\n #logging.info('adding enemy square')\n enemy_squares[enemies.index(square.owner)].append(square)\n #logging.info(\"MySquares \" + str(len(my_squares)))\n #logging.info(\"EmptySquares \" + str(len(empty_squares)))\n for e in enemies:\n #logging.info(\"EnemySquares enemy: \" +str(e) + \" \" + str(len(enemy_squares[enemies.index(e)])))\n enemy_size.append(len(enemy_squares[enemies.index(e)]))\n #logging.info(\"Smallest enemy: \" + str(enemies[enemy_size.index(min(enemy_size))]) + \" | \" + str(min(enemy_size))) \n #logging.info(\"----------------------------\")\n return my_squares, empty_squares, enemies, enemy_size, enemy_squares\n \ndef get_direction(square_1, square_2):\n x = square_1.x - square_2.x\n y = square_1.y - square_2.y\n logging.info('****get_direction****')\n logging.info(square_1)\n logging.info(square_2)\n if(abs(x) > abs(y)):\n if((x < 0 and abs(x) < game_map.width/2) or (x > 0 and abs(x) > 
game_map.width/2)):\n logging.info('Return EAST')\n logging.info('********')\n return EAST\n else:\n logging.info('Return WEST')\n logging.info('********')\n return WEST\n else:\n if((y < 0 and abs(y) < game_map.height/2) or (y > 0 and abs(y) > game_map.height/2)):\n logging.info('Return SOUTH')\n logging.info('********')\n return SOUTH\n else:\n logging.info('Return NORTH')\n logging.info('********')\n return NORTH\ndef get_weakest(square):\n tmp_strength = 1000\n tmp_idx = -1\n for idx, n in enumerate(game_map.neighbors(square)):\n if(n.production > 0):\n if (n.strength < tmp_strength and n.owner != myID):\n tmp_strength = n.strength\n tmp_idx = idx\n return tmp_idx, tmp_strength\n\ndef get_prod_ratio(square):\n tmp_ratio = 1000\n tmp_idx = -1\n tmp_strength = 0\n for idx, n in enumerate(game_map.neighbors(square)):\n if(n.production > 0):\n if (n.strength/n.production < tmp_ratio and n.owner != myID):\n tmp_ratio = n.strength/n.production\n tmp_idx = idx\n tmp_strength = n.strength\n return tmp_idx, tmp_strength\n\ndef kill_smallest_enemy(square, enemies, enemy_size, enemy_squares):\n tmp_distance = game_map.width + game_map.height\n smallest_enemy = enemies[enemy_size.index(min(enemy_size))]\n enemy_index = enemies.index(smallest_enemy)\n #logging.info(\"smallest enemy index: \" + str(enemies.index(smallest_enemy)))\n #logging.info(\"smallest enemy var: \" + str(smallest_enemy))\n #logging.info(\"enemies count: \" + str(len(enemies)))\n for e in enemy_squares[enemy_index]:\n #logging.info(e)\n dist = game_map.get_distance(square, e)\n if(dist < tmp_distance):\n tmp_distance = dist\n enemy_square = e\n return enemy_square\n\ndef assign_move(square):\n wait = False\n i, s = get_weakest(square)\n logging.info(\"i: \" + str(i))\n logging.info(\"s: \" + str(s))\n if(i != -1 and s < square.strength):\n return Move(square, i)\n elif(i != -1 ):\n wait = True\n \n## for idx, n in enumerate(game_map.neighbors(square)):\n## if(n.owner != myID and n.strength < square.strength):\n## return Move(square, idx)\n## elif(n.owner != myID):\n## wait = True\n if(wait or square.strength < 35):\n return Move(square, STILL)\n else:\n return Move(square, get_direction(square, kill_smallest_enemy(square, enemies, enemy_size, enemy_squares)))\n #return Move(square, random.choice((NORTH, WEST, SOUTH, EAST)))\n\nwhile True:\n game_map.get_frame()\n my_squares, empty_squares, enemies, enemy_size, enemy_squares = populate_lists(game_map)\n moves = [assign_move(square) for square in my_squares]\n hlt.send_frame(moves)\n","sub_path":"Halite-Python-Starter-Package/MyBot.py","file_name":"MyBot.py","file_ext":"py","file_size_in_byte":4793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"575817919","text":"#--------------------------------------------------#\n# App: Regular Expression Date Parser #\n# Date: 08/01/2016 # \n# Author: Ankoor Bhagat # \n#--------------------------------------------------#\n\n# Imports: Flask\nfrom flask import Flask, current_app\nfrom flask import render_template\nfrom flask import request\nfrom flask import redirect\nfrom flask import jsonify\n\n# Imports: Classifier (SVM Model)\nimport regex_date_parser as REGEX\n\n# Create an instance of class \"Flask\" with name of running application as the arg\napp = Flask(__name__)\n\n\n# Index page\n@app.route('/', methods=['GET', 'POST'])\ndef index():\n results = {}\n if request.method == 'POST':\n # Form tag in index.html: \n text_data = request.form['message']\n text_data = 
text_data.decode('utf-8')\n text_data = text_data.encode('ascii', 'ignore')\n result = REGEX.hybrid_parse(text_data)\n results = result['date']\n return render_template(\"index.html\", result=results)\n\n\n# REST API: POST Request\n@app.route('/api/regex_date_parse', methods=['POST'])\ndef parse_date():\n # Get data\n data = request.get_json(force=True)\n try:\n # Convert input unicode string to Python string\n text_data = data['text'].decode('utf-8')\n text_data = text_data.encode('ascii', 'ignore')\n # Predict date\n result = REGEX.hybrid_parse(text_data)\n # Return result in JSON format\n return jsonify(result)\n except Exception as e:\n return jsonify({'error': e.message})\n \n\nif __name__ == '__main__':\n # Debug mode gives detailed message in case of an error.\n # NOTE: Debug mode is HIGHLY INSECURE\n app.run(debug=True)\n\n\n#----------- API Call Example -------------- #\n# curl -XPOST -d '{\"text\": \"2 years ago\"}' \"http://localhost:5000/api/regex_date_parse\"\n\n","sub_path":"regex_hybrid_dateparser/app/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1942,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"116780867","text":"#! /usr/bin/env python3\n\nimport re\nimport sys\nimport json\nimport csv\nimport codecs\nfrom datetime import datetime\nfrom fileIO import FileIO\n\n##\n## CONSTANTS and REGEX\n##\n\nSTART = re.compile(\"^From:\teDepartment Notices\")\nSENT = re.compile(\"^Sent:\")\nNOTICE = re.compile(\"^\\*\") # not uniform; some notices have line breaks, \n # so this will not catch all. I've manually edited source\n # based on the AssertionError below\nURL = re.compile(\"http[a-zA-Z-0-9=\\.:/_\\?]*\")\nMESSAGE_NO = re.compile(\"\tMessage #[0-9].*\")\n\nDELIMITER = \"\\t\"\n\n##\n## FUNCTIONS\n##\n\ndef main():\n import argparse\n \n parser = argparse.ArgumentParser()\n parser.add_argument(\"-i\", \"--in\", metavar=\"FILE\", dest=\"in_file\", default=None,\n help=\"Specify the input FILE.\", required=True)\n parser.add_argument(\"-o\", \"--out\", metavar=\"FILE\", dest=\"out_file\", default=None,\n help=\"Specify the output FILE.\", required=True)\n args = parser.parse_args()\n \n in_file = args.in_file\n out_file = args.out_file\n \n # to do - add cla for write tsv\n write_tsv = True\n \n notices = Notices(in_file=in_file, out_file=out_file, write_tsv=write_tsv)\n notices.read()\n notices.parse()\n if write_tsv:\n notices.flatten()\n notices.fmt()\n notices.write()\n\n##\n## CLASSES\n##\n\nclass Notices(FileIO):\n '''\n Class that catalogs notices data from email source\n '''\n def __init__(self, *args, **kwargs):\n FileIO.__init__(self, *args, **kwargs)\n self.is_flat = False\n \n def parse(self):\n switch = False\n line_no = 0\n # False = not inside a record, True = inside\n for line in self.data_in:\n line_no += 1\n if switch == False:\n d = {\"sent_date\": None,\n \"weekday\": None,\n \"notices\": [],\n \"urls\": [],\n \"message_no\": None}\n if START.match(line):\n # we have now entered a record, need new data_dict\n switch = True\n else:\n if SENT.match(line):\n date = line.split(\"\\t\")[1].strip()\n date = datetime.strptime(date, \"%A, %B %d, %Y %I:%M %p\")\n d[\"sent_date\"] = date.isoformat()\n d[\"weekday\"] = date.isoweekday()\n elif NOTICE.match(line):\n l = line.strip(\"^*\\t\").strip().strip(\">\").split(\"<\")\n try:\n assert len(l) == 2\n d[\"notices\"].append(l[0])\n d[\"urls\"].append(l[1])\n except AssertionError:\n print(str(line_no) + \" ** notice / url error: \" + line, 
file=sys.stderr)\n elif MESSAGE_NO.match(line):\n try:\n d[\"message_no\"] = int(line.strip().split(\" #\")[1])\n except ValueError:\n print(str(line_no) + \" ** message number error: \" + line, file=sys.stderr)\n if d[\"message_no\"] is not None:\n # message number is the last content line of the email, therefore close it out\n self.data_temp.append(d)\n switch = False\n \n def flatten(self):\n '''Converts relational-like output of self.parse() into a flat, csv-like list of dicts'''\n if self.data_temp != []:\n messages = []\n notices = []\n for document in self.data_temp:\n message = {\"message_no\": document[\"message_no\"],\n \"sent_date\": document[\"sent_date\"],\n \"count\": len(document[\"notices\"]),\n \"weekday\": document[\"weekday\"]}\n \n messages.append(message)\n \n for i in range(len(document[\"notices\"])):\n notice = {\"index\": i + 1,\n \"title\": document[\"notices\"][i],\n \"url\": document[\"urls\"][i],\n \"message_no\": document[\"message_no\"],\n \"notice_id\": int(document[\"urls\"][i].split(\"=\")[1])}\n notices.append(notice)\n \n self.is_flat = True\n self.data_temp = {\"messages\": messages, \"notices\": notices}\n else:\n print(\"processed dataset is empty; run parse() before flatten()\", file=sys.stderr)\n \n \n##\n## MAIN \n##\n\nif __name__ == '__main__':\n main()","sub_path":"parse_emails.py","file_name":"parse_emails.py","file_ext":"py","file_size_in_byte":4046,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"364449943","text":"from structneur import node_neur\nimport pickle\nfrom neuron import h, gui\nfrom matplotlib import pyplot\nimport numpy\nfrom sys import argv\nfrom scipy.spatial import distance\nimport simplejson\n\nfileloc = open('locationdic.json', 'r')\ndatadict = simplejson.load(fileloc)\nfileloc.close()\n\ndef Node(stimdir):\n\n \"\"\"This is the program for the node model in the paper _.\n When using this program use the following command:\n\n ./x86_64/special -python nodetest.py (number)\n\n Where the numbers are for the direction of the signal:\n\n (number)\n 0: 'rl'\n 1: 'du'\n 2: 'lr'\n 3: 'ud'\n 4: 'full' (forward direction)\n\n The data will be saved in the ./data/node directory.\"\"\"\n\n #--------------------------------------------------------------------------------------------------------------------------------------------\n\n datafile = open(datadict['L1data'],'r')\n L1list = pickle.load(datafile)\n datafile.close()\n\n datafile = open(datadict['Mi1data'],'r')\n Mi1list = pickle.load(datafile)\n datafile.close()\n\n datafile = open(datadict['Tm3data'],'r')\n Tm3list = pickle.load(datafile)\n datafile.close()\n\n datafile = open(datadict['Mi4data'],'r')\n Mi4list = pickle.load(datafile)\n datafile.close()\n\n datafile = open(datadict['Mi9data'],'r')\n Mi9list = pickle.load(datafile)\n datafile.close()\n\n datafile = open(datadict['T4data'],'r')\n T4list = pickle.load(datafile)\n datafile.close()\n\n datafile = open(datadict['T4adata'],'r')\n T4alist = pickle.load(datafile)\n datafile.close()\n\n datafile = open(datadict['T4bdata'],'r')\n T4blist = pickle.load(datafile)\n datafile.close()\n\n datafile = open(datadict['T4cdata'],'r')\n T4clist = pickle.load(datafile)\n datafile.close()\n\n datafile = open(datadict['T4ddata'],'r')\n T4dlist = pickle.load(datafile)\n datafile.close()\n\n datafile = open(datadict[stimdir],'r')\n stimdata = pickle.load(datafile)\n datafile.close()\n\n datafile = open(datadict['totaldata'],'r')\n fulldata = pickle.load(datafile)\n 
datafile.close()\n\n datafile = open(datadict['areas'],'r')\n area = simplejson.load(datafile)\n datafile.close()\n\n #--------------------------------------------------------------------------------------------------------------------------------------------\n\n neurdic = {}\n\n print('Creating cells...')\n\n count = 0\n\n #--------------------------------------------------------------------------------------------------------------------------------------------\n\n for f in range(len(fulldata)):\n\n if fulldata[f] == None:\n\n continue\n\n else:\n\n #--------------------------------------------------------------------------------------------------------------------------------------------\n\n if str(fulldata[f].cellID) in area.keys():\n\n neurdic[fulldata[f].cellID] = node_neur.cell(fulldata[f],area[str(fulldata[f].cellID)])\n\n print(area[str(fulldata[f].cellID)])\n\n count += 1\n\n #-------------------------------------------------------------------------------\n\n print('Compartments: ' + str(count))\n\n #-------------------------------------------------------------------------------\n\n print('Connecting cells..')\n\n #-------------------------------------------------------------------------------\n\n from structneur import node_syn\n\n connections = {\n #L1\n 'L1_L1' :[L1list,L1list],\n 'L1_Mi1' :[L1list,Mi1list],\n 'L1_Mi4' :[L1list,Mi4list],\n 'L1_Mi9' :[L1list,Mi9list],\n 'L1_Tm3' :[L1list,Tm3list],\n 'L1_T4' :[L1list,T4list],\n\n #Mi1\n 'Mi1_Mi1' :[Mi1list,Mi1list],\n 'Mi1_L1' :[Mi1list,L1list],\n 'Mi1_Tm3' :[Mi1list,Tm3list],\n 'Mi1_Mi4' :[Mi1list,Mi4list],\n 'Mi1_Mi9' :[Mi1list,Mi9list],\n 'Mi1_T4' :[Mi1list,T4list],\n\n #Tm3\n 'Tm3_Tm3' :[Tm3list,Tm3list],\n 'Tm3_L1' :[Tm3list,L1list],\n 'Tm3_Mi1' :[Tm3list,Mi1list],\n 'Tm3_Mi4' :[Tm3list,Mi4list],\n 'Tm3_Mi9' :[Tm3list,Mi9list],\n 'Tm3_T4' :[Tm3list,T4list],\n\n #Mi4\n 'Mi4_Mi4' :[Mi4list,Mi4list],\n 'Mi4_L1' :[Mi4list,L1list],\n 'Mi4_Mi1' :[Mi4list,Mi1list],\n 'Mi4_Tm3' :[Mi4list,Tm3list],\n 'Mi4_Mi9' :[Mi4list,Mi9list],\n 'Mi4_T4' :[Mi4list,T4list],\n\n #Mi9\n 'Mi9_Mi9' :[Mi9list,Mi9list],\n 'Mi9_L1' :[Mi9list,L1list],\n 'Mi9_Mi1' :[Mi9list,Mi1list],\n 'Mi9_Tm3' :[Mi9list,Tm3list],\n 'Mi9_Mi4' :[Mi9list,Mi4list],\n 'Mi9_T4' :[Mi9list,T4list],\n\n #T4\n 'T4_T4' :[T4list,T4list],\n 'T4_L1' :[T4list,L1list],\n 'T4_Mi1' :[T4list,Mi1list],\n 'T4_Tm3' :[T4list,Tm3list],\n 'T4_Mi4' :[T4list,Mi4list],\n 'T4_Mi9' :[T4list,Mi9list],\n }\n\n synlist = []\n\n for key in neurdic.keys():\n\n #Stimuli-----------------------------------------------------------------------------------------------\n if str(stimdir) == 'full':\n if key in stimdata:\n neurdic[key].create_stim(55)\n else:\n if key in stimdata[0]:\n neurdic[key].create_stim(55)\n if key in stimdata[1]:\n neurdic[key].create_stim(155)\n if key in stimdata[2]:\n neurdic[key].create_stim(255)\n if key in stimdata[3]:\n neurdic[key].create_stim(355)\n if key in stimdata[4]:\n neurdic[key].create_stim(455)\n\n #-----------------------------------------------------------------------------------------------------------------------------------------------\n\n for p in neurdic[key].partners:\n\n part = p[0]\n\n if part in neurdic.keys():\n\n #L1-----------------------------------------------------------------------------------------------\n \"\"\"if key in connections['L1_L1'][0] and part in connections['L1_L1'][1]:\n info = node_syn.synapse(neurdic[key],neurdic[part],tau=50)\n\n synlist.append(info)\"\"\"\n\n if key in connections['L1_Mi1'][0] and part in connections['L1_Mi1'][1]:\n 
info = node_syn.synapse(neurdic[key],neurdic[part])\n\n synlist.append(info)\n\n if key in connections['L1_Tm3'][0] and part in connections['L1_Tm3'][1]:\n info = node_syn.synapse(neurdic[key],neurdic[part])\n\n synlist.append(info)\n\n #Mi1-----------------------------------------------------------------------------------------------\n \"\"\"if key in connections['Mi1_Mi1'][0] and part in connections['Mi1_Mi1'][1]:\n info = node_syn.synapse(neurdic[key],neurdic[part])\n\n synlist.append(info)\"\"\"\n\n \"\"\"if key in connections['Mi1_L1'][0] and part in connections['Mi1_L1'][1]:\n info = node_syn.synapse(neurdic[key],neurdic[part])\n\n synlist.append(info)\"\"\"\n\n if key in connections['Mi1_Tm3'][0] and part in connections['Mi1_Tm3'][1]:\n info = node_syn.synapse(neurdic[key],neurdic[part])\n\n synlist.append(info)\n\n if key in connections['Mi1_Mi4'][0] and part in connections['Mi1_Mi4'][1]:\n info = node_syn.synapse(neurdic[key],neurdic[part])\n\n synlist.append(info)\n\n if key in connections['Mi1_Mi9'][0] and part in connections['Mi1_Mi9'][1]:\n info = node_syn.synapse(neurdic[key],neurdic[part])\n\n synlist.append(info)\n\n if key in connections['Mi1_T4'][0] and part in connections['Mi1_T4'][1]:\n info = node_syn.synapse(neurdic[key],neurdic[part])\n\n synlist.append(info)\n\n #Tm3-----------------------------------------------------------------------------------------------\n \"\"\"if key in connections['Tm3_Tm3'][0] and part in connections['Tm3_Tm3'][1]:\n info = node_syn.synapse(neurdic[key],neurdic[part])\n\n synlist.append(info)\"\"\"\n\n \"\"\"if key in connections['Tm3_L1'][0] and part in connections['Tm3_L1'][1]:\n info = node_syn.synapse(neurdic[key],neurdic[part])\n\n synlist.append(info)\"\"\"\n\n if key in connections['Tm3_Mi1'][0] and part in connections['Tm3_Mi1'][1]:\n info = node_syn.synapse(neurdic[key],neurdic[part])\n\n synlist.append(info)\n\n if key in connections['Tm3_Mi4'][0] and part in connections['Tm3_Mi4'][1]:\n info = node_syn.synapse(neurdic[key],neurdic[part])\n\n synlist.append(info)\n\n if key in connections['Tm3_Mi9'][0] and part in connections['Tm3_Mi9'][1]:\n info = node_syn.synapse(neurdic[key],neurdic[part])\n\n synlist.append(info)\n\n if key in connections['Tm3_T4'][0] and part in connections['Tm3_T4'][1]:\n info = node_syn.synapse(neurdic[key],neurdic[part])\n\n synlist.append(info)\n\n #Mi4-----------------------------------------------------------------------------------------------\n \"\"\"if key in connections['Mi4_Mi4'][0] and part in connections['Mi4_Mi4'][1]:\n info = node_syn.synapse(neurdic[key],neurdic[part],e=-80)\n\n synlist.append(info)\"\"\"\n\n \"\"\"if key in connections['Mi4_Mi1'][0] and part in connections['Mi4_Mi1'][1]:\n info = node_syn.synapse(neurdic[key],neurdic[part],e=-80)\n\n synlist.append(info)\"\"\"\n\n \"\"\"if key in connections['Mi4_Tm3'][0] and part in connections['Mi4_Tm3'][1]:\n info = node_syn.synapse(neurdic[key],neurdic[part],e=-80)\n\n synlist.append(info)\"\"\"\n\n if key in connections['Mi4_Mi9'][0] and part in connections['Mi4_Mi9'][1]:\n info = node_syn.synapse(neurdic[key],neurdic[part],e=-80)\n\n synlist.append(info)\n\n if key in connections['Mi4_T4'][0] and part in connections['Mi4_T4'][1]:\n info = node_syn.synapse(neurdic[key],neurdic[part],e=-80)\n\n synlist.append(info)\n\n #Mi9-----------------------------------------------------------------------------------------------\n \"\"\"if key in connections['Mi9_Mi9'][0] and part in connections['Mi9_Mi9'][1]:\n info = 
node_syn.synapse(neurdic[key],neurdic[part],e=-80)\n\n                        synlist.append(info)\"\"\"\n\n                    \"\"\"if key in connections['Mi9_Mi1'][0] and part in connections['Mi9_Mi1'][1]:\n                        info = node_syn.synapse(neurdic[key],neurdic[part],e=-80)\n\n                        synlist.append(info)\"\"\"\n\n                    \"\"\"if key in connections['Mi9_Tm3'][0] and part in connections['Mi9_Tm3'][1]:\n                        info = node_syn.synapse(neurdic[key],neurdic[part],e=-80)\n\n                        synlist.append(info)\"\"\"\n\n                    if key in connections['Mi9_Mi4'][0] and part in connections['Mi9_Mi4'][1]:\n                        info = node_syn.synapse(neurdic[key],neurdic[part],e=-80)\n\n                        synlist.append(info)\n\n                    if key in connections['Mi9_T4'][0] and part in connections['Mi9_T4'][1]:\n                        info = node_syn.synapse(neurdic[key],neurdic[part],e=-80)\n\n                        synlist.append(info)\n\n    print('Synapses: ' + str(len(synlist)))\n    #--------------------------------------------------------------------------------------------------------------------------------------------\n\n    tlist = []\n    Mi1simlist = []\n    L1simlist = []\n    Tm3simlist = []\n    Mi4simlist = []\n    Mi9simlist = []\n    T4simlist = []\n\n\n    T4asimlist = []\n    T4bsimlist = []\n    T4csimlist = []\n    T4dsimlist = []\n\n    Mi1data = []\n    L1data = []\n    Tm3data = []\n    Mi4data = []\n    Mi9data = []\n    T4data = []\n\n    T4adata = []\n    T4bdata = []\n    T4cdata = []\n    T4ddata = []\n\n    heatmap = []\n\n    #------------------------------------------------------------------------------------------------------------\n\n    for val in neurdic.values():\n\n        v_vec = h.Vector()\n        t_vec = h.Vector()\n\n        v_vec.record(val.soma(0.5)._ref_v)\n        t_vec.record(h._ref_t)\n\n        sim = (t_vec,v_vec)\n        ID_sim = numpy.array([val.cellID,v_vec],dtype=list)\n\n        if val.cellID in Mi1list:\n\n            Mi1simlist.append(sim)\n            Mi1data.append(v_vec)\n\n        if val.cellID in L1list:\n\n            L1simlist.append(sim)\n            L1data.append(v_vec)\n\n        if val.cellID in Tm3list:\n\n            Tm3simlist.append(sim)\n            Tm3data.append(v_vec)\n\n        if val.cellID in Mi4list:\n\n            Mi4simlist.append(sim)\n            Mi4data.append(v_vec)\n\n        if val.cellID in Mi9list:\n\n            Mi9simlist.append(sim)\n            Mi9data.append(v_vec)\n\n        if val.cellID in T4list:\n\n            T4simlist.append(sim)\n            T4data.append(v_vec)\n\n        if val.cellID in T4alist:\n\n            T4asimlist.append(sim)\n            T4adata.append(v_vec)\n\n        if val.cellID in T4blist:\n\n            T4bsimlist.append(sim)\n            T4bdata.append(v_vec)\n\n        if val.cellID in T4clist:\n\n            T4csimlist.append(sim)\n            T4cdata.append(v_vec)\n\n        if val.cellID in T4dlist:\n\n            T4dsimlist.append(sim)\n            T4ddata.append(v_vec)\n\n    #--------------------------------------------------------------------------------------------------------------------------------------------\n\n    h.tstop = 1000\n\n    print('Running simulation...')\n\n    import time\n    start_time = time.time()\n\n    h.run()\n\n    print(\"--- %s seconds ---\" % (time.time() - start_time))\n\n    print('Done!')\n\n    #--------------------------------------------------------------------------------------------------------------------------------------------\n\n    pyplot.figure(str(stimdir))\n    pyplot.subplot(2,3,1)\n    for token in Mi1simlist:\n        pyplot.plot(token[0],token[1])\n\n    pyplot.title('Mi1')\n    pyplot.xlabel('time (ms)')\n    pyplot.ylabel('mV')\n\n    pyplot.subplot(2,3,2)\n    for token in L1simlist:\n        pyplot.plot(token[0],token[1])\n\n    pyplot.title('L1')\n    pyplot.xlabel('time (ms)')\n    pyplot.ylabel('mV')\n\n    pyplot.subplot(2,3,3)\n    for token in Tm3simlist:\n        pyplot.plot(token[0],token[1])\n\n    pyplot.title('Tm3')\n    pyplot.xlabel('time (ms)')\n    pyplot.ylabel('mV')\n\n    pyplot.subplot(2,3,4)\n    for token in Mi4simlist:\n        pyplot.plot(token[0],token[1])\n\n    
pyplot.title('Mi4')\n pyplot.xlabel('time (ms)')\n pyplot.ylabel('mV')\n\n pyplot.subplot(2,3,5)\n for token in Mi9simlist:\n pyplot.plot(token[0],token[1])\n\n pyplot.title('Mi9')\n pyplot.xlabel('time (ms)')\n pyplot.ylabel('mV')\n\n pyplot.subplot(2,3,6)\n for token in T4simlist:\n pyplot.plot(token[0],token[1])\n\n pyplot.title('T4')\n pyplot.xlabel('time (ms)')\n pyplot.ylabel('mV')\n\n #--------------------------------------------------------------------------------------------------------------------------------------------\n\n pyplot.figure('T4: '+ str(stimdir))\n\n pyplot.subplot(2,2,1)\n for token in T4asimlist:\n pyplot.plot(token[0],token[1])\n\n pyplot.title('T4a')\n pyplot.xlabel('time (ms)')\n pyplot.ylabel('mV')\n\n pyplot.subplot(2,2,2)\n for token in T4bsimlist:\n pyplot.plot(token[0],token[1])\n\n pyplot.title('T4b')\n pyplot.xlabel('time (ms)')\n pyplot.ylabel('mV')\n\n pyplot.subplot(2,2,3)\n for token in T4csimlist:\n pyplot.plot(token[0],token[1])\n\n pyplot.title('T4c')\n pyplot.xlabel('time (ms)')\n pyplot.ylabel('mV')\n\n pyplot.subplot(2,2,4)\n for token in T4dsimlist:\n pyplot.plot(token[0],token[1])\n\n pyplot.title('T4d')\n pyplot.xlabel('time (ms)')\n pyplot.ylabel('mV')\n\n #--------------------------------------------------------------------------------------------------------------------------------------------\n\n pyplot.show()\n\n #--------------------------------------------------------------------------------------------------------------------------------------------\n\n import os\n datafolder = datadict['dataloc'] + 'node/' + str(stimdir)\n\n filename = datafolder + 'Mi1simlist' + '.csv'\n numpy.savetxt(filename,Mi1data,delimiter=',')\n\n filename = datafolder + 'L1simlist' + '.csv'\n numpy.savetxt(filename,L1data,delimiter=',')\n\n filename = datafolder + 'Tm3simlist' + '.csv'\n numpy.savetxt(filename,Tm3data,delimiter=',')\n\n filename = datafolder + 'Mi4simlist' + '.csv'\n numpy.savetxt(filename,Mi4data,delimiter=',')\n\n filename = datafolder + 'Mi9simlist' + '.csv'\n numpy.savetxt(filename,Mi9data,delimiter=',')\n\n filename = datafolder + 'T4simlist' + '.csv'\n numpy.savetxt(filename,T4data,delimiter=',')\n\n filename = datafolder + 'T4asimlist' + '.csv'\n numpy.savetxt(filename,T4adata,delimiter=',')\n\n filename = datafolder + 'T4bsimlist' + '.csv'\n numpy.savetxt(filename,T4bdata,delimiter=',')\n\n filename = datafolder + 'T4csimlist' + '.csv'\n numpy.savetxt(filename,T4cdata,delimiter=',')\n\n filename = datafolder + 'T4dsimlist' + '.csv'\n numpy.savetxt(filename,T4ddata,delimiter=',')\n\n h.quit()\n\nif __name__ == '__main__':\n\n stimlist = ['rl','du','lr','ud','full']\n\n s0 = stimlist[int(argv[-1])]\n\n Node(s0)\n","sub_path":"nodetest.py","file_name":"nodetest.py","file_ext":"py","file_size_in_byte":17555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"633845292","text":"import torch\nimport numpy as np\nimport librosa.filters\nfrom .utils import apply_reduction\n\nfrom .perceptual import SumAndDifference, FIRFilter\n\nclass SpectralConvergenceLoss(torch.nn.Module):\n \"\"\"Spectral convergence loss module.\n\n See [Arik et al., 2018](https://arxiv.org/abs/1808.06719). 
\n \"\"\"\n def __init__(self):\n super(SpectralConvergenceLoss, self).__init__()\n\n def forward(self, x_mag, y_mag):\n return torch.norm(y_mag - x_mag, p=\"fro\") / torch.norm(y_mag, p=\"fro\")\n\n\nclass LogSTFTMagnitudeLoss(torch.nn.Module):\n \"\"\"Log STFT magnitude loss module.\n \n See [Arik et al., 2018](https://arxiv.org/abs/1808.06719). \n \"\"\"\n def __init__(self):\n super(LogSTFTMagnitudeLoss, self).__init__()\n\n def forward(self, x_mag, y_mag):\n return torch.nn.functional.l1_loss(torch.log(x_mag), torch.log(y_mag))\n\n\nclass STFTLoss(torch.nn.Module):\n \"\"\"STFT loss module.\n \n See [Yamamoto et al. 2019](https://arxiv.org/abs/1904.04472).\n \n Args:\n fft_size (int, optional): FFT size in samples. Default: 1024\n hop_size (int, optional): Hop size of the FFT in samples. Default: 256\n win_length (int, optional): Length of the FFT analysis window. Default: 1024\n window (string, optional): Window to apply before FFT, options include:\n ['hann_window', 'bartlett_window', 'blackman_window', 'hamming_window', 'kaiser_window'] \n Default: 'hann_window'\n w_sc (float, optional): Weight of the spectral convergence loss term. Default: 1.0\n w_mag (float, optional): Weight of the log magnitude loss term. Default: 1.0\n w_phs (float, optional): Weight of the spectral phase loss term (Currently not implemented.). Default: 0.0\n sample_rate (int, optional): Sample rate. Required when scale = 'mel'. Default: None\n scale (string, optional): Optional frequency scaling method, options include:\n ['mel', 'chroma'] \n Default: None\n n_bins (int, optional): Number of scaling frequency bins. Default: None.\n scale_invariance (bool, optional): Perform an optimal scaling of the target. Default: False\n eps (float, optional): Small epsilon value for stablity. Default: 1e-8\n output (str, optional): Format of the loss returned. \n 'loss' : Return only the raw, aggregate loss term.\n 'full' : Return the raw loss, plus intermediate loss terms.\n Default: 'loss'\n reduction (string, optional): Specifies the reduction to apply to the output:\n 'none': no reduction will be applied,\n 'mean': the sum of the output will be divided by the number of elements in the output, \n 'sum': the output will be summed. \n Default: 'mean'\n\n Returns:\n loss: \n Aggreate loss term. Only returned if output='loss'.\n loss, sc_loss, mag_loss, phs_loss: \n Aggregate and intermediate loss terms. 
Only returned if output='full'.\n    \"\"\"\n    def __init__(self, \n                 fft_size=1024, \n                 hop_size=256, \n                 win_length=1024, \n                 window=\"hann_window\", \n                 w_sc=1.0,\n                 w_mag=1.0,\n                 w_phs=0.0,\n                 sample_rate=None,\n                 scale=None,\n                 n_bins=None,\n                 scale_invariance=False,\n                 eps=1e-8,\n                 output=\"loss\",\n                 reduction=\"mean\"):\n        super(STFTLoss, self).__init__()\n        self.fft_size = fft_size\n        self.hop_size = hop_size\n        self.win_length = win_length\n        self.window = getattr(torch, window)(win_length)\n        self.w_sc = w_sc\n        self.w_mag = w_mag\n        self.w_phs = w_phs\n        self.sample_rate = sample_rate\n        self.scale = scale\n        self.n_bins = n_bins\n        self.scale_invariance = scale_invariance\n        self.eps = eps\n        self.output = output\n        self.reduction = reduction\n\n        self.spectralconv = SpectralConvergenceLoss()\n        self.logstft = LogSTFTMagnitudeLoss()\n\n        # setup mel filterbank\n        if self.scale == \"mel\":\n            assert(sample_rate != None)  # Must set sample rate to use mel scale\n            assert(n_bins <= fft_size)   # Must be more FFT bins than Mel bins\n            fb = librosa.filters.mel(sample_rate, fft_size, n_mels=n_bins)\n            self.fb = torch.tensor(fb).unsqueeze(0)\n        elif self.scale == \"chroma\":\n            assert(sample_rate != None)  # Must set sample rate to use chroma scale\n            assert(n_bins <= fft_size)   # Must be more FFT bins than chroma bins\n            fb = librosa.filters.chroma(sample_rate, fft_size, n_chroma=n_bins)\n            self.fb = torch.tensor(fb).unsqueeze(0)\n\n    def stft(self, x):\n        \"\"\" Perform STFT.\n        Args:\n            x (Tensor): Input signal tensor (B, T).\n\n        Returns:\n            Tensor: x_mag, x_phs\n                Magnitude and phase spectra (B, fft_size // 2 + 1, frames).\n        \"\"\"\n        x_stft = torch.stft(x, \n                            self.fft_size, \n                            self.hop_size, \n                            self.win_length, \n                            self.window, \n                            return_complex=True)\n        x_mag = torch.sqrt(torch.clamp((x_stft.real ** 2) + (x_stft.imag ** 2), min=self.eps))\n        #x_phs = torch.angle(x_stft) currently not implemented\n        return x_mag, None\n\n    def forward(self, x, y):\n        # compute the magnitude and phase spectra of input and target\n        self.window = self.window.to(x.device)\n        x_mag, x_phs = self.stft(x.view(-1,x.size(-1)))\n        y_mag, y_phs = self.stft(y.view(-1,y.size(-1)))\n\n        # apply relevant transforms\n        if self.scale is not None:\n            x_mag = torch.matmul(self.fb, x_mag)\n            y_mag = torch.matmul(self.fb, y_mag)\n\n        # normalize scales\n        if self.scale_invariance:\n            alpha = (x_mag * y_mag).sum([-2,-1]) / ((y_mag ** 2).sum([-2,-1]))\n            y_mag = y_mag * alpha.unsqueeze(-1)\n\n        # compute loss terms\n        sc_loss = self.spectralconv(x_mag, y_mag)\n        mag_loss = self.logstft(x_mag, y_mag)\n        phs_loss = 0.0  # phase loss not yet implemented; placeholder so output='full' is defined\n        loss = (self.w_sc * sc_loss) + (self.w_mag * mag_loss)\n        loss = apply_reduction(loss, reduction=self.reduction)\n\n        if self.output == \"loss\":\n            return loss\n        elif self.output == \"full\":\n            return loss, sc_loss, mag_loss, phs_loss\n\nclass MelSTFTLoss(STFTLoss):\n    \"\"\" Mel-scale STFT loss module. \"\"\"\n    def __init__(self, \n                 sample_rate,\n                 fft_size=1024, \n                 hop_size=256, \n                 win_length=1024, \n                 window=\"hann_window\", \n                 w_sc=1.0,\n                 w_mag=1.0,\n                 w_phs=0.0,\n                 n_mels=128):\n        super(MelSTFTLoss, self).__init__(fft_size, \n                                          hop_size, \n                                          win_length, \n                                          window,\n                                          w_sc,\n                                          w_mag, \n                                          w_phs,\n                                          sample_rate,\n                                          \"mel\",\n                                          n_mels)\n\nclass ChromaSTFTLoss(STFTLoss):\n    \"\"\" Chroma-scale STFT loss module. 
\"\"\"\n def __init__(self, \n sample_rate,\n fft_size=1024, \n hop_size=256, \n win_length=1024, \n window=\"hann_window\", \n w_sc=1.0,\n w_mag=1.0,\n w_phs=0.0,\n n_chroma=12):\n super(ChromaSTFTLoss, self).__init__(fft_size, \n hop_size, \n win_length, \n window,\n w_sc,\n w_mag, \n w_phs,\n sample_rate,\n \"chroma\",\n n_chroma)\n\n\nclass MultiResolutionSTFTLoss(torch.nn.Module):\n \"\"\" Multi resolution STFT loss module.\n \n See [Yamamoto et al., 2019](https://arxiv.org/abs/1910.11480)\n\n Args:\n fft_sizes (list): List of FFT sizes.\n hop_sizes (list): List of hop sizes.\n win_lengths (list): List of window lengths. \n window (string, optional): Window to apply before FFT, options include:\n 'hann_window', 'bartlett_window', 'blackman_window', 'hamming_window', 'kaiser_window'] \n Default: 'hann_window'\n w_sc (float, optional): Weight of the spectral convergence loss term. Default: 1.0\n w_mag (float, optional): Weight of the log magnitude loss term. Default: 1.0\n w_phs (float, optional): Weight of the spectral phase loss term. Default: 0.0\n sample_rate (int, optional): Sample rate. Required when scale = 'mel'. Default: None\n scale (string, optional): Optional frequency scaling method, options include:\n ['mel', 'chroma'] \n Default: None\n n_bins (int, optional): Number of mel frequency bins. Default: 128.\n scale_invariance (bool, optional): Perform an optimal scaling of the target. Default: False\n \"\"\"\n def __init__(self,\n fft_sizes=[1024, 2048, 512],\n hop_sizes=[120, 240, 50],\n win_lengths=[600, 1200, 240],\n window=\"hann_window\",\n w_sc=1.0,\n w_mag=1.0,\n w_phs=0.0,\n sample_rate=None,\n scale=None,\n n_bins=None,\n scale_invariance=False):\n super(MultiResolutionSTFTLoss, self).__init__()\n assert len(fft_sizes) == len(hop_sizes) == len(win_lengths) # must define all\n self.stft_losses = torch.nn.ModuleList()\n for fs, ss, wl in zip(fft_sizes, hop_sizes, win_lengths):\n self.stft_losses += [STFTLoss(fs, \n ss, \n wl,\n window,\n w_sc,\n w_mag,\n w_phs,\n sample_rate,\n scale,\n n_bins,\n scale_invariance)]\n\n def forward(self, x, y):\n mrstft_loss = 0.0\n for f in self.stft_losses:\n mrstft_loss += f(x, y)\n mrstft_loss /= len(self.stft_losses)\n return mrstft_loss\n\n\nclass RandomResolutionSTFTLoss(torch.nn.Module):\n \"\"\"Random resolution STFT loss module.\n\n See [Steinmetz & Reiss, 2020](https://www.christiansteinmetz.com/s/DMRN15__auraloss__Audio_focused_loss_functions_in_PyTorch.pdf)\n\n Args:\n resolutions (int): Total number of STFT resolutions.\n min_fft_size (int): Smallest FFT size.\n max_fft_size (int): Largest FFT size.\n min_hop_size (int): Smallest hop size as porportion of window size.\n min_hop_size (int): Largest hop size as porportion of window size.\n window (str): Window function type.\n randomize_rate (int): Number of forwards before STFTs are randomized. 
\n \"\"\"\n def __init__(self,\n resolutions=3,\n min_fft_size=16,\n max_fft_size=32768,\n min_hop_size=0.1,\n max_hop_size=1.0,\n windows=[\"hann_window\", \"bartlett_window\", \"blackman_window\", \"hamming_window\", \"kaiser_window\"],\n w_sc=1.0,\n w_mag=1.0,\n w_phs=0.0,\n sample_rate=None,\n scale=None,\n n_mels=None,\n randomize_rate=1):\n super(RandomResolutionSTFTLoss, self).__init__()\n self.resolutions = resolutions\n self.min_fft_size = min_fft_size\n self.max_fft_size = max_fft_size\n self.min_hop_size = min_hop_size\n self.max_hop_size = max_hop_size\n self.windows = windows\n self.randomize_rate = randomize_rate\n self.w_sc = w_sc\n self.w_mag = w_mag\n self.w_phs = w_phs\n self.sample_rate = sample_rate\n self.scale = scale\n self.n_mels = n_mels\n\n self.nforwards = 0\n self.randomize_losses() # init the losses \n\n def randomize_losses(self):\n # clear the existing STFT losses\n self.stft_losses = torch.nn.ModuleList()\n for n in range(self.resolutions):\n frame_size = 2 ** np.random.randint(np.log2(self.min_fft_size), np.log2(self.max_fft_size))\n hop_size = int(frame_size * (self.min_hop_size + (np.random.rand() * (self.max_hop_size-self.min_hop_size))))\n window_length = int(frame_size * np.random.choice([1.0, 0.5, 0.25]))\n window = np.random.choice(self.windows)\n self.stft_losses += [STFTLoss(frame_size, \n hop_size, \n window_length, \n window,\n self.w_sc,\n self.w_mag,\n self.w_phs,\n self.sample_rate,\n self.scale,\n self.n_mels)]\n\n def forward(self, input, target):\n if input.size(-1) <= self.max_fft_size:\n raise ValueError(f\"Input length ({input.size(-1)}) must be larger than largest FFT size ({self.max_fft_size}).\") \n elif target.size(-1) <= self.max_fft_size:\n raise ValueError(f\"Target length ({target.size(-1)}) must be larger than largest FFT size ({self.max_fft_size}).\") \n\n if self.nforwards % self.randomize_rate == 0:\n self.randomize_losses()\n\n loss = 0.0\n for f in self.stft_losses:\n loss += f(input, target)\n loss /= len(self.stft_losses)\n\n self.nforwards += 1\n\n return loss\n\n\nclass SumAndDifferenceSTFTLoss(torch.nn.Module):\n \"\"\" Sum and difference sttereo STFT loss module.\n \n See [Steinmetz et al., 2020](https://arxiv.org/abs/2010.10291)\n\n Args:\n fft_sizes (list, optional): List of FFT sizes.\n hop_sizes (list, optional): List of hop sizes.\n win_lengths (list, optional): List of window lengths.\n window (str, optional): Window function type.\n w_sum (float, optional): Weight of the sum loss component. Default: 1.0\n w_diff (float, optional): Weight of the difference loss component. Default: 1.0\n output (str, optional): Format of the loss returned. \n 'loss' : Return only the raw, aggregate loss term.\n 'full' : Return the raw loss, plus intermediate loss terms.\n Default: 'loss'\n \n Returns:\n loss: \n Aggreate loss term. Only returned if output='loss'.\n loss, sum_loss, diff_loss: \n Aggregate and intermediate loss terms. 
Only returned if output='full'.\n    \"\"\"\n    def __init__(self,\n                 fft_sizes=[1024, 2048, 512],\n                 hop_sizes=[120, 240, 50],\n                 win_lengths=[600, 1200, 240],\n                 window=\"hann_window\",\n                 w_sum=1.0,\n                 w_diff=1.0, \n                 output=\"loss\"):\n        super(SumAndDifferenceSTFTLoss, self).__init__()\n        self.sd = SumAndDifference() \n        self.w_sum = w_sum\n        self.w_diff = w_diff\n        self.output = output\n        self.mrstft = MultiResolutionSTFTLoss(fft_sizes, \n                                              hop_sizes, \n                                              win_lengths, \n                                              window)\n\n    def forward(self, input, target):\n        input_sum, input_diff = self.sd(input)\n        target_sum, target_diff = self.sd(target)\n\n        sum_loss = self.mrstft(input_sum, target_sum)\n        diff_loss = self.mrstft(input_diff, target_diff)\n        loss = ((self.w_sum * sum_loss) + (self.w_diff * diff_loss))/2\n\n        if self.output == \"loss\":\n            return loss\n        elif self.output == \"full\":\n            return loss, sum_loss, diff_loss\n","sub_path":"auraloss/freq.py","file_name":"freq.py","file_ext":"py","file_size_in_byte":16632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"304013718","text":"##############\n# Exercise 2.5\n##############\n\n# You can use the supplied test cases for your own testing. Good luck!\nimport re\n\n\ngenlib = {\n    'ATA': 'I', 'ATC': 'I', 'ATT': 'I', 'ATG': 'M',\n    'ACA': 'T', 'ACC': 'T', 'ACG': 'T', 'ACT': 'T',\n    'AAC': 'N', 'AAT': 'N', 'AAA': 'K', 'AAG': 'K',\n    'AGC': 'S', 'AGT': 'S', 'AGA': 'R', 'AGG': 'R',\n    'CTA': 'L', 'CTC': 'L', 'CTG': 'L', 'CTT': 'L',\n    'CCA': 'P', 'CCC': 'P', 'CCG': 'P', 'CCT': 'P',\n    'CAC': 'H', 'CAT': 'H', 'CAA': 'Q', 'CAG': 'Q',\n    'CGA': 'R', 'CGC': 'R', 'CGG': 'R', 'CGT': 'R',\n    'GTA': 'V', 'GTC': 'V', 'GTG': 'V', 'GTT': 'V',\n    'GCA': 'A', 'GCC': 'A', 'GCG': 'A', 'GCT': 'A',\n    'GAC': 'D', 'GAT': 'D', 'GAA': 'E', 'GAG': 'E',\n    'GGA': 'G', 'GGC': 'G', 'GGG': 'G', 'GGT': 'G',\n    'TCA': 'S', 'TCC': 'S', 'TCG': 'S', 'TCT': 'S',\n    'TTC': 'F', 'TTT': 'F', 'TTA': 'L', 'TTG': 'L',\n    'TAC': 'Y', 'TAT': 'Y', 'TAA': '_', 'TAG': '_',\n    'TGC': 'C', 'TGT': 'C', 'TGA': '_', 'TGG': 'W',\n    }\n\n\ndef codons_to_aa(orf_list):\n    return ''.join([genlib[codon] for codon in orf_list])\n\n\ndef complementary(seq):\n    r_seq = \"\"\n    for s in seq:\n        if s == \"T\":\n            r_seq += \"A\"\n        if s == \"A\":\n            r_seq += \"T\"\n        if s == \"C\":\n            r_seq += \"G\"\n        if s == \"G\":\n            r_seq += \"C\"\n    return r_seq\n\n\nstart_codon = 'ATG'\nend_codons = ['TAA', 'TAG', 'TGA']\n\n\ndef calculate_end_position_ring(af_end_first, af_start_pos, length, rev):\n    start = (af_start_pos + 3 * af_end_first + 2) % length\n    if rev:\n        start = length - 1 - start\n    return start\n\n\ndef calculate_start_position(ind_s, seq_start, length, rev):\n    start = seq_start + 3 * ind_s\n    if rev:\n        start = length - 1 - start\n    return start\n\n\ndef calculate_positions(ind_s, ind_e, seq_start, length, rev):\n    start = calculate_start_position(ind_s, seq_start, length, rev)\n\n    end = seq_start + 3 * ind_e + 2\n    if rev:\n        end = length - 1 - end\n    return start, end\n\n\ndef put_if_exists(l, subl):\n    if subl is not None and len(subl) > 0:\n        l.extend(subl)\n\n\ndef generate_ends_lib(seq_arr):\n    endslib = {}\n\n    for codon in end_codons:\n        endslib[codon] = [index for index, value in enumerate(seq_arr) if value == codon]\n\n    return endslib\n\n\ndef find_stop_minimum_for_start(start_index, endslib):\n    mins = []\n    for codon in end_codons:\n        tmp = [i for i in endslib[codon] if i > start_index]\n        if len(tmp) > 0:\n            mins.append(tmp[0])\n\n    if len(mins) > 0:\n        return min(mins)\n    else:\n        return None\n\n\ndef try_to_get_orf(seq_arr, length, 
start_position, rev, after_data, end_codon_sorted):\n\n if not (start_codon in seq_arr):\n return None\n\n if not any([e in seq_arr for e in end_codons]):\n return None\n\n starts_arr = [index for index, value in enumerate(seq_arr) if value == start_codon]\n\n endslib = generate_ends_lib(seq_arr)\n\n for ind_start in starts_arr:\n\n end_first = find_stop_minimum_for_start(ind_start, endslib)\n\n if end_first is not None:\n if end_first - ind_start > 33:\n s_pos, e_pos = calculate_positions(ind_start, end_first, start_position, length, rev)\n seq_to_translate = seq_arr[ind_start:end_first]\n to_add = (s_pos, e_pos, codons_to_aa(seq_to_translate), rev)\n\n if not (e_pos in end_codon_sorted):\n end_codon_sorted[e_pos] = []\n end_codon_sorted[e_pos].append(to_add)\n else:\n af_endslib = generate_ends_lib(after_data['seq'])\n af_end_first = find_stop_minimum_for_start(-1, af_endslib)\n\n if af_end_first is not None:\n af_s_pos = calculate_start_position(ind_start, start_position, length, rev)\n af_e_pos = calculate_end_position_ring(af_end_first, after_data['start'], length, rev)\n\n af_seq_to_translate = seq_arr[ind_start:]\n af_seq_to_translate.extend(after_data['seq'][:af_end_first])\n\n if len(af_seq_to_translate) > 33:\n af_to_add = (af_s_pos, af_e_pos, codons_to_aa(af_seq_to_translate), rev)\n if not (af_e_pos in end_codon_sorted):\n end_codon_sorted[af_e_pos] = []\n end_codon_sorted[af_e_pos].append(af_to_add)\n\n\ndef split_frame_analyse(in_genome, reverse, found_orfs):\n\n orfs_list = []\n len_gen = len(in_genome)\n\n for i in range(0, 3, 1):\n seq_base = [in_genome[j: j+3] for j in range(i, len_gen - i - (len_gen - i) % 3, 3)]\n leftover_ind = (len_gen - (len_gen - i) % 3) % len_gen\n leftover_genome_repeat = \"\"\n if leftover_ind != 0:\n leftover_genome_repeat = in_genome[leftover_ind:]\n leftover_genome_repeat += in_genome + in_genome + in_genome[:i]\n len_lgr = len(leftover_genome_repeat)\n after_data = {\n 'start': leftover_ind,\n 'seq': [leftover_genome_repeat[j: j+3] for j in range(0, len_lgr - len_lgr % 3, 3)]\n }\n\n try_to_get_orf(seq_base, len(in_genome), i, reverse, after_data, found_orfs)\n\n return found_orfs\n\n\ndef overlappins_clean(ofrs):\n f_res = []\n for end_key in ofrs:\n min_el = ofrs[end_key][0]\n for el in ofrs[end_key]:\n if len(el[2]) > len(min_el[2]):\n min_el = el\n f_res.append(min_el)\n\n if len(f_res) == 0:\n raise TypeError\n return f_res\n\n\n\ndef get_orfs(genome):\n\n not_dna = re.compile(\"[^ATCG]\")\n if not_dna.match(genome) is not None:\n raise TypeError\n\n got_orfs = {}\n split_frame_analyse(genome, False, got_orfs)\n\n rc_genome = ''.join(reversed(complementary(genome)))\n split_frame_analyse(rc_genome, True, got_orfs)\n\n return overlappins_clean(got_orfs)\n","sub_path":"codechecker/repos/1/collected_files/orffinder/ga53xis.py","file_name":"ga53xis.py","file_ext":"py","file_size_in_byte":5865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"347571108","text":"import sys\nimport os\nimport Preprocess\nimport LoadData\nimport TopNPairIndex\nimport DTW\n\ndef test(fileName, datasetConst, genConst):\n currentFolderName = os.getcwd()\n fileNameWithPath = os.path.join(currentFolderName, genConst['DatasetFolder'], datasetConst['Folder'], fileName) # Generalized for UPCV or UTKinect\n # fileNameWithPath = os.path.join(currentFolderName, genConst['DatasetFolder'], 'UTKinect', fileName)\n preprocessedTestData, frameCount = Preprocess.preprocess(fileNameWithPath, datasetConst, genConst)\n 
trainData, metaData = LoadData.loadData(genConst)\n\n topNPairIndex = metaData[\"TopNPairIndex\"]\n topNTestPairFrames = TopNPairIndex.extractTopNIndex(preprocessedTestData, topNPairIndex)\n \n userCount = len(trainData)\n # storeMismatch = [sys.float_info.max] * (userCount + 1)\n storeMismatch = [999999] * (userCount + 1)\n\n for user in range(1, userCount + 1):\n storedTrainFrameCount = trainData[str(user)][0]\n storedTrainData = trainData[str(user)][1]\n testData = topNTestPairFrames\n testFrameCount = frameCount\n totalMismatch = 0\n for idx in range(metaData[\"N\"]):\n trainDTW = []\n testDTW = []\n for frame in range(storedTrainFrameCount):\n trainDTW.append(storedTrainData[frame][idx])\n for frame in range(testFrameCount):\n testDTW.append(testData[frame][idx])\n totalMismatch += DTW.dtw(trainDTW, testDTW)\n storeMismatch[user] = totalMismatch\n \n detectedUser = storeMismatch.index(min(storeMismatch))\n # print(\"StoreMisMatch: \",storeMismatch)\n print(detectedUser)\n return storeMismatch,detectedUser,trainDTW,testDTW\n","sub_path":"Test.py","file_name":"Test.py","file_ext":"py","file_size_in_byte":1711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"317966158","text":"import logging\n\nfrom datetime import datetime\nfrom django.core.cache import cache\nfrom django.db import transaction\nfrom django.http import HttpResponse\nfrom rest_framework import status\nfrom rest_framework.decorators import api_view, throttle_classes\nfrom rest_framework.throttling import UserRateThrottle\nfrom rest_framework.renderers import JSONRenderer\nfrom rest_framework.response import Response\n\nfrom restapi.models import Chat\nfrom EphemeralMessages.settings import CHAT_THROTTLE_RATE\nfrom restapi.exceptions import handle_api_exceptions, EphemeralMessageError\nfrom restapi.serializers import (ChatPOSTSerializer, ChatGETIDSerializer, \n ChatGETSerializer, ChatGETUserSerializer)\n\nlogger = logging.getLogger('CHAT')\n\n\nclass JSONResponse(HttpResponse):\n \"\"\"\n An HttpResponse that renders its content into JSON.\n \"\"\"\n def __init__(self, data, **kwargs):\n content = JSONRenderer().render(data)\n kwargs['content_type'] = 'application/json'\n super(JSONResponse, self).__init__(content, **kwargs)\n\n\nclass OncePerDayUserThrottle(UserRateThrottle):\n rate = CHAT_THROTTLE_RATE\n\n\n@api_view(['GET'])\ndef ephemeral_messages_service(request):\n \"\"\"\n A simple ephemeral text message service.\n The service has three RESTful endpoints for accessing data:\n 1. POST /chat\n Creates a new text message for passed in username.\n Request body properties:\n - username: String, The recipient of the message (required)\n - text: String, The content of the message (required)\n - timeout: Integer, The number of seconds the message\n should live before expiring (default 60)\n Response:\n A success response will include:\n - a status code of 201 Created\n - a body with a JSON object\n\n 2. GET /chat/:id\n Returns the message object for the given id.\n Response:\n A success response will include:\n - a JSON object containing the message\n the message's username, text and expiration_date\n\n 3. 
GET /chats/:username\n Returns a list of all unexpired texts for the user with the\n given username.\n Response:\n A success response will include:\n - a JSON array of messages, each of which contains the\n message's ID and text.\n\n \"\"\"\n logger.debug(\"Home page requested\")\n return Response({'description': 'EphemeralMessages API'})\n\n\n@handle_api_exceptions\ndef set_cache(id, data, timeout):\n \"\"\"\n Set cache key. Exceptions handled by decorator.\n :param id: key\n :param data: value\n :param timeout: timeout\n :return: None\n \"\"\"\n logger.debug(\"Set id %s, data %s in cache with timeout %s\"\n %(id, data, timeout))\n cache.set(id, data, timeout)\n\n\n@handle_api_exceptions\ndef expire_cache(id):\n \"\"\"\n Expire cache key. Exceptions handled by decorator.\n :param id: key\n :return: None\n \"\"\"\n logger.debug(\"Set expiration for id %s\" %id)\n cache.expire(id, 0)\n\n\n@handle_api_exceptions\ndef get_cache(id):\n \"\"\"\n Get cache key. Exceptions handled by decorator.\n :param id: key\n :return: value\n \"\"\"\n logger.debug(\"Get id %s from cache\" %id)\n return cache.get(id)\n\n\n@api_view(['POST'])\n@throttle_classes([OncePerDayUserThrottle])\n@handle_api_exceptions\n@transaction.atomic\ndef create_message(request):\n \"\"\"\n Creates a new text message for passed in username and text.\n\n POST /chat\n \n Sample json request body:\n {\"username\": ,\n \"text\": ,\n \"timeout\": }\n\n \"\"\"\n try:\n serializer = ChatPOSTSerializer(data=request.data)\n if serializer.is_valid():\n chat = serializer.save()\n chat_message = Chat.objects.get(id=chat.id)\n logger.debug(\"Created chat message %s\" % repr(chat_message))\n cache_serializer = ChatGETSerializer(chat_message)\n set_cache(chat.id, cache_serializer.data, timeout = chat.timeout)\n id_serializer = ChatGETIDSerializer(chat_message)\n return Response(id_serializer.data,\n status=status.HTTP_201_CREATED)\n else:\n logger.error(\"Post message is not valid %s\" %\n repr(serializer.data))\n return Response(serializer.errors,\n status=status.HTTP_400_BAD_REQUEST)\n except EphemeralMessageError as err:\n logger.error(\"Error processing message %s\" % repr(err))\n return Response(EphemeralMessageError,\n status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n\n\n@api_view(['GET'])\n@handle_api_exceptions\ndef get_message(request, chat_id):\n \"\"\"\n Returns the message object for the given id. This service can return both expired and unexpired messages.\n\n GET /chat/:id\n \"\"\"\n try:\n chat_message = get_cache(chat_id)\n if not chat_message:\n chat_message = Chat.objects.get(id=chat_id)\n serializer = ChatGETSerializer(chat_message)\n logger.debug(\"Get chat message from db %s\" % repr(serializer.data))\n return Response(serializer.data, status=status.HTTP_200_OK)\n else:\n logger.debug(\"Get chat message from cache %s\" % repr(chat_message))\n return Response(chat_message, status=status.HTTP_200_OK)\n except Chat.DoesNotExist as ex:\n logger.error(\"Chat id does not exist %s\" % repr(ex))\n return Response(\"Chat id does not exist\",\n status=status.HTTP_400_BAD_REQUEST)\n except EphemeralMessageError as message_processing_error:\n logger.error(\"Error processing request for chat id %s\"\n % repr(message_processing_error))\n return Response(EphemeralMessageError,\n status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n\n\n@api_view(['GET'])\n@handle_api_exceptions\ndef get_user_messages(request, user_name):\n \"\"\"\n Returns a list of all unexpired texts for the user with the given username. 
Any texts that are recieved are then expired.\n\n GET /chats/:username\n \"\"\"\n try:\n logger.debug(\"Extract chat messages for user %s\" % repr(user_name))\n chat_messages = Chat.objects.filter(username=user_name,\n expiration_date__gt = datetime.now())\n serializer = ChatGETUserSerializer(chat_messages, many=True)\n for chat_message in chat_messages:\n chat_message.timeout = 0\n expire_cache(chat_message.id)\n chat_message.save()\n logger.debug(\"Processed chat messages for user %s\" % repr(user_name))\n return Response(serializer.data, status=status.HTTP_200_OK)\n except EphemeralMessageError as message_processing_error:\n logger.error(\"Error processing request for user %s\"\n % repr(message_processing_error))\n return Response(EphemeralMessageError,\n status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n","sub_path":"restapi/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6937,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"130156331","text":"import re\nimport numpy as np\nfrom transformers import BertTokenizer, BertModel, BertTokenizerFast\nimport pandas as pd\nfrom sklearn.svm import SVC\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import plot_confusion_matrix\nimport matplotlib.pyplot as plt\nfrom segmentizer import Segmentizer\nimport time\n\n################################################################\n\ndef ids2text(tokens, concat=True):\n \"\"\"\n This function simply takes the de-tokenized word from BERT and converts to string.\n\n :param tokens:\n :param concat:\n :return: str\n \"\"\"\n single_symbol_pattern = re.compile(r\"^\\W$\")\n\n # First token\n answer = [tokens[0]]\n\n # Select the remaining answer tokens and join them with whitespace.\n for token in tokens[1:]:\n\n # If it's a subword token, then recombine it with the previous token.\n if token[0:2] == '##':\n answer.append(token[2:])\n\n # Otherwise, add the token with possible space.\n else:\n if single_symbol_pattern.match(token):\n answer.append(token)\n else:\n answer.append(' ' + token)\n\n # Format\n if concat:\n answer = \"\".join(answer)\n\n return answer\n\n\n# takes a column with values in a list and returns a dataframe with one value in each column \ndef list_to_dataframe(column):\n df = pd.DataFrame(column.tolist())\n return df\n\n################################################################\n\n\n\n\n# get data\n# qt holds quotes from 100 articles\nprint(\"Loading Quotes...\")\nqt = pd.read_csv(\"data/quotes100.csv\", encoding='utf-16', sep='\\t', index_col=0, converters={'Quotes': eval})\nqt = qt.explode('Quotes').drop(columns=['Pub.', 'HTML', 'Text', 'Titel', 'Område', 'URL', 'Format'])\nqt = qt.dropna()\n\n # split quotes into segements split by . 
(punktum)\nqt['Quotes'] = qt.Quotes.apply(Segmentizer.get_segments)\nqt = qt.explode('Quotes')\nqt.reset_index(drop=True, inplace=True)\n\n\n# Load the negative examples\n# not_qt holds segments from the danish wikipedia-article on Denmark\nprint(\"Loading Non-Quotes\")\nnot_qt = pd.read_csv(\"data/wiki-segmentized.csv\", sep='\\t', index_col=0)\nnot_qt.columns= ['Quotes']\n\n\n# Settings\nprint (\"Configuring BERT model...\")\nmodel_tag = \"bert-base-multilingual-uncased\"\ncls_loc = 0\n\n# Get a 'tokenizer', it converts words/tokens to token-numbers that represent those words\ntokenizer = BertTokenizerFast.from_pretrained(model_tag)\n\n# Get the BERT model, it takes the tokenized word-numbers and does magic on it (for example get vector output)\nmodel = BertModel.from_pretrained(model_tag)\n\n# prepare data \ndef prepare(text):\n return tokenizer(text, padding=True, return_tensors=\"pt\")\n\n# for sanity\ndef back_to_text(input_ids):\n result = \"\"\n for i in range(input_ids.shape[0]):\n temp = tokenizer.convert_ids_to_tokens(input_ids[i, :]) # Convert token-numbers back to token-strings\n result = result + \" \" + ids2text(temp)\n return result\n\n# takes a string as input - returns a BERT-vector\ndef get_BERT_vectors(strings):\n prepared = tokenizer(strings, padding=True, return_tensors=\"pt\")\n input_ids = prepared[\"input_ids\"]\n token_type_ids = prepared[\"token_type_ids\"]\n attention_mask = prepared[\"attention_mask\"]\n\n model_output = model(\n input_ids=input_ids,\n token_type_ids=token_type_ids,\n attention_mask=attention_mask,\n return_dict=True,\n )\n\n last_hidden_state = model_output[\"last_hidden_state\"]\n vector = last_hidden_state[:, cls_loc, :].detach().numpy() # type: np.ndarray\n\n return vector\n\n\n\n\n\n\n\n\n# convert the strings to BERT vectors in the two tables\nprint(\"Assign vectors to quotes\")\n\n\n\nquotes = qt['Quotes'].to_list() # Get all texts (the .apply-function is quite limited)\nnot_quotes = not_qt['Quotes'].to_list() \nbatch_size = 64 # You should probably use something like 32 or 64\n\n# Split texts into batches\nquote_batches = [quotes[i:min(i + batch_size, len(quotes))] for i in range(0, len(quotes), batch_size)]\nnot_quote_batches = [not_quotes[i:min(i + batch_size, len(not_quotes))] for i in range(0, len(not_quotes), batch_size)]\n\n\n\n\n# Compute all vectors through BERT\n\nstart = time.time()\nquote_vectors = np.concatenate([get_BERT_vectors(val) for val in quote_batches], axis=0)\nnot_quote_vectors = np.concatenate([get_BERT_vectors(val) for val in not_quote_batches], axis=0)\n\nprint('Time: ', time.time()-start)\n\nexit()\n\n# qt['vec'] = qt['Quotes'].apply(get_BERT_vector)\n# print ('Assign vectors to non-quotes')\n# not_qt['vec'] = not_qt['Quotes'].apply(get_BERT_vector)\n# qt['is_quote'] = 1\n# not_qt['is_quote'] = 0\n\n# combine quotes and non-quotes and get the features and labels for training\ncombined = pd.concat([qt, not_qt]).reset_index()\ny = combined.is_quote\nX = list_to_dataframe(combined['vec'])\nX_train, X_test, y_train, y_test = train_test_split(X,y, test_size=0.2)\n\n# train classifier on data\nsvm_clf = SVC()\nsvm_clf.fit(X_train, y_train)\n\n# show result\nplot_confusion_matrix(svm_clf, X_test, y_test)\nplt.show()\nscore = svm_clf.score(X_test, y_test)\nprint(\"Score:\", score )\n\n##########################3\n# test classifier on queens speech\n# \nprint('predicting on the queens speech...')\nqueen = Segmentizer.textfile_to_dataframe('data/queen2019.txt').reset_index()\nqueen['vec'] = 
queen['Quotes'].apply(get_BERT_vector)\nX = list_to_dataframe(queen['vec'])\npredictions = svm_clf.predict(X)\nqueen['predict'] = predictions\npd.set_option('display.max_rows', None)\nprint (queen[['Quotes', 'predict']].head(len(queen)))\n\n\n\n","sub_path":"OldFiles/QuoteBert.py","file_name":"QuoteBert.py","file_ext":"py","file_size_in_byte":5529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"581839012","text":"import serial\nimport time\n\nclass locData:\n\tdef __init__(self):\n\t\tself.accel = { 'x' : None, 'y' : None, 'z' : None }\n\t\tself.gyro = { 'x' : None, 'y' : None, 'z' : None }\n\t\tself.serialMsg = serial.Serial('/dev/ttyACM0', 115200)\n\t\tself.running = True\n\t\tprint(\"Using locData Backend\")\n\n\tdef update(self):\n\t\tself.serialMsg.reset_input_buffer()\n\t\twhile True:\n\t\t\tself.poll()\n\n\tdef run(self):\n\t\tself.serialMsg.reset_input_buffer()\n\t\tself.poll()\n\t\treturn self.accel['x'], self.accel['y'], self.accel['z'], self.gyro['x'], self.gyro['y'], self.gyro['z']\n\n\tdef run_threaded(self):\n\t\treturn self.accel['x'], self.accel['y'], self.accel['z'], self.gyro['x'], self.gyro['y'], self.gyro['z']\n\n\n\tdef poll(self):\n\t\t#print('polling...')\n\t\t#serialMsg.write(122)\n\t\tself.rawMsg = self.serialMsg.readline()\n\t\tmessage = (self.rawMsg.decode().strip())\n\t\tarr = message.split(' ')\n\t\ti = 0\n\t\twhile i will give you an error\n\n#------------------------------------------------------------------------------------------------------------\n\n# To format the user input\nuserIn = input(\"Enter info: \")\n# To print text with variable in python\nuserIn = int(2)\nformatMessage = \"You entered %s\" % userIn\n\n# or\nformatMessage2 = \"You entered {}\".format(userIn)\n\n# New method since python 3.6\nformatMessage36 = f\"You entered {userIn}\"\n\nprint(formatMessage)\n\n# For multiple variables\nx = 20\ny = 100\ntext = \"First is %s and then comes %s\" % (x, y)\nprint(text)\n\ntext36 = f\"First is {x} and then comes {y}\"\nprint(text36)","sub_path":"input.py","file_name":"input.py","file_ext":"py","file_size_in_byte":964,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"634193024","text":"'''\n This program is free software: you can redistribute it and/or modify\n it under the terms of the GNU Lesser General Public License as published by\n the Free Software Foundation, either version 3 of the License, or\n (at your option) any later version.\n\n This program is distributed in the hope that it will be useful,\n but WITHOUT ANY WARRANTY; without even the implied warranty of\n MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n GNU Lesser General Public License for more details.\n\n You have not received a copy of the GNU Lesser General Public License\n along with this program. 
Please see .\n\n'''\n\nfrom twisted.python import log\n\nimport curses\nimport locale\nimport os\nfrom curses import panel\nfrom curses import textpad\n\nfrom util import get_real_termial_size\n\nos.environ['ESCDELAY'] = \"25\"\nlocale.setlocale(locale.LC_ALL, '')\n\nclass MessageInput():\n\n def __init__(self, app, position, size):\n\n self.app = app\n\n self.w, self.h = size\n self.x, self.y = position\n self.y += 2\n\n self.__has_focus = False\n\n self.__last_size = None\n h, w = self.__size__()\n\n self.width = w\n self.height = h\n\n border = curses.newwin(h,\n w,\n self.y,\n self.x)\n\n border.box()\n self.border = border\n win = curses.newwin(h - 2, w - 2, self.y + 1, self.x + 1)\n\n self.channel_info = {}\n self.active_channel = ''\n\n win.bkgd(' ', curses.color_pair(4))\n self.__panel__ = panel.new_panel(win)\n\n self.__changed = True\n\n self.textbox = textpad.Textbox(self.__panel__.window(), insert_mode=True)\n\n\n def __size__(self):\n # look for springs (-1 means fix to screen dim)\n\n # use -1 , fill width,\n # -2, half of the width ??\n y, x = get_real_termial_size()\n self.y = y - 5\n w = x - self.x - 1 if self.w < 0 else self.w\n h = y - self.y - 1 if self.h < 0 else self.h\n\n if (w, h) != self.__last_size:\n self.__changed = True\n self.__last_size = (w, h)\n\n return h, w\n\n def set_focus(self, state):\n\n if state != self.__has_focus:\n self.__changed = True\n self.__has_focus = state\n curses.curs_set(state)\n\n def command(self, key):\n '''process commands'''\n\n if key in (curses.KEY_ENTER, 10):\n msg = self.textbox.gather()\n self.app.send_message(msg)\n self.__panel__.window().clear()\n elif key in (27,):\n self.app.focus_channels()\n else:\n self.textbox.do_command(key)\n\n self.draw(True)\n\n\n def draw(self, force=False):\n\n new_size = self.__size__()\n\n self.height = new_size[0]\n self.width = new_size[1]\n\n win = self.__panel__.window()\n\n if self.__changed or force:\n\n self.__changed = False\n\n attr = curses.color_pair(2) if self.__has_focus\\\n else curses.color_pair(0)\n\n self.border.erase()\n self.border.attrset(attr)\n self.border.border()\n self.border.refresh()\n\n win.bkgd(' ', curses.color_pair(0))\n win.attrset(attr)\n win.refresh()\n","sub_path":"twistedcurses/message_input.py","file_name":"message_input.py","file_ext":"py","file_size_in_byte":3358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"553118866","text":"# -*- coding: utf-8 -*-\nfrom .utils import versioned_reverse as reverse\nimport pytest\nfrom .utils import get, assert_fields_exist\nfrom events.models import (\n Event, PublicationStatus, Language\n)\n\n\n# === util methods ===\n\ndef get_list(api_client, version='v1', data=None, query_string=None):\n url = reverse('event-list', version=version)\n if query_string:\n url = '%s?%s' % (url, query_string)\n return get(api_client, url, data=data)\n\n\ndef get_detail(api_client, detail_pk, version='v1', data=None):\n detail_url = reverse('event-detail', version=version, kwargs={'pk': detail_pk})\n return get(api_client, detail_url, data=data)\n\n\ndef assert_event_fields_exist(data, version='v1'):\n # TODO: incorporate version parameter into version aware\n # parts of test code\n fields = (\n '@context',\n '@id',\n '@type',\n 'audience',\n 'created_time',\n 'custom_data',\n 'data_source',\n 'date_published',\n 'description',\n 'end_time',\n 'event_status',\n 'external_links',\n 'id',\n 'images',\n 'in_language',\n 'info_url',\n 'keywords',\n 'last_modified_time',\n 'location',\n 
'location_extra_info',\n 'name',\n 'offers',\n 'provider',\n 'provider_contact_info',\n 'publisher',\n 'short_description',\n 'audience_min_age',\n 'audience_max_age',\n 'start_time',\n 'sub_events',\n 'super_event',\n 'super_event_type',\n 'videos',\n 'replaced_by',\n #'pin',\n #'provider_email',\n 'accessible',\n 'multi_day'\n )\n if version == 'v0.1':\n fields += (\n 'origin_id',\n 'headline',\n 'secondary_headline',\n )\n assert_fields_exist(data, fields)\n\n\n# === tests ===\n\n@pytest.mark.django_db\ndef test_get_event_list_html_renders(api_client, event):\n url = reverse('event-list', version='v1')\n response = api_client.get(url, data=None, HTTP_ACCEPT='text/html')\n assert response.status_code == 200, str(response.content)\n\n\n@pytest.mark.django_db\ndef test_get_event_list_check_fields_exist(api_client, event):\n \"\"\"\n Tests that event list endpoint returns the correct fields.\n \"\"\"\n response = get_list(api_client)\n assert_event_fields_exist(response.data['data'][0])\n\n\n@pytest.mark.django_db\ndef test_get_event_detail_check_fields_exist(api_client, event):\n \"\"\"\n Tests that event detail endpoint returns the correct fields.\n \"\"\"\n response = get_detail(api_client, event.pk)\n assert_event_fields_exist(response.data)\n\n\n@pytest.mark.django_db\ndef test_get_unknown_event_detail_check_404(api_client):\n response = api_client.get(reverse('event-detail', kwargs={'pk': 'möö'}))\n assert response.status_code == 404\n\n\n@pytest.mark.django_db\ndef test_get_event_list_verify_text_filter(api_client, event, event2):\n response = get_list(api_client, data={'text': 'event'})\n assert event.id not in [entry['id'] for entry in response.data['data']]\n assert event2.id in [entry['id'] for entry in response.data['data']]\n\n\n@pytest.mark.django_db\ndef test_get_event_list_verify_data_source_filter(api_client, data_source, event, event2):\n response = get_list(api_client, data={'data_source': data_source.id})\n assert event.id in [entry['id'] for entry in response.data['data']]\n assert event2.id not in [entry['id'] for entry in response.data['data']]\n\n\n@pytest.mark.django_db\ndef test_get_event_list_verify_data_source_negative_filter(api_client, data_source, event, event2):\n response = get_list(api_client, data={'data_source!': data_source.id})\n assert event.id not in [entry['id'] for entry in response.data['data']]\n assert event2.id in [entry['id'] for entry in response.data['data']]\n\n\n@pytest.mark.django_db\ndef test_get_event_list_verify_location_filter(api_client, place, event, event2):\n response = get_list(api_client, data={'location': place.id})\n assert event.id in [entry['id'] for entry in response.data['data']]\n assert event2.id not in [entry['id'] for entry in response.data['data']]\n\n\n@pytest.mark.django_db\ndef test_get_event_list_verify_keyword_filter(api_client, keyword, event):\n event.keywords.add(keyword)\n response = get_list(api_client, data={'keyword': keyword.id})\n assert event.id in [entry['id'] for entry in response.data['data']]\n response = get_list(api_client, data={'keyword': 'unknown_keyword'})\n assert event.id not in [entry['id'] for entry in response.data['data']]\n\n\n@pytest.mark.django_db\ndef test_get_event_list_verify_division_filter(api_client, event, event2, event3, administrative_division,\n administrative_division2):\n event.location.divisions.set([administrative_division])\n event2.location.divisions.set([administrative_division2])\n\n # filter using one value\n response = get_list(api_client, data={'division': 
administrative_division.ocd_id})\n data = response.data['data']\n assert len(data) == 1\n assert event.id in [entry['id'] for entry in data]\n\n # filter using two values\n filter_value = '%s,%s' % (administrative_division.ocd_id, administrative_division2.ocd_id)\n response = get_list(api_client, data={'division': filter_value})\n data = response.data['data']\n assert len(data) == 2\n ids = [entry['id'] for entry in data]\n assert event.id in ids\n assert event2.id in ids\n\n\n@pytest.mark.django_db\ndef test_get_event_list_super_event_filters(api_client, event, event2):\n event.super_event_type = Event.SuperEventType.RECURRING\n event.save()\n event2.super_event = event\n event2.save()\n\n # fetch non-subevents\n response = get_list(api_client, query_string='super_event=none')\n assert len(response.data['data']) == 1\n assert response.data['data'][0]['id'] == event.id\n\n # fetch subevents\n response = get_list(api_client, query_string='super_event='+event.id)\n assert len(response.data['data']) == 1\n assert response.data['data'][0]['id'] == event2.id\n\n\n@pytest.mark.django_db\ndef test_get_event_list_recurring_filters(api_client, event, event2):\n event.super_event_type = Event.SuperEventType.RECURRING\n event.save()\n event2.super_event = event\n event2.save()\n\n # fetch superevents\n response = get_list(api_client, query_string='recurring=super')\n assert len(response.data['data']) == 1\n assert response.data['data'][0]['id'] == event.id\n\n # fetch subevents\n response = get_list(api_client, query_string='recurring=sub')\n assert len(response.data['data']) == 1\n assert response.data['data'][0]['id'] == event2.id\n\n\n@pytest.mark.django_db\ndef test_super_event_type_filter(api_client, event, event2):\n event.super_event_type = Event.SuperEventType.RECURRING\n event.save()\n event2.super_event = event\n event2.save()\n\n # \"none\" and \"null\" should return only the non super event\n for value in ('none', 'null'):\n response = get_list(api_client, query_string='super_event_type=%s' % value)\n ids = {e['id'] for e in response.data['data']}\n assert ids == {event2.id}\n\n # \"recurring\" should return only the recurring super event\n response = get_list(api_client, query_string='super_event_type=recurring')\n ids = {e['id'] for e in response.data['data']}\n assert ids == {event.id}\n\n # \"recurring,none\" should return both\n response = get_list(api_client, query_string='super_event_type=recurring,none')\n ids = {e['id'] for e in response.data['data']}\n assert ids == {event.id, event2.id}\n\n response = get_list(api_client, query_string='super_event_type=fwfiuwhfiuwhiw')\n assert len(response.data['data']) == 0\n\n\n@pytest.mark.django_db\ndef test_get_event_disallow_simultaneous_include_super_and_sub(api_client, event, event2):\n event.super_event_type = Event.SuperEventType.RECURRING\n event.save()\n event2.super_event = event\n event2.save()\n\n # fetch event with super event\n detail_url = reverse('event-detail', version='v1', kwargs={'pk': event2.pk})\n\n # If not specifically handled, the following combination of\n # include parameters causes an infinite recursion, because the\n # super events of sub events of super events ... are expanded ad\n # infinitum. 
This test is here to check that execution finishes.\n detail_url += '?include=super_event,sub_events'\n response = get(api_client, detail_url)\n assert_event_fields_exist(response.data)\n assert(type(response.data['super_event'] == 'dict'))\n\n\n@pytest.mark.django_db\ndef test_language_filter(api_client, event, event2, event3):\n event.name_sv = 'namn'\n event.save()\n event2.in_language.add(Language.objects.get_or_create(id='en')[0])\n event2.in_language.add(Language.objects.get_or_create(id='sv')[0])\n event2.save()\n event3.name_ru = 'название'\n event3.in_language.add(Language.objects.get_or_create(id='et')[0])\n event3.save()\n\n # Finnish should be the default language\n response = get_list(api_client, query_string='language=fi')\n ids = {e['id'] for e in response.data['data']}\n assert ids == {event.id, event2.id, event3.id}\n\n # Swedish should have two events (matches in_language and name_sv)\n response = get_list(api_client, query_string='language=sv')\n ids = {e['id'] for e in response.data['data']}\n assert ids == {event.id, event2.id}\n\n # English should have one event (matches in_language)\n response = get_list(api_client, query_string='language=en')\n ids = {e['id'] for e in response.data['data']}\n assert ids == {event2.id}\n\n # Russian should have one event (matches name_ru)\n response = get_list(api_client, query_string='language=ru')\n ids = {e['id'] for e in response.data['data']}\n assert ids == {event3.id}\n\n # Chinese should have no events\n response = get_list(api_client, query_string='language=zh_hans')\n ids = {e['id'] for e in response.data['data']}\n assert ids == set()\n\n # Estonian should have one event (matches in_language), even without translations available\n response = get_list(api_client, query_string='language=et')\n ids = {e['id'] for e in response.data['data']}\n assert ids == {event3.id}\n\n\n@pytest.mark.django_db\ndef test_event_list_filters(api_client, event, event2):\n filters = (\n ([event.publisher.id, event2.publisher.id], 'publisher'),\n ([event.data_source.id, event2.data_source.id], 'data_source'),\n )\n\n for filter_values, filter_name in filters:\n q = ','.join(filter_values)\n response = get_list(api_client, query_string='%s=%s' % (filter_name, q))\n data = response.data['data']\n assert(len(data) == 2)\n ids = [e['id'] for e in data]\n assert event.id in ids\n assert event2.id in ids\n\n\n@pytest.mark.django_db\ndef test_publication_status_filter(api_client, event, event2, user, organization, data_source):\n event.publication_status = PublicationStatus.PUBLIC\n event.save()\n\n event2.publication_status = PublicationStatus.DRAFT\n event2.save()\n\n api_client.force_authenticate(user=user)\n\n response = get_list(api_client, query_string='show_all=true&publication_status=public')\n ids = {e['id'] for e in response.data['data']}\n assert event.id in ids\n assert event2.id not in ids\n\n # cannot see drafts from other organizations\n response = get_list(api_client, query_string='show_all=true&publication_status=draft')\n ids = {e['id'] for e in response.data['data']}\n assert event2.id not in ids\n assert event.id not in ids\n\n event2.publisher = organization\n event2.data_source = data_source\n event2.save()\n\n response = get_list(api_client, query_string='show_all=true&publication_status=draft')\n ids = {e['id'] for e in response.data['data']}\n assert event2.id in ids\n assert event.id not in ids\n\n\n@pytest.mark.django_db\ndef test_admin_user_filter(api_client, event, event2, user):\n 
api_client.force_authenticate(user=user)\n\n response = get_list(api_client, query_string='admin_user=true')\n ids = {e['id'] for e in response.data['data']}\n assert event.id in ids\n assert event2.id not in ids\n\n\n@pytest.mark.django_db\ndef test_redirect_if_replaced(api_client, event, event2, user):\n api_client.force_authenticate(user=user)\n\n event.replaced_by = event2\n event.save()\n\n url = reverse('event-detail', version='v1', kwargs={'pk': event.pk})\n response = api_client.get(url, format='json')\n assert response.status_code == 301\n\n response2 = api_client.get(response.url, format='json')\n assert response2.status_code == 200\n assert response2.data['id'] == event2.pk\n\n\n@pytest.mark.django_db\ndef test_redirect_to_end_of_replace_chain(api_client, event, event2, event3, user):\n api_client.force_authenticate(user=user)\n\n event.replaced_by = event2\n event.save()\n event2.replaced_by = event3\n event2.save()\n\n url = reverse('event-detail', version='v1', kwargs={'pk': event.pk})\n response = api_client.get(url, format='json')\n assert response.status_code == 301\n\n response2 = api_client.get(response.url, format='json')\n assert response2.status_code == 200\n assert response2.data['id'] == event3.pk\n","sub_path":"events/tests/test_event_get.py","file_name":"test_event_get.py","file_ext":"py","file_size_in_byte":13263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"460465851","text":"import csv\n\ncommunity = {}\nprot_comm = {}\nprot_comm_list = {}\nknown_cancer_prot=[]\n\nipfile1= raw_input(\"enter community file: \")\nwith open(ipfile1,\"rb\") as f2:\n reader2 = csv.reader(f2, delimiter=\" \")\n for i, line in enumerate(reader2):\n community[i]=line\n\nipfile2= raw_input(\"enter the protein list file: \")\nwith open(ipfile2, \"rb\") as f1:\n reader1 = csv.reader(f1,delimiter=\"\\t\")\n for line in reader1:\n known_cancer_prot.extend(line)\n\nfor k, val in community.iteritems():\n for i in val:\n if i in known_cancer_prot:\n prot_comm_list.setdefault(i, []).append(k)\n\nipfile3 = raw_input(\"enter the file to save: \")\nwith open (ipfile3,\"w\") as q:\n wri= csv.writer(q,delimiter=\"\\t\")\n for k,v in prot_comm_list.items():\n wri.writerow((k,v))\n","sub_path":"node_in_community_checker.py","file_name":"node_in_community_checker.py","file_ext":"py","file_size_in_byte":792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"23230607","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Sep 18 16:47:56 2018\n\n@author: svd\n\nA bunch of routines for loading data from baphy/matlab\n\n\"\"\"\n\nfrom functools import lru_cache\nfrom pathlib import Path\nimport logging\nimport re\nimport os\nimport os.path\nimport pickle\nimport scipy.io\nimport scipy.io as spio\nimport scipy.ndimage.filters\nimport scipy.signal\nfrom scipy.interpolate import interp1d\nimport numpy as np\nimport collections\nimport json\nimport sys\nimport tarfile\nimport io\nimport datetime\nimport glob\nfrom math import isclose\nimport copy\nfrom itertools import groupby, repeat, chain, product\n\nfrom nems_lbhb import OpenEphys as oe\nfrom nems_lbhb import SettingXML as oes\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport nems.signal\nimport nems.recording\nimport nems.db as db\nfrom nems.recording import Recording\nfrom nems.recording import load_recording\nimport nems_lbhb.behavior as behavior\n\nlog = logging.getLogger(__name__)\n\n# paths to baphy 
data -- standard locations on elephant\nstim_cache_dir = '/auto/data/tmp/tstim/'  # location of cached stimuli\nspk_subdir = 'sorted/'   # location of spk.mat files relative to parmfiles\n\n# =================================================================\n\ndef baphy_align_time_openephys(events, timestamps, baphy_legacy_format=False):\n    '''\n    Parameters\n    ----------\n    events : DataFrame\n        Events stored in BAPHY parmfile\n    timestamps : array\n        Array of timestamps (in seconds) as read in from openephys\n    baphy_legacy_format : bool\n        If True, assume that all data before the onset of the first trial are\n        discarded (i.e., as is the case when aligning times using the spike\n        times file. This results in the first trial having a start timestamp of\n        0.\n    '''\n    n_baphy = events['Trial'].max()\n    n_oe = len(timestamps)\n    if n_baphy != n_oe:\n        mesg = f'Number of trials in BAPHY ({n_baphy}) and ' \\\n            f'OpenEphys ({n_oe}) do not match'\n        raise ValueError(mesg)\n\n    if baphy_legacy_format:\n        timestamps = timestamps - timestamps[0]\n\n    events = events.copy()\n    for i, timestamp in enumerate(timestamps) :\n        m = events['Trial'] == i+1\n        events.loc[m, ['start', 'end']] += timestamp\n    return events\n\n\n###############################################################################\n# Openephys utility functions\n###############################################################################\ndef load_trial_starts_openephys(openephys_folder):\n    '''\n    Load trial start times (seconds) from OpenEphys DIO\n\n    Parameters\n    ----------\n    openephys_folder : str or Path\n        Path to OpenEphys folder\n    '''\n    event_file = Path(openephys_folder) / 'all_channels.events'\n    data = oe.load(str(event_file))\n    header = data.pop('header')\n    df = pd.DataFrame(data)\n    ts = df.query('(channel == 0) & (eventType == 3) & (eventId == 1)')\n    return ts['timestamps'].values / float(header['sampleRate'])\n\n\ndef load_continuous_openephys(fh):\n    '''\n    Read continuous OpenEphys dataset\n\n    Parameters\n    ----------\n    fh : {str, file-like object, buffer}\n        If a file-like object or buffer, will read directly from it. 
\n        string, will open the file first (and close upon exiting).\n\n    Unlike the version provided by OpenEphys, this one can handle reading from\n    buffered streams (e.g., such as that provided by a tarfile) or existing\n    files.\n\n    Example\n    -------\n    import tarfile\n\n    parmfile = '/auto/data/daq/Nameko/NMK004/NMK004e06_p_NON.m'\n    manager = io.BAPHYExperiment(parmfile)\n    filename = manager.openephys_tarfile_relpath / '126_CH1.continuous'\n    tar_fh = tarfile.open(manager.openephys_tarfile, 'r:gz')\n    fh = tar_fh.extractfile(str(filename))\n    ch_data = load_continuous_openephys(fh)\n    '''\n    if not isinstance(fh, io.IOBase):\n        fh = open(fh, 'rb')\n        do_close = True\n    else:\n        do_close = False\n\n    header = oe.readHeader(fh)\n    scale = float(header['bitVolts'])\n    # dtypes follow the OpenEphys .continuous record layout: little-endian\n    # int64 timestamp, little-endian uint16 sample count, big-endian uint16\n    # recording number, then 1024 big-endian int16 samples and a 10-byte\n    # record marker\n    ts_dtype = np.dtype('<i8')\n    n_dtype = np.dtype('<u2')\n    record_number_dtype = np.dtype('>u2')\n    data_dtype = np.dtype('>i2')\n\n    timestamps = []\n    record_number = []\n    data = []\n\n    SAMPLES_PER_RECORD = 1024\n\n    while True:\n        try:\n            b = fh.read(ts_dtype.itemsize)\n            ts = np.frombuffer(b, ts_dtype, 1)[0]\n            b = fh.read(n_dtype.itemsize)\n            n = np.frombuffer(b, n_dtype, 1)[0]\n            if n != SAMPLES_PER_RECORD:\n                raise IOError('Found corrupt record')\n            b = fh.read(record_number_dtype.itemsize)\n            rn = np.frombuffer(b, record_number_dtype, 1)[0]\n            b = fh.read(data_dtype.itemsize * n)\n            d = np.frombuffer(b, data_dtype, n) * scale\n            # skip the 10-byte record marker\n            _ = fh.read(10)\n\n            timestamps.append(ts)\n            record_number.append(rn)\n            data.append(d)\n        except ValueError:\n            # we have reached the end of the file\n            break\n\n    timestamps = np.array(timestamps)\n    record_number = np.array(record_number)\n    data = np.concatenate(data)\n\n    if do_close:\n        fh.close()\n\n    return {\n        'header': header,\n        'timestamps': timestamps,\n        'data': data,\n        'record_number': record_number,\n    }\n\n\n###############################################################################\n# Unsorted functions\n###############################################################################\ndef loadmat(filename):\n    '''\n    this function should be called instead of direct spio.loadmat\n    as it cures the problem of not properly recovering python dictionaries\n    from mat files. It calls the function _check_keys to cure all entries\n    which are still mat-objects\n    '''\n    data = spio.loadmat(filename, struct_as_record=False, squeeze_me=True)\n    return _check_keys(data)
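\n\n# Minimal usage sketch (hypothetical path): loadmat() returns plain nested\n# dicts instead of opaque mat_struct objects, so baphy structures can be\n# indexed directly.\n#\n#     m = loadmat('/auto/data/daq/Animal/SITE/SITE01.mat')\n#     # nested struct fields become dict keys, e.g.\n#     # m['exptparams']['TrialObject'] ...\n#\n\ndef _check_keys(d):\n    '''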
\n    checks if entries in dictionary are mat-objects. If yes,\n    _todict is called to change them to nested dictionaries\n    '''\n    for key in d:\n        if isinstance(d[key], spio.matlab.mio5_params.mat_struct):\n            d[key] = _todict(d[key])\n    return d\n\ndef _todict(matobj):\n    '''\n    A recursive function which constructs nested dictionaries from matobjects\n    '''\n    d = {}\n    for strg in matobj._fieldnames:\n        elem = matobj.__dict__[strg]\n        if isinstance(elem, spio.matlab.mio5_params.mat_struct):\n            d[strg] = _todict(elem)\n        else:\n            d[strg] = elem\n    return d\n\n\ndef baphy_mat2py(s):\n\n    s3 = re.sub(r';$', r'', s.rstrip())\n    s3 = re.sub(r'%', r'#', s3)\n    s3 = re.sub(r'\\\\', r'/', s3)\n    s3 = re.sub(r\"\\.([a-zA-Z0-9]+)'\", r\"XX\\g<1>'\", s3)\n    s3 = re.sub(r\"\\.([a-zA-Z0-9]+)\\+\", r\"XX\\g<1>+\", s3)\n    s3 = re.sub(r\"\\.([a-zA-Z0-9]+) ,\", r\"XX\\g<1> ,\", s3)\n    s3 = re.sub(r'globalparams\\(1\\)', r'globalparams', s3)\n    s3 = re.sub(r'exptparams\\(1\\)', r'exptparams', s3)\n\n    s4 = re.sub(r'\\(([0-9]*)\\)', r'[\\g<1>]', s3)\n\n    # MLE: strip the .wav file suffix so it is not confused with a struct\n    # field. TODO: eliminate .wav from param files?\n    s5 = re.sub(r'\\.wav', r\"\", s4)\n    s5 = re.sub(r'\\.([A-Za-z][A-Za-z0-9_]+)', r\"['\\g<1>']\", s5)\n\n    s6 = re.sub(r'([0-9]+) ', r\"\\g<0>,\", s5)\n    x = s6.split('=')\n    if len(x) > 1:\n        if ';' in x[1]:\n            x[1] = re.sub(r'\\[', r\"np.array([[\", x[1])\n            x[1] = re.sub(r'\\]', r\"]])\", x[1])\n            x[1] = re.sub(r';', '],[', x[1])\n        x[1] = re.sub('true', 'True', x[1])\n        x[1] = re.sub('false', 'False', x[1])\n        x[1] = re.sub(r'NaN ', r\"np.nan,\", x[1])\n        x[1] = re.sub(r'Inf ', r\"np.inf,\", x[1])\n        x[1] = re.sub(r'NaN,', r\"np.nan,\", x[1])\n        x[1] = re.sub(r'Inf,', r\"np.inf,\", x[1])\n        x[1] = re.sub(r'NaN\\]', r\"np.nan]\", x[1])\n        x[1] = re.sub(r'Inf\\]', r\"np.inf]\", x[1])\n        s6 = \"=\".join(x)\n\n    s7 = re.sub(r\"XX([a-zA-Z0-9]+)'\", r\".\\g<1>'\", s6)\n    s7 = re.sub(r\"XX([a-zA-Z0-9]+)\\+\", r\".\\g<1>+\", s7)\n    s7 = re.sub(r\"XX([a-zA-Z0-9]+) ,\", r\".\\g<1> ,\", s7)\n    s7 = re.sub(r',,', r',', s7)\n    s7 = re.sub(r',Hz', r'Hz', s7)\n    s7 = re.sub(r'NaN', r'np.nan', s7)\n    s7 = re.sub(r'zeros\\(([0-9,]+)\\)', r'np.zeros([\\g<1>])', s7)\n    s7 = re.sub(r'{(.*)}', r'[\\g<1>]', s7)\n\n    s8 = re.sub(r\" , REF-[0-9]+\", r\" , Reference\", s7)\n    s8 = re.sub(r\" , TARG-[0-9]+\", r\" , Reference\", s8)\n\n    return s8\n\n\ndef baphy_parm_read(filepath, evpread=True):\n    log.info(\"Loading {0}\".format(filepath))\n\n    f = io.open(filepath, \"r\")\n    s = f.readlines(-1)\n\n    globalparams = {}\n    exptparams = {}\n    exptevents = {}\n\n    for ts in s:\n        sout = baphy_mat2py(ts)\n        try:\n            exec(sout)\n        except KeyError:\n            # a parent dict level is missing; create empty dicts from the\n            # outside in until the assignment succeeds\n            ts1 = sout.split('= [')\n            ts1 = ts1[0].split(',[')\n\n            s1 = ts1[0].split('[')\n            sout1 = \"[\".join(s1[:-1]) + ' = {}'\n            try:\n                exec(sout1)\n            except:\n                s2 = sout1.split('[')\n                sout2 = \"[\".join(s2[:-1]) + ' = {}'\n                try:\n                    exec(sout2)\n                except:\n                    s3 = sout2.split('[')\n                    sout3 = \"[\".join(s3[:-1]) + ' = {}'\n                    try:\n                        exec(sout3)\n                    except:\n                        s4 = sout3.split('[')\n                        sout4 = \"[\".join(s4[:-1]) + ' = {}'\n                        exec(sout4)\n                    exec(sout3)\n                exec(sout2)\n            exec(sout1)\n            exec(sout)\n        except NameError:\n            log.info(\"NameError on: {0}\".format(sout))\n        except:\n            log.info(\"Other error on: {0} to {1}\".format(ts, sout))\n\n    # special conversions\n\n    # convert exptevents to a DataFrame:\n    t = [exptevents[k] for k in exptevents]\n    d = pd.DataFrame(t)\n    if 'ClockStartTime' in d.columns:\n        exptevents = d.drop(['Rove', 'ClockStartTime'], axis=1)\n    elif 'Rove' in d.columns:\n        exptevents = d.drop(['Rove'], axis=1)\n    else:\n        exptevents = d
\n    # rename columns to NEMS standard epoch names\n    exptevents.columns = ['name', 'start', 'end', 'Trial']\n    for i in range(len(exptevents)):\n        if exptevents.loc[i, 'end'] == []:\n            exptevents.loc[i, 'end'] = exptevents.loc[i, 'start']\n\n    if evpread:\n        try:\n            # get lick events from evp file\n            evpfile = Path(filepath).with_suffix('.evp')\n            lick_events = get_lick_events(evpfile, name='LICK')\n            log.info(\"evp file for licks: %s\", evpfile)\n\n            # add evp lick events, delete baphy lick events\n            exptevents = exptevents[~(exptevents.name == 'LICK')]\n            exptevents = pd.concat([exptevents, lick_events], ignore_index=True)\n        except:\n            log.info(\"Failed loading evp file. Still zipped?\")\n\n    if 'ReferenceClass' not in exptparams['TrialObject'][1].keys():\n        exptparams['TrialObject'][1]['ReferenceClass'] = \\\n            exptparams['TrialObject'][1]['ReferenceHandle'][1]['descriptor']\n    # CPP special case: delete the commas added to tag names. TODO: this\n    # might be unnecessary, the task is done in MLE code.\n    if exptparams['TrialObject'][1]['ReferenceClass'] == 'ContextProbe':\n        tags = exptparams['TrialObject'][1]['ReferenceHandle'][1]['Names']  # gets the list of tags\n        # eliminate commas with a regexp and map each old tag to its new\n        # comma-less tag\n        tag_map = {oldtag: re.sub(r' , ', r' ', oldtag)\n                   for oldtag in tags}\n        # place the comma-less tags back in place\n        exptparams['TrialObject'][1]['ReferenceHandle'][1]['Names'] = list(tag_map.values())\n        # extend the tag map, adding the PreStimSilence/Stim/PostStimSilence\n        # prefix and the Reference suffix\n        epoch_map = dict()\n        for prefix, tag in product(['PreStimSilence', 'Stim', 'PostStimSilence'], tags):\n            key = '{} , {} , Reference'.format(prefix, tag)\n            val = '{} , {} , Reference'.format(prefix, tag_map[tag])\n            epoch_map[key] = val\n        # replace exptevents names using the map, i.e. get rid of commas\n        exptevents.replace(epoch_map, inplace=True)\n\n    return globalparams, exptparams, exptevents\n\n\ndef baphy_convert_user_definable_fields(x):\n    '''\n    Converts all occurrences of the `'UserDefinableFields'` list to a\n    dictionary. This is recursive, so it will scan the full dataset returned by\n    `baphy_parm_read`.\n\n    Example\n    -------\n    >>> data = {\n        'descriptor': 'NoiseSample',\n        'UserDefinableFields': ['PreStimSilence', 'edit', 0,\n                                'PostStimSilence', 'edit', 0,\n                                'Duration', 'edit', 0.3,]\n    }\n\n    >>> baphy_convert_user_definable_fields(data)\n    >>> print(data)\n    {'descriptor': 'NoiseSample',\n     'UserDefinableFields': {'PreStimSilence': 0, 'PostStimSilence': 0, 'Duration': 0.3}}\n    '''\n    if isinstance(x, dict) and 'UserDefinableFields' in x:\n        userdef = x.pop('UserDefinableFields')\n        keys = userdef[::3]\n        values = userdef[2::3]\n        x['UserDefinableFields'] = dict(zip(keys, values))\n    if isinstance(x, dict):\n        for v in x.values():\n            baphy_convert_user_definable_fields(v)\n    elif isinstance(x, (tuple, list)):\n        for v in x:\n            baphy_convert_user_definable_fields(v)
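\n\n# Hedged usage sketch (hypothetical parmfile path): parse a BAPHY parameter\n# file and inspect the resulting event DataFrame. Columns follow the NEMS\n# epoch convention set above.\n#\n#     globalparams, exptparams, exptevents = baphy_parm_read(\n#         '/auto/data/daq/Animal/SITE/SITE01_p_NON.m', evpread=False)\n#     print(exptevents.columns)  # ['name', 'start', 'end', 'Trial']\n#\n\ndef fill_default_options(options):\n    \"\"\"\n    fill in default options. Use options after adding defaults to specify\n    the metadata hash.\n    \"\"\"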
\n\n    options = options.copy()\n\n    # set default options if missing\n    options['rasterfs'] = int(options.get('rasterfs', 100))\n    options['stimfmt'] = options.get('stimfmt', 'ozgf')\n    options['chancount'] = int(options.get('chancount', 18))\n    options['pertrial'] = int(options.get('pertrial', False))\n    options['includeprestim'] = options.get('includeprestim', 1)\n    options['pupil'] = int(options.get('pupil', False))\n    options['rem'] = int(options.get('rem', False))\n    options['pupil_eyespeed'] = int(options.get('pupil_eyespeed', False))\n    if options['pupil'] or options['rem']:\n        options = set_default_pupil_options(options)\n\n    #options['pupil_deblink'] = int(options.get('pupil_deblink', 1))\n    #options['pupil_deblink_dur'] = options.get('pupil_deblink_dur', 1)\n    #options['pupil_median'] = options.get('pupil_median', 0)\n    #options[\"pupil_offset\"] = options.get('pupil_offset', 0.75)\n    options['resp'] = int(options.get('resp', True))\n    options['stim'] = int(options.get('stim', True))\n    options['runclass'] = options.get('runclass', None)\n    options['rawid'] = options.get('rawid', None)\n    options['facemap'] = options.get('facemap', False)\n\n    if options['stimfmt'] in ['envelope', 'parm']:\n        log.info(\"Setting chancount=0 for stimfmt=%s\", options['stimfmt'])\n        options['chancount'] = 0\n\n    return options
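\n\n# Minimal sketch: callers can pass a sparse options dict and let\n# fill_default_options() make the implied defaults explicit, e.g.\n#\n#     opts = fill_default_options({'rasterfs': 50, 'stimfmt': 'parm'})\n#     # opts['chancount'] == 0 (forced for parm/envelope stimfmt)\n#     # opts['pupil'] == 0, opts['resp'] == 1, ...\n#\n\ndef baphy_load_specgram(stimfilepath):\n\n    matdata = scipy.io.loadmat(stimfilepath, chars_as_strings=True)\n\n    stim = matdata['stim']\n\n    stimparam = matdata['stimparam'][0][0]\n\n    try:\n        # case 1: loadstimfrombaphy format\n        # remove redundant tags from tag list and stimulus array\n        d = matdata['stimparam'][0][0][0][0]\n        d = [x[0] for x in d]\n        tags, tagids = np.unique(d, return_index=True)\n\n        stim = stim[:, :, tagids]\n    except:\n        # loadstimbytrial format. 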
don't want to filter by unique tags.\n # field names within stimparam don't seem to be preserved\n # in this load format??\n d = matdata['stimparam'][0][0][2][0]\n tags = [x[0] for x in d]\n\n return stim, tags, stimparam\n\n\ndef baphy_stim_cachefile(exptparams, parmfilepath=None, **options):\n \"\"\"\n generate cache filename generated by loadstimfrombaphy\n\n code adapted from loadstimfrombaphy.m\n \"\"\"\n\n if 'truncatetargets' not in options:\n options['truncatetargets'] = 1\n if 'pertrial' not in options:\n options['pertrial'] = False\n\n if options['pertrial']:\n # loadstimbytrial cache filename format\n pp, bb = os.path.split(parmfilepath)\n bb = bb.split(\".\")[0]\n dstr = \"loadstimbytrial_{0}_ff{1}_fs{2}_cc{3}_trunc{4}.mat\".format(\n bb, options['stimfmt'], options['rasterfs'],\n options['chancount'], options['truncatetargets']\n )\n return stim_cache_dir + dstr\n\n # otherwise use standard load stim from baphy format\n if 'use_target' in options:\n if options['use_target']:\n RefObject = exptparams['TrialObject'][1]['TargetHandle'][1]\n else:\n RefObject = exptparams['TrialObject'][1]['ReferenceHandle'][1]\n elif options['runclass'] is None:\n RefObject = exptparams['TrialObject'][1]['ReferenceHandle'][1]\n elif 'runclass' in exptparams.keys():\n runclass = exptparams['runclass'].split(\"_\")\n if (len(runclass) > 1) and (runclass[1] == options[\"runclass\"]):\n RefObject = exptparams['TrialObject'][1]['TargetHandle'][1]\n else:\n RefObject = exptparams['TrialObject'][1]['ReferenceHandle'][1]\n else:\n RefObject = exptparams['TrialObject'][1]['ReferenceHandle'][1]\n\n dstr = RefObject['descriptor']\n if dstr == 'Torc':\n if 'RunClass' in exptparams['TrialObject'][1].keys():\n dstr += '-'+exptparams['TrialObject'][1]['RunClass']\n else:\n dstr += '-TOR'\n\n # include all parameter values, even defaults, in filename\n fields = RefObject['UserDefinableFields']\n if options['stimfmt']=='envelope':\n x_these_fields=['F0s','ComponentsNumber'];\n else:\n x_these_fields=[];\n\n for cnt1 in range(0, len(fields), 3):\n if RefObject[fields[cnt1]] == 0:\n RefObject[fields[cnt1]] = int(0)\n # print(fields[cnt1])\n # print(RefObject[fields[cnt1]])\n # print(dstr)\n if fields[cnt1] in x_these_fields:\n if type(RefObject[fields[cnt1]]) is int:\n l=['X']\n else:\n l = ['X' for i in range(len(RefObject[fields[cnt1]]))]\n dstr = \"{0}-{1}\".format(dstr, \"__\".join(l))\n else:\n dstr = \"{0}-{1}\".format(dstr, RefObject[fields[cnt1]])\n\n dstr = re.sub(r\":\", r\"\", dstr)\n\n if 'OveralldB' in exptparams['TrialObject'][1]:\n OveralldB = exptparams['TrialObject'][1]['OveralldB']\n dstr += \"-{0}dB\".format(OveralldB)\n else:\n OveralldB = 0\n\n dstr += \"-{0}-fs{1}-ch{2}\".format(\n options['stimfmt'], options['rasterfs'], options['chancount']\n )\n\n if options['includeprestim']:\n dstr += '-incps1'\n\n dstr = re.sub(r\"[ ,]\", r\"_\", dstr)\n dstr = re.sub(r\"[\\[\\]]\", r\"\", dstr)\n\n return stim_cache_dir + dstr + '.mat'\n\ndef parm_tbp(exptparams, **options):\n \"\"\"\n generate parameterized spectrograms for TBP ref/tar stimuli in stim_dict format\n\n :param exptparams:\n :param options:\n :return:\n \"\"\"\n ref = exptparams['TrialObject'][1]['ReferenceHandle'][1]\n tar = exptparams['TrialObject'][1]['TargetHandle'][1]\n\n ref_names = ref['Names']\n tar_names = tar['Names']\n if tar['descriptor'] == 'ToneInNoise':\n tar_tone_names = tar['Tone'][1]['Names']\n tar_noise_bands = np.array(tar['ToneBands'])-1\n tar_fixed_band = tar['ToneFixedBand']\n if len(tar_fixed_band) == 0:\n 
tar_tone_bands = tar_noise_bands\n        else:\n            tar_tone_bands = [int(tar_fixed_band)-1] * len(tar_noise_bands)\n\n        #_, tar_tone_channels = np.unique(tar_tone_bands, return_index=True)\n        # assume there's only one target tone frequency!\n        tar_tone_channels = np.full_like(tar_tone_bands, 0)\n\n        tar_snrs = tar['SNRs']\n    elif tar['descriptor'] == 'Tone':\n        tar_tone_names = tar['Names']\n        tar_noise_bands = np.arange(len(tar_tone_names))\n        tar_tone_bands = np.arange(len(tar_tone_names))\n        tar_tone_channels = tar_tone_bands.copy()\n        tar_snrs = np.full(len(tar_tone_names), np.inf)\n\n    else:\n        raise ValueError(f\"Unsupported TargetClass {tar['descriptor']}\")\n\n    stim_dict = {}\n    total_bands = len(ref_names) + len(set(tar_tone_bands))\n    fs = options['rasterfs']\n    prebins = int(fs * ref['PreStimSilence'])\n    durbins = int(fs * ref['Duration'])\n    postbins = int(fs * ref['PostStimSilence'])\n    total_bins = prebins + durbins + postbins\n    for i, r in enumerate(ref_names):\n        s = np.zeros((total_bands, total_bins))\n        s[i, prebins] = 1\n        stim_dict[r] = s\n    for i, t in enumerate(tar_names):\n        s = np.zeros((total_bands, total_bins))\n        if np.isfinite(tar_snrs[i]):\n            s[tar_noise_bands[i], prebins] = 1\n            s[len(ref_names)+tar_tone_channels[i], prebins] = 10 ** (tar_snrs[i] / 20)\n        elif tar_snrs[i] > 0:\n            s[len(ref_names)+tar_tone_channels[i], prebins] = 1\n        else:\n            s[tar_noise_bands[i], prebins] = 1\n        stim_dict[t] = s\n    tags = list(stim_dict.keys())\n    stimparam = {'chans': ref_names + list(set(tar_tone_names))}\n\n    return stim_dict, tags, stimparam\n\ndef labeled_line_stim(exptparams, **options):\n    \"\"\"\n    generate a parameterized \"spectrogram\" of the stimulus, where the onset of\n    each unique stim/tar/ref/cat event is coded in its own row\n\n    :param exptparams:\n    :param options:\n    :return:\n    \"\"\"\n    ref = exptparams['TrialObject'][1]['ReferenceHandle'][1]\n    tar = exptparams['TrialObject'][1]['TargetHandle'][1]\n\n    ref_names = ref['Names']\n    tar_names = tar['Names']\n    all_names = ref_names + tar_names\n\n    stim_dict = {}\n    total_bands = len(ref_names) + len(tar_names)\n    fs = options['rasterfs']\n    for i, r in enumerate(all_names):\n        if i == 0:\n            # reference epoch lengths\n            prebins = int(fs * ref['PreStimSilence'])\n            durbins = int(fs * ref['Duration'])\n            postbins = int(fs * ref['PostStimSilence'])\n            total_bins = prebins + durbins + postbins\n        elif i == len(ref_names):\n            # shift to using tar lengths\n            prebins = int(fs * tar['PreStimSilence'])\n            durbins = int(fs * tar['Duration'])\n            postbins = int(fs * tar['PostStimSilence'])\n            total_bins = prebins + durbins + postbins\n\n        s = np.zeros((total_bands, total_bins))\n        s[i, prebins] = 1\n        stim_dict[r] = s\n    tags = list(stim_dict.keys())\n    stimparam = {'chans': all_names}\n\n    return stim_dict, tags, stimparam
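\n\n# Sketch of the stim_dict layout produced by parm_tbp/labeled_line_stim\n# (values for illustration only): each entry is a channels x time array with\n# a 1 (or an SNR-scaled value) at stimulus onset.\n#\n#     stim_dict, tags, stimparam = parm_tbp(exptparams, rasterfs=100)\n#     s = stim_dict[tags[0]]   # shape: (n_ref + n_tar_bands, total_bins)\n#     # s[i, prebins] == 1 marks onset; tone rows scale by 10 ** (snr / 20)\n#\n\ndef baphy_load_stim(exptparams, parmfilepath, epochs=None, exptevents=None,\n                    **options):\n    # NOTE: the exptevents keyword was added here because the VOC_VOC branch\n    # below renames Reference events in place; previously that name was\n    # referenced without being defined in this scope\n\n    if (options['stimfmt'] == 'parm') and exptparams['TrialObject'][1]['ReferenceClass'].startswith('Torc'):\n        import nems_lbhb.strf.torc_subfunctions as tsf\n        TorcObject = exptparams['TrialObject'][1]['ReferenceHandle'][1]\n        stim, tags, stimparam = tsf.generate_torc_spectrograms(\n            TorcObject, rasterfs=options['rasterfs'], single_cycle=False)\n        # adjust so that all power is >0\n        for k in stim.keys():\n            stim[k] = stim[k] + 5\n\n        # NB stim is a dict rather than a 3-d array\n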
\n    elif (options['stimfmt'] == 'parm') and \\\n            (exptparams['TrialObject'][1]['ReferenceClass'] == 'NoiseBurst'):\n\n        # NB stim is a dict rather than a 3-d array\n        stim, tags, stimparam = parm_tbp(exptparams, **options)\n\n    elif (options['stimfmt'] == 'll'):\n\n        # NB stim is a dict rather than a 3-d array\n        stim, tags, stimparam = labeled_line_stim(exptparams, **options)\n\n    elif exptparams['runclass'] == 'VOC_VOC':\n        stimfilepath1 = baphy_stim_cachefile(exptparams, parmfilepath, use_target=False, **options)\n        stimfilepath2 = baphy_stim_cachefile(exptparams, parmfilepath, use_target=True, **options)\n        log.info(\"Cached stim: {0}, {1}\".format(stimfilepath1, stimfilepath2))\n        # load stimulus spectrogram\n        stim1, tags1, stimparam1 = baphy_load_specgram(stimfilepath1)\n        stim2, tags2, stimparam2 = baphy_load_specgram(stimfilepath2)\n        stim = np.concatenate((stim1, stim2), axis=2)\n        if exptparams['TrialObject'][1]['ReferenceHandle'][1]['SNR'] >= 100:\n            t2 = [t+'_0dB' for t in tags2]\n            tags = np.concatenate((tags1, t2))\n            eventmatch = 'Reference1'\n        else:\n            t1 = [t+'_0dB' for t in tags1]\n            tags = np.concatenate((t1, tags2))\n            eventmatch = 'Reference2'\n        for i in range(len(exptevents)):\n            if eventmatch in exptevents.loc[i, 'name']:\n                exptevents.loc[i, 'name'] = exptevents.loc[i, 'name'].replace('.wav', '.wav_0dB')\n                exptevents.loc[i, 'name'] = exptevents.loc[i, 'name'].replace('Reference1', 'Reference')\n                exptevents.loc[i, 'name'] = exptevents.loc[i, 'name'].replace('Reference2', 'Reference')\n\n        stimparam = stimparam1\n    else:\n        stimfilepath = baphy_stim_cachefile(exptparams, parmfilepath, **options)\n        log.info(\"Cached stim: {0}\".format(stimfilepath))\n        # load stimulus spectrogram\n        stim, tags, stimparam = baphy_load_specgram(stimfilepath)\n\n    if options[\"stimfmt\"] == 'envelope' and \\\n            exptparams['TrialObject'][1]['ReferenceClass'] == 'SSA':\n        # SSA special case\n        stimo = stim.copy()\n        maxval = np.max(np.reshape(stimo, [2, -1]), axis=1)\n        log.info('special case for SSA stim!')\n        ref = exptparams['TrialObject'][1]['ReferenceHandle'][1]\n        stimlen = ref['PipDuration'] + ref['PipInterval']\n        stimbins = int(stimlen * options['rasterfs'])\n\n        stim = np.zeros([2, stimbins, 6])\n        prebins = int(ref['PipInterval']/2 * options['rasterfs'])\n        durbins = int(ref['PipDuration'] * options['rasterfs'])\n        stim[0, prebins:(prebins+durbins), 0:3] = maxval[0]\n        stim[1, prebins:(prebins+durbins), 3:] = maxval[1]\n        tags = [\"{}+ONSET\".format(ref['Frequencies'][0]),\n                \"{}+{:.2f}\".format(ref['Frequencies'][0], ref['F1Rates'][0]),\n                \"{}+{:.2f}\".format(ref['Frequencies'][0], ref['F1Rates'][1]),\n                \"{}+ONSET\".format(ref['Frequencies'][1]),\n                \"{}+{:.2f}\".format(ref['Frequencies'][1], ref['F1Rates'][0]),\n                \"{}+{:.2f}\".format(ref['Frequencies'][1], ref['F1Rates'][1])]\n\n    snr_suff = \"\"\n    if 'SNR' in exptparams['TrialObject'][1]['ReferenceHandle'][1].keys():\n        SNR = exptparams['TrialObject'][1]['ReferenceHandle'][1]['SNR']\n        if SNR < 100:\n            log.info('Noisy stimulus (SNR<100), appending tag to epoch names')\n            snr_suff = \"_{}dB\".format(SNR)\n\n    if exptparams['runclass'] == 'CPN':\n        # clean up NTI sequence tags\n        tags = [\"\".join(t.split(\" \")[:2]) if t.startswith(\"sequence\") else t for t in tags]\n\n    if (epochs is not None):\n        # additional processing steps to convert stim into a dictionary with\n        # keys that match epoch names; specific to BAPHYExperiment loader\n\n        if (type(stim) is not dict):\n            stim_dict = {}\n            for eventidx in range(0, len(tags)):\n                # save stimulus for this event as separate dictionary entry\n                stim_dict[\"STIM_\" + tags[eventidx] + snr_suff] = stim[:, :, eventidx]\n            stim = 
stim_dict\n\n if (type(stim) is dict):\n keys = list(stim.keys())\n new_stim={}\n new_keys=[]\n for k in keys:\n matches = list(set(epochs[epochs.name.str.endswith(k)].name.values))\n for nk in matches:\n new_stim[nk] = stim[k]\n stim = new_stim\n\n #stim_dict = {}\n #for eventidx in range(0, len(tags)):\n # # save stimulus for this event as separate dictionary entry\n # if type(stim) is dict:\n # stim_dict[\"STIM_\" + tags[eventidx] + snr_suff] = stim[tags[eventidx]]\n # else:\n # stim_dict[\"STIM_\" + tags[eventidx] + snr_suff] = stim[:, :, eventidx]\n\n return stim, tags, stimparam\n\n\ndef baphy_load_spike_data_raw(spkfilepath, channel=None, unit=None):\n\n matdata = scipy.io.loadmat(spkfilepath, chars_as_strings=True)\n\n sortinfo = matdata['sortinfo']\n if sortinfo.shape[0] > 1:\n sortinfo = sortinfo.T\n sortinfo = sortinfo[0]\n\n # figure out sampling rate, used to convert spike times into seconds\n spikefs = matdata['rate'][0][0]\n\n return sortinfo, spikefs\n\n\ndef baphy_align_time_BAD(exptevents, sortinfo, spikefs, finalfs=0):\n\n # number of channels in recording (not all necessarily contain spikes)\n chancount = len(sortinfo)\n\n # figure out how long each trial is by the time of the last spike count.\n # this method is a hack!\n # but since recordings are longer than the \"official\"\n # trial end time reported by baphy, this method preserves extra spikes\n TrialCount = np.max(exptevents['Trial'])\n TrialLen_sec = np.array(\n exptevents.loc[exptevents['name'] == \"TRIALSTOP\"]['start']\n )\n TrialLen_spikefs = np.concatenate(\n (np.zeros([1, 1]), TrialLen_sec[:, np.newaxis]*spikefs), axis=0\n )\n\n for c in range(0, chancount):\n if len(sortinfo[c]) and sortinfo[c][0].size:\n s = sortinfo[c][0][0]['unitSpikes']\n s = np.reshape(s, (-1, 1))\n unitcount = s.shape[0]\n for u in range(0, unitcount):\n st = s[u, 0]\n\n # print('chan {0} unit {1}: {2} spikes'.format(c,u,st.shape[1]))\n for trialidx in range(1, TrialCount+1):\n ff = (st[0, :] == trialidx)\n if np.sum(ff):\n utrial_spikefs = np.max(st[1, ff])\n TrialLen_spikefs[trialidx, 0] = np.max(\n [utrial_spikefs, TrialLen_spikefs[trialidx, 0]]\n )\n\n # using the trial lengths, figure out adjustments to trial event times.\n if finalfs:\n log.debug('rounding Trial offset spike times to even number of rasterfs bins')\n # print(TrialLen_spikefs)\n TrialLen_spikefs = np.ceil(TrialLen_spikefs / spikefs*finalfs) / finalfs*spikefs\n Offset_spikefs = np.cumsum(TrialLen_spikefs)\n Offset_sec = Offset_spikefs / spikefs # how much to offset each trial\n\n # adjust times in exptevents to approximate time since experiment started\n # rather than time since trial started (native format)\n for Trialidx in range(1, TrialCount+1):\n ff = (exptevents['Trial'] == Trialidx)\n exptevents.loc[ff, ['start', 'end']] = (\n exptevents.loc[ff, ['start', 'end']] + Offset_sec[Trialidx-1]\n )\n\n log.info(\"{0} trials totaling {1:.2f} sec\".format(TrialCount, Offset_sec[-1]))\n\n # convert spike times from samples since trial started to\n # (approximate) seconds since experiment started (matched to exptevents)\n totalunits = 0\n spiketimes = [] # list of spike event times for each unit in recording\n unit_names = [] # string suffix for each unit (CC-U)\n chan_names = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h']\n for c in range(0, chancount):\n if len(sortinfo[c]) and sortinfo[c][0].size:\n s = sortinfo[c][0][0]['unitSpikes']\n comment = sortinfo[c][0][0][0][0][2][0]\n log.info('Comment: %s', comment)\n\n s = np.reshape(s, (-1, 1))\n unitcount = s.shape[0]\n 
for u in range(0, unitcount):\n                st = s[u, 0]\n                uniquetrials = np.unique(st[0, :])\n\n                unit_spike_events = np.array([])\n                for trialidx in uniquetrials:\n                    ff = (st[0, :] == trialidx)\n                    this_spike_events = (st[1, ff]\n                                         + Offset_spikefs[int(trialidx-1)])\n                    if len(comment) > 0:\n                        if comment == 'PC-cluster sorted by mespca.m':\n                            # remove last spike, which is stray\n                            this_spike_events = this_spike_events[:-1]\n                    unit_spike_events = np.concatenate(\n                        (unit_spike_events, this_spike_events), axis=0\n                    )\n\n                totalunits += 1\n                if chancount <= 8:\n                    unit_names.append(\"{0}{1}\".format(chan_names[c], u+1))\n                else:\n                    unit_names.append(\"{0:02d}-{1}\".format(c+1, u+1))\n                spiketimes.append(unit_spike_events / spikefs)\n\n    return exptevents, spiketimes, unit_names\n\n\ndef baphy_align_time(exptevents, sortinfo, spikefs, finalfs=0):\n\n    # number of channels in recording (not all necessarily contain spikes)\n    chancount = len(sortinfo)\n    while chancount > 1 and sortinfo[chancount-1].size == 0:\n        chancount -= 1\n    # figure out how long each trial is by the time of the last spike count.\n    # this method is a hack!\n    # but since recordings are longer than the \"official\"\n    # trial end time reported by baphy, this method preserves extra spikes\n    TrialCount = int(np.max(exptevents['Trial']))\n\n    hit_trials = exptevents[exptevents.name == \"BEHAVIOR,PUMPON,Pump\"].Trial\n    max_event_times = exptevents.groupby('Trial')['end'].max().values\n    TrialLen_sec = np.array(\n        exptevents.loc[exptevents['name'] == \"TRIALSTOP\"]['start']\n    )\n    if len(hit_trials):\n        TrialLen_sec[hit_trials-1] = max_event_times[hit_trials-1]\n\n    TrialLen_spikefs = np.concatenate(\n        (np.zeros([1, 1]), TrialLen_sec[:, np.newaxis]*spikefs), axis=0\n    )\n\n    for ch in range(0, chancount):\n        if len(sortinfo[ch]) and sortinfo[ch][0].size:\n            s = sortinfo[ch][0][0]['unitSpikes']\n            s = np.reshape(s, (-1, 1))\n            unitcount = s.shape[0]\n            for u in range(0, unitcount):\n                st = s[u, 0]\n\n                for trialidx in range(1, TrialCount+1):\n                    ff = (st[0, :] == trialidx)\n                    if np.sum(ff):\n                        utrial_spikefs = np.max(st[1, ff])\n                        TrialLen_spikefs[trialidx, 0] = np.max(\n                            [utrial_spikefs, TrialLen_spikefs[trialidx, 0]]\n                        )\n\n    # using the trial lengths, figure out adjustments to trial event times.\n    if finalfs:\n        log.info('rounding Trial offset spike times'\n                 ' to even number of rasterfs bins')\n        TrialLen_spikefs = (\n            np.ceil(TrialLen_spikefs / spikefs*finalfs) / finalfs*spikefs\n        )\n\n    Offset_spikefs = np.cumsum(TrialLen_spikefs)\n    Offset_sec = Offset_spikefs / spikefs  # how much to offset each trial\n    # adjust times in exptevents to approximate time since experiment started\n    # rather than time since trial started (native format)\n    for Trialidx in range(1, TrialCount+1):\n        ff = (exptevents['Trial'] == Trialidx)\n        exptevents.loc[ff, ['start', 'end']] = (\n            exptevents.loc[ff, ['start', 'end']] + Offset_sec[Trialidx-1]\n        )\n\n    log.info(\"{0} trials totaling {1:.2f} sec\".format(TrialCount, Offset_sec[-1]))
\n\n    # convert spike times from samples since trial started to\n    # (approximate) seconds since experiment started (matched to exptevents)\n    totalunits = 0\n    spiketimes = []  # list of spike event times for each unit in recording\n    unit_names = []  # string suffix for each unit (CC-U)\n    chan_names = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h']\n    for c in range(0, chancount):\n        if len(sortinfo[c]) and sortinfo[c][0].size:\n            s = sortinfo[c][0][0]['unitSpikes']\n            comment = sortinfo[c][0][0][0][0][2][0]\n            log.debug('Comment: %s', comment)\n\n            s = np.reshape(s, (-1, 1))\n            unitcount = s.shape[0]\n            for u in range(0, unitcount):\n                st = s[u, 0]\n                # initialize here so empty units still get an (empty) entry\n                unit_spike_events = np.array([])\n                if st.size:\n                    log.debug(\"{} {}\".format(u, str(st.shape)))\n                    uniquetrials = np.unique(st[0, :])\n\n                    for trialidx in range(1, TrialCount+1):\n                        ff = (st[0, :] == trialidx)\n                        this_spike_events = (st[1, ff]\n                                             + Offset_spikefs[int(trialidx-1)])\n                        if len(comment) > 0:\n                            if comment == 'PC-cluster sorted by mespca.m':\n                                # remove last spike, which is stray\n                                this_spike_events = this_spike_events[:-1]\n                        unit_spike_events = np.concatenate(\n                            (unit_spike_events, this_spike_events), axis=0\n                        )\n\n                totalunits += 1\n                if chancount <= 8:\n                    unit_names.append(\"{0}{1}\".format(chan_names[c], u+1))\n                else:\n                    unit_names.append(\"{0:02d}-{1}\".format(c+1, u+1))\n                spiketimes.append(unit_spike_events / spikefs)\n\n    return exptevents, spiketimes, unit_names\n\n\ndef baphy_align_time_baphyparm(exptevents, finalfs=0, **options):\n    TrialCount = int(np.max(exptevents['Trial']))\n\n    TrialStarts = exptevents.loc[exptevents['name'].str.startswith(\"TRIALSTART\")]['name']\n\n    def _get_start_time(x):\n        d = x.split(\",\")\n        if len(d) > 2:\n            time = datetime.datetime.strptime(d[1].strip()+\" \"+d[2], '%Y-%m-%d %H:%M:%S.%f')\n        else:\n            time = datetime.datetime(2000, 1, 1)\n        return time\n\n    TrialStartDateTime = TrialStarts.apply(_get_start_time)\n\n    # time first trial started; all epoch times will be measured in seconds\n    # from this time\n    timezero = TrialStartDateTime.iloc[0]\n\n    def _get_time_diff_seconds(x, timezero=0):\n        return (x-timezero).total_seconds()\n\n    TrialStartSeconds = TrialStartDateTime.apply(_get_time_diff_seconds, timezero=timezero)\n\n    Offset_sec = TrialStartSeconds.values\n\n    if np.sum(Offset_sec) == 0:\n        log.info('No timestamps in baphy events, inferring trial times from durations')\n        Offset_sec = exptevents.loc[exptevents.name == 'TRIALSTOP', 'start'].values\n        Offset_sec = np.roll(Offset_sec, 1)\n        Offset_sec[0] = 0\n        Offset_sec = np.cumsum(Offset_sec)\n\n    exptevents['start'] = exptevents['start'].astype(float)\n    exptevents['end'] = exptevents['end'].astype(float)\n\n    # adjust times in exptevents to approximate time since experiment started\n    # rather than time since trial started (native format)\n    for Trialidx in range(1, TrialCount+1):\n        ff = (exptevents['Trial'] == Trialidx)\n        exptevents.loc[ff, ['start', 'end']] = (\n            exptevents.loc[ff, ['start', 'end']] + Offset_sec[Trialidx-1]\n        )\n\n    if finalfs:\n        exptevents['start'] = 
np.round(exptevents['start']*finalfs)/finalfs\n exptevents['end'] = np.round(exptevents['end']*finalfs)/finalfs\n\n log.info(\"{0} trials totaling {1:.2f} sec\".format(TrialCount, Offset_sec[-1]))\n\n return exptevents\n\n\ndef set_default_pupil_options(options):\n\n options = options.copy()\n options[\"rasterfs\"] = options.get('rasterfs', 100)\n options['pupil'] = options.get('pupil', 1)\n options['pupil_analysis_method'] = options.get('pupil_analysis_method', 'cnn') # or 'matlab'\n options[\"pupil_offset\"] = options.get('pupil_offset', 0.75)\n options[\"pupil_deblink\"] = options.get('pupil_deblink', True)\n options[\"pupil_deblink_dur\"] = options.get('pupil_deblink_dur', 1)\n options[\"pupil_median\"] = options.get('pupil_median', 0)\n options[\"pupil_smooth\"] = options.get('pupil_smooth', 0)\n options[\"pupil_highpass\"] = options.get('pupil_highpass', 0)\n options[\"pupil_lowpass\"] = options.get('pupil_lowpass', 0)\n options[\"pupil_bandpass\"] = options.get('pupil_bandpass', 0)\n options[\"pupil_derivative\"] = options.get('pupil_derivative', '')\n options[\"pupil_mm\"] = options.get('pupil_mm', False)\n options[\"pupil_eyespeed\"] = options.get('pupil_eyespeed', False)\n options[\"rem\"] = options.get('rem', True)\n options[\"rem_units\"] = options.get('rem_units', 'mm')\n options[\"rem_min_pupil\"] = options.get('rem_min_pupil', 0.2)\n options[\"rem_max_pupil\"] = options.get('rem_max_pupil', 1)\n options[\"rem_max_pupil_sd\"] = options.get('rem_max_pupil_sd', 0.05)\n options[\"rem_min_saccade_speed\"] = options.get('rem_min_saccade_speed', 0.5)\n options[\"rem_min_saccades_per_minute\"] = options.get('rem_min_saccades_per_minute', 0.01)\n options[\"rem_max_gap_s\"] = options.get('rem_max_gap_s', 15)\n options[\"rem_min_episode_s\"] = options.get('rem_min_episode_s', 30)\n options[\"verbose\"] = options.get('verbose', False)\n\n return options\n\n\ndef load_pupil_trace(pupilfilepath, exptevents=None, **options):\n \"\"\"\n returns big_rs which is pupil trace resampled to options['rasterfs']\n and strialidx, which is the index into big_rs for the start of each\n trial. need to make sure the big_rs vector aligns with the other signals\n \"\"\"\n\n options = set_default_pupil_options(options)\n\n pupilfilepath = get_pupil_file(pupilfilepath, **options)\n\n rasterfs = options[\"rasterfs\"]\n pupil_offset = options[\"pupil_offset\"]\n pupil_deblink = options[\"pupil_deblink\"]\n pupil_deblink_dur = options[\"pupil_deblink_dur\"]\n pupil_median = options[\"pupil_median\"]\n pupil_mm = options[\"pupil_mm\"]\n pupil_eyespeed = options[\"pupil_eyespeed\"]\n verbose = options[\"verbose\"]\n options['pupil'] = options.get('pupil', True)\n #rasterfs = options.get('rasterfs', 100)\n #pupil_offset = options.get('pupil_offset', 0.75)\n #pupil_deblink = options.get('pupil_deblink', True)\n #pupil_deblink_dur = options.get('pupil_deblink_dur', (1/3))\n #pupil_median = options.get('pupil_median', 0)\n #pupil_mm = options.get('pupil_mm', False)\n #pupil_eyespeed = options.get('pupil_eyespeed', False)\n #verbose = options.get('verbose', False)\n\n if options[\"pupil_smooth\"]:\n raise ValueError('pupil_smooth not implemented. 
try pupil_median?')\n    if options[\"pupil_highpass\"]:\n        raise ValueError('pupil_highpass not implemented.')\n    if options[\"pupil_lowpass\"]:\n        raise ValueError('pupil_lowpass not implemented.')\n    if options[\"pupil_bandpass\"]:\n        raise ValueError('pupil_bandpass not implemented.')\n    if options[\"pupil_derivative\"]:\n        raise ValueError('pupil_derivative not implemented.')\n\n    # we want to use exptevents TRIALSTART events as the ground truth for the\n    # time when each trial starts. these times are set based on openephys\n    # data, since baphy doesn't log exact trial start times\n    if exptevents is None:\n        from nems_lbhb.baphy_experiment import BAPHYExperiment\n\n        experiment = BAPHYExperiment.from_pupilfile(pupilfilepath)\n        trial_starts = experiment.get_trial_starts()\n        exptevents = experiment.get_baphy_events()\n\n    loading_pcs = 0\n    if 'SVD.pickle' in pupilfilepath:\n        loading_pcs = options.get('facemap', 0)\n\n        log.info(\"SVD.pickle file, assuming single matrix: %s\", pupilfilepath)\n        with open(pupilfilepath, 'rb') as fp:\n            pupildata = pickle.load(fp)\n\n        pupil_diameter = pupildata[:, :loading_pcs]\n\n        log.info(\"pupil_diameter.shape: %s\", str(pupil_diameter.shape))\n        log.info(\"keeping %d channels\", loading_pcs)\n\n    elif '.pickle' in pupilfilepath:\n        with open(pupilfilepath, 'rb') as fp:\n            pupildata = pickle.load(fp)\n\n        # hard code to use minor axis for now\n        options['pupil_variable_name'] = 'minor_axis'\n        log.debug(\"Using default pupil_variable_name: %s\", options['pupil_variable_name'])\n        log.info(\"Using CNN results for pupiltrace\")\n\n        pupil_diameter = pupildata['cnn']['a'] * 2\n        # missing frames/frames that couldn't be decoded were saved as nans;\n        # pad them here with the adjacent frame (the next frame if the very\n        # first frame is missing)\n        nan_args = np.argwhere(np.isnan(pupil_diameter))\n\n        for arg in nan_args:\n            arg = arg[0]\n            log.info(\"padding missing pupil frame {0} with adjacent ellipse params\".format(arg))\n            if arg > 0:\n                pupil_diameter[arg] = pupil_diameter[arg - 1]\n            else:\n                pupil_diameter[arg] = pupil_diameter[arg + 1]\n\n        pupil_diameter = pupil_diameter[:-1, np.newaxis]\n\n        log.info(\"pupil_diameter.shape: \" + str(pupil_diameter.shape))\n\n        if pupil_eyespeed:\n            try:\n                eye_speed = pupildata['cnn']['eyespeed'][:-1, np.newaxis]\n                log.info(\"loaded eye_speed\")\n            except:\n                pupil_eyespeed = False\n                log.info(\"eye_speed requested but file does not exist!\")\n\n    elif '.pup.mat' in pupilfilepath:\n\n        matdata = scipy.io.loadmat(pupilfilepath)\n\n        p = matdata['pupil_data']\n        params = p['params']\n        if 'pupil_variable_name' not in options:\n            options['pupil_variable_name'] = params[0][0]['default_var'][0][0][0]\n            log.debug(\"Using default pupil_variable_name: %s\", options['pupil_variable_name'])\n        if 'pupil_algorithm' not in options:\n            options['pupil_algorithm'] = params[0][0]['default'][0][0][0]\n            log.debug(\"Using default pupil_algorithm: %s\", options['pupil_algorithm'])\n\n        results = p['results'][0][0][-1][options['pupil_algorithm']]\n        pupil_diameter = 
np.array(results[0][options['pupil_variable_name']][0][0])\n if pupil_diameter.shape[0] == 1:\n pupil_diameter = pupil_diameter.T\n log.info(\"pupil_diameter.shape: \" + str(pupil_diameter.shape))\n\n if pupil_eyespeed:\n try:\n eye_speed = np.array(results[0]['eye_speed'][0][0])\n log.debug(\"loaded eye_speed\")\n except:\n pupil_eyespeed = False\n log.info(\"eye_speed requested but file does not exist!\")\n\n fs_approximate = 30 # approx video framerate\n if pupil_deblink & ~loading_pcs:\n dp = np.abs(np.diff(pupil_diameter, axis=0))\n blink = np.zeros(dp.shape)\n blink[dp > np.nanmean(dp) + 6*np.nanstd(dp)] = 1\n # CRH add following line 7-19-2019\n # (blink should be = 1 if pupil_dia goes to 0)\n blink[[isclose(p, 0, abs_tol=0.5) for p in pupil_diameter[:-1]]] = 1\n smooth_width = int(fs_approximate*pupil_deblink_dur)\n box = np.ones([smooth_width]) / smooth_width\n blink = np.convolve(blink[:, 0], box, mode='same')\n blink[blink > 0] = 1\n blink[blink <= 0] = 0\n onidx, = np.where(np.diff(blink) > 0)\n offidx, = np.where(np.diff(blink) < 0)\n\n if onidx[0] > offidx[0]:\n onidx = np.concatenate((np.array([0]), onidx))\n if len(onidx) > len(offidx):\n offidx = np.concatenate((offidx, np.array([len(blink)])))\n deblinked = pupil_diameter.copy()\n if pupil_eyespeed:\n deblinked_eye_speed = eye_speed.copy()\n for i, x1 in enumerate(onidx):\n x2 = offidx[i]\n if x2 < x1:\n log.info([i, x1, x2])\n log.info(\"WHAT'S UP??\")\n else:\n # print([i,x1,x2])\n deblinked[x1:x2, 0] = np.linspace(\n deblinked[x1], deblinked[x2-1], x2-x1\n ).squeeze()\n if pupil_eyespeed:\n deblinked_eye_speed[x1:x2, 0] = np.nan\n\n if verbose:\n plt.figure()\n if pupil_eyespeed:\n plt.subplot(2, 1, 1)\n plt.plot(pupil_diameter, label='Raw')\n plt.plot(deblinked, label='Deblinked')\n plt.xlabel('Frame')\n plt.ylabel('Pupil')\n plt.legend()\n plt.title(\"Artifacts detected: {}\".format(len(onidx)))\n if pupil_eyespeed:\n plt.subplot(2, 1, 2)\n plt.plot(eye_speed, label='Raw')\n plt.plot(deblinked_eye_speed, label='Deblinked')\n plt.xlabel('Frame')\n plt.ylabel('Eye speed')\n plt.legend()\n pupil_diameter = deblinked\n if pupil_eyespeed:\n eye_speed = deblinked_eye_speed\n\n if pupil_eyespeed:\n returned_measurement = eye_speed\n else:\n returned_measurement = pupil_diameter\n\n # resample and remove dropped frames\n\n # find and parse pupil events\n pp = ['PUPIL,' in x['name'] for i, x in exptevents.iterrows()]\n trials = list(exptevents.loc[pp, 'Trial'])\n ntrials = len(trials)\n timestamp = np.zeros([ntrials+1])\n firstframe = np.zeros([ntrials+1])\n for i, x in exptevents.loc[pp].iterrows():\n t = int(x['Trial'] - 1)\n s = x['name'].split(\",[\")\n p = eval(\"[\"+s[1])\n # print(\"{0} p=[{1}\".format(i,s[1]))\n timestamp[t] = p[0]\n firstframe[t] = int(p[1])\n pp = ['PUPILSTOP' in x['name'] for i, x in exptevents.iterrows()]\n lastidx = np.argwhere(pp)[-1]\n\n s = exptevents.iloc[lastidx[0]]['name'].split(\",[\")\n p = eval(\"[\" + s[1])\n timestamp[-1] = p[0]\n firstframe[-1] = int(p[1])\n\n # align pupil with other events, probably by\n # removing extra bins from between trials\n ff = exptevents['name'].str.startswith('TRIALSTART')\n start_events = exptevents.loc[ff, ['start']].reset_index()\n start_events['StartBin'] = (\n np.round(start_events['start'] * rasterfs)\n ).astype(int)\n start_e = list(start_events['StartBin'])\n ff = (exptevents['name'] == 'TRIALSTOP')\n stop_events = exptevents.loc[ff, ['start']].reset_index()\n stop_events['StopBin'] = (\n np.round(stop_events['start'] * rasterfs)\n ).astype(int)\n 
stop_e = list(stop_events['StopBin'])\n\n # calculate frame count and duration of each trial\n #svd/CRH fix: use start_e to determine trial duration\n duration = np.diff(np.append(start_e, stop_e[-1]) / rasterfs)\n\n # old method: use timestamps in pupil recording, which don't take into account correction for sampling bin size\n # that may be coarser than the video sampling rate\n #duration = np.diff(timestamp) * 24*60*60\n\n frame_count = np.diff(firstframe)\n\n if loading_pcs:\n l = ['pupil']\n elif pupil_eyespeed & options['pupil']:\n l = ['pupil', 'pupil_eyespeed']\n elif pupil_eyespeed:\n l = ['pupil_eyespeed']\n elif options['pupil']:\n l = ['pupil']\n\n big_rs_dict = {}\n\n for signal in l:\n if signal == 'pupil_eyespeed':\n pupil_eyespeed = True\n else:\n pupil_eyespeed = False\n\n # warp/resample each trial to compensate for dropped frames\n strialidx = np.zeros([ntrials + 1])\n #big_rs = np.array([[]])\n all_fs = np.empty([ntrials])\n\n #import pdb;\n #pdb.set_trace()\n\n for ii in range(0, ntrials):\n if loading_pcs:\n d = pupil_diameter[int(firstframe[ii]):int(firstframe[ii]+frame_count[ii]), :]\n\n elif signal == 'pupil_eyespeed':\n d = eye_speed[\n int(firstframe[ii]):int(firstframe[ii]+frame_count[ii]), 0\n ]\n else:\n d = pupil_diameter[\n int(firstframe[ii]):int(firstframe[ii]+frame_count[ii]), 0\n ]\n fs = frame_count[ii] / duration[ii]\n all_fs[ii] = fs\n t = np.arange(0, d.shape[0]) / fs\n if pupil_eyespeed:\n d = d * fs # convert to px/s before resampling\n ti = np.arange(\n (1/rasterfs)/2, duration[ii]+(1/rasterfs)/2, 1/rasterfs\n )\n # print(\"{0} len(d)={1} len(ti)={2} fs={3}\"\n # .format(ii,len(d),len(ti),fs))\n _f = interp1d(t, d, axis=0, fill_value=\"extrapolate\")\n di = _f(ti)\n if ii==0:\n big_rs = di\n else:\n big_rs = np.concatenate((big_rs, di), axis=0)\n if (ii < ntrials-1) and (len(big_rs) > start_e[ii+1]):\n big_rs = big_rs[:start_e[ii+1]]\n elif ii == ntrials-1:\n big_rs = big_rs[:stop_e[ii]]\n\n strialidx[ii+1] = big_rs.shape[0]\n\n if pupil_median:\n kernel_size = int(round(pupil_median*rasterfs/2)*2+1)\n big_rs = scipy.signal.medfilt(big_rs, kernel_size=(kernel_size,1))\n\n # shift pupil (or eye speed) trace by offset, usually 0.75 sec\n offset_frames = int(pupil_offset*rasterfs)\n big_rs = np.roll(big_rs, -offset_frames, axis=0)\n\n # svd pad with final pupil value (was np.nan before)\n big_rs[-offset_frames:] = big_rs[-offset_frames]\n\n # shape to 1 x T to match NEMS signal specs. 
or transpose if 2nd dim already exists\n if big_rs.ndim==1:\n big_rs = big_rs[np.newaxis, :]\n else:\n big_rs=big_rs.T\n\n if pupil_mm:\n try:\n #convert measurements from pixels to mm\n eye_width_px = matdata['pupil_data']['results'][0][0]['eye_width'][0][0][0]\n eye_width_mm = matdata['pupil_data']['params'][0][0]['eye_width_mm'][0][0][0]\n big_rs = big_rs*(eye_width_mm/eye_width_px)\n except:\n print(\"couldn't convert pupil to mm\")\n\n if verbose:\n #plot framerate for each trial (for checking camera performance)\n plt.figure()\n plt.plot(all_fs.T)\n plt.xlabel('Trial')\n plt.ylabel('Sampling rate (Hz)')\n\n if verbose:\n plt.show()\n\n if len(l)==2:\n big_rs_dict[signal] = big_rs\n\n if len(l)==2:\n return big_rs_dict, strialidx\n else:\n return big_rs, strialidx\n\n\ndef get_rem(pupilfilepath, exptevents=None, **options):\n \"\"\"\n Find rapid eye movements based on pupil and eye-tracking data.\n\n Inputs:\n\n pupilfilepath: Absolute path of the pupil file (to be loaded by\n nems_lbhb.io.load_pupil_trace).\n\n exptevents:\n\n options: Dictionary of analysis parameters\n rasterfs: Sampling rate (default: 100)\n rem_units: If 'mm', convert pupil to millimeters and eye speed to\n mm/s while loading (default: 'mm')\n rem_min_pupil: Minimum pupil size during REM episodes (default: 0.2)\n rem_max_pupil: Maximum pupil size during REM episodes (mm, default: 1)\n rem_max_pupil_sd: Maximum pupil standard deviation during REM episodes\n (default: 0.05)\n rem_min_saccade_speed: Minimum eye movement speed to consider eye\n movement as saccade (default: 0.01)\n rem_min_saccades_per_minute: Minimum saccades per minute during REM\n episodes (default: 0.01)\n rem_max_gap_s: Maximum gap to fill in between REM episodes\n (seconds, default: 15)\n rem_min_episode_s: Minimum duration of REM episodes to keep\n (seconds, default: 30)\n verbose: Plot traces and identified REM episodes (default: True)\n\n Returns:\n\n is_rem: Numpy array of booleans, indicating which time bins occured\n during REM episodes (True = REM)\n\n options: Dictionary of parameters used in analysis\n\n ZPS 2018-09-24: Initial version.\n \"\"\"\n # find appropriate pupil file\n pupilfilepath = get_pupil_file(pupilfilepath)\n\n #Set analysis parameters from defaults, if necessary.\n options = set_default_pupil_options(options)\n\n rasterfs = options[\"rasterfs\"]\n units = options[\"rem_units\"]\n min_pupil = options[\"rem_min_pupil\"]\n max_pupil = options[\"rem_max_pupil\"]\n max_pupil_sd = options[\"rem_max_pupil_sd\"]\n min_saccade_speed = options[\"rem_min_saccade_speed\"]\n min_saccades_per_minute = options[\"rem_min_saccades_per_minute\"]\n max_gap_s = options[\"rem_max_gap_s\"]\n min_episode_s = options[\"rem_min_episode_s\"]\n verbose = options[\"verbose\"]\n\n #Load data.\n load_options = options.copy()\n load_options[\"verbose\"] = False\n if units == 'mm':\n load_options[\"pupil_mm\"] = True\n elif units == \"px\":\n load_options[\"pupil_mm\"] = False\n elif units == 'norm_max':\n raise ValueError(\"TODO: support for norm pupil diam/speed by max\")\n load_options['norm_max'] = True\n\n load_options[\"pupil_eyespeed\"] = True\n pupil_trace, _ = load_pupil_trace(pupilfilepath, exptevents, **load_options)\n pupil_size = pupil_trace[\"pupil\"]\n eye_speed = pupil_trace[\"pupil_eyespeed\"]\n\n pupil_size = pupil_size[0,:]\n eye_speed = eye_speed[0,:]\n\n #Find REM episodes.\n\n #(1) Very small pupil sizes often indicate that the pupil is occluded by the\n #eyelid or underlit. 
In either case, measurements of eye position are\n #unreliable, so we remove these frames of the trace before analysis.\n pupil_size[np.nan_to_num(pupil_size) < min_pupil] = np.nan\n eye_speed[np.nan_to_num(pupil_size) < min_pupil] = np.nan\n\n #(2) Rapid eye movements are similar to saccades. In our data,\n #these appear as large, fast spikes in the speed at which pupil moves.\n #To mark epochs when eye is moving more quickly than usual, threshold\n #eye speed, then smooth by calculating the rate of saccades per minute.\n saccades = np.nan_to_num(eye_speed) > min_saccade_speed\n minute = np.ones(rasterfs*60)/(rasterfs*60)\n saccades_per_minute = np.convolve(saccades, minute, mode='same')\n\n #(3) To distinguish REM sleep from waking - since it seeems that ferrets\n #can sleep with their eyes open - look for periods when pupil is constricted\n #and doesn't show slow oscillations (which may indicate a different sleep\n #stage or quiet waking).\n # 10-second moving average of pupil size:\n ten_seconds = np.ones(rasterfs*10)/(rasterfs*10)\n smooth_pupil_size = np.convolve(pupil_size, ten_seconds, mode='same');\n # 10-second moving standard deviation of pupil size:\n pupil_sd = pd.Series(smooth_pupil_size)\n pupil_sd = pupil_sd.rolling(rasterfs*10).std()\n pupil_sd = np.array(pupil_sd)\n rem_episodes = (np.nan_to_num(smooth_pupil_size) < max_pupil) & \\\n (np.isfinite(smooth_pupil_size)) & \\\n (np.nan_to_num(pupil_sd) < max_pupil_sd) & \\\n (np.nan_to_num(saccades_per_minute) > min_saccades_per_minute)\n\n #(4) Connect episodes that are separated by a brief gap.\n rem_episodes = run_length_encode(rem_episodes)\n brief_gaps = []\n for i,episode in enumerate(rem_episodes):\n is_gap = not(episode[0])\n gap_time = episode[1]\n if is_gap and gap_time/rasterfs < max_gap_s:\n rem_episodes[i] = (True, gap_time)\n brief_gaps.append((True, gap_time))\n else:\n brief_gaps.append((False, gap_time))\n\n #(5) Remove brief REM episodes.\n rem_episodes = run_length_encode(run_length_decode(rem_episodes))\n brief_episodes = []\n for i,episode in enumerate(rem_episodes):\n is_rem_episode = episode[0]\n episode_time = episode[1]\n if is_rem_episode and episode_time/rasterfs < min_episode_s:\n rem_episodes[i] = (False, episode_time)\n brief_episodes.append((True, episode_time))\n else:\n brief_episodes.append((False, episode_time))\n\n is_rem = run_length_decode(rem_episodes)\n\n #Plot\n if verbose:\n\n samples = pupil_size.size\n minutes = samples/(rasterfs*60)\n time_ax = np.linspace(0, minutes, num=samples)\n\n is_brief_gap = run_length_decode(brief_gaps)\n is_brief_episode = run_length_decode(brief_episodes)\n rem_dur = np.array([t for is_rem,t in rem_episodes if is_rem])/(rasterfs*60)\n\n fig, ax = plt.subplots(4,1)\n if len(rem_dur) == 0:\n title_str = 'no REM episodes'\n elif len(rem_dur) == 1:\n title_str = '1 REM episode, duration: {:0.2f} minutes'.\\\n format(rem_dur[0])\n else:\n title_str = '{:d} REM episodes, mean duration: {:0.2f} minutes'.\\\n format(len(rem_dur), rem_dur.mean())\n title_str = '{:s}\\n{:s}'.format(pupilfilepath, title_str)\n fig.suptitle(title_str)\n\n ax[0].autoscale(axis='x', tight=True)\n ax[0].plot(time_ax, eye_speed, color='0.5')\n ax[0].plot([time_ax[0], time_ax[-1]], \\\n [min_saccade_speed, min_saccade_speed], 'k--')\n ax[0].set_ylabel('Eye speed')\n\n ax[1].autoscale(axis='x', tight=True)\n ax[1].plot(time_ax, saccades_per_minute, color='0', linewidth=2)\n ax[1].plot([time_ax[0], time_ax[-1]], \\\n [min_saccades_per_minute, min_saccades_per_minute], 'k--')\n l0, = 
ax[1].plot(time_ax[is_rem.nonzero()], \\\n saccades_per_minute[is_rem.nonzero()], 'r.')\n l1, = ax[1].plot(time_ax[is_brief_gap.nonzero()], \\\n saccades_per_minute[is_brief_gap.nonzero()], 'b.')\n l2, = ax[1].plot(time_ax[is_brief_episode.nonzero()], \\\n saccades_per_minute[is_brief_episode.nonzero()], 'y.')\n ax[1].set_ylabel('Saccades per minute')\n\n ax[0].legend((l0,l1,l2), \\\n ('REM', 'Brief gaps (included)', 'Brief episodes (excluded)'), \\\n frameon=False)\n\n ax[2].autoscale(axis='x', tight=True)\n ax[2].plot(time_ax, pupil_size, color='0.5')\n ax[2].plot(time_ax, smooth_pupil_size, color='0', linewidth=2)\n ax[2].plot([time_ax[0], time_ax[-1]], \\\n [max_pupil, max_pupil], 'k--')\n ax[2].plot(time_ax[is_rem.nonzero()], \\\n smooth_pupil_size[is_rem.nonzero()], 'r.')\n ax[2].plot(time_ax[is_brief_gap.nonzero()], \\\n smooth_pupil_size[is_brief_gap.nonzero()], 'b.')\n ax[2].plot(time_ax[is_brief_episode.nonzero()], \\\n smooth_pupil_size[is_brief_episode.nonzero()], 'y.')\n ax[2].set_ylabel('Pupil size')\n\n ax[3].autoscale(axis='x', tight=True)\n ax[3].plot(time_ax, pupil_sd, color='0', linewidth=2)\n ax[3].plot([time_ax[0], time_ax[-1]], \\\n [max_pupil_sd, max_pupil_sd], 'k--')\n ax[3].plot(time_ax[is_rem.nonzero()], \\\n pupil_sd[is_rem.nonzero()], 'r.')\n ax[3].plot(time_ax[is_brief_gap.nonzero()], \\\n pupil_sd[is_brief_gap.nonzero()], 'b.')\n ax[3].plot(time_ax[is_brief_episode.nonzero()], \\\n pupil_sd[is_brief_episode.nonzero()], 'y.')\n ax[3].set_ylabel('Pupil SD')\n ax[3].set_xlabel('Time (min)')\n\n plt.show()\n\n return is_rem, options\n\n\ndef run_length_encode(a):\n \"\"\"\n Takes a 1-dimensional array, returns a list of tuples (elem, n), where\n elem is each symbol in the array, and n is the number of times it appears\n consecutively. For example, if given the array:\n np.array([False, True, True, True, False, False])\n the function will return:\n [(False, 1), (True, 3), (False, 2)]\n\n ZPS 2018-09-24: Helper function for get_rem.\n \"\"\"\n return [(k, len(list(g))) for k,g in groupby(a)]\n\ndef run_length_decode(a):\n \"\"\"\n Reverses the operation performed by run_length_encode.\n\n ZPS 2018-09-24: Helper function for get_rem.\n \"\"\"\n a = [list(repeat(elem,n)) for (elem,n) in a]\n a = list(chain.from_iterable(a))\n return np.array(a)\n\n\ndef cache_rem_options(pupilfilepath, cachepath=None, **options):\n\n pupilfilepath = get_pupil_file(pupilfilepath)\n\n options['verbose'] = False\n if '.pickle' in pupilfilepath:\n jsonfilepath = pupilfilepath.replace('.pickle','.rem.json')\n else:\n jsonfilepath = pupilfilepath.replace('.pup.mat','.rem.json')\n\n if cachepath is not None:\n pp, bb = os.path.split(jsonfilepath)\n jsonfilepath = os.path.join(cachepath, bb)\n\n fh = open(jsonfilepath, 'w')\n json.dump(options, fh)\n fh.close()\n\n\ndef load_rem_options(pupilfilepath, cachepath=None, **options):\n\n pupilfilepath = get_pupil_file(pupilfilepath)\n\n if '.pickle' in pupilfilepath:\n jsonfilepath = pupilfilepath.replace('.pickle','.rem.json')\n else:\n jsonfilepath = pupilfilepath.replace('.pup.mat','.rem.json')\n if cachepath is not None:\n pp, bb = os.path.split(jsonfilepath)\n jsonfilepath = os.path.join(cachepath, bb)\n\n if os.path.exists(jsonfilepath):\n fh = open(jsonfilepath, 'r')\n options = json.load(fh)\n fh.close()\n return options\n else:\n raise ValueError(\"REM options file not found.\")\n\n\ndef get_pupil_file(pupilfilepath, **options):\n \"\"\"\n For backwards compatibility in pupil/rem functions. 
Default is to load the\n    pupil fit from the CNN model fit. However, for some older recordings, this\n    may not exist and so you may still want to load the pup.mat file. This\n    is a helper function to find which pupil file to load.\n    6-28-2019, CRH\n\n    options dict added 08.17.2020. In options, specify \"pupil_analysis_method\"\n    to say whether you want the matlab or python (cnn) results for pupil.\n    Default is to use python: options['pupil_analysis_method'] = 'cnn'. If the\n    method you ask for doesn't exist, you'll get a log message warning, but it\n    will then try to load the other option.\n    \"\"\"\n    pupilfilepath = str(pupilfilepath)\n    # default to the CNN (python) analysis if no method was specified\n    method = options.get('pupil_analysis_method', 'cnn')\n    if ('.pickle' in pupilfilepath) and os.path.isfile(pupilfilepath) and (method == 'cnn'):\n        log.info(\"Loading CNN pupil fit from .pickle file\")\n        return pupilfilepath\n\n    if (method == 'cnn') and ((not os.path.isfile(pupilfilepath)) or ('pup.mat' in pupilfilepath)):\n\n        if not os.path.isfile(pupilfilepath):\n            pp, bb = os.path.split(pupilfilepath)\n            pupilfilepath = pp + '/sorted/' + bb.split('.')[0] + '.pickle'\n\n        if os.path.isfile(pupilfilepath):\n            log.info(\"Loading CNN pupil fit from .pickle file\")\n            return pupilfilepath\n        else:\n            raise FileNotFoundError(\"Pupil analysis not found\")\n\n    elif os.path.isfile(pupilfilepath):\n        pp, bb = os.path.split(pupilfilepath)\n        CNN_pupilfilepath = pp + '/sorted/' + bb.split('.')[0] + '.pickle'\n\n        if os.path.isfile(CNN_pupilfilepath):\n            log.info(\"Loading CNN pupil fit from .pickle file\")\n            return CNN_pupilfilepath\n        else:\n            log.info(\"WARNING: CNN pupil fit doesn't exist, \"\n                     \"loading pupil fit from .pup.mat file\")\n            return pupilfilepath\n\n    elif ('pup.mat' in pupilfilepath) and (method == 'matlab'):\n        if os.path.isfile(pupilfilepath):\n            return pupilfilepath\n        else:\n            raise FileNotFoundError(\"Asked for matlab pupil analysis, but results file doesn't exist. \"\n                                    \"Check that this video has been analyzed and/or try setting \"\n                                    \"options['pupil_analysis_method']='cnn'\")\n\n    else:\n        raise FileNotFoundError(\"Pupil analysis not found\")\n\n\ndef baphy_pupil_uri(pupilfilepath, **options):\n    \"\"\"\n    return uri to pupil signal file\n    if cache file doesn't exist, process the pupil data based on the contents\n    of the relevant pup.mat file (pupilfilepath) and save to cache file.\n    Then return cached filename.\n\n    Processing:\n        pull out pupil trace determined with specified algorithm\n        warp time to match trial times in baphy parameter file\n        extract REM trace if velocity signal exists\n\n    Cache file location currently hard-coded to:\n        /auto/data/nems_db/recordings/pupil/\n    \"\"\"\n    options = set_default_pupil_options(options)\n\n    cacheroot = \"/auto/data/nems_db/recordings/pupil/\"\n\n    pp, pupbb = os.path.split(pupilfilepath)\n    pp_animal, pen = os.path.split(pp)\n    pp_daq, animal = os.path.split(pp_animal)\n    cachebb = pupbb.replace(\".pup.mat\", \"\")\n    cachepath = os.path.join(cacheroot, animal)\n\n    parmfilepath = pupilfilepath.replace(\".pup.mat\", \".m\")\n    pp, bb = os.path.split(parmfilepath)\n\n    globalparams, exptparams, exptevents = baphy_parm_read(parmfilepath)\n    spkfilepath = pp + '/' + spk_subdir + re.sub(r\"\\.m$\", \".spk.mat\", bb)\n    log.info(\"Spike file: {0}\".format(spkfilepath))\n    # load spike times\n    sortinfo, spikefs = baphy_load_spike_data_raw(spkfilepath)\n    # adjust spike and event times to be in seconds since experiment started
 event times to be in seconds since experiment started\n\n    exptevents, spiketimes, unit_names = baphy_align_time(\n            exptevents, sortinfo, spikefs, options[\"rasterfs\"])\n    log.info('Creating trial events')\n    tag_mask_start = \"TRIALSTART\"\n    tag_mask_stop = \"TRIALSTOP\"\n    ffstart = exptevents['name'].str.startswith(tag_mask_start)\n    ffstop = (exptevents['name'] == tag_mask_stop)\n    TrialCount = np.max(exptevents.loc[ffstart, 'Trial'])\n    event_times = pd.concat([exptevents.loc[ffstart, ['start']].reset_index(),\n                             exptevents.loc[ffstop, ['end']].reset_index()],\n                            axis=1)\n    event_times['name'] = \"TRIAL\"\n    event_times = event_times.drop(columns=['index'])\n\n    pupil_trace, ptrialidx = load_pupil_trace(pupilfilepath=pupilfilepath,\n                                              exptevents=exptevents, **options)\n\n    is_rem, options = get_rem(pupilfilepath=pupilfilepath,\n                              exptevents=exptevents, **options)\n\n    pupildata = np.stack([pupil_trace, is_rem], axis=1)\n    t_pupil = nems.signal.RasterizedSignal(\n            fs=options['rasterfs'], data=pupildata,\n            name='pupil', recording=cachebb, chans=['pupil', 'rem'],\n            epochs=event_times)\n\n\n    return pupil_trace, is_rem, options\n\n\ndef load_raw_pupil(pupilfilepath, fs=None):\n    \"\"\"\n    Simple function to read in the continuous pupil trace, without baphy trial start alignment etc.\n\n    Right now, only works for .pickle pupil files.\n    \"\"\"\n\n    with open(pupilfilepath, 'rb') as fp:\n        pupildata = pickle.load(fp)\n\n\n    pupil_diameter = pupildata['cnn']['a'] * 2\n\n    # missing frames/frames that couldn't be decoded were saved as nans\n    # pad them here\n    nan_args = np.argwhere(np.isnan(pupil_diameter))\n\n    for arg in nan_args:\n        arg = arg[0]\n        log.info(\"padding missing pupil frame {0} with adjacent ellipse params\".format(arg))\n        if arg > 0:\n            pupil_diameter[arg] = pupil_diameter[arg-1]\n        else:\n            # the first frame has no preceding frame, so pad with the next one\n            pupil_diameter[arg] = pupil_diameter[arg+1]\n\n    pupil_diameter = pupil_diameter[:-1, np.newaxis]\n\n    return pupil_diameter\n\n\ndef load_raw_photometry(photofilepath, fs=None, framen=0):\n    \"\"\"\n    Simple function to read (and process) a photometry trace. 
Will ask for user input to define ROI based\n on the first frame of the video file.\n \"\"\"\n import av\n video_container = av.open(photofilepath)\n video_stream = [s for s in video_container.streams][0]\n\n F_mag1 = []\n F_mag2 = []\n for i, packet in enumerate(video_container.demux(video_stream)):\n if i%1000 == 0:\n print(\"frame: {}\".format(i))\n\n if i < framen:\n frame = packet.decode()[0]\n\n elif i == framen:\n frame = packet.decode()[0]\n frame_ = np.asarray(frame.to_image().convert('LA'))\n plt.imshow(frame_[:, :, 0])\n print(\"Before closing image, locate the center of your ROI!\")\n plt.show()\n print(\"left ROI: \")\n x = int(input(\"x1 center: \"))\n y = int(input(\"y1 center: \"))\n print(\"right ROI: \")\n x2 = int(input(\"x2 center: \"))\n y2 = int(input(\"y2 center: \"))\n\n # define roi:\n roi_width = 4\n x1_range = np.arange(x-roi_width, x+roi_width)\n y1_range = np.arange(y-roi_width, y+roi_width)\n\n x2_range = np.arange(x2-roi_width, x2+roi_width)\n y2_range = np.arange(y2-roi_width, y2+roi_width)\n\n\n roi1 = frame_[:, :, 0][x1_range, :][:, y1_range]\n roi2 = frame_[:, :, 0][x2_range, :][:, y2_range]\n fmag1 = np.mean(roi1)\n fmag2 = np.mean(roi2)\n F_mag1.append(fmag1)\n F_mag2.append(fmag2)\n\n else:\n try:\n frame = packet.decode()[0]\n frame_ = np.asarray(frame.to_image().convert('LA'))\n roi1 = frame_[:, :, 0][x1_range, :][:, y1_range]\n roi2 = frame_[:, :, 0][x2_range, :][:, y2_range]\n fmag1 = np.mean(roi1)\n fmag2 = np.mean(roi2)\n F_mag1.append(fmag1)\n F_mag2.append(fmag2)\n except:\n print('end of file reached')\n\n\n return np.array(F_mag1)[:, np.newaxis], np.array(F_mag2)[:, np.newaxis]\n\n\ndef evpread(filename, options):\n \"\"\"\n VERY crude first pass at reading in evp file using python.\n For now, just reads in aux chans. 
Created to load lick data\n    CRH 06.19.2020\n    \"\"\"\n\n    auxchans = options.get('auxchans', [])\n\n    f = open(filename, 'rb')\n    header = np.fromfile(f, count=10, dtype=np.int32)\n\n    spikechancount = header[1]\n    auxchancount = header[2]\n    lfpchancount = header[6]\n    trials = header[5]\n    aux_fs = header[4]\n\n    if len(auxchans) > 0:\n        auxchansteps = np.diff(np.concatenate(([0], [a+1 for a in auxchans], [auxchancount+1])))-1\n    else:\n        auxchansteps = auxchancount\n\n    # loop over trials\n    trialidx = []\n    aux_trialidx = []\n    for tt in range(trials):\n        # read trial header\n        trheader = np.fromfile(f, count=3, dtype=np.int32)\n        if trheader.size==0:\n            break\n\n        if sum(trheader)!=0:\n            ta = []\n\n            # seek through spikedata\n            f.seek(trheader[0]*2*spikechancount, 1)\n\n            # read in aux data for this trial\n            if (auxchancount > 0) & (len(auxchans) > 0):\n                for ii in range(auxchancount):\n                    if auxchansteps[ii] > 0:\n                        f.seek(trheader[1]*2*auxchansteps[ii], 1)\n                    else:\n                        ta.append(np.fromfile(f, count=trheader[1], dtype=np.int16))\n\n                if tt == 0:\n                    aux_trialidx.append(trheader[1])\n                else:\n                    aux_trialidx.append(trheader[1]+aux_trialidx[tt-1])\n\n            else:\n                f.seek(trheader[1]*2*auxchancount, 1)\n\n            # seek through lfp data\n            f.seek(trheader[2]*2*lfpchancount, 1)\n\n            # stack data over channels\n            if tt == 0:\n                ra = np.stack(ta)\n            else:\n                ra = np.concatenate((ra, np.stack(ta)), axis=-1)\n\n            # which trials are extracted\n            trialidx.append(tt+1)\n\n        else:\n            # skip to next trial (samples are int16, i.e. 2 bytes each,\n            # so the full sample count must be scaled by 2)\n            f.seek(((trheader[0]*spikechancount)+\n                    (trheader[1]*auxchancount)+\n                    (trheader[2]*lfpchancount))*2, 1)\n\n\n    # pack and return results\n    pack = collections.namedtuple('evpdata', field_names='trialidx aux_fs aux_trialidx aux_data')\n    output = pack(trialidx=trialidx,\n                  aux_fs=aux_fs, aux_trialidx=aux_trialidx, aux_data=ra)\n\n    f.close()\n\n    return output\n\n\ndef get_lick_events(evpfile, name='LICK'):\n    \"\"\"\n    Load analog lick data from evp file. 
Create dataframe of \n lick events in the style of nems exptevents: columns = [name, start, end, Trial]\n \"\"\"\n lickdata = evpread(evpfile, {'auxchans': [0]})\n startidx = lickdata.aux_trialidx\n startidx = np.append(0, startidx[:-1])\n endidx = startidx[1:]\n endidx = np.append(endidx, -1)\n trialidx = lickdata.trialidx\n fs = lickdata.aux_fs\n lick_trace = lickdata.aux_data\n\n s = []\n t = []\n for tidx, eidx, sidx in zip(trialidx, endidx, startidx):\n data = lick_trace[0, sidx:eidx] \n lickedges = np.diff(data - data.mean())\n lickidx = np.argwhere(lickedges > 0).squeeze()\n\n if lickidx.size==0:\n pass\n elif lickidx.size==1:\n s.append(lickidx / fs)\n t.append(tidx)\n elif lickidx.size > 1:\n s.extend(lickidx / fs)\n t.extend([tidx] * len(lickidx))\n\n # build dataframe\n df = pd.DataFrame(columns=['name', 'start', 'end', 'Trial'], index=range(len(s)))\n df['start'] = s\n df['end'] = s\n df['Trial'] = t\n df['name'] = name\n\n return df\n\n\ndef get_mean_spike_waveform(cellid, animal, usespkfile=False):\n \"\"\"\n Return 1-D numpy array containing the mean sorted\n spike waveform\n \"\"\"\n if type(cellid) != str:\n raise ValueError(\"cellid must be string type\")\n\n if usespkfile:\n cparts=cellid.split(\"-\")\n chan=int(cparts[1])\n unit=int(cparts[2])\n sql = f\"SELECT runclassid, path, respfile from sCellFile where cellid = '{cellid}'\"\n d = db.pd_query(sql)\n spkfilepath=os.path.join(d['path'][0], d['respfile'][0])\n matdata = scipy.io.loadmat(spkfilepath, chars_as_strings=True)\n sortinfo = matdata['sortinfo']\n if sortinfo.shape[0] > 1:\n sortinfo = sortinfo.T\n try:\n mwf=sortinfo[0][chan-1][0][0][unit-1]['Template'][0][chan-1,:]\n\n except:\n import pdb\n pdb.set_trace()\n return mwf\n\n # get KS_cluster (if it exists... this is a new feature)\n sql = f\"SELECT kilosort_cluster_id from gSingleRaw where cellid = '{cellid}'\"\n kid = db.pd_query(sql).iloc[0][0]\n \n # find phy results\n site = cellid[:7]\n path = f'/auto/data/daq/{animal}/{site[:-1]}/tmp/KiloSort/'\n res_dirs = os.listdir(path)\n res_dirs = [p for p in res_dirs if site in p]\n results_dir = []\n for r in res_dirs:\n # find results dir with this cellid\n rns = r.split(f'{site}_')[1].split('KiloSort')[0].split('_')[:-1]\n rns = np.sort([int(r) for r in rns])\n sql = f\"SELECT stimfile from sCellFile WHERE cellid = '{cellid}'\"\n _rns = np.sort(db.pd_query(sql)['stimfile'].apply(lambda x: int(x.split(site)[1].split('_')[0])))\n if np.all(_rns == rns):\n results_dir = r\n if results_dir == []:\n raise ValueError(f\"Couldn't find find directory for cellid: {cellid}\")\n\n # get all waveforms for this sorted file\n w = np.load(path + results_dir + '/results/wft_mwf.npy')\n clust_ids = pd.read_csv(path + results_dir + '/results/cluster_group.tsv', '\\t').cluster_id\n kidx = np.argwhere(clust_ids.values == kid)[0][0]\n \n\n # get waveform\n mwf = w[:, kidx]\n\n return mwf\n\n\ndef parse_cellid(options):\n \"\"\"\n figure out if cellid is\n 1) single cellid\n 2) list of cellids\n 3) a siteid\n\n using this, add the field 'siteid' to the options dictionary. If siteid was passed,\n define cellid as a list of all cellids recorded at this site, for this batch.\n\n options: dictionary\n batch - (int) batch number\n cellid - single cellid string, list of cellids, or siteid\n If siteid is passed, return superset of cells. i.e. if some\n cells aren't present in one of the files that is found for this batch,\n don't load that file. 
To override this behavior, pass rawid list.\n\n returns updated options dictionary and the cellid to extract from the recording\n NOTE: The reason we keep \"cellid to extract\" distinct from the options dictionary\n is so that it doesn't muck with the cached recording hash. e.g. if you want to analyze\n cell1 from a site where you recorded cells1-4, you don't want a different recording\n cached for each cell.\n \"\"\"\n\n options = options.copy()\n\n mfilename = options.get('mfilename', None)\n cellid = options.get('cellid', None)\n batch = options.get('batch', None)\n rawid = options.get('rawid', None)\n cells_to_extract = None\n\n if ((cellid is None) | (batch is None)) & (mfilename is None):\n raise ValueError(\"must provide cellid and batch or mfilename\")\n\n siteid = None\n cell_list = None\n if type(cellid) is list:\n cell_list = cellid\n elif (type(cellid) is str) & ('%' in cellid):\n cell_data = db.pd_query(f\"SELECT cellid FROM Batches WHERE batch=%s and cellid like %s\",\n (batch, cellid))\n cell_list = cell_data['cellid'].to_list()\n elif (type(cellid) is str) & ('-' not in cellid):\n siteid = cellid\n\n if mfilename is not None:\n # simple, db-free case. Just a pass through.\n pass\n if batch is None:\n options['batch'] = 0\n\n elif cell_list is not None:\n # list of cells was passed\n siteid = cellid.split('-')[0]\n cell_list_all, rawid = db.get_stable_batch_cells(batch=batch, cellid=cell_list,\n rawid=rawid)\n options['cellid'] = cell_list_all\n options['rawid'] = rawid\n options['siteid'] = cell_list[0].split('-')[0]\n cells_to_extract = cell_list\n\n elif siteid is not None:\n # siteid was passed, figure out if electrode numbers were specified.\n chan_nums = None\n if '.e' in siteid:\n args = siteid.split('.')\n siteid = args[0]\n chan_lims = args[1].replace('e', '').split(':')\n chan_nums = np.arange(int(chan_lims[0]), int(chan_lims[1])+1)\n\n cell_list, rawid = db.get_stable_batch_cells(batch=batch, cellid=siteid,\n rawid=rawid)\n\n if chan_nums is not None:\n cells_to_extract = [c for c in cell_list if int(c.split('-')[1]) in chan_nums]\n else:\n cells_to_extract = cell_list\n\n options['cellid'] = cell_list\n if len(rawid) != 0:\n options['rawid'] = rawid\n options['siteid'] = siteid\n\n elif cellid is not None:\n # single cellid was passed, want list of all cellids. First, get rawids\n cell_list, rawid = db.get_stable_batch_cells(batch=batch, cellid=cellid,\n rawid=rawid)\n # now, use rawid to get all stable cellids across these files\n siteid = cell_list[0].split('-')[0]\n cell_list, rawid = db.get_stable_batch_cells(batch=batch, cellid=siteid,\n rawid=rawid)\n\n options['cellid'] = cell_list\n options['rawid'] = rawid\n options['siteid'] = siteid\n cells_to_extract = [cellid]\n\n if (len(cells_to_extract) == 0) & (mfilename is None):\n raise ValueError(\"No cellids found! 
Make sure cellid/batch is specified correctly, \"\n \"or that you've specified an mfile.\")\n\n return list(cells_to_extract), options\n","sub_path":"nems_lbhb/baphy_io.py","file_name":"baphy_io.py","file_ext":"py","file_size_in_byte":85012,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"457138183","text":"import sys\nfrom math import *\n\n#####################################\ndef split_num(str, num):\n return [ str[start:start+num] for start in range(0, len(str), num) ]\n# Citation: https://stackoverflow.com/questions/5711452/how-do-i-slice-a-string-every-3-indices\n#####################################\n\ndef transpose_month(inp):\n inp_list = []\n\n for i in range(1, len(inp)):\n inp_list.append(split_num(inp[i].strip(\"\\n\"), 3))\n\n\n rez = zip(*inp_list)\n\n ret = \"\"\n\n k = []\n k.append(inp[0].strip(\"\\n\"))\n\n for i in rez:\n k.append((\"\".join(i)))\n\n return k\n\n######################################################\n\ninp = sys.stdin.readlines()\n\nmonths = []\n\nnum = 1\n\nprint(\" \" + inp[0])\n\nwhile(num+8 <= len(inp)):\n k = floor(num/9)\n months.append([])\n for f in range(3):\n months[k].append([])\n for i in inp[num:num+8]:\n j = i.strip(\"\\n\")\n l = 22\n for f in range(3):\n months[k][f].append(j[f*l : f*l + l])\n\n num += 9\n\n\n\nfor i in range(len(months)):\n for j in range(len(months[i])):\n for k in range(len(months[i][j])):\n months[i][j][k] += \"\\n\"\n months[i][j] = transpose_month(months[i][j])\n\nrez = list(zip(*months))\n\n\nfor i in rez:\n print(\"\".join(\"%s\" % k[0] for k in i))\n for j in range(1, len(i[0])):\n print(\" \".join(\"%s\" % k[j] for k in i))\n","sub_path":"ass1/cal-year-month.py","file_name":"cal-year-month.py","file_ext":"py","file_size_in_byte":1375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"370498131","text":"import os\r\nimport json\r\nimport pytz\r\nimport time\r\nimport urllib2\r\nimport datetime\r\nfrom flask import Flask, render_template, request\r\n\r\napp = Flask(__name__)\r\n\r\nTIMEOUT = 30\r\n\r\n@app.route('/')\r\ndef main():\r\n date = datetime.datetime.now()\r\n #Get weather:\r\n nzWeatherInfo = WeatherInfo('Wellington,nz').getInfoFromAPI()\r\n laWeatherInfo = WeatherInfo('Manhattan_Beach,us').getInfoFromAPI()\r\n return render_template('TOSClock.htm', date=date, nzWeatherInfo=nzWeatherInfo, laWeatherInfo=laWeatherInfo, timeout=TIMEOUT)\r\n\r\nclass WeatherInfo(dict):\r\n def __init__(self, place):\r\n super(WeatherInfo, self).__init__()\r\n weatherApiKey = \"7ef3ef345316959c479dda977fa1f87c\"\r\n weatherApiURL = \"http://api.openweathermap.org/data/2.5/weather?q=\"\r\n self['weatherURL'] = \"%s%s&appid=%s\" % (weatherApiURL, place, weatherApiKey)\r\n self['weatherDataDir'] = \"/tmp\"\r\n self['weatherDataFile'] = \"%s/weatherData_%s.json\" % (self['weatherDataDir'], place)\r\n\r\n def getInfoFromAPI(self):\r\n data = self._getRecentCache()\r\n if not data:\r\n print(\"Opening URL: %s\" % self['weatherURL'])\r\n response = urllib2.urlopen(self['weatherURL'])\r\n data = json.load(response)\r\n if not os.path.exists(self['weatherDataDir']):\r\n os.makedirs(self['weatherDataDir'])\r\n fh = open(self['weatherDataFile'], 'w')\r\n json.dump(data, fh)\r\n fh.close()\r\n self._fillThings(data)\r\n return(self)\r\n\r\n def _getRecentCache(self):\r\n if not os.path.exists(self['weatherDataFile']):\r\n return False\r\n st = os.stat(self['weatherDataFile'])\r\n mtime = st.st_mtime\r\n timeNow = 
time.time()\r\n age = int((timeNow - mtime) / 60)\r\n if age > 30:\r\n return False\r\n fh = open(self['weatherDataFile'], 'r')\r\n data = json.loads(fh.read())\r\n fh.close()\r\n return data\r\n\r\n def _fillThings(self, info):\r\n self['iconURL'] = \"http://openweathermap.org/img/w/%s.png\" % (info['weather'][0]['icon'])\r\n self['description'] = info['weather'][0]['description'].title()\r\n self['temp_c'] = round(info['main']['temp'] - 273.15)\r\n self['temp_f'] = round(info['main']['temp'] * (9.0/5.0) - 459.67)\r\n self['windspeed_k'] = round(info['wind']['speed'] * 1.94384449, 2)\r\n\r\n@app.route('/clockUpdate')\r\ndef clockUpdate():\r\n valueNz = datetime.datetime.now(pytz.timezone('Pacific/Auckland'))\r\n valueLa = datetime.datetime.now(pytz.timezone('US/Pacific'))\r\n printNzTime = \"%02d:%02d:%02d\" % (valueNz.hour, valueNz.minute, valueNz.second)\r\n printLaTime = \"%02d:%02d:%02d\" % (valueLa.hour, valueLa.minute, valueLa.second)\r\n printNzDate = \"%02d/%02d/%d\" % (valueNz.month, valueNz.day, valueNz.year)\r\n printLaDate = \"%02d/%02d/%d\" % (valueLa.month, valueLa.day, valueLa.year)\r\n return json.dumps(\r\n {'nzTime':printNzTime,\r\n 'laTime':printLaTime,\r\n 'nzDate':printNzDate,\r\n 'laDate':printLaDate\r\n })\r\n\r\n@app.route('/calAllen')\r\ndef calAllen():\r\n attendance = \"In the Office\"\r\n if attendance == \"In the Office\":\r\n attendanceColor = \"#00AA00\"\r\n elif attendance == \"Out of the Office\":\r\n attendanceColor = \"#990000\"\r\n elif attendance == \"Temporarily Unavailable\":\r\n attendanceColor = \"#0033CC\"\r\n else:\r\n attendanceColor = \"#999999\"\r\n return render_template('calAllen.htm', attendance=attendance, attendanceColor=attendanceColor, timeout=TIMEOUT)\r\n\r\n\r\nif __name__ == '__main__':\r\n app.run(debug=True, threaded=True)\r\n\r\n\r\n\r\n\r\nmy_date = datetime.datetime.now()\r\n","sub_path":"TOSClock.py","file_name":"TOSClock.py","file_ext":"py","file_size_in_byte":3673,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"485151579","text":"from random import randint\n\nsomatoria = {\n 2: 0,\n 3: 0,\n 4: 0,\n 5: 0,\n 6: 0,\n 7: 0,\n 8: 0,\n 9: 0,\n 10: 0,\n 11: 0,\n 12: 0\n}\n\ndef lançar():\n a = randint(1, 6)\n b = randint(1, 6)\n somatoria[a + b] = somatoria[a + b] + 1\n\ndef main():\n lançamentos = 1000\n for x in range(lançamentos):\n lançar()\n for chave, valor in somatoria.items():\n print(\"%d = %d: %.2f%%\"% (chave, valor, (valor*100/1000)))\nmain()","sub_path":"teste.py","file_name":"teste.py","file_ext":"py","file_size_in_byte":465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"406779622","text":"#!/usr/bin/env python3\n\"\"\"\nTest our assembly interpreter.\n\"\"\"\n\nimport sys\nimport random\nsys.path.append(\".\") # noqa\n# sys.path.insert(0,'/Users/nikhilvaidyamath/Desktop/\n# devops1/Emu86/assembler/Intel/fp_arithmetic.py')\n\nimport operator as opfunc\nimport functools\n\n\nfrom unittest import TestCase, main\n\nfrom assembler.tokens import MAX_INT, MIN_INT, BITS\nfrom assembler.virtual_machine import intel_machine, STACK_TOP, STACK_BOTTOM\nfrom assembler.assemble import assemble\nfrom assembler.Intel.fp_arithmetic import convert_hex_to_decimal\nfrom assembler.Intel.fp_arithmetic import convert_dec_to_hex\n# from assembler.Intel.math_operations import Mathops\n\nNUM_TESTS = 100\nMAX_SHIFT = BITS // 2\nMIN_TEST = MIN_INT // 10 # right now we don't want to overflow!\nMAX_TEST = MAX_INT // 10 # right now we 
don't want to overflow!\nMAX_MUL = 10000 # right now we don't want to overflow!\nMIN_MUL = -10000 # right now we don't want to overflow!\nREGISTER_SIZE = BITS\n\nINT = 0\nFLOAT = 1\n\nintel_machine.base = \"dec\"\nintel_machine.flavor = \"intel\"\n\n\nclass AssembleTestCase(TestCase):\n\n #####################\n # Two Operand Tests #\n #####################\n\n def two_op_test(self, operator, instr, low1=MIN_TEST, high1=MAX_TEST,\n low2=MIN_TEST, high2=MAX_TEST, op_type=INT,\n first_val=INT, second_val=INT):\n for i in range(0, NUM_TESTS):\n a = random.randint(low1, high1)\n b = random.randint(low2, high2)\n if op_type == FLOAT:\n if first_val == FLOAT:\n a = float(random.uniform(MIN_MUL, MAX_MUL))\n if second_val == FLOAT:\n b = float(random.uniform(MIN_MUL, MAX_MUL))\n correct = operator(a, b)\n intel_machine.registers[\"ST0\"] = a\n intel_machine.registers[\"ST1\"] = b\n assemble(instr + \" st0, st1\", intel_machine)\n self.assertAlmostEqual(intel_machine.registers[\"ST0\"], correct)\n else:\n correct = operator(a, b)\n intel_machine.registers[\"EAX\"] = a\n intel_machine.registers[\"EBX\"] = b\n assemble(instr + \" eax, ebx\", intel_machine)\n\n self.assertEqual(intel_machine.registers[\"EAX\"], correct)\n\n def test_convert_hex_to_decimal(self):\n self.assertEqual(convert_hex_to_decimal('a2.4c'), 162.296875)\n\n def test_convert_dec_to_hex(self):\n self.assertEqual(convert_dec_to_hex(162.296875), 'a2.4c')\n\n def test_fadd(self):\n self.two_op_test(opfunc.add, \"FADD\", op_type=FLOAT,\n first_val=FLOAT, second_val=FLOAT)\n\n def test_fsub(self):\n self.two_op_test(opfunc.sub, \"FSUB\", op_type=FLOAT,\n first_val=FLOAT, second_val=FLOAT)\n\n def test_fmul(self):\n self.two_op_test(opfunc.mul, \"FMUL\", op_type=FLOAT,\n first_val=FLOAT, second_val=FLOAT)\n\n def test_fdiv(self):\n self.two_op_test(opfunc.truediv, \"FDIV\", op_type=FLOAT,\n first_val=FLOAT, second_val=FLOAT)\n\n def test_add(self):\n self.two_op_test(opfunc.add, \"add\")\n\n def test_sub(self):\n self.two_op_test(opfunc.sub, \"sub\")\n\n def test_imul(self):\n self.two_op_test(opfunc.mul, \"imul\",\n low1=MIN_MUL, high1=MAX_MUL,\n low2=MIN_MUL, high2=MAX_MUL)\n\n def test_and(self):\n self.two_op_test(opfunc.and_, \"and\")\n\n def test_or(self):\n self.two_op_test(opfunc.or_, \"or\")\n\n def test_xor(self):\n self.two_op_test(opfunc.xor, \"xor\")\n\n def test_shl(self):\n self.two_op_test(opfunc.lshift, \"shl\",\n low1=MIN_MUL, high1=MAX_MUL,\n low2=0, high2=MAX_SHIFT)\n\n def test_shr(self):\n self.two_op_test(opfunc.rshift, \"shr\",\n low1=MIN_MUL, high1=MAX_MUL,\n low2=0, high2=MAX_SHIFT)\n\n ###################\n # Single Op Tests #\n ###################\n\n def one_op_test(self, operator, instr, op_type=None, replaces=True):\n \"\"\"replace boolean needed because some fp instructions\n do not replace the contents of the register,\n but rather place it another register\"\"\"\n for i in range(NUM_TESTS): # changeback to num_tests\n if op_type == FLOAT:\n a = float(random.uniform(MIN_MUL, MAX_MUL))\n intel_machine.registers[\"FRB\"] = a # source float register\n correct = operator(a)\n \"\"\"\n if replaces == False:\n intel_machine.registers[\"FRT\"] = None\n #since no replacement, destination float register\n assemble(instr + ' frb', 'intel', intel_machine)\n self.assertEqual(intel_machine.registers[\"FRT\"], correct)\n #since the new value is in the destination register,\n #compare correct to FRT\n else:\"\"\"\n # needs to be corrected to not use frb\n # assemble(instr + ' frb', intel_machine)\n # 
self.assertEqual(intel_machine.registers[\"FRB\"], correct)\n # since the new value has not been replaced\n # (in source register), compare FRB to correct\n\n else:\n a = random.randint(MIN_TEST, MAX_TEST)\n intel_machine.registers[\"EAX\"] = a\n correct = operator(a)\n assemble(instr + \" eax\", intel_machine)\n self.assertEqual(intel_machine.registers[\"EAX\"], correct)\n\n def test_not(self):\n self.one_op_test(opfunc.inv, \"not\")\n\n def test_neg(self):\n self.one_op_test(opfunc.neg, \"neg\")\n\n def test_inc(self):\n inc = functools.partial(opfunc.add, 1)\n self.one_op_test(inc, \"inc\")\n\n def test_dec(self):\n dec = functools.partial(opfunc.add, -1)\n self.one_op_test(dec, \"dec\")\n \"\"\"\n # def test_fabs(self):\n # self.one_op_test(opfunc.abs, \"FABS\",FLOAT,False)\n # def test_chs(self):\n # self.one_op_test(Mathops.change_sign, \"FCHS\",FLOAT)\n \"\"\"\n ##################\n # Push / Pop #\n ##################\n\n def test_push_and_pop(self):\n # Note: size(correct_stack) = size(stack + memory)\n\n correct_stack = [None] * (STACK_TOP+1)\n\n # Traverse the stack registers.\n for i in range(STACK_TOP, STACK_BOTTOM-1, -1):\n a = random.randint(MIN_TEST, MAX_TEST)\n correct_stack[i] = a\n intel_machine.registers[\"EAX\"] = a\n assemble(\"push eax\", intel_machine)\n\n for i in range(STACK_BOTTOM, STACK_TOP+1):\n assemble(\"pop ebx\", intel_machine)\n self.assertEqual(intel_machine.registers[\"EBX\"], correct_stack[i])\n\n ##################\n # Other #\n ##################\n\n def test_mov(self):\n for i in range(0, NUM_TESTS):\n a = random.randint(MIN_TEST, MAX_TEST)\n correct = a\n intel_machine.registers[\"EAX\"] = a\n assemble(\"mov eax, \" + str(a), intel_machine)\n self.assertEqual(intel_machine.registers[\"EAX\"], correct)\n\n def test_idiv(self):\n for i in range(0, NUM_TESTS):\n a = random.randint(MIN_TEST, MAX_TEST)\n d = random.randint(MIN_TEST, MAX_TEST)\n b = 0\n while(b == 0): # Divisor can't be zero.\n b = random.randint(MIN_TEST, MAX_TEST)\n correct_quotient = (opfunc.lshift(d, REGISTER_SIZE) + a) // b\n correct_remainder = (opfunc.lshift(d, REGISTER_SIZE) + a) % b\n intel_machine.registers[\"EAX\"] = a\n intel_machine.registers[\"EDX\"] = d\n intel_machine.registers[\"EBX\"] = b\n assemble(\"idiv ebx\", intel_machine)\n self.assertEqual(intel_machine.registers[\"EAX\"],\n correct_quotient)\n self.assertEqual(intel_machine.registers[\"EDX\"],\n correct_remainder)\n\n def test_cmp_eq(self):\n intel_machine.registers[\"EAX\"] = 1\n intel_machine.registers[\"EBX\"] = 1\n intel_machine.flags[\"ZF\"] = 0\n intel_machine.flags[\"SF\"] = 0\n assemble(\"cmp eax, ebx\", intel_machine)\n self.assertEqual(intel_machine.flags[\"ZF\"], 1)\n self.assertEqual(intel_machine.flags[\"SF\"], 0)\n\n def test_cmp_l(self):\n intel_machine.registers[\"EAX\"] = 0\n intel_machine.registers[\"EBX\"] = 1\n intel_machine.flags[\"ZF\"] = 0\n intel_machine.flags[\"SF\"] = 0\n assemble(\"cmp eax, ebx\", intel_machine)\n self.assertEqual(intel_machine.flags[\"ZF\"], 0)\n self.assertEqual(intel_machine.flags[\"SF\"], 1)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"tests/Intel/test_assemble.py","file_name":"test_assemble.py","file_ext":"py","file_size_in_byte":8814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"643986585","text":"#coding=utf-8\n#author=godpgf\n\nimport numpy as np\nimport random\n\ndef get_score_percent(scores):\n return 1.0 / (1.0 + np.exp(-scores))\ndef refresh_score_percent(scores,choose_index,last_index):\n 
scores[last_index] -= 1\n    scores[choose_index] += 1\n    return get_score_percent(scores)\n\n# Accumulate the selection probability of every item not yet chosen; the probability is\n# pushed down for items strongly related to the already-selected elements\ndef sum_percent(relation_table, selection, sel_num, score_percent):\n    s = set(selection)\n    sum = 0.0\n    relation_percent = []\n    for i in range(len(score_percent)):\n        relation_percent.append(1)\n        if not i in s:\n            for j in range(sel_num):\n                relation_percent[i] *= (1 - relation_table[i][selection[j]])\n            sum += relation_percent[i] * score_percent[i]\n    return sum, relation_percent\n\n# Draw a random number and use it to pick one item from the not-yet-selected data\ndef select(cur_percent, score_percent, relation_percent, selection):\n    s = set(selection)\n    for i in range(len(score_percent)):\n        if not i in s:\n            cur_percent -= score_percent[i] * relation_percent[i]\n            if cur_percent < 0:\n                return i\n    return -1\n\n# Randomly pick an already-selected element, move it to the end, then randomly pick\n# a not-yet-selected element and return it\ndef random_swap(selection,score_percent,relation_table):\n    sel_num = len(selection)\n    sel_index = random.randint(0,sel_num-1)\n    if sel_index != sel_num-1 :\n        selection[sel_index],selection[sel_num-1] = selection[sel_num-1], selection[sel_index]\n    #s = set(selection)\n    sp, rp = sum_percent(relation_table,selection,sel_num-1,score_percent)\n    cur_percent = random.uniform(0,sp)\n    return select(cur_percent, score_percent,rp,selection)\n\ndef search_best_select(evl, selection, cur_evl, scores, score_percent, relation_table, cur_depth, max_fail_time, fail_continue_percent):\n    remain_time = max_fail_time[cur_depth]\n    search_depth = len(max_fail_time)\n    while remain_time > 0:\n        remain_time = remain_time - 1\n        choose_index = random_swap(selection, score_percent,relation_table)\n        last_index = selection[len(selection)-1]\n        # try this selection\n        selection[len(selection)-1]=choose_index\n        evl_value = evl(selection)\n        if evl_value > cur_evl :  # if the trial selection scores better\n            # update the data scores\n            score_percent = refresh_score_percent(scores,choose_index,last_index)\n            # reset the fail counter and keep testing\n            cur_evl = evl_value\n            remain_time = max_fail_time[cur_depth]\n        else :  # if the trial selection scores worse\n            if cur_depth + 1 == search_depth:\n                selection[len(selection) - 1] = last_index\n                continue\n            # the selection is worse, but with some probability keep trying along the bad direction\n            if random.uniform(0,1) < fail_continue_percent:\n                scale = (search_depth - cur_depth - 1.0) / search_depth\n                new_selection = selection[:]\n                evl_value = search_best_select(evl, new_selection, cur_evl, scores, score_percent, relation_table, cur_depth+1, max_fail_time,fail_continue_percent*scale)\n                if evl_value > cur_evl :  # if the trial selection scores better\n                    # update the data scores\n                    score_percent = refresh_score_percent(scores,choose_index,last_index)\n                    # reset the fail counter and keep testing\n                    cur_evl = evl_value\n                    remain_time = max_fail_time[cur_depth]\n                    # update the selection\n                    for i in range(len(selection)):\n                        selection[i] = new_selection[i]\n                else:  # still worse: restore the previous selection and update the scores\n                    selection[len(selection)-1] = last_index\n                    score_percent = refresh_score_percent(scores,last_index,choose_index)\n            else:\n                selection[len(selection)-1] = last_index\n                score_percent = refresh_score_percent(scores,last_index,choose_index)\n    return cur_evl\n\n# Randomly select some features and return the best evaluation value plus the current selection\n# evl: evaluation function\n# scores: score of each item\n# relation_table: pairwise relatedness of the items\n# search_depth: max depth to keep searching along a bad direction\n# max_fail_time: max number of failures allowed at each depth\n# fail_continue_percent: probability of continuing along a bad direction\ndef random_select(evl, sel_num, scores, relation_table, max_fail_time, fail_continue_percent):\n    selection = []\n    if sel_num == len(scores):\n        for i in range(sel_num):\n            selection.append(i)\n        return selection,evl(selection)\n\n    # initialize\n    score_percent = get_score_percent(scores)\n\n    for i in range(sel_num):\n        #s = set(selection)\n        sp, rp = sum_percent(relation_table,selection,len(selection),score_percent)\n        cur_percent = random.uniform(0,sp)\n        cur_index = 
select(cur_percent, score_percent,rp,selection)\n selection.append(cur_index)\n\n cur_evl = evl(selection)\n evl_value = search_best_select(evl,selection,cur_evl,scores,score_percent,relation_table,0,max_fail_time,fail_continue_percent)\n\n return selection, evl_value","sub_path":"acheat/random_feature_selector.py","file_name":"random_feature_selector.py","file_ext":"py","file_size_in_byte":5212,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"606820345","text":"\nimport re\nimport numpy as np\nimport pandas as pd\nfrom lightgbm import LGBMRegressor\nfrom sklearn.linear_model import LinearRegression\nfrom category_encoders import TargetEncoder, CountEncoder, OneHotEncoder\n\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.model_selection import (\n cross_val_predict\n)\nfrom tools.metrics import (\n apply_metrics,\n prep_data_for_metric,\n get_avg_volumes,\n)\n\nfrom tools.postprocessing import postprocess_submission\n\noffset_name = \"last_before_3_after_0\"\n\n\ndef compute_metrics(preds, lower, upper, y, offset, X, avg_volumes):\n\n id_cols = [\"country\", \"brand\"]\n\n prepped_X = prep_data_for_metric(X, avg_volumes)\n\n prepped_X[\"actuals\"] = y\n prepped_X[\"forecast\"] = np.maximum((preds + 1) * offset, 0)\n prepped_X[\"lower_bound\"] = np.maximum((lower + 1) * offset, 0)\n prepped_X[\"upper_bound\"] = np.maximum((upper + 1) * offset, 0)\n\n return np.mean(abs(prepped_X.groupby(id_cols).apply(apply_metrics)))\n\ndef preprocess(X):\n\n X = X.copy()\n\n offset = X[offset_name]\n # Channel\n # X[\"channel_\"] = \"Mixed\"\n # X.loc[X[\"B\"] > 75, \"channel_\"] = \"B\"\n # X.loc[X[\"C\"] > 75, \"channel_\"] = \"C\"\n # X.loc[X[\"D\"] > 75, \"channel_\"] = \"D\"\n\n # More data for target encoding\n # X[\"month_country\"] = X[\"month_name\"] + \"_\" + X[\"country\"]\n # X[\"month_presentation\"] = X[\"month_name\"] + \"_\" + X[\"presentation\"]\n # X[\"month_area\"] = X[\"month_name\"] + \"_\" + X[\"therapeutic_area\"]\n\n # Month-num\n # X[\"month_country_num\"] = X[\"month_num\"].map(str) + \"_\" + X[\"country\"]\n # X[\"month_presentation_num\"] = X[\"month_num\"].map(str) + \"_\" + X[\"presentation\"]\n # X[\"month_area_num\"] = X[\"month_num\"].map(str) + \"_\" + X[\"therapeutic_area\"]\n # X[\"month_month_num\"] = X[\"month_num\"].map(str) + \"_\" + X[\"month_name\"]\n\n # X[\"presentation_therapeutic\"] = X[\"therapeutic_area\"] + \"_\" + X[\"presentation\"]\n # X[\"therapeutic_channel\"] = X[\"therapeutic_area\"] + \"_\" + X[\"channel_\"]\n # X[\"presentation_channel\"] = X[\"presentation\"] + \"_\" + X[\"channel_\"]\n # X[\"country_channel\"] = X[\"country\"] + \"_\" + X[\"channel_\"]\n # X[\"brand_channel\"] = X[\"brand\"] + \"_\" + X[\"channel_\"]\n # X[\"country_presentation\"] = X[\"country\"] + \"_\" + X[\"presentation\"] + \"_\" + X[\"month_num\"].map(str)\n\n #\n # categorical_cols_freq = [\n # \"country\", \"brand\", \"therapeutic_area\", \"presentation\", \"month_name\",\n # ]\n #\n # freq_encoder_feats = CountEncoder(cols=categorical_cols_freq).fit_transform(\n # full_df.loc[:, categorical_cols_freq]\n # )\n #\n # freq_encoder_feats.columns = [\n # f\"{col}_freq\" for col in freq_encoder_feats.columns\n # ]\n #\n # X = pd.concat([X, freq_encoder_feats], axis=1)\n\n for col in X.columns:\n if re.match(r\".*mean|median\", col):\n X[col] = (X[col] - offset) / offset\n\n # if re.match(r\".*Inf\", col):\n # X.drop(columns=col)\n\n X[\"n_channels\"] = (X[\"A\"] > 10).astype(int) + \\\n (X[\"B\"] > 10).astype(int) + 
\\\n (X[\"C\"] > 10).astype(int)\n return X\n\n\nif __name__ == \"__main__\":\n\n file_name = \"target_encoders\"\n save = False\n retrain_full_data = False\n\n full_df = pd.read_csv(\"data/gx_merged_lags_months.csv\")\n # volume_features = pd.read_csv(\"data/volume_features.csv\")\n submission_df = pd.read_csv(\"data/submission_template.csv\")\n train_tuples = pd.read_csv(\"data/train_split.csv\")\n valid_tuples = pd.read_csv(\"data/valid_split.csv\")\n\n feat_01 = pd.read_csv(\"data/feat_01.csv\")\n\n full_df = full_df.merge(\n feat_01,\n on=[\"country\", \"brand\", \"month_num\"],\n how=\"left\"\n )\n\n gx_month = pd.read_csv(\"data/gx_month.csv\")\n\n full_df = full_df.merge(\n gx_month,\n on=[\"country\", \"brand\", \"month_name\"],\n how=\"left\"\n )\n\n # full_df = full_df.merge(volume_features, on=[\"country\", \"brand\"])\n\n full_df[\"volume_offset\"] = (full_df[\"volume\"] - full_df[offset_name]) / full_df[offset_name]\n full_df = preprocess(full_df)\n\n test_df = full_df[full_df.test == 1].copy().reset_index(drop=True)\n\n full_df = full_df[full_df.test == 0]\n\n train_df = full_df.merge(train_tuples, how=\"inner\").reset_index(drop=True)\n val_df = full_df.merge(valid_tuples, how=\"inner\").reset_index(drop=True)\n\n # TODO: no need for calculation every time\n avg_volumes = get_avg_volumes()\n\n to_drop = [\"volume\", \"volume_offset\"]\n categorical_cols = [\n \"country\", \"brand\", \"therapeutic_area\", \"presentation\", \"month_name\",\n \"month_country\", \"month_presentation\", \"month_area\",\n \"month_country_num\", \"month_presentation_num\", \"month_area_num\",\n \"month_month_num\",\n # \"presentation_therapeutic\",\n # \"therapeutic_channel\",\n # \"presentation_channel\",\n # \"country_channel\",\n # \"brand_channel\",\n # \"channel_\",\n # \"country_presentation\"\n ]\n\n # Prep data\n train_x = train_df.drop(columns=to_drop)\n train_y = train_df.volume_offset\n train_offset = train_df[offset_name]\n\n full_x = full_df.drop(columns=to_drop)\n full_y = full_df.volume_offset\n full_offset = full_df[offset_name]\n\n val_x = val_df.drop(columns=to_drop)\n val_y = val_df.volume_offset\n val_y_raw = val_df.volume\n val_offset = val_df[offset_name]\n\n test_x = test_df.drop(columns=to_drop)\n test_offset = test_df[offset_name]\n\n # Prep pipeline\n te = OneHotEncoder(cols=categorical_cols)\n te_residual = OneHotEncoder(cols=categorical_cols)\n lgb = LGBMRegressor(\n n_jobs=-1, n_estimators=50, objective=\"regression_l1\"\n )\n lgb_residual = LGBMRegressor(\n n_jobs=-1, n_estimators=10, objective=\"regression_l1\"\n )\n\n pipe = Pipeline([\n (\"te\", te),\n (\"lgb\", lgb)\n ])\n\n pipe_residual = Pipeline([\n (\"te\", te_residual),\n (\"lgb\", lgb_residual)\n ])\n\n # Fit cv model\n cv_preds = cross_val_predict(pipe, train_x, train_y)\n train_y_residual = np.abs(cv_preds - train_y)\n\n pipe.fit(train_x, train_y)\n pipe_residual.fit(train_x, train_y_residual)\n\n preds = pipe.predict(val_x)\n preds_residual = pipe_residual.predict(val_x)\n\n preds_test = pipe.predict(test_x)\n preds_test_residual = pipe_residual.predict(test_x)\n\n # bounds = [0, ,0.5, 1, 1.5, 2]\n bounds = [1]\n\n min_unc = 1e8\n best_upper_bound = 0\n best_lower_bound = 0\n for upper_bound in bounds:\n for lower_bound in list(bounds):\n\n print(f\"Upper bound: {upper_bound}\")\n print(f\"Lower bound: {lower_bound}\")\n metric_pair = compute_metrics(\n preds=preds,\n lower=preds - lower_bound * preds_residual,\n upper=preds + upper_bound * preds_residual,\n y=val_y_raw,\n offset=val_offset,\n X=val_x,\n 
avg_volumes=avg_volumes\n )\n print(metric_pair)\n\n unc_metric = metric_pair.values[1]\n\n if unc_metric < min_unc:\n min_unc = unc_metric\n best_upper_bound = upper_bound\n best_lower_bound = lower_bound\n\n print(min_unc)\n print(best_upper_bound)\n print(best_lower_bound)\n\n save_val = val_x.copy().loc[:, [\"country\", \"brand\", \"month_num\"]]\n save_val[\"y\"] = val_y_raw\n save_val[\"lower\"] = preds - best_lower_bound * preds_residual\n save_val[\"upper\"] = preds + best_upper_bound * preds_residual\n save_val[\"preds\"] = preds\n save_val[\"lower_raw\"] = (1 + save_val[\"lower\"]) * val_offset\n save_val[\"upper_raw\"] = (1 + save_val[\"upper\"]) * val_offset\n save_val[\"preds_raw\"] = (1 + save_val[\"preds\"]) * val_offset\n\n if save:\n save_val.to_csv(f\"data/blend/val_{file_name}.csv\", index=False)\n val_x.to_csv(f\"data/blend/val_x.csv\", index=False)\n\n # Retrain with full data -> In case of need\n if retrain_full_data:\n\n cv_preds_full = cross_val_predict(pipe, full_x, full_y)\n full_y_residual = np.abs(cv_preds_full - full_y)\n\n pipe.fit(full_x, full_y)\n pipe_residual.fit(full_x, full_y_residual)\n\n preds_test = pipe.predict(test_x)\n preds_test_residual = pipe_residual.predict(test_x)\n\n # submission_df[\"pred_95_low\"] = np.maximum(preds_test - upper_bound * preds_test_residual, 0)\n submission_df[\"pred_95_low\"] = (preds_test - best_lower_bound * preds_test_residual + 1) * test_offset\n submission_df[\"pred_95_high\"] = (preds_test + best_upper_bound * preds_test_residual + 1) * test_offset\n submission_df[\"prediction\"] = (preds_test + 1) * test_offset\n\n # print(submission_df[submission_df.prediction < 0])\n submission_df = postprocess_submission(submission_df)\n\n submission_df[\"pred_95_low\"] = np.maximum(submission_df[\"pred_95_low\"], 0)\n submission_df[\"pred_95_high\"] = np.maximum(submission_df[\"pred_95_high\"], 0)\n submission_df[\"prediction\"] = np.maximum(submission_df[\"prediction\"], 0)\n if save:\n submission_df.to_csv(f\"submissions/submission_{file_name}.csv\", index=False)\n\n","sub_path":"models/lgbm.py","file_name":"lgbm.py","file_ext":"py","file_size_in_byte":9050,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"616152847","text":"import socket\nimport threading\nfrom threading import Thread\nimport _thread\nimport time\nimport SimpleClient\nimport pickle\nimport string\nimport pygame, sys\nfrom pygame.locals import *\nimport random\nimport re\nfrom bad_stuff import *\n#import main_menu\n\ndef clientWait(s, username):\n def button(msg,x,y,w,h,button_pressed,button_unpressed):\n ''' x: The x location of the top left coordinate of the button box.\n\n y: The y location of the top left coordinate of the button box.\n\n w: Button width.\n\n h: Button height.\n\n ic: Inactive color (when a mouse is not hovering).\n\n ac: Active color (when a mouse is hovering).\n '''\n mouse = pygame.mouse.get_pos()\n click = pygame.mouse.get_pressed()\n \n if x+w > mouse[0] > x and y+h > mouse[1] > y:\n # pygame.draw.rect(LOGIN_TOP_SURFACE, ac,(x,y,w,h))\n LOGIN_TOP_SURFACE.blit(button_pressed, (x, y))\n if click[0] == 1 and msg == \"Quit\":\n #LOGIN_TOP_SURFACE.blit(REFRESH_BUTTON_PRESSED, (1300, 700))\n return(True)\n if click[0] == 1 and msg == \"Back\":\n return(True)\n else:\n # pygame.draw.rect(LOGIN_TOP_SURFACE, ic,(x,y,w,h))\n LOGIN_TOP_SURFACE.blit(button_unpressed, (x, y))\n \n smallText = pygame.font.Font(\"OldNewspaperTypes.ttf\",20)\n textSurf, textRect = text_objects(msg, smallText, 
l_colors[WHITE])\n textRect.center = ( (x+(w/2)), (y+(h/2)) )\n #LOGIN_TOP_SURFACE.blit(textSurf, textRect)\n\n\n\n def text_objects(text, font, color):\n textSurface = font.render(text, True, color)\n return textSurface, textSurface.get_rect()\n\n # Program function\n \n def display_players(x_panel_position, y_panel_position):\n print(\"Number of clients: \" + str(len(clients)))\n for i in clients:\n DISPLAYSURF.blit(SERVER_BAR, (x_panel_position, y_panel_position))\n # display the name of the client\n DISPLAYSURF.blit(SERVER_FONT.render(str(i[1]), True, (0,0,0)), (x_panel_position + 25, y_panel_position + 25))\n DISPLAYSURF.blit(BOOT_BUTTON, (x_panel_position + 1200, y_panel_position + 25))\n l_boot_spots.append((x_panel_position, y_panel_position + 25, i[1]))\n y_panel_position += 100\n \n def search(x_panel_position, y_panel_position, y_offset):\n while True:\n packet, addr = client_socket.recvfrom(4096)\n if packet != None:\n try:\n server_info = pickle.loads(packet)\n l_servers.append(server_info)\n print(\"added a server: \" + server_info[0])\n print(\"server length is: \" + str(l_servers))\n #display_servers(x_panel_position, y_panel_position, y_offset)\n except:\n #This is happening. why?\n print(\"Client tried connecting to itself\")\n else:\n print(\"No recv_data\")\n print(\"done displaying servers\")\n \n def request(x_panel_position, y_panel_position, y_offset):\n print(\"looping requesting servers\")\n del l_servers[:]\n y_offset = 0\n print(\"servers after deleting: \", str(l_servers))\n client_socket.sendto(data.encode('ascii'), address)\n\n def beginGame(s):\n new_server = (s[0].recv(1024).decode(), 9998)\n s[0].close()\n SimpleClient.play(new_server, s[1])\n\n #def Loginclient():\n # Initialize pygame\n pygame.init()\n\n # Graphics Constants\n IMAGE_FILE_PATH = \"ImageFiles/\"\n LOGIN_BACKGROUND = pygame.image.load(IMAGE_FILE_PATH + \"client_login_background.png\")\n BLACK_BACKGROUND = pygame.image.load(IMAGE_FILE_PATH + \"client_login_background2.png\")\n SERVER_BAR = pygame.image.load(IMAGE_FILE_PATH + \"Server.png\")\n JOIN_BUTTON_PRESSED = pygame.image.load(IMAGE_FILE_PATH + \"JoinButton_pressed.png\")\n JOIN_BUTTON_UNPRESSED = pygame.image.load(IMAGE_FILE_PATH + \"JoinButton_unpressed.png\")\n REFRESH_BUTTON_UNPRESSED = pygame.image.load(IMAGE_FILE_PATH + \"RefreshButton_unpressed.png\")\n REFRESH_BUTTON_PRESSED = pygame.image.load(IMAGE_FILE_PATH + \"RefreshButton_pressed.png\")\n UP_ARROW = pygame.image.load(IMAGE_FILE_PATH + \"upArrow.png\")\n DOWN_ARROW = pygame.image.load(IMAGE_FILE_PATH + \"downArrow.png\")\n BACK_BUTTON_UNPRESSED = pygame.image.load(IMAGE_FILE_PATH + \"back_button_unpressed.png\")\n BACK_BUTTON_PRESSED = pygame.image.load(IMAGE_FILE_PATH + \"back_button_pressed.png\")\n #specify that shift is not pressed\n shifted = False\n\n # Declare list of join button spots\n l_join_spots = []\n\n # Declare Server Font\n SERVER_FONT = pygame.font.Font(\"OldNewspaperTypes.ttf\", 35)\n width, height = SERVER_FONT.size(\"Username:\")\n print(\"width of A is: \" + str(width))\n print(\"height of A is: \" + str(height))\n\n BLACK = 1\n WHITE = 2\n RED = 3\n GREEN = 4\n BLUE = 5\n BABY_BLUE = 6\n BRIGHT_RED = 7\n BRIGHT_GREEN = 8\n\n # Position of the text box\n x_pos = 100\n y_pos = 725\n\n # Colors for buttons\n l_colors = {\n BLACK :(0,0,0),\n WHITE :(255,255,255),\n RED :(200,0,0),\n GREEN :(0,200,0),\n BLUE :(0,66,255),\n BABY_BLUE :(0,223,255),\n BRIGHT_RED :(255,0,0),\n BRIGHT_GREEN :(0,255,0),\n }\n\n GRAY = (55,55,55)\n\n BLOCK_COLOR = (53,115,255)\n\n 
# Position of back button\n x_back_button = 5\n y_back_button = 5\n\n # Position of the arrows\n arrow_x_pos = 1500\n up_arrow_y_pos = 550\n down_arrow_y_pos = 50\n\n # Position of refresh button\n refresh_x_pos = 1500\n refresh_y_pos = 700\n\n # Position of the Server Panel\n x_panel_position = 100\n y_panel_position = 100\n\n # Offset for scrolling\n y_offset = 0\n \n pushed_back=False\n \n # Declare the Surface\n #LOGIN_TOP_SURFACE = pygame.display.set_mode((0,0), pygame.FULLSCREEN)\n LOGIN_TOP_SURFACE = pygame.display.set_mode((0,0))\n waiting_for_response = SERVER_FONT.render(\"Waiting for Server response...\", 1, (255,0,0))\n \n #socket_exists = False\n '''\n # Client network stuff\n l_servers = []\n address = ('255.255.255.255', 8080)\n data = \"Request\"\n temp = socket.gethostbyname_ex(socket.gethostname())[-1]\n host = temp[-1]\n # Main starts here\n if socket_exists == False:\n client_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n client_socket.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)\n client_socket.bind((host, 8080))\n client_socket.sendto(data.encode('ascii'), address)\n '''\n \n t_connect = threading.Thread(target=beginGame, args=(s,username))\n t_connect.daemon = True\n t_connect.start()\n\n # Get the username\n while True:\n mouse = pygame.mouse.get_pos()\n LOGIN_TOP_SURFACE.blit(waiting_for_response, (200, 800))\n \n #get user events\n events = pygame.event.get()\n for event in events:\n if event.type == QUIT:\n #end game\n t_connect.close()\n pygame.quit()\n sys.exit()\n if event.type == KEYUP:\n if event.key == K_LSHIFT or event.key == K_RSHIFT:\n shifted = False\n print(\"shifted is now false\")\n if event.type == KEYDOWN:\n just_accessed = False\n if event.key == K_ESCAPE:\n #and the game and close the window\n pygame.quit()\n sys.exit()\n if event.key == K_UP:\n SERVERS_AREA = LOGIN_TOP_SURFACE.get_clip()\n #if (len(l_servers) > 5):\n LOGIN_TOP_SURFACE.blit(BLACK_BACKGROUND, (100, 100))\n y_offset -= 100\n #display_servers(x_panel_position, y_panel_position, y_offset)\n if event.key == K_DOWN:\n SERVERS_AREA = LOGIN_TOP_SURFACE.get_clip()\n if (SERVERS_AREA.x <= 100 and SERVERS_AREA.y <= 100):#put in server checking too need to find out how to get the position of a surface.\n print(\"Servers area x is: \" + str(SERVERS_AREA.x))\n print(\"Servers area y is: \" + str(SERVERS_AREA.y))\n LOGIN_TOP_SURFACE.blit(BLACK_BACKGROUND, (100, 100))\n y_offset += 100\n #display_servers(x_panel_position, y_panel_position, y_offset)\n\n if event.type == MOUSEBUTTONDOWN:\n x_mouse_position_main, y_mouse_position_main = pygame.mouse.get_pos()\n print(str(x_mouse_position_main) + str(y_mouse_position_main))\n print(\"clicked mounce here\")\n \n #click refresh\n if refresh_x_pos <= x_mouse_position_main <= refresh_x_pos + 200 and refresh_y_pos <= y_mouse_position_main <= refresh_y_pos + 100:\n # print(\"clicked refresh\")\n # request(x_panel_position, y_panel_position, y_offset)\n t_connect._stop()\n s.close()\n return(True)\n # Blit the stuffs onto the screen\n #display_servers(x_panel_position, y_panel_position, y_offset)\n LOGIN_TOP_SURFACE.blit(LOGIN_BACKGROUND, (0,0))\n LOGIN_TOP_SURFACE.blit(DOWN_ARROW, (arrow_x_pos, down_arrow_y_pos))\n LOGIN_TOP_SURFACE.blit(UP_ARROW, (arrow_x_pos, up_arrow_y_pos))\n waiting_on_players = SERVER_FONT.render(\"Waiting on players:\", 1, (0,255,255))\n LOGIN_TOP_SURFACE.blit(waiting_for_response, (200, 800))\n pushed_back = 
button(\"Quit\",refresh_x_pos,refresh_y_pos,200,100,REFRESH_BUTTON_PRESSED,REFRESH_BUTTON_UNPRESSED)\n pushed_back = button(\"Back\",x_back_button,y_back_button,75,50,BACK_BUTTON_PRESSED,BACK_BUTTON_UNPRESSED)\n \n \n \n #click back button\n # if x_back_button <= x_mouse_position_main <= x_back_button + 75 and y_back_button <= y_mouse_position_main <= y_back_button + 50:\n # client_socket.close()\n # return\n \n # #click up arrow\n # if arrow_x_pos <= x_mouse_position_main <= arrow_x_pos + 100 and up_arrow_y_pos <= y_mouse_position_main <= up_arrow_y_pos + 50:\n # SERVERS_SURFACE.scroll(0, -100)\n # #click down arrow\n # if arrow_x_pos <= x_mouse_position_main <= arrow_x_pos + 100 and down_arrow_y_pos <= y_mouse_position_main <= down_arrow_y_pos + 50:\n # SERVERS_SURFACE.scroll(0, 100)\n \n pygame.display.update()\n \nif __name__ == '__main__':\n clientWait()","sub_path":"GameforMac/clientWait.py","file_name":"clientWait.py","file_ext":"py","file_size_in_byte":10405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"173064066","text":"import urllib\nfrom bs4 import BeautifulSoup as bs\nfrom urllib.parse import urlencode, quote_plus, unquote\nfrom urllib.request import urlopen, urlretrieve\nimport urllib\nimport os\nimport cv2\nimport numpy as np\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nimport time\nimport urllib.request\n\nbase_url = 'https://search.naver.com/search.naver?where=nexearch&sm=top_hty&fbm=1&ie=utf8&query=' \nplusUrl = input('검색어 입력: ') \nurl = base_url + quote_plus(plusUrl) + '%EC%B6%9C%EC%97%B0%EC%A7%84' \n\nhtml = urlopen(url)\nsoup = bs(html, \"html.parser\")\nname = soup.find(\"div\", class_=\"list_image_info _content\").find_all(\"li\")\n\nfind_imglist= list()\nfind_casting = list()\nfind_namelist = list()\n\nfor item in name:\n find_name = item.find_all(class_=\"_text\")[1] #주인공 이름\n find_namelist.append(find_name.get_text())\n\n find_img = item.find(class_='item').find_all(class_='thumb')\n for j in find_img:\n img = j.find('img')\n find_imglist.append(img.get('src'))\n find_casting.append(img.get('alt'))\n\n# find_imglist = np.array(find_imglist)\n# find_casting = np.array(find_casting)\n# find_namelist = np.array(find_namelist)\n\n# np.save('./MJK/data/npy/find_imglist.npy', arr=find_imglist)\n# np.save('./MJK/data/npy/find_casting.npy', arr=find_casting)\n# np.save('./MJK/data/npy/find_namelist.npy', arr=find_namelist)\n\n\nfor i in range(2):\n path = './teamproject/images2/'+str(i)+'/'\n os.makedirs(path, exist_ok=True)\n driver = webdriver.Chrome(r\"D:\\workspace\\Study\\teamProject\\chromedriver.exe\")\n driver.get(\"https://www.google.co.kr/imghp?hl=ko&tab=wi&authuser=0&ogbl\")\n elem = driver.find_element_by_name(\"q\")\n elem.send_keys(find_namelist[i])\n elem.send_keys(Keys.RETURN)\n\n SCROLL_PAUSE_TIME = 1\n # Get scroll height\n last_height = driver.execute_script(\"return document.body.scrollHeight\")\n while True:\n # Scroll down to bottom\n driver.execute_script(\"window.scrollTo(0, document.body.scrollHeight);\")\n # Wait to load page\n time.sleep(SCROLL_PAUSE_TIME)\n # Calculate new scroll height and compare with last scroll height\n new_height = driver.execute_script(\"return document.body.scrollHeight\")\n if new_height == last_height:\n try:\n driver.find_element_by_css_selector(\".mye4qd\").click()\n except:\n break\n last_height = new_height\n\n images = driver.find_elements_by_css_selector(\".rg_i.Q4LuWd\")\n count = 1\n for image in images:\n try:\n 
image.click()\n time.sleep(1)\n imgUrl = driver.find_element_by_xpath('/html/body/div[2]/c-wiz/div[3]/div[2]/div[3]/div/div/div[3]/div[2]/c-wiz/div[1]/div[1]/div/div[2]/a/img').get_attribute(\"src\")\n opener=urllib.request.build_opener()\n opener.addheaders=[('User-Agent','Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/36.0.1941.0 Safari/537.36')]\n urllib.request.install_opener(opener)\n urllib.request.urlretrieve(imgUrl, path + str(count) + \".jpg\")\n count = count + 1\n except:\n pass","sub_path":"WSJ/teamProject/CV00_crwaling02.py","file_name":"CV00_crwaling02.py","file_ext":"py","file_size_in_byte":3187,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"651325284","text":"# -*- coding: utf-8 -*-\n\n# 2019/9/27 0027 上午 10:15 \n\n__author__ = 'RollingBear'\n\nimport xlrd\n\ntry:\n import xml.etree.cElementTree as ET\nexcept ImportError:\n import xml.etree.ElementTree as ET\n\nexcelPath = 'G:\\\\烟盒标注\\\\整合\\\\香烟盒标注信息汇总20190305-2.xlsx'\nxmlPath = 'G:\\\\烟盒标注\\\\整合\\\\cig-img-train\\\\1\\\\OAT\\\\img (1).oa'\ntxtPath = 'G:\\\\烟盒标注\\\\整合\\\\cig-img-train\\\\1\\\\OAT\\\\img (1).txt'\n\n\ndef loadExcel(path):\n excel = xlrd.open_workbook(path)\n sheet = excel.sheet_by_name(excel.sheet_names()[0])\n keyList = []\n valueList = []\n\n for i in range(sheet.nrows):\n key = sheet.cell_value(i, 0)\n keyList.append(key)\n value = sheet.cell_value(i, 1)\n valueList.append(value)\n\n return keyList, valueList\n\n\ndef loadXml(xmlPath, txtPath, keyList, valueList):\n tree = ET.ElementTree(file=xmlPath)\n root = tree.getroot()\n i = 0\n with open(txtPath, 'r') as f:\n txtList = f.readlines()\n\n with open(txtPath, 'w+') as f:\n for child_of_root in root.iter(tag='Text'):\n if valueList[keyList.index(child_of_root.text)] == '':\n f.write(txtList[i])\n i += 1\n continue\n else:\n txtList[i].replace(child_of_root.text, valueList[keyList.index(child_of_root.text)])\n f.write(txtList[i])\n child_of_root.text = valueList[keyList.index(child_of_root.text)]\n i += 1\n tree.write(xmlPath)\n\n\nif __name__ == '__main__':\n keyList, valueList = loadExcel(excelPath)\n\n loadXml(xmlPath, txtPath, keyList, valueList)\n","sub_path":"loadExcel.py","file_name":"loadExcel.py","file_ext":"py","file_size_in_byte":1638,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"569482185","text":"import math\n\n\ndef insertionsort(A, n, g):\n cnt = 0\n for i in range(g, n):\n v = A[i]\n j = i - g\n while j >= 0 and A[j] > v:\n A[j + g] = A[j]\n j = j - g\n cnt += 1\n A[j + g] = v\n return cnt\n\n\ndef shellsort(A, n):\n G = [] \n gap = 1\n while gap <= math.ceil(n / 3):\n G.append(gap)\n gap = gap * 3 + 1\n G = G[::-1]\n m = len(G)\n print(m)\n print(*G)\n cnt = 0\n for i in range(m):\n cnt += insertionsort(A, n, G[i])\n print(cnt)\n\n\nn = int(input())\nA = []\nfor i in range(n):\n A.append(int(input()))\nshellsort(A, n)\nfor i in range(n):\n print(A[i])","sub_path":"Python_codes/p02262/s420895507.py","file_name":"s420895507.py","file_ext":"py","file_size_in_byte":666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"11766228","text":"\"\"\"\nK-Means Algorithm Implementation\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\nimport random\nfrom read_csvfile import read_csvfile\nimport math\n\n\"\"\"\nData-Set Information\n\"\"\"\n\ncity = \"san-francisco-ca\"\nfilename = \"data/propertyInfo/{}.csv\".format(city)\ndata = 
read_csvfile(filename)\ndata[\"cluster\"] = -1\n\n\"\"\"\nGenerating Centroids\n\"\"\"\n\n# amount of centroids chosen\n\ndef centroids(house_data,k,column_name):\n    # takes in housing data parameter\n    # value of amount of centroids to be created\n    centroid = []\n    # create list to add random centroids\n    for x in range(k):\n        # loop from 0 to k\n        centroid.append(random.randint(1,house_data[column_name].max()))\n        # adds a random integer to the list\n    return centroid\n    # return the array of random points\n\nlist_of_centroids = centroids(data, 5,'zestimate')\n\n# easier to store function call in a variable\n\n\"\"\"\nUse list of centroids to create clusters of homes\n\"\"\"\n\n\ndef cluster(centroids, house_data, column_name):\n    shortest_distance = []\n    sum_of_centroid = []\n    # create a list that compares distance from centroid to house\n\n    # new centroids to compare to original\n    houses = house_data[column_name]\n    # grabs the data table at column 'zestimate' or a different parameter\n    for x in range(len(centroids)):\n        # loop from 0 to 5\n        centroid = centroids[x]\n        # each centroid\n        distance_to_centroids = []\n        # iterating through the list of centroids and locating each individual centroid\n        for idx, y in enumerate(houses):\n            offset = len(houses) * x\n            # print(idx, len(houses))\n            home = houses[idx]\n            # home is located at a specific index\n            homee = int(home)\n            # converting data type from numpy to int\n            shortest_distance.append([math.sqrt(abs(homee-centroid)**2), centroid, y])\n            # add to the list an array with euclidean distance,centroid number,initial value\n            distance = math.sqrt(abs(homee-centroid)**2)\n            # euclidean distance\n            distance_to_centroids.append(distance)\n            # adding to list\n            house_data.loc[idx + offset, 'cluster'] = centroid\n            # update the cell with new cluster value in data set\n        sum_of_centroid.append(sum(distance_to_centroids))\n        # adding sum of data to list\n    avg_of_centroid = []\n    # list of average values\n    for e in sum_of_centroid:\n        avg_of_centroid.append(e/len(sum_of_centroid))\n        # adds average values to list\n    return avg_of_centroid\n    #shortest_distance.sort(key=lambda a: a[0])\n    # sorts zeroth index of list inside list which is the distance\n    #return shortest_distance[0]\n    # returns the cluster that is closest to the home\n\nclustering_function = cluster(list_of_centroids,data,'zestimate')\n\n\"\"\"\n Here we create the convergence method\n\"\"\"\n\n\ndef convergence(param1, param2):\n    func_1 = list(map(int, param1))\n    func_2 = list(map(int, param2))\n    # compare old and new centroid values pairwise\n    for old, new in zip(func_1, func_2):\n        if abs(old - new) > 1:\n            # not converged yet: re-seed and re-cluster\n            centroids(data, 5, 'zestimate')\n            cluster(list_of_centroids, data, 'zestimate')\n            return None\n    return param2\n\n\n\"\"\"\nFormatting\n\"\"\"\npd.set_option('display.max_rows', len(data['zestimate']))\nx = data['zestimate']\npd.set_option('display.float_format', lambda x: '%.0f' % x)\n\n\n# print(cluster(list_of_centroids, data,'zestimate'))\nprint(convergence(clustering_function, list_of_centroids))\n#print(clustering_function)\n#print(data)\n\n# for item in myData:\n#     print(item)\n#print([data.cluster])\n#print(data['cluster'])\n# for item in data['cluster']:\n#     print(item)\n\n#convergence(list_of_centroids)\n#print([data.cluster])\n\n\n","sub_path":"kmeans.py","file_name":"kmeans.py","file_ext":"py","file_size_in_byte":3838,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"552165615","text":"import os\r\nimport MySQLdb\r\nimport time\r\nimport psutil\r\nimport sys\r\n\r\ndef connectDB():\r\n
db = MySQLdb.connect(host='mysqlhost',user=\"root\",passwd=\"root\",db=\"test\",port=3306)\n if(db):\n curs = db.cursor()\n return curs,db\n \n else:\n print(\"Unable to connect to MySQL... Aborting!!\")\n sys.exit(0)\n\n\ndef getLoadAvg():\n\treturn psutil.cpu_percent()\n\ndef insertToDB():\n\tcursor,connection = connectDB()\n\tquery = \"create table if not exists Loads (l_id int not null auto_increment,value varchar(255),primary key(l_id));\"\n\tcursor.execute(query)\n\tcurrent_load = getLoadAvg()\n\tprint(current_load)\n\tquery = \"insert into Loads(value) values(\"+str(current_load)+\");\"\n\tcursor.execute(query)\n\tconnection.commit()\n\n\n\nif __name__ == '__main__':\n\t#blah\n\twhile True:\n\t\tinsertToDB()\n\t\ttime.sleep(50)\n","sub_path":"microservices_dummy_arch/ms1/getLoad.py","file_name":"getLoad.py","file_ext":"py","file_size_in_byte":801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"258536964","text":"# -*- coding: utf-8 -*-\n\"\"\"\nSpyder Editor\n\nThis is a temporary script file.\n\"\"\"\n#%% load data\nimport pandas as pd\nimport numpy as np\nimport scipy\nfrom scipy.sparse import coo_matrix, hstack,csr_matrix\nimport gc\nfrom sklearn.linear_model import SGDRegressor\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.model_selection import RandomizedSearchCV\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.svm import LinearSVC\nfrom sklearn.linear_model import LinearRegression\n\ndef save_sparse_csr(filename, array):\n # note that .npz extension is added automatically\n np.savez(filename, data=array.data, indices=array.indices,\n indptr=array.indptr, shape=array.shape)\n\n\ndef load_sparse_csr(filename):\n # here we need to add .npz extension manually\n loader = np.load(filename + '.npz')\n return csr_matrix((loader['data'], loader['indices'], loader['indptr']),\n shape=loader['shape'])\n\ndef rmsle(y, y_pred):\n\tassert len(y) == len(y_pred)\n\tterms_to_sum = [(y_pred[i] - y[i]) ** 2.0 for i,pred in enumerate(y_pred)]\n\treturn (sum(terms_to_sum) * (1.0/len(y))) ** 0.5\n\nX_train = load_sparse_csr('X_train')\nntrain = X_train.shape[0]\nX = load_sparse_csr('X')\nX_test = X[ntrain:]\ndel X\ntrain = pd.read_csv('train.csv')\ny_train = train['price']\ndel train;gc.collect() \n\n#%% SGDRegressor\nparam_grid = [\n {'penalty': ['elasticnet','l1','l2']}]\nsgd = SGDRegressor()\ngrid_search = GridSearchCV(sgd, param_grid, cv = 3)\ngrid_search.fit(X_train, y_train)\ngrid_search.best_params_\n\nsgd_elas = SGDRegressor(penalty = 'elasticnet')\nsgd_elas.fit_transform(X_train, y_train)\npred_sgd_elas = sgd.predict(X_train)\nrmsle(y_train,pred_sgd_elas) # 0.565\n\nsgd_l2 = SGDRegressor(penalty = 'l2')\nsgd_l2.fit_transform(X_train,y_train)\npred_sgd_l2 = sgd_l2.predict(X_train)\nrmsle(y_train,pred_sgd_l2) # 0.55418\n\n\n#%%\nfrom sklearn.kernel_ridge import KernelRidge\nkr = KernelRidge()\nkr.fit(X_train, y_train)\npred_kr = kr.predict(X_train)\nrmsle(y_train,pred_kr)\n\n#%%\nfrom sklearn.svm import LinearSVR\nfrom sklearn.preprocessing import MaxAbsScaler\nsvm_reg = Pipeline((\n (\"scaler\", MaxAbsScaler()),\n (\"svm\", LinearSVR(epsilon=1.5)),\n ))\nsvm_reg.fit(X_train, y_train)\npred_svm1 = svm_reg.predict(X_train)\nrmsle(y_train,pred_svm1) #0.6039\n\n# kernel svm\n#from sklearn.svm import SVR\n#svm_poly_reg = SVR(kernel=\"poly\", degree=2, C=100, epsilon=0.1)\n#svm_poly_reg.fit(X_train, y_train)\n\n#%% simple tree\nfrom sklearn.tree import 
DecisionTreeRegressor \ntree_reg = DecisionTreeRegressor(max_depth=2,min_samples_leaf=10)\ntree_reg.fit(X_train, y_train)\ntree_pre = tree_reg.predict(X_train)\nrmsle(y_train,tree_pre) #0.7136\n\n\n#%% bagging\nfrom sklearn.ensemble import BaggingRegressor\nfrom sklearn.tree import ExtraTreeRegressor\nbag_reg = BaggingRegressor(\n ExtraTreeRegressor(max_depth=2,min_samples_leaf=10), \n n_estimators=10,\n bootstrap=True, n_jobs=-1,bootstrap_features = True\n )\nbag_reg.fit(X_train,y_train)\npre_bag = bag_reg.predict(X_train)\nrmsle(y_train,pre_bag) #0.7080\n\n#%%\nfrom sklearn.ensemble import RandomForestRegressor\nregr = RandomForestRegressor(max_depth=2,min_samples_leaf=10)\nregr.fit(X_train,y_train)\npred_regr = regr.predict(X_train)\nrmsle(y_train,pred_regr) #0.7136\n\n#%%\n\n#If your AdaBoost ensemble is overfitting the training set,\n# you can try reducing the number of estimators or more strongly regularizing the base estimator.\nfrom sklearn.ensemble import AdaBoostRegressor\nada = AdaBoostRegressor(base_estimator = DecisionTreeRegressor(max_depth=2,min_samples_leaf=10))\nada.fit(X_train,y_train)\npre_ada = ada.predict(X_train)\nrmsle(y_train,pre_ada) #0.7327\n\nparam_grid_ada = {'learning_rate': [0.1,0.01,0.005],\n 'n_estimators':[50,100,200,500]}\ngrid_search_ada = RandomizedSearchCV(ada, param_distributions = param_grid_ada, n_iter = 10)\ngrid_search_ada.fit(X_train, y_train)\n\n\n#%%\nimport xgboost as xgb\ngbm = xgb.XGBClassifier(\n #learning_rate = 0.02,\n n_estimators= 2000,\n max_depth= 4,\n min_child_weight= 2,\n #gamma=1,\n gamma=0.9, \n subsample=0.8,\n colsample_bytree=0.8,\n objective= 'binary:logistic',\n nthread= -1,\n scale_pos_weight=1)\n\ngbm.fit(X_train, y_train)\n\n\n\n#%% stacking using mlxtend\nfrom mlxtend.regressor import StackingRegressor\n\n\nlr_stack = LinearRegression()\nsvr_lr_stack = LinearSVR(epsilon=1.5)\nsgd_elas_stack = SGDRegressor(penalty='elasticnet')\ntree_reg_stack = DecisionTreeRegressor(max_depth=2,min_samples_leaf=10)\n\nstregr = StackingRegressor(regressors=[lr_stack, svr_lr_stack,sgd_elas_stack], \n meta_regressor=tree_reg_stack,verbose=1)\nstregr.fit(X_train, y_train)\n\n#from sklearn.model_selection import GridSearchCV\n#from sklearn.linear_model import Lasso\n#\n## Initializing models\n#\n#lr = LinearRegression()\n#svr_lin = SVR(kernel='linear')\n#ridge = Ridge(random_state=1)\n#lasso = Lasso(random_state=1)\n#svr_rbf = SVR(kernel='rbf')\n#regressors = [svr_lin, lr, ridge, lasso]\n#stregr = StackingRegressor(regressors=regressors, \n# meta_regressor=svr_rbf)\n#\n#params = {'lasso__alpha': [0.1, 1.0, 10.0],\n# 'ridge__alpha': [0.1, 1.0, 10.0],\n# 'svr__C': [0.1, 1.0, 10.0],\n# 'meta-svr__C': [0.1, 1.0, 10.0, 100.0],\n# 'meta-svr__gamma': [0.1, 1.0, 10.0]}\n#\n#grid = GridSearchCV(estimator=stregr, \n# param_grid=params, \n# cv=5,\n# refit=True)\n#grid.fit(X, y)\n#\n#for params, mean_score, scores in grid.grid_scores_:\n# print(\"%0.3f +/- %0.2f %r\"\n# % (mean_score, scores.std() / 2.0, params))\n\n#%% stacking by hand\nfrom sklearn.ensemble import (RandomForestClassifier, AdaBoostClassifier, \n GradientBoostingClassifier, ExtraTreesClassifier)\nfrom sklearn.svm import SVC\nfrom sklearn.cross_validation import KFold\n\nntrain = train.shape[0]\nntest = test.shape[0]\nSEED = 0 # for reproducibility\nNFOLDS = 5 # set folds for out-of-fold prediction\nkf = KFold(ntrain, n_folds= NFOLDS, random_state=SEED)\n\n\n\nclass SklearnHelper(object):\n def __init__(self, reg, seed=0, params=None):\n params['random_state'] = seed\n self.reg = 
reg(**params)\n\n def train(self, x_train, y_train):\n self.reg.fit(x_train, y_train)\n\n def predict(self, x):\n return self.reg.predict(x)\n \n def fit(self,x,y):\n return self.reg.fit(x,y)\n \n def feature_importances(self,x,y):\n print(self.reg.fit(x,y).feature_importances_)\n \n# Out-of-Fold Predictions\ndef get_oof(reg, X_train, y_train, X_test):\n oof_train = np.zeros((ntrain,))\n oof_test = np.zeros((ntest,))\n oof_test_skf = np.empty((NFOLDS, ntest))\n\n for i, (train_index, test_index) in enumerate(kf):\n x_tr = X_train[train_index]\n y_tr = y_train[train_index]\n x_te = X_train[test_index]\n\n reg.train(x_tr, y_tr)\n\n oof_train[test_index] = reg.predict(x_te)\n oof_test_skf[i, :] = reg.predict(X_test)\n\n oof_test[:] = oof_test_skf.mean(axis=0)\n return oof_train.reshape(-1, 1), oof_test.reshape(-1, 1)\n\n#first layer\nsgd_params = {'penalty':'elasticnet'}\nlsvr_params = {'epsilon':1.5}\ndt_params = {'max_depth':2,'min_samples_leaf':10}\n\nsgd = SklearnHelper(reg = SGDRegressor, seed = SEED, params=sgd_params)\nlsvr = SklearnHelper(reg = LinearSVR, seed = SEED, params = lsvr_params)\ndt = SklearnHelper(reg = DecisionTreeRegressor, seed = SEED, params = dt_params)\n\nsgd_oof_train, sgd_oof_test = get_oof(sgd, X_train, y_train, X_test)\nlsvr_oof_train, lsvr_oof_test = get_oof(lsvr, X_train, y_train, X_test)\ndt_oof_train, dt_oof_test = get_oof(dt, X_train, y_train, X_test)\n\n# second layer\n#base_prediction_train = pd.DataFrame({'SGD': sgd_oof_train.ravel(),\n# 'linearSVR': lsvr_oof_train.ravel(),\n# 'DecitionTree': dt_oof_train.ravel()})\n#base_prediction_train.head()\nx_train_2 = np.concatenate((sgd_oof_train,lsvr_oof_train,dt_oof_train), axis = 1)\nx_test_2 = np.concatenate((sgd_oof_test, lsvr_oof_test, dt_oof_test), axis = 1)\n\n# make predictionn using randomforestregressor\nregr2 = RandomForestRegressor(n_jobs=-1,n_estimators=100,max_depth=2,min_samples_leaf=10,verbose=1)\nregr2.fit(x_train_2, y_train)\npre_regr2 = regr2.predict(x_test_2)\n\n","sub_path":"several_models.py","file_name":"several_models.py","file_ext":"py","file_size_in_byte":8355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"109117474","text":"import requests\nfrom lxml import html\nimport log.log as log\nimport random\nimport traceback\nimport re\nimport os\n\nfrom concurrent.futures import ThreadPoolExecutor as TPE\nfrom concurrent.futures import as_completed\n\nproxyLogger = log.get_logger(__name__)\n\nhttpsCount = 0\nhttpCount = 0\n\n\ndef proxyScraper():\n \"\"\"\n Parses htmlobject using xpath search pulling IP, port, Country, type, HTTPS and Time discovered\n information for a single request\n :param htmlobject:\n :return: proxyDict\n \"\"\"\n uri = 'https://free-proxy-list.net/'\n\n pageContent = requests.get(url=uri, headers=rand_useragent(), timeout=10)\n\n tree = html.fromstring(pageContent.content)\n\n proxyIP = [item for item in tree.xpath('//table/tbody/tr/td[1]/text()')]\n proxyPort = [item for item in tree.xpath('//table/tbody/tr/td[2]/text()')]\n proxyCountry = [item for item in tree.xpath('//table/tbody/tr/td[4]/text()')]\n proxyType = [item for item in tree.xpath('//table/tbody/tr/td[5]/text()')]\n proxyHTTPS = [item for item in tree.xpath('//table/tbody/tr/td[7]/text()')]\n proxyDiscovered = [item for item in tree.xpath('//table/tbody/tr/td[8]/text()')]\n\n proxyDict = {key: value for key, *value in\n zip(proxyCountry, proxyIP, proxyPort, proxyType, proxyHTTPS, proxyDiscovered)}\n\n httpsProxy = ['https', ]\n httpProxy = 
['http']\n\n    for key, value in proxyDict.items():\n        if value[3] == 'yes':\n            httpsProxy.append(f'https://{value[0]}:{value[1]}')\n        else:\n            httpProxy.append(f'http://{value[0]}:{value[1]}')\n\n    proxyLogger.info(\n        f'From {len(httpsProxy) + len(httpProxy)} available proxies, {len(httpsProxy)} are HTTPS capable and {len(httpProxy)} for HTTP')\n\n    return httpsProxy, httpProxy\n\n\ndef rand_useragent():\n    \"\"\"\n    Picks a random user-agent when function is called\n    Returns a dictionary containing the user-agent\n    :return: html_headers\n    \"\"\"\n    useragent = [\n        'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) AppleWebKit/600.8.9 (KHTML, like Gecko) Version/8.0.8 Safari/600.8.9',\n        'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/45.0.2454.101 Safari/537.36',\n        'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_5) AppleWebKit/537.36 (KHTML, like Gecko) Cafari/537.36',\n        'Mozilla/5.0 (X11; Linux x86_64; rv:60.0) Gecko/20100101 Firefox/60.0'\n    ]\n\n    html_headers = {\n        'User-Agent': f'{random.choice(useragent)}'\n    }\n    return html_headers\n\n\ndef proxyCheck(proxyURI):\n    \"\"\"\n    Acts on a URI (proxyURI) passed by the multiprocessor function\n    proxyURI is checked using regex if it uses the https:// protocol\n    A proxyURI that passes the regex check is sent a request with https parameters\n    A proxyURI that fails the regex check is sent a request with http parameters\n    The proxyURI is then used to send a request to test_site\n    A 200 response places the proxyURI to a proxyList(https/http list)\n    :param proxyURI:\n    :return: httpsProxy, httpProxy\n    \"\"\"\n\n    httpsProxy = []\n    httpProxy = []\n\n    test_site = \"http://api.ipify.org/?format=json\"\n\n    pattern = r'(https)://'\n\n    if re.match(pattern, proxyURI):\n        param_proxy = {\n            'https': proxyURI\n        }\n\n        # Test HTTPS proxy if active\n        try:\n            r = requests.get(test_site, headers=rand_useragent(), proxies=param_proxy, timeout=(20, 15))\n            status = r.status_code\n            if status == 200:\n                httpsProxy.append(proxyURI)\n                proxyLogger.info(f'Proxy {proxyURI} is Online. Appending to active HTTPS proxy list.')\n                counter('https')\n        except Exception as error:\n            proxyLogger.error(f'Error on proxy {proxyURI}. Exception: {error} Stack Trace: {traceback.print_exc()}')\n            pass\n    else:\n        param_proxy = {\n            'http': proxyURI\n        }\n\n        # Test HTTP proxy if active\n        try:\n            r = requests.get(test_site, headers=rand_useragent(), proxies=param_proxy, timeout=(60, 45))\n            status = r.status_code\n            if status == 200:\n                httpProxy.append(proxyURI)\n                proxyLogger.info(f'Proxy {proxyURI} is Online. Appending to active HTTP proxy list.')\n                counter('http')\n        except Exception as error:\n            proxyLogger.error(f'Error on proxy {proxyURI}. 
Exception: {error} Stack Trace: {traceback.print_exc()}')\n pass\n\n return httpsProxy, httpProxy\n\n\ndef counter(protocol_type):\n global httpsCount, httpCount\n\n if protocol_type == 'https':\n httpsCount += 1\n elif protocol_type == 'http':\n httpCount += 1\n\n\ndef file_write(line):\n \"\"\"\n Appends line of type str to file\n list is first converted to a string\n line is stripped of 1st and last characters \"[\" and \"]\"\n \"\"\"\n\n target_dir = os.path.abspath(os.path.join(__file__, '../../proxy'))\n\n with open(f'{target_dir}/proxies.txt', 'a') as write_file:\n write_file.write(line)\n write_file.write('\\n')\n\n\ndef multithreader(proxyList):\n httpsProxy = []\n httpProxy = []\n\n with TPE(max_workers=50) as executor:\n futures = [executor.submit(proxyCheck, uri) for uri in proxyList]\n for future in as_completed(futures):\n output = future.result()\n if len(output[0]) == 1:\n httpsProxy.append(output[0].pop(0))\n elif len(output[1]) == 1:\n httpProxy.append(output[1].pop(0))\n\n proxyLogger.info(\n f'{httpsCount + httpCount} active proxies, {httpsCount} are HTTPS capable and {httpCount} for HTTP')\n\n combinedProxies = httpsProxy + httpProxy\n\n for item in combinedProxies:\n file_write(item)\n\n\n return httpsProxy, httpProxy\n\n\ndef main():\n httpsProxyList, httpProxyList = proxyScraper()\n\n httpsProxies, _ = multithreader(httpsProxyList[1:])\n _, httpProxies = multithreader(httpProxyList[1:])\n\n proxyLogger.info(f'\\nHTTPS\\n{httpsProxies}\\n\\nHTTP\\n{httpProxies}')\n\n return httpsProxies, httpProxies\n\nif __name__ == '__main__':\n main()\n","sub_path":"rappler_spider/rappler_spider/proxy/scrapeProxy.py","file_name":"scrapeProxy.py","file_ext":"py","file_size_in_byte":6112,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"316904072","text":"# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# NOTE: This file is copied (rather than symlinked) since a symlink **outside**\n# of the package tree won't get copied during a ``pip install``.\n\nimport os\nimport pathlib\nimport subprocess\nimport sys\n\nimport numpy as np\nimport setuptools\n\n\nFORTRAN_LIBRARY_PREFIX = \"libraries: =\"\nGFORTRAN_MISSING_LIBS = \"\"\"\\\n``gfortran`` default library path not found via:\n\n$ gfortran -print-search-dirs\n{}\"\"\"\nGFORTRAN_BAD_PATH = \"``gfortran`` library path {} is not a directory.\"\n\n\ndef gfortran_search_path():\n \"\"\"Get the library directory paths for ``gfortran``.\n\n Looks for ``libraries: =`` in the output of ``gfortran -print-search-dirs``\n and then parses the paths. 
If this fails for any reason, this method will\n print an error and return ``library_dirs``.\n\n Returns:\n List[str]: The library directories for ``gfortran``.\n \"\"\"\n cmd = (\"gfortran\", \"-print-search-dirs\")\n process = subprocess.Popen(cmd, stdout=subprocess.PIPE)\n return_code = process.wait()\n # Bail out if the command failed.\n if return_code != 0:\n return []\n\n cmd_output = process.stdout.read().decode(\"utf-8\")\n # Find single line starting with ``libraries: ``.\n search_lines = cmd_output.strip().split(\"\\n\")\n library_lines = [\n line[len(FORTRAN_LIBRARY_PREFIX) :]\n for line in search_lines\n if line.startswith(FORTRAN_LIBRARY_PREFIX)\n ]\n if len(library_lines) != 1:\n msg = GFORTRAN_MISSING_LIBS.format(cmd_output)\n print(msg, file=sys.stderr)\n return []\n\n # Go through each library in the ``libraries: = ...`` line.\n library_line = library_lines[0]\n accepted = set()\n for part in library_line.split(os.pathsep):\n full_path = os.path.abspath(part.strip())\n if os.path.isdir(full_path):\n accepted.add(full_path)\n else:\n # Ignore anything that isn't a directory.\n msg = GFORTRAN_BAD_PATH.format(full_path)\n print(msg, file=sys.stderr)\n\n return sorted(accepted)\n\n\ndef get_extra_objects(here):\n return (\n os.path.join(here, \"object_files\", \"types.o\"),\n os.path.join(here, \"object_files\", \"forall_.o\"),\n os.path.join(here, \"object_files\", \"do_.o\"),\n os.path.join(here, \"object_files\", \"spread_.o\"),\n os.path.join(here, \"object_files\", \"serial_.o\"),\n os.path.join(here, \"object_files\", \"vs_algorithm.o\"),\n )\n\n\ndef extension_modules(here, name):\n extra_objects = get_extra_objects(here)\n missing = [path for path in extra_objects if not os.path.isfile(path)]\n if missing:\n parts = [\"Missing object file(s):\"]\n parts.extend(f\"- {path}\" for path in missing)\n parts.extend(\n [\n \"\",\n f\"here: {here}\",\n f\"__file__: {__file__}\",\n \"\",\n \"files in `here`:\",\n ]\n )\n files_here = pathlib.Path(here).glob(\"*\")\n parts.extend(f\"- {path}\" for path in files_here)\n\n msg = \"\\n\".join(parts)\n raise RuntimeError(msg)\n\n extension = setuptools.Extension(\n f\"{name}._binary\",\n [os.path.join(name, \"_binary.c\")],\n extra_objects=extra_objects,\n include_dirs=[np.get_include()],\n libraries=[\"gfortran\"],\n library_dirs=gfortran_search_path(),\n )\n return [extension]\n\n\ndef do_setup(here, name):\n ext_modules = extension_modules(here, name)\n setuptools.setup(\n name=name,\n packages=[name],\n install_requires=[\"numpy\"],\n ext_modules=ext_modules,\n )\n","sub_path":"src/python-bakeoff-opt/setup_shared.py","file_name":"setup_shared.py","file_ext":"py","file_size_in_byte":4107,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"343453846","text":"from locust import HttpUser,task,TaskSet,between,events\nimport sys,json,time,random\nsys.path.append(\"E:/myTestFile/TestObject/zhongfuan/yunzhangben/yzb_performance\")\nfrom common.qianMing import GetDataSign\nclass PublicRequest(TaskSet):\n\n def requestMethod(self,url,data,header):\n with self.client.post(url,data=data,headers=header,verify=False,allow_redirects=False,catch_response=True) as response:\n return response\n\n\n \n\n def publicRequest(self,url,urlName,public_data,header):\n # public_data = json.dumps(public_data)\n with self.client.post(url,data = public_data,headers=header,name=urlName+url,verify=False,allow_redirects=False,catch_response=True) as response:\n print(\"响应结果======{}\".format(response.text))\n 
if \"[200]\" in str(response):\n result = json.loads(response.text)\n if 'status' in result and result[\"status\"] == 200 or 'status' in result and result[\"status\"] == \"200\":\n time.sleep(random.randint(1,3))\n response.success()\n return result\n else:\n response.failure(\"报错url==={}-{} ,参数==={} ,报错原因==={}\".format(urlName,url,public_data,response.text))\n\n else:\n print(\"报错url==={}-{} ,参数==={} ,报错原因==={}\".format(urlName,url,public_data,response))\n response.failure(\"报错url==={}-{} ,参数==={} ,报错原因==={}\".format(urlName,url,public_data,response))\n \n \n\n\n","sub_path":"yunzhangben/备用/yzb_regression/common/publicRequestMethod.py","file_name":"publicRequestMethod.py","file_ext":"py","file_size_in_byte":1568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"333843423","text":"invoer = \"5-9-7-1-7-8-3-2-4-8-7-9\"\nLijst = []\nfor waarde in invoer:\n if waarde != '-':\n Lijst.append(int(waarde))\nLijst.sort()\nprint('Geimporteerde lijst van ints: ' + str(Lijst))\nprint('Grootste getal: ' + str(max(Lijst)) + ' en Kleinste getal: ' + str(min(Lijst)))\nprint('Aantal getallen: ' + str(len(Lijst))+ ' en de som van de getallen: '+ str(sum(Lijst)))\nprint('Gemiddelde: ' + str(sum(Lijst)/len(Lijst)))\n","sub_path":"PycharmProjects/PROG/Les 6/6_3.py","file_name":"6_3.py","file_ext":"py","file_size_in_byte":422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"434375004","text":"# Min, Mean, Median, Average, Standard Deviation,\n#Variance\n\nimport numpy as np\n\na = np.array([[1,2,3,4],[5,4,1,8]])\n# print(np.min(a))\n# print(np.min(a,axis = 0))\n# print(np.min(a,axis = 1))\n\n# b = np.array([1,2,3,4,5])\n# m = sum(b)/5\n# print(m)\n\n# print(np.mean(b))\n# print(np.mean(a,axis = 0))\n# print(np.mean(a,axis = 1))\n\nc = np.array([1,5,4,2,0])\n# print(np.median(c))\n# print(np.mean(c))\n\n# wt = np.array([1,2,3,4,5])\n# print(np.average(c,weights =wt))\n\n# Standard Deviation\nu = np.mean(c)\nmyStd = np.sqrt(np.mean(abs(c-u)**2))\nprint(myStd)\n\n#Inbuilt\ndev = np.std(c)\nprint(dev)\n\n#Varaince\nprint(myStd**2)\nprint(np.var(c))","sub_path":"Numpy/numpy_statisticalComputation.py","file_name":"numpy_statisticalComputation.py","file_ext":"py","file_size_in_byte":628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"611434976","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Mar 3 18:09:50 2018\r\n\r\n@author: mima\r\n\r\nNeed flat rate table\r\n\"\"\"\r\n\r\nimport pandas as pd\r\nimport numpy as np\r\nfrom datetime import datetime as dt\r\nimport time\r\nfrom pandas.tseries.offsets import BDay\r\n\r\n\r\n \r\ndef import_data():\r\n t2 = time.process_time()\r\n\r\n data = pd.ExcelFile('C://Users//mima//Documents//price_freight_assay_data.xlsx')\r\n raw_rates = pd.ExcelFile('C://Users//mima//Documents//flat_rates.xlsx')\r\n trader_assessed = pd.ExcelFile('L://TRADING//ANALYSIS//GLOBAL//Arb Models//Pecking Order 2018.xlsm')\r\n \r\n assay = pd.read_excel(data, 'assay', index_col = 'Database_Name').to_dict('index')\r\n ws = pd.read_excel(data, 'ws')\r\n expiry_table = pd.read_excel(data, 'expiry', index_col = 'Month')\r\n ports = pd.read_excel(data, 'ports')\r\n sub_to_ws = pd.read_excel(data, 'sub_to_ws', header = None)\r\n sub_to_ws = sub_to_ws.set_index([0]).to_dict()\r\n \r\n \"\"\"table containing the basrah base worldscale that they fix their freight against\"\"\"\r\n basrah_ws_base = pd.read_excel(data, 
'basrah_ws_base', index_col = 'Date')\r\n \r\n \"\"\"Take in the crude prices and codes and convert to a dataframe.\r\n We need to take the first 2 rows of the prices with no headers as this will give us the cude name and the code ascociated\r\n Then transpose from rows to columns and rename the columns. This will be for later when we determine crude prices basis desired comaprison\"\"\"\r\n #prices_reference = (pd.read_excel(data, 'paper prices', header = None).iloc[0:2,1:]).transpose().rename(columns={0:'Name', 1: 'Code'}) \r\n \r\n \"\"\"Merge the WS table with the prices table, slice df so 2016 onwards (Flat rates last date is 2015). \r\n We don't drop rows now as dropping would be dependent on any nans in any column\"\"\"\r\n #total = prices.merge(ws_table, how = 'inner', left_index = True, right_index = True)\r\n #total = total.merge(paper_prices, how = 'inner', left_index = True, right_index = True)\r\n #total = total.iloc[total.index > dt(2015,12,31)]\r\n \r\n \"\"\"this new total table generates all the prices in one place for us\"\"\"\r\n total = pd.read_excel(data, 'price_warehouse', header = 4).drop(['Timestamp'])\r\n total.index = pd.to_datetime(total.index)\r\n total.sort_index(inplace=True)\r\n total.fillna(method='ffill', inplace=True)\r\n total = total[total.index > dt(2015,1,1)]\r\n \r\n \"\"\"We know there are some perculiarities in the data, such as the OSPs. So create this table here to handle. Found out need to shift the prices back a month but in order\r\n to identify which ones, needed the list of OSP crudes\"\"\"\r\n exceptions = {\r\n 'Arab Extra Light':\r\n {'ROTTERDAM':{'Code':'AAIQQ00','Index':'BWAVE'},\r\n 'AUGUSTA':{'Code':'AAWQK00','Index':'BWAVE'},\r\n 'HOUSTON':{'Code':'AAIQZ00','Index':'WTI'},\r\n 'SINGAPORE':{'Code':'AAIQV00','Index':'OMAN/DUBAI'}},\r\n 'Arab Light':\r\n {'ROTTERDAM':{'Code':'AAIQR00','Index':'BWAVE'},\r\n 'AUGUSTA':{'Code':'AAWQL00','Index':'BWAVE'},\r\n 'HOUSTON':{'Code':'AAIRA00','Index':'WTI'},\r\n 'SINGAPORE':{'Code':'AAIQW00','Index':'OMAN/DUBAI'}},\r\n 'Arab Medium':\r\n {'ROTTERDAM':{'Code':'AAIQS00','Index':'BWAVE'},\r\n 'AUGUSTA':{'Code':'AAWQM00','Index':'BWAVE'},\r\n 'HOUSTON':{'Code':'AAIRB00','Index':'WTI'},\r\n 'SINGAPORE':{'Code':'AAIQX00','Index':'OMAN/DUBAI'}},\r\n 'Arab Heavy':\r\n {'ROTTERDAM':{'Code':'AAIQT00','Index':'BWAVE'},\r\n 'AUGUSTA':{'Code':'AAWQN00','Index':'BWAVE'},\r\n 'HOUSTON':{'Code':'AAIRC00','Index':'WTI'},\r\n 'SINGAPORE':{'Code':'AAIQY00','Index':'OMAN/DUBAI'}},\r\n 'Basrah Light':\r\n {'ROTTERDAM':{'Code':'AAIPH00','Index':'Dated'},\r\n 'AUGUSTA':{'Code':'AAIPH00','Index':'Dated'},\r\n 'HOUSTON':{'Code':'AAIPG00','Index':'WTI'},\r\n 'SINGAPORE':{'Code':'AAIPE00','Index':'OMAN/DUBAI'}},\r\n 'Basrah Heavy':\r\n {'ROTTERDAM':{'Code':'AAXUC00','Index':'Dated'},\r\n 'AUGUSTA':{'Code':'AAXUC00','Index':'Dated'},\r\n 'HOUSTON':{'Code':'AAXUE00','Index':'Mars'},\r\n 'SINGAPORE':{'Code':'AAXUA00','Index':'OMAN/DUBAI'}},\r\n 'Iranian Heavy':\r\n {'ROTTERDAM':{'Code':'AAIPB00','Index':'BWAVE'},\r\n 'AUGUSTA':{'Code':'AAUCH00','Index':'BWAVE'},\r\n #'Iranian Heavy':{'HOUSTON':{'Code':abcde,'Index':'WTI'}},\r\n 'SINGAPORE':{'Code':'AAIOY00','Index':'OMAN/DUBAI'}},\r\n 'Iranian Light':\r\n {'ROTTERDAM':{'Code':'AAIPA00','Index':'BWAVE'},\r\n 'AUGUSTA':{'Code':'AAUCJ00','Index':'BWAVE'},\r\n 'SINGAPORE':{'Code':'AAIOX00','Index':'OMAN/DUBAI'}},\r\n 'Forozan':\r\n {'ROTTERDAM':{'Code':'AAIPC00','Index':'BWAVE'},\r\n 'AUGUSTA':{'Code':'AAUCF00','Index':'BWAVE'},\r\n 
'SINGAPORE':{'Code':'AAIOZ00','Index':'OMAN/DUBAI'}},\r\n            'Isthmus':{'ROTTERDAM':{'Code':'AAIQC00','Index':'Dated'},\r\n                   'AUGUSTA':{'Code':'AAIQC00','Index':'Dated'},\r\n                   'HOUSTON':{'Code':'AAIPZ00','Index':'WTI'},\r\n                   'SINGAPORE':{'Code':'AAIQE00','Index':'OMAN/DUBAI'}},\r\n            'Maya':{'ROTTERDAM':{'Code':'AAIQB00','Index':'Dated'},\r\n                   'AUGUSTA':{'Code':'AAIQB00','Index':'Dated'},\r\n                   'HOUSTON':{'Code':'AAIPY00','Index':'WTI'},\r\n                   'SINGAPORE':{'Code':'AAIQD00','Index':'OMAN/DUBAI'}}\r\n            }\r\n    \r\n    crudes_to_shift = pd.DataFrame.from_dict({(crude,destination): exceptions[crude][destination] \r\n                                   for crude in exceptions.keys() \r\n                                   for destination in exceptions[crude].keys()}, \r\n                                    orient='index')\r\n    \r\n    \"\"\"convert the dataseries to a list, then use set to get the unique items, then convert back to a list\"\"\" \r\n    crudes_to_shift = list(set(list(crudes_to_shift['Code'])))\r\n    \r\n    \"\"\"For the crudes in the list, I want to resample the series at the month start so there is a common value for the start of each month,\r\n    I then want to shift these values by 1 backwards, in this case because we resampled, this automatically means shift back one month,\r\n    I then want to re-index the new dataframe to conform to where we are putting it back into, and finally I assign the total dataframe where the \r\n    column headers are equal to the crude list, the new shifted and filled forward values to make sure everything lines up\"\"\"\r\n    total[crudes_to_shift] = total[crudes_to_shift].resample('MS').mean().shift(-1, freq='MS').reindex(total.index).fillna(method='ffill') \r\n\r\n    #total['AAXUC00']\r\n    \r\n    \"\"\"This will help with the date error. Turn the index into a numpy array and then assign the value\"\"\"\r\n    if total.index[-1] - total.index[-2] > pd.Timedelta(days=2):\r\n        total.index.values[-1] = total.index[-2] + pd.Timedelta(days=1)\r\n\r\n\r\n    \"\"\"Clean the column headers so no white spaces - use simple list comprehension and set headers equal to cleaned\"\"\"\r\n    cleaned_column_headers = [i.strip() for i in total.columns.values]\r\n    total.columns = cleaned_column_headers\r\n    \r\n    \"\"\"The below gets rid of the rows in the index that have NaT against them and then expands to daily and fills backwards\"\"\"\r\n    crude_diffs = pd.read_excel(trader_assessed, 'Crude Diffs Traders', header = 0)\r\n    crude_diffs = crude_diffs.loc[pd.notnull(crude_diffs.index)]\r\n    crude_diffs = crude_diffs.drop([name for name in crude_diffs.columns if 'Unnamed' in name], axis=1)\r\n\r\n    \r\n    #crude_diffs.index = crude_diffs.index.map(lambda x : x + 1*BDay())\r\n    crude_diffs = crude_diffs.reindex(total.index).fillna(method='bfill').fillna(method='ffill')\r\n    \r\n    \"\"\"Slice the crude diffs where the dates in the index are the same as the dates in the total dataframe\"\"\"\r\n    #crude_diffs = crude_diffs[crude_diffs.index.isin(total.index)]\r\n    crudes_diff_against_osp = ['Basrah Light','Basrah Heavy']\r\n    codes_list = [x for x in crude_diffs.columns if x not in crudes_diff_against_osp]\r\n    \r\n    \"\"\"Apply the values in crude diffs to the correct codes and dates in the total dataframe\"\"\"\r\n    total.update(crude_diffs[codes_list])\r\n    \r\n    \r\n    \r\n    \r\n    \"\"\"We have to convert the prices that are quoted in absolutes into a diff vs a local index.\r\n    This is LOOP Sour\"\"\"\r\n    total['AALSM01'].loc[total['AALSM01'] > 30] = total['AALSM01'].loc[total['AALSM01'] > 30] - total['CLc1']\r\n    #total.loc[total.index.isin(crude_diffs.index), codes_list] = crude_diffs[codes_list]\r\n    
#total[codes_list]\r\n \r\n #total.update(crude_diffs[codes_list])\r\n \"\"\" Need this for the sulphur table\"\"\"\r\n forties_sulphur = pd.read_excel(trader_assessed, 'Forties de-esc', header = [22], parse_cols=\"H:I\").set_index('week ending')\r\n forties_sulphur = forties_sulphur.loc[pd.notnull(forties_sulphur.index)]\r\n forties_sulphur = forties_sulphur.reindex(total.index).fillna(method='ffill')\r\n\r\n \"\"\"Also need to adjust the cfds to take into account the inter month BFOE spread\"\"\" \r\n cfd_list = ['PCAKA00','PCAKC00','PCAKE00','PCAKG00','AAGLU00','AAGLV00','AALCZ00','AALDA00']\r\n temp = total[cfd_list].sub(pd.Series(total['PCAAQ00'] - total['PCAAR00']), axis=0)\r\n temp = temp[temp.index > dt(2017,6,30)]\r\n total.loc[total.index.isin(temp.index), list(temp.columns)] = temp[list(temp.columns)]\r\n \r\n \"\"\"This turns the 5 years of rate matricies into a table for use to reference - 12/04/2018\"\"\" \r\n rates = []\r\n for x,y in enumerate([name.split()[2] for name in raw_rates.sheet_names]):\r\n f = pd.read_excel(raw_rates, sheetname = x, header = None).iloc[1:47,1:]\r\n lplen = len(f.iloc[:,1])\r\n dplen = len(f.iloc[1,:])\r\n for j in range(1, dplen):\r\n for i in range(1,lplen):\r\n LoadPort = f.iloc[i,0]\r\n DischargePort = f.iloc[0,j]\r\n Year = y\r\n Rate = f.iloc[i,j]\r\n rates.append({'LoadPort':LoadPort, 'DischargePort': DischargePort, 'Year':Year,'Rate':Rate})\r\n \r\n rate_data = pd.DataFrame(rates)\r\n \r\n \"\"\"Also initialise the temp df with index of total. Temp df is tol hold the dataseries needed to calculate the freight\"\"\"\r\n df = pd.DataFrame(index=total.index)\r\n df['Date'] = df.index\r\n \r\n \"\"\"This function allows us to apply the expiration date for the wti futures used to determine what structure we apply to the CMA\r\n Have tried timing and slight improvment with the blow of 0.2seconds....\"\"\"\r\n \r\n t = time.process_time()\r\n\r\n for_dates = lambda x: (expiry_table.loc[(expiry_table.index.month == x.month)&(expiry_table.index.year == x.year)]['Expiry']).iat[0]\r\n \r\n df['Expiry'] = df['Date'].apply(for_dates)\r\n df.drop(['Date'], inplace=True, axis=1)\r\n \r\n \r\n \r\n \r\n\r\n print(\"df['Expiry'] created successfully: Time was {}\".format(time.process_time() - t))\r\n print(\"Temp DataFrame created successfully\")\r\n print(\"import_data() created successfully: Time was {}\".format(time.process_time() - t2))\r\n \r\n return assay, ws, ports, total, rate_data, sub_to_ws, df, basrah_ws_base, crude_diffs, forties_sulphur, exceptions, crudes_to_shift\r\n\r\n#crude = 'Amna'\r\n#destination = 'Rotterdam'\r\n#import_data()\r\n#assay, ws, ports, total, rate_data, sub_to_ws, df, basrah_ws_base, crude_diffs = import_data() \r\n \r\ndef arb(crude,destination,assay, ws, ports, total, rate_data, sub_to_ws, df, basrah_ws_base, crude_diffs, forties_sulphur, exceptions, crudes_to_shift): \r\n #crude = 'Azeri'\r\n #destination = 'Rotterdam'\r\n \r\n #crude = 'Amna'\r\n #destination = 'Houston'\r\n \r\n \"\"\"create the dataframes for use later\"\"\"\r\n df_freight = pd.DataFrame(index=df.index)\r\n df_prices = pd.DataFrame(index=df.index)\r\n \r\n index_wti = [x.lower().strip() for x in ['WTI F1','WTI CMA','1ST LINE WTI','2D LINE WTI','L.A WTI','FORWARD WTI','WTI']]\r\n index_dtd = [x.lower().strip() for x in ['DATED BRENT', 'DATED','N.SEA DATED','BTC Dated', 'MED DATED','WAF DATED','CANADA DATED','CANADA BRENT DATED','ANGOLA DATED',' GHANA DATED']]\r\n index_dub = [x.lower().strip() for x in ['DUBAI','DUBAI M2','OMAN/DUBAI']]\r\n 
crudes_diff_against_osp = crudes_to_shift\r\n \r\n \"\"\"Declare the main prices that will be used in order to use shorthand notation\"\"\"\r\n \r\n \r\n dtd = total['PCAAS00']\r\n dub = total['AAVMR00']\r\n wtim1 = total['CLc1'] \r\n wtim2 = total['CLc2']\r\n brentm1 = total['LCOc1']\r\n brentm2 = total['LCOc2']\r\n wti_cma_m1 = total['AAVSN00']\r\n cfd1 = total['PCAKG00']\r\n cfd2 = total['AAGLV00']\r\n cfd3 = total['PCAKE00']\r\n cfd4 = total['PCAKG00']\r\n cfd5 = total['AAGLU00']\r\n cfd6 = total['AAGLV00']\r\n cfd7 = total['AALCZ00']\r\n cfd8 = total['AALDA00']\r\n wti_br_m1 = wtim1 - brentm1\r\n wtim1_m2 = wtim1-wtim2\r\n brentm1_m2 = brentm1 - brentm2\r\n efpm2 = total['AAGVX00']\r\n efs2 = total['AAEBS00']\r\n mars_wti2 = total['AAKTH00']\r\n dfl_m1 = total['AAEAA00']\r\n\r\n days = 5\r\n loadport = assay[crude]['LoadPort'] \r\n sub_region = ports[ports['Name'] == loadport]['Subregion'].map(sub_to_ws[1]).to_string(index = False) # NB the index = False is make sure we dont take in the index number given in the output\r\n sub_region_2 = ports[ports['Name'] == destination]['Subregion'].map(sub_to_ws[1]).to_string(index = False)\r\n discharge_price_region = ports[ports['Name'] == destination]['Subregion'].map(sub_to_ws[3]).to_string(index = False)\r\n\r\n expiry_condition = df.index < df.Expiry\r\n cfd_condition = cfd1 > cfd2\r\n\r\n \"\"\"Need to handle the one month forward OSP concept, so here, take the dataframe for the exceptions above, condense to monhtly values which wont\r\n change the value as same for each day, shift that forward then re-expand\"\"\"\r\n if assay[crude]['Code'] == 'multiple': \r\n diff = total[exceptions[crude][discharge_price_region]['Code']]\r\n crude_vs = exceptions[crude][discharge_price_region]['Index'].lower().strip()\r\n else: \r\n diff = total[assay[crude]['Code']]\r\n crude_vs = assay[crude]['Index'].lower().strip()\r\n \r\n \r\n \"\"\"This is to make sure we use the CIF port if within the loaidng region, or switch to FOB loading port and FOB price if not\"\"\"\r\n if (assay[crude]['Basis'] == 'CIF') & (sub_region_2 != sub_region):\r\n loadport = assay[crude]['FOBLoadPort']\r\n sub_region = ports[ports['Name'] == loadport]['Subregion'].map(sub_to_ws[1]).to_string(index = False)\r\n #diff = total[assay[crude]['FOBCode']]\r\n else:\r\n pass \r\n \r\n def construct_freight():\r\n def calculate_flat_rate():\r\n \"\"\"create the flat rates table for the rates calculations and column creation\"\"\" \r\n flat_rate_table = rate_data.loc[(rate_data['LoadPort'] == loadport)&\r\n (rate_data['DischargePort'] == destination)]\r\n \r\n def calculate_flat_rates(x):\r\n return float(flat_rate_table.loc[flat_rate_table['Year'].astype(int) == x, 'Rate'])\r\n \r\n \"\"\"Vectorising the function amkes it applicable over an array - before had to use pandas which was element wise application - i.e. 
SLOW\"\"\"\r\n v_calculate_flat_rates = np.vectorize(calculate_flat_rates)\r\n df_freight['Rate'] = np.apply_along_axis(v_calculate_flat_rates,0,np.array(df.index.year))\r\n \r\n \r\n \r\n if ports[ports['Name'] == destination]['Country'].iat[0] == 'South Korea':\r\n flat_rate_table = rate_data.loc[(rate_data['LoadPort'] == 'Ruwais')&\r\n (rate_data['DischargePort'] == 'Singapore')]\r\n v_calculate_flat_rates = np.vectorize(calculate_flat_rates)\r\n df_freight['Murban_Sing_Flat'] = np.apply_along_axis(v_calculate_flat_rates,0,np.array(df.index.year))\r\n \r\n return df_freight\r\n \r\n def calculate_port_costs():\r\n \"\"\"These are for the odd costs, tax rebates, etc\"\"\"\r\n df_freight['Costs'] = 0\r\n \r\n # This is the export cost out of Houston\r\n if sub_region in (['US GULF (padd 3)']):\r\n df_freight['Houston_Load_Costs'] = np.where(df_freight.index > dt(2018,2,28),0.09,0)\r\n df_freight['Costs'] += df_freight['Houston_Load_Costs']\r\n \r\n # Port costs to discharge in Rotterdam \r\n if destination == 'Rotterdam':\r\n df_freight['Rott_Discharge_Costs'] = 0.15\r\n df_freight['Costs'] += df_freight['Rott_Discharge_Costs']\r\n \r\n # Port costs to discharge in Houston\r\n if destination == 'Houston':\r\n df_freight['Hous_Discharge_Costs'] = 0.25\r\n df_freight['Costs'] += df_freight['Hous_Discharge_Costs']\r\n \r\n if loadport == 'Basrah':\r\n df_freight['Basrah_Costs'] = 0.76\r\n df_freight['Costs'] += df_freight['Basrah_Costs']\r\n \r\n if loadport == 'Ras Tanura':\r\n df_freight['Saudi_Costs'] = 0.66\r\n df_freight['Costs'] += df_freight['Saudi_Costs']\r\n \r\n return df_freight \r\n\r\n \r\n\r\n\r\n def freight_and_quality_exceptions():\r\n if crude in ('Forties'):\r\n df_freight['Buzzard_Content'] = forties_sulphur['buzzard content']\r\n df_freight['Implied_Sulphur'] = df_freight['Buzzard_Content'] * 0.012 + 0.003\r\n df_freight['De-Escalator_Threshold'] = np.round(df_freight['Implied_Sulphur'], 3)\r\n df_freight['De-Escalator_Counts'] = np.minimum(0, 6-df_freight['Implied_Sulphur']*1000)\r\n df_freight['Platts_De_Esc'] = total['AAUXL00']\r\n df_freight['Forties_Margin_Impact'] = df_freight['Platts_De_Esc'] * df_freight['De-Escalator_Counts'] * -1\r\n df_freight['Costs'] += df_freight['Forties_Margin_Impact']\r\n \r\n if crude in ('Basrah Light','Basrah Heavy'):\r\n \"\"\"This handles the freight escalation calculation from Iraq - the base is sent by SOMO, and table is in databse / excel wb\"\"\"\r\n monthly_averages = total['PFAOH00'].asfreq(BDay()).resample('BMS').mean() # resampled so we have the business month start, corrects averaging error if cma\r\n func_ma_on_days = lambda x: (monthly_averages.loc[(monthly_averages.index.month == x.month)&(monthly_averages.index.year == x.year)]).iat[0]\r\n \r\n \"\"\"Create funcs to handle basrah base and flat rate values, apply over df and calc esclator\"\"\"\r\n func_ws_base = lambda x: (basrah_ws_base.loc[(basrah_ws_base.index.year == x.year)]['SOMO_WS']).iat[0]\r\n func_fr = lambda x: (basrah_ws_base.loc[(basrah_ws_base.index.year == x.year)]['SOMO_FlatRate']).iat[0]\r\n func_bhapi = lambda x: (basrah_ws_base.loc[(basrah_ws_base.index.year == x.year)]['BasrahHeavyAPI']).iat[0] \r\n func_blapi = lambda x: (basrah_ws_base.loc[(basrah_ws_base.index.year == x.year)]['BasrahLightAPI']).iat[0] \r\n df_freight['Date'] = df_freight.index\r\n df_freight['WS Month Avg'] = df_freight['Date'].apply(func_ma_on_days)\r\n df_freight['SOMO Base WS'] = df_freight['Date'].apply(func_ws_base)\r\n # We have to apply the corrcetion here after SOMO 
dropped their base rate earlier this year - assumption\r\n        # only valid for 2018\r\n        df_freight['SOMO Base WS'].iloc[(df_freight.index >= dt(2018,4,1))&(df_freight.index <= dt(2018,12,31))] = 25 \r\n        df_freight['Base_FR_for_esc'] = df_freight['Date'].apply(func_fr)\r\n        \r\n        if crude == 'Basrah Light':\r\n            df_freight['API Esc'] = df_freight['Date'].apply(func_blapi)\r\n        else:\r\n            df_freight['API Esc'] = df_freight['Date'].apply(func_bhapi) \r\n        \r\n        \r\n        df_freight['WS for Esc'] = (df_freight['WS Month Avg'] - df_freight['SOMO Base WS']) * df_freight['Base_FR_for_esc'] / 7.3 / 100\r\n        df_freight.drop(['Date'], axis = 1, inplace=True)\r\n        #df_freight[['WS for Esc','API Esc']] = df_freight[['WS for Esc','API Esc']].resample('MS').mean().shift(-1, freq='MS').reindex(total.index).fillna(method='ffill')\r\n\r\n        # South Korean particulars\r\n        if ports[ports['Name'] == destination]['Country'].iat[0] == 'South Korea':\r\n            # Freight rebate on imported crudes\r\n            df_freight['Murban_Freight_Comp'] = total['PFAOC00'] / 100 * df_freight['Murban_Sing_Flat'] / 7.66 #Murban density conversion\r\n            df_freight['UKC-Yosu_VLCC'] = total['AASLA00'] * 1000000 / 2000000\r\n            df_freight['Freight_Rebate'] = np.maximum(df_freight['UKC-Yosu_VLCC'] - df_freight['Murban_Freight_Comp'], 0.6)\r\n            df_freight['Costs'] -= df_freight['Freight_Rebate']\r\n            \r\n            # Tax rebate on crudes out of Europe\r\n            if ports[ports['Name'] == loadport]['Region'].iat[0] in (['NW EUROPE','MED']):\r\n                df_freight['FTA_Tax_Rebate'] = 0.006 * total['LCOc1']\r\n                df_freight['Costs'] -= df_freight['FTA_Tax_Rebate']\r\n            \r\n            # Tax rebate on crudes out of the US\r\n            if ports[ports['Name'] == loadport]['Region'].iat[0] in (['N AMERICA']):\r\n                df_freight['FTA_Tax_Rebate'] = 0.005 * total['CLc1']\r\n                df_freight['Costs'] -= df_freight['FTA_Tax_Rebate']\r\n        \r\n        # Costs associated with lifting CPC based on delays\r\n        if crude == 'CPC Blend':\r\n            df_freight['TS_Delays'] = np.maximum(total['AAWIL00'] + total['AAWIK00'] - 2,0)\r\n            df_freight['TS_Demur'] = total['AAPED00']\r\n            df_freight['TS_Demur_Costs'] = df_freight['TS_Delays'].mul(df_freight['TS_Demur'])/130\r\n            df_freight['Costs'] += df_freight['TS_Demur_Costs']\r\n        \r\n        # Costs associated with lifting Urals, actually a rebate as giving back port costs that are included in CIF price\r\n        if crude in (['Urals Nth', 'Urals Med']):\r\n            df_freight['Urals_Cif_Rebate'] = 0.11\r\n            df_freight['Costs'] -= df_freight['Urals_Cif_Rebate']\r\n        \r\n        if crude == 'Forties':\r\n            df_freight['Forties_Mkt_Discount'] = 0.5\r\n            df_freight['Costs'] -= df_freight['Forties_Mkt_Discount']\r\n        else:\r\n            pass\r\n        \r\n        \r\n        \r\n        return df_freight\r\n    \r\n    def calculate_freight():\r\n        \r\n        if (assay[crude]['Basis'] == 'CIF')& (sub_region_2 != sub_region):\r\n            \"\"\"We need to essentially do the calculate freight function again, but rename the 'Rate' column to 'Onward_Rate'\"\"\"\r\n            loadport = assay[crude]['FOBLoadPort'] \r\n            cif_destination = assay[crude]['LoadPort'] \r\n            flat_rate_table = rate_data.loc[(rate_data['LoadPort'] == loadport)&\r\n                                        (rate_data['DischargePort'] == cif_destination)]\r\n            \r\n            def calculate_flat_rates(x):\r\n                return float(flat_rate_table.loc[flat_rate_table['Year'].astype(int) == x, 'Rate'])\r\n            \r\n            \"\"\"Vectorising the function makes it applicable over an array - before had to use pandas which was element wise application - i.e. 
SLOW\"\"\"\r\n v_calculate_flat_rates = np.vectorize(calculate_flat_rates)\r\n df_freight['Onward_Rate'] = np.apply_along_axis(v_calculate_flat_rates,0,np.array(df.index.year))\r\n \r\n sub_region_onward = ports[ports['Name'] == loadport]['Subregion'].map(sub_to_ws[1]).to_string(index = False) # NB the index = False is make sure we dont take in the index number given in the output\r\n sub_region_onward_2 = ports[ports['Name'] == cif_destination]['Subregion'].map(sub_to_ws[1]).to_string(index = False)\r\n \r\n ws_codes = ws[(ws['Origin'] == sub_region_onward)&(ws['Destination'] == sub_region_onward_2)]\r\n \r\n df_freight['Onward_Costs'] = 0\r\n if cif_destination == 'Rotterdam':\r\n df_freight['Onward_Rott_Discharge_Costs'] = 0.15\r\n df_freight['Onward_Costs'] += df_freight['Onward_Rott_Discharge_Costs']\r\n \r\n onward_vessel_size = []\r\n for i in list(ws_codes['Code']):\r\n #i = 'PFAGN10'\r\n onward_size = ws_codes[ws_codes['Code'] == i]['Size'].iat[0][0:3] + str('_Onward')\r\n onward_vessel_size.append(onward_size)\r\n onward_name = ws_codes[ws_codes['Code'] == i]['Name'].iat[0] + str('_Onward')\r\n if ws_codes[ws_codes['Code'] == i]['Terms'].values == 'lumpsum':\r\n df_freight[onward_name] = total[ws_codes[ws_codes['Code'] == i]['Code'].values] * 1000000\r\n df_freight[onward_size] = total[ws_codes[ws_codes['Code'] == i]['Code'].values] * 1000000 / (ws_codes[ws_codes['Code'] == i]['bbls'].values * 1000) + df_freight['Costs']\r\n df_freight.drop(['Rate'], axis=1)\r\n else:\r\n df_freight[onward_name] = total[i]\r\n df_freight[onward_size] = total[i] / 100 * df_freight['Onward_Rate'] / assay[crude]['Conversion'] + df_freight['Onward_Costs']\r\n\r\n\r\n \"\"\"This finds the correct worldscale rate and adjusts if it is lumpsum\"\"\"\r\n ws_codes = ws[(ws['Origin'] == sub_region)&(ws['Destination'] == sub_region_2)]\r\n \r\n vessel_size = []\r\n \r\n for i in list(ws_codes['Code']):\r\n #i = 'PFAGN10'\r\n size = ws_codes[ws_codes['Code'] == i]['Size'].iat[0]\r\n vessel_size.append(size)\r\n name = ws_codes[ws_codes['Code'] == i]['Name'].iat[0]\r\n if ws_codes[ws_codes['Code'] == i]['Terms'].values == 'lumpsum':\r\n df_freight[name] = total[ws_codes[ws_codes['Code'] == i]['Code'].values] * 1000000\r\n df_freight[size] = total[ws_codes[ws_codes['Code'] == i]['Code'].values] * 1000000 / (ws_codes[ws_codes['Code'] == i]['bbls'].values * 1000) + df_freight['Costs']\r\n df_freight.drop(['Rate'], axis=1)\r\n else:\r\n df_freight[name] = total[i]\r\n df_freight[size] = total[i] / 100 * df_freight['Rate'] / assay[crude]['Conversion'] + df_freight['Costs']\r\n \r\n if 'WS for Esc' in df_freight.columns.values:\r\n for i in vessel_size:\r\n df_freight[i] = df_freight[i] - df_freight['WS for Esc'] + df_freight['API Esc']\r\n \r\n if (assay[crude]['Basis'] == 'CIF')& (sub_region_2 != sub_region):\r\n for i in list(zip(vessel_size,onward_vessel_size)):\r\n df_freight[i[0]] = df_freight[i[0]] - df_freight[i[1]]\r\n \r\n return df_freight\r\n \r\n calculate_flat_rate()\r\n \r\n calculate_port_costs()\r\n\r\n freight_and_quality_exceptions()\r\n \r\n calculate_freight()\r\n\r\n return df_freight\r\n\r\n def convert_prices():\r\n if crude in crudes_diff_against_osp:\r\n df_prices['OSP'] = diff\r\n df_prices['Diff to OSP'] = crude_diffs[crude]\r\n df_prices['diff'] = diff + crude_diffs[crude]\r\n else:\r\n df_prices['diff'] = diff\r\n \"\"\"depending on discharge, choose the appropriate index\"\"\"\r\n def convert_wti():\r\n df_prices['outright'] = wtim1\r\n if crude_vs in ['wti cma']:\r\n 
df_prices['wti_cma_m1'] = wti_cma_m1\r\n df_prices['wtim1'] = wtim1\r\n df_prices['vs_wti'] = diff + wti_cma_m1 - wtim1\r\n \r\n elif crude_vs in ['mars']:\r\n df_prices['mars_wti2'] = mars_wti2\r\n df_prices['vs_wti'] = diff + mars_wti2\r\n \r\n elif crude_vs in index_wti:\r\n df_prices['wtim1_m2'] = wtim1_m2\r\n df_prices['vs_wti'] = np.where(expiry_condition,\r\n diff + wtim1_m2,\r\n diff)\r\n \r\n elif crude_vs in index_dtd:\r\n cfd_condition = cfd4 > cfd8\r\n df_prices['cfd4'] = cfd4\r\n df_prices['cfd8'] = cfd8\r\n df_prices['efpm2'] = efpm2\r\n df_prices['wti_br_m1'] = wti_br_m1\r\n df_prices['vs_wti'] = np.where(cfd_condition,\r\n diff + cfd8 + efpm2 - (wti_br_m1),\r\n diff + cfd4 + efpm2 - (wti_br_m1))\r\n\r\n elif crude_vs in index_dub:\r\n \"\"\" This is because all the eastern crudes heading here have diffs against BWAVE\"\"\"\r\n pass\r\n else:\r\n df_prices['vs_wti'] = diff\r\n return df_prices\r\n \r\n def convert_dtd():\r\n df_prices['outright'] = dtd\r\n# =============================================================================\r\n# conditions = [(expiry_condition & cfd_condition),\r\n# (expiry_condition & np.invert(cfd_condition)),\r\n# (np.invert(expiry_condition) & cfd_condition),\r\n# (np.invert(expiry_condition) & np.invert(cfd_condition))]\r\n# choices = [(diff + wtim1_m2 - (brentm2 - wtim2) - efpm2 - cfd2),\r\n# (diff + wtim1_m2 - (brentm2 - wtim2) - efpm2 - cfd1),\r\n# (diff - (brentm2 - wtim2) - efpm2 - cfd2),\r\n# (diff - (brentm2 - wtim2) - efpm2 - cfd1)]\r\n# =============================================================================\r\n\r\n if crude_vs in ['wti cma']:\r\n \"\"\"Here use cfd 8 on reccomendation of Andrea as by the time it loads only cfd wk 8 applicable\"\"\"\r\n df_prices['wti_cma_m1'] = wti_cma_m1\r\n df_prices['wtim1'] = wtim1\r\n df_prices['wti_br_m1'] = wti_br_m1\r\n df_prices['wtim1_m2'] = wtim1_m2\r\n df_prices['efpm2'] = efpm2\r\n df_prices['cfd8'] = cfd8\r\n df_prices['vs_dtd'] = diff + wti_cma_m1 - wtim1 + wtim1_m2 + wti_br_m1 - efpm2 - cfd8 \r\n \r\n elif crude_vs in ['bwave']:\r\n \"\"\"Here use cfd 8 on reccomendation of Andrea as by the time it loads only cfd wk 8 applicable\"\"\"\r\n df_prices['dfl_m1'] = dfl_m1\r\n df_prices['vs_dtd'] = diff - dfl_m1 \r\n \r\n elif crude_vs in index_wti:\r\n df_prices['cfd8'] = cfd8\r\n df_prices['wtim1_m2'] = wtim1_m2\r\n df_prices['wti_br_m1'] = wti_br_m1\r\n df_prices['efpm2'] = efpm2\r\n df_prices['vs_dtd'] = np.where(expiry_condition,\r\n diff + wtim1_m2 + wti_br_m1 - efpm2 - cfd8,\r\n diff + wti_br_m1 - efpm2 - cfd8)\r\n \r\n elif crude_vs in index_dtd:\r\n if crude in ('Basrah Light','Basrah Heavy'): \r\n df_prices['cfd3'] = cfd3\r\n df_prices['cfd4'] = cfd4\r\n df_prices['structure'] = ((cfd3 - cfd4)/7) * 7\r\n df_prices['vs_dtd'] = df_prices['diff'] + df_prices['structure']\r\n else:\r\n df_prices['cfd3'] = cfd3\r\n df_prices['cfd5'] = cfd5\r\n if sub_region == sub_region_2:\r\n df_prices['vs_dtd'] = diff\r\n elif sub_region == 'WAF':\r\n df_prices['structure'] = ((cfd3 - cfd5)/14) * days\r\n df_prices['vs_dtd'] = diff + df_prices['structure']\r\n else:\r\n df_prices['structure'] = ((cfd3 - cfd5)/7) * days\r\n df_prices['vs_dtd'] = diff + df_prices['structure']\r\n else:\r\n df_prices['vs_dtd'] = diff\r\n return df_prices\r\n \r\n def convert_dub():\r\n df_prices['outright'] = dub\r\n if crude_vs in ['wti cma']:\r\n \"\"\"Here use cfd 8 on reccomendation of Andrea as by the time it loads only cfd wk 8 applicable\"\"\"\r\n df_prices['wti_cma_m1'] = wti_cma_m1\r\n df_prices['wtim1'] 
= wtim1\r\n df_prices['wti_br_m1'] = wti_br_m1\r\n df_prices['wtim1_m2'] = wtim1_m2\r\n df_prices['efs2'] = efs2\r\n df_prices['vs_dub'] = diff + wti_cma_m1 - wtim1 + wtim1_m2 + wti_br_m1 + efs2 \r\n \r\n elif crude_vs in index_wti:\r\n df_prices['wtim1_m2'] = wtim1_m2\r\n df_prices['wti_br_m1'] = wti_br_m1\r\n df_prices['brentm2'] = brentm2\r\n df_prices['efs2'] = efs2\r\n df_prices['vs_dub'] = np.where(expiry_condition,\r\n diff + wtim1_m2 + wti_br_m1 + efs2,\r\n diff + wti_br_m1 + efs2)\r\n \r\n elif crude_vs in index_dtd:\r\n df_prices['cfd6'] = cfd6\r\n df_prices['efpm2'] = efpm2\r\n df_prices['efs2'] = efs2\r\n df_prices['vs_dub'] = diff + cfd6 + efpm2 + efs2\r\n \r\n else:\r\n df_prices['vs_dub'] = diff\r\n \r\n return df_prices\r\n \r\n index_region = ports[ports['Name'] == destination]['Subregion'].map(sub_to_ws[2]).to_string(index = False)\r\n func_list = {'wti':convert_wti, 'dtd':convert_dtd, 'dub':convert_dub}\r\n [f() for index, f in func_list.items() if index == index_region][0]\r\n return df_prices\r\n \r\n try:\r\n df_freight = construct_freight()\r\n except Exception as e: print(e), print('df_freight') \r\n \r\n try:\r\n df_prices = convert_prices()\r\n except Exception as e: print(e), print('df_prices') \r\n \r\n temp = pd.concat([df_prices,df_freight], axis=1)\r\n price_index = [price_index for price_index in df_prices.columns if 'vs_' in price_index][0]\r\n freight_list = [freight for freight in df_freight.columns if 'max' in freight or 'VLCC' in freight]\r\n \r\n try:\r\n \r\n for k in freight_list:\r\n try:\r\n name = str(k[:4]) + str('_landed_') + str(price_index)\r\n except Exception as e: print('name fails') \r\n if destination == loadport:\r\n temp[name] = df_prices[price_index]\r\n else:\r\n try:\r\n temp[name] = df_prices[price_index].add(df_freight[k])\r\n except Exception as e: print('temp fails') \r\n except Exception as e: print('check')\r\n\r\n return temp\r\n\r\n\r\n#temp.resample('W-FRI').mean()\r\n\r\n\r\n\r\n \r\n ","sub_path":"arb_model.py","file_name":"arb_model.py","file_ext":"py","file_size_in_byte":35707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"399293266","text":"import csv\r\nimport re\r\nimport numpy as np\r\nfrom sklearn.feature_extraction.text import CountVectorizer\r\nfrom sklearn.naive_bayes import MultinomialNB\r\nfrom sklearn.naive_bayes import GaussianNB\r\nfrom pprint import pprint\r\n\r\n\r\ntrainingfile = \"trainingset_all.csv\"\r\n\r\n#testfile = \"kp_testset_MLM.txt\"\r\n#testfile = \"dp_testset_nonMLM.txt\"\r\n#testfile = \"exa_testset_antimlm.txt\"\r\ntestfile = \"testsetMLMclean.txt\"\r\n#testfile = \"testsetnonMLMclean.txt\"\r\n#testfile = \"testset.txt\"\r\noutputfile = \"Results_classifierGNB.txt\"\r\n\r\n\r\nwith open (testfile, encoding = \"utf-8\") as input:\r\n data = input.read().lower()\r\n\r\ndata = re.sub(r'(#discoverunder2k)', \"\", data)\r\ndata = re.sub(r'(#joinmeorwatchme)', \"\", data)\r\ndata = re.sub(r'(#arbonne)', \"\", data)\r\ndata = re.sub(r'(#monathair)', \"\", data)\r\ndata = re.sub(r'(#younique)', \"\", data)\r\ndata = re.sub(r'(#herbalifenutrition)', \"\", data)\r\n\r\ntestposts = data.split(\";;;\")\r\n\r\n\r\nposts = []\r\n\r\nlabels = []\r\n\r\nwith open(trainingfile, encoding='utf-8') as input:\r\n csvread = csv.DictReader(input)\r\n\r\n for row in csvread:\r\n post = row[\"Text\"]\r\n posts.append(post)\r\n #0 is for non-MLM, 1 is for MLM\r\n label = row[\"Tag\"]\r\n labels.append(label)\r\n\r\n\r\nbow_vectorizer = 
CountVectorizer()\r\n\r\npost_vectors = bow_vectorizer.fit_transform(posts)\r\n\r\n\r\nclassifierGNB = GaussianNB()\r\n\r\npost_array = post_vectors.toarray().astype(np.uint8)\r\nclassifierGNB.fit(post_array, labels)\r\n\r\n\r\nresultsGNB = []\r\n\r\nfor testpost in testposts:\r\n test_vector = bow_vectorizer.transform([testpost])\r\n test_array = test_vector.toarray().astype(np.uint8)\r\n predictionGNB = [classifierGNB.predict(test_array)]\r\n resultGNB = predictionGNB[0] if predictionGNB[0] else \"unclear\"\r\n resultsGNB.append(resultGNB)\r\n\r\n#pprint(resultsGNB)\r\n\r\n\r\ni=0\r\npostid = 1\r\n\r\nMLMcount = 0\r\nnonMLMcount = 0\r\nundeterminedcount = 0\r\n\r\n\r\npostamount = len(testposts)\r\n\r\nwith open(outputfile, \"w\", encoding =\"utf-8\") as output:\r\n print(\"Classification results for file: \" + testfile, file = output)\r\n print(\"Amount of posts analyzed: %d \\n\" % postamount, file = output)\r\n #for post in posts:\r\n while i < postamount:\r\n print(\"\\n \\n Post Nr. %d \\n\" % postid, file = output)\r\n print(testposts[i], file = output)\r\n print(\"\\n Classification result for Post Nr. %d: \" % postid, file = output)\r\n result = resultsGNB[i][0]\r\n print(result, file = output)\r\n resultint = int(result)\r\n if resultint == 0:\r\n print(\"non-MLM\", file = output)\r\n nonMLMcount += 1\r\n elif resultint == 1:\r\n print(\"MLM\", file = output)\r\n MLMcount += 1\r\n else:\r\n print(\"undetermined\", file = output)\r\n undeterminedcount += 1\r\n i+=1\r\n postid+=1\r\n print(\"\\n \\n Amount of undetermined posts: \" + str(undeterminedcount), file = output)\r\n mlmpercentage = MLMcount / postamount * 100\r\n nonmlmpercentage = nonMLMcount / postamount * 100\r\n print(\"Amount of posts classified as MLM posts: \" + str(MLMcount) + \" (%d percent)\" % mlmpercentage, file =output )\r\n print(\"Amount of posts classified as non-MLM posts: \" + str(nonMLMcount) + \"(%d percent)\" % nonmlmpercentage, file = output)\r\n\r\n#Brief summary to be printed to the console\r\nprint(\"Classification results for file: \" + testfile)\r\nprint(\"Amount of posts analyzed: %d\" % postamount)\r\nprint(\"Amount of undetermined posts: \" + str(undeterminedcount))\r\nprint(\"Amount of posts classified as MLM posts: \" + str(MLMcount) + \" (%d percent)\" % mlmpercentage)\r\nprint(\"Amount of posts classified as non-MLM posts: \" + str(nonMLMcount) + \"(%d percent)\" % nonmlmpercentage)\r\n","sub_path":"classifierGNB.py","file_name":"classifierGNB.py","file_ext":"py","file_size_in_byte":3668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"328881606","text":"'''\nVessels that impose structure on data\n\nCreated on Feb 24, 2012\n\n@author: christian\n'''\n\nimport numpy as np\nimport scipy.stats\n\nimport data as _data\nimport eelbrain.utils.statfuncs as _statfuncs\n\n\n\nclass celltable:\n \"\"\"\n Attributes\n ----------\n \n indexes\n list of indexes\n \n cells\n dict(index -> label)\n \n data\n dict(index -> cell_data) \n \n group\n dict(index -> match_values)\n \n within\n pairwise square; True if all match_values are equal, False \n otherwise (i.e. whether a dependent measures test is appropri-\n ate or not)\n \n all_within\n True if np.all(self.within)\n \n \"\"\"\n def __init__(self, Y, X, match=None, sub=None, match_func=np.mean):\n \"\"\"\n divides Y into cells defined by X\n \n Y dependent measurement\n X factor or interaction\n match factor on which cases are matched (i.e. subject for a repeated \n measures comparisons). 
If several data points with the same \n case fall into one cell of X, they are combined using \n match_func. If match is not None, celltable.groups contains the\n {Xcell -> [match values of data points], ...} mapping corres-\n ponding to self.data\n sub Bool Array of length N specifying which cases to include\n match_func: see match\n \n \n e.g.\n >>> c = S.celltable(Y, A%B, match=subject)\n \n \"\"\"\n if _data.isfactor(Y):\n if sub is not None:\n Y = Y[sub]\n else:\n Y = _data.asvar(Y, sub)\n \n X = _data.ascategorial(X, sub)\n assert X.N == Y.N\n \n if match:\n match = _data.asfactor(match, sub)\n assert match.N == Y.N\n self.groups = {}\n \n # save args\n self.X = X\n self.Y = Y\n self.sub = sub\n self.match = match\n\n # extract cells and cell data\n self.data = {}\n self.data_indexes = {}\n self.cells = X.cells\n self.indexes = sorted(X.cells.keys())\n for cell in self.indexes:\n sub = X==cell\n self.data_indexes[cell] = sub\n newdata = Y.x[sub]\n if match:\n # get match ids\n group = match.x[sub]\n occurring_ids = np.unique(group)\n \n # sort\n if len(occurring_ids) < len(group):\n newdata = np.array([match_func(newdata[group==ID]) \n for ID in occurring_ids])\n group = occurring_ids\n else:\n sort_arg = np.argsort(group)\n group = group[sort_arg]\n newdata = newdata[sort_arg]\n \n self.groups[cell] = group\n self.data[cell] = newdata\n \n if match:\n # determine which cells compare values for dependent values on \n # match_variable\n# n_cells = len(self.indexes)\n# self.within = np.empty((n_cells, n_cells), dtype=bool)\n self.within = {}\n for cell1 in self.indexes:\n for cell2 in self.indexes:\n if cell1==cell2:\n self.within[cell1,cell2] = True\n else:\n v = self.groups[cell1] == self.groups[cell2]\n if v is not False:\n v = all(v)\n self.within[cell1,cell2] = v\n self.within[cell2,cell1] = v\n self.all_within = np.all(self.within.values())\n else:\n self.within = self.all_within = False\n \n def __repr__(self):\n args = [self.Y.name, self.X.name]\n rpr = \"celltable(%s)\"\n if self.match != None:\n args.append(\"match=%s\"%self.match.name)\n if self.sub != None:\n indexes = ' '.join(str(i) for i in self.sub[:4])\n args.append(\"match=[%s...]\" % indexes)\n return rpr % (', '.join(args))\n \n def get_data(self, out=list):\n if out is dict:\n return self.data\n elif out is list:\n return [self.data[i] for i in self.indexes]\n \n def get_statistic(self, function=np.mean, out=dict, a=1, **kwargs):\n \"\"\"\n :returns: function applied to all data cells.\n \n :arg function: can be string, '[X]sem', '[X]std', or '[X]ci' with X being \n float, e.g. 
'2sem'\n :arg out: can be dict or list.\n :arg a: multiplier (if not provided in ``function`` string)\n \n :arg kwargs: are submitted to the statistic function \n \n \"\"\"\n if isinstance(function, basestring):\n if function.endswith('ci'):\n if len(function) > 2:\n a = float(function[:-2])\n elif a == 1:\n a = .95\n function = _statfuncs.CIhw\n elif function.endswith('sem'):\n if len(function) > 3:\n a = float(function[:-3])\n function = scipy.stats.sem\n elif function.endswith('std'):\n if len(function) > 3:\n a = float(function[:-3])\n function = np.std\n if 'ddof' not in kwargs:\n kwargs['ddof'] = 1\n else:\n raise ValueError('unrecognized statistic: %s'%function)\n \n if out in [list, np.array]:\n as_list = [a * function(self.data[i], **kwargs) for i in self.indexes]\n if out is list:\n return as_list\n else:\n return np.array(as_list)\n elif out is dict:\n return dict((i, a * function(self.data[i], **kwargs)) for i in self.indexes)\n else:\n raise ValueError(\"out not in [list, dict]\")\n","sub_path":"eelbrain/vessels/structure.py","file_name":"structure.py","file_ext":"py","file_size_in_byte":6022,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"440514612","text":"from progress.bar import IncrementalBar\nimport librosa\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\n\n# TODO: Fill Inference audio to match size for STFT\n# TODO: Document That I have used zero padding for STFT\n# TODO: Change values in args.py to get 16128Hz as frame rate and all the other parameters.\n# TODO: Rename audio_to_audio_frame_stack() to time_series_to_frame_stack()\n# TODO: Substitute the old functions by the ones here.\n# TODO: Start training from the weights provided in the repo.\n# TODO: Padding noise with something different than zeros\n# (maybe with frames from earlier in the same noise audio).\n# So when the speakers stops talking, the noise persists.\n# TODO: Think of a way to produce more samples by mixing noises or something.\n# Maybe increasing its amplitude. Check blend_noise_randomly()\n# TODO: Handle different encoding types of audio + channels (2 vs 1 channel).\n# Best way is maybe convert from 2 to 1 channel.\n\n\n''''STEPS FOR CREATING DATASET\n1. Audio files to numpy (audio_files())\n 1.1. Load .wav\n 1.2. audio to audio frame stack (audio_to_audio_frame_stack())\n 1.3. Append result from 1.2 to audio list (list_sound_array)\n2. Blend noise randomly (blend_noise_randomly())\n3. noisy_voice_long = reshape\n4. save noisy_voice_long\n5. Repeat 3. & 4. 
for voice_long and noise_long\n'''\n\n\n\ndef audio_files_to_numpy(audio_dir, list_audio_files, sample_rate, frame_length, hop_length_frame, min_duration):\n \"\"\"This function take audio files of a directory and merge them\n in a numpy matrix of size (total_frames,frame_length)\n for a sliding window of size hop_length_frame\n\n Args:\n audio_dir (str): Directory where audios are located\n list_audio_files (list): string list of names of audio files.\n sample_rate (int): Sample rate of audios.\n frame_length (int): Length of frames.\n min_duration (int): Mininum duration of the audios to be added.\n\n Returns:\n np.ndarray: numpy ndarray shape (total_frames, frame_length).\n \"\"\"\n\n list_sound_array = []\n list_audio_below_min_duration = []\n\n print(\"Total files to be processed: {}\".format(len(list_audio_files)))\n\n for file in IncrementalBar('Audio file to numpy').iter(list_audio_files):\n # open the audio file\n y, sr = librosa.load(os.path.join(audio_dir, file), sr=sample_rate)\n total_duration = librosa.get_duration(y=y, sr=sr)\n\n if (total_duration >= min_duration):\n list_sound_array.append(\n audio_to_audio_frame_stack(\n sound_data=y,\n frame_length=frame_length,\n hop_length_frame=hop_length_frame))\n\n else:\n list_audio_below_min_duration.append(os.path.join(audio_dir, file))\n print(\"The following file {} is below the min duration\".format(os.path.join(audio_dir, file)))\n\n if list_audio_below_min_duration:\n print(\"Following files are below minimum duration ({} seconds) and won't be included: \".format(min_duration))\n for audio_below_duration in list_audio_below_min_duration:\n print(\" {}\".format(audio_below_duration))\n\n if list_sound_array:\n return np.vstack(list_sound_array)\n else:\n print(\"There aren't any files above minimum duration ({} seconds).\".format(min_duration))\n\n\ndef audio_to_audio_frame_stack(sound_data, frame_length, hop_length_frame, include_if_bigger_than=0.2):\n \"\"\"This function take an audio and split into several frames\n in a numpy matrix of size (number_of_frames,frame_length).\n\n Args:\n sound_data (list): List of amplitudes returned from librosa.load().\n frame_length (int): Length of frames.\n hop_length_frame (int): Sliding window.\n include_if_bigger_than (float): Value between 0 and 1. Default to 0.2. 
Include last window (that will be padded) if it is greater than a percentage of the sliding window.\n Returns:\n np.ndarray: Multidimensional array of shape (number_of_frames, frame_length).\n I\n note: to match window size (i.e hop_length_frame), it applies a zero padding.\n \"\"\"\n\n sound_data_list = []\n time_series_length = sound_data.shape[0]\n for start in range(0, time_series_length, hop_length_frame):\n frame = sound_data[start:(start + frame_length)]\n if(frame.shape[0] == hop_length_frame):\n sound_data_list.append(frame)\n elif(frame.shape[0] < hop_length_frame and frame.shape[0] > (include_if_bigger_than * hop_length_frame)):\n # if it is the last element, add zero padding to match hop_length_frame\n frame = np.pad(frame, (0, hop_length_frame-frame.shape[0]), 'constant')\n sound_data_list.append(frame)\n\n return np.vstack(sound_data_list)\n\n\ndef audio_to_magnitude_db_and_phase(n_fft, hop_length_fft, audio):\n \"\"\"This function takes an audio and convert into spectrogram,\n it returns the magnitude in dB and the phase\"\"\"\n '''\n print(\"\\naudio\")\n print(audio)\n print(audio.shape)\n\n print(\"n_fft\")\n print(n_fft)\n\n print(\"hop_length_fft\")\n print(hop_length_fft)\n '''\n stftaudio = librosa.stft(audio, n_fft=n_fft, hop_length=hop_length_fft)\n stftaudio_magnitude, stftaudio_phase = librosa.magphase(stftaudio)\n '''\n print(\"stftaudio\")\n print(stftaudio)\n print(stftaudio.shape)\n '''\n stftaudio_magnitude_db = librosa.amplitude_to_db(\n stftaudio_magnitude, ref=np.max)\n '''\n print(\"stftaudio magnitude and phase shapes: \")\n print(stftaudio_magnitude_db.shape)\n print(stftaudio_phase.shape)\n '''\n return stftaudio_magnitude_db, stftaudio_phase\n\n\ndef numpy_audio_to_matrix_spectrogram(numpy_audio, dim_square_spec, n_fft, hop_length_fft):\n \"\"\"This function takes as input a numpy audio of size (nb_frame,frame_length), and return\n a numpy containing the matrix spectrogram for amplitude in dB and phase. It will have the size\n (nb_frame,dim_square_spec,dim_square_spec)\"\"\"\n nb_audio = numpy_audio.shape[0]\n m_mag_db = np.zeros((nb_audio, dim_square_spec, dim_square_spec))\n m_phase = np.zeros((nb_audio, dim_square_spec, dim_square_spec), dtype=complex)\n\n for i in IncrementalBar('Numpy audio to matrix spectrogram').iter(range(nb_audio)):\n m_mag_db[i, :, :], m_phase[i, :, :] = audio_to_magnitude_db_and_phase(\n n_fft, hop_length_fft, numpy_audio[i])\n\n print('\\nShape of Spectrograms generated: {}\\n'.format(m_mag_db.shape[1:]))\n return m_mag_db, m_phase\n\n\ndef save_audio(y, sample_rate, output_name='audio_ouput.wav'):\n '''\n Save audio file given y (amplitude values) and sample_rate.\n By default, output name is 'audio_output.wav'\n\n Args:\n y (ndarray): shape should be (n,) or (2,n).\n '''\n librosa.output.write_wav(output_name, y, sample_rate)\n print(\"Duration of written audio is: {} seconds.\".format(librosa.get_duration(y=y, sr=sample_rate)))\n\n\ndef plot_time_series(time_series_list, time_series_titles):\n '''Plot a list of time series in different subfigures.\n args:\n time_series_list: List of time_series to plot\n time_series_titles: list of strings containing titles of subplots\n '''\n if (len(time_series_list) != len(time_series_titles)):\n raise Exception(\"time_series_list and time_series_titles should have the same lenght. 
There should be a title for each time serie.\")\n fig, axs = plt.subplots(len(time_series_list), sharex=True, sharey=True, gridspec_kw={'hspace': 0})\n for idx, time_serie in enumerate(time_series_list):\n fig.suptitle('Time series')\n axs[idx].plot(time_serie)\n axs[idx].set_title(time_series_titles[idx], loc='right')\n plt.show()\n\n\n\n\ndef main():\n audio_dir = 'spectrogramVisualizing/medium'\n sample_rate = 8000 # Hz\n frame_length = sample_rate + 64 # a bit more than sample_rate for avoiding overlapping.\n min_duration = 1 # Seconds\n hop_length_frame = sample_rate + 64\n n_fft = 255\n hop_length_fft = 63\n\n\n path_save_spectrogram = 'Train/spectrogram/'\n path_save_time_serie = 'Train/time_serie/'\n path_save_sound = 'Train/sound/'\n path_train = \"Train/finished_28spk_para_example/\"\n\n\n\n audio_folders = sorted(os.listdir(path_train)) # get folder names\n print(audio_folders)\n clean_list = []\n noise_list = []\n for file_name in IncrementalBar('Processing').iter(audio_folders):\n print(\"\\n{}\\n\".format(file_name))\n path_to_audio = path_train + file_name\n path_to_clean = path_to_audio +'/clean.wav'\n path_to_noise = path_to_audio + '/noise.wav'\n print(\"path to audio: {}\".format(path_to_audio))\n print(\"path to clean: {}\".format(path_to_clean))\n print(\"path to noise: {}\".format(path_to_noise))\n clean_list.append(path_to_clean)\n noise_list.append(path_to_noise)\n\n # 1. Audio files to numpy\n # audio_dir = 'spectrogramVisualizing/All_together'\n audio_dir = ''\n # [START] AUDIO FILES TO NUMPY + SAVE LONG WAVES\n import glob\n # clean_list = [os.path.basename(x) for x in sorted(glob.glob(\"{}/clean*\".format(audio_dir)))]\n # noise_list = [os.path.basename(x) for x in sorted(glob.glob(\"{}/noise*\".format(audio_dir)))]\n # print(clean_list)\n # print(noise_list)\n\n # Squared spectrogram dimensions\n dim_square_spec = int(n_fft / 2) + 1\n\n # CLEAN VOICE\n clean_voice = audio_files_to_numpy(audio_dir, clean_list[6000:], sample_rate, frame_length, hop_length_frame, min_duration)\n save_audio(clean_voice.flatten(), sample_rate, \"clean_long.wav\")\n # Save to disk for Training / QC\n np.save(path_save_time_serie + 'voice_timeserie', clean_voice)\n # Create Amplitude and phase of the sounds\n m_amp_db_voice, m_pha_voice = numpy_audio_to_matrix_spectrogram(\n clean_voice, dim_square_spec, n_fft, hop_length_fft)\n np.save(path_save_spectrogram + 'voice_amp_db', m_amp_db_voice)\n np.save(path_save_spectrogram + 'voice_pha_db', m_pha_voice)\n\n\n\n # NOISE\n noise = audio_files_to_numpy(audio_dir, noise_list[6000:], sample_rate, frame_length, hop_length_frame, min_duration)\n save_audio(noise.flatten(), sample_rate, \"noise_long.wav\")\n # Save to disk for Training / QC\n np.save(path_save_time_serie + 'noise_timeserie', noise)\n # Create Amplitude and phase of the sounds\n m_amp_db_noise, m_pha_noise = numpy_audio_to_matrix_spectrogram(\n noise, dim_square_spec, n_fft, hop_length_fft)\n np.save(path_save_spectrogram + 'noise_amp_db', m_amp_db_noise)\n np.save(path_save_spectrogram + 'noise_pha_db', m_pha_noise)\n\n\n # NOISY FILE\n noisy = clean_voice + noise\n print(\"shape of clean_voice: {}\".format(clean_voice.shape))\n print(\"shape of noisy: {}\".format(noisy.shape))\n print(\"shape of noise: {}\".format(noise.shape))\n print(\"\\n\\n NaN in CLEAN: {}\\n\\n\".format(np.isnan(clean_voice).any()))\n print(\"\\n\\n NaN in NOISE: {}\\n\\n\".format(np.isnan(noise).any()))\n print(\"\\n\\n NaN in NOISY: {}\\n\\n\".format(np.isnan(noisy).any()))\n\n if 
np.isnan(clean_voice).any():\n print(np.argwhere(np.isnan(clean_voice)))\n if np.isnan(noise).any():\n print(np.argwhere(np.isnan(noise)))\n if np.isnan(noisy).any():\n print(np.argwhere(np.isnan(noisy)))\n save_audio(noisy.flatten(), sample_rate, \"noisy_long.wav\")\n # Save to disk for Training / QC\n np.save(path_save_time_serie + 'noisy_voice_timeserie', noisy)\n # Create Amplitude and phase of the sounds\n m_amp_db_noisy_voice, m_pha_noisy_voice = numpy_audio_to_matrix_spectrogram(\n noisy, dim_square_spec, n_fft, hop_length_fft)\n np.save(path_save_spectrogram + 'noisy_voice_amp_db', m_amp_db_noisy_voice)\n np.save(path_save_spectrogram + 'noisy_voice_pha_db', m_pha_noisy_voice)\n\n\n print(\"shape of clean_voice: {}\".format(clean_voice.shape))\n print(\"shape of noisy: {}\".format(noisy.shape))\n print(\"shape of noise: {}\".format(noise.shape))\n # [END] AUDIO FILES TO NUMPY + SAVE LONG WAVES\n\n\n '''\n # Display a spectrogram\n\n import matplotlib.pyplot as plt\n librosa.display.specshow(librosa.amplitude_to_db(D,ref=np.max), y_axis='log', x_axis='time')\n plt.title('Power spectrogram')\n plt.colorbar(format='%+2.0f dB')\n plt.tight_layout()\n plt.show()\n '''\n\n\n\n\n\n\n\n\n\n # TODO: Dimensions of histogram\n # TODO: create spectrograms in numpy arrays\n # TODO: Save spectrograms on disk\n\nif __name__== \"__main__\":\n main()\n\n\n\n'''\nNAN VALUES OF NOISY\n[[10856 6347]\n [10856 6402]\n [10856 6403]\n [10856 6414]\n [10856 6415]\n [10856 6474]\n [10856 6475]\n [10856 6486]\n [10856 6487]\n [10856 6526]\n [10856 6527]\n [10856 6538]\n [10856 6539]\n [10856 6550]\n [10856 6551]\n [10856 6562]\n [10856 6563]\n [10856 6854]\n [10856 6855]\n [12471 1800]\n [12471 2124]\n [12471 2125]\n [12471 2132]\n [12677 3318]\n [12677 3319]\n [13421 2945]]\n\n'''\n","sub_path":"spectrogramVisualizing.py","file_name":"spectrogramVisualizing.py","file_ext":"py","file_size_in_byte":12927,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"159906686","text":"from django import forms\nfrom article.models import Article\n\n\nclass UserArticleQueryForm(forms.ModelForm):\n class Meta:\n model = Article\n fields = [\n 'article_link',\n ]\n widgets = {\n 'article_link': forms.TextInput(attrs={\n 'placeholder': 'Enter the url of a blog article.'\n })\n }\n\n\n","sub_path":"article/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"435180573","text":"import torch\r\nimport torch.nn.functional as F\r\n\r\nclass Net(torch.nn.Module):\r\n def __init__(self,**kwargs):\r\n super(Net,self).__init__()\r\n\r\n # Net accepts dictionary of inputs\r\n self.args = kwargs \r\n\r\n # defining RNN topology\r\n self.input_size = self.args['input_size'] # defining feature size\r\n self.hs1 = self.args['hs_1'] # hidden size is equivalent to the number of nodes in hidden layers\r\n self.hs2 = self.args['hs_2'] # number of layers\r\n self.output_size = self.args['output_size'] # defining output size \r\n\r\n # defining neural architecture\r\n self.hidden1 = torch.nn.Linear(self.input_size, self.hs1) # neurons in hidden layer1\r\n self.hidden2 = torch.nn.Linear(self.hs1, self.hs2) # neurons in hidden layer2\r\n\r\n # defining output layer\r\n self.p_mu = torch.nn.Linear(self.hs2, self.output_size) # mean outputs\r\n self.p_std = torch.nn.Linear(self.hs2, self.output_size) # std outputs\r\n \r\n def forward(self, inputs):\r\n\r\n x = inputs \r\n 
z = torch.tanh(self.hidden1(x)) # activation function of hidden layer 1\r\n        y = torch.tanh(self.hidden2(z)) # activation function of hidden layer 2\r\n\r\n        mu = self.p_mu(y) # linear output layer for mean prediction\r\n        std = self.p_std(y) # linear output layer for std prediction\r\n\r\n        mu = torch.sigmoid(mu) # activation layer\r\n        std = torch.sigmoid(std) # activation layer\r\n\r\n        return (mu, std)\r\n\r\n    def weights_init(self, m):\r\n        # function to reset weights after each fold\r\n        if isinstance(m, torch.nn.Linear):\r\n            torch.nn.init.xavier_uniform_(m.weight.data)\r\n            if m.bias is not None:\r\n                torch.nn.init.zeros_(m.bias)\r\n","sub_path":"ANNClass.py","file_name":"ANNClass.py","file_ext":"py","file_size_in_byte":2070,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"541559182","text":"# The main code that will be running in a loop forever to operate the fishtank. Feel free to use and/or modify this\n# for your own projects. :)\n# by Brittany A Howard. Contact: howardba@umich.edu\n\nimport time\nimport datetime\nfrom configparser import ConfigParser\nfrom dateutil.relativedelta import relativedelta\n\nimport picamera\nimport tweepy\n\n# import the darkopy functions\nfrom darkopy import timeset\nfrom darkopy import temp\nfrom darkopy import light\nfrom darkopy import twitter\nfrom darkopy import camera\n\n\n# this is the main function for the darkopy project. It keeps track of time and prompts everything to happen.\ndef run(verbose=False, debug=False):\n\n    # read the config file (this also sets up the Twitter API connection used below)\n    desired_temp, temp_tolerance, image_resolution, \\\n        video_resolution, image_path, video_path, video_length, \\\n        cfg, twitter_api = read_config('config.cfg', verbose=verbose, debug=debug)\n\n    # set up timing\n    t0 = datetime.datetime.today()  # local time --- NO, THIS IS UTC, ISNT IT?\n\n    # set the first time we want each action to occur\n    time_clean, time_selfie, time_video, time_food = set_first_times(t0)\n\n    # set up how often each action is prompted\n    delta_clean, delta_selfie, delta_video, delta_food = set_deltas()\n\n    # set time tolerance - don't make this too low! It needs to be a few times the runtime of this code.\n    # TODO: definitely needs to be adjustable\n    time_tol = datetime.timedelta(minutes=3)\n\n    print(\"Settings set.\")\n\n    # set some counters for naming files\n    image_num = 1\n    video_num = 1\n\n    # start infinite loop\n    if verbose:\n        print(\"All set up. Leggo!\")\n\n    while True:\n\n        # get the current time\n        time_now = datetime.datetime.today()\n        if debug:\n            print('Current time: {}'.format(time_now))\n\n        # turn the heater on or off if needed\n        temp.control_temp(desired_temp=desired_temp,\n                          temp_tolerance=temp_tolerance,\n                          verbose=verbose)\n\n        # check: is it time for a selfie?\n        if time_selfie-time_tol <= time_now <= time_selfie+time_tol:\n\n            image_path = camera.take_image(image_resolution=image_resolution,\n                                           image_path=image_path,\n                                           image_num=image_num,\n                                           verbose=verbose)\n\n            # TODO: make a list of messages which can be chosen randomly\n            # EVEN BETTER IDEA HOLY SHIT: USE MARKOV CHAIN AND TRUMP TWEETS SO\n            # THAT MY FISH WILL SAY TRUMP-LIKE THINGS\n            # https://filiph.github.io/markov/\n            tweet_message = '#IWokeUpLikeThis #darkopy'\n            twitter.write_tweet_text_image(tweet_message=tweet_message,\n                                           image_path=image_path,\n                                           cfg=cfg,\n                                           twitter_api=twitter_api,\n                                           verbose=verbose)\n\n            image_num += 1\n\n            # perhaps say if image_num is odd, post to twitter, if not, just save it\n            time_selfie += delta_selfie  # might need relativedelta for day/month overflows\n\n        # check: is it time for a water change? -- weekly\n        if time_clean-time_tol <= time_now <= time_clean+time_tol:\n\n            tweet_message = 'Time for a water change! #darkopy'\n            twitter.write_tweet_text_only(tweet_message, cfg, twitter_api)\n\n            time_clean += delta_clean  # might need relativedelta for day/month overflows\n\n        # check: is it time for food? --- daily\n        if time_food-time_tol <= time_now <= time_food+time_tol:\n\n            tweet_message = 'Feeeeeeeeed meeeeeeeeeee. #darkopy'\n            twitter.write_tweet_text_only(tweet_message=tweet_message,\n                                          cfg=cfg,\n                                          twitter_api=twitter_api)\n\n            time_food += delta_food  # might need relativedelta for day/month overflows\n\n        # check: is it time for a video?\n        if time_video-time_tol <= time_now <= time_video+time_tol:\n\n            video_path = camera.take_video(video_resolution=video_resolution,\n                                           video_length=video_length,\n                                           video_path=video_path,\n                                           video_num=video_num,\n                                           verbose=verbose)\n\n            # only post every other video on twitter so it doesn't get obnoxious\n            if video_num % 2 == 0:\n\n                tweet_message = 'Video killed the radio star. #darkopy'\n                twitter.write_tweet_text_video(tweet_message=tweet_message,\n                                               video_path=video_path,\n                                               cfg=cfg,\n                                               twitter_api=twitter_api,\n                                               verbose=verbose)\n\n            video_num += 1\n\n            time_video += delta_video  # might need relativedelta for day/month overflows\n\n        # set a pause so this doesn't repeat too quickly and overwhelm the pi\n        time.sleep(60)\n\n\ndef set_first_times(t0):\n    # TODO: should probably make the times into arguments/things in the config file\n    time_clean = timeset.set_time_clean(t0)  # we want this at 7pm, the sunday after the loop begins\n    time_selfie = timeset.set_time_selfie(t0)  # we want this at the next hour\n    time_video = timeset.set_time_video(t0)  # we want this at the next half hour\n    time_food = timeset.set_time_food(t0)  # we want this at 6:30 pm\n\n    return(time_clean, time_selfie, time_video, time_food)\n\n\ndef set_deltas():\n    # set up how often each action is prompted\n    # TODO: make these arguments, or something to set in the config file?\n    delta_clean = datetime.timedelta(days=7)\n    delta_selfie = datetime.timedelta(hours=1)\n    delta_video = datetime.timedelta(hours=1)\n    delta_food = datetime.timedelta(days=1)\n\n    return(delta_clean, delta_selfie, delta_video, delta_food)\n\n\ndef read_config(config_filename, verbose=False, debug=False):\n    # load config file\n    parser = ConfigParser()\n    parser.read(config_filename)\n    if debug:\n        print('Config file parsed')\n\n    # set up twitter\n    cfg = parser.get('Twitter', 'cfg')  # twitter uses odd config file formats\n    twitter_api = twitter.get_api(cfg)\n    if verbose:\n        print(\"Twitter configured.\")\n\n    # set desired temp for aquarium\n    desired_temp = parser.get('Temperature', 'desired_tank_temp')\n    temp_tolerance = parser.get('Temperature', 'temp_tolerance')\n\n    # camera settings\n    image_resolution = parser.get('Camera', 'image_resolution')\n    video_resolution = parser.get('Camera', 'video_resolution')\n    image_path = parser.get('Camera', 'image_path')\n    video_path = parser.get('Camera', 'video_path')\n    video_length = parser.get('Camera', 'video_length')\n\n    return(desired_temp, temp_tolerance, image_resolution, video_resolution, image_path, video_path, video_length,\n           cfg, twitter_api)\n","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":7037,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"610448012","text":"from __future__ import absolute_import\nfrom flask import Flask\nfrom celery import Celery\nimport jinja2\n\n#\n# CELERY Object Factory\n#\n\ndef create_celery(app=None):\n    \"\"\"\n    CREATE THE CELERY OBJECT\n    \"\"\"\n    app = app or create_app()\n    celery = Celery(__name__, broker=app.config['CELERY_BROKER_URL'])\n    celery.conf.update(app.config)\n    celery.config_from_object('celeryconf')\n    TaskBase = celery.Task\n\n    class ContextTask(TaskBase):\n        abstract = True\n\n        def __call__(self, *args, **kwargs):\n            with app.app_context():\n                return TaskBase.__call__(self, *args, **kwargs)\n\n    celery.Task = ContextTask\n    celery.app = app\n    return celery\n\n#\n# FLASK Object Factory\n# BROKER_URL = 'redis://localhost:6379/0'\n\ndef create_app():\n    \"\"\"\n    CREATE THE FLASK APP\n    \"\"\"\n    app = Flask(__name__)\n    app.config['SECRET_KEY'] = 'top-secret!'\n    app.config['CELERY_BROKER_URL'] = 'redis://localhost:6379/0'\n    app.config['CELERY_RESULT_BACKEND'] = 'redis://localhost:6379/0'\n    my_loader = jinja2.ChoiceLoader([\n        app.jinja_loader,\n        jinja2.FileSystemLoader('/Users/efevngrs/PycharmProjects/Feliza/framework/templates')\n    ])\n    app.jinja_loader = my_loader\n    return 
app\n","sub_path":"config/factory.py","file_name":"factory.py","file_ext":"py","file_size_in_byte":1224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"511469826","text":"#!/usr/bin/env python\n\n# Name: pl_stats.py\n# Desc: Calculate statistics for Prediction League\n\nimport logging\nimport json\nimport os\n\nimport pygsheets\n\n\ngs_key_file = '.gsheets.service.json'\n\nlogger = logging.getLogger(__name__)\n\nplayers = []\n\nstatistic_order = ['pos_wrestler', 'neg_wrestler', 'overvalued_wrestler', 'undervalued_wrestler', 'pos_title', 'neg_title', 'pos_matchtype', 'neg_matchtype',\n 'pos_ppv', 'neg_ppv', 'pos_card', 'neg_card']\n\nstat_info = {\n 'pos_wrestler':\n {\n 'order': 1,\n 'positive': True,\n 'show': 5,\n 'label': 'Most Accurate Wrestlers',\n 'desc': 'These are the most accurate predictions for when a wrestler is involved in a match (winner or loser).',\n 'count': {\n 'type': 'player',\n 'group': 'wrestlers',\n 'select': 'correct',\n },\n 'total': {\n 'type': 'player',\n 'group': 'wrestlers',\n 'select': 'all',\n }\n },\n 'neg_wrestler':\n {\n 'order': 2,\n 'positive': False,\n 'show': 5,\n 'label': 'Least Accurate Wrestlers',\n 'desc': 'These are the most accurate predictions for when a wrestler is involved in a match (winner or loser).',\n 'count': {\n 'type': 'player',\n 'group': 'wrestlers',\n 'select': 'correct',\n },\n 'total': {\n 'type': 'player',\n 'group': 'wrestlers',\n 'select': 'all',\n }\n },\n 'overvalued_wrestler':\n {\n 'order': 3,\n 'positive': False,\n 'show': 5,\n 'label': 'Most Overvalued Wrestlers',\n 'desc': 'How often the wrestler lost when picked by the player to win',\n 'count': {\n 'type': 'player',\n 'group': 'player_winners',\n 'select': 'correct',\n },\n 'total': {\n 'type': 'player',\n 'group': 'player_winners',\n 'select': 'all',\n }\n },\n 'undervalued_wrestler':\n {\n 'order': 4,\n 'positive': False,\n 'show': 5,\n 'label': 'Most Undervalued Wrestlers',\n 'desc': \"How often the player picked the wrestler to win out of all of the wrestler's wins\",\n 'count': {\n 'type': 'player',\n 'group': 'player_winners',\n 'select': 'correct',\n },\n 'total': {\n 'type': 'player',\n 'group': 'match_winners',\n 'select': 'all',\n }\n },\n 'pos_title':\n {\n 'order': 5,\n 'label': 'Most Accurate Title Matches',\n 'positive': True,\n 'show': 5,\n 'desc': 'How often the player correctly guessed the result of a match for a specific title',\n 'count': {\n 'type': 'player',\n 'group': 'titles',\n 'select': 'correct',\n },\n 'total': {\n 'type': 'player',\n 'group': 'titles',\n 'select': 'all',\n }\n },\n 'neg_title':\n {\n 'order': 6,\n 'label': 'Least Accurate Title Matches',\n 'positive': False,\n 'show': 5,\n 'desc': 'How often the player correctly guessed the result of a match for a specific title',\n 'count': {\n 'type': 'player',\n 'group': 'titles',\n 'select': 'correct',\n },\n 'total': {\n 'type': 'player',\n 'group': 'titles',\n 'select': 'all',\n }\n },\n 'pos_matchtype':\n {\n 'order': 7,\n 'label': 'Most Accurate Match Types',\n 'positive': True,\n 'show': 5,\n 'minimum': 2,\n 'desc': 'How often the player correctly guessed the result of a match for a specific match type',\n 'count': {\n 'type': 'player',\n 'group': 'match_types',\n 'select': 'correct',\n },\n 'total': {\n 'type': 'player',\n 'group': 'match_types',\n 'select': 'all',\n }\n },\n 'neg_matchtype':\n {\n 'order': 8,\n 'label': 'Least Accurate Match Types',\n 'positive': False,\n 'show': 5,\n 'minimum': 2,\n 'desc': 'How often the 
player correctly guessed the result of a match for a specific match type',\n 'count': {\n 'type': 'player',\n 'group': 'match_types',\n 'select': 'correct',\n },\n 'total': {\n 'type': 'player',\n 'group': 'match_types',\n 'select': 'all',\n }\n },\n 'pos_ppv':\n {\n 'order': 9,\n 'label': 'Most Accurate PPV Series',\n 'positive': True,\n 'show': 5,\n 'minimum': 2,\n 'minimum_database': 'ppvs',\n 'minimum_noun': 'PPVs',\n 'desc': 'The most accurate complete pay-per-view series',\n 'count': {\n 'type': 'player',\n 'group': 'ppvs',\n 'select': 'correct',\n },\n 'total': {\n 'type': 'player',\n 'group': 'ppvs',\n 'select': 'all',\n }\n },\n 'neg_ppv':\n {\n 'order': 10,\n 'label': 'Least Accurate PPV Series',\n 'positive': False,\n 'show': 5,\n 'minimum': 2,\n 'minimum_database': 'ppvs',\n 'minimum_noun': 'PPVs',\n 'desc': 'The least accurate complete pay-per-view series',\n 'count': {\n 'type': 'player',\n 'group': 'ppvs',\n 'select': 'correct',\n },\n 'total': {\n 'type': 'player',\n 'group': 'ppvs',\n 'select': 'all',\n }\n },\n 'pos_card':\n {\n 'order': 11,\n 'label': 'Most Accurate Cards (Individual PPVs)',\n 'positive': True,\n 'show': 5,\n 'minimum': 4,\n 'desc': 'The most accurate complete pay-per-view cards',\n 'count': {\n 'type': 'player',\n 'group': 'cards',\n 'select': 'correct',\n },\n 'total': {\n 'type': 'player',\n 'group': 'cards',\n 'select': 'all',\n }\n },\n 'neg_card':\n {\n 'order': 12,\n 'label': 'Least Accurate Cards (Individual PPVs)',\n 'positive': False,\n 'show': 5,\n 'minimum': 4,\n 'desc': 'The least accurate complete pay-per-view cards',\n 'count': {\n 'type': 'player',\n 'group': 'cards',\n 'select': 'correct',\n },\n 'total': {\n 'type': 'player',\n 'group': 'cards',\n 'select': 'all',\n }\n },\n}\n\nstatics = {\n 'name_row': 1,\n 'data_row': 2,\n 'title_column': 1,\n 'a_match_column': 2,\n 'b_match_column': 3,\n 'winner_column': 4,\n 'name_column': 5,\n }\n\ntag_teams = {\n 'Bludgeon Brothers': ['Erick Rowan', 'Luke Harper'],\n 'Gallows & Anderson': ['Luke Gallows', 'Karl Anderson'],\n 'Hardy Boyz': ['Matt Hardy', 'Jeff Hardy'],\n 'The Hardy Boyz': ['Matt Hardy', 'Jeff Hardy'],\n 'Hype Bros': ['Zack Ryder', 'Mojo Rawley'],\n 'The Revival': ['Dash Wilder', 'Scott Dawson'],\n 'The Shield': ['Seth Rollins', 'Roman Reigns', 'Dean Ambrose'],\n 'New Day': ['Kofi Kingston', 'Big E', 'Xavier Woods'],\n 'The Bar': ['Sheamus', 'Cesaro'],\n 'Sheamus & Cesaro': ['Sheamus', 'Cesaro'],\n 'American Alpha': ['Jason Jordan', 'Chad Gable'],\n 'Breezango': ['Fandango', 'Tyler Breeze'],\n 'The Usos': ['Jimmy Uso', 'Jey Uso'],\n 'Vaudevillians': ['Aiden English', 'Simon Gotch'],\n 'The Ascension': ['Konnor', 'Viktor'],\n 'Enzo & Cass': ['Enzo Amore', 'Big Cass'],\n 'The B-Team': ['Bo Dallas', 'Curtis Axel']\n}\n\ncruiserweights = ['The Brian Kendrick', 'Enzo Amore', 'Neville', 'TJ Perkins', 'Lince Dorado', 'Kalisto', 'Drew Gulak', 'Noam Dar',\n 'Rich Swann', 'Akira Tozawa']\n\nrenames = {\n 'WWE': 'WWE Championship',\n 'RAW Tag': 'Raw Tag',\n 'SDL Tag': 'Smackdown Tag',\n 'RAW Women': \"Raw Women\",\n 'SDL Women': \"Smackdown Women\",\n 'Intercontinental': 'Intercontinental',\n 'US': 'United States',\n 'Cruiserweight': 'Cruiserweight',\n}\n\ndatabases = {'wrestlers': [], 'match_types': [], 'titles': [], 'ppvs': []}\n\nuncounted = ['Draw', 'Royal Rumble']\n\noverall_wins = {}\noverall_losses = {}\n\n\nclass Player:\n \"\"\"\n Player class with pick result data\n \"\"\"\n\n def __init__(self, raw_data, column):\n \"\"\"\n Initialize Player class\n :param raw_data: raw_data matrix 
of PPV card\n \"\"\"\n self.name = str(raw_data[statics.get('name_row')][column]).capitalize() # Player Name\n\n if len(self.name) < 3:\n self.name = self.name.upper()\n\n self.temp = { # Temporary data for each PPV\n 'raw_data': raw_data, # Raw data group from PPV card\n 'column': column, # Associated name column for player\n }\n self.stats = { # Statistics groups\n 'picks': 0, # Total number of picks\n 'points': 0, # Total number of correct guesses\n 'meta': { # Meta categories for statistics\n 'match_types': {}, # Different match types\n 'titles': {}, # Different championships on the line\n 'wrestlers': {}, # Wrestlers involved in the match\n 'match_winners': {}, # Correct/incorrect for winners of the match\n 'player_winners': {}, # Correct/incorrect for picked winners\n 'against_crowd': {'correct': 0, 'incorrect': 0, # Accuracy when picking against everybody else\n 'correct_list': [], 'incorrect_list': []}, # List of recorded matches against everybody else\n 'ppvs': {}, # Annual PPV tracking\n 'cards': {} # Individual PPV card tracking\n },\n }\n\n def get_pick(self, row):\n \"\"\"\n Get a list of picks for a certain row\n :param row: row to parse\n :return: list of wrestlers\n \"\"\"\n\n return parse_names(self.temp['raw_data'][row][self.temp['column']])\n\n def add_result(self, result, group, item=None):\n \"\"\"\n Add result to a meta group\n :param result: 'correct' or 'incorrect'\n :param group: category of item\n :param item: individual within the group\n :return: nothing\n \"\"\"\n\n if item == '' or item in uncounted:\n return\n\n # if self.name == 'David' and str(group).endswith('winners') and item == 'Matt Hardy':\n # print('{} : {} {} {}'.format(self.name, group, item, result))\n\n # Skip but warn with invalid result\n if result != 'correct' and result != 'incorrect':\n logger.warning(\"Invalid result: '{}'. 
Valid results: 'correct', 'incorrect'\".format(result))\n return\n\n # Ignore meta team names\n if group == 'wrestlers' or group == 'winners' or group == 'losers':\n if str(item).startswith('Raw') or str(item).startswith('SDL') or str(item).startswith('Men') or str(item).startswith('Women'):\n return\n\n # Skip but warn with invalid groups\n if group not in self.stats['meta'].keys():\n logger.warning(\"Invalid Player group: {} (Valid groups: {})\".format(group, ', '.join(self.stats['meta'].keys())))\n return\n\n if renames.get(item):\n item = renames.get(item)\n\n if item in cruiserweights:\n return\n\n # Increment group without item\n if not item:\n try:\n self.stats['meta'][group][result] += 1\n except KeyError:\n print(\"item: {}, group: {}, result: {}\".format(item, group, result))\n print(json.dumps(self.stats['meta']))\n return\n\n # Initialize an item if it doesn't exist in the group\n if item not in self.stats['meta'][group].keys():\n self.stats['meta'][group][item] = {'correct': 0, 'incorrect': 0}\n\n # Add a result for the item in the group\n self.stats['meta'][group][item][result] += 1\n\n\nclass Match:\n \"\"\"\n Match object with match data\n \"\"\"\n\n def __init__(self, raw_data, row):\n \"\"\"\n Initialize a new match\n :param raw_data: full cell matrix from PPV sheet\n :param row: row to load the match from\n \"\"\"\n self.raw_data = raw_data\n self.row = row\n self.all_picks = []\n\n column = statics.get('name_column')\n\n while self.raw_data[row][column]:\n self.all_picks.append(parse_names(self.raw_data[row][column]))\n column += 1\n\n def describe(self):\n \"\"\"\n Compile an easy to read string of match data\n :return: string\n \"\"\"\n\n if len(self.get_types()) == 0 or self.get_types()[0] == '':\n type_text = 'regular'\n else:\n type_text = ', '.join(self.get_types())\n\n if len(self.get_titles()) == 0 or self.get_titles()[0] == '':\n title_text = ''\n elif len(self.get_titles()) == 1:\n title_text = ' for the {} title'.format(self.get_titles()[0])\n else:\n title_text = ' for the {} titles'.format(', '.join(self.get_titles()))\n\n return str('{} defeated {} in a {} match{}'.format(\n ', '.join(self.get_wrestlers('winners')), ', '.join(self.get_wrestlers('losers')), type_text, title_text))\n\n def get_titles(self):\n \"\"\"\n Return a list of titles in title cell\n :return: list object containing all match titles\n \"\"\"\n\n titles = []\n\n cell_data = self.raw_data[self.row][statics.get('title_column')].split('\\n')\n\n for data in cell_data:\n if data.startswith('('):\n continue\n else:\n titles.append(data)\n\n for title in titles:\n if title not in databases['titles']:\n databases['titles'].append(title)\n\n return titles\n\n def get_types(self):\n \"\"\"\n Return a list of types in title cell\n :return: list object containing all match types\n \"\"\"\n\n types = []\n\n cell_data = self.raw_data[self.row][statics.get('title_column')].split('\\n')\n\n for data in cell_data:\n if data.startswith('('):\n types.append(data[data.find(\"(\")+1:data.find(\")\")])\n else:\n continue\n\n for match_type in types:\n if match_type not in databases['match_types']:\n databases['match_types'].append(match_type)\n\n return types\n\n def get_wrestlers(self, modal='all'):\n \"\"\"\n Return a list of wrestlers for a match\n :param modal: all, winners, or losers\n :return: list object containing all wrestlers\n \"\"\"\n\n if modal == 'all':\n wrestlers = parse_names(self.raw_data[self.row][statics.get('a_match_column')]) + 
parse_names(self.raw_data[self.row][statics.get('b_match_column')])\n\n        elif modal == 'winners':\n            wrestlers = parse_names(self.raw_data[self.row][statics.get('winner_column')])\n\n        elif modal == 'losers':\n            wrestlers = []\n            for wrestler in parse_names(self.raw_data[self.row][statics.get('a_match_column')]) + parse_names(self.raw_data[self.row][statics.get('b_match_column')]):\n                if wrestler not in parse_names(self.raw_data[self.row][statics.get('winner_column')]) and wrestler != '':\n                    wrestlers.append(wrestler)\n\n        else:\n            logger.warning(\"Invalid modal: '{}'. Valid modals: all, winners, losers\".format(modal))\n            wrestlers = []\n\n        for wrestler in wrestlers:\n            if wrestler not in databases['wrestlers']:\n                databases['wrestlers'].append(wrestler)\n\n        return wrestlers\n\n\ndef parse_names(cell_data):\n    \"\"\"\n    Extract a list of names from a given cell\n    :param cell_data: raw cell data string\n    :return: list object of names found\n    \"\"\"\n\n    changed = True\n    names = [cell_data]\n\n    while changed:\n        changed = False\n        for name in names:\n            if tag_teams.get(name) and not changed:\n                names.remove(name)\n                for new_name in tag_teams.get(name):\n                    names.append(new_name)\n                changed = True\n            for c in [',', 'vs.', '&', '\\n']:\n                if c in name and not changed:\n                    new_names = name.split(c)\n                    names.remove(str(name))\n                    for new_name in new_names:\n                        names.append(new_name)\n                    changed = True\n            if name == '' and not changed:\n                names.remove(name)\n                changed = True\n\n            if str(name).startswith(' ') or str(name).endswith(' '):\n                if not changed:\n                    names.remove(name)\n                    names.append(str(name).strip())\n                    changed = True\n\n    for name in names:\n        if name.lower().startswith('raw ') or name.lower().startswith('sdl '):\n            names.remove(name)\n\n    return names\n\n\ndef open_sheet():\n    \"\"\"\n    Create an API connection to Google Sheets\n    :return: Google Sheets API connection\n    \"\"\"\n\n    sheet_api = pygsheets.authorize(service_file=gs_key_file)\n\n    return sheet_api.open('Prediction League Archive')\n\n\ndef current_totals():\n\n    for player in players:\n        print(\"Player: {0:<20s} Points: {1:d}\".format(player.name, player.stats['points']))\n\n\ndef load_ppv(sheet_api, mode, search):\n    \"\"\"\n    Load data from a PPV sheet and return in a dictionary\n    :param sheet_api: active Google Sheets API connection\n    :param mode: 'title' or 'index'\n    :param search: search item to load\n    :return: dictionary of PPV data\n    \"\"\"\n\n    if mode != 'title' and mode != 'index':\n        logger.warning(\"Invalid mode: '{}'... valid modes: 'title', 'index'\".format(mode))\n        return 0\n\n    sheet = sheet_api.worksheet(mode, search)\n\n    if sheet:\n        if sheet.title == 'Statistics':\n            return 1\n        else:\n            print(\"Loaded PPV: {}\".format(sheet.title))\n\n    else:\n        logger.warning(\"PPV not found: {} = {}\".format(mode, search))\n\n    if sheet.title not in databases['ppvs']:\n        databases['ppvs'].append(sheet.title)\n\n    raw_data = sheet.get_all_values(returnas='matrix')\n\n    name_column = statics.get('name_column')\n\n    for player in players:\n        player.temp['raw_data'] = raw_data\n\n    while name_column < len(raw_data[statics.get('name_row')]) and raw_data[statics.get('name_row')][name_column] != '':\n\n        player_found = False\n\n        for player in players:\n\n            if str(player.name).capitalize() == str(raw_data[statics.get('name_row')][name_column]).capitalize():\n\n                player_found = True\n\n        if not player_found:\n\n            players.append(Player(raw_data, name_column))\n\n        name_column += 1\n\n    data_row = statics.get('data_row')\n\n    while data_row < len(raw_data):\n\n        match = Match(raw_data, data_row)\n\n        if 'Uncounted' in match.get_types():\n            data_row += 1\n            continue\n\n        for player in players:\n\n            try:\n                if player.get_pick(data_row):\n                    player.stats['picks'] += 1\n                else:\n                    continue\n            except IndexError:\n                continue  # Player doesn't exist on this card\n\n            match_winners = set(match.get_wrestlers('winners'))\n            player_winners = set(player.get_pick(data_row))\n\n            if match_winners.issubset(player_winners):\n                player.stats['points'] += 1\n                player.add_result('correct', 'cards', sheet.title)\n                player.add_result('correct', 'ppvs', str(''.join(i for i in sheet.title if not i.isdigit())).strip())\n                if len(player.get_pick(data_row)) == len(match.get_wrestlers('winners')) and match.all_picks.count(player.get_pick(data_row)) == 1:\n                    if (len(player.get_pick(data_row)) * 2) < len(match.all_picks):\n                        player.add_result('correct', 'against_crowd')\n                        player.stats['meta']['against_crowd']['correct_list'].append((', '.join(player_winners), sheet.title))\n                for match_type in match.get_types():\n                    player.add_result('correct', 'match_types', match_type)\n                for title in match.get_titles():\n                    player.add_result('correct', 'titles', title)\n                for wrestler in match.get_wrestlers('all'):\n                    player.add_result('correct', 'wrestlers', wrestler)\n                for wrestler in match_winners:\n                    player.add_result('correct', 'match_winners', wrestler)\n                for wrestler in player_winners:\n                    player.add_result('correct', 'player_winners', wrestler)\n\n            else:\n                player.add_result('incorrect', 'cards', sheet.title)\n                player.add_result('incorrect', 'ppvs', str(''.join(i for i in sheet.title if not i.isdigit())).strip())\n                if len(player.get_pick(data_row)) == len(match.get_wrestlers('losers')) and match.all_picks.count(player.get_pick(data_row)) == 1:\n                    if (len(player.get_pick(data_row)) * 2) < len(match.all_picks):\n                        player.add_result('incorrect', 'against_crowd')\n                        player.stats['meta']['against_crowd']['incorrect_list'].append((', '.join(player_winners), sheet.title))\n                for match_type in match.get_types():\n                    player.add_result('incorrect', 'match_types', match_type)\n                for title in match.get_titles():\n                    player.add_result('incorrect', 'titles', title)\n                for wrestler in match.get_wrestlers('all'):\n                    player.add_result('incorrect', 'wrestlers', wrestler)\n                for wrestler in match_winners:\n                    player.add_result('incorrect', 'match_winners', wrestler)\n                for wrestler in player_winners:\n                    player.add_result('incorrect', 'player_winners', wrestler)\n\n        data_row += 1\n\n    return 1\n\n\ndef write_to_html():\n    \"\"\"\n    Write the statistics 
database to several HTML files\n    :return: nothing\n    \"\"\"\n\n    player_percents = {}\n\n    pwd = os.path.dirname(__file__)  # <-- absolute dir the script is in\n    file_path = 'index.html'\n\n    filename = os.path.join(pwd, file_path)\n\n    main_file = open(filename, 'w+')\n\n    main_file.write('<html>\\n')\n    main_file.write('<body>\\n')\n    main_file.write('<head>\\n\\n<title>Prediction League Stats</title>\\n\\n</head>\\n')\n    main_file.write('<div>\\n<h1>Prediction League Stats</h1>\\n<hr>\\n</div>\\n')\n    main_file.write('<p>\\nClick any name header to see the full list of data for that statistic\\n</p>')\n\n    main_file.write('<hr>')\n    main_file.write('<h2>')\n    main_file.write('Overall Accuracy')\n    main_file.write('</h2>\\n<p>')\n    main_file.write('How often a pick was correct')\n    main_file.write('</p>\\n')\n\n    main_file.write('<table>\\n<tr>')\n\n    for player in players:\n        main_file.write('<th>{}</th>'.format(player.name))\n\n    main_file.write('</tr>\\n<tr>')\n\n    max_against_crowd = 0\n\n    for player in players:\n        ac_correct = player.stats['points']\n        ac_total = player.stats['picks']\n        if ac_total > max_against_crowd:\n            max_against_crowd = ac_total\n        try:\n            main_file.write('<td class=\"{0}\">{1:.0%}'.format(player.name.lower(), ac_correct / ac_total))\n            main_file.write('<br>{1} out of {2}</td>'.format(player.name.lower(), ac_correct, ac_total))\n        except ZeroDivisionError:\n            main_file.write('<td class=\"{0}\"></td>'.format(player.name.lower()))\n\n    main_file.write('</tr>\\n</table>\\n')\n\n    for statistic in statistic_order:\n\n        if 'count' not in stat_info[statistic].keys():\n            return\n\n        if 'minimum' in stat_info[statistic].keys():\n            minimum = stat_info[statistic]['minimum']\n        else:\n            minimum = 3\n\n        main_file.write('<h2>')\n        main_file.write(stat_info[statistic]['label'])\n        main_file.write('</h2>\\n<p>')\n        if 'minimum_noun' in stat_info[statistic].keys():\n            minimum_noun = stat_info[statistic]['minimum_noun']\n        else:\n            minimum_noun = 'matches'\n        main_file.write('(minimum {} {})'.format(minimum, minimum_noun))\n        main_file.write('</p>\\n<p>')\n        main_file.write(stat_info[statistic]['desc'])\n\n        main_file.write('</p>\\n')\n\n        main_file.write('<table>\\n<tr>')\n\n        percents = {}\n\n        for player in players:\n\n            player_link = '<a href=\"stats/pl_{}_{}.html\">{}</a>'.format(player.name.lower(), statistic.lower(), player.name)\n\n            main_file.write('<th>{}</th>'.format(player_link))\n\n            raw_percents = {}\n\n            c_group = stat_info[statistic]['count']['group']\n            c_select = stat_info[statistic]['count']['select']\n            t_group = stat_info[statistic]['total']['group']\n            t_select = stat_info[statistic]['total']['select']\n\n            for item in list(player.stats['meta'][c_group].keys()) + list(set(player.stats['meta'][t_group].keys()) - set(player.stats['meta'][c_group].keys())):\n                if item in player.stats['meta'][c_group].keys() and c_select in player.stats['meta'][c_group][item].keys():\n                    count = player.stats['meta'][c_group][item][c_select]\n                else:\n                    count = 0\n                if item in player.stats['meta'][t_group].keys():\n                    if t_select == 'all':\n                        total = 0\n                        if 'correct' in player.stats['meta'][t_group][item].keys():\n                            total += player.stats['meta'][t_group][item]['correct']\n                        if 'incorrect' in player.stats['meta'][t_group][item].keys():\n                            total += player.stats['meta'][t_group][item]['incorrect']\n                    else:\n                        if t_select in player.stats['meta'][t_group][item].keys():\n                            total = player.stats['meta'][t_group][item][t_select]\n                        else:\n                            total = 0\n                else:\n                    total = 0\n\n                db_count = 0\n\n                if 'minimum_database' in stat_info[statistic].keys():\n\n                    for db_item in databases[stat_info[statistic]['minimum_database']]:\n                        if item in db_item:\n                            print('{} matched to {}'.format(item, db_item))\n                            db_count += 1\n\n                    print('{} Count: {} | Minimum: {}'.format(item, db_count, minimum))\n                    if db_count < minimum:\n                        continue\n                else:\n                    if total < minimum:\n                        continue\n\n                raw_percents[item] = (count, total)\n\n            # print('raw_percents = {}'.format(raw_percents))\n\n            percents[player.name] = sorted(sorted(raw_percents.items(), key=lambda x: x[1][1], reverse=True),\n                                           key=lambda x: (x[1][0] / x[1][1]), reverse=stat_info[statistic]['positive'])\n\n            write_individual_player_statistic(player.name, percents[player.name], statistic)\n\n            # print('{} sorted_percents: {}'.format(player.name, percents[player.name]))\n\n        player_percents[statistic] = percents\n\n        main_file.write('</tr>\\n')\n\n        if 'show' in stat_info[statistic].keys():\n            show = stat_info[statistic]['show']\n        else:\n            show = 3\n\n        for row in range(0, show):\n            main_file.write('<tr>')\n            for player in players:\n                try:\n                    main_file.write('<td class=\"{0}\">{1:.0%}'.format(\n                        player.name.lower(), (percents[player.name][row][1][0] / percents[player.name][row][1][1])))\n                    main_file.write('<br>{1}<br>({2} out of {3})</td>'.format(\n                        player.name.lower(), percents[player.name][row][0], percents[player.name][row][1][0], percents[player.name][row][1][1]))\n                except IndexError:\n                    main_file.write('<td class=\"{0}\"></td>'.format(player.name.lower()))\n            main_file.write('</tr>\\n')\n\n        main_file.write('</table>\\n')\n\n    main_file.write('<h2>')\n    main_file.write('Against the Crowd')\n    main_file.write('</h2>\\n<p>')\n    main_file.write('(minimum 3 players)')\n    main_file.write('</p>\\n<p>')\n    main_file.write('Accuracy when the player is the only one who picked that winner against everybody else')\n    main_file.write('</p>\\n')\n\n    main_file.write('<table>\\n<tr>')\n\n    for player in players:\n        main_file.write('<th>{}</th>'.format(player.name))\n\n    main_file.write('</tr>\\n<tr>')\n\n    max_against_crowd = 0\n\n    for player in players:\n        ac_correct = player.stats['meta']['against_crowd']['correct']\n        ac_total = ac_correct + player.stats['meta']['against_crowd']['incorrect']\n        if ac_total > max_against_crowd:\n            max_against_crowd = ac_total\n        try:\n            main_file.write('<td class=\"{0}\">{1:.0%}'.format(player.name.lower(), ac_correct / ac_total))\n            main_file.write('<br>{1} out of {2}</td>'.format(player.name.lower(), ac_correct, ac_total))\n        except ZeroDivisionError:\n            main_file.write('<td class=\"{0}\"></td>'.format(player.name.lower()))\n\n    main_file.write('</tr>')\n\n    for row in range(0, max_against_crowd):\n        main_file.write('<tr>\\n')\n        for player in players:\n            full_list = player.stats['meta']['against_crowd']['correct_list'] + player.stats['meta']['against_crowd']['incorrect_list']\n            try:\n                if full_list[row] in player.stats['meta']['against_crowd']['correct_list']:\n                    status = '✔'\n                else:\n                    status = '✘'\n                main_file.write('<td class=\"{0}\">{1}'.format(player.name.lower(), status))\n                main_file.write('<br>{1}<br>{2}</td>'.format(player.name.lower(), full_list[row][0], full_list[row][1]))\n            except IndexError:\n                main_file.write('<td class=\"{0}\"></td>'.format(player.name.lower()))\n        main_file.write('</tr>\\n')\n\n    main_file.write('</table>\\n')\n\n    main_file.write('</body>\\n</html>')\n    main_file.close()\n\n    return\n\n\ndef write_individual_player_statistic(player_name, stat_percents, stat):\n    \"\"\"\n    Write an individual player + stat HTML file\n    :param player_name: player name\n    :param stat_percents: percents list\n    :param stat: statistic name\n    :return: nothing\n    \"\"\"\n\n    pwd = os.path.dirname(__file__)\n    file_path = 'stats/pl_{}_{}.html'.format(player_name.lower(), stat.lower())\n\n    filename = os.path.join(pwd, file_path)\n\n    stat_file = open(filename, 'w+')\n\n    stat_file.write('<html>\\n')\n    stat_file.write('<body>\\n')\n    stat_file.write('<head>\\n\\n<title>Prediction League Stats</title>\\n\\n</head>\\n')\n    stat_file.write('<div>\\n<h1>{}: {}</h1>\\n<hr>\\n</div>\\n'.format(player_name, stat_info[stat]['label']))\n    stat_file.write('<br>\\n<br>')\n    stat_file.write('<table>')\n\n    for stat_percent in stat_percents:\n        try:\n            percent = (stat_percent[1][0] / stat_percent[1][1])\n        except ZeroDivisionError:\n            percent = 0\n\n        stat_file.write('<tr>')\n        stat_file.write('<td>{0:.0%}</td>'.format(percent))\n        stat_file.write('<td>\\n')\n        stat_file.write('{}\\n<br>'.format(stat_percent[0]))\n        stat_file.write('({} out of {})'.format(stat_percent[1][0], stat_percent[1][1]))\n        stat_file.write('</td></tr>\\n')\n\n    stat_file.write('</table>\\n</body>\\n</html>\\n')\n\n    stat_file.close()\n\n    return\n\n\ndef main():\n    \"\"\"\n    Main function for gathering Prediction League stats\n    \"\"\"\n\n    gs = open_sheet()\n\n    all_ppvs = gs.worksheets()\n\n    for ppv in all_ppvs:\n        load_ppv(gs, 'index', ppv.index)\n\n    write_to_html()\n\n\nif __name__ == '__main__':\n\n    logging.basicConfig(level=logging.WARNING, format='%(name)s %(message)s')\n\n    main()\n","sub_path":"pl_stats.py","file_name":"pl_stats.py","file_ext":"py","file_size_in_byte":35059,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"114682934","text":"import sys\nimport math\nfrom collections import deque\n\nsys.setrecursionlimit(1000000)\nMOD = 10 ** 9 + 7\ninput = lambda: sys.stdin.readline().strip()\nNI = lambda: int(input())\nNMI = lambda: map(int, input().split())\nNLI = lambda: list(NMI())\nSI = lambda: input()\n\n\ndef make_grid(h, w, num): return [[int(num)] * w for _ in range(h)]\n\n\ndef main():\n    H, N = NMI()\n    M = [NLI() for _ in range(N)]\n    # dp[i] is the minimum magic power needed to deal i damage\n    dp = [10**10] * (H+10)\n    dp[0] = 0\n    for i in range(H):\n        for m in M:\n            dp[min(i+m[0], H)] = min(dp[min(i+m[0], H)], dp[i] + m[1])\n    print(dp[H])\n\n\n\nif __name__ == \"__main__\":\n    main()","sub_path":"Python_codes/p02787/s685344822.py","file_name":"s685344822.py","file_ext":"py","file_size_in_byte":670,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"127154072","text":"# # Copyright (c) 2019-present, HuggingFace Inc.\n# All rights reserved.\n# This source code is licensed under the BSD-style license found in the\n# LICENSE file in the root directory of this source tree.\nimport logging\nimport random\nfrom argparse import ArgumentParser\nfrom itertools import chain\nfrom pprint import pformat\nimport warnings\n\nimport torch\nimport torch.nn.functional as F\n\nfrom transformers import OpenAIGPTLMHeadModel, OpenAIGPTTokenizer, GPT2LMHeadModel, GPT2Tokenizer\nfrom train_faiss_option1 import SPECIAL_TOKENS, build_input_from_segments, add_special_tokens_\nfrom utils import get_dataset, download_pretrained_model\nfrom sentence_transformers import SentenceTransformer\nimport faiss\nimport numpy as np\n\ndef top_filtering(logits, top_k=0., top_p=0.9, threshold=-float('Inf'), filter_value=-float('Inf')):\n    \"\"\" Filter a distribution of logits using top-k, top-p (nucleus) and/or threshold filtering\n        Args:\n            logits: logits distribution shape (vocabulary size)\n            top_k: <=0: no filtering, >0: keep only top k tokens with highest probability.\n            top_p: <=0.0: no filtering, >0.0: keep only a subset S of candidates, where S is the smallest subset\n                whose total probability mass is greater than or equal to the threshold top_p.\n                In practice, we select the highest probability tokens whose cumulative probability mass exceeds\n                the threshold top_p.\n            threshold: a minimal threshold to keep logits\n    \"\"\"\n    assert logits.dim() == 1  # Only work for batch size 1 for now - could update but it would obfuscate a bit the code\n    top_k = min(top_k, logits.size(-1))\n    if top_k > 0:\n        # Remove all tokens with a probability less than the last token in the top-k tokens\n        indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None]\n        logits[indices_to_remove] = filter_value\n\n    if top_p > 0.0:\n        # Compute cumulative probabilities of sorted tokens\n        sorted_logits, sorted_indices = torch.sort(logits, descending=True)\n        cumulative_probabilities = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)\n\n        # Remove tokens with cumulative 
probability above the threshold\n sorted_indices_to_remove = cumulative_probabilities > top_p\n # Shift the indices to the right to keep also the first token above the threshold\n sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()\n sorted_indices_to_remove[..., 0] = 0\n\n # Back to unsorted indices and set them to -infinity\n indices_to_remove = sorted_indices[sorted_indices_to_remove]\n logits[indices_to_remove] = filter_value\n\n indices_to_remove = logits < threshold\n logits[indices_to_remove] = filter_value\n\n return logits\n\ndef build_input_from_segments1(persona, history, reply, tokenizer, lm_labels=False, with_eos=True):\n \"\"\" Build a sequence of input from 3 segments: persona, history and last reply. \"\"\"\n bos, eos, speaker1, speaker2 = tokenizer.convert_tokens_to_ids(SPECIAL_TOKENS[:-1])\n sequence = [[bos] + list(chain(*persona))] + history + [reply + ([eos] if with_eos else [])]\n sequence = [sequence[0]] + [[1 if (len(sequence)-i) % 2 else 0] + s for i, s in enumerate(sequence[1:])]\n instance = {}\n instance[\"input_ids\"] = list(chain(*sequence))\n instance[\"token_type_ids\"] = [1 if i % 2 else 0 for i, s in enumerate(sequence) for _ in s]\n instance[\"mc_token_ids\"] = len(instance[\"input_ids\"]) - 1\n instance[\"lm_labels\"] = [-100] * len(instance[\"input_ids\"])\n if lm_labels:\n instance[\"lm_labels\"] = ([-100] * sum(len(s) for s in sequence[:-1])) + [-100] + sequence[-1][1:]\n return instance\n\ndef sample_sequence(personality, history, tokenizer, model, args, current_output=None):\n special_tokens_ids = tokenizer.convert_tokens_to_ids(SPECIAL_TOKENS)\n if current_output is None:\n current_output = []\n\n for i in range(args.max_length):\n if args.option_faiss == 3:\n instance = build_input_from_segments1(personality, history, current_output, tokenizer, with_eos=False)\n else:\n instance = build_input_from_segments(personality, history, current_output, tokenizer, with_eos=False)\n \n input_ids = torch.tensor(instance[\"input_ids\"], device=args.device).unsqueeze(0)\n token_type_ids = torch.tensor(instance[\"token_type_ids\"], device=args.device).unsqueeze(0)\n\n logits = model(input_ids, token_type_ids=token_type_ids)\n if isinstance(logits, tuple): # for gpt2 and maybe others\n logits = logits[0]\n logits = logits[0]\n logits = logits[0, -1, :] / args.temperature\n logits = top_filtering(logits, top_k=args.top_k, top_p=args.top_p)\n probs = F.softmax(logits, dim=-1)\n\n prev = torch.topk(probs, 1)[1] if args.no_sample else torch.multinomial(probs, 1)\n if i < args.min_length and prev.item() in special_tokens_ids:\n while prev.item() in special_tokens_ids:\n if probs.max().item() == 1:\n warnings.warn(\"Warning: model generating special token with probability 1.\")\n break # avoid infinitely looping over special token\n prev = torch.multinomial(probs, num_samples=1)\n\n if prev.item() in special_tokens_ids:\n break\n current_output.append(prev.item())\n\n return current_output\n\ndef run():\n parser = ArgumentParser()\n parser.add_argument(\"--dataset_path\", type=str, default=\"\", help=\"Path or url of the dataset. 
\ndef run():\n    parser = ArgumentParser()\n    parser.add_argument(\"--dataset_path\", type=str, default=\"\", help=\"Path or url of the dataset. If empty download from S3.\")\n    parser.add_argument(\"--dataset_cache\", type=str, default='./dataset_cache', help=\"Path or url of the dataset cache\")\n    parser.add_argument(\"--model\", type=str, default=\"openai-gpt\", help=\"Model type (openai-gpt or gpt2)\", choices=['openai-gpt', 'gpt2'])  # anything besides gpt2 will load openai-gpt\n    parser.add_argument(\"--model_checkpoint\", type=str, default=\"\", help=\"Path, url or short name of the model\")\n    parser.add_argument(\"--max_history\", type=int, default=2, help=\"Number of previous utterances to keep in history\")\n    parser.add_argument(\"--device\", type=str, default=\"cuda\" if torch.cuda.is_available() else \"cpu\", help=\"Device (cuda or cpu)\")\n\n    parser.add_argument(\"--no_sample\", action='store_true', help=\"Set to use greedy decoding instead of sampling\")\n    parser.add_argument(\"--max_length\", type=int, default=20, help=\"Maximum length of the output utterances\")\n    parser.add_argument(\"--min_length\", type=int, default=1, help=\"Minimum length of the output utterances\")\n    parser.add_argument(\"--seed\", type=int, default=0, help=\"Seed\")\n    parser.add_argument(\"--temperature\", type=float, default=0.7, help=\"Sampling softmax temperature\")\n    parser.add_argument(\"--top_k\", type=int, default=0, help=\"Filter top-k tokens before sampling (<=0: no filtering)\")\n    parser.add_argument(\"--top_p\", type=float, default=0.9, help=\"Nucleus filtering (top-p) before sampling (<=0.0: no filtering)\")\n    parser.add_argument(\"--option_faiss\", type=int, default=0, help=\"What faiss option is selected\")\n    parser.add_argument(\"--random_personality\", type=int, default=0, help=\"Random personality or another\")\n\n    args = parser.parse_args()\n\n    logging.basicConfig(level=logging.INFO)\n    logger = logging.getLogger(__file__)\n    logger.info(pformat(args))\n\n    if args.model_checkpoint == \"\":\n        if args.model == 'gpt2':\n            raise ValueError(\"Interacting with GPT2 requires passing a finetuned model_checkpoint\")\n        else:\n            args.model_checkpoint = download_pretrained_model()\n\n    if args.seed != 0:\n        random.seed(args.seed)\n        torch.random.manual_seed(args.seed)\n        torch.cuda.manual_seed(args.seed)\n\n\n    logger.info(\"Get pretrained model and tokenizer\")\n    tokenizer_class, model_class = (GPT2Tokenizer, GPT2LMHeadModel) if args.model == 'gpt2' else (OpenAIGPTTokenizer, OpenAIGPTLMHeadModel)\n    tokenizer = tokenizer_class.from_pretrained(args.model_checkpoint)\n    model = model_class.from_pretrained(args.model_checkpoint)\n    model.to(args.device)\n    add_special_tokens_(model, tokenizer)\n\n    logger.info(\"Sample a personality\")\n    dataset = get_dataset(tokenizer, args.dataset_path, args.dataset_cache)\n    personalities = [dialog[\"persona_info\"] for dataset in dataset.values() for dialog in dataset]\n    if args.random_personality == 0:\n        personality = random.choice(personalities)\n    else:\n        personality = personalities[args.random_personality-1]\n    logger.info(\"Selected personality: %s\", tokenizer.decode(chain(*personality)))\n    personality_decoded = []\n    for i in personality:\n        personality_decoded.append(tokenizer.decode(i))\n    model_faiss = SentenceTransformer('distilbert-base-nli-stsb-mean-tokens')\n    embeddings_persona = model_faiss.encode(personality_decoded, show_progress_bar=False) \n    # Step 1: Change data type\n    embeddings_persona = np.array([embedding for embedding in embeddings_persona]).astype(\"float32\")\n\n    # Step 2: Instantiate the index\n    index = faiss.IndexFlatL2(embeddings_persona.shape[1])\n\n    # Step 3: Pass the index to IndexIDMap\n    index = faiss.IndexIDMap(index)\n    # Step 4: Add vectors and their IDs\n    index.add_with_ids(embeddings_persona, np.array(list(range(0,embeddings_persona.shape[0]))))\n
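    # Hedged note: IndexFlatL2 ranks the persona sentences by exact L2 distance between their\n    # DistilBERT sentence embeddings; IndexIDMap only maps hits back to list positions, so\n    # I[0][0] in the searches below is the index of the closest persona sentence.\n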
    history = []\n    while True:\n        raw_text = input(\">>> \")\n        while not raw_text:\n            print('Prompt should not be empty!')\n            raw_text = input(\">>> \")\n        history.append(tokenizer.encode(raw_text))\n        selected_personality = []\n        history_decoded = []\n        selected_personality_encoded = []\n        for i in history[-(args.max_history):]:\n            history_decoded.append(tokenizer.decode(i))\n        if args.option_faiss == 1:\n            # Faiss search:\n            history_splitted = \" \".join(history_decoded)\n            history_encoded = model_faiss.encode([history_splitted],show_progress_bar=False)\n            D, I = index.search(np.array(history_encoded), k=len(personality_decoded))\n            #history_faiss_selected.append(history)\n            #persona_faiss_selected.append(persona_complete[I[0][0]])\n            selected_personality = personality_decoded[I[0][0]]\n            selected_personality_encoded=(tokenizer.encode(selected_personality))\n\n        elif args.option_faiss == 2:\n            if len(history) > 1:\n                history_encoded = model_faiss.encode([history_decoded[-2]],show_progress_bar=False)\n            else:\n                history_encoded = model_faiss.encode([history_decoded[-1]],show_progress_bar=False)\n            D, I = index.search(np.array(history_encoded), k=len(personality_decoded))\n            selected_personality = personality_decoded[I[0][0]]\n            selected_personality_encoded=(tokenizer.encode(selected_personality))\n\n        elif args.option_faiss == 3:\n            if len(history) > 1:\n                history_encoded = model_faiss.encode([history_decoded[-2]], show_progress_bar=False)\n            else:\n                history_encoded = model_faiss.encode([history_decoded[-1]],show_progress_bar=False)\n            D, I = index.search(np.array(history_encoded), k=len(personality_decoded))\n            persona_list = []\n            for i in I[0][1:-1]:\n                selected_personality.append(personality_decoded[i])\n            for i in selected_personality:\n                selected_personality_encoded.append(tokenizer.encode(i))\n        elif args.option_faiss == 4:\n            history_encoded_user = model_faiss.encode([history_decoded[-1]],show_progress_bar=False)\n            D, I = index.search(np.array(history_encoded_user), k=len(personality_decoded)) \n            \n            index_to_be_removed = I[0][0]\n\n            persona2 = personality_decoded[:index_to_be_removed] + personality_decoded[index_to_be_removed+1:]\n            \n            \n            embeddings_persona2 = model_faiss.encode(persona2, show_progress_bar=False) \n            # Step 1: Change data type\n            embeddings_persona2 = np.array([embedding for embedding in embeddings_persona2]).astype(\"float32\")\n\n            # Step 2: Instantiate the index\n            index2 = faiss.IndexFlatL2(embeddings_persona2.shape[1])\n\n            # Step 3: Pass the index to IndexIDMap\n            index2 = faiss.IndexIDMap(index2)\n\n            # Step 4: Add vectors and their IDs\n            index2.add_with_ids(embeddings_persona2, np.array(list(range(0,embeddings_persona2.shape[0]))))  \n            persona_list = []\n            for i in I[0][1:-1]:\n                persona_list.append(personality_decoded[i])\n            if len(history) > 1:\n                history_encoded_chatbot = model_faiss.encode([history_decoded[-2]], show_progress_bar=False)\n            else:\n                history_encoded_chatbot = model_faiss.encode([history_decoded[-1]], show_progress_bar=False)\n            T, J = index2.search(np.array(history_encoded_chatbot), k=len(persona2))\n            #persona_faiss_selected.append(persona2[J[0][0]])\n            selected_personality = persona2[J[0][0]]\n            selected_personality_encoded=(tokenizer.encode(selected_personality))\n        else:\n            selected_personality = personality_decoded\n            #selected_personality_encoded = []\n        with torch.no_grad():\n            out_ids = sample_sequence(selected_personality_encoded, history, tokenizer, model, args)\n
        history.append(out_ids)\n        history = history[-(2*args.max_history+1):]\n        out_text = tokenizer.decode(out_ids, skip_special_tokens=True)\n        print(personality_decoded)\n        print(\"Selected personality: \")\n        print(selected_personality)\n        print(\"Chatbot's turn:\")\n        print(out_text)\nif __name__ == \"__main__\":\n    run()\n","sub_path":"interact_faiss_dialog.py","file_name":"interact_faiss_dialog.py","file_ext":"py","file_size_in_byte":13848,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"55148769","text":"# pylint: disable=no-value-for-parameter\n# FIXME: E1120:No value for argument 'dml' in method call\n# pylint: disable=protected-access\n# FIXME: Access to a protected member _result_proxy of a client class\n\nimport asyncio\nimport logging\nimport os\nimport re\nimport tempfile\nfrom collections import deque\nfrom concurrent.futures import ThreadPoolExecutor\nfrom pathlib import Path\nfrom typing import Any, Dict, List, Optional, Tuple, Union\n\nimport aiobotocore\nimport attr\nimport sqlalchemy as sa\nfrom aiobotocore.client import AioBaseClient\nfrom aiobotocore.session import AioSession, ClientCreatorContext\nfrom aiohttp import web\nfrom aiopg.sa import Engine\nfrom aiopg.sa.result import RowProxy\nfrom servicelib.aiohttp.aiopg_utils import DBAPIError, PostgresRetryPolicyUponOperation\nfrom servicelib.aiohttp.client_session import get_client_session\nfrom servicelib.utils import fire_and_forget_task\nfrom sqlalchemy.dialects.postgresql import insert as pg_insert\nfrom sqlalchemy.sql.expression import literal_column\nfrom tenacity import retry\nfrom yarl import URL\n\nfrom .access_layer import (\n    AccessRights,\n    get_file_access_rights,\n    get_project_access_rights,\n    get_readable_project_ids,\n)\nfrom .constants import (\n    APP_CONFIG_KEY,\n    APP_DB_ENGINE_KEY,\n    APP_DSM_KEY,\n    APP_S3_KEY,\n    DATCORE_ID,\n    DATCORE_STR,\n    SIMCORE_S3_ID,\n    SIMCORE_S3_STR,\n)\nfrom .datcore_adapter import datcore_adapter\nfrom .models import (\n    DatasetMetaData,\n    FileMetaData,\n    FileMetaDataEx,\n    file_meta_data,\n    get_location_from_id,\n    projects,\n)\nfrom .s3wrapper.s3_client import MinioClientWrapper\nfrom .settings import Settings\nfrom .utils import download_to_file_or_raise, expo\n\nlogger = logging.getLogger(__name__)\n\npostgres_service_retry_policy_kwargs = PostgresRetryPolicyUponOperation(logger).kwargs\n\n\ndef setup_dsm(app: web.Application):\n    async def _cleanup_context(app: web.Application):\n        cfg: Settings = app[APP_CONFIG_KEY]\n\n        with ThreadPoolExecutor(max_workers=cfg.STORAGE_MAX_WORKERS) as executor:\n            dsm = DataStorageManager(\n                s3_client=app.get(APP_S3_KEY),\n                engine=app.get(APP_DB_ENGINE_KEY),\n                loop=asyncio.get_event_loop(),\n                pool=executor,\n                simcore_bucket_name=cfg.STORAGE_S3.S3_BUCKET_NAME,\n                has_project_db=not cfg.STORAGE_TESTING,\n                app=app,\n            )  # type: ignore\n\n            app[APP_DSM_KEY] = dsm\n\n            yield\n\n            assert app[APP_DSM_KEY].pool is executor  # nosec\n\n            logger.info(\"Shutting down %s\", dsm.pool)\n\n    # ------\n\n    app.cleanup_ctx.append(_cleanup_context)\n\n\ndef to_meta_data_extended(row: RowProxy) -> FileMetaDataEx:\n    assert row\n    meta = FileMetaData(**dict(row))  # type: ignore\n    meta_extended = FileMetaDataEx(\n        fmd=meta,\n        parent_id=str(Path(meta.object_name).parent),\n    )  # type: ignore\n    return meta_extended\n\n\n@attr.s(auto_attribs=True)\nclass DatCoreApiToken:\n    api_token: Optional[str] = None\n    api_secret: Optional[str] = None\n\n    def 
to_tuple(self):\n        return (self.api_token, self.api_secret)\n\n\n@attr.s(auto_attribs=True)\nclass DataStorageManager:\n    \"\"\"Data storage manager\n\n    The dsm has access to the database for all meta data and to the actual backend. For now this\n    is simcore's S3 [minio] and the datcore storage facilities.\n\n    For all data that is in-house (simcore.s3, ...) we keep a synchronized database with meta information\n    for the physical files.\n\n    For physical changes on S3, that might be time-consuming, the db keeps a state (delete and upload mostly)\n\n    The dsm provides the following additional functionalities:\n\n    - listing of folders for a given user, optionally filtered using a regular expression and optionally\n      sorted by one of the meta data keys\n\n    - upload/download of files\n\n        client -> S3 : presigned upload link\n        S3 -> client : presigned download link\n        datcore -> client: presigned download link\n        S3 -> datcore: local copy and then upload via their api\n\n    minio/S3 and postgres can talk nicely with each other via Notifications using RabbitMQ which we already have.\n    See:\n\n        https://blog.minio.io/part-5-5-publish-minio-events-via-postgresql-50f6cc7a7346\n        https://docs.minio.io/docs/minio-bucket-notification-guide.html\n    \"\"\"\n\n    # TODO: perhaps a cache could be used here? add a lifetime?\n
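\n    # Hedged usage sketch (the web-handler wiring is an assumption, not shown in this module):\n    #   dsm = app[APP_DSM_KEY]\n    #   put_url = await dsm.upload_link(user_id, file_uuid)       # client PUTs the file here\n    #   get_url = await dsm.download_link_s3(file_uuid, user_id)  # client GETs the file here\n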
\n    s3_client: MinioClientWrapper\n    engine: Engine\n    loop: object\n    pool: ThreadPoolExecutor\n    simcore_bucket_name: str\n    has_project_db: bool\n    session: AioSession = attr.Factory(aiobotocore.get_session)\n    datcore_tokens: Dict[str, DatCoreApiToken] = attr.Factory(dict)\n    app: Optional[web.Application] = None\n\n    def _create_aiobotocore_client_context(self) -> ClientCreatorContext:\n        assert hasattr(self.session, \"create_client\")\n        # pylint: disable=no-member\n\n        # SEE API in https://botocore.amazonaws.com/v1/documentation/api/latest/reference/services/s3.html\n        # SEE https://aiobotocore.readthedocs.io/en/latest/index.html\n        return self.session.create_client(\n            \"s3\",\n            endpoint_url=self.s3_client.endpoint_url,\n            aws_access_key_id=self.s3_client.access_key,\n            aws_secret_access_key=self.s3_client.secret_key,\n        )\n\n    def _get_datcore_tokens(self, user_id: str) -> Tuple[Optional[str], Optional[str]]:\n        # pylint: disable=no-member\n        token = self.datcore_tokens.get(user_id, DatCoreApiToken())\n        return token.to_tuple()\n\n    async def locations(self, user_id: str):\n        locs = []\n        simcore_s3 = {\"name\": SIMCORE_S3_STR, \"id\": SIMCORE_S3_ID}\n        locs.append(simcore_s3)\n\n        api_token, api_secret = self._get_datcore_tokens(user_id)\n\n        if api_token and api_secret and self.app:\n            if await datcore_adapter.check_user_can_connect(\n                self.app, api_token, api_secret\n            ):\n                datcore = {\"name\": DATCORE_STR, \"id\": DATCORE_ID}\n                locs.append(datcore)\n\n        return locs\n\n    @classmethod\n    def location_from_id(cls, location_id: str):\n        return get_location_from_id(location_id)\n\n    # LIST/GET ---------------------------\n\n    # pylint: disable=too-many-arguments\n    # pylint: disable=too-many-branches\n    # pylint: disable=too-many-statements\n    async def list_files(\n        self, user_id: str, location: str, uuid_filter: str = \"\", regex: str = \"\"\n    ) -> List[FileMetaDataEx]:\n        \"\"\"Returns a list of file paths\n\n        - Works for simcore.s3 and datcore\n        - Can filter on uuid: useful to filter on project_id/node_id\n        - Can filter upon regular expression (for now only on key: value pairs of the FileMetaData)\n        \"\"\"\n        data = deque()\n        if location == SIMCORE_S3_STR:\n            accessible_projects_ids = []\n            async with self.engine.acquire() as conn, conn.begin():\n                accessible_projects_ids = await get_readable_project_ids(\n                    conn, int(user_id)\n                )\n                has_read_access = (\n                    file_meta_data.c.user_id == user_id\n                ) | file_meta_data.c.project_id.in_(accessible_projects_ids)\n\n                query = sa.select([file_meta_data]).where(has_read_access)\n\n                async for row in conn.execute(query):\n                    d = FileMetaData(**dict(row))\n                    dex = FileMetaDataEx(\n                        fmd=d, parent_id=str(Path(d.object_name).parent)\n                    )\n                    data.append(dex)\n\n            if self.has_project_db:\n                uuid_name_dict = {}\n                # now parse the project to search for node/project names\n                try:\n                    async with self.engine.acquire() as conn, conn.begin():\n                        query = sa.select([projects]).where(\n                            projects.c.uuid.in_(accessible_projects_ids)\n                        )\n\n                        async for row in conn.execute(query):\n                            proj_data = dict(row.items())\n\n                            uuid_name_dict[proj_data[\"uuid\"]] = proj_data[\"name\"]\n                            wb = proj_data[\"workbench\"]\n                            for node in wb.keys():\n                                uuid_name_dict[node] = wb[node][\"label\"]\n                except DBAPIError as _err:\n                    logger.exception(\"Error querying database for project names\")\n\n                if not uuid_name_dict:\n                    # there seems to be no project whatsoever for user_id\n                    return []\n\n                # only keep files from non-deleted projects\n                clean_data = deque()\n                for dx in data:\n                    d = dx.fmd\n                    if d.project_id not in uuid_name_dict:\n                        continue\n                    #\n                    # FIXME: artificially fills ['project_name', 'node_name', 'file_id', 'raw_file_path', 'display_file_path']\n                    #        with information from the projects table!\n\n                    d.project_name = uuid_name_dict[d.project_id]\n                    if d.node_id in uuid_name_dict:\n                        d.node_name = uuid_name_dict[d.node_id]\n\n                    d.raw_file_path = str(\n                        Path(d.project_id) / Path(d.node_id) / Path(d.file_name)\n                    )\n                    d.display_file_path = d.raw_file_path\n                    d.file_id = d.file_uuid\n                    if d.node_name and d.project_name:\n                        d.display_file_path = str(\n                            Path(d.project_name) / Path(d.node_name) / Path(d.file_name)\n                        )\n                        # at this point the data has been synced to the postgres metadata table\n                    clean_data.append(dx)\n\n                data = clean_data\n\n        elif location == DATCORE_STR:\n            api_token, api_secret = self._get_datcore_tokens(user_id)\n            return await datcore_adapter.list_all_datasets_files_metadatas(\n                self.app, api_token, api_secret\n            )\n\n        if uuid_filter:\n            # TODO: incorporate this in db query!\n            _query = re.compile(uuid_filter, re.IGNORECASE)\n            filtered_data = deque()\n            for dx in data:\n                d = dx.fmd\n                if _query.search(d.file_uuid):\n                    filtered_data.append(dx)\n\n            return list(filtered_data)\n\n        if regex:\n            _query = re.compile(regex, re.IGNORECASE)\n            filtered_data = deque()\n            for dx in data:\n                d = dx.fmd\n                _vars = vars(d)\n                for v in _vars.keys():\n                    if _query.search(v) or _query.search(str(_vars[v])):\n                        filtered_data.append(dx)\n                        break\n            return list(filtered_data)\n\n        return list(data)\n\n    async def list_files_dataset(\n        self, user_id: str, location: str, dataset_id: str\n    ) -> Union[List[FileMetaData], List[FileMetaDataEx]]:\n        # this is a cheap shot, needs fixing once storage/db is in sync\n        data = []\n        if location == SIMCORE_S3_STR:\n            data: List[FileMetaDataEx] = await self.list_files(\n                user_id, location, uuid_filter=dataset_id + \"/\"\n            )\n\n        elif location == DATCORE_STR:\n            api_token, api_secret = self._get_datcore_tokens(user_id)\n            # lists all the files inside the dataset\n            return await datcore_adapter.list_all_files_metadatas_in_dataset(\n                self.app, api_token, api_secret, dataset_id\n            )\n\n        return data\n\n    async def list_datasets(self, user_id: str, location: str) -> List[DatasetMetaData]:\n        \"\"\"Returns a list of top level datasets\n\n        Works for simcore.s3 and datcore\n\n        
\"\"\"\n data = []\n\n if location == SIMCORE_S3_STR:\n if self.has_project_db:\n try:\n async with self.engine.acquire() as conn, conn.begin():\n readable_projects_ids = await get_readable_project_ids(\n conn, int(user_id)\n )\n has_read_access = projects.c.uuid.in_(readable_projects_ids)\n\n # FIXME: this DOES NOT read from file-metadata table!!!\n query = sa.select([projects.c.uuid, projects.c.name]).where(\n has_read_access\n )\n async for row in conn.execute(query):\n dmd = DatasetMetaData(\n dataset_id=row.uuid,\n display_name=row.name,\n )\n data.append(dmd)\n except DBAPIError as _err:\n logger.exception(\"Error querying database for project names\")\n\n elif location == DATCORE_STR:\n api_token, api_secret = self._get_datcore_tokens(user_id)\n return await datcore_adapter.list_datasets(self.app, api_token, api_secret)\n\n return data\n\n async def list_file(\n self, user_id: str, location: str, file_uuid: str\n ) -> Optional[FileMetaDataEx]:\n\n if location == SIMCORE_S3_STR:\n\n async with self.engine.acquire() as conn, conn.begin():\n can: Optional[AccessRights] = await get_file_access_rights(\n conn, int(user_id), file_uuid\n )\n if can.read:\n query = sa.select([file_meta_data]).where(\n file_meta_data.c.file_uuid == file_uuid\n )\n result = await conn.execute(query)\n row = await result.first()\n return to_meta_data_extended(row) if row else None\n # FIXME: returns None in both cases: file does not exist or use has no access\n logger.debug(\"User %s cannot read file %s\", user_id, file_uuid)\n return None\n\n elif location == DATCORE_STR:\n # FIXME: review return inconsistencies\n # api_token, api_secret = self._get_datcore_tokens(user_id)\n import warnings\n\n warnings.warn(\"NOT IMPLEMENTED!!!\")\n return None\n\n # UPLOAD/DOWNLOAD LINKS ---------------------------\n\n async def upload_file_to_datcore(\n self, _user_id: str, _local_file_path: str, _destination_id: str\n ):\n import warnings\n\n warnings.warn(f\"NOT IMPLEMENTED!!! 
in {self.__class__}\")\n        # uploads a locally available file to dat core given the storage path, optionally attaching some meta data\n        # api_token, api_secret = self._get_datcore_tokens(user_id)\n        # await dcw.upload_file_to_id(destination_id, local_file_path)\n\n    async def _metadata_file_updater(\n        self,\n        file_uuid: str,\n        bucket_name: str,\n        object_name: str,\n        file_size: int,\n        last_modified: str,\n        max_update_retries: int = 50,\n    ):\n        \"\"\"\n        Will retry max_update_retries times to update the metadata on the file after an upload.\n        If it is not successful it will exit and log an error.\n\n        Note: MinIO bucket notifications are not available with S3, that's why we have the\n        following hacky solution\n        \"\"\"\n        current_iteration = 0\n\n        async with self._create_aiobotocore_client_context() as aioboto_client:\n            continue_loop = True\n            sleep_generator = expo()\n            update_succeeded = False\n\n            while continue_loop:\n                current_iteration += 1\n                result = await aioboto_client.list_objects_v2(\n                    Bucket=bucket_name, Prefix=object_name\n                )\n                sleep_amount = next(sleep_generator)\n                continue_loop = current_iteration <= max_update_retries\n\n                if \"Contents\" not in result:\n                    logger.info(\"File '%s' was not found in the bucket\", object_name)\n                    await asyncio.sleep(sleep_amount)\n                    continue\n\n                new_file_size = result[\"Contents\"][0][\"Size\"]\n                new_last_modified = str(result[\"Contents\"][0][\"LastModified\"])\n                if file_size == new_file_size or last_modified == new_last_modified:\n                    logger.info(\"File '%s' did not change yet\", object_name)\n                    await asyncio.sleep(sleep_amount)\n                    continue\n\n                file_e_tag = result[\"Contents\"][0][\"ETag\"].strip('\"')\n                # finally update the data in the database and exit\n                continue_loop = False\n\n                logger.info(\n                    \"Obtained this from S3: new_file_size=%s new_last_modified=%s file ETag=%s\",\n                    new_file_size,\n                    new_last_modified,\n                    file_e_tag,\n                )\n\n                async with self.engine.acquire() as conn:\n                    query = (\n                        file_meta_data.update()\n                        .where(file_meta_data.c.file_uuid == file_uuid)\n                        .values(\n                            file_size=new_file_size,\n                            last_modified=new_last_modified,\n                            entity_tag=file_e_tag,\n                        )\n                    )  # primary key search is faster\n                    await conn.execute(query)\n                    update_succeeded = True\n            if not update_succeeded:\n                logger.error(\"Could not update file metadata for '%s'\", file_uuid)\n
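\n    # Hedged note: expo() (imported from .utils) is assumed to yield an exponentially growing\n    # backoff, e.g. roughly 1, 2, 4, 8, ... seconds; each iteration of the polling loop above\n    # sleeps for next(sleep_generator) until S3 reports a changed size/mtime or retries run out.\n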
\n    async def upload_link(self, user_id: str, file_uuid: str):\n        \"\"\"\n        Creates pre-signed upload link and updates metadata table when\n        link is used and upload is successfully completed\n\n        SEE _metadata_file_updater\n        \"\"\"\n\n        async with self.engine.acquire() as conn:\n            can: Optional[AccessRights] = await get_file_access_rights(\n                conn, int(user_id), file_uuid\n            )\n            if not can.write:\n                logger.debug(\n                    \"User %s was not allowed to upload file %s\", user_id, file_uuid\n                )\n                raise web.HTTPForbidden(\n                    reason=f\"User does not have enough access rights to upload file {file_uuid}\"\n                )\n\n        @retry(**postgres_service_retry_policy_kwargs)\n        async def _init_metadata() -> Tuple[int, str]:\n            async with self.engine.acquire() as conn:\n                fmd = FileMetaData()\n                fmd.simcore_from_uuid(file_uuid, self.simcore_bucket_name)\n                fmd.user_id = user_id  # NOTE: takes ownership of uploaded data\n\n                # if file already exists, we might want to update a time-stamp\n\n                # upsert file_meta_data\n                insert_stmt = pg_insert(file_meta_data).values(**vars(fmd))\n                do_nothing_stmt = insert_stmt.on_conflict_do_nothing(\n                    index_elements=[\"file_uuid\"]\n                )\n                await conn.execute(do_nothing_stmt)\n\n                return fmd.file_size, fmd.last_modified\n\n        file_size, last_modified = await _init_metadata()\n\n        bucket_name = self.simcore_bucket_name\n        object_name = file_uuid\n\n        # a parallel task is started which will update the metadata of the uploaded file\n        # once the upload has finished.\n        fire_and_forget_task(\n            self._metadata_file_updater(\n                file_uuid=file_uuid,\n                bucket_name=bucket_name,\n                object_name=object_name,\n                file_size=file_size,\n                last_modified=last_modified,\n            )\n        )\n        return self.s3_client.create_presigned_put_url(bucket_name, object_name)\n\n    async def download_link_s3(self, file_uuid: str, user_id: int) -> str:\n\n        # access layer\n        async with self.engine.acquire() as conn:\n            can: Optional[AccessRights] = await get_file_access_rights(\n                conn, int(user_id), file_uuid\n            )\n            if not can.read:\n                # NOTE: this is tricky. A user with read access can download any data!\n                # If write permission would be required, then shared projects as views cannot\n                # recover data in nodes (e.g. jupyter cannot pull work data)\n                #\n                logger.debug(\n                    \"User %s was not allowed to download file %s\", user_id, file_uuid\n                )\n                raise web.HTTPForbidden(\n                    reason=f\"User does not have enough rights to download {file_uuid}\"\n                )\n\n        bucket_name = self.simcore_bucket_name\n        async with self.engine.acquire() as conn:\n            stmt = sa.select([file_meta_data.c.object_name]).where(\n                file_meta_data.c.file_uuid == file_uuid\n            )\n            object_name: Optional[str] = await conn.scalar(stmt)\n\n            if object_name is None:\n                raise web.HTTPNotFound(\n                    reason=f\"File '{file_uuid}' does not exist in storage.\"\n                )\n\n        link = self.s3_client.create_presigned_get_url(bucket_name, object_name)\n        return link\n\n    async def download_link_datcore(self, user_id: str, file_id: str) -> URL:\n        api_token, api_secret = self._get_datcore_tokens(user_id)\n        return await datcore_adapter.get_file_download_presigned_link(\n            self.app, api_token, api_secret, file_id\n        )\n\n    # COPY -----------------------------\n\n    async def copy_file_s3_s3(self, user_id: str, dest_uuid: str, source_uuid: str):\n        # FIXME: operation MUST be atomic\n\n        # source is s3, destination is s3\n        to_bucket_name = self.simcore_bucket_name\n        to_object_name = dest_uuid\n        from_bucket = self.simcore_bucket_name\n        from_object_name = source_uuid\n        # FIXME: This is not async!\n        self.s3_client.copy_object(\n            to_bucket_name, to_object_name, from_bucket, from_object_name\n        )\n\n        # update db\n        async with self.engine.acquire() as conn:\n            fmd = FileMetaData()\n            fmd.simcore_from_uuid(dest_uuid, self.simcore_bucket_name)\n            fmd.user_id = user_id\n            ins = file_meta_data.insert().values(**vars(fmd))\n            await conn.execute(ins)\n\n    async def copy_file_s3_datcore(\n        self, user_id: str, dest_uuid: str, source_uuid: str\n    ):\n        session = get_client_session(self.app)\n\n        # source is s3, get link and copy to datcore\n        bucket_name = self.simcore_bucket_name\n        object_name = source_uuid\n        filename = source_uuid.split(\"/\")[-1]\n\n        s3_download_link = self.s3_client.create_presigned_get_url(\n            bucket_name, object_name\n        )\n\n        with tempfile.TemporaryDirectory() as tmpdir:\n            # FIXME: connect download and upload streams\n            local_file_path = os.path.join(tmpdir, filename)\n\n            # Downloads S3 -> local\n            await download_to_file_or_raise(session, s3_download_link, local_file_path)\n\n            # Uploads local -> DATCore\n            await self.upload_file_to_datcore(\n                _user_id=user_id,\n                _local_file_path=local_file_path,\n                _destination_id=dest_uuid,\n            )\n\n    async def copy_file_datcore_s3(\n        self,\n        user_id: str,\n        dest_uuid: str,\n        source_uuid: str,\n        filename_missing: bool = False,\n    ):\n        session = get_client_session(self.app)\n\n
        # 2 steps: get the download link for a local copy, then the upload link to S3\n        # TODO: This should be a redirect stream!\n        dc_link, filename = await self.download_link_datcore(\n            user_id=user_id, file_id=source_uuid\n        )\n        if filename_missing:\n            dest_uuid = str(Path(dest_uuid) / filename)\n\n        s3_upload_link = await self.upload_link(user_id, dest_uuid)\n\n        with tempfile.TemporaryDirectory() as tmpdir:\n            # FIXME: connect download and upload streams\n\n            local_file_path = os.path.join(tmpdir, filename)\n\n            # Downloads DATCore -> local\n            await download_to_file_or_raise(session, dc_link, local_file_path)\n\n            # Uploads local -> S3\n            s3_upload_link = URL(s3_upload_link)\n            async with session.put(\n                s3_upload_link,\n                data=Path(local_file_path).open(\"rb\"),\n                raise_for_status=True,\n            ) as resp:\n                logger.debug(\n                    \"Uploaded local -> SIMCore %s . Status %s\",\n                    s3_upload_link,\n                    resp.status,\n                )\n\n        return dest_uuid\n\n    async def copy_file(\n        self,\n        user_id: str,\n        dest_location: str,\n        dest_uuid: str,\n        source_location: str,\n        source_uuid: str,\n    ):\n        if source_location == SIMCORE_S3_STR:\n            if dest_location == DATCORE_STR:\n                await self.copy_file_s3_datcore(user_id, dest_uuid, source_uuid)\n            elif dest_location == SIMCORE_S3_STR:\n                await self.copy_file_s3_s3(user_id, dest_uuid, source_uuid)\n        elif source_location == DATCORE_STR:\n            if dest_location == DATCORE_STR:\n                raise NotImplementedError(\"copy files from datcore 2 datcore not impl\")\n            if dest_location == SIMCORE_S3_STR:\n                await self.copy_file_datcore_s3(user_id, dest_uuid, source_uuid)\n\n    async def deep_copy_project_simcore_s3(\n        self,\n        user_id: str,\n        source_project: Dict[str, Any],\n        destination_project: Dict[str, Any],\n        node_mapping: Dict[str, str],\n    ):\n        \"\"\"Parses a given source project and copies all related files to the destination project\n\n        Since all files are organized as\n\n            project_id/node_id/filename or links to datcore\n\n        this function creates a new folder structure\n\n            project_id/node_id/filename\n\n        and copies all files to the corresponding places.\n\n        Additionally, all external files from datcore are being copied and the paths in the destination\n        project are adapted accordingly\n\n        Lastly, the meta data db is kept in sync\n        \"\"\"\n        source_folder = source_project[\"uuid\"]\n        dest_folder = destination_project[\"uuid\"]\n\n        # access layer\n        async with self.engine.acquire() as conn, conn.begin():\n            source_access_rights = await get_project_access_rights(\n                conn, int(user_id), project_id=source_folder\n            )\n            dest_access_rights = await get_project_access_rights(\n                conn, int(user_id), project_id=dest_folder\n            )\n        if not source_access_rights.read:\n            logger.debug(\n                \"User %s was not allowed to read from project %s\",\n                user_id,\n                source_folder,\n            )\n            raise web.HTTPForbidden(\n                reason=f\"User does not have enough access rights to read from project '{source_folder}'\"\n            )\n\n        if not dest_access_rights.write:\n            logger.debug(\n                \"User %s was not allowed to write to project %s\",\n                user_id,\n                dest_folder,\n            )\n            raise web.HTTPForbidden(\n                reason=f\"User does not have enough access rights to write to project '{dest_folder}'\"\n            )\n\n        # build up naming map based on labels\n        uuid_name_dict = {}\n        uuid_name_dict[dest_folder] = destination_project[\"name\"]\n        for src_node_id, src_node in source_project[\"workbench\"].items():\n            new_node_id = node_mapping.get(src_node_id)\n            if new_node_id is not None:\n                uuid_name_dict[new_node_id] = src_node[\"label\"]\n
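\n        # Illustrative mapping (hedged example values): an object stored under\n        #   '<source_project>/<old_node_id>/data.h5' is copied below to\n        #   '<dest_project>/<new_node_id>/data.h5'; node_mapping translates old node ids to new ones.\n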
\"Listing all items under %s:%s/\",\n self.simcore_bucket_name,\n source_folder,\n )\n\n # Step 1: List all objects for this project replace them with the destination object name\n # and do a copy at the same time collect some names\n # Note: the / at the end of the Prefix is VERY important, makes the listing several order of magnitudes faster\n response = await aioboto_client.list_objects_v2(\n Bucket=self.simcore_bucket_name, Prefix=f\"{source_folder}/\"\n )\n\n contents: List = response.get(\"Contents\", [])\n logger.debug(\n \"Listed %s items under %s:%s/\",\n len(contents),\n self.simcore_bucket_name,\n source_folder,\n )\n\n for item in contents:\n source_object_name = item[\"Key\"]\n source_object_parts = Path(source_object_name).parts\n\n if len(source_object_parts) != 3:\n # This may happen once we have shared/home folders\n # FIXME: this might cause problems\n logger.info(\n \"Skipping copy of '%s'. Expected three parts path!\",\n source_object_name,\n )\n continue\n\n old_node_id = source_object_parts[1]\n new_node_id = node_mapping.get(old_node_id)\n if new_node_id is not None:\n old_filename = source_object_parts[2]\n dest_object_name = str(\n Path(dest_folder) / new_node_id / old_filename\n )\n\n copy_kwargs = dict(\n CopySource={\n \"Bucket\": self.simcore_bucket_name,\n \"Key\": source_object_name,\n },\n Bucket=self.simcore_bucket_name,\n Key=dest_object_name,\n )\n logger.debug(\"Copying %s ...\", copy_kwargs)\n\n # FIXME: if 5GB, it must use multipart upload Upload Part - Copy API\n # SEE https://botocore.amazonaws.com/v1/documentation/api/latest/reference/services/s3.html#S3.Client.copy_object\n await aioboto_client.copy_object(**copy_kwargs)\n\n # Step 2: List all references in outputs that point to datcore and copy over\n for node_id, node in destination_project[\"workbench\"].items():\n outputs: Dict = node.get(\"outputs\", {})\n for _, output in outputs.items():\n source = output[\"path\"]\n\n if output.get(\"store\") == DATCORE_ID:\n destination_folder = str(Path(dest_folder) / node_id)\n logger.info(\"Copying %s to %s\", source, destination_folder)\n\n destination = await self.copy_file_datcore_s3(\n user_id=user_id,\n dest_uuid=destination_folder,\n source_uuid=source,\n filename_missing=True,\n )\n assert destination.startswith(destination_folder) # nosec\n\n output[\"store\"] = SIMCORE_S3_ID\n output[\"path\"] = destination\n\n elif output.get(\"store\") == SIMCORE_S3_ID:\n destination = str(Path(dest_folder) / node_id / Path(source).name)\n output[\"store\"] = SIMCORE_S3_ID\n output[\"path\"] = destination\n\n fmds = []\n async with self._create_aiobotocore_client_context() as aioboto_client:\n\n # step 3: list files first to create fmds\n # Note: the / at the end of the Prefix is VERY important, makes the listing several order of magnitudes faster\n response = await aioboto_client.list_objects_v2(\n Bucket=self.simcore_bucket_name, Prefix=f\"{dest_folder}/\"\n )\n\n if \"Contents\" in response:\n for item in response[\"Contents\"]:\n fmd = FileMetaData()\n fmd.simcore_from_uuid(item[\"Key\"], self.simcore_bucket_name)\n fmd.project_name = uuid_name_dict.get(dest_folder, \"Untitled\")\n fmd.node_name = uuid_name_dict.get(fmd.node_id, \"Untitled\")\n fmd.raw_file_path = fmd.file_uuid\n fmd.display_file_path = str(\n Path(fmd.project_name) / fmd.node_name / fmd.file_name\n )\n fmd.user_id = user_id\n fmd.file_size = item[\"Size\"]\n fmd.last_modified = str(item[\"LastModified\"])\n fmds.append(fmd)\n\n # step 4 sync db\n async with self.engine.acquire() as conn, 
        async with self.engine.acquire() as conn, conn.begin():\n            # TODO: upsert ALL in one statement\n            for fmd in fmds:\n                query = sa.select([file_meta_data]).where(\n                    file_meta_data.c.file_uuid == fmd.file_uuid\n                )\n                # if the file already exists, delete it and re-insert below\n                rows = await conn.execute(query)\n                exists = await rows.scalar()\n                if exists:\n                    delete_me = file_meta_data.delete().where(\n                        file_meta_data.c.file_uuid == fmd.file_uuid\n                    )\n                    await conn.execute(delete_me)\n                ins = file_meta_data.insert().values(**vars(fmd))\n                await conn.execute(ins)\n\n    # DELETE -------------------------------------\n\n    async def delete_file(self, user_id: str, location: str, file_uuid: str):\n        \"\"\"Deletes a file given its fmd and location\n\n        Additionally requires a user_id for 3rd party auth\n\n        For internal storage, the db state should be updated upon completion via\n        Notification mechanism\n\n        For simcore.s3 we can use the file_name\n        For datcore we need the full path\n        \"\"\"\n        if location == SIMCORE_S3_STR:\n            # FIXME: operation MUST be atomic, transaction??\n\n            to_delete = []\n            async with self.engine.acquire() as conn, conn.begin():\n                can: Optional[AccessRights] = await get_file_access_rights(\n                    conn, int(user_id), file_uuid\n                )\n                if not can.delete:\n                    logger.debug(\n                        \"User %s was not allowed to delete file %s\",\n                        user_id,\n                        file_uuid,\n                    )\n                    raise web.HTTPForbidden(\n                        reason=f\"User '{user_id}' does not have enough access rights to delete file {file_uuid}\"\n                    )\n\n                query = sa.select(\n                    [file_meta_data.c.bucket_name, file_meta_data.c.object_name]\n                ).where(file_meta_data.c.file_uuid == file_uuid)\n\n                async for row in conn.execute(query):\n                    if self.s3_client.remove_objects(\n                        row.bucket_name, [row.object_name]\n                    ):\n                        to_delete.append(file_uuid)\n\n                await conn.execute(\n                    file_meta_data.delete().where(\n                        file_meta_data.c.file_uuid.in_(to_delete)\n                    )\n                )\n\n        elif location == DATCORE_STR:\n            # FIXME: review return inconsistencies\n            api_token, api_secret = self._get_datcore_tokens(user_id)\n            await datcore_adapter.delete_file(\n                self.app, api_token, api_secret, file_uuid\n            )\n\n    async def delete_project_simcore_s3(\n        self, user_id: str, project_id: str, node_id: Optional[str] = None\n    ) -> Optional[web.Response]:\n\n        \"\"\"Deletes all files from a given node in a project in simcore.s3 and updates the db accordingly.\n        If node_id is not given, then all the project files db entries are deleted.\n        \"\"\"\n\n        # FIXME: operation MUST be atomic. 
Mark for deletion and remove from db when deletion fully confirmed\n\n        async with self.engine.acquire() as conn, conn.begin():\n            # access layer\n            can: Optional[AccessRights] = await get_project_access_rights(\n                conn, int(user_id), project_id\n            )\n            if not can.delete:\n                logger.debug(\n                    \"User %s was not allowed to delete project %s\",\n                    user_id,\n                    project_id,\n                )\n                raise web.HTTPForbidden(\n                    reason=f\"User does not have delete access for {project_id}\"\n                )\n\n            delete_me = file_meta_data.delete().where(\n                file_meta_data.c.project_id == project_id,\n            )\n            if node_id:\n                delete_me = delete_me.where(file_meta_data.c.node_id == node_id)\n            await conn.execute(delete_me)\n\n        async with self._create_aiobotocore_client_context() as aioboto_client:\n            # Note: the / at the end of the Prefix is VERY important, makes the listing several orders of magnitude faster\n            response = await aioboto_client.list_objects_v2(\n                Bucket=self.simcore_bucket_name,\n                Prefix=f\"{project_id}/{node_id}/\" if node_id else f\"{project_id}/\",\n            )\n\n            objects_to_delete = []\n            for f in response.get(\"Contents\", []):\n                objects_to_delete.append({\"Key\": f[\"Key\"]})\n\n            if objects_to_delete:\n                response = await aioboto_client.delete_objects(\n                    Bucket=self.simcore_bucket_name,\n                    Delete={\"Objects\": objects_to_delete},\n                )\n                return response\n\n    # SEARCH -------------------------------------\n\n    async def search_files_starting_with(\n        self, user_id: int, prefix: str\n    ) -> List[FileMetaDataEx]:\n        # Avoids using list_files since it accounts for projects/nodes\n        # Storage should know NOTHING about those concepts\n        files_meta = deque()\n\n        async with self.engine.acquire() as conn, conn.begin():\n            # access layer\n            can_read_projects_ids = await get_readable_project_ids(conn, int(user_id))\n            has_read_access = (\n                file_meta_data.c.user_id == str(user_id)\n            ) | file_meta_data.c.project_id.in_(can_read_projects_ids)\n\n            stmt = sa.select([file_meta_data]).where(\n                file_meta_data.c.file_uuid.startswith(prefix) & has_read_access\n            )\n\n            async for row in conn.execute(stmt):\n                meta_extended = to_meta_data_extended(row)\n                files_meta.append(meta_extended)\n\n        return list(files_meta)\n\n    async def create_soft_link(\n        self, user_id: int, target_uuid: str, link_uuid: str\n    ) -> FileMetaDataEx:\n\n        # validate link_uuid\n        async with self.engine.acquire() as conn:\n            # TODO: select exists(select 1 from file_meta_data where file_uuid=12)\n            found = await conn.scalar(\n                sa.select([file_meta_data.c.file_uuid]).where(\n                    file_meta_data.c.file_uuid == link_uuid\n                )\n            )\n            if found:\n                raise ValueError(f\"Invalid link {link_uuid}. Link already exists\")\n\n        # validate target_uuid\n        target = await self.list_file(str(user_id), SIMCORE_S3_STR, target_uuid)\n        if not target:\n            raise ValueError(\n                f\"Invalid target '{target_uuid}'. 
File does not exist for this user\"\n            )\n\n        # duplicate target and change the following columns:\n        target.fmd.file_uuid = link_uuid\n        target.fmd.file_id = link_uuid  # NOTE: api-server relies on this id\n        target.fmd.is_soft_link = True\n\n        async with self.engine.acquire() as conn:\n            stmt = (\n                file_meta_data.insert()\n                .values(**attr.asdict(target.fmd))\n                .returning(literal_column(\"*\"))\n            )\n\n            result = await conn.execute(stmt)\n            link = to_meta_data_extended(await result.first())\n            return link\n\n    async def synchronise_meta_data_table(\n        self, location: str, dry_run: bool\n    ) -> Dict[str, Any]:\n\n        PRUNE_CHUNK_SIZE = 20\n\n        removed: List[str] = []\n        to_remove: List[str] = []\n\n        async def _prune_db_table(conn):\n            if not dry_run:\n                await conn.execute(\n                    file_meta_data.delete().where(\n                        file_meta_data.c.object_name.in_(to_remove)\n                    )\n                )\n            logger.info(\n                \"%s %s orphan items\",\n                \"Would have deleted\" if dry_run else \"Deleted\",\n                len(to_remove),\n            )\n            removed.extend(to_remove)\n            to_remove.clear()\n\n        # ----------\n\n        assert (  # nosec\n            location == SIMCORE_S3_STR\n        ), \"Only with s3, no other sync implemented\"  # nosec\n\n        if location == SIMCORE_S3_STR:\n\n            # NOTE: only valid for simcore, since datcore data is not in the database table\n            # let's get all the files in the table\n            logger.warning(\n                \"synchronisation of database/s3 storage started, this will take some time...\"\n            )\n\n            async with self.engine.acquire() as conn, self._create_aiobotocore_client_context() as aioboto_client:\n\n                number_of_rows_in_db = await conn.scalar(file_meta_data.count()) or 0\n                logger.warning(\n                    \"Total number of entries to check %d\",\n                    number_of_rows_in_db,\n                )\n\n                assert isinstance(aioboto_client, AioBaseClient)  # nosec\n\n                async for row in conn.execute(\n                    sa.select([file_meta_data.c.object_name])\n                ):\n                    s3_key = row.object_name  # type: ignore\n\n                    # now check if the file exists in S3\n                    # SEE https://www.peterbe.com/plog/fastest-way-to-find-out-if-a-file-exists-in-s3\n                    response = await aioboto_client.list_objects_v2(\n                        Bucket=self.simcore_bucket_name, Prefix=s3_key\n                    )\n                    if response.get(\"KeyCount\", 0) == 0:\n                        # this file does not exist in S3\n                        to_remove.append(s3_key)\n\n                    if len(to_remove) >= PRUNE_CHUNK_SIZE:\n                        await _prune_db_table(conn)\n\n                if to_remove:\n                    await _prune_db_table(conn)\n\n                assert len(to_remove) == 0  # nosec\n                assert len(removed) <= number_of_rows_in_db  # nosec\n\n                logger.info(\n                    \"%s %d entries \",\n                    \"Would delete\" if dry_run else \"Deleting\",\n                    len(removed),\n                )\n\n        return {\"removed\": removed}\n","sub_path":"services/storage/src/simcore_service_storage/dsm.py","file_name":"dsm.py","file_ext":"py","file_size_in_byte":43257,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"8535083","text":"#!/usr/bin/env python3\n# vim: set fileencoding=utf-8 :\n\nimport argparse\nimport importlib\nimport time\nimport logging\n\nimport pandas as pandas\n\nfrom room007.eval import cross_validation\nfrom room007.data import info\n\nlogger = logging.getLogger()\n\n\nclass ArgumentParser(object):\n    def __init__(self):\n        parser = argparse.ArgumentParser(description='Predict with a model.')\n        parser.add_argument('-e', '--eval', help='apply to the test data', action='store_true')\n        parser.add_argument('-n', '--set-name', default='../interim',\n                            help='name of the pre-processed data set')\n        parser.add_argument('-m', '--model', default='word_tag_predictor',\n                            help=('the name of the model to train and test, '\n                                  'should be an importable module containing a 
Predictor class'))\n parser.add_argument('-a', '--args', nargs='*', default=[],\n help='arguments passed to the constructor of Predictor, values with \":\" are considered kwargs')\n self.parser = parser\n\n def parse_args(self):\n args = self.parser.parse_args()\n args.kwargs = dict(arg.split(':') for arg in args.args)\n args.args = [arg for arg in args.args if ':' not in arg]\n return args\n\n\ndef get_arguments():\n parser = ArgumentParser()\n return parser.parse_args()\n\n\ndef write_predictions(test_name, test_dataframe):\n filename = '{}.out.csv'.format(test_name)\n test_dataframe.to_csv(filename, columns=['id','tags'], index=False)\n\n\ndef get_data(set_name):\n logger.info('loading the data')\n processed_info = info.ProcessedData(set_name)\n train_data_frames = info.get_train_dataframes(processed_info, split_tags=True)\n test_data_frames = info.get_test_dataframes(processed_info)\n logger.info('data loaded')\n return train_data_frames, test_data_frames\n\n\ndef _create_predictor(model, args, kwargs):\n logger.info('creating predictor')\n predictor_factory = importlib.import_module(model).Predictor\n predictor = predictor_factory(*args, **kwargs)\n logger.info('predictor created')\n return predictor\n\n\ndef time_function(fun):\n def timed_fun(*args, **kwargs):\n logger.info('started at {}'.format(time.strftime('%H:%M:%S', time.gmtime())))\n start_time = time.time()\n returns = fun(*args, **kwargs)\n end_time = time.time()\n time_needed = end_time - start_time\n logger.info('finished at {}'.format(time.strftime('%H:%M:%S', time.gmtime())))\n logger.info(\"it took: {0:.0f} seconds\".format(time_needed))\n return returns, time_needed\n return timed_fun\n\n\ndef evaluate_on_test_data(predictor, train_data_frames, test_data_frames):\n train_data = pandas.concat([data for _, data in train_data_frames.items()], ignore_index=True)\n predictor.fit(train_data)\n for frame_name, test_data in test_data_frames.items():\n logger.info('start predicting for {}, test size {}'.format(frame_name, len(test_data)))\n predictions = predictor.predict(test_data)\n logger.info('done predicting')\n test_data['tags'] = predictions\n test_data['tags'] = test_data['tags'].apply(' '.join)\n logger.info('writing result to file')\n write_predictions(frame_name, test_data)\n logger.info('result written')\n\n\ndef cross_validate(predictor, train_data_frames):\n logger.info('started cross-validation testing')\n result = cross_validation.cross_validate(predictor, train_data_frames)\n logger.info('finished cross-validation testing')\n logger.info(\"cross-validation result: {}\".format(result))\n return result\n\n\n@time_function\ndef main():\n args = get_arguments()\n train_data_frames, test_data_frames = get_data(args.set_name)\n predictor = _create_predictor(args.model, args.args, args.kwargs)\n if args.eval:\n evaluate_on_test_data(predictor, train_data_frames, test_data_frames)\n else:\n cross_validate(predictor, train_data_frames)\n\n\nif __name__ == \"__main__\":\n logging.basicConfig(level=logging.INFO)\n main()\n","sub_path":"src/room007/models/train_and_predict.py","file_name":"train_and_predict.py","file_ext":"py","file_size_in_byte":4043,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"177256305","text":"#!/usr/bin/python3\n\nimport pygame, sys\nfrom pygame.locals import *\n\nfrom GUI import *\nfrom GUI.geo import Rectangle\nfrom GUI.locals import *\n\nimport random\n\n#initializer\npygame.font.init()\n\n#fonts\nmyfont = pygame.font.SysFont('Comic 
Sans', 30)\n\n#display stuff\nFPS = 30\nClock = pygame.time.Clock()\nDISPLAYSURF = pygame.display.set_mode((400, 300), 0, 32)\npygame.display.set_caption('hangman')\n\n#colors\nBLACK = ( 0, 0, 0)\nWHITE = (255, 255, 255)\n\n#ints\nbodyParts = 0\n\n#strings\ntopic = (random.choice(list(open('topics.txt'))))\ntopicList = topic.split(',')\nGameString = topicList[0]\ngenre = topicList[1]\ngenre = genre[:-1]\nguessString=''\nfor i in range(len(GameString)):\n if GameString[i] == ' ':\n guessString += ' '\n else:\n guessString += \"-\"\naddString = ''\nfalseString = ''\n\n#text and border from GUI\ntxtbx = InLineTextBox((5,1), 200, BLACK,anchor='TOPLEFT', default_text='guess: ')\nfocus = FocusSelector(txtbx)\nfocus.select(0)\nbound = Rectangle((0,0), (400,300), BLACK, Rectangle.BORDER)\n\n#booleans\nwin = False\nlose = False\nplaying = True\n\n#main loop\nwhile True:\n mouse = pygame.mouse.get_pos()\n for event in pygame.event.get():\n if event.type == QUIT:\n pygame.quit()\n sys.exit()\n elif event.type == KEYDOWN:\n if event.key == K_ESCAPE:\n print(\"Stopped Early by user\")\n pygame.quit()\n sys.exit() # Time to leave\n elif event.key == K_F1:\n #new game reset\n win = lose = False\n playing = True\n topic = (random.choice(list(open('topics.txt'))))\n topicList = topic.split(',')\n GameString = topicList[0]\n genre = topicList[1]\n genre = genre[:-1]\n guessString = ''\n for i in range(len(GameString)):\n if GameString[i] == ' ':\n guessString += ' '\n else:\n guessString += \"-\"\n falseString = ''\n bodyParts = 0\n \n elif event.key == K_RETURN:\n #check if txtbox char is in gameString\n if playing:\n if GameString.find(txtbx.text) != -1:\n print (\"Correct\")\n #create string for correct placement of char\n for char in GameString:\n if char == txtbx.text:\n addString+=char\n else:\n addString+='-'\n #add placement string to guessstring\n for i in range(len(guessString)):\n if guessString[i] == '-' and addString[i] != '-':\n guessString = guessString[:i] + addString[i] + guessString[i+1:]\n addString = ''\n #check for win\n if guessString == GameString:\n win = True\n playing = False\n else:\n #if wrong: add next body part and wrong char to falseString\n print (\"Wrong\")\n bodyParts+=1\n falseString += \" \" + txtbx.text\n txtbx.clear()\n else:\n if playing:\n focus.selected().update(event)\n\n DISPLAYSURF.fill(WHITE)\n \n #textbox and border\n bound.render(DISPLAYSURF)\n txtbx.render(DISPLAYSURF)\n \n #stand for character\n pygame.draw.aaline(DISPLAYSURF, BLACK, (250, 10), (200, 10), 1)\n pygame.draw.aaline(DISPLAYSURF, BLACK, (200, 10), (200, 30), 1)\n pygame.draw.aaline(DISPLAYSURF, BLACK, (250, 200), (250, 10), 1)\n pygame.draw.aaline(DISPLAYSURF, BLACK, (250, 200), (150, 200), 1)\n \n #text \n textsurface = myfont.render('Genre: ' + genre, True, (0,0,0))\n DISPLAYSURF.blit(textsurface,(20,60))\n \n textsurface = myfont.render(guessString, True, (0,0,0))\n DISPLAYSURF.blit(textsurface,(180,230))\n \n textsurface = myfont.render('Incorrect: ' + falseString, True, (0,0,0))\n DISPLAYSURF.blit(textsurface,(5,280))\n \n #win/lose conditions\n if win:\n textsurface = myfont.render('You Win!', True, (0,0,0))\n DISPLAYSURF.blit(textsurface,(60,230))\n \n if lose:\n textsurface = myfont.render('You Lose Word: ' + GameString, True, (0,0,0))\n DISPLAYSURF.blit(textsurface,(60,260))\n \n #character\n def Head():\n hold = pygame.draw.circle(DISPLAYSURF, BLACK, (200, 50), 20, 1)\n def Body():\n pygame.draw.aaline(DISPLAYSURF, BLACK, (200, 70), (200, 150), 1)\n def LArm():\n 
pygame.draw.aaline(DISPLAYSURF, BLACK, (170, 110), (200, 95), 1)\n    def RArm():\n        pygame.draw.aaline(DISPLAYSURF, BLACK, (230, 110), (200, 95), 1)\n    def LLeg():\n        pygame.draw.aaline(DISPLAYSURF, BLACK, (170, 175), (200, 150), 1)\n    def RLeg():\n        pygame.draw.aaline(DISPLAYSURF, BLACK, (230, 175), (200, 150), 1)\n    Character = [Head,Body,LArm,RArm,LLeg,RLeg]\n    \n    #loop through active body parts\n    for x in range(bodyParts):\n        Character[x]()\n        #check for a loss\n        if x == len(Character)-1:\n            lose = True\n            playing = False\n\n    #update\n    pygame.display.update()\n    Clock.tick(FPS)\n\n    \n    \n","sub_path":"hang.py","file_name":"hang.py","file_ext":"py","file_size_in_byte":5484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"638022344","text":"import random\nimport os\n\nready_word = [] # Array with the characters revealed so far; a character is written here only if it has not yet been guessed and occurs in the hidden word\nword = [] # Array with the characters of the hidden word; this array loses a character once the user has guessed it.\nhistory = [] # Array that stores every character the user enters\nattention_input = False # Trigger that changes to True if the user enters the same correct character a second time\nlives = 10 # Number of attempts/lives of the user; decreases when the user enters a character that does not exist in the hidden word\ncount_end = 0\nattention_wrong_input = False\nhangman = []\n\ndef input_check_quit(text_command): # function for exiting the game when the user types \"QUIT\"\n    input_command = input(text_command)\n    if(input_command.lower() == \"quit\"):\n        print(\"Good bye!\")\n        quit()\n    else:\n        return input_command.lower()\n\ndef take_grafics_from_f(): # loads the gallows graphics from the file for the selected difficulty level\n    global lives\n    file_dir = os.path.dirname(os.path.abspath(__file__))\n    my_file = os.path.join(file_dir, \"Grafics.txt\")\n    while True:\n        try:\n            print(\"Select difficulty:\\n 1 - Easy\\n 2 - Normal\\n 3 - Hard\")\n            command = int(input_check_quit(\"Command - \"))\n            with open(my_file,\"r\") as words_f:\n                try:\n                    if command == 2:\n                        lives = 8\n                    elif command == 3:\n                        lives = 6\n                    os.system(\"cls || clear\")\n                    return words_f.read().split(\",,\")[command-1]\n                except IndexError:\n                    continue\n        except ValueError:\n            print(\"Please input a number!\")\n            continue\n\n\ndef take_words_from_f(): # selects a category of words and loads them from the matching list\n    file_dir = os.path.dirname(os.path.abspath(__file__))\n    my_file = os.path.join(file_dir, \"Words.txt\")\n    while True:\n        try:\n            print(\"Select the type of words:\\n 1 - Home\\n 2 - Cities\\n 3 - Countries\\n 4 - Animals \")\n            command = int(input_check_quit(\"Command - \"))\n            with open(my_file,\"r\") as words_f:\n                try:\n                    os.system(\"cls || clear\")\n                    return words_f.readlines()[command-1]\n                except IndexError:\n                    print(f\"Please select command from 1 to {len(words_f.readlines())}\")\n                    continue\n        except ValueError:\n            print(\"Please input a number!\")\n            continue\n    \ndef check_quant_symb (list_check, letter_check): # Counts how many times a character already occurs in an array (in our case, how many identical letters are in the history array)\n    count = 0 \n    for element in range(len(list_check)): # Every element in the array is checked\n        if list_check[element] == letter_check: # If the array element equals the character, increase the counter by one\n            count += 1\n    return count\n 
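\n# Note: this helper is equivalent to the built-in list.count, e.g.\n# check_quant_symb(['a','b','a'], 'a') == ['a','b','a'].count('a') == 2;\n# it is kept as an explicit loop with the counting spelled out.\n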
\ndef start(): # Base function that launches the game: greets the player and assigns values to some variables\n    global ready_word\n    global word\n    global hangman\n    os.system(\"cls || clear\")\n    print(\"Welcome to Hangman!\")\n    hangman = take_grafics_from_f().split(\",\")\n    hangman.reverse()\n    words = take_words_from_f().split(\",\") # We divide the string into words; the separator is a comma\n    word = list(words[random.randint(0,len(words)-1)]) # Select a random word and split it into characters, placing them in an array\n    if \"\\n\" in word:\n        word.remove(\"\\n\")\n    for element in range(len(word)): # Fill the array with placeholder characters matching the hidden word. This array is updated every time a correct character is entered\n        ready_word.append(\"_\")\n    print(f\"You have {lives} attempts to guess the word \\nHere is your word - \" + \" \".join(ready_word))\n    #print(word) #<-------TEST\n    \ndef user_input(): # Handles user input and everything that reads it\n    print(\"Please input ANY letter\")\n    letter = \"\"\n    while len(letter) != 1: # Checks the number of characters entered; if it is not exactly 1, ask again\n        letter = input_check_quit(\"Command - \")\n        history.append(letter)\n        if len(letter) != 1: # If the number of characters is not equal to 1, show a notification\n            print(\"Number of letters isn't equal to \\\"1\\\"!\")\n    return letter\n\ndef prog_logic(user_input): # This function is responsible for the main game logic\n    global ready_word\n    global word\n    global lives\n    global attention_input\n    global history\n    global count_end\n    global attention_wrong_input\n    if check_quant_symb(history,user_input) > 1: # If the entered character already occurs in \"history\" more than once, set the trigger that signals a repeated guess \n        attention_input = True \n    if (user_input not in \"\".join(word).lower() and (user_input not in \"\".join(ready_word).lower()) and (check_quant_symb(history,user_input) < 2)): # If the entered character is not in the hidden word (and was not guessed before), take away one attempt\n        lives-=1\n        attention_wrong_input = True\n    for element in range(len(word)): # Walk over each index in the array of the hidden word\n        if word[element].lower() == user_input: # If the entered character equals the character in the hidden word \n            ready_word[element] = word[element] # Change the placeholder character of the display array to the entered character\n            word[element] = \" \" # Change the same character in the array of the hidden word to a space\n            count_end += 1\n    return ready_word\n\ndef prog_output(ready_word): # This function is used for displaying game data\n    global word\n    global attention_input\n    global lives\n    global attention_wrong_input\n    os.system(\"cls || clear\")\n    #print(word) #<-------- test\n    print(f\"{lives} attempts remaining\")\n    print(\" \".join(ready_word))\n    if attention_input: \n        print(\"History: \"+\"|\".join(history))\n        print(\"Be attentive!\")\n        attention_input = False\n    elif attention_wrong_input == True:\n        print(\"History: \"+\"|\".join(history))\n        attention_wrong_input = False\n    print(hangman[lives])\n    print(\"\\n\")\n\ndef winner(): # checks the win/lose condition of the game\n    if count_end >= len(word):\n        print(\"You're the WINNER! Take a candy!\")\n    elif lives <=0:\n        print(\"You're 
HANGMAN!\") \n\ndef main(): # the main function that defines the entry point to the program\n start()\n while count_end < len(word) and lives > 0: \n user_input_variable = user_input()\n prog_output(prog_logic(user_input_variable)) \n winner()\n\nif __name__ == \"__main__\": # function that protects the program from unauthorized launch\n main()","sub_path":"Hangman_+_HotWarmCold/Hangman_1.py","file_name":"Hangman_1.py","file_ext":"py","file_size_in_byte":7282,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"633226973","text":"from django.conf.urls import url\n\nfrom sendcloud.views import (\n AddressListView,\n AddressCreateView,\n MemberListView,\n MailTemplateListView\n)\n# from .views import MailTemplateListView\nfrom .views.dashboard import DashboardView\nfrom .views.analytics import InvalidStatView\n# from .views.address import (\n# AddressListView,\n# AddressCreateView\n# )\nfrom .views.api_user import APIUserListView\n\n\nurlpatterns = [\n\n url(r'^dashboard/$', DashboardView.as_view(), name='send_cloud_dashboard'),\n\n url(r'^template/$', MailTemplateListView.as_view(), name='send_cloud_template_list'),\n\n url(r'^address/$', AddressListView.as_view(), name='send_cloud_address_list'),\n url(r'^address/add/?$', AddressCreateView.as_view(), name='send_cloud_address_add'),\n\n url(r'^members/$', MemberListView.as_view(), name='member_list'),\n\n\n url(r'^analytics/invalid/$', InvalidStatView.as_view(), name='send_cloud_invalid_stat'),\n\n url(r'^api/users/$', APIUserListView.as_view(), name='send_cloud_api_user'),\n]\n","sub_path":"sendcloud/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1024,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"353916266","text":"# -*- coding:utf-8 -*-\n\nfrom base import Base\nfrom excepcions import *\nimport re\nimport urllib.request as req\nfrom urllib.request import Request, urlopen\n\n\nclass PobBase(Base):\n \"Prepara la petició a l'API\"\n servei = 'pob'\n \n def setOperacio(self, op):\n \"Configura l'operació del servei\"\n if op in ['cerca', 'sug']:\n self.op = op\n if self.op == 'cerca':\n self.q = None\n self.tipus = None\n else:\n raise OperacioNoPermesa('Operació no permesa. 
Triï entre \"cerca\" o \"sug\"')\n\n def addCommonParamQ(self, q):\n \"Afegeix un paràmetre 'q' a l'URL\"\n #TODO control error\n q = str(q)\n if (len(q)>1):\n self.q = q\n else:\n raise PobBaseParamQNoPermes('La longitud ha de ser més gran de 1')\n \n def addCommonParamTipus(self, tipus):\n \"Afegeix un paràmetre 'tipus' a la URL\"\n tipus = str(tipus)\n if tipus in ['cat','prov','mun','com']: #ToDo, més tipus\n self.tipus = tipus\n else:\n raise PobBaseParamTipusNoPermes('El filtre \"tipus\" només pot ser un string')\n #dhj \n \n def addCercaParamSim(self, sim):\n\t sim = str(sim)\n\t if sim in ['0','1','2']:\n\t self.sim = sim\n\t else:\n\t\t raise PobBaseParamSimNoPermeses('Parametres no permesos')\n\t\t\n\t# def addCercaParamSelect(self, selec):\n\t# \tselec = str(selec)\n\t\n\t# def addCercaParamOrderby(self, order):\n\t# \torder = str(order)\n\t\t\n\t# def addCercaParamPosicio(self, posicio):\n\t# \tposicio = str(posicio)\n\t\t\n\t# Common getters\n\t\n def getOperacio(self):\n \"Retorna l'operació especificada a setOperacio()\"\n # sobreescrivint Base.getOperacio()\n return self.op\n \n def getServei(self):\n \"Retorna el servei que és sempre 'pob'\"\n # sobreescrivint Base.getServei()\n return self.servei\n\n def __getUrlCerca(self):\n \"Construcció específica de l'URL per l'operació dades\"\n if self.q or self.tipus:\n \tself.afegeixUrl('&p=')\n\n if self.q:\n self.afegeixUrl('q/', self.q)\n if self.tipus:\n self.afegeixUrl('tipus/', self.tipus)\n\n def __getUrlSug(self):\n \"Construcció específica de l'URL per l'operació dades\"\n if not (self.q or self.tipus):\n raise FiltreObligatori(\"Dins l'operació 'dades' és obligatori especificar el paràmetre 'id' o el 'i'\")\n \n if self.i:\n self.afegeixUrl('&i=', self.i)\n if self.tipus:\n self.afegeixUrl('&tipus=', self.tipus)\n \n def getUrl(self):\n \"Retorna l'url de la petició\"\n if self.op == None:\n raise OperacioNoEspecificada(\"Error en especificar l'operació: és un paràmetre obligatori!\")\n # cridem a la funció superior per obtenir l'url + bàsic\n self.url = super(PobBase,self).getUrl()\n if self.op == 'cerca':\n self.__getUrlCerca()\n if self.op == 'sug' and self.tipus:\n self.__getUrlSug()\n return self.url\n\n def getData(self):\n\t '''Obté les dades -> string'''\n\t url = ''.join(self.getUrl())\n\t print (\"Connexting to ...\"+url)\n\t request = Request(url, headers={'User-Agent': 'Mozilla/5.0'})\n\t sol = urlopen(request)\n\t self.data = sol.read().decode('utf-8')\n\t return self.data\n\t #print (self.data)\n\ndata = None\n\ndef buscaId(s):\n if globals()['data']:\n return __parse(s, data) # evitem tornar a fer la petició!\n else:\n import urllib.request as req # ho importem dins la funció\n from io import StringIO # i només per primera vegada\n url = 'http://api.idescat.cat/pob/v1/nodes.json'\n sol = req.urlopen(url)\n globals()['data'] = sol.read().decode('utf-8')\n return __parse(s, data)\n\ndef __parse(value, obj):\n obj = obj.split(',\"')\n result = None\n for i in range(len(obj)):\n if value in obj[i]:\n result = obj[i+1] # traiem la resta que no\n result = result[5:-1] # ens interessa\n return result\n if result == None:\n print(\"No s'ha trobat l'id\")\n \ndef debug():\n \"Per facilitar la feina de depuració\"\n global c\n c = PobBase()\n c.setOperacio('cerca')\n #.addId(buscaId('Collbató'))\n #c.addTipus('com,cat')\n\n\n 
\n","sub_path":"idescat_py/pob_base.py","file_name":"pob_base.py","file_ext":"py","file_size_in_byte":4345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"572639116","text":"import pytest\n\nfrom app import create_app, db\nfrom config import TestConfig\n\n@pytest.fixture\ndef set_db(request):\n app = create_app(TestConfig)\n app_context = app.app_context()\n app_context.push()\n db.create_all()\n\n def teardown():\n db.session.remove()\n db.drop_all()\n app_context.pop()\n request.addfinalizer(teardown)\n","sub_path":"tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"330119517","text":"import unittest\nfrom marinov import *\n\nclass TestMarkov(unittest.TestCase):\n def test_split_string(self):\n text = \"je suis une loutre, je bois de la soupe\"\n self.assertEqual([[\"je\", \"suis\", \"une\", \"loutre\"],[\"je\", \"bois\", \"de\", \"la\", \"soupe\"]], get_words(text))\n\n def test_pick_second_word_with_one_option(self):\n text = get_words(\"je suis une loutre je mange du thé\")\n word = \"une\"\n self.assertEqual(\"loutre\", get_next_word(word, text))\n word = \"suis\"\n self.assertEqual(\"une\", get_next_word(word, text))\n\n def test_pick_second_word_with_2_options(self):\n text = get_words(\"je suis une loutre je mange du thé\")\n word = \"je\"\n self.assertIn(get_next_word(word, text), [\"suis\", \"mange\"])\n\n def test_pick_second_word_with_current_word_at_end(self):\n text = get_words(\"je suis une loutre je mange du thé\")\n word = \"thé\"\n self.assertIn(get_next_word(word, text), text)\n\nunittest.main()\n","sub_path":"markovChain/python/test_marinov.py","file_name":"test_marinov.py","file_ext":"py","file_size_in_byte":987,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"561589442","text":"from dataclasses import field\n\nimport direct.actor.Actor\nfrom panda3d.core import Vec3\nfrom panda3d.core import NodePath\nfrom panda3d.core import ClockObject\nfrom panda3d.bullet import BulletWorld\nfrom panda3d.bullet import BulletRigidBodyNode\n\nfrom wecs.core import Component\nfrom wecs.core import System\nfrom wecs.core import and_filter\nfrom wecs.core import or_filter\nfrom wecs.core import UID\n\nfrom wecs.mechanics.clock import Clock\n\n\n@Component()\nclass Model:\n model_name: str = ''\n node: NodePath = None\n\n\n@Component()\nclass Actor:\n pass\n\n\n@Component()\nclass CollidableGeometry:\n collide_mask: int = 1<<0 # bit 0 set\n\n\n@Component()\nclass FlattenStrong:\n pass\n\n\n# Spatial context\n\n@Component()\nclass Scene:\n node: NodePath = field(default_factory=lambda:base.render)\n\n\n# This should vanish... It means \"Initial position\"... 
I hope.\n@Component()\nclass Position:\n value: Vec3 = field(default_factory=lambda:Vec3(0,0,0))\n\n\n# Bullet physics\n\n@Component()\nclass PhysicsWorld:\n timestep: float = 0.0\n world: BulletWorld = field(default_factory=BulletWorld)\n\n\n@Component()\nclass PhysicsBody:\n node: NodePath = None\n body: NodePath = field(default_factory=BulletRigidBodyNode)\n timestep: float = 0.0\n world: UID = None\n _world: UID = None\n scene: UID = None\n _scene: UID = None\n\n\n# Loading / reparenting / destroying models\n\nclass LoadModels(System):\n entity_filters = {\n 'model': and_filter([\n Model,\n Position,\n or_filter([\n Scene,\n PhysicsBody,\n ]),\n ]),\n }\n\n # TODO\n # Only Model is needed for loading, which then could be done\n # asynchronously.\n def init_entity(self, filter_name, entity):\n # Load\n model = entity[Model]\n if model.node is None:\n if Actor in entity:\n model.node = direct.actor.Actor.Actor(model.model_name)\n else:\n model.node = base.loader.load_model(model.model_name)\n\n # Load hook\n self.post_load_hook(model.node, entity)\n\n # Attach to PhysicsBody or Scene; former takes precedence.\n if CollidableGeometry in entity:\n model.node.set_collide_mask(entity[CollidableGeometry].collide_mask)\n if FlattenStrong in entity:\n model.node.flatten_strong()\n if PhysicsBody in entity:\n parent = entity[PhysicsBody].node\n else:\n parent = entity[Scene].node\n model.node.reparent_to(parent)\n model.node.set_pos(entity[Position].value)\n\n # TODO\n # Destroy node if and only if the Model is removed.\n def destroy_entity(self, filter_name, entity, component):\n # Remove from scene\n if isinstance(component, Model):\n component.node.destroy_node()\n else:\n entity.get_component(Model).node.destroy_node()\n\n def post_load_hook(self, node, entity):\n pass\n\n\n# Bullet physics\n\nclass SetUpPhysics(System):\n entity_filters = {\n 'world': and_filter([PhysicsWorld]),\n 'body': and_filter([PhysicsBody]),\n }\n\n def init_entity(self, filter_name, entity):\n if filter_name == 'body':\n body = entity[PhysicsBody]\n body.node = NodePath(body.body)\n\n def update(self, entities_by_filter):\n for entity in entities_by_filter['body']:\n body = entity[PhysicsBody]\n # Has the physics world simulating this entity changed?\n if body.world != body._world:\n if body._world is not None:\n self.world[body._world][PhysicsWorld].remove_rigid_body()\n body._world = None\n if body.world is not None:\n world_cmpt = self.world[body.world][PhysicsWorld]\n world_cmpt.world.attach_rigid_body(body.body)\n if Position in entity:\n body.node.set_pos(entity[Position].value)\n body._world = body.world\n # Has the scene that this node is attached to changed?\n if body.scene != body._scene:\n scene = self.world[body.scene][Scene]\n body.node.reparent_to(scene.node)\n body._scene = body.scene\n\n def destroy_entity(self, filter_name, entity, components_by_type):\n pass\n\n\nclass DeterminePhysicsTimestep(System):\n entity_filters = {\n # FIXME: PhysicsWorld.clockshould be an entity._uid\n # (or None if the same)\n 'world': and_filter([PhysicsWorld, Clock]),\n 'body': and_filter([PhysicsBody]),\n }\n\n def update(self, entities_by_filter):\n timesteps = {}\n for entity in entities_by_filter['world']:\n world = entity[PhysicsWorld]\n world.timestep = entity[Clock].timestep\n timesteps[entity._uid] = world.timestep\n for entity in entities_by_filter['body']:\n body = entity[PhysicsBody]\n if body.world in timesteps:\n body.timestep = timesteps[body.world]\n\n\nclass DoPhysics(System):\n entity_filters = {\n 
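# every entity that owns a PhysicsWorld gets its simulation stepped once per update()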
\n        'world': and_filter([PhysicsWorld]),\n    }\n\n    def update(self, entities_by_filter):\n        for entity in entities_by_filter['world']:\n            world = entity.get_component(PhysicsWorld)\n            world.world.do_physics(world.timestep)\n","sub_path":"wecs/panda3d/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":5357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"583115528","text":"import modular_inv\n\ndef CRT(a, p, n):\n    P = 1\n    for i in range(0, n):\n        P *= p[i]\n\n    inv_temp = 0\n    p_temp = 0\n    final_sol = 0\n    for i in range(0, n):\n        p_temp = P//p[i] \n        inv_temp = modular_inv.mod_inv(p_temp, p[i]) \n        final_sol += (a[i] * inv_temp * p_temp) % P\n    return final_sol % P \n\n","sub_path":"Python/NumberTheory/crt.py","file_name":"crt.py","file_ext":"py","file_size_in_byte":328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"157874832","text":"# coding: utf-8\nimport numpy as np\nimport pandas as pd\nfrom sklearn.tree import DecisionTreeClassifier\nfrom pandas import Series, DataFrame\nfrom sklearn.cross_validation import train_test_split\nimport matplotlib.pyplot as plt\nimport string\n\ntrain_fn = './Downloads/letter-recognition.data'\ndata_col = ['letter','x-box','y-box','width','high','onpix','x-bar','y-bar',\n            'x2bar','y2bar','xybar','x2ybr','xy2br','x-ege','xegvy','y-ege','yegvx']\nX = pd.read_csv(train_fn, sep=',', header=None, names=data_col,\n                skiprows=None, na_values='?', keep_default_na=False, engine='python')\n\n\ny = X['letter']\ny.head()\n\ncols = list(X)\ncols.pop(cols.index('letter'))\nX = X[cols]\nX.head()\n#get_ipython().magic(u'pinfo train_test_split')\n\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=0)\ny_train.head()\nX_train.shape, X_test.shape\n\ndef models_N_weights(X, y, M, k, max_depth):\n    model = []\n    model_weights = []\n    training_errors = []\n\n    N = X.shape[0]\n    w = np.ones(N) / N\n\n    for m in range(M):\n        h = DecisionTreeClassifier(max_depth=max_depth)\n        h.fit(X, y, sample_weight=w)\n        pred = h.predict(X)\n\n        eps = w.dot(pred != y)\n        alpha = (np.log((1 - eps)*(k - 1)) - np.log(eps)) / 2\n        w_new = np.where(y == pred, w * np.exp(-alpha), w * np.exp(alpha))\n        w = w_new / w_new.sum()\n\n        model.append(h)\n        model_weights.append(alpha)\n\n    return [model, model_weights]\n\n\n\ndef predict_joined_models(X, model, model_weights, frame, m):\n    pred = model[m].predict(X)\n    for i, idx in enumerate(frame.index):\n        t = frame.get_value(idx, pred[i])\n        frame.set_value(idx, pred[i], t + model_weights[m])\n    # pick the column label with the largest accumulated vote in each row\n    return frame.idxmax(axis=1)\n\n\n\ndef error_func(y, y_hat):\n    correct_pred = (np.array(y_hat) == np.array(y))\n    Err = 1 - float(sum(correct_pred))/len(correct_pred)\n    return Err\n\n\n\nmodels = dict()\ntrain_errs = dict()\ntest_errs = dict()\nfor max_depth in range(1, 6):\n    M = 100\n    k = 26\n    M_list = []\n    train_err_list = []\n    test_err_list = []\n    N1= X_train.shape[0]\n    frame1 = DataFrame(np.zeros([N1,26]),columns=list(string.uppercase))\n    N2= X_test.shape[0]\n    frame2 = DataFrame(np.zeros([N2,26]),columns=list(string.uppercase))\n\n    model_fit = models_N_weights(X_train, y_train, M, k, max_depth)\n    for m in range(M):\n        y_hat = predict_joined_models(X_train, model_fit[0], model_fit[1], frame1, m)\n        err = error_func(y_train, y_hat)\n        train_err_list.append(err)\n\n        y_hat = predict_joined_models(X_test, model_fit[0], model_fit[1], frame2, m)\n        err = error_func(y_test, y_hat)\n        test_err_list.append(err)\n        M_list.append(m)\n
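\n    # keep this depth's train/test learning curves for the comparison plot below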
\n    models[max_depth] = M_list\n    train_errs[max_depth] = train_err_list\n    test_errs[max_depth] = test_err_list\n\n\n\nimport matplotlib.cm as cm\ncolors = iter(cm.rainbow(np.linspace(0, 1, len(models) * 2)))\nfor md in models.keys():\n    M_list = models[md]\n    train_err_list = train_errs[md]\n    test_err_list = test_errs[md]\n    plt.plot(M_list, test_err_list, c=next(colors), linestyle='-', label='test, max_depth=%d' % md)\n    plt.plot(M_list, train_err_list, c=next(colors), linestyle='--', label='train, max_depth=%d' % md)\n\nplt.xlabel('Number of weak learners')\nplt.ylabel('Error')\nplt.title('Error and number of weak learners')\nplt.legend()\nplt.show()\n","sub_path":"Adaboost-MultiClass.py","file_name":"Adaboost-MultiClass.py","file_ext":"py","file_size_in_byte":3409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"400648268","text":"# place children at the right or left end, considering them in descending order of activity score\n# dp[i][l] : best total score after moving the first i children with l of them placed at the left end\nn = int(input())\ntmp = [int(x) for x in input().split()]\n\naList=[]\nfor i in range(n):\n    aList.append([tmp[i],i])\n\naList.sort(reverse=True)\n\ndp=[[int(-1) for i in range(n+1)] for j in range(n+1)]\ndp[0][0] = 0\n\n# move the children one at a time, in descending score order\nfor i in range(n):\n    # number placed on the left so far; on iteration i anywhere from 0 to i children can sit at the left end\n    for l in range(i+1):\n        activity = aList[i][0]\n        pos = aList[i][1]\n\n        # current position at the right end:\n        # total placed so far minus those on the left (i-l), subtracted from the right end (n-1)\n        r = n-1-(i-l)\n\n        # case: place this child on the left\n        dp[i+1][l+1] = max(dp[i+1][l+1],dp[i][l] + abs(l-pos) * activity)\n\n        # case: place this child on the right\n        dp[i+1][l] = max(dp[i+1][l],dp[i][l] + abs(r-pos) * activity)\n\nprint(max(dp[n]))\n","sub_path":"Python_codes/p02709/s302207590.py","file_name":"s302207590.py","file_ext":"py","file_size_in_byte":1010,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"26716355","text":"# -*- coding: utf-8 -*-\n#\n# This file is part of Flask-IIIF\n# Copyright (C) 2015 CERN.\n#\n# Flask-IIIF is free software; you can redistribute it and/or modify\n# it under the terms of the Revised BSD License; see LICENSE file for\n# more details.\n\n\"\"\"Test REST API.\"\"\"\n\nfrom io import BytesIO\nfrom unittest.mock import patch\n\nfrom flask import url_for\nfrom PIL import Image\nfrom werkzeug.utils import secure_filename\n\nfrom .helpers import IIIFTestCaseWithRedis\n\n\nclass TestRestAPI(IIIFTestCaseWithRedis):\n\n    \"\"\"Test signals and decorators.\"\"\"\n\n    def test_api_base(self):\n        \"\"\"Test API Base.\"\"\"\n        data = dict(uuid=\"valid:id\", version=\"v2\")\n        get_the_response = self.get(\"iiifimagebase\", urlargs=data)\n        self.assertEqual(get_the_response.status_code, 303)\n\n    def test_api_info(self):\n        \"\"\"Test API Info.\"\"\"\n        from flask import jsonify\n\n        id_v1 = url_for(\n            \"iiifimagebase\", uuid=\"valid:id-üni\", version=\"v1\", _external=True\n        )\n        id_v2 = url_for(\n            \"iiifimagebase\", uuid=\"valid:id-üni\", version=\"v2\", _external=True\n        )\n\n        expected = {\n            \"v1\": {\n                \"@context\": (\n                    \"http://library.stanford.edu/iiif/\" \"image-api/1.1/context.json\"\n                ),\n                \"@id\": id_v1,\n                \"width\": 1280,\n                \"height\": 1024,\n                \"profile\": (\n                    \"http://library.stanford.edu/iiif/image-api/compliance\"\n                    \".html#level1\"\n                ),\n                \"tile_width\": 256,\n                \"tile_height\": 256,\n                \"scale_factors\": [1, 2, 4, 8, 16, 32, 64],\n            },\n            \"v2\": {\n                \"@context\": \"http://iiif.io/api/image/2/context.json\",\n                \"@id\": id_v2,\n                \"protocol\": \"http://iiif.io/api/image\",\n                \"width\": 1280,\n                \"height\": 1024,\n                \"tiles\": [{\"width\": 256, \"scaleFactors\": [1, 2, 4, 8, 16, 32, 
64]}],\n \"profile\": [\"http://iiif.io/api/image/2/level2.json\"],\n },\n }\n get_the_response = self.get(\n \"iiifimageinfo\",\n urlargs=dict(\n uuid=\"valid:id-üni\",\n version=\"v2\",\n ),\n )\n self.assert200(get_the_response)\n get_the_response = self.get(\n \"iiifimageinfo\",\n urlargs=dict(\n uuid=\"valid:id-üni\",\n version=\"v2\",\n ),\n )\n self.assertEqual(jsonify(expected.get(\"v2\")).data, get_the_response.data)\n\n get_the_response = self.get(\n \"iiifimageinfo\",\n urlargs=dict(\n uuid=\"valid:id-üni\",\n version=\"v1\",\n ),\n )\n self.assert200(get_the_response)\n self.assertEqual(jsonify(expected.get(\"v1\")).data, get_the_response.data)\n\n def test_api_info_not_found(self):\n \"\"\"Test API Info.\"\"\"\n get_the_response = self.get(\n \"iiifimageinfo\",\n urlargs=dict(\n uuid=\"notfound\",\n version=\"v2\",\n ),\n )\n self.assert404(get_the_response)\n\n def test_api_not_found(self):\n \"\"\"Test API not found case.\"\"\"\n get_the_response = self.get(\n \"iiifimageapi\",\n urlargs=dict(\n uuid=\"notfound\",\n version=\"v2\",\n region=\"full\",\n size=\"full\",\n rotation=\"0\",\n quality=\"default\",\n image_format=\"png\",\n ),\n )\n self.assert404(get_the_response)\n\n def test_api_internal_server_error(self):\n \"\"\"Test API internal server error case.\"\"\"\n get_the_response = self.get(\n \"iiifimageapi\",\n urlargs=dict(\n uuid=\"valid:id-üni\",\n version=\"v2\",\n region=\"full\",\n size=\"full\",\n rotation=\"2220\",\n quality=\"default\",\n image_format=\"png\",\n ),\n )\n self.assert500(get_the_response)\n\n def test_api_iiif_validation_error(self):\n \"\"\"Test API iiif validation case.\"\"\"\n get_the_response = self.get(\n \"iiifimageapi\",\n urlargs=dict(\n uuid=\"valid:id-üni\",\n version=\"v1\",\n region=\"200\",\n size=\"full\",\n rotation=\"2220\",\n quality=\"default\",\n image_format=\"png\",\n ),\n )\n self.assert400(get_the_response)\n\n def test_api_stream_image(self):\n \"\"\"Test API stream image.\"\"\"\n tmp_file = BytesIO()\n # create a new image\n image = Image.new(\"RGBA\", (1280, 1024), (255, 0, 0, 0))\n image.save(tmp_file, \"png\")\n tmp_file.seek(0)\n get_the_response = self.get(\n \"iiifimageapi\",\n urlargs=dict(\n uuid=\"valid:id-üni\",\n version=\"v2\",\n region=\"full\",\n size=\"full\",\n rotation=\"0\",\n quality=\"default\",\n image_format=\"png\",\n ),\n )\n # Check if returns `Last-Modified` key in headers\n # required for `If-Modified-Since`\n self.assertTrue(\"Last-Modified\" in get_the_response.headers)\n\n last_modified = get_the_response.headers[\"Last-Modified\"]\n\n self.assertEqual(get_the_response.data, tmp_file.getvalue())\n\n # Test `If-Modified-Since` recognized properly\n get_the_response = self.get(\n \"iiifimageapi\",\n urlargs=dict(\n uuid=\"valid:id-üni\",\n version=\"v2\",\n region=\"full\",\n size=\"full\",\n rotation=\"0\",\n quality=\"default\",\n image_format=\"png\",\n ),\n headers={\"If-Modified-Since\": last_modified},\n )\n\n self.assertEqual(get_the_response.status_code, 304)\n\n urlargs = dict(\n uuid=\"valid:id-üni\",\n version=\"v2\",\n region=\"200,200,200,200\",\n size=\"300,300\",\n rotation=\"!50\",\n quality=\"color\",\n image_format=\"pdf\",\n )\n\n get_the_response = self.get(\n \"iiifimageapi\",\n urlargs=urlargs,\n )\n self.assert200(get_the_response)\n\n default_name = \"{name}-200200200200-300300-color-50.pdf\".format(\n name=secure_filename(urlargs[\"uuid\"])\n )\n for dl, name in (\n (\"\", default_name),\n (\"1\", default_name),\n (\"foo.pdf\", \"foo.pdf\"),\n ):\n urlargs[\"dl\"] 
= dl\n get_the_response = self.get(\n \"iiifimageapi\",\n urlargs=urlargs,\n )\n self.assert200(get_the_response)\n self.assertEqual(\n get_the_response.headers[\"Content-Disposition\"],\n \"attachment; filename={name}\".format(name=name),\n )\n\n def test_api_decorator(self):\n \"\"\"Test API decorator.\"\"\"\n get_the_response = self.get(\n \"iiifimageapi\",\n urlargs=dict(\n uuid=\"valid:decorator:id\",\n version=\"v2\",\n region=\"full\",\n size=\"full\",\n rotation=\"0\",\n quality=\"default\",\n image_format=\"png\",\n ),\n )\n self.assert403(get_the_response)\n\n def test_api_abort_all_methods_except_get(self):\n \"\"\"Abort all methods but GET.\"\"\"\n data = dict(\n uuid=\"valid:id-üni\",\n version=\"v2\",\n region=\"full\",\n size=\"full\",\n rotation=\"0\",\n quality=\"default\",\n image_format=\"png\",\n )\n get_the_response = self.post(\"iiifimageapi\", urlargs=data)\n self.assert405(get_the_response)\n\n get_the_response = self.put(\"iiifimageapi\", urlargs=data)\n self.assert405(get_the_response)\n\n get_the_response = self.delete(\"iiifimageapi\", urlargs=data)\n self.assert405(get_the_response)\n\n get_the_response = self.patch(\"iiifimageapi\", urlargs=data)\n self.assert405(get_the_response)\n\n def test_api_cache_control(self):\n \"\"\"Test cache-control headers\"\"\"\n\n urlargs = dict(\n uuid=\"valid:id-üni\",\n version=\"v2\",\n region=\"200,200,200,200\",\n size=\"300,300\",\n rotation=\"!50\",\n quality=\"color\",\n image_format=\"pdf\",\n )\n\n key = \"iiif:{0}/{1}/{2}/{3}/{4}.{5}\".format(\n urlargs[\"uuid\"],\n urlargs[\"region\"],\n urlargs[\"size\"],\n urlargs[\"quality\"],\n urlargs[\"rotation\"],\n urlargs[\"image_format\"],\n )\n\n cache = self.app.config[\"IIIF_CACHE_HANDLER\"].cache\n\n get_the_response = self.get(\n \"iiifimageapi\",\n urlargs=urlargs,\n )\n\n self.assertFalse(\"cache-control\" in urlargs)\n\n self.assert200(get_the_response)\n\n self.assertTrue(cache.get(key))\n\n cache.clear()\n\n for cache_control, name in ((\"no-cache\", \"foo.pdf\"), (\"no-store\", \"foo.pdf\")):\n\n urlargs[\"cache-control\"] = cache_control\n\n get_the_response = self.get(\n \"iiifimageapi\",\n urlargs=urlargs,\n )\n\n self.assert200(get_the_response)\n\n self.assertFalse(cache.get(key))\n\n cache.clear()\n\n for cache_control, name in ((\"public\", \"foo.pdf\"), (\"no-transform\", \"foo.pdf\")):\n\n urlargs[\"cache-control\"] = cache_control\n\n get_the_response = self.get(\n \"iiifimageapi\",\n urlargs=urlargs,\n )\n\n self.assert200(get_the_response)\n\n self.assertTrue(cache.get(key))\n\n cache.clear()\n\n def test_cache_ignore_errors(self):\n \"\"\"Test if cache retrieval errors are ignored when configured.\"\"\"\n from flask import current_app\n\n info_args = dict(uuid=\"valid:id\", version=\"v2\")\n api_args = dict(\n uuid=\"valid:id\",\n version=\"v2\",\n region=\"full\",\n size=\"full\",\n rotation=\"0\",\n quality=\"default\",\n image_format=\"png\",\n )\n\n cache = self.app.config[\"IIIF_CACHE_HANDLER\"].cache\n with patch.object(cache, \"get\", side_effect=Exception(\"test fail\")):\n # Without ignoring errors\n self.assertRaisesRegex(\n Exception, \"test fail\", self.get, \"iiifimageinfo\", urlargs=info_args\n )\n self.assertRaisesRegex(\n Exception, \"test fail\", self.get, \"iiifimageapi\", urlargs=api_args\n )\n\n # Ignore errors\n old_value = current_app.config.get(\"IIIF_CACHE_IGNORE_ERRORS\")\n current_app.config[\"IIIF_CACHE_IGNORE_ERRORS\"] = True\n\n resp = self.get(\"iiifimageinfo\", urlargs=info_args)\n self.assert200(resp)\n resp = 
self.get(\"iiifimageapi\", urlargs=api_args)\n self.assert200(resp)\n\n current_app.config[\"IIIF_CACHE_REDIS_PREFIX\"] = old_value\n","sub_path":"tests/test_restful_api_with_redis.py","file_name":"test_restful_api_with_redis.py","file_ext":"py","file_size_in_byte":11313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"425055145","text":"from dolfin import *\nfrom dolfin_adjoint import *\nfrom numpy import fliplr, linspace, inf\nfrom os.path import join as osjoin\nfrom scipy.io import loadmat as sc_io_loadmat\nfrom scipy.interpolate import RegularGridInterpolator\nset_log_level(ERROR) \n\nclass InterpolatedParameter(Expression):\n '''\n Class to get tumor cell distributions by interpolating \n based off matrices of tumor cell data\n '''\n def __init__(self,X,Y,image,**kwargs):\n self.X = X # A numpy array giving the X-spacing of the image\n self.Y = Y # Same for Y\n self.image = image # The image of measured material property \n def eval_cell(self,values,x,cell):\n interp_handle = RegularGridInterpolator((self.X,self.Y),self.image)\n values[0] = interp_handle(x)\n \ndef interp(file_loc,mat_name):\n \"\"\"\n Function to accept matlab .mat file with tumor data \n and interpolate values onto mesh\n \"\"\"\n mat = sc_io_loadmat(file_loc)[mat_name]\n mat = fliplr(mat.T)/theta # Needs to be adjusted to fit the mesh correctly\n x,y = mat.shape[0], mat.shape[1]\n mat_interp = InterpolatedParameter(linspace(1,x,x),linspace(1,y,y),mat,degree=1)\n return interpolate(mat_interp,V)\n\ndef set_nonlinear_params(param):\n param['newton_solver']['absolute_tolerance'] = 1E-7\n param['newton_solver']['relative_tolerance'] = 1E-6\n param['newton_solver']['maximum_iterations'] = 51\n param['newton_solver']['relaxation_parameter'] = 1.0\n param['newton_solver']['linear_solver'] = 'gmres'\n param['newton_solver']['preconditioner'] = 'ilu'\n param['newton_solver']['krylov_solver']['absolute_tolerance'] = 1E-8\n param['newton_solver']['krylov_solver']['relative_tolerance'] = 1E-6\n param['newton_solver']['krylov_solver']['maximum_iterations'] = 1000\n param['newton_solver']['krylov_solver']['nonzero_initial_guess'] = True\n\ndef forward(initial_p, name, record=False, annt=False):\n \"\"\" \n Here, we define the forward problem with mechanical functions\n \n -E(u) returns the Green-Lagrange strain tensor\n -sigma(...) returns the actual stress tensor\n -sigma_form(...) returns the stress tensor based on \n the cells (phi), elasticity coefficients, and a \n coefficient beta \n -vonmises(...) 
calculates the von Mises stress based \n on the actual stress tensor\n \"\"\"\n global t\n I = Identity(2) # Identity tensor\n def E(u):\n return 0.5*(nabla_grad(u) + nabla_grad(u).T)\n def vonmises(u):\n s = sigma(u) - (1./2)*tr(sigma(u))*I # deviatoric stress\n von_Mises = sqrt(3./2*inner(s, s))\n return project(von_Mises, V, annotate=annt)\n\n #Set up linear elasticity problem\n U = VectorFunctionSpace(mesh,'Lagrange',1)\n def boundary(x, on_boundary):\n return on_boundary\n bc = DirichletBC(U, Constant((0.,0.)), boundary)\n p_n = interpolate(initial_p,V)\n v = TestFunction(U)\n \n parameters['form_compiler']['quadrature_degree'] = 2\n parameters['form_compiler']['cpp_optimize'] = True\n parameters['krylov_solver']['nonzero_initial_guess'] = True\n ffc_options = {\"quadrature_degree\": 2, 'cpp_optimize': True}\n \n if lin_hyp == 0:\n def sigma(u):\n s = 2*mu*E(u)+lmbda*tr(E(u))*I\n return s\n u = TrialFunction(U)\n a = inner(2*mu*E(u)+lmbda*tr(E(u))*I,E(v))*dx\n L = inner(2*beta*p_n*I*(mu+lmbda),E(v))*dx\n u = Function(U, annotate=annt)\n def mech():\n solve(a == L, u, bc, \n form_compiler_parameters=ffc_options,\n annotate=annt)\n return u\n else:\n def sigma(u):\n F = I + grad(u) # Deformation gradient\n B = F*F.T\n C = F.T*F\n J = det(F)\n I1 = tr(C)\n s = lmbda*(J-1)*I+mu*(B-1./2*I1*I)/(J**(5./3))\n return s\n def sigma_form(u, phi):\n F = I + grad(u) # Deformation gradient\n Fs = F/(1+beta*phi)\n Bs = Fs*Fs.T\n Js = det(Fs)\n return 1/(1+beta*phi)*(mu/(Js**(5./3))*(Bs-1./2*tr(Bs)*I)+lmbda*(Js-1)*I)\n \n u = Function(U, annotate=annt)\n du = TrialFunction(U)\n F_HE = inner(sigma_form(u, p_n), E(v))*dx\n J_HE = derivative(F_HE,u,du)\n problem_HE = NonlinearVariationalProblem(F_HE, u, bc,\n J=J_HE,\n form_compiler_parameters=ffc_options)\n solver_HE = NonlinearVariationalSolver(problem_HE)\n param_HE = solver_HE.parameters\n set_nonlinear_params(param_HE)\n def mech():\n solver_HE.solve(annotate=annt)\n return u\n\n # First iteration solving for displacement, \n # and using the von mises stress field for D\n disp = mech()\n vm = vonmises(disp)\n D = project(D0*exp(-gammaD*vm),V,annotate=annt)\n #k = project(k0*exp(-gammaK*vm),V,annotate=annotate)\n \n if record: \n # Rename parameters for saving\n u.rename('u_'+name,'displacement')\n p_n.rename('phi_T_'+name,'tumor fraction')\n vm.rename('vm_'+name,'Von Mises')\n D.rename('D_'+name,'diffusion coefficient')\n k.rename('k_'+name,'k field')\n f_timeseries.write(u,t) \n f_timeseries.write(p_n,t)\n f_timeseries.write(vm,t)\n f_timeseries.write(D,t)\n f_timeseries.write(k,t) \n \n # Set up reaction-diffusion problem\n dp = TrialFunction(V)\n p = Function(V,annotate=annt)\n q = TestFunction(V)\n F_RD = (1/dt)*(p - p_n)*q*dx + D*dot(grad(q),grad(p))*dx - k*p*(1 - p)*q*dx \n J_RD = derivative(F_RD,p,dp) \n \n for n in range(num_steps):\n # Solve reaction diffusion\n t += dt\n problem_RD = NonlinearVariationalProblem(F_RD, p,\n J=J_RD,\n form_compiler_parameters=ffc_options)\n solver_RD = NonlinearVariationalSolver(problem_RD)\n param_RD = solver_RD.parameters\n set_nonlinear_params(param_RD)\n solver_RD.solve(annotate=annt)\n p_n.assign(p)\n \n # Solve for displacement and vonmises stress\n disp = mech()\n vm = vonmises(disp)\n D = project(D0*exp(-gammaD*vm),V,annotate=annt)\n #k = project(k0*exp(-gammaK*vm),V,annotate=annotate)\n \n if record and (n%rtime == 0): \n # Rename parameters for saving\n u.rename('u_'+name,'displacement')\n p_n.rename('phi_T_'+name,'tumor fraction')\n vm.rename('vm_'+name,'Von Mises')\n D.rename('D_'+name,'diffusion 
coefficient')\n k.rename('k_'+name,'k field')\n f_timeseries.write(u,t) \n f_timeseries.write(p_n,t)\n f_timeseries.write(vm,t)\n f_timeseries.write(D,t)\n f_timeseries.write(k,t) \n \n return p\n\n# Callback function for the optimizer\n# Writes intermediate results to a logfile\ndef eval_cb(j, m):\n \"\"\" The callback function keeping a log \"\"\"\n f_log.write(\"objective = %15.10e \\n\" % j)\n\ndef objective(p, target_p):\n return assemble(inner(p-target_p, p-target_p)*dx) \n \ndef optimize(dbg=False):\n # Define the control\n m = [Control(D0), Control(gammaD), Control(k), Control(beta)]\n \n # Execute first time to annotate and record the tape\n p = forward(initial_p, 'annt', True, True)\n\n Obj = objective(p, target_p)\n\n # Prepare the reduced functional\n rf = ReducedFunctional(Obj,m,eval_cb_post=eval_cb)\n \n # upper and lower bound for the parameter field\n D_lb = 0.\n D_ub = 5.\n k_lb, k_ub = Function(V,annotate=False), Function(V,annotate=False)\n k_lb.vector()[:] = 0.\n k_ub.vector()[:] = 5.\n gD_lb = .01\n gD_ub = 1.\n beta_lb = 0.01\n beta_ub = 1.\n bnds = [[D_lb, gD_lb, k_lb, beta_lb],[D_ub, gD_ub, k_ub, beta_ub]]\n\n # Run the optimization\n m_opt = minimize(rf,method='L-BFGS-B', bounds=bnds, \n options={\"disp\":True,\n \"gtol\":2.0e-5,\n \"ftol\":2.0e-7,\n \"maxiter\":100,\n \"maxls\": 15})\n \n return m_opt\n\n#########################################################################\n# MAIN \n# call the function with: python \n########################################################################\n \n\noutput_dir = \"./output/\"\nf_log = open(osjoin(output_dir,'log.txt'),'a')\nrat_num = sys.argv[1]\nrat_idx = int(sys.argv[2])\nday_idx = int(sys.argv[3])\n\n \n# Days data and time steps\ninput_dir = \"../rat-data/rat\"+rat_num+\"/\"\nalldata = sc_io_loadmat(\"../rat-data/finaldata.mat\",)\ndays = alldata['rat'][0][rat_idx][3][0]\ndays[:] = [x-days[0] for x in days];\nday = days[day_idx]\nsteps = []\nfor i in range(1,len(days)):\n steps.append(days[i]-days[i-1])\n\n# Constant inputs for optimization\nD0 = 1.\ngammaD = .5\nk0 = 1.\nbeta = .5\ntheta = 50970. # carrying capacity - normalize cell data by this \nmu = .42 # kPa, bulk shear modulus\nnu = .45\nlmbda = 2*mu*nu/(1-2*nu)\nt1 = time()\nrtime = 1 # How often to record results\n\n#fl = h5py.File(\"./output/rat05le_day2/notime.h5\", \"r\")\n\n# Prepare a mesh\nmesh = Mesh(input_dir+\"gmsh.xml\")\nV = FunctionSpace(mesh, 'CG', 1)\n\n# Load initial tumor condition data\ninitial_p = interp(input_dir+\"tumor_t0.mat\",\"tumor\")\ninitial_p.rename('initial','tumor at day 0')\n\nle_he = ['LE','HE']\n\n\nfor lin_hyp in [0,1]:\n\n # Model parameters\n t = 0. 
# initial time \n    T = day # final time \n    num_steps = T*10 # number of time steps\n    dt = T/float(num_steps) # time step size\n\n    # Load tumor condition data for day to optimize\n    target_p = interp(input_dir+\"tumor_t\"+str(day)+\".mat\",\"tumor\") \n    target_p.rename('p_day'+str(day),'tumor at day '+str(day))\n\n    # Prepare output file\n    rat_id = rat_num+\"-\"+str(day)+le_he[lin_hyp]\n    f_timeseries = XDMFFile(osjoin(output_dir,rat_id+\"timeseries.xdmf\"))\n    f_timeseries.parameters[\"flush_output\"] = True\n    f_timeseries.parameters[\"functions_share_mesh\"] = True\n    f_nosteps = XDMFFile(osjoin(output_dir,rat_id+\"nosteps.xdmf\"))\n    f_nosteps.parameters[\"flush_output\"] = True\n    f_nosteps.parameters[\"functions_share_mesh\"] = True\n    f_notime = XDMFFile(osjoin(output_dir,rat_id+\"notime.xdmf\"))\n    f_notime.parameters[\"flush_output\"] = True\n    f_notime.parameters[\"functions_share_mesh\"] = True\n\n    ####################### D0 as a constant and k as a field ##########################\n    # Initial guesses\n    D0 = Constant(D0)\n    gammaD = Constant(gammaD) # initial guess of gamma_D\n    k = project(Constant(k0),V,annotate=False) # growth rate initial guess\n    beta = Constant(beta)\n\n    # Optimization \n    [D0, gammaD, k, beta] = optimize() # optimize these params using the adjoint method provided by adjoint_dolfin\n\n    # Record time and optimized values\n    f_log.write('-------------------------------------------------------------\\n')\n    f_log.write('RAT '+rat_num+'\\n')\n    f_log.write('Linear(0) or Hyper(1): '+str(lin_hyp)+'\\n')\n    f_log.write('Day used for optimization: '+str(day)+'\\n')\n    f_log.write('Elapsed time is ' + str((time()-t1)/60) + ' minutes\\n') \n    f_log.write('gammaD = '+str(gammaD.values()[0])+'\\n')\n    f_log.write('D0 = '+str(D0.values()[0])+'\\n')\n    f_log.write('beta = '+str(beta.values()[0])+'\\n')\n\n    k.rename('k0','diffusion field')\n    f_notime.write(k,0.)\n\n    # Compare optimized tumor growth to actual at several time points\n    t = 0.\n\n    model_p = initial_p # Initialize\n    model_p.rename('opt_p','optimized tumor')\n    f_nosteps.write(model_p, 0.)\n\n    target_p = initial_p # Initialize\n    target_p.rename('true_p', 'actual tumor')\n    f_nosteps.write(target_p, 0.)\n\n    for index2, step in enumerate(steps):\n        num_steps = step*10 # number of time steps\n        dt = step/float(num_steps) # time step size\n\n        # Run forward model using optimized values\n        model_p = forward(model_p,'opt',True,False) \n        model_p.rename('opt_p','optimized tumor')\n        f_nosteps.write(model_p,float(days[index2+1]))\n\n        # Save actual tumor for comparison\n        target_p = interp(input_dir+\"tumor_t\"+str(days[index2+1])+\".mat\",\"tumor\")\n        target_p.rename('true_p','actual tumor')\n        f_nosteps.write(target_p,float(days[index2+1]))\n\n        # Save J_opt\n        f_log.write('J_opt day '+str(days[index2+1])+' = '\n                    +str(objective(model_p, target_p))+'\\n\\n')\n\nf_log.close()\n\n","sub_path":"code/all_rats_optimization.py","file_name":"all_rats_optimization.py","file_ext":"py","file_size_in_byte":12838,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"321699056","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu May 3 08:26:16 2018\r\n\r\n@author: ESFOT\r\n\"\"\"\r\n\r\n\r\nfrom io import open\r\nprint (\"Binary to decimal numbers\")\r\narchivo1=open('C:/Users/ESFOT/Documents/prueba/1.txt','r')\r\nnum=archivo1.readline()\r\nprint(num)
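\r\n# NOTE: the powers of two below hard-code the conversion for one particular\r\n# 6-bit sample input; a general version could simply use int(num.strip(), 2).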
ducumento\")\r\narchivo2=open('C:/Users/ESFOT/Documents/prueba/2.txt','w')\r\narchivo2.write(\"Jhony Javier Cacuango...\")\r\narchivo2.write(res)\r\narchivo2.close()\r\n\r\n\r\n\r\n","sub_path":"prueba/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"529553676","text":"import os\nfrom custodian.vasp.jobs import VaspJob\nfrom custodian.vasp.handlers import VaspErrorHandler, FrozenJobErrorHandler, MeshSymmetryErrorHandler\nfrom fireworks.core.firework import FireWork, Workflow\nfrom mpworks.dupefinders.dupefinder_vasp import DupeFinderVasp\nfrom mpworks.firetasks.controller_tasks import AddEStructureTask\nfrom mpworks.firetasks.custodian_task import VaspCustodianTask\nfrom mpworks.firetasks.snl_tasks import AddSNLTask\nfrom mpworks.firetasks.vasp_io_tasks import VaspCopyTask, VaspWriterTask, \\\n VaspToDBTask\nfrom mpworks.firetasks.vasp_setup_tasks import SetupGGAUTask, \\\n SetupStaticRunTask, SetupNonSCFTask\nfrom pymatgen import Composition\nfrom pymatgen.io.cifio import CifParser\nfrom pymatgen.io.vaspio_set import MPVaspInputSet, MPGGAVaspInputSet\nfrom pymatgen.matproj.snl import StructureNL\n\n__author__ = 'Anubhav Jain'\n__copyright__ = 'Copyright 2013, The Materials Project'\n__version__ = '0.1'\n__maintainer__ = 'Anubhav Jain'\n__email__ = 'ajain@lbl.gov'\n__date__ = 'Mar 15, 2013'\n\n# TODO: add duplicate checks for DB task - don't want to add the same dir\n# twice!!\n# TODO: different walltime requirements and priority for DB task\n\n\ndef _get_custodian_task(spec):\n task_type = spec['task_type']\n v_exe = 'VASP_EXE' # will be transformed to vasp executable on the node\n if 'optimize structure (2x)' in task_type:\n jobs = VaspJob.double_relaxation_run(v_exe, gzipped=False)\n else:\n jobs = [VaspJob(v_exe)]\n\n handlers = [VaspErrorHandler(), FrozenJobErrorHandler(), MeshSymmetryErrorHandler()]\n params = {'jobs': [j.to_dict for j in jobs],\n 'handlers': [h.to_dict for h in handlers], 'max_errors': 10, 'auto_npar': False, 'auto_gamma': False}\n\n return VaspCustodianTask(params)\n\n\ndef _snl_to_spec(snl, enforce_gga=True):\n spec = {}\n\n mpvis = MPGGAVaspInputSet() if enforce_gga else MPVaspInputSet()\n structure = snl.structure\n\n spec['vasp'] = {}\n spec['vasp']['incar'] = mpvis.get_incar(structure).to_dict\n spec['vasp']['incar']['NPAR'] = 2\n spec['vasp']['poscar'] = mpvis.get_poscar(structure).to_dict\n spec['vasp']['kpoints'] = mpvis.get_kpoints(structure).to_dict\n spec['vasp']['potcar'] = mpvis.get_potcar(structure).to_dict\n spec['_dupefinder'] = DupeFinderVasp().to_dict()\n spec['_priority'] = 2\n # TODO: restore category\n # spec['_category'] = 'Materials Project'\n spec['vaspinputset_name'] = mpvis.__class__.__name__\n spec['task_type'] = 'GGA+U optimize structure (2x)' if spec['vasp'][\n 'incar'].get('LDAU', False) else 'GGA optimize structure (2x)'\n\n spec.update(_get_metadata(snl))\n\n return spec\n\n\ndef _get_metadata(snl):\n md = {'run_tags': ['auto generation v1.0']}\n if '_materialsproject' in snl.data and 'submission_id' in snl.data[\n '_materialsproject']:\n md['submission_id'] = snl.data['_materialsproject']['submission_id']\n\n return md\n\n\ndef snl_to_wf(snl, do_bandstructure=True):\n # TODO: clean this up once we're out of testing mode\n # TODO: add WF metadata\n fws = []\n connections = {}\n\n # add the SNL to the SNL DB and figure out duplicate group\n tasks = [AddSNLTask()]\n spec = {'task_type': 'Add to SNL database', 'snl': 
snl.to_dict}\n fws.append(FireWork(tasks, spec, name=spec['task_type'], fw_id=0))\n connections[0] = 1\n\n # run GGA structure optimization\n spec = _snl_to_spec(snl, enforce_gga=True)\n tasks = [VaspWriterTask(), _get_custodian_task(spec)]\n fws.append(FireWork(tasks, spec, name=spec['task_type'], fw_id=1))\n\n # insert into DB - GGA structure optimization\n spec = {'task_type': 'VASP db insertion', '_priority': 2,\n '_allow_fizzled_parents': True}\n spec.update(_get_metadata(snl))\n fws.append(FireWork([VaspToDBTask()], spec, name=spec['task_type'], fw_id=2))\n connections[1] = 2\n\n if do_bandstructure:\n spec = {'task_type': 'Controller: add Electronic Structure'}\n spec.update(_get_metadata(snl))\n fws.append(\n FireWork([AddEStructureTask()], spec, name=spec['task_type'], fw_id=3))\n connections[2] = 3\n\n # determine if GGA+U FW is needed\n incar = MPVaspInputSet().get_incar(snl.structure).to_dict\n\n if 'LDAU' in incar and incar['LDAU']:\n spec = {'task_type': 'GGA+U optimize structure (2x)',\n '_dupefinder': DupeFinderVasp().to_dict()}\n spec.update(_get_metadata(snl))\n fws.append(FireWork(\n [VaspCopyTask({'extension': '.relax2'}), SetupGGAUTask(),\n _get_custodian_task(spec)], spec, name=spec['task_type'], fw_id=10))\n connections[2].append(10)\n\n spec = {'task_type': 'VASP db insertion',\n '_allow_fizzled_parents': True}\n spec.update(_get_metadata(snl))\n fws.append(\n FireWork([VaspToDBTask()], spec, name=spec['task_type'], fw_id=11))\n connections[10] = 11\n\n if do_bandstructure:\n spec = {'task_type': 'Controller: add Electronic Structure'}\n spec.update(_get_metadata(snl))\n fws.append(FireWork([AddEStructureTask()], spec, name=spec['task_type'], fw_id=12))\n connections[11] = 12\n\n return Workflow(fws, connections, name=Composition.from_formula(snl.structure.composition.reduced_formula).alphabetical_formula)\n\n\ndef snl_to_wf_ggau(snl):\n\n # TODO: add WF meta\n\n fws = []\n connections = {}\n\n # add the root FW (GGA+U)\n spec = _snl_to_spec(snl, enforce_gga=False)\n tasks = [VaspWriterTask(), _get_custodian_task(spec)]\n fws.append(FireWork(tasks, spec, fw_id=1))\n\n # add GGA insertion to DB\n spec = {'task_type': 'VASP db insertion', '_priority': 2,\n '_category': 'VASP'}\n spec.update(_get_metadata(snl))\n fws.append(FireWork([VaspToDBTask()], spec, fw_id=2))\n connections[1] = 2\n mpvis = MPVaspInputSet()\n\n spec['vaspinputset_name'] = mpvis.__class__.__name__\n\n return Workflow(fws, connections, name=Composition.from_formula(snl.structure.composition.reduced_formula).alphabetical_formula)\n\n\nif __name__ == '__main__':\n s1 = CifParser('test_wfs/Si.cif').get_structures()[0]\n s2 = CifParser('test_wfs/FeO.cif').get_structures()[0]\n\n snl1 = StructureNL(s1, \"Anubhav Jain \")\n snl2 = StructureNL(s2, \"Anubhav Jain \")\n\n snl_to_wf(snl1).to_file('test_wfs/wf_si_dupes.json', indent=4)\n snl_to_wf(snl2).to_file('test_wfs/wf_feo_dupes.json', indent=4)\n","sub_path":"mpworks/workflows/snl_to_wf.py","file_name":"snl_to_wf.py","file_ext":"py","file_size_in_byte":6451,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"492618405","text":"import os\nimport glob\nimport psycopg2\nimport pandas as pd\nfrom sql_queries import *\n\n\ndef process_song_file(cur, filepath):\n \"\"\"\n - Read the filepath provided, read the json file and select the data.\n - Insert data on table song and artist tables.\n \"\"\"\n # open song file\n df = pd.read_json(filepath, lines=True)\n\n # insert song record\n cols = [\"song_id\", 
\"title\", \"artist_id\", \"year\", \"duration\"]\n song_data= df[cols].values[0].tolist() \n cur.execute(song_table_insert, song_data)\n \n # insert artist record\n cols = [\"artist_id\",\"artist_name\",\"artist_location\",\"artist_latitude\",\"artist_longitude\"]\n artist_data = df[cols].values[0].tolist()\n cur.execute(artist_table_insert, artist_data)\n\n\ndef process_log_file(cur, filepath):\n \"\"\"\n - Read the filepath provided, read the json file and select the data filtering by column \"Page\" equal to \"NextSong\".\n - Convert the column \"ts\" to mileseconds and split 'Start_time' column in hour, day, weekofyear, year and weekday.\n - Insert data on time,user and songplay tables.\n \"\"\"\n # open log file\n df = pd.read_json(filepath, lines=True)\n\n # filter by NextSong action\n df = pd.read_json(filepath, lines=True)\n df = df[df['page']=='NextSong']\n\n # convert timestamp column to datetime\n t = pd.to_datetime(df['ts'], unit='ms')\n \n # insert time data records\n time_data = (t.dt.time,t.dt.hour,t.dt.day,t.dt.week,t.dt.month,t.dt.year,t.dt.weekday_name)\n column_labels = ('Start_time','hour','day','weekofyear','month','year','weekday')\n time_df = pd.DataFrame(dict(zip(column_labels,time_data)))\n\n for i, row in time_df.iterrows():\n cur.execute(time_table_insert, list(row))\n\n # load user table\n cols = [\"userId\",\"firstName\",\"lastName\",\"gender\",\"level\"]\n user_df = df[cols]\n \n \n # insert user records\n for i, row in user_df.iterrows():\n cur.execute(user_table_insert, row)\n\n # insert songplay records\n for index, row in df.iterrows():\n \n # get songid and artistid from song and artist tables\n cur.execute(song_select, (row.song, row.artist, row.length))\n results = cur.fetchone()\n \n if results:\n songid, artistid = results\n else:\n songid, artistid = None, None\n\n # insert songplay record\n songplay_data = (index, row.ts, row.userId, row.level, songid, artistid, row.sessionId, row.location, row.userAgent)\n cur.execute(songplay_table_insert, songplay_data)\n\n\ndef process_data(cur, conn, filepath, func):\n \"\"\"\n - Read the filepath provided, get all files and process the data.\n - Return the number of files processed.\n \"\"\"\n # get all files matching extension from directory\n all_files = []\n for root, dirs, files in os.walk(filepath):\n files = glob.glob(os.path.join(root,'*.json'))\n for f in files :\n all_files.append(os.path.abspath(f))\n\n # get total number of files found\n num_files = len(all_files)\n print('{} files found in {}'.format(num_files, filepath))\n\n # iterate over files and process\n for i, datafile in enumerate(all_files, 1):\n func(cur, datafile)\n conn.commit()\n print('{}/{} files processed.'.format(i, num_files))\n\n\ndef main():\n \"\"\" \n - Establishes connection with the sparkify database and gets\n cursor to it. \n \n - Process the function 'process_song_file' . \n \n - Process the function 'process_log_file' .\n \n - Finally, closes the connection. 
\n    \"\"\"\n    conn = psycopg2.connect(\"host=127.0.0.1 dbname=sparkifydb user=student password=student\")\n    cur = conn.cursor()\n\n    process_data(cur, conn, filepath='data/song_data', func=process_song_file)\n    process_data(cur, conn, filepath='data/log_data', func=process_log_file)\n\n    conn.close()\n\n\nif __name__ == \"__main__\":\n    main()","sub_path":"etl.py","file_name":"etl.py","file_ext":"py","file_size_in_byte":3885,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"}
{"seq_id":"256396234","text":"# -*- coding: utf-8 -*-\n\nimport numpy as np\nfrom numpy import int32\n\n\ndef get_all_connectivity(self, group=None):\n    \"\"\"Return the connectivity and tags for a selected group of elements\n\n    Parameters\n    ----------\n    self : ElementMat\n        an ElementMat object\n    group : numpy.array\n        one or several group numbers\n\n    Returns\n    -------\n    connect_select: ndarray\n        Selected connectivity\n    tag_select: ndarray\n        Selected element tags\n\n    \"\"\"\n\n    connect = self.connectivity\n    elem_groups = self.group\n    elem_tags = self.tag\n    connect_select = np.array([], dtype=int)\n    tag_select = np.array([], dtype=int)\n\n    if group is not None:\n        group = int32(group)\n        if isinstance(group, (int, int32, float, complex)):\n            group = np.array([group], dtype=int)\n\n        if type(group) is list or type(group) is np.ndarray:\n            for grp in group:\n                Ipos_select = np.where(elem_groups == grp)[0]\n                tag_select = np.concatenate([tag_select, elem_tags[Ipos_select]])\n                k = 0\n                for Ipos in Ipos_select:\n                    if k == 0:\n                        connect_select = np.append(\n                            connect_select, connect[Ipos, :], axis=0\n                        )\n                        k += 1\n                    else:\n                        connect_select = np.vstack((connect_select, connect[Ipos, :]))\n                        k += 1\n\n    elif group is None:\n        connect_select = connect\n        tag_select = elem_tags\n\n    else:\n        return None, None\n\n    return connect_select, tag_select\n","sub_path":"pyleecan/Methods/Mesh/ElementMat/get_all_connectivity.py","file_name":"get_all_connectivity.py","file_ext":"py","file_size_in_byte":1630,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"}
{"seq_id":"302392138","text":"from app.Model import netviax_request\nfrom app.Model import flight\nfrom flask import request, abort, jsonify\nimport datetime\n\n\n\nclass NetviaxRequestGetFlights(netviax_request.NetviaxRequest):\n\n    \"\"\"\n    *AdultsQty\tInt\tR\tNumber of adults.\n    *ChildsQty\tInt\tO\tNumber of children.\n    *InfantsQty\tInt\tO\tNumber of infants.\n    TripLegs\tSearchQueryLeg[]\tR\tComposition of the trip legs to search.\n    TripType\tString\tR\tOneWay, RoundTrip, MultipleFlights.\n    CabinType\tString\tR\tEconomy, Business, First.\n    ChosenAirlines\tString[]\tO\tPreferred airlines.\n    ForcedDataSource\tString\tO\tExplicit GDS selection.\n    FlexibleDate\tBoolean\tO\tFlag for a linear vs. flexible-date search.\n    TaxBreakdown\tBoolean\tO\tFlag for a tax breakdown.\n    FareStrategy\tBoolean\tO\tOverride of the fare strategy used against the GDS.\n    RulesDetailLevel\tString\tO\tDetail level of the applied GLAS rules.\n    \"\"\"\n\n    def __init__(self, request):\n\n        request = request.json\n        flight = request[\"flight\"][\"index\"]\n\n        self.Index = flight.get(\"index\", \"\")\n        self.flight_number = flight.get(\"flightNumber\", \"\")\n        self.airline = flight.get(\"airline\", \"\")\n        self.airline_IATA = flight.get(\"airlineIATA\", \"\")\n        self.arrival_datetime = flight.get(\"arrivalDatetime\", \"\")\n\n        self.adults_qty = flight.get(\"nAdult\")\n        self.childs_qty = flight.get(\"nChild\", 0)\n        self.infants_qty = flight.get(\"nInfant\", 0)
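\n        # build the outbound leg first; for round trips a return leg is appended below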
\n        self.trip_legs = [{\n            \"Origin\": flight.get(\"from\", 0),\n            \"Destination\": flight.get(\"to\", 0),\n            \"DepartureDate\": flight.get(\"departureDatetime\", 0)\n        }]\n\n        self.__set_av_values(flight)\n        self.__set_chosen_airline(self.airline_IATA)\n\n\n        # TODO: clarify how outbound/return trips should be flagged in the request\n        self.return_ = False if flight.get(\"return\") is False else True\n        self.trip_type = \"RoundTrip\" if self.return_ is True else \"OneWay\"\n        if self.trip_type == \"RoundTrip\":\n            self.trip_legs.append({\n                \"Origin\": flight.get(\"to\", 0),\n                \"Destination\": flight.get(\"from\", 0),\n                # TODO: ask where the return departure date should come from\n                \"DepartureDate\": \"29/11/2018\"\n            })\n\n        try:\n            for i, leg in enumerate(self.trip_legs):\n                self.__check_date_format(self.trip_legs[i][\"DepartureDate\"])\n                self.trip_legs[i][\"DepartureDate\"] = self.__parse_date_format(self.trip_legs[i][\"DepartureDate\"])\n        except ValueError:\n            raise ValueError(\"Invalid date format; the expected format is dd/mm/yyyy\")\n\n        self.data_request = {\n            \"ClientId\": self.client_id,\n            \"ClientEnvironment\": self.client_eviroment,\n            \"ClientToken\": self.client_token,\n            \"ClientChannel\": self.client_channel,\n            \"ClientGLAS\": self.client_glas,\n            \"AdultsQty\": self.adults_qty,\n            \"ChildsQty\": self.childs_qty,\n            \"InfantsQty\": self.infants_qty,\n            \"TripLegs\": self.trip_legs,\n            \"CabinType\": self.cabin_type,\n            \"TripType\": self.trip_type,\n            \"nSenior\": self.nSenior,\n            \"ChosenAirlines\": self.chosen_airlines\n        }\n\n        self.__validate_request()\n\n    def __check_date_format(self,date_string):\n        datetime.datetime.strptime(date_string, '%d/%m/%Y')\n\n    def __parse_date_format(self, date_string):\n        return datetime.datetime.strptime(date_string, '%d/%m/%Y').strftime('%Y-%m-%dT00:00:00')\n\n    def send_netviax_get_flights_response(self):\n\n        response = self.netviax_response.json()\n\n        if response[\"ErrorMessage\"] == \"\":\n\n            self.data_response = {\n                \"error\": \"OK\",\n                \"flights\": []\n            }\n\n            quotations = response[\"Quotations\"]\n            groups_of_flights = response[\"GroupsOfFlights\"]\n            geo_information = response[\"GeoInformation\"]\n            airline_information = response[\"AirlineInformation\"]\n\n            for index, quotation in enumerate(quotations, start=0):\n\n                for rph in quotation[\"RPHsQuoted\"]:\n                    new_flight = flight.Flight(quotation, rph, groups_of_flights, geo_information, airline_information,\n                                               self.trip_legs[0][\"Origin\"], self.trip_legs[0][\"Destination\"])\n                    new_flight.Index = index\n                    new_flight.validReturns = [] if self.trip_type == \"OneWay\" else new_flight.get_valid_returns()\n                    self.data_response[\"flights\"].append(new_flight.serialize())\n\n        elif response[\"ErrorMessage\"] != \"\":\n\n            self.data_response = self.send_error_response(response[\"ErrorMessage\"])\n\n        return self.data_response\n\n    def __get_cabin_class(self, cabin_class):\n        cabin_class = int(cabin_class)\n        if cabin_class == 0:\n            return 'Economy'\n        elif cabin_class == 1:\n            return 'Business'\n        elif cabin_class == 2:\n            return 'First'\n        else:\n            raise KeyError(\"invalid cabin_class, accepted values -> 0,1,2\")\n\n    def __set_av_values(self, flight):\n\n        av = flight.get('AV', [])\n\n        for row in av:\n            key = row[\"key\"]\n            if key == 'nSenior':\n                self.nSenior = row[\"value\"]\n            if key == 'cabinClass':\n                self.cabin_type = self.__get_cabin_class(row[\"value\"])\n\n    def __set_chosen_airline(self, iata):\n        if iata == \"\":\n            self.chosen_airlines = []\n        else:\n            self.chosen_airlines = [iata]\n\n    def __get_trip_type(self, trip_legs):\n\n        size = len(trip_legs)\n\n        if size == 1:\n            return 'OneWay'\n        elif size == 2:\n            return 'RoundTrip'\n        elif size > 2:\n            return 'MultipleFlights'\n        else:\n            raise KeyError(\"invalid trip_type: check TripLegs\")\n\n    def __validate_request(self):\n\n        # check passenger counts per category\n        if self.adults_qty > 9 or self.adults_qty <= 0:\n            raise Exception(\"The number of adults is required and must be between 1 and 9\")\n        if self.childs_qty > 9 or self.childs_qty < 0:\n            raise Exception(\"The number of children must be between 0 and 9\")\n        if self.infants_qty > 9 or self.infants_qty < 0:\n            raise Exception(\"The number of infants must be between 0 and 9\")\n\n        # check that the passenger total does not exceed 9\n        if int(self.adults_qty)+int(self.infants_qty)+int(self.childs_qty) > 9:\n            raise Exception(\"The total number of passengers exceeds 9\")\n\n        # TODO: clarify the relationship between trip_type and the trip legs\n        if len(self.trip_legs) > 2:\n            raise Exception(\"At most two trip legs are allowed\")\n\n","sub_path":"app/Model/netviax_request_get_flights.py","file_name":"netviax_request_get_flights.py","file_ext":"py","file_size_in_byte":6763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"}
{"seq_id":"564420259","text":"import socket\nimport base64\nimport simplejson\n\nclass ThisSocket:\n    def __init__(self,ip,port):\n        my_listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n        my_listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n        my_listener.bind((ip, port))\n        my_listener.listen(0)\n        print(\"Listening...\")\n        (self.my_connection, my_address) = my_listener.accept()\n        print(\"Connection OK from \" + str(my_address))\n\n    def Json_Send(self,data):\n        json_data = simplejson.dumps(data)\n        self.my_connection.send(json_data.encode(\"utf-8\"))\n\n    def Json_Rec(self):\n        json_data = \"\"
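\n        # keep appending received chunks until the buffer parses as complete JSON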
\n        while True:\n            try:\n                json_data = json_data + self.my_connection.recv(1024).decode()\n                return simplejson.loads(json_data)\n            except ValueError:\n                continue\n\n    def CommExc(self, command_input):\n        self.Json_Send(command_input)\n\n        if command_input[0] == \"quit\":\n            self.my_connection.close()\n            exit()\n\n        return self.Json_Rec()\n\n    def SaveFile(self,path,content):\n        with open(path,\"wb\") as my_file:\n            my_file.write(base64.b64decode(content))\n        return \"Download OK\"\n\n    def FileContents(self,path):\n        with open(path,\"rb\") as my_file:\n            return base64.b64encode(my_file.read())\n\n    def ListenStart(self):\n        while True:\n            command_input = input(\"Enter command: \")\n            command_input = command_input.split(\" \")\n            try:\n                if command_input[0] == \"upload\":\n                    my_file_content = self.FileContents(command_input[1])\n                    command_input.append(my_file_content)\n\n                command_output = self.CommExc(command_input)\n\n                if command_input[0] == \"download\" and \"Error!\" not in command_output:\n                    command_output = self.SaveFile(command_input[1],command_output)\n            except Exception:\n                command_output = \"Error\"\n            print(command_output)\n\nListenSocket = ThisSocket(\"IP\",port)\nListenSocket.ListenStart()\n","sub_path":"socketlistener.py","file_name":"socketlistener.py","file_ext":"py","file_size_in_byte":2117,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"610126065","text":"import unittest\nfrom generics import task as t\n\nclass TestLctkTask(unittest.TestCase):\n\n    def test_task_name_is_set(self):\n        name = 'My Random Task'\n        task = t.Task(name)\n        self.assertEqual(task.name, name)\n\n    def test_task_returns_zero_when_run_time_has_bad_data(self):\n        task = t.Task('t-one')\n        get_time_result = task.get_task_run_time()\n        self.assertEqual(get_time_result, 0)\n    \n    def test_task_run_raises_type_error_with_no_function(self):\n        task = t.Task('t-two')\n        with self.assertRaises(TypeError):\n            task.run()\n\n    def test_task_run_executes_function(self):\n        task = t.Task('t-three')\n        task.task_function = lambda: None\n        run_result = task.run()\n        self.assertEqual(run_result, None)","sub_path":"web/backend/load_model/tests/test_task.py","file_name":"test_task.py","file_ext":"py","file_size_in_byte":793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"497101182","text":"import keyword\n\n# Variable names are made up of letters, digits, and underscores.\nfriend = 1\na = 10\nmy_name = '강수진'\n# myName = '강수진'  (Java style)\n_yourname= '둘리'\nmember1 = '도우넛'\n\n# Invalid names\n# friend$ = 2\n# 1abc = 1\n\n# def = 10\n# keyword List\nprint(keyword.kwlist);\n\n# Variables with Hangul (Korean) names are also allowed.\n가격1 = 1000\nprint(가격1-500);\n\n","sub_path":"variable.py","file_name":"variable.py","file_ext":"py","file_size_in_byte":372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"477376902","text":"class Person:\n    MALE = 'M'\n    FEMALE = 'F'\n\n    def __init__(self, name, birth_year,\n                 gender, father=None, mother=None):\n        self.name = name\n        self.birth_year = birth_year\n        self.gender = gender\n        self.father = father\n        self.mother = mother\n        self.kids = []\n\n        if self.father:\n            self.father.kids.append(self)\n\n        if self.mother:\n            self.mother.kids.append(self)\n\n    def children(self, gender=None):\n        if gender:\n            children = []\n            for person in self.kids:\n                if person.gender == gender:\n                    children.append(person)\n            return children\n        else:\n            return self.kids\n\n    def get_sisters(self):\n        return self.__get_siblings(self.FEMALE)\n\n    def get_brothers(self):\n        return self.__get_siblings(self.MALE)\n\n    def is_direct_successor(self, other_person):\n        return other_person in self.children()\n\n    def __get_siblings(self, gender):\n        all_siblings = list(self.mother.children() + self.father.children())
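\n        # set() removes the duplicates that arise because full siblings appear in both parents' child lists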
self.assertEqual(get_time_result, 0)\n \n def test_task_run_raises_type_error_with_no_function(self):\n task = t.Task('t-two')\n with self.assertRaises(TypeError):\n task.run()\n\n def test_task_run_executes_function(self):\n task = t.Task('t-three')\n task.task_function = lambda: None\n run_result = task.run()\n self.assertEqual(run_result, None)","sub_path":"web/backend/load_model/tests/test_task.py","file_name":"test_task.py","file_ext":"py","file_size_in_byte":793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"497101182","text":"import keyword\n\n# Variable names are made up of letters, digits, and underscores.\nfriend = 1\na = 10\nmy_name = '강수진'\n# myName = '강수진' (Java-style naming)\n_yourname= '둘리'\nmember1 = '도우넛'\n\n# Invalid naming examples\n# friend$ = 2\n# 1abc = 1\n\n# def = 10\n# keyword List\nprint(keyword.kwlist)\n\n# Variable names written in Hangul are also allowed.\n가격1 = 1000\nprint(가격1-500)\n\n","sub_path":"variable.py","file_name":"variable.py","file_ext":"py","file_size_in_byte":372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"477376902","text":"class Person:\n MALE = 'M'\n FEMALE = 'F'\n\n def __init__(self, name, birth_year,\n gender, father=None, mother=None):\n self.name = name\n self.birth_year = birth_year\n self.gender = gender\n self.father = father\n self.mother = mother\n self.kids = []\n\n if self.father:\n self.father.kids.append(self)\n\n if self.mother:\n self.mother.kids.append(self)\n\n def children(self, gender=None):\n if gender:\n children = []\n for person in self.kids:\n if person.gender == gender:\n children.append(person)\n return children\n else:\n return self.kids\n\n def get_sisters(self):\n return self.__get_siblings(self.FEMALE)\n\n def get_brothers(self):\n return self.__get_siblings(self.MALE)\n\n def is_direct_successor(self, other_person):\n return other_person in self.children()\n\n def __get_siblings(self, gender):\n all_siblings = list(self.mother.children() + self.father.children())\n return list(set(sibling for sibling in all_siblings\n if sibling.gender == gender and sibling is not self))\n","sub_path":"task3/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1223,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"436480632","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\n__title__ = '__init__.py'\n__author__ = 'JieYuan'\n__mtime__ = '19-1-7'\n\"\"\"\nimport numpy as np\nimport torch\n\n__chars = {237,\n 320,\n 565,\n 569,\n 1115,\n 1548,\n 1592,\n 1727,\n 2205,\n 2509,\n 2707,\n 2862,\n 3057,\n 3224,\n 3257,\n 3302,\n 3357,\n 3435,\n 3450,\n 3653,\n 4160,\n 4546,\n 4637,\n 4697,\n 5008,\n 5250,\n 5380,\n 5423,\n 6025,\n 6110,\n 6542,\n 7057,\n 7066,\n 7360,\n 7435,\n 8290,\n 8291,\n 8292}\n\n\ndef gen_acrostic(opt, model, start_words, ix2word, word2ix, prefix_words=None):\n results = ''\n start_word_len = len(start_words)\n input = (torch.Tensor([word2ix['']]).view(1, 1).long())\n\n hidden = None\n\n index = 0 # tracks how many acrostic head words have been emitted so far\n\n if prefix_words:\n for word in prefix_words:\n output, hidden = model(input, hidden)\n input = (input.data.new([word2ix[word]])).view(1, 1)\n\n for i in range(opt.max_gen_len):\n output, hidden = model(input, hidden)\n top_indexs = output.data[0].topk(40)[1].numpy()\n # for top_index in top_indexs:\n # if top_index not in __chars:\n # break\n l = list(set(top_indexs) - __chars)[:10]\n np.random.shuffle(l)\n top_index = l[0]\n w = ix2word[top_index]\n\n if i % 7 == 0:\n 
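# NOTE: every position where i % 7 == 0 begins a new 7-character line of the\n # poem; each new line is seeded with the next head word from start_words.\n 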
if index == start_word_len:\n # stop once the poem already contains every head word\n break\n else:\n # feed the next head word into the model as input\n w = start_words[index]\n index += 1\n # input = (input.data.new([word2ix[w]])).view(1, 1)\n input = (input.data.new([word2ix.get(w, np.random.randint(0, 8000))])).view(1, 1)\n else:\n # otherwise, feed the previously predicted word back in as the next input\n input = (input.data.new([word2ix[w]])).view(1, 1)\n\n results += w if i % 7 else '\\n' + w\n # results.append(w if i % 7 else '\\n'+w)\n return results\n","sub_path":"poetry/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"400648268","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Date : 2018-06-12 16:12:27\n# @Author : Your Name (you@example.org)\n# @Link : http://example.org\n# @Version : $Id$\n\n# Base-data anomaly cleanup: just configure the SQL script in Sql_script below\n\nfrom execsqlscript import *\n#\nconnstr = get_conn_mysql_ent()\nsqlscript = '''delete a from PAY_TRANS_RESULT a where a.CREATETIME < date(DATE_ADD(now(),interval -1 month));''';\n\ntry:\n\texec_mysql_noreturn(connstr, sqlscript)\nexcept Exception as e:\n\tprint(\"Mysql语句执行错误:\\n\" + str(e))\nfinally:\n\tprint(\"结束!\")\n\n\n\n","sub_path":"src/Clear_Log_Mysql_PAY_TRANS_RESULT.py","file_name":"Clear_Log_Mysql_PAY_TRANS_RESULT.py","file_ext":"py","file_size_in_byte":561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"416515282","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n'''\n@Time : 2018/3/22 7:21 PM\n@Author : ldx\n@contact : ldx.9@163.com\n@File : restful02.py\n@Software: PyCharm\n'''\n\nfrom flask import Flask, jsonify\n\napp = Flask(__name__)\n\ntasks = [\n {\n 'id': 1,\n 'title': u'Buy groceries',\n 'description': u'Milk, Cheese, Pizza, Fruit, Tylenol',\n 'done': False\n },\n {\n 'id':2,\n 'title': u'Learn Python',\n 'description': u'Need to find a good Python tutorial on the web',\n 'done': False\n }\n]\n\n@app.route('/todo/api/v1.0/tasks', methods=['GET'])\ndef get_tasks():\n return jsonify({'tasks': tasks})\n\nif __name__ == '__main__':\n app.run(debug=True)","sub_path":"untitled/flask/restful02.py","file_name":"restful02.py","file_ext":"py","file_size_in_byte":701,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"648057496","text":"#!/usr/bin/env python\n\nimport sys\nif sys.version_info < (2, 6):\n raise RuntimeError(\"Python 2.6+ is required.\")\n\nimport codecs\nimport logging\nimport optparse\nimport os\nimport unittest\n\nCRLF_OFFSET_ERROR = '\\r\\0\\r\\n\\0'\nCRLF_OFFSET_FIX = '\\r\\0\\n\\0'\n\ndef fsckByteString(content=None, log=None):\n if not content:\n raise ValueError('Content must not be empty.')\n \n if content.startswith(codecs.BOM_UTF16):\n if log: log.info('Detected UTF-16 BOM.')\n\n if CRLF_OFFSET_ERROR in content:\n if log: log.error('Byte shift due to improper line ending conversion!')\n if log: log.info('Correcting line endings...')\n content = content.replace(CRLF_OFFSET_ERROR, CRLF_OFFSET_FIX)\n \n if log: log.info('Converting to UTF-8...')\n return content.decode(\"utf16\").encode(\"utf8\")\n\n if content.startswith(codecs.BOM_UTF8):\n if log: log.warn('Detected unneccessary UTF-8 BOM.')\n if log: log.info('Removing BOM...')\n\n return content[len(codecs.BOM_UTF8):]\n \n if log: log.info('No action required.')\n return content\n\n\nclass fscker(unittest.TestCase):\n DATA = \"simple\\r\\ntest\\r\\nof\\r\\nencodings\"\n EXPECTED = DATA.encode(\"utf8\")\n\n def test_valid_utf8(self):\n value 
= self.DATA.encode(\"utf8\")\n actual = fsckByteString(value)\n self.assertEqual(self.EXPECTED, actual)\n \n def test_valid_utf8_with_bom(self):\n value = codecs.BOM_UTF8 + self.DATA.encode(\"utf8\")\n actual = fsckByteString(value)\n self.assertEqual(self.EXPECTED, actual)\n \n def test_valid_utf16_to_utf8(self):\n value = self.DATA.encode(\"utf16\")\n actual = fsckByteString(value)\n self.assertEqual(self.EXPECTED, actual)\n \n def test_invalid_utf16_to_utf8(self):\n value = self.DATA.encode(\"utf16\").replace('\\n', '\\r\\n')\n actual = fsckByteString(value)\n self.assertEqual(self.EXPECTED, actual)\n\n\nif __name__ == '__main__':\n parser = optparse.OptionParser(usage='Usage: %prog [options] file1 [... fileN]')\n parser.add_option('--test', dest='is_testing', action='store_true', default=False, help='run test suite')\n\n options, files = parser.parse_args()\n logging.basicConfig(format='%(name)s %(levelname)s: %(message)s', level=logging.INFO)\n\n print\n if options.is_testing:\n unittest.TextTestRunner(verbosity=2).run(unittest.TestSuite(\n unittest.defaultTestLoader.loadTestsFromTestCase(fscker)\n ))\n elif files:\n for fname in files:\n log = logging.getLogger(os.path.basename(fname))\n try:\n content = None\n with open(fname, 'rb') as f:\n content = fsckByteString(f.read(), log)\n with open(fname, 'wb') as f:\n f.write(content)\n except Exception as e:\n log.error('\"%s\" could not be checked.', fname)\n log.error(e)\n print\n else:\n parser.print_help()","sub_path":"crlf_fix.py","file_name":"crlf_fix.py","file_ext":"py","file_size_in_byte":2768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"265929187","text":"# coding:utf8\nfrom gfirefly.server.globalobject import GlobalObject\nfrom app.game.core.PlayersManager import PlayersManager\n\n\nremote_gate = GlobalObject().remote['gate']\n\n\n# def send_mail(mail):\n# \"\"\"send mail through gate\"\"\"\n# if get_gate_remote():\n# get_gate_remote().callRemote(\"send_mail\", mail)\n\n\ndef push_message(key, character_id, *args, **kw):\n player = PlayersManager().get_player_by_id(character_id)\n if player:\n pargs = (key, player.dynamic_id) + args\n kw['is_online'] = True\n return remote_gate._reference._service.callTarget(*pargs, **kw)\n else:\n return remote_gate.push_message_remote(key, character_id, args, kw)\n","sub_path":"app/game/action/root/netforwarding.py","file_name":"netforwarding.py","file_ext":"py","file_size_in_byte":681,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"615407119","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated: Apr 2020\r\nClassy Interacting Bouncing Balls in a box!\r\n\r\n@author: Ryan Clement (RRCC)\r\n\"\"\"\r\n\r\nimport random\r\nimport math as m\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib.animation as animation\r\n\r\n### Ball Class:\r\nclass Ball:\r\n \"\"\" Class for bouncing balls \"\"\"\r\n ballCount = 0\r\n dt = 0.01 # s\r\n boxU = 10.0 # m\r\n boxD = 0.0 # m\r\n boxL = 0.0 # m\r\n boxR = 10.0 # m\r\n\r\n def __init__(self, x=0, y=0, vx=0, vy=0):\r\n \"\"\" Constructor \"\"\"\r\n self.x = x\r\n self.xo = x\r\n self.y = y\r\n self.yo = y\r\n self.vx = vx\r\n self.vy = vy\r\n self.t = 0.0\r\n Ball.ballCount += 1\r\n\r\n def __del__(self):\r\n \"\"\" Destructor \"\"\"\r\n Ball.ballCount -= 1\r\n\r\n def move(self):\r\n # X\r\n self.xo = self.x\r\n self.x += self.vx*Ball.dt\r\n # Y\r\n self.yo = self.y\r\n self.y += self.vy*Ball.dt\r\n # Collision with 
wall?\r\n self.__boundaries()\r\n # Time\r\n self.t += Ball.dt\r\n\r\n def __boundaries(self):\r\n # Y\r\n if( self.y < Ball.boxD ):\r\n tD = Ball.dt - (Ball.boxD - self.yo)/self.vy\r\n self.vy *= -1.0\r\n self.y = Ball.boxD + self.vy*tD\r\n self.yo = Ball.boxD\r\n elif( self.y > Ball.boxU ):\r\n tU = Ball.dt - (Ball.boxU - self.yo)/self.vy\r\n self.vy *= -1.0\r\n self.y = Ball.boxU + self.vy*tU\r\n self.yo = Ball.boxU\r\n # X\r\n if( self.x < Ball.boxL ):\r\n tL = Ball.dt - (Ball.boxL - self.xo)/self.vx\r\n self.vx *= -1.0\r\n self.x = Ball.boxL + self.vx*tL\r\n self.xo = Ball.boxL\r\n elif( self.x > Ball.boxR ):\r\n tR = Ball.dt - (Ball.boxR - self.xo)/self.vx\r\n self.vx *= -1.0\r\n self.x = Ball.boxR + self.vx*tR\r\n self.xo = Ball.boxR\r\n### END: Ball Class\r\n\r\n\r\n### Collision Functions:\r\ndef collision(balls):\r\n d = 0.3 # For ms=5\r\n for i in np.arange(Ball.ballCount-1):\r\n for j in np.arange(i+1,Ball.ballCount):\r\n drx = balls[i].x - balls[j].x\r\n dry = balls[i].y - balls[j].y\r\n drs = drx*drx + dry*dry\r\n dr = m.sqrt( drs )\r\n if( dr < d ):\r\n # Over-Shot Collision\r\n # TODO: Add switch for method selection\r\n if( False ):\r\n # Physics Based Collision Correction\r\n xi = balls[i].x - 10.0*Ball.dt*balls[i].vx\r\n yi = balls[i].y - 10.0*Ball.dt*balls[i].vy\r\n xj = balls[j].x - 10.0*Ball.dt*balls[j].vx\r\n yj = balls[j].y - 10.0*Ball.dt*balls[j].vy\r\n drx = xi - xj\r\n dry = yi - yj\r\n dvx = balls[i].vx - balls[j].vx\r\n dvy = balls[i].vy - balls[j].vy\r\n drs = d*d\r\n a = dvx*dvx + dvy*dvy\r\n b = 2.0*(drx*dvx + dry*dvy)\r\n c = drx*drx + dry*dry - drs\r\n s = b*b - 4.0*a*c\r\n tp = (-b + m.sqrt(s))/(2.0*a)\r\n tm = (-b - m.sqrt(s))/(2.0*a)\r\n if( tp>0 and tm>0 ):\r\n tc = min(tp,tm)\r\n elif( tm<0 ):\r\n tc = tp\r\n else:\r\n tc = tm\r\n xci = xi + tc*balls[i].vx\r\n yci = yi + tc*balls[i].vy\r\n xcj = xj + tc*balls[j].vx\r\n ycj = yj + tc*balls[j].vy\r\n drx = xci - xcj\r\n dry = yci - ycj\r\n elif( True ):\r\n # Game Engine Style Collision Correction\r\n offset = (d - dr)/2.0\r\n dx = offset*drx/dr\r\n dy = offset*dry/dr\r\n xiNew = balls[i].x + dx\r\n yiNew = balls[i].y + dy\r\n xjNew = balls[j].x - dx\r\n yjNew = balls[j].y - dy\r\n drx = xiNew - xjNew\r\n dry = yiNew - yjNew\r\n drs = d*d\r\n dvx = balls[i].vx - balls[j].vx\r\n dvy = balls[i].vy - balls[j].vy\r\n fac = (dvx*drx + dvy*dry)/drs\r\n delvx = fac*drx\r\n delvy = fac*dry\r\n balls[i].vx -= delvx\r\n balls[i].vy -= delvy\r\n balls[j].vx += delvx\r\n balls[j].vy += delvy\r\n balls[i].x = xiNew\r\n balls[i].y = yiNew\r\n balls[j].x = xjNew\r\n balls[j].y = yjNew\r\n elif( dr == d ):\r\n # Perfect Collision!\r\n # This is going to be a VERY rare event ...\r\n dvx = balls[i].vx - balls[j].vx\r\n dvy = balls[i].vy - balls[j].vy\r\n fac = (dvx*drx + dvy*dry)/drs\r\n delvx = fac*drx\r\n delvy = fac*dry\r\n balls[i].vx -= delvx\r\n balls[i].vy -= delvy\r\n balls[j].vx += delvx\r\n balls[j].vy += delvy\r\n\r\n### END: Collision Functions\r\n\r\n\r\n### Animation Functions:\r\ndef init():\r\n tText.set_text('Time = ')\r\n return scat, tText\r\n\r\ndef animate(i):\r\n s = 'Time = %.1f s' % ballList[0].t\r\n tText.set_text(s)\r\n sList = []\r\n for b in ballList:\r\n sList.append([b.x,b.y])\r\n scat.set_offsets(sList)\r\n # Graphics are updated every 10 time steps.\r\n # ... 
update positions and check for collisions 10 times.\r\n for i in np.arange(10):\r\n for b in ballList:\r\n b.move()\r\n collision(ballList)\r\n return scat,tText\r\n### END: Animation Functions\r\n\r\n\r\n##### Movie Time!\r\nnumBalls = 25\r\nballList = []\r\nxList = []\r\nyList = []\r\nfor i in range(numBalls):\r\n xR = random.uniform(1,9)\r\n yR = random.uniform(1,9)\r\n vxR = random.uniform(-5.0,5.0)\r\n vyR = random.uniform(-5.0,5.0)\r\n ballList.append( Ball(xR,yR,vxR,vyR) )\r\n xList.append(xR)\r\n yList.append(yR)\r\n\r\nfig, ax = plt.subplots()\r\nax.set_title('Bouncing Balls!')\r\nax.set_xlim([0,10])\r\nax.set_ylim([0,10])\r\ntText = ax.text(4.5, 9.5, 'Time = ')\r\nscat = ax.scatter(xList,yList,c=xList,cmap='gist_rainbow')\r\n# scat = ax.scatter(xList,yList,c=xList,cmap='seismic')\r\n\r\n\r\nani = animation.FuncAnimation(fig, animate, frames=101,\r\n interval=100, blit=True,\r\n init_func=init, repeat=False)\r\n\r\n# Uncomment next two lines to write file to disk.\r\n#pwriter = animation.PillowWriter(fps=5, metadata=dict(artist='Dr. Ryan Clement'))\r\n#ani.save('../movies/bouncing_balls.gif',writer=pwriter)\r\n\r\nplt.show()\r\n##### END: Movie Time\r\n","sub_path":"scripts/collision_box.py","file_name":"collision_box.py","file_ext":"py","file_size_in_byte":6905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"99057382","text":"import discord\nfrom discord.ext import commands\nimport logging\nimport shared\n\n# set logging level\nlogging.basicConfig(level=logging.WARNING)\n\nclass Admins:\n def __init__(self, bot):\n self.bot = bot\n \n # region prefix command\n prefix_brief = \"Changes or shows prefix\"\n prefix_description = \"Displays current prefix or changes prefix if prefix is specified\"\n prefix_help = \"Provide a new prefix to change the prefix for this server.\"\n @commands.command(brief=prefix_brief,description=prefix_description,help=prefix_help)\n @shared.command_is_enabled('prefix_command_enabled')\n async def prefix(self, ctx, *, new_prefix:str=None):\n if new_prefix == None:\n current_prefix = await shared.get_pre(self.bot, ctx.message)\n await ctx.send(\"```My prefix on this server is \"+current_prefix+\n \"\\nTo change the prefix you can type \"+current_prefix+\"prefix (new prefix)```\")\n else:\n # block users without admin permisisons\n if not ctx.message.author.guild_permissions.administrator:\n await ctx.message.channel.send(\"error: you are not an admin\")\n return\n \n # update entry in table or create new entry if no entry exists\n q = shared.db.execute(\"SELECT * FROM Guilds WHERE guild_id = :guild_id\",\n guild_id=ctx.message.guild.id)\n if q == []:\n # create new entry in Guilds table, saving the new prefix\n shared.db.execute(\"INSERT INTO Guilds (name, prefix, guild_id, owner_id, icon_url)\"+\n \"VALUES(:name, :prefix, :guild_id, :owner_id, :icon_url)\",\n name=ctx.message.guild.name, prefix=new_prefix, \n guild_id=ctx.message.guild.id, owner_id=ctx.message.guild.owner.id,\n icon_url=ctx.message.guild.icon_url)\n await ctx.send(\"Prefix updated, my new prefix is \"+new_prefix)\n else:\n # update existing entry in table, changing new prefix\n shared.db.execute(\"UPDATE Guilds SET prefix = :prefix WHERE guild_id = :guild_id\",\n prefix=new_prefix, guild_id=ctx.message.guild.id)\n await ctx.send(\"Prefix updated, my new prefix is \"+new_prefix)\n # endregion\n\n# setup cog\ndef setup(bot):\n 
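# NOTE: assuming the usual discord.py convention, the bot loads this module via\n # bot.load_extension(\"cogs.admins\"), which calls setup(bot) to register the cog.\n 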
bot.add_cog(Admins(bot))","sub_path":"cogs/admins.py","file_name":"admins.py","file_ext":"py","file_size_in_byte":2438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"367807933","text":"\n\nfrom xai.brain.wordbase.nouns._offshoot import _OFFSHOOT\n\n#calss header\nclass _OFFSHOOTS(_OFFSHOOT, ):\n\tdef __init__(self,): \n\t\t_OFFSHOOT.__init__(self)\n\t\tself.name = \"OFFSHOOTS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"offshoot\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_offshoots.py","file_name":"_offshoots.py","file_ext":"py","file_size_in_byte":252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"506912280","text":"# https://www.tensorflow.org/tutorials/keras/regression\n# https://www.kaggle.com/toramky/automobile-dataset\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport tensorflow as tf\nimport pandas as pd\nimport seaborn as sns\n\nfrom automobile import load_data, split_data_train_test, linear_regression\n\n\ndef plot_loss(history):\n plt.figure()\n plt.plot(history.history['loss'], label='loss')\n plt.plot(history.history['val_loss'], label='val_loss')\n plt.ylim([0, 10000])\n plt.xlabel('Epoch')\n plt.ylabel('Error [price]')\n plt.legend()\n plt.grid(True)\n plt.show()\n\n\ndef plot_horsepower(training_x, training_y, x, y):\n # plt.scatter(train_features['Horsepower'], train_labels, label='Data')\n plt.figure()\n plt.scatter(training_x, training_y, label='Data')\n plt.plot(x, y, color='k', label='Predictions')\n plt.xlabel('Horsepower')\n plt.ylabel('price')\n plt.legend()\n plt.show()\n\n\ndef linear_regression_one_input():\n data_frame = load_data()\n sns.heatmap(data_frame.corr(), annot=True)\n train_features, train_labels, test_features, test_labels = split_data_train_test(data_frame, train_size=0.8)\n\n hp_index = data_frame.columns.get_loc(\"horsepower\")\n\n horsepower = train_features[:, hp_index]\n horsepower = np.asarray(horsepower).astype(np.float32)\n\n train_labels_1 = np.asarray(train_labels).astype(np.float32)\n\n horsepower_model, history = linear_regression(horsepower, train_labels_1)\n\n plot_loss(history)\n\n horsepower_model.predict(horsepower[:140])\n\n test_results = {}\n\n horsepower_test = np.asarray(test_features[:, hp_index]).astype(np.float32)\n test_labels_1 = np.asarray(test_labels).astype(np.float32)\n\n test_results['horsepower_model'] = horsepower_model.evaluate(\n horsepower_test,\n test_labels_1, verbose=2)\n\n x = tf.linspace(0.0, 250, 251)\n y = horsepower_model.predict(x)\n\n plot_horsepower(horsepower, train_labels_1, x, y)\n\n horsepower_model.save('saved_model/my_model')\n\n hist = pd.DataFrame(history.history)\n hist['epoch'] = history.epoch\n hist.tail()\n\n\nif __name__ == \"__main__\":\n linear_regression_one_input()\n\n\n# according to this one, it is also wrong: https://www.kaggle.com/vovanthuong/predict-automobile-price","sub_path":"04_automobile/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"492192365","text":"import csv\nimport pandas as pd\nfrom ast import literal_eval\nfrom deploy import deploy\n\nseq= input(\"enter sequence: \")\nfeatures_file= deploy(seq)\n\n\n# print(features_file)\n\ntiny_values=features_file.Tiny\ncol_names= ['tiny_number', \"tiny_percentage\"]\ntiny_df = pd.DataFrame(columns= col_names)\n#Getting out tiny values for sequences:\n# for item in 
range(len(tiny_values)):\n# val=features_file.Tiny[item]\n# df= literal_eval(val)\n# # print(df)\n# tiny_df= tiny_df.append({\"tiny_number\":\"{}\".format(df[\"number\"]), \"tiny_percentage\": \"{}\".format(df[\"percentage\"])}, ignore_index= True)\n# #get the last column name:\n# ncols= len(features_file.keys())\n# lastcol= features_file.keys()[ncols-1]\n#add a column:\nfeatures_file[\"column_tiny_number\"]= tiny_values[\"number\"]\nfeatures_file[\"column_tiny_percentage\"]= tiny_values[\"percentage\"]\n#features_file.to_csv(\"/home/anjali/06/datasets/features.csv\", sep=\",\")\n\n#\n#\nSmall_values=features_file.Small\ncol_names= ['Small_number', \"Small_percentage\"]\nSmall_df = pd.DataFrame(columns= col_names)\n# #Getting out Small values for sequences:\n# for item in range(len(Small_values)):\n# val=features_file.Small[item]\n# df= literal_eval(val)\n# # print(df)\n# Small_df= Small_df.append({\"Small_number\":\"{}\".format(df[\"number\"]), \"Small_percentage\": \"{}\".format(df[\"percentage\"])}, ignore_index= True)\n# #get the last column name:\n# ncols= len(features_file.keys())\n# lastcol= features_file.keys()[ncols-1]\n# #add a column:\nfeatures_file[\"column_Small_number\"]= Small_values[\"number\"]\nfeatures_file[\"column_Small_percentage\"]= Small_values[\"percentage\"]\n# #features_file.to_csv(\"/home/anjali/06/datasets/features.csv\", sep=\",\")\n#\n#\n\nAliphatic_values=features_file.Aliphatic\n# col_names= ['Aliphatic_number', \"Aliphatic_percentage\"]\n# Aliphatic_df = pd.DataFrame(columns= col_names)\n# #Getting out Aliphatic values for sequences:\n# for item in range(len(Aliphatic_values)):\n# val=features_file.Aliphatic[item]\n# df= literal_eval(val)\n# # print(df)\n# Aliphatic_df= Aliphatic_df.append({\"Aliphatic_number\":\"{}\".format(df[\"number\"]), \"Aliphatic_percentage\": \"{}\".format(df[\"percentage\"])}, ignore_index= True)\n# #get the last column name:\n# ncols= len(features_file.keys())\n# lastcol= features_file.keys()[ncols-1]\n# #add a column:\nfeatures_file[\"column_Aliphatic_number\"]=Aliphatic_values[\"number\"]\nfeatures_file[\"column_Aliphatic_percentage\"]= Aliphatic_values[\"percentage\"]\n# #features_file.to_csv(\"/home/anjali/06/datasets/features.csv\", sep=\",\")\n#\n#\n#\nAromatic_values=features_file.Aromatic\n# col_names= ['Aromatic_number', \"Aromatic_percentage\"]\n# Aromatic_df = pd.DataFrame(columns= col_names)\n# #Getting out Aromatic values for sequences:\n# for item in range(len(Aromatic_values)):\n# val=features_file.Aromatic[item]\n# df= literal_eval(val)\n# # print(df)\n# Aromatic_df= Aromatic_df.append({\"Aromatic_number\":\"{}\".format(df[\"number\"]), \"Aromatic_percentage\": \"{}\".format(df[\"percentage\"])}, ignore_index= True)\n# #get the last column name:\n# ncols= len(features_file.keys())\n# lastcol= features_file.keys()[ncols-1]\n# #add a column:\nfeatures_file[\"column_Aromatic_number\"]= Aromatic_values[\"number\"]\nfeatures_file[\"column_Aromatic_percentage\"]= Aromatic_values[\"percentage\"]\n# #features_file.to_csv(\"/home/anjali/06/datasets/features.csv\", sep=\",\")\n#\n#\n#\nNonPolar_values=features_file.NonPolar\n# col_names= ['NonPolar_number', \"NonPolar_percentage\"]\n# NonPolar_df = pd.DataFrame(columns= col_names)\n# #Getting out NonPolar values for sequences:\n# for item in range(len(NonPolar_values)):\n# val=features_file.NonPolar[item]\n# df= literal_eval(val)\n# # print(df)\n# NonPolar_df= NonPolar_df.append({\"NonPolar_number\":\"{}\".format(df[\"number\"]), \"NonPolar_percentage\": 
\"{}\".format(df[\"percentage\"])}, ignore_index= True)\n# #get the last column name:\n# ncols= len(features_file.keys())\n# lastcol= features_file.keys()[ncols-1]\n# #add a column:\nfeatures_file[\"column_NonPolar_number\"]= NonPolar_values[\"number\"]\nfeatures_file[\"column_NonPolar_percentage\"]= NonPolar_values[\"percentage\"]\n# #features_file.to_csv(\"/home/anjali/06/datasets/features.csv\", sep=\",\")\n#\n#\n#\n#\nPolar_values=features_file.Polar\n# col_names= ['Polar_number', \"Polar_percentage\"]\n# Polar_df = pd.DataFrame(columns= col_names)\n# #Getting out Polar values for sequences:\n# for item in range(len(Polar_values)):\n# val=features_file.Polar[item]\n# df= literal_eval(val)\n# # print(df)\n# Polar_df= Polar_df.append({\"Polar_number\":\"{}\".format(df[\"number\"]), \"Polar_percentage\": \"{}\".format(df[\"percentage\"])}, ignore_index= True)\n# #get the last column name:\n# ncols= len(features_file.keys())\n# lastcol= features_file.keys()[ncols-1]\n# #add a column:\nfeatures_file[\"column_Polar_number\"]= Polar_values[\"number\"]\nfeatures_file[\"column_Polar_percentage\"]= Polar_values[\"percentage\"]\n# #features_file.to_csv(\"/home/anjali/06/datasets/features.csv\", sep=\",\")\n#\n#\n#\nCharged_values=features_file.Charged\n# col_names= ['Charged_number', \"Charged_percentage\"]\n# Charged_df = pd.DataFrame(columns= col_names)\n# #Getting out Charged values for sequences:\n# for item in range(len(Charged_values)):\n# val=features_file.Charged[item]\n# df= literal_eval(val)\n# # print(df)\n# Charged_df= Charged_df.append({\"Charged_number\":\"{}\".format(df[\"number\"]), \"Charged_percentage\": \"{}\".format(df[\"percentage\"])}, ignore_index= True)\n# #get the last column name:\n# ncols= len(features_file.keys())\n# lastcol= features_file.keys()[ncols-1]\n# #add a column:\nfeatures_file[\"column_Charged_number\"]= Charged_values[\"number\"]\nfeatures_file[\"column_Charged_percentage\"]= Charged_values[\"percentage\"]\n# #features_file.to_csv(\"/home/anjali/06/datasets/features.csv\", sep=\",\")\n#\n#\n#\n#\n#\nBasic_values=features_file.Basic\n# col_names= ['Basic_number', \"Basic_percentage\"]\n# Basic_df = pd.DataFrame(columns= col_names)\n# #Getting out Basic values for sequences:\n# for item in range(len(Basic_values)):\n# val=features_file.Basic[item]\n# df= literal_eval(val)\n# # print(df)\n# Basic_df= Basic_df.append({\"Basic_number\":\"{}\".format(df[\"number\"]), \"Basic_percentage\": \"{}\".format(df[\"percentage\"])}, ignore_index= True)\n# #get the last column name:\n# ncols= len(features_file.keys())\n# lastcol= features_file.keys()[ncols-1]\n# #add a column:\nfeatures_file[\"column_Basic_number\"]=Basic_values[\"number\"]\nfeatures_file[\"column_Basic_percentage\"]= Basic_values[\"percentage\"]\n# #features_file.to_csv(\"/home/anjali/06/datasets/features.csv\", sep=\",\")\n#\n#\n#\nAcidic_values=features_file.Acidic\n# col_names= ['Acidic_number', \"Acidic_percentage\"]\n# Acidic_df = pd.DataFrame(columns= col_names)\n# #Getting out Acidic values for sequences:\n# for item in range(len(Acidic_values)):\n# val=features_file.Acidic[item]\n# df= literal_eval(val)\n# # print(df)\n# Acidic_df= Acidic_df.append({\"Acidic_number\":\"{}\".format(df[\"number\"]), \"Acidic_percentage\": \"{}\".format(df[\"percentage\"])}, ignore_index= True)\n# #get the last column name:\n# ncols= len(features_file.keys())\n# lastcol= features_file.keys()[ncols-1]\n# #add a column:\nfeatures_file[\"column_Acidic_number\"]= 
Acidic_values[\"number\"]\nfeatures_file[\"column_Acidic_percentage\"]= Acidic_values[\"percentage\"]\n# #features_file.to_csv(\"/home/anjali/06/datasets/features.csv\", sep=\",\")\n#\n\ndrop_cols= [\"Tiny\", \"Small\", \"Aliphatic\", \"Aromatic\", \"NonPolar\", \"Polar\", \"Charged\", \"Basic\", \"Acidic\"]\nfeatures_file.drop(columns=drop_cols, inplace= True)\nfeatures_file.drop_duplicates(subset= \"seq\", inplace= True)\nprint(features_file)\n","sub_path":"getfeatures_deploy.py","file_name":"getfeatures_deploy.py","file_ext":"py","file_size_in_byte":7546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"429933410","text":"import os\r\nimport logging\r\nimport argparse\r\nimport pandas as pd\r\nfrom statistics import mean\r\n\r\nimport torch\r\nimport torch.utils.data as data\r\nimport torchvision.transforms as transforms\r\nfrom torchsummary import summary\r\n\r\nimport models.models as models\r\nimport data.datasets as datasets\r\n\r\nlogger = logging.getLogger(__name__)\r\n\r\ndef accuracy(output, target, topk=(1,)):\r\n \"\"\"Computes the precision@k for the specified values of k\"\"\"\r\n maxk = max(topk)\r\n batch_size = target.size(0)\r\n\r\n _, pred = torch.topk(output, maxk, dim=1, largest=True, sorted=True)\r\n pred = pred.t()\r\n correct = pred.eq(target.view(1, -1).expand_as(pred))\r\n\r\n corr_list = []\r\n for k in topk:\r\n correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)\r\n corr_list.append(correct_k.item())\r\n #res.append(correct_k.mul_(100.0 / batch_size))\r\n return corr_list\r\n\r\ndef update_lr(optimizer, lr): \r\n # For updating learning rate \r\n for param_group in optimizer.param_groups:\r\n param_group['lr'] = lr\r\n\r\ndef data_loading(args, split):\r\n if args.model_type == 'resnet18' or args.model_type == 'resnet152':\r\n if split=='train':\r\n transform = transforms.Compose([\r\n transforms.Resize((args.image_size+32, args.image_size+32)),\r\n transforms.RandomCrop((args.image_size, args.image_size)),\r\n transforms.RandomHorizontalFlip(),\r\n transforms.ColorJitter(brightness=0.1,\r\n contrast=0.1, saturation=0.1, hue=0.1),\r\n transforms.ToTensor(),\r\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\r\n ])\r\n else:\r\n transform = transforms.Compose([\r\n transforms.Resize((args.image_size, args.image_size)),\r\n transforms.ToTensor(),\r\n transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\r\n ])\r\n else:\r\n transform = None\r\n\r\n if args.dataset_name == 'moeImouto': \r\n dataset = datasets.moeImouto(root=args.dataset_path,\r\n input_size=args.image_size, split=split, transform=transform)\r\n elif args.dataset_name == 'danbooruFaces':\r\n dataset = datasets.danbooruFaces(root=args.dataset_path,\r\n input_size=args.image_size, split=split, transform=transform)\r\n elif args.dataset_name == 'cartoonFace':\r\n dataset = datasets.cartoonFace(root=args.dataset_path,\r\n input_size=args.image_size, split=split, transform=transform) \r\n elif args.dataset_name == \"ZACI20\":\r\n dataset = datasets.ZACI20(\r\n args.dataset_path, input_size=args.image_size, split=split, transform=transform\r\n )\r\n \r\n dataset_loader = data.DataLoader(dataset, batch_size=args.batch_size, \r\n shuffle=True, num_workers=4)\r\n\r\n\r\n return dataset, dataset_loader\r\n\r\ndef model_selection(args, no_classes):\r\n # initiates model and loss \r\n if args.model_type=='shallow':\r\n model = models.ShallowNet(no_classes)\r\n elif args.model_type=='resnet18' or 
args.model_type=='resnet152':\r\n model = models.ResNet(no_classes, args)\r\n else:\r\n model = models.VisionTransformer(no_classes, args)\r\n return model\r\n\r\ndef train_main(logger, args):\r\n\r\n # makes results_dir if doesn't exist\r\n results_dir = args.results_dir\r\n if not os.path.exists(results_dir):\r\n os.makedirs(results_dir)\r\n\r\n # Device configuration\r\n device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\r\n # Controlling source of randomness: pytorch RNG\r\n torch.manual_seed(0)\r\n\r\n # dataloader and train/test datasets\r\n train_set, train_loader = data_loading(args, split='train')\r\n _, val_loader = data_loading(args, split='val')\r\n _, test_loader = data_loading(args, split='test')\r\n no_classes = train_set.no_classes\r\n classid_classname_dic = train_set.classes\r\n\r\n # model\r\n model = model_selection(args, no_classes)\r\n model.to(device)\r\n if args.checkpoint_path:\r\n state_dict = torch.load(args.checkpoint_path)\r\n if args.transfer_learning:\r\n # Modifications to load partial state dict\r\n expected_missing_keys = []\r\n '''\r\n if ('patch_embedding.weight' in state_dict):\r\n expected_missing_keys += ['patch_embedding.weight', 'patch_embedding.bias']\r\n if ('pre_logits.weight' in state_dict):\r\n expected_missing_keys += ['pre_logits.weight', 'pre_logits.bias']\r\n \r\n for key in state_dict.keys():\r\n print(key)\r\n '''\r\n if ('model.fc.weight' in state_dict):\r\n expected_missing_keys += ['model.fc.weight', 'model.fc.bias']\r\n for key in expected_missing_keys:\r\n state_dict.pop(key)\r\n #print(key)\r\n model.load_state_dict(state_dict, strict=False)\r\n print('Loaded from custom checkpoint.')\r\n # prints model summary (layers, parameters by giving it a sample input)\r\n summary(model, input_size=next(iter(train_loader))[0].shape[1:])\r\n \r\n # loss and optimizer\r\n criterion = torch.nn.CrossEntropyLoss()\r\n params_to_update = []\r\n for param in model.parameters():\r\n if param.requires_grad == True:\r\n params_to_update.append(param)\r\n #optimizer = torch.optim.Adam(params_to_update, lr=args.learning_rate)\r\n optimizer = torch.optim.SGD(params_to_update, lr=args.learning_rate, momentum=0.9)\r\n \r\n # Train the model\r\n total_step = len(train_loader)\r\n curr_lr = args.learning_rate\r\n train_loss_avg = []\r\n val_loss_avg = []\r\n top1_accuracies = []\r\n top5_accuracies = []\r\n best_epoch = 0\r\n curr_acc = 0\r\n top_acc = 0\r\n\r\n for epoch in range(args.train_epochs):\r\n current_losses = []\r\n for i, (images, labels) in enumerate(train_loader):\r\n images = images.to(device)\r\n labels = labels.to(device)\r\n \r\n # Forward pass\r\n outputs = model(images)\r\n loss = criterion(outputs, labels)\r\n \r\n # Backward and optimize\r\n optimizer.zero_grad()\r\n loss.backward()\r\n optimizer.step()\r\n\r\n # prints current set of results after each 10 iterations\r\n if (i % 10) == 0:\r\n print (\"Epoch [{}/{}], Step [{}/{}] Loss: {:.8f}\"\r\n .format(epoch+1, args.train_epochs, i+1, total_step, loss.item()))\r\n current_losses.append(loss.item()) \r\n \r\n # Decay learning rate\r\n if (epoch+1) % args.epoch_decay == 0:\r\n curr_lr /= 3\r\n update_lr(optimizer, curr_lr)\r\n \r\n # calculates mean of losses for current epoch and appends to list of avgs\r\n train_loss_avg.append(mean(current_losses)) \r\n\r\n # validates on the validation set once per epoch, calculates top1/5 acc and val loss avg\r\n curr_acc = validate(device=device, model=model, criterion=criterion, loader=val_loader,\r\n top1_accuracies=top1_accuracies, 
top5_accuracies=top5_accuracies, val_loss_avg=val_loss_avg)\r\n\r\n # Save the model checkpoint if the top1-acc is higher than current highest\r\n if curr_acc > top_acc:\r\n torch.save(model.state_dict(), os.path.join(results_dir, \r\n '{}.ckpt'.format(args.name)))\r\n top_acc = curr_acc\r\n best_epoch = epoch + 1\r\n \r\n # validate on test set and plot results\r\n validate(device=device, model=model, criterion=criterion, loader=test_loader, \r\n top1_accuracies=top1_accuracies, top5_accuracies=top5_accuracies)\r\n\r\n logger.info('Finished training successfully. Best val accuracy: {}, at epoch no: {}/{}'.format(\r\n top_acc, best_epoch, args.train_epochs))\r\n\r\n # contains the top1/5 accuracies for the validation after each epoch, and the last one for the test\r\n df_accuracies = pd.DataFrame(list(zip(top1_accuracies, top5_accuracies)))\r\n # contains the training and validation loss averages for each epoch\r\n df_losses = pd.DataFrame(list(zip(train_loss_avg, val_loss_avg)))\r\n df_accuracies.to_csv(os.path.join(results_dir, \r\n '{}_accuracies.csv'.format(args.name)), sep=',', header=False, index=False)\r\n df_losses.to_csv(os.path.join(results_dir, \r\n '{}_losses.csv'.format(args.name)), sep=',', header=False, index=False)\r\n\r\ndef validate(device, model, criterion, loader,\r\n top1_accuracies, top5_accuracies, val_loss_avg=[]):\r\n # Test the model (validation set)\r\n # eval mode (batchnorm uses moving mean/variance instead of mini-batch mean/variance)\r\n # dropout probability goes to 0\r\n model.eval()\r\n with torch.no_grad():\r\n correct_1 = 0\r\n correct_5 = 0\r\n total = 0\r\n current_losses = []\r\n\r\n for images, labels in loader:\r\n images = images.to(device)\r\n labels = labels.to(device)\r\n \r\n # Forward pass\r\n outputs = model(images)\r\n loss = criterion(outputs, labels) \r\n current_losses.append(loss.item())\r\n \r\n # calculate top-k (1 and 5) accuracy\r\n total += labels.size(0)\r\n curr_corr_list = accuracy(outputs.data, labels, (1, 5, ))\r\n correct_1 += curr_corr_list[0]\r\n correct_5 += curr_corr_list[1] \r\n \r\n # append avg val loss\r\n val_loss_avg.append(mean(current_losses))\r\n\r\n # compute epoch accuracy in percentages\r\n curr_top1_acc = 100 * correct_1/total\r\n top1_accuracies.append(curr_top1_acc)\r\n print('Val/Test Top-1 Accuracy of the model on the test images: {:.4f} %'.format(curr_top1_acc))\r\n curr_top5_acc = 100 * correct_5/total\r\n top5_accuracies.append(curr_top5_acc)\r\n print('Val/Test Top-5 Accuracy of the model on the test images: {:.4f} %'.format(curr_top5_acc))\r\n\r\n return curr_top1_acc\r\n\r\ndef main():\r\n\r\n logging.basicConfig(filename='logs.txt', level=logging.INFO, \r\n format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',\r\n datefmt='%m/%d/%Y %H:%M:%S',)\r\n \r\n parser = argparse.ArgumentParser()\r\n parser.add_argument(\"--name\", required=True,\r\n help=\"Name of this run. 
Used for monitoring.\")\r\n parser.add_argument(\"--dataset_name\", choices=[\"moeImouto\", \"danbooruFaces\", \"cartoonFace\", \"ZACI20\"], \r\n default=\"moeImouto\", help=\"Which dataset to use.\")\r\n parser.add_argument(\"--dataset_path\", required=True,\r\n help=\"Path for the dataset.\")\r\n parser.add_argument(\"--model_type\", choices=[\"shallow\", 'resnet18', 'resnet152', \r\n 'B_16', 'B_32', 'L_16', 'L_32', 'H_14',\r\n 'B_16_imagenet1k', 'B_32_imagenet1k', \r\n 'L_16_imagenet1k', 'L_32_imagenet1k'],\r\n default=\"shallow\",\r\n help=\"Which model architecture to use\")\r\n parser.add_argument(\"--results_dir\", default=\"results_training\", type=str,\r\n help=\"The directory where results will be stored\")\r\n parser.add_argument(\"--image_size\", default=224, type=int,\r\n help=\"Image (square) resolution size\")\r\n parser.add_argument(\"--batch_size\", default=256, type=int,\r\n help=\"Batch size for train/val/test.\")\r\n parser.add_argument(\"--train_epochs\", default=200, type=int,\r\n help=\"Total number of epochs for training.\") \r\n parser.add_argument(\"--epoch_decay\", default=50, type=int,\r\n help=\"After how many epochs to decay the learning rate once.\")\r\n parser.add_argument(\"--learning_rate\", default=0.001, type=float,\r\n help=\"Initial learning rate.\") \r\n parser.add_argument(\"--pretrained\", type=bool, default=False,\r\n help=\"For models with pretrained weights available\"\r\n \"Default=False\")\r\n parser.add_argument(\"--checkpoint_path\", type=str, \r\n default=None) \r\n parser.add_argument(\"--transfer_learning\", type=bool, default=False,\r\n help=\"Load partial state dict for transfer learning\"\r\n \"Resets the [embeddings, logits and] fc layer for ViT\"\r\n \"Resets the fc layer for Resnets\"\r\n \"Default=False\") \r\n args = parser.parse_args()\r\n\r\n logger.info(args)\r\n\r\n train_main(logger, args) \r\n\r\nif __name__ == '__main__':\r\n main()\r\n","sub_path":"classification/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":12685,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"334308583","text":"# export.py - write csv, read csv buffer into pandas dataframe\n\n\"\"\"Dump spreadsheet values to CSV files and pandas DataFrames.\"\"\"\n\nimport csv\nimport contextlib\n\nfrom ._compat import open_csv, csv_writerows, CsvBuffer, read_csv\n\npandas = None\n\n__all__ = ['open_csv', 'write_csv', 'write_dataframe']\n\nENCODING = 'utf-8'\n\nDIALECT = 'excel'\n\nMAKE_FILENAME = '%(title)s - %(sheet)s.csv'\n\n\ndef write_csv(fileobj, rows, encoding=ENCODING, dialect=DIALECT):\n \"\"\"Dump rows to ``fileobj`` with the given ``encoding`` and CSV ``dialect``.\"\"\"\n csvwriter = csv.writer(fileobj, dialect=dialect)\n csv_writerows(csvwriter, rows, encoding)\n\n\ndef write_dataframe(rows, encoding=ENCODING, dialect=DIALECT, **kwargs):\n \"\"\"Dump ``rows`` to string buffer and load with ``pandas.read_csv()`` using ``kwargs``.\"\"\"\n global pandas\n if pandas is None: # pragma: no cover\n import pandas\n with contextlib.closing(CsvBuffer()) as fd:\n write_csv(fd, rows, encoding, dialect)\n fd.seek(0)\n df = read_csv(pandas, fd, encoding, dialect, kwargs)\n return df\n","sub_path":"gsheets/export.py","file_name":"export.py","file_ext":"py","file_size_in_byte":1074,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"308718294","text":"import glob\nimport cv2\n\n\ndef read_image(img_path):\n \"\"\"\n Reads every image in the folder, grouped by viewing angle, and stores them in a dict.\n 
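(Note) Images are matched per view with the glob pattern *_<view>.png for view = 0..11,\n and sample names are taken from the *_0.png files via their 'LRP' prefix.\n 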
The dict has the form:\n dict{\n \"view_0\":list(img0, img1, img2...)\n \"view_1\":list(img0, img1, img2...)\n ....\n \"view_11\":list(img0, img1, img2...)\n }\n :param img_path: directory containing the images\n :return: the dict of loaded images, and the sample names in the dict's storage order.\n \"\"\"\n sample_view = dict()\n sample_sort_name = list()\n for view in range(0, 12):\n view_img_path = glob.glob(img_path+\"\\\\*_\"+str(view)+\".png\")\n sample_view[\"view_\" + str(view)] = list()\n for view_img_path_num in range(len(view_img_path)):\n sample_view[\"view_\"+str(view)].append(cv2.imread(view_img_path[view_img_path_num], 0))\n for sample_name in glob.glob(img_path+\"\\\\*_0.png\"):\n sample_sort_name.append(sample_name[sample_name.find(\"LRP\"):sample_name.find(\"_0.png\")])\n\n return sample_view, sample_sort_name\n\n\nif __name__ == \"__main__\":\n img_path = r\"D:\\hongpu_liyi_code_pycharm\\dark_corner\\sample_for_all\\M1_L3\"\n a, b = read_image(img_path)\n print(a, b)","sub_path":"read_image.py","file_name":"read_image.py","file_ext":"py","file_size_in_byte":1149,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"424714527","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Oct 30 20:24:13 2019\n\n@author:Jorryt Yuzheng\n\"\"\"\n\nimport numpy\nimport scipy.ndimage as snd\nfrom scipy.interpolate import interp1d\n\nclass lyman:\n \n numpy.seterr(divide='ignore', invalid='ignore') #ignore zero divider\n\n def __init__(self, z, lr,lb,ar,ab,psep,rpb):\n #Basic default parameters for simulating a fake line\n self.DX=0.5\n self.fakeflux=8. \n self.xx=numpy.arange(-1200,1500.,self.DX)\n self.c=299792.458 #speed of light in km/s\n #init parameters \n self.redshift = z\n self.linewidthRed = lr\n self.linewidthBlue = lb\n self.asymRed = ar\n self.asymBlue = ab\n self.separation = psep\n self.relativePeakfluxBluered = rpb\n self.wavelength =1215.67*(1+self.redshift)*(1+self.xx/self.c)\n self.LSF_wavelength=self.MUSE_LSF((1+self.redshift)*1215.67)\n self.LSF_SIGMA=self.LSF_wavelength*self.c/((1+self.redshift)*1215.67)\n\n \n def __repr__(self):\n return 'Code for Semester Project'\n \n\n def half_gaussian(self,x,a,x0,sigma):\n return numpy.nan_to_num(a*numpy.exp(-((numpy.sqrt((x-x0)))**2)**2/(2*sigma**2))) \n \n \n def skewed_gaussian(self,x,a,x0,asym,d):\n return a*numpy.exp((-(x-x0)**2)/(2*(asym*(x-x0)+d)**2))\n \n def smoothed_skewed_gaussian(self,x,a,x0,asym,d):\n LSF_SIGMA_dx=166./(2.355*75.) # 75 is the pixel scale of MUSE dz (e.g. 1 pixel in the wavelength direction corresponds to 75 km/s for this example; needs to be improved)\n 
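# NOTE: 2.355 is the Gaussian FWHM-to-sigma factor (2*sqrt(2*ln2) ~= 2.3548), so\n # 166./(2.355*75.) converts a 166 km/s FWHM into a sigma measured in MUSE pixels.\n 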
return snd.gaussian_filter(a*numpy.exp((-(x-x0)**2)/(2*(asym*(x-x0)+d)**2)),sigma=LSF_SIGMA_dx)\n \n def MUSE_LSF(self,wv): #observed wavelength in angstrom; returns the LSF_sigma width, from Eq in https://arxiv.org/abs/1710.03002\n return (6.040+(5.866E-8 * wv**2) - (9.187E-4*wv))/2.355\n \n def intrinsic_double(self):\n #Simulate a double peak\n peak1 = self.skewed_gaussian(self.xx,self.fakeflux,0.+self.separation/2.,self.asymRed,self.linewidthRed)\n peak2 = self.skewed_gaussian(self.xx,self.fakeflux*self.relativePeakfluxBluered,0-self.separation/2.,self.asymBlue,self.linewidthBlue)\n return (1+self.redshift)*(peak1+peak2)\n \n def smooth(self):\n return snd.gaussian_filter(self.intrinsic_double(),sigma=self.LSF_SIGMA/self.DX)\n \n def MuseData(self):\n ###HERE I'm going to decrease the pixel-scale to the same as the MUSE data, which is 1.25 Angstrom.\n ff=interp1d(self.wavelength,self.smooth())\n sim_wav=numpy.arange(numpy.min(self.wavelength)+2,numpy.max(self.wavelength)-2.,1.25)\n sim_vel=((sim_wav/(1215.67*(1+self.redshift)))-1.)*self.c #decreased x range in MUSE data\n sim_line=ff(sim_wav) #decreased y range in MUSE data\n return sim_vel,sim_line\n \n def MuseDataN(self,sigma=None):\n if sigma is None:\n sigma = 3\n smoothed_line=numpy.random.normal(self.smooth(),sigma)\n ffN=interp1d(self.wavelength,smoothed_line)\n sim_wav=numpy.arange(numpy.min(self.wavelength)+2,numpy.max(self.wavelength)-2.,1.25)\n sim_Nvel=((sim_wav/(1215.67*(1+self.redshift)))-1.)*self.c #decreased x range in MUSE Noise data\n sim_Nline=ffN(sim_wav) #decreased y range in MUSE Noise data\n return sim_Nvel,sim_Nline\n \n","sub_path":"SProjectD.py","file_name":"SProjectD.py","file_ext":"py","file_size_in_byte":3346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"269399940","text":"from magma import wire, compile, EndCircuit\nfrom loam.boards.icestick import IceStick, Mux\n\nicestick = IceStick()\nfor i in range(6):\n icestick.J1[i].input().on()\nicestick.D1.on()\n\nmain = icestick.main()\nI = main.J1[0:4]\nS = main.J1[4:6]\n\nmux = Mux(4)\nmux( I, S )\nwire(mux.O, main.D1)\n\nEndCircuit()\n","sub_path":"tests/test_mantle/mux/mothball/mux4.py","file_name":"mux4.py","file_ext":"py","file_size_in_byte":301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"270999516","text":"# Question 5\n\n\n\ndef input_positive_integer(msg):\n while True:\n try:\n num = int(input(msg).strip())\n except ValueError:\n print(\"Please input positive integers only!\")\n continue\n if num < 0:\n print(\"Please input positive integers only!\")\n continue\n return num\n\n\ndef print_table(limit):\n print(\" \", *range(1, limit))\n print(\"+--\"+\"-\"*2*limit)\n for i in range(1, limit):\n yield (str(i), \"|\", *range(i, i*limit, i))\n\n# \" \".join([\"%d\"%i for i in range(i, i*limit, i)])\n\ndef main():\n lim = input_positive_integer(\"Please Enter the number of terms : \")\n for x in print_table(lim+1):\n print(*x)\n\nmain()\n","sub_path":"Python/Assignment5/question5.py","file_name":"question5.py","file_ext":"py","file_size_in_byte":705,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"31365677","text":"# -*- coding: utf-8; -*-\nfrom hello.models import db, Post\nfrom pytest import fixture\nfrom pytest import mark\nfrom .. import app\n
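# NOTE: sample_post below is a yield fixture - everything after the yield runs as\n# teardown, and the try/finally removes the Post row even when a test raises.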
\n\n@fixture\ndef sample_post():\n p1 = None\n try:\n p1 = Post(title='foo', message='bar')\n db.session.add(p1)\n db.session.commit()\n yield p1\n finally:\n app.logger.debug(\"CLEAN UP THE MESS\")\n if p1:\n db.session.delete(p1)\n db.session.commit()\n\n\nclass TestSamplePost(object):\n @classmethod\n def teardown_method(cls):\n assert Post.query.count() == 0\n\n def test_sample_post_cleanup_ok(self, sample_post):\n app.logger.debug(\"sample_post = {}\".format(sample_post))\n assert Post.query.count() == 1\n\n @mark.xfail(raises=Exception)\n def test_sample_post_cleanup_with_error(self, sample_post):\n app.logger.debug(\"sample_post = {}\".format(sample_post))\n raise Exception('Anyway, this is an exception!')\n","sub_path":"tests/unit/fixture_cleanup_test.py","file_name":"fixture_cleanup_test.py","file_ext":"py","file_size_in_byte":940,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"583666711","text":"# https://leetcode.com/problems/sqrtx/\n# Implement int sqrt(int x).\n# binary search\n\nclass Solution:\n def mySqrt(self, x: int) -> int:\n border = [0,x]\n i = int(x/2)\n while not i**2 <= x < (i+1)**2:\n if i**2 > x:\n border[1] = i\n elif (i+1)**2 <= x:\n border[0] = i+1\n i = int((border[1]+border[0])/2)\n return i\n \n \n \n\n\nif __name__==\"__main__\":\n obj = Solution()\n param_1 = obj.mySqrt(100)\n print(param_1)\n","sub_path":"leetcode/69_Sqrt(x).py","file_name":"69_Sqrt(x).py","file_ext":"py","file_size_in_byte":531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"116070071","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Nov 19 17:28:05 2019\n\n@author: deesaw\n\"\"\"\n\n#custom exceptions\nclass DeloitteMath(Exception):\n\tdef __init__(self,args):\n\t\tsuper().__init__(args)\n\ne = DeloitteMath(\"I do not know math\")\n\nraise e\n\nmyexp = ValueError(\"pl. check the values\")\n\nraise myexp","sub_path":"Python/exception4.py","file_name":"exception4.py","file_ext":"py","file_size_in_byte":292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"497260992","text":"'''\r\n WAP in python to accept five numbers and display only\r\n positive numbers.\r\n'''\r\n\r\nt=()\r\nprint(\"Enter five numbers\")\r\nfor i in range(5):\r\n t=t+(int(input()),)\r\n\r\n\r\nprint(\"=====OUTPUT====\")\r\nfor i in t:\r\n if(i>0):\r\n print(i)\r\n\r\n\r\n","sub_path":"22.09.2020 python/22.09.2020 python/tuple_demo3..py","file_name":"tuple_demo3..py","file_ext":"py","file_size_in_byte":253,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"64838803","text":"# Task 1:\n# Given a list of fruits.\n# Write a program that prints the fruits as a numbered list,\n# aligned to the right.\n\n# Example:\n# Given: [\"яблоко\", \"банан\", \"киви\", \"арбуз\"]\n# Output:\n# 1. яблоко\n# 2. банан\n# 3. киви\n# 4. арбуз\n
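# NOTE: the right alignment comes from the '{:>20}' format spec in task1, which\n# pads each name on the left to a 20-character field.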
\n# Hint: use the .format() method\n\n\ndef task1(lst):\n\tfor i, elm in enumerate(lst, 1):\n\t\tprint('{}.{:>20}'.format(i, elm))\n\n\nL = []\nwhile True:\n\tfruit = input('Введите название фрукта или для выхода нажмите Enter\\n')\n\tprint(fruit)\n\tif fruit == \"\":\n\t\tbreak\n\telse:\n\t\tL.append(fruit)\nprint(len(L))\ntry:\n\tassert len(L) > 0\nexcept AssertionError:\n\tprint('Вы не ввели ни одного элемента')\nelse:\n\ttask1(L)\n\n\n# Task 2:\n# Given two arbitrary lists.\n# Remove from the first list the elements that are present in the second list.\nlist1 = input('Введите значения первого списка через пробел')\nlist1 = list1.split(' ')\n\nlist2 = input('Введите значения второго списка через пробел')\nlist2 = list2.split(' ')\n\nres = set(list1).difference(set(list2))\nprint(res)\n\n# Task 3:\n# Given an arbitrary list of integers.\n# Build a NEW list from the elements of the original one, as follows:\n# if an element is divisible by two, divide it by 4; otherwise, multiply it by two.\n\n\ndef task3(lst):\n\tres_lst = []\n\tfor elm in lst:\n\t\tif elm % 2 == 0:\n\t\t\tres_lst.append(elm/4)\n\t\telse:\n\t\t\tres_lst.append(elm*2)\n\treturn res_lst\n\n\nlst = input('Введите значения списка через пробел')\ntry:\n\tlst = list(map(int, lst.split(' ')))\nexcept ValueError:\n\tprint(\"Не все значения списка являются числами!\")\nelse:\n\tprint(task3(lst))\n","sub_path":"lesson02/home_work/hw02_easy.py","file_name":"hw02_easy.py","file_ext":"py","file_size_in_byte":2200,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"18231215","text":"# coding: utf-8\n\nfrom copy import deepcopy\nimport uuid\n\nfrom supervisely_lib._utils import take_with_default\nfrom supervisely_lib.video_annotation.video_tag_collection import VideoTagCollection\nfrom supervisely_lib.video_annotation.video_object_collection import VideoObjectCollection\nfrom supervisely_lib.video_annotation.frame_collection import FrameCollection\nfrom supervisely_lib.video_annotation.constants import FRAMES, IMG_SIZE, IMG_SIZE_HEIGHT, IMG_SIZE_WIDTH, \\\n DESCRIPTION, FRAMES_COUNT, TAGS, OBJECTS, VIDEO_ID, KEY, \\\n VIDEOS_MAP, VIDEO_NAME\nfrom supervisely_lib.video_annotation.key_id_map import KeyIdMap\n\nfrom supervisely_lib.video_annotation.video_annotation import VideoAnnotation\nfrom supervisely_lib.video_annotation.constants import FIGURES\nfrom supervisely_lib.pointcloud_annotation.constants import POINTCLOUD_ID\nfrom supervisely_lib.pointcloud_annotation.pointcloud_figure import PointcloudFigure\nfrom supervisely_lib.pointcloud_annotation.pointcloud_object_collection import PointcloudObjectCollection\n\n\nclass PointcloudAnnotation(VideoAnnotation):\n def __init__(self, objects=None, figures=None, tags=None, description=\"\", key=None):\n self._description = description\n self._tags = take_with_default(tags, VideoTagCollection())\n self._objects = take_with_default(objects, VideoObjectCollection())\n self._figures = take_with_default(figures, [])\n self._key = take_with_default(key, uuid.uuid4())\n\n @property\n def img_size(self):\n raise RuntimeError(\"Not supported for pointcloud\")\n\n @property\n def frames_count(self):\n raise RuntimeError(\"Not supported for pointcloud\")\n\n @property\n def figures(self):\n return deepcopy(self._figures)\n\n def validate_figures_bounds(self):\n raise RuntimeError(\"Not supported for pointcloud\")\n\n def to_json(self, key_id_map: KeyIdMap=None):\n res_json = {\n DESCRIPTION: self.description,\n KEY: 
self.key().hex,\n TAGS: self.tags.to_json(key_id_map),\n OBJECTS: self.objects.to_json(key_id_map),\n FIGURES: [figure.to_json(key_id_map) for figure in self.figures]\n }\n\n if key_id_map is not None:\n pointcloud_id = key_id_map.get_video_id(self.key())\n if pointcloud_id is not None:\n res_json[POINTCLOUD_ID] = pointcloud_id\n\n return res_json\n\n @classmethod\n def from_json(cls, data, project_meta, key_id_map: KeyIdMap=None):\n item_key = uuid.UUID(data[KEY]) if KEY in data else uuid.uuid4()\n if key_id_map is not None:\n key_id_map.add_video(item_key, data.get(POINTCLOUD_ID, None))\n description = data.get(DESCRIPTION, \"\")\n tags = VideoTagCollection.from_json(data[TAGS], project_meta.tag_metas, key_id_map)\n objects = PointcloudObjectCollection.from_json(data[OBJECTS], project_meta, key_id_map)\n\n figures = []\n for figure_json in data.get(FIGURES, []):\n figure = PointcloudFigure.from_json(figure_json, objects, None, key_id_map)\n figures.append(figure)\n\n return cls(objects=objects,\n figures=figures,\n tags=tags,\n description=description,\n key=item_key)\n\n def clone(self, objects=None, figures=None, tags=None, description=None):\n return PointcloudAnnotation(objects=take_with_default(objects, self.objects),\n figures=take_with_default(figures, self.figures),\n tags=take_with_default(tags, self.tags),\n description=take_with_default(description, self.description))\n","sub_path":"supervisely_lib/pointcloud_annotation/pointcloud_annotation.py","file_name":"pointcloud_annotation.py","file_ext":"py","file_size_in_byte":3813,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"224907345","text":"\"\"\"\nn, index, maxSum\n nums.length == n\n nums[i] is a positive integer where 0 <= i < n.\n abs(nums[i] - nums[i+1]) <= 1 where 0 <= i < n-1.\n The sum of all the elements of nums does not exceed maxSum.\n nums[index] is maximized.\n\nn = 4, index = 2, maxSum = 6\nnot valid: [1, 2, 3, 2]\ncheapest way: [0, 1, 2, 1]\n>> to have nums[index] as the max, and keep decreasing 1 on both sides.\n0 to index: assuming nums[index] = x\n\nif x > index:\n left = (x-index+x)*(index+1)/2\nelse:\n left = index - x + 1 + (1 + x)*x/2\nif x > n-1-index:\n right = (x-(n-1-index)+x)*(n-index)/2\nelse:\n right = n-x-index + (1 + x)*x/2\nleft + right - x <= maxSum\n\"\"\"\nclass Solution:\n def maxValue(self, n: int, index: int, maxSum: int) -> int:\n l, r = 1, maxSum\n while l < r:\n x = (l+r+1)//2\n if x > index:\n left = (x-index+x)*(index+1)/2\n else:\n left = index - x + 1 + (1 + x)*x/2\n if x > n-1-index:\n right = (x-(n-1-index)+x)*(n-index)/2\n else:\n right = n-x-index + (1 + x)*x/2\n total = left + right - x\n if total > maxSum:\n r = x-1\n else:\n l = x\n return l","sub_path":"1802. Maximum Value at a Given Index in a Bounded Array.py","file_name":"1802. 
","sub_path":"1802. Maximum Value at a Given Index in a Bounded Array.py","file_name":"1802. Maximum Value at a Given Index in a Bounded Array.py","file_ext":"py","file_size_in_byte":1284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} {"seq_id":"462072096","text":"from socket import *\nimport threading\nimport struct\nimport time\nimport cv2\nimport numpy\n\n\nclass Carame_Accept_Object:\n def __init__(self, S_addr_port=(\"\", 9901)):\n self.resolution = (0, 0) # resolution\n self.img_fps = 0 # frames transmitted per second\n\n # set up the listening socket\n self.server = socket(AF_INET, SOCK_STREAM)\n self.server.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1) # allow the port to be reused\n self.server.bind(S_addr_port)\n self.server.listen(5)\n print(\"the process work in the port:%d\" % S_addr_port[1])\n\n\ndef check_option(object, client):\n # decode per the agreed format and apply the quality and resolution requested by the client\n # '=' requests standard sizes, so 'lhh' is exactly 4+2+2 = 8 bytes on every platform\n info = struct.unpack('=lhh', client.recv(8))\n if info[0] > 0:\n object.img_quality = int(info[0]) # image transmission quality\n object.resolution = list(object.resolution)\n # requested resolution\n object.resolution[0] = info[1]\n object.resolution[1] = info[2]\n object.resolution = tuple(object.resolution)\n return 1\n else:\n print(\"check_option: failed to decode the option format\")\n return 0\n\n\ndef RT_Image(object, client):\n if (check_option(object, client) == 0):\n return\n vc = cv2.VideoCapture(0) # capture video from the camera\n img_param = [int(cv2.IMWRITE_JPEG_QUALITY), object.img_quality] # transmission quality (0-100)\n while True:\n # time.sleep(0.1) # delay the thread by 0.1 s\n _, img = vc.read() # read one video frame\n # print(vc.get(cv2.CAP_PROP_FPS))\n # cv2.imshow(\"local test\", img)\n\n img = cv2.resize(img, object.resolution) # resize as requested (resolution must be a tuple)\n _, img_encode = cv2.imencode('.jpg', img, img_param) # encode the image in the requested format\n img_code = numpy.array(img_encode) # convert to an array\n img_data = img_code.tobytes() # serialize to bytes\n try:\n # second send in the protocol: pack a fixed-size length header and send the image\n client.send(\n struct.pack(\n \"=l\", len(img_data))\n + img_data)\n except:\n vc.release() # release the capture\n return\n finally:\n if cv2.waitKey(10) == 27: # refresh every 10 ms; press ESC (27) to exit\n break\n\n\nif __name__ == '__main__':\n camera = Carame_Accept_Object()\n while True:\n client, D_addr = camera.server.accept()\n clientThread = threading.Thread(None, target=RT_Image, args=(camera, client,))\n clientThread.start()\n","sub_path":"transmission/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} {"seq_id":"31527430","text":"import sys\nimport math\nfrom shapely.geometry import LineString\n\ndef intersect(p1,p2,p3,p4,q1,q2,q3,q4):\n line1=LineString([(p1,p2), (p3,p2), (p3,p4), (p1,p4), (p1,p2)])\n line2=LineString([(q1,q2), (q3,q2), (q3,q4), (q1,q4), (q1,q2)])\n ix=line1.intersection(line2)\n if \"EMPTY\" in str(ix):\n print(False)\n elif \"POINT\" in str(ix) or \"MULTIPOINT\" in str(ix):\n print(True)\n\ndef overlap(filename):\n file=open(filename)\n for line in file:\n l=[]\n for word in line.split(','):\n l.append(int(word))\n intersect(l[0],l[1],l[2],l[3],l[4],l[5],l[6],l[7])\n file.close()\n\noverlap(sys.argv[1])\n","sub_path":"Python/nomodule/overlappingrectangles.py3","file_name":"overlappingrectangles.py3","file_ext":"py3","file_size_in_byte":647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} {"seq_id":"523823937","text":"from __future__ import print_function\n\n\n_NO_INIT = object() # sentinel, so falsy initializers like 0 or '' are still honored\n\n\ndef myreduce(func, seq, init=_NO_INIT):\n if init is not _NO_INIT:\n tally = init\n for item in seq:\n tally = func(tally, item)\n return tally\n else:\n tally = seq[0]\n for item in seq[1:]:\n tally = func(tally, item)\n return tally\n\nif __name__ == '__main__':\n print(myreduce(lambda x, y: x + y, [1, 2, 3, 4]))
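\n # Added demo: an explicit 0 initializer is honored too (a plain truthiness test would silently drop it)\n print(myreduce(lambda x, y: x + y, [1, 2, 3, 4], 0))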
\n print(myreduce(lambda x, y: x + y, [1, 2, 3, 4], -10))\n","sub_path":"other/map zip reduce filter/reduce.py","file_name":"reduce.py","file_ext":"py","file_size_in_byte":454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} {"seq_id":"459835631","text":"from pynput.mouse import Listener\nfrom multiprocessing import Process\nfrom playsound import playsound\nfrom os import listdir\nfrom random import choice\nfrom time import sleep\nsounds=[f'Noises\\\\{i}' for i in listdir('Noises')]\ndef moved(_,__):\n global player\n if not player.is_alive():\n new_player=Process(target=play_sound)\n new_player.start()\n player=new_player\ndef play_sound():\n playsound(choice(sounds))\n sleep(20.0)\nif __name__=='__main__':\n player=Process(target=play_sound)\n with Listener(on_move=moved) as listener:\n listener.join()\n","sub_path":"NoMouse.pyw","file_name":"NoMouse.pyw","file_ext":"pyw","file_size_in_byte":586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} {"seq_id":"136002721","text":"'''\nCreated on 23/07/2014\n\n@author: thinktic\n'''\nimport tkinter\nfrom Fichero import *\n\ndef mostar(ev=None):\n fichero = Fichero (\"agenda.csv\")\n l = fichero.leer()\n resultado = tkinter.Label(top, text=l)\n resultado.pack()\n\ntop = tkinter.Tk()\ntop.geometry('250x150')\n\nhello = tkinter.Label(top, text='Menu')\nhello.pack()\n\nlabel = tkinter.Label(top, text='Choose an option', font='Helvetica -12 bold')\nlabel.pack()\n\n# note: command=mostar is evaluated before the name is rebound to the Button below\nmostar = tkinter.Button(top, text='Show', command=mostar, bg='red', fg='white')\nmostar.pack()\n\n\n\nquit = tkinter.Button(top, text='Quit', command=top.quit, activeforeground='white',\n activebackground='red')\nquit.pack()\n\ntkinter.mainloop()\n","sub_path":"build/scripts-3.4/menu.py","file_name":"menu.py","file_ext":"py","file_size_in_byte":678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} {"seq_id":"608911466","text":"\"\"\"takes a list of integers as input and returns the product of all of the elements in the list. 
For example: product([4, 5, 5]) should return 100 (because 4 * 5 * 5 is 100).\n\nDon’t worry about the list being empty.\nYour function should return an integer.\n\"\"\"\n\ndef product(nums):\n memo = 1\n for n in nums:\n memo = memo * n\n return memo\n","sub_path":"product.py","file_name":"product.py","file_ext":"py","file_size_in_byte":344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"593346741","text":"# This program uses the subprocess module's Popen function to launch programs\n\nimport subprocess\n\n# Launching Other Programs from Python\ncalcProc = subprocess.Popen('/usr/bin/gnome-calculator')\nprint(calcProc.poll() is None)\nprint(calcProc.wait())\nprint(calcProc.poll())\n\n# Passing Command Line Arguments to Popen()\nsubprocess.Popen(['/usr/bin/gedit', '/home/jose/Documents/Notepad.py']) # Using Ubuntu 18.04\n\n# Running Other Python Scripts\nsubprocess.Popen(['/usr/bin/python3',\n '/home/jose/PycharmProjects/python-tutorials/books/'\n 'AutomateTheBoringStuffWithPython/Chapter01/P02_hello.py']).communicate()\n\n# Opening Files with Default Applications\nfileObj = open('hello.txt', 'w')\nfileObj.write('Hello world!')\nfileObj.close()\nsubprocess.Popen(['see', 'hello.txt'])\n","sub_path":"books/AutomateTheBoringStuffWithPython/Chapter15/P08_popenFunction.py","file_name":"P08_popenFunction.py","file_ext":"py","file_size_in_byte":804,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"445935058","text":"import scrapy\nimport time\nimport json\nimport re\nfrom scrapy.utils import request\nfrom Scrapy_Environment_v1_01.items import InfoItem\n\n\nclass Oubaohuanjing01Spider(scrapy.Spider):\n name = 'Oubaohuanjing_01'\n base_url = 'http://www.hehuzhili.com'\n url_name = '欧保环境网'\n\n def start_requests(self):\n # 90\n for i in range(2):\n url = f'http://www.hehuzhili.com/xydt-{i + 1}.shtml'\n req = scrapy.Request(url=url, callback=self.parse, dont_filter=True)\n yield req\n\n def parse(self, response):\n config_info = response.xpath('//div[@class=\"news_con\"]/dl[@class=\"news_dl\"]/dt')\n for info in config_info:\n url = info.xpath('./a/@href').get()\n title = info.xpath('./a/@title').get()\n publishtime = info.xpath('./span/text()').get()\n issue_time = time.strftime(\"%Y-%m-%d\", time.strptime(publishtime[1:-1], \"%Y年%m月%d日%H:%M\"))\n req = scrapy.Request(url=url, callback=self.parse_detail, dont_filter=True, meta={'title': title, 'issue_time': issue_time})\n news_id = request.request_fingerprint(req)\n req.meta.update({'news_id': news_id})\n yield req\n\n def parse_detail(self, response):\n headers = {}\n for k, v in response.request.headers.items():\n headers[k.decode()] = v[0].decode()\n\n content = response.xpath('//div[@id=\"cntrBody\"]').extract_first()\n issue_time = response.meta['issue_time']\n images = response.xpath('//div[@id=\"cntrBody\"]//img/@src').extract()\n images_url = []\n if images:\n for url in images:\n if 'http' not in url:\n url = self.base_url + url\n images_url.append(url)\n\n item = InfoItem()\n item['news_id'] = response.meta['news_id']\n item['category'] = '环保经济'\n item['content_url'] = response.url\n item['title'] = response.meta['title']\n item['issue_time'] = issue_time if issue_time else None\n item['information_source'] = self.url_name\n item['source'] = self.url_name\n item['author'] = None\n item['content'] = content\n item['images'] = None\n item['title_image'] = None\n item['attachments'] = None\n item['area'] = None\n item['address'] = 
None\n item['tags'] = None\n item['sign'] = '19'\n item['update_time'] = str(int(time.time() * 1000))\n item['headers'] = headers\n item['images_url'] = images_url\n if item['content']:\n # print(item['title'], item['images_url'], item['issue_time'])\n yield item\n # self.logger.info('title: {}, issue_time: {}'.format(item['title'], issue_time))\n\n\nif __name__ == '__main__':\n from scrapy import cmdline\n cmdline.execute(['scrapy', 'crawl', 'Oubaohuanjing_01'])\n","sub_path":"info_spider/Scrapy_Environment_v1_01/build/lib/Scrapy_Environment_v1_01/spiders/Oubaohuanjing_01.py","file_name":"Oubaohuanjing_01.py","file_ext":"py","file_size_in_byte":2886,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"315126239","text":"\n## 2-15-2018\n## Class 4 Activity\n\n## Rewrite HW #1 so that all the code runs with functions and a main function\n## Sentence Tweet Length Calculator\n\n## Funtion to ask for the character limit\ndef set_char_limit():\n return int(input(\"What is the character limit?\\n\"))\n\n## Function to ask for tweet\ndef input_tweet():\n return input(\"Type your tweet here:\\n\")\n\n## Check if the tweet is within the character limit\ndef tweet_length_checker(tweet, char_limit):\n\n if len(tweet) > char_limit:\n print(\"Your tweet is {} characters over the limit.\\n\".format(\\\n len (tweet) - char_limit))\n return False\n else:\n print(\"Your tweet is within the limit\")\n return True\n\ndef main():\n\n char_limit = set_char_limit()\n\n tweet_checked = False # initialize a variable for checking tweet\n\n while tweet_checked == False:\n\n tweet = input_tweet()\n tweet_checked = tweet_length_checker(tweet, char_limit)\n\n\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"Intro_2_Programming/homework_1/class_4_activity.py","file_name":"class_4_activity.py","file_ext":"py","file_size_in_byte":989,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"449508451","text":"import sqlite3\nimport os\nfrom ameritrade import Ameritrade\nfrom database import Database\n\n\nclass Portfolio:\n\n def __init__(self, transactions=None):\n if not transactions:\n transactions = []\n self.transactions = transactions\n\n def add_transaction(self, transaction):\n self.transactions.append(transaction)\n\n\nclass TransactionProcessor:\n\n def __init__(self, ameritrade_json):\n self.ameritrade_json = ameritrade_json\n self.transaction_list = []\n self.group_transactions()\n\n def group_transactions(self):\n transactions = {}\n for transaction in self.ameritrade_json:\n print(transaction)\n transaction_type = transaction['type']\n if transaction_type == 'TRADE':\n transaction['transactionId'] = transaction['orderId']\n transaction_id = transaction['transactionId']\n if transaction_id not in transactions:\n transactions[transaction_id] = []\n transactions[transaction_id].append(transaction)\n\n for transaction_id, transaction_pieces in transactions.items():\n transaction_type = transaction_pieces[0]['type']\n if transaction_type == 'TRADE':\n transaction_obj = Trade(transaction_id, transaction_pieces)\n elif transaction_type == 'RECEIVE_AND_DELIVER':\n transaction_obj = RecieveAndDeliever(transaction_id, transaction_pieces)\n elif transaction_type == 'JOURNAL':\n transaction_obj = Journal(transaction_id, transaction_pieces)\n else:\n transaction_obj = Transaction(transaction_id, transaction_pieces)\n self.transaction_list.append(transaction_obj)\n\n def send_to_sql(self, db_name, table_name, 
transaction_list=None):\n if not transaction_list:\n transaction_list = self.transaction_list\n\n transactions = []\n for transaction in transaction_list:\n data = (transaction.transaction_id, transaction.transaction_type, transaction.symbol, \n transaction.date_time, transaction.net, transaction.fees, transaction.amount)\n transactions.append(data)\n\n database = Database(db_name)\n database.data_entry(table_name, transactions, executemany=True)\n\n\nclass Transaction:\n\n def __init__(self, transaction_id, transaction_pieces, transaction_type=None, auto=True):\n self.transaction_id = transaction_id\n self.transaction_pieces = transaction_pieces\n self.transaction_type = transaction_type\n self.amount = 0\n self.symbol = 'N/A'\n\n if auto:\n self.fees = self.calculate_fees()\n self.net = self.calculate_net()\n try:\n self.date_time = self.format_time()\n except:\n self.date_time = self.format_time(conversion_type=\"settlementDate\")\n\n def calculate_fees(self):\n fees = 0\n for transaction in self.transaction_pieces:\n fees += sum(transaction['fees'].values())\n # fees = sum([sum(transaction['fees'].values()) for transaction in self.transaction_pieces])\n return round(fees, 2)\n \n def calculate_net(self):\n net = 0\n for transaction in self.transaction_pieces:\n net += transaction['netAmount']\n return round(net, 2)\n\n def format_time(self, time_type=\"transactionDate\", conversion_type=None):\n if not conversion_type:\n conversion_type = time_type\n if conversion_type != time_type and conversion_type == \"transactionDate\":\n date_time = f'{self.transaction_pieces[0][conversion_type]} 00:00:00'\n else:\n #\"2018-07-26T15:25:29+0000\" --> \"2018-07-26 15:25:29\"\n date_time = self.transaction_pieces[0][conversion_type].replace(\"T\", \" \").split(\"+\")[0]\n return date_time\n\n\nclass Journal(Transaction):\n\n def __init__(self, transaction_id, transaction_pieces, transaction_type='JOURNAL', auto=True):\n super().__init__(transaction_id, transaction_pieces, transaction_type, auto)\n\n\nclass Trade(Transaction):\n\n def __init__(self, transaction_id, transaction_pieces, transaction_type='TRADE', auto=True):\n super().__init__(transaction_id, transaction_pieces, transaction_type, auto)\n if auto:\n self.amount = self.calculate_amount()\n self.symbol = transaction_pieces[0]['transactionItem']['instrument']['symbol']\n\n def calculate_amount(self):\n amount = 0\n for transaction in self.transaction_pieces:\n piece_amount = transaction['transactionItem']['amount']\n piece_description = transaction['description']\n if piece_description == \"TRADE CORRECTION\":\n continue\n elif piece_description == \"SELL TRADE\":\n amount -= piece_amount\n else:\n amount += piece_amount \n return amount\n\n\nclass RecieveAndDeliever(Transaction):\n\n def __init__(self, transaction_id, transaction_pieces, transaction_type='RECEIVE_AND_DELIVER', auto=True):\n super().__init__(transaction_id, transaction_pieces, transaction_type, auto)\n self.symbol = transaction_pieces[0]['transactionItem']['instrument']['symbol']\n\n\n\nif __name__ == '__main__':\n from privateinfo import MainAccount, SecondAccount, client_id\n # database = Database('MainAccount.db')\n # columns = [('transaction_id', 'TEXT'), ('transaction_type', 'TEXT'), ('symbol', 'TEXT'),\n # ('date', 'TEXT'), ('net', 'TEXT'), ('fees', 'TEXT'), ('amount', 'TEXT') ]\n # database.create_table('TestTable', columns)\n\n ameritrade = Ameritrade(SecondAccount, client_id)\n # processer = TransactionProcessor(transactions_json)\n # 
processer.send_to_sql('MainAccount.db', 'TestTable')","sub_path":"portfolio.py","file_name":"portfolio.py","file_ext":"py","file_size_in_byte":5842,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} {"seq_id":"288153393","text":"import math\nimport py4j.java_gateway as jg\nfrom pyboof import gateway\n\ndef show( boof_image , title=\"Image\"):\n gateway.jvm.boofcv.gui.image.ShowImages.showWindow(boof_image,title)\n\ndef show_grid( images , columns=-1, title=\"Image Grid\"):\n if not isinstance(images, (tuple, list)): # the old 'type(...) is not tuple or not list' test was always true\n images = (images,) # a one-element tuple; bare parentheses would not wrap the value\n\n array = gateway.new_array(gateway.jvm.java.awt.image.BufferedImage,len(images))\n for idx,image in enumerate(images):\n if jg.is_instance_of(gateway, image, gateway.jvm.java.awt.image.BufferedImage ):\n array[idx] = image\n else:\n array[idx] = gateway.jvm.boofcv.io.image.ConvertBufferedImage.convertTo(image,None,True)\n\n # If no grid is specified try to make it square\n if columns <= 0:\n columns = int(math.sqrt(len(images)))\n gateway.jvm.boofcv.gui.image.ShowImages.showGrid(columns,title,array)\n\ndef show_list( image_name_pairs , title=\"Image List\"):\n if not isinstance(image_name_pairs, (tuple, list)):\n image_name_pairs = (image_name_pairs,)\n\n names = []\n buffered = []\n for pair in image_name_pairs:\n if jg.is_instance_of(gateway, pair[0], gateway.jvm.java.awt.image.BufferedImage ):\n buffered.append(pair[0])\n else:\n buffered.append( gateway.jvm.boofcv.io.image.ConvertBufferedImage.convertTo(pair[0],None,True) )\n names.append(pair[1])\n\n panel = gateway.jvm.boofcv.gui.ListDisplayPanel()\n for i in range(len(names)):\n panel.addImage(buffered[i],names[i])\n gateway.jvm.boofcv.gui.image.ShowImages.showWindow(panel,title)\n\ndef colorize_gradient( derivX , derivY ):\n return gateway.jvm.boofcv.gui.image.VisualizeImageData.colorizeGradient(derivX,derivY,-1)","sub_path":"integration/pyboof/python/pyboof/swing.py","file_name":"swing.py","file_ext":"py","file_size_in_byte":1710,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} {"seq_id":"97377781","text":"class TerminalColors:\n '''\n Reference http://ascii-table.com/ansi-escape-sequences.php\n '''\n class ATTRIBUTES:\n NORMAL = ''\n BOLD = ';1'\n UNDERSCORE = ';4'\n BLINK = ';5'\n REVERSE = ';6'\n CONCEALED = ';7'\n\n class FOREGROUND:\n NORMAL = ''\n BLACK = ';30'\n RED = ';31'\n GREEN = ';32'\n YELLOW = ';33'\n BLUE = ';34'\n MAGENTA = ';35'\n CYAN = ';36'\n WHITE = ';37'\n\n class BACKGROUND:\n NORMAL = ''\n BLACK = ';40'\n RED = ';41'\n GREEN = ';42'\n YELLOW = ';43'\n BLUE = ';44'\n MAGENTA = ';45'\n CYAN = ';46'\n WHITE = ';47'\n\n @classmethod\n def cstring(cls, string, attr = 'NORMAL', fore = 'NORMAL', back = 'NORMAL'):\n attr = getattr(cls.ATTRIBUTES, attr.upper(), '')\n fore = getattr(cls.FOREGROUND, fore.upper(), '')\n back = getattr(cls.BACKGROUND, back.upper(), '')\n format_str = attr + fore + back\n if(format_str != ''):\n format_str = format_str[1:]\n return '\\033[{}m{}\\033[0m'.format(format_str, string)\n\n @classmethod\n def cprint(cls, string, attr = 'NORMAL', fore = 'NORMAL', back = 'NORMAL', \n sep = ' ', end = '\\n', flush = False):\n \n print(cls.cstring(string, attr, fore, back), end = end, sep = sep, flush = flush)\n \ndef colorprint(*args, **kwargs):\n TerminalColors.cprint(*args, **kwargs)\ndef colorstring(*args, **kwargs):\n return TerminalColors.cstring(*args, **kwargs)\n\nif __name__ == '__main__':\n # the module-level wrappers are colorprint/colorstring; the bare names cprint/cstring do not exist here\n colorprint('TEST', fore='green', back = 'blue')\n print(colorstring('TESTE', fore='blue', back='green'))
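\n # Added usage note: colorstring('TEST', fore='red') returns '\\033[31mTEST\\033[0m' - 31 is the red-foreground code from the ANSI table referenced above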
\n","sub_path":"sources/terminalcolors.py","file_name":"terminalcolors.py","file_ext":"py","file_size_in_byte":1755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} {"seq_id":"428175612","text":"#!/usr/bin/env python\nimport sys\nfrom heapq import heapify, heappush, heappop\nif sys.version_info > (3,):\n long = int\n\n\ndef main():\n primes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, ]\n t = [x for x in primes]\n heapify(t)\n prev = 1\n s = 0\n while True:\n n = heappop(t)\n if (n == prev + 1):\n s += prev\n print(\"Found s = %d for n = %d\" % (s, prev))\n sys.stdout.flush()\n for x in primes:\n # mul = x * n\n # if not t.get_node(mul):\n # t.insert(mul, True)\n heappush(t, x * n)\n prev = n\n\n\nmain()\n","sub_path":"project-euler/581/euler_581_v4_with_heapq.py","file_name":"euler_581_v4_with_heapq.py","file_ext":"py","file_size_in_byte":630,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} {"seq_id":"499798452","text":"import smtplib\nfrom email.mime.text import MIMEText\nfrom email.header import Header\n\nclass emailMessager:\n\n def __init__(self):\n self.from_address = 'klq26@qq.com'\n # https://service.mail.qq.com/cgi-bin/help?subtype=1&&id=28&&no=1001256%27)\n # Authorization code: log in to mail.qq.com, open Settings -> Account, find the POP3 section,\n # enable the POP3/SMTP service and click 'Generate authorization code'.\n # It has to be regenerated via the bound phone number whenever the mailbox password changes.\n self.authorizatioCode = 'gejelhbulxaxbiaa'\n self.to_addr = 'klq26@163.com'\n\n def send(self, to_address = 'klq26@163.com', subject = 'Test subject', msg = 'Test body'):\n # message body: first argument is the content, second the format ('plain' = plain text), third the encoding\n msg = MIMEText(msg,'plain','utf-8')\n # message headers\n msg['From'] = Header(self.from_address)\n msg['To'] = Header(to_address)\n msg['Subject'] = Header(subject)\n client = None # so the finally clause cannot raise a NameError if the connection fails\n try:\n client = smtplib.SMTP_SSL('smtp.qq.com', smtplib.SMTP_SSL_PORT)\n client.login(self.from_address, self.authorizatioCode)\n client.sendmail(self.from_address, to_address, msg.as_string())\n except smtplib.SMTPException as e:\n print(\"[ERROR] failed to send the email: {0}\".format(e))\n finally:\n if client is not None:\n client.quit()\n\nif __name__ == \"__main__\":\n emailMessager().send()\n\n","sub_path":"Python/messager/emailMessager.py","file_name":"emailMessager.py","file_ext":"py","file_size_in_byte":1445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} {"seq_id":"393056483","text":"\n# Wraps the placeholder-replacement logic\nimport re\n\nfrom common.handle_phone import get_new_phone\nfrom common.handle_data import Data\nfrom common.handle_logger import logger\nfrom common.handle_yaml import HandleYaml\n\n\ndef relace_case_with_re_v2(case_dict):\n\n # instantiate HandleYaml and read the contents of data.yaml\n global_data = HandleYaml(\"data.yaml\").data\n\n case_str = str(case_dict)\n # pull out the #mark# placeholders with a regex\n data_mark_list = re.findall(\"#(\\w+)#\", case_str) # list of mark names\n logger.info(\"Marks extracted from the test case by regex: {}\".format(data_mark_list))\n # if there is a phone mark, first generate an unregistered phone number and store it on the Data class\n if \"phone\" in data_mark_list:\n logger.info(\"phone mark present; generating a fresh unregistered phone number and setting it as Data.phone\")\n get_new_phone()\n\n if data_mark_list: # list is not empty\n for mark in data_mark_list: # walk through the extracted marks\n # a mark may live in the data.yaml config file or on the Data class\n if mark in global_data.keys():\n case_str = case_str.replace(f\"#{mark}#\", str(global_data[mark]))\n logger.info(f\"Replaced mark #{mark}# from data.yaml with: {global_data[mark]}\")\n else:\n # otherwise replace it with the matching attribute of the Data class\n case_str = case_str.replace(f\"#{mark}#\", getattr(Data, mark))\n logger.info(f\"Replaced mark #{mark}# from the Data class with: {getattr(Data, mark)}\")\n\n logger.info(f\"Case data after replacement (still a string):\\n {case_str}\")\n return eval(case_str)
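\n\n# Added illustrative example (names assumed): with data.yaml holding 'password: 123456' and Data.phone set by get_new_phone(),\n# relace_case_with_re_v2({'mobile': '#phone#', 'pwd': '#password#'}) returns {'mobile': Data.phone, 'pwd': '123456'}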
\n\n\n\n\n","sub_path":"common/handle_replace.py","file_name":"handle_replace.py","file_ext":"py","file_size_in_byte":1698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} {"seq_id":"331241664","text":"#! python3\n# -*- coding: utf-8 -*-\nimport os\nimport json\nimport jieba\nimport logging\nfrom jieba import analyse\nfrom random import shuffle\nfrom sklearn.cluster import KMeans\nfrom sklearn.feature_extraction.text import TfidfTransformer\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.model_selection import train_test_split\nfrom matplotlib import pyplot as plt\n\n\nlogging.basicConfig(format='%(asctime)s,%(msecs)d %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s',\n datefmt='%d-%m-%Y:%H:%M:%S', level=logging.DEBUG)\nlogger = logging.getLogger()\n\n\nclass Prepare(object):\n def __init__(self, filename):\n self.filename = filename\n\n def extract(self):\n \"\"\"\n Extract the raw text from the JSON data\n :return:\n \"\"\"\n with open(self.filename, 'rb') as f:\n data = json.loads(f.read())\n record = data['RECORDS']\n comment = []\n for item in record:\n logging.info(item['id'])\n single_comment = item['comment_content']\n comment.append(single_comment)\n # break\n return comment\n\n def remove_noise_text(self, raw_comment):\n \"\"\"\n Strip user names and reply noise from the text\n :return:\n \"\"\"\n comment = []\n for item in raw_comment:\n ret = item.split(u':')[-1]\n if ':' in ret:\n real_comment = ret.split(':')[-1]\n logger.info(real_comment)\n comment.append(real_comment)\n else:\n comment.append(ret)\n comment = [i for i in comment if i != \"\"]\n return comment\n\n def cut_word(self, comment):\n \"\"\"\n Order: tokenize first, then remove stop words.\n For stop-word removal see: https://www.zhihu.com/question/41199317\n A good option is the extract_tags function, which picks feature words by TF-IDF\n and drops stop words before extraction; a custom stop-word dictionary can be set:\n jieba.analyse.set_stop_words('D:\\\\Python27\\\\stopword.txt')\n tags = jieba.analyse.extract_tags(text,20)\n :return:\n \"\"\"\n all_word = []\n for item in comment:\n ret = list(jieba.cut(item, cut_all=False))\n all_word.extend(ret)\n corpus = [i for i in all_word if i != \" \" and i != \"龙卷风\" and i != \"嘴里塞\"]\n # analyse.set_stop_words('D:\\\\Python36 Project\\\\WuHanNLP_Dev\\\\stop_word\\\\所有停用词.txt')\n # result = analyse.extract_tags(all_word, topK=1000, withWeight=True, allowPOS=())\n # no_stop_word = []\n # for item in result:\n # logger.info(item[0])\n # no_stop_word.append(item[0])\n # return no_stop_word\n return corpus\n\n def cluster(self, corpus):\n \"\"\"\n Run the clustering.\n sklearn's TF-IDF pipeline relies on two classes: CountVectorizer() and TfidfTransformer().\n CountVectorizer.fit_transform turns the words of each text into a term-frequency matrix;\n element weight[i][j] is the frequency of word j in text i, i.e. how often each word occurs.\n get_feature_names() lists the keywords of all texts, and toarray() shows the frequency matrix.\n TfidfTransformer also has a fit_transform function, which computes the tf-idf values.\n :param corpus:\n :return:\n \"\"\"\n base_path = os.path.abspath(os.path.dirname(__file__))\n stop_word_path = os.path.join(base_path, 'stop_word')\n stop_word_list = []\n with open('%s/%s.txt' % (stop_word_path, '所有停用词'), 'r', encoding='utf8') as f:\n for line in f.readlines():\n stop_word_list.append(line.replace('\\n', ''))\n \"\"\"\n Pass the custom stop words\n \"\"\"\n vectorizer = CountVectorizer(stop_words=stop_word_list)\n transformer = TfidfTransformer()\n tf_idf = transformer.fit_transform(vectorizer.fit_transform(corpus))\n train_x, test_x = train_test_split(tf_idf, test_size=0.2)\n cluster_result = []\n for i in range(2, 11):\n km = KMeans(n_clusters=i)\n km.fit(train_x)\n print(km.inertia_)\n cluster_result.append({'score': -km.score(test_x), 'cluster_num': i})
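\n # Added note: km.inertia_ printed above shrinks monotonically as i grows, so the negated held-out\n # score is collected per candidate i and the elbow of the error curve (see draw below) picks the cluster count.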
\n return cluster_result\n\n def draw(self, result):\n \"\"\"\n Plot the error curve\n :param result:\n :return:\n \"\"\"\n score = []\n true_ks = []\n for item in result:\n score.append(item['score'])\n true_ks.append(item['cluster_num'])\n plt.figure(figsize=(8, 4))\n plt.plot(true_ks, score, label='error', color='red', linewidth=1)\n plt.xlabel(\"n_clusters\")\n plt.ylabel(\"error\")\n plt.legend()\n plt.show()\n\n def get_label(self, corpus, n_cluster=5):\n \"\"\"\n The matplotlib plot shows that the best number of clusters is 5\n :param corpus\n :param n_cluster:\n :return:\n \"\"\"\n vectorizer = CountVectorizer()\n transformer = TfidfTransformer()\n tf_idf = transformer.fit_transform(vectorizer.fit_transform(corpus))\n train_x, test_x = train_test_split(tf_idf, test_size=0.2)\n km = KMeans(n_clusters=n_cluster)\n km.fit(train_x)\n order_centroids = km.cluster_centers_.argsort()[:, ::-1]\n terms = vectorizer.get_feature_names()\n print(vectorizer.get_stop_words())\n for i in range(n_cluster):\n print(\"Cluster %d:\" % i, end='')\n for ind in order_centroids[i, :10]:\n print(' %s' % terms[ind], end='')\n print('\\n')\n\n\ndef main():\n prepare = Prepare('comment.json')\n raw_comment = prepare.extract()\n comment = prepare.remove_noise_text(raw_comment)\n no_stop_word = prepare.cut_word(comment)\n cluster_result = prepare.cluster(no_stop_word)\n prepare.draw(cluster_result)\n prepare.get_label(no_stop_word)\n # with open('no_stop_word.json', 'w', encoding='utf8') as f:\n # f.write(json.dumps(no_stop_word, sort_keys=True, indent=4, ensure_ascii=False))\n\n\nif __name__ == '__main__':\n main()\n\n","sub_path":"cluster_1.py","file_name":"cluster_1.py","file_ext":"py","file_size_in_byte":6149,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} {"seq_id":"468244889","text":"# Copyright (c) 2020 Gurjit Singh\n\n# This source code is licensed under the MIT license that can be found in\n# the accompanying LICENSE file or at https://opensource.org/licenses/MIT.\n\n\nimport argparse\nimport math\nimport os\nimport pathlib\nimport subprocess\nimport sys\n\n\ndef parseArgs():\n def dirPath(pth):\n pthObj = pathlib.Path(pth)\n if pthObj.is_dir():\n return pthObj\n else:\n raise argparse.ArgumentTypeError(\"Invalid Directory path\")\n\n parser = argparse.ArgumentParser(\n description=\"Compress all child directories in specified folder using 7z.\"\n )\n parser.add_argument(\n \"-d\", \"--dir\", required=True, help=\"Directory path\", type=dirPath\n )\n parser.add_argument(\n \"-s\",\n \"--split\",\n nargs=\"?\",\n default=None,\n const=300,\n type=int,\n help=\"Maximum split size in MB, default is 300 MB\",\n )\n parser.add_argument(\n \"-m\",\n \"--minim-split\",\n nargs=\"?\",\n default=100,\n # const=100,\n type=int,\n help=\"Minimum split size in MB, default is 100 MB\",\n )\n parser.add_argument(\n \"-a\",\n \"--abs\",\n action=\"store_true\",\n help=r\"Use absolute 7z.exe path C:\\Program Files\\7-Zip\\7z.exe\",\n )\n parser.add_argument(\n \"-p\", \"--parent\", action=\"store_true\", help=r\"Compress parent directory.\",\n )\n pargs = parser.parse_args()\n\n return pargs\n\n\ngetCmd = lambda dirPath, abs: [\n \"7z.exe\" if not abs else r\"C:\\Program Files\\7-Zip\\7z.exe\",\n \"a\",\n # \"-t7z\",\n # \"-mx7\",\n # \"-mnt4\",\n f\"{str(dirPath)}.zip\",\n str(dirPath),\n]\n\ngetDirList = lambda dirPath: [x for x in dirPath.iterdir() if x.is_dir()]\n\nbytesToMB = lambda bytes: math.ceil(bytes / float(1 << 20))\n\n\ndef getSize(totalSize, maxSplit):\n fSize = 0\n for i in range(2, 35):\n 
splitSize = math.ceil(totalSize / i)\n if totalSize <= splitSize:\n continue\n if splitSize <= maxSplit:\n fSize = splitSize\n return i, splitSize\n if fSize == 0:\n return 1, totalSize\n\n\ndef getDirSize(dirPath):\n totalSize = 0\n for childpath, _, childfiles in os.walk(dirPath):\n for file in childfiles:\n totalSize += os.stat(os.path.join(childpath, file)).st_size\n return totalSize\n\n\ndef main(pargs):\n\n dirPath = pargs.dir.resolve()\n if not pargs.parent:\n dirList = getDirList(dirPath)\n else:\n dirList = [str(dirPath)]\n\n if not dirList:\n print(\"Nothing to do.\")\n sys.exit()\n\n minimSplit = pargs.minim_split\n\n for folder in dirList:\n totalSize = bytesToMB(getDirSize(folder))\n\n cmd = getCmd(folder, pargs.abs)\n\n if pargs.split and totalSize >= (minimSplit * 2):\n splitSize = getSize(totalSize, pargs.split)[1]\n if splitSize >= minimSplit:\n cmd.append(f\"-v{splitSize}m\")\n\n print(\"\\n--------------------------------------\")\n print(\"\\n\", cmd)\n print(f\"\\nTotal size of source files { (totalSize) } MB\")\n if pargs.split and totalSize >= (minimSplit * 2):\n print(f\"\\nSplit size: {splitSize} MB\")\n print(\"\\n---------------------------------------\\n\")\n subprocess.run(cmd)\n input(\"\\nPress Enter to continue...\")\n\n\nmain(parseArgs())\n","sub_path":"zip-dirs.py","file_name":"zip-dirs.py","file_ext":"py","file_size_in_byte":3336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"465573908","text":"# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\n'''\nRuntime: 1212 ms, faster than 5.03% of Python3 online submissions for Linked List Cycle.\nMemory Usage: 15.9 MB, less than 100.00% of Python3 online submissions for Linked List Cycle.\nFailed to solve it using O(1) (i.e. 
constant) memory.\n'''\nclass Solution:\n def hasCycle(self, head: ListNode) -> bool:\n if head is None:\n return False\n \n scanned = []\n while head.next is not None:\n if head in scanned:\n return True\n \n scanned.append(head)\n head = head.next\n \n return False\n","sub_path":"141_linked_list_cycle/v1.py","file_name":"v1.py","file_ext":"py","file_size_in_byte":726,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"368614680","text":"import json\nimport boto3\n\nprint('Loading function')\n\nglue = boto3.client(service_name='glue', region_name='us-east-2',\n endpoint_url='https://glue.us-east-2.amazonaws.com')\n\ndef lambda_handler(event, context):\n #print(\"Received event: \" + json.dumps(event, indent=2))\n\n try:\n glue.start_crawler(Name='conform')\n except Exception as e:\n print(e)\n print('Error starting crawler')\n raise e","sub_path":"crawler-lambda/conformAutomation.py","file_name":"conformAutomation.py","file_ext":"py","file_size_in_byte":435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"504714878","text":"from behavior_test import SUMMARY_PATH\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\ndef summary2table(df):\n\n pass\n\n\nif __name__ == '__main__':\n\n summary_df = pd.read_csv(SUMMARY_PATH, index_col=0)\n summary_df[['groupby', 'group']] = summary_df[['groupby', 'group']].fillna('all')\n summary_df = summary_df.loc[summary_df['group'] == 'all'].drop('time', axis=1)\n summary_df = summary_df.loc[summary_df['groupby'] == 'all']\n sep_dict = {}\n table_dict = {}\n for test_type, test_summary in summary_df.groupby('testtype'):\n slim_summary = test_summary.dropna(axis=1, how='all')\n slim_summary = slim_summary[[x for x in slim_summary.columns if \\\n slim_summary[x].dropna().unique().shape[0] != 1]]\n sep_dict[test_type] = slim_summary\n metric_columns = [x for x in slim_summary.columns if x not in {'model', 'dataset', 'groupby'}]\n for m_col in metric_columns:\n table_dict[(test_type, m_col)] = slim_summary.set_index(\\\n ['model', 'dataset'], drop=True)[m_col].dropna().unstack('dataset')\n \n writer = pd.ExcelWriter('./temp2.xlsx', engine='openpyxl') \n for (test_type, metric), table_df in table_dict.items():\n table_df.to_excel(writer, sheet_name='_'.join([test_type, metric]))\n writer.save()\n\n\n\n \n\n if 0:\n df_ms = pd.read_csv('./summary_ms.csv', index_col=0)\n df_yg = pd.read_csv('./summary_yg.csv', index_col=0)\n df_yg.index = pd.MultiIndex.from_tuples([eval(y) for y in df_yg.index], names=['group_by', 'group_tag'])\n df_yg = df_yg.reset_index(drop=False)\n df = pd.concat([df_ms, df_yg], axis=0).reset_index(drop=True)\n\n df['ratio'] = df['freq'] / df['count']\n df['acc'] = [(df['ratio'][i] if df['top'][i] else (1-df['ratio'][i])) for i in df.index]\n df['group_tag'] = [str(x).split('.')[0] for x in df['group_tag']]\n df['model'] = [x.upper() for x in df['model']]\n df.loc[:, 'group_tag'] = [{'0': 'Incorrect', '-1': 'Incorrect', '1': 'Correct', 'all': 'Any'}[x] for x in df['group_tag']]\n if 1:\n auc_table = df.set_index(['group_by', 'group_tag', 'test_type', 'dataset', 'model'])['auc'].dropna().xs('original', level='test_type')\n auc_table = auc_table.unstack('dataset')\n auc_table.index = auc_table.index.droplevel([0,1])\n auc_table.to_csv('./summary_auc.csv')\n\n\n df_acc = df.set_index(['group_by', 'group_tag', 'test_type', 'dataset', 'model'], append=True)['acc'].dropna()\n count_col = df.set_index(['group_by', 
'group_tag', 'test_type', 'dataset', 'model'])['count'].replace(0, np.nan).dropna().unstack('dataset')\n \n df_acc.index = df_acc.index.droplevel(0)\n df_acc = df_acc.unstack('dataset').unstack('model').sort_index(axis=0, level=['test_type', 'group_tag'], ascending=[True, True]).dropna(how='all')\n # df_acc = df_acc.reset_index(drop=False)\n # df_acc.columns = \\\n # ['Assistments15', 'Assistments17', 'EdNet_Small', 'Spanish', 'Statics', 'TestCount']\n df_acc.to_csv('summary_formatted.csv')\n\n df['auc']","sub_path":"_present_result.py","file_name":"_present_result.py","file_ext":"py","file_size_in_byte":3165,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"407377209","text":"# config_shell.py\n\nimport sys\nimport os\nimport subprocess\n\n\nWORKING_DIR = os.getcwd()\n\n\n\ndef check_if_exists():\n\ttry:\n\t\thome_path = os.path.expanduser('~')\n\t\tos.chdir(home_path)\n\t\tcommand = 'ls -a | grep .zshrc'\n\t\tcompleted_process = subprocess.run(command.split(), stdout=subprocess.PIPE)\n\t\tprint('Ran the command to see .zshrc exists or not - completed_process.stdout=' + str(completed_process.stdout))\n\t\tif completed_process.stdout:\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n\texcept Exception as e:\n\t\tprint('Could not change path and check if config file exit (on you!): {0}'.format(e))\n\t\texit_program(1)\n\n\ndef ask_overwrite():\n\tif check_if_exists():\n\t\tans = input('The file ~/.zshrc already exists, should we overwrite (y/n)?')\n\t\tif ans == 'y':\n\t\t\tprint('Ok, overwriting with new config...')\n\t\t\treturn True\n\t\telif ans == 'n':\n\t\t\tprint('Ok, not overwriting the file...')\n\t\t\treturn False\n\t\telse:\n\t\t\tprint('Not a valid answer, try again...')\n\t\t\treturn ask_overwrite()\n\telse:\n\t\tprint('The file ~/.zshrc does not exists, creating a new file.')\n\t\treturn True\n\n\ndef copy_shell_config_file():\n\t# careful to not be in home dir\n\tos.chdir(WORKING_DIR)\n\tos.system('cp -f -v ./.zshrc ' + os.path.expanduser('~') + '/.zshrc')\n\n\ndef exit_program(code):\n\tprint(f'Exiting Python program - status_code={code} - file={__file__}')\n\tsys.exit(code)\n\n\ndef config_shell():\n\tif ask_overwrite():\n\t\ttry:\n\t\t\tcopy_shell_config_file()\n\t\texcept Exception as e:\n\t\t\tprint('Could not copy the config file: {0}'.format(e))\n\t\t\texit_program(1)\n\telse:\n\t\tprint('So you should take care of shell config manually.')\n\t\texit_program(0)\n\n\nif __name__ == '__main__':\n\tprint('Python program is running - file=' + __file__)\n\tconfig_shell()\n\n\n","sub_path":"macos-homesick/pyscripts/config_shell.py","file_name":"config_shell.py","file_ext":"py","file_size_in_byte":1696,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"422724012","text":"import codecs, sys, utils\n\n\ndef read_cluster(f_path):\n flag = True\n clusters = dict()\n last_key = None\n with codecs.open(f_path,\"r\",\"utf-8\") as input_file:\n for line in input_file:\n if line.strip() == \"\":\n flag = True\n elif flag: # new cluster\n flag = False\n key = line.split(\"\\t\")[0]\n clusters[key] = list()\n last_key = key\n else:\n elements = line.strip().split(\"\\t\")\n clusters[last_key].append([elements[0], float(elements[1]), int(elements[2])])\n return clusters\n\n\ndef filter_members(lst, threshold):\n return [element for element in lst if element[1] <= threshold]\n\ndef cluster_voting(members, N, threshold, key):\n votes = list()\n if threshold:\n members = 
filter_members(members, threshold)\n members.sort(key=lambda x: x[2]) #sorts per frequency\n for i in range(N):\n if i >= len(members):\n break\n votes.append(\"\\t\".join([members[i][0], key]))\n return votes\n\ndef generate_lexicon(clusters, N, threshold):\n lexicon = list()\n for translation in clusters:\n vote = cluster_voting(clusters[translation], N, threshold, translation)\n lexicon += vote\n return lexicon\n\ndef generate():\n clusters = read_cluster(sys.argv[1])\n N = int(sys.argv[2])\n threshold = None if len(sys.argv) < 4 else float(sys.argv[3])\n lexicon = generate_lexicon(clusters, N, threshold)\n utils.write_file(sys.argv[1] + \".lexicon\", lexicon)\n\nif __name__ == \"__main__\":\n generate()","sub_path":"post_processing/generate_lexicon.py","file_name":"generate_lexicon.py","file_ext":"py","file_size_in_byte":1596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"570298651","text":"# !Python3\nimport tensorflow as tf\nhello = tf.constant('Hello, TensorFlow!')\na = tf.constant([1 ,2], name='a')\nb = tf.constant([2 ,3], name='b')\nresult = a+b\nprint(result)\ntestWeight = tf.Variable(tf.random_normal([2,3], stddev=2))\n\n\nwith tf.Session() as sess: \n print(sess.run(hello).decode())\n print(sess.run(a))\n print(sess.run(result))\n sess.run(testWeight.initializer)\n print(sess.run(testWeight.initial_value))\n \n","sub_path":"tftest.py","file_name":"tftest.py","file_ext":"py","file_size_in_byte":440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"576659550","text":"\"\"\"\nRoutines related to the ocean dynamics.\n\"\"\"\n# Copyright 2020-2022 Shom\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport numpy as np\nimport pandas as pd\nimport xarray as xr\n\nfrom .__init__ import XoaError\nfrom . import coords as xcoords\nfrom . 
import grid as xgrid\nfrom xoa.interp import grid2locs\nfrom xoa.geo import EARTH_RADIUS\n\n\ndef _get_uv2d_(t, txy, gx, gy, gz, gt, guv):\n \"\"\"Interpolate gridded u and v on track positions\"\"\"\n # Shape\n npts = int(txy.size / 2)\n tx = txy[:npts]\n ty = txy[npts:]\n tt = np.full(npts, t)\n tz = np.zeros(npts)\n\n # Interpolate\n tuv = grid2locs(gx, gy, gz, gt, guv, tx, ty, tz, tt)\n\n # Scale the speed for degrees: dlat/dt = v / R, dlon/dt = u / (R cos(lat))\n tuv[0] *= 180.0 / (np.pi * EARTH_RADIUS)\n tuv[1] *= 180.0 / (np.pi * EARTH_RADIUS)\n tuv[0] /= np.cos(ty * np.pi / 180)\n\n # Pack velocity\n return tuv.ravel()\n\n\ndef _rk4_(xy, f, t, dt, **kwargs):\n \"\"\"Integrate one time step with RK4\"\"\"\n k1 = dt * f(t, xy, **kwargs)\n k2 = dt * f(t + 0.5 * dt, xy + 0.5 * k1, **kwargs)\n k3 = dt * f(t + 0.5 * dt, xy + 0.5 * k2, **kwargs)\n # the final stage is evaluated at the full step t + dt\n k4 = dt * f(t + dt, xy + k3, **kwargs)\n return xy + (k1 + 2 * k2 + 2 * k3 + k4) / 6.0\n\n\ndef _integrate_(xy, f, t0, t1, dt, **kwargs):\n \"\"\"Low level integration with packed, pure-numeric data\"\"\"\n\n # Fit the time steps to the time range\n dt *= 1.0\n if t1 < 0:\n dts = [dt] * int(-t1)\n else:\n nt = int((t1 - t0) // dt)\n dts = [dt] * nt\n dtf = (t1 - t0) % dt\n if dtf:\n dts.append(dtf)\n\n # Iterative integration\n t = t0\n tt = [t0]\n xxyy = [xy]\n for dt in dts:\n xy2 = _rk4_(xy, f, t, dt, **kwargs)\n t2 = t + dt\n tt.append(t2)\n xxyy.append(xy2)\n t = t2\n xy = xy2\n return np.array(tt), np.asarray(xxyy)\n\n\ndef flow2d(u, v, xy0, duration, step, date=None):\n \"\"\"Integrate gridded 2D velocities from random positions\n\n Parameters\n ----------\n u: xarray.DataArray\n Gridded zonal velocity\n v: xarray.DataArray\n Gridded meridional velocity\n duration: int, numpy.timedelta64\n Total integration time in seconds\n step: int, numpy.timedelta64\n Integration step in seconds\n xy0: int, xarray.Dataset, tuple\n Either a number of particles or a dataset of initial positions\n with longitude and latitude coordinates\n date: None, numpy.datetime64\n A reference date for the time integration\n\n Return\n ------\n xarray.Dataset\n Output positions with ``lon`` and ``lat`` coordinates that vary with time.\n\n \"\"\"\n # Gridded field\n time0 = xcoords.get_time(u, errors=\"ignore\")\n u = u.squeeze(drop=True)\n v = v.squeeze(drop=True)\n if u.ndim != 2 or v.ndim != 2:\n raise XoaError(\"The velocity field must be 2D\")\n u = xgrid.to_rect(u)\n gx = xcoords.get_lon(u).values\n gy = xcoords.get_lat(u).values\n if gx.ndim == 1:\n gx = gx.reshape(1, -1)\n if gy.ndim == 1:\n gy = gy.reshape(-1, 1)\n gz = np.zeros((1,) * 5)\n gt = np.zeros(1)\n gu = u.values.reshape((1,) * 2 + u.shape[-2:])\n gv = v.values.reshape(gu.shape)\n guv = np.array([gu, gv])\n\n # Initial positions\n tt0 = None\n if isinstance(xy0, xr.Dataset):\n tx0 = xcoords.get_lon(xy0).values.ravel()\n ty0 = xcoords.get_lat(xy0).values.ravel()\n tt0 = xcoords.get_time(xy0, errors='ignore')\n elif isinstance(xy0, int):\n tx0 = np.random.uniform(gx.min(), gx.max(), xy0)\n ty0 = np.random.uniform(gy.min(), gy.max(), xy0)\n else:\n tx0, ty0 = xy0\n tx0 = np.asarray(tx0)\n ty0 = np.asarray(ty0)\n txy0 = np.concatenate((tx0, ty0))\n\n # Integration\n t0 = 0\n if isinstance(duration, np.timedelta64):\n duration /= np.timedelta64(1, \"s\")\n if isinstance(step, np.timedelta64):\n step /= np.timedelta64(1, \"s\")\n t1 = duration\n tt, txy = _integrate_(txy0, _get_uv2d_, t0, t1, step, gx=gx, gy=gy, gz=gz, gt=gt, guv=guv)\n tx, ty = np.split(txy, 2, axis=1)
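\n\n # txy stacks positions as [all x, then all y] along the last axis, so this split\n # recovers the lon and lat trajectories with shape (time, particles).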
\n\n # As a dataset\n if date is not None:\n date = pd.to_datetime(date).to_datetime64()\n elif tt0 is not None and len(tt0) <= 1:\n date = tt0.values\n elif time0 is not None:\n date = time0.values\n else:\n date = pd.Timestamp.now().to_datetime64()\n time = xr.DataArray(tt * np.timedelta64(1, 's') + date, dims='time')\n\n return xr.Dataset(\n coords={\n 'lon': (('time', 'particles'), tx),\n 'lat': (('time', 'particles'), ty),\n 'time': time,\n }\n )\n","sub_path":"xoa/dyn.py","file_name":"dyn.py","file_ext":"py","file_size_in_byte":5056,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} {"seq_id":"364524827","text":"from os import listdir\nfrom typing import List, NamedTuple, Optional, Dict\nfrom os.path import isfile, join\nimport wfdb\nfrom scipy import signal\nimport os\nimport numpy as np\nfrom pprint import pprint\nfrom feature_extraction import _dwt\nREPO_ROOT = os.path.dirname(os.path.realpath(__file__))\nFILE_PATH = os.path.join(REPO_ROOT, \"resources/nifeadb/1.0.0/\")\nFS = 500\n\n\nclass EcgRecordInfo(NamedTuple):\n record_name: str\n frequency: int\n label: int\n\n\nclass EcgRecord(NamedTuple):\n abdominal1: np.array\n abdominal2: Optional[np.array] = None\n abdominal3: Optional[np.array] = None\n abdominal4: Optional[np.array] = None\n abdominal5: Optional[np.array] = None\n merged: Optional[np.array] = None\n chest: Optional[np.array] = None\n\n\ndef nearest_even_length(signal: np.array):\n if not len(signal) % 2:\n return len(signal) - 2\n else:\n return len(signal) - 1\n\n\ndef unify_signal_length(output_signals: Dict[EcgRecordInfo, EcgRecord], min_lengths: List[int]):\n unified_output_signals = dict()\n for info in output_signals.keys():\n # each channel is sliced down to its own precomputed minimum length\n unified_output_signals[info] = EcgRecord(\n abdominal1=output_signals[info].abdominal1[:min_lengths[0]],\n abdominal2=output_signals[info].abdominal2[:min_lengths[1]],\n abdominal3=output_signals[info].abdominal3[:min_lengths[2]],\n abdominal4=output_signals[info].abdominal4[:min_lengths[3]],\n )\n return unified_output_signals\n\n\ndef load_file_data(dir: str = FILE_PATH) -> Dict[EcgRecordInfo, np.array]:\n label_map = {\"ARR\": 1, \"NR\": 0}\n\n files = [\n os.path.splitext(os.path.basename(f))[0]\n for f in listdir(dir)\n if isfile(join(dir, f)) and all([stop_file not in f for stop_file in [\n \"HEADER\", \"ANNOTATIONS\", \"index\", \"RECORDS\", \"SHA256SUMS\", \"wfdbcal\"\n ]])\n ]\n signals = dict()\n mins = [float(\"inf\") for _i in range(5)]\n for file_path in files:\n record = wfdb.rdrecord(os.path.join(dir, file_path))\n signal_fs = int(record.fs)\n\n raw_data = record.p_signal\n #merged_signal = raw_data[:,1:5].flatten()\n signal_data = EcgRecord(\n abdominal1=raw_data[:, 1] if signal_fs == FS else signal.resample(\n raw_data[nearest_even_length(raw_data[:, 1]), :],\n nearest_even_length(raw_data[:, 1]) // 2),\n abdominal2=raw_data[:, 2] if signal_fs == FS else signal.resample(\n raw_data[nearest_even_length(raw_data[:, 2]), :],\n nearest_even_length(raw_data[:, 2]) // 2),\n abdominal3=raw_data[:, 3] if signal_fs == FS else signal.resample(\n raw_data[nearest_even_length(raw_data[:, 3]), :],\n nearest_even_length(raw_data[:, 3]) // 2),\n abdominal4=raw_data[:, 4] if signal_fs == FS else signal.resample(\n raw_data[nearest_even_length(raw_data[:, 4]), :],\n nearest_even_length(raw_data[:, 4]) // 2),\n # abdominal3=record.p_signal[:, 3],\n # abdominal4=record.p_signal[:, 4],\n # abdominal5=record.p_signal[:, 5] if record.p_signal.shape[1] > 5 else None\n )\n\n mins = [min(len(signal_data.abdominal1), mins[0]),\n 
min(len(signal_data.abdominal2), mins[1]),\n min(len(signal_data.abdominal3), mins[2]),\n min(len(signal_data.abdominal4), mins[3]),\n # min(len(signal_data.abdominal5), mins[4]),\n ]\n signals[EcgRecordInfo(\n record_name=record.record_name,\n frequency=signal_fs,\n label=label_map[record.record_name.split(\"_\")[0]],\n )] = signal_data\n\n return unify_signal_length(signals, mins)\n\n\nif __name__ == \"__main__\":\n signals = load_file_data(FILE_PATH)\n for _, signal in signals.items():\n print(\"new\")\n print(len(_dwt(signal.abdominal1[:1000])))\n print(len(signal.abdominal1[:1000]))\n\n","sub_path":"load_ecg_data.py","file_name":"load_ecg_data.py","file_ext":"py","file_size_in_byte":3949,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"613236647","text":"\"\"\"Clean and concise implementation of this neural network. Performs K-folds cross-validation\n may be used for parameter tuning\"\"\"\nimport pandas as pd\nimport numpy as np\nimport tensorflow as tf\nfrom network import Mercedez, validate_train, infer_train\nfrom utils import preprocess, CrossValidationFolds\n\n# Constants\nPATH = './data/'\nTRAIN = 'train.csv'\nTEST = 'test.csv'\nSUBMIT = True\n\n# Hyper Parameters\nMAX_ITER = 400\nLEARNING_RATE = 1e-3\nLAYERS = [567, 500, 250]\nFOLDS = 5\nDROPOUT = 0.3\n\nFEATURE_DROP = ['X11', 'X93', 'X107', 'X223', 'X235', 'X268', 'X289',\n 'X290', 'X293', 'X297', 'X330', 'X347']\n\n\ndef read_data(path, train, test):\n print('Reading CSV Data...')\n train_df = pd.read_csv(path + train)\n test_df = pd.read_csv(path + test)\n\n num_examples = train_df.shape[0] # Both the training and test set have the same # of examples\n print('Data Read\\n')\n\n return train_df, test_df, num_examples\n\n\ndef k_folds(model, num_folds, train_features, targets):\n r_squared_log = []\n mse_log = []\n data = CrossValidationFolds(train_features, targets, num_folds)\n\n sess = tf.Session()\n\n for i in range(num_folds):\n print('Current fold: {}\\n'.format(data.current_fold + 1))\n (train_input, train_target), (cv_input, cv_target) = data.split()\n\n # Start Training\n cv_loss, cv_r2 = validate_train(sess, data.current_fold, MAX_ITER, DROPOUT,\n model, train_input, train_target, cv_input, cv_target)\n\n mse_log.append(cv_loss)\n r_squared_log.append(cv_r2)\n\n final_mse = np.array(mse_log).mean()\n final_r_squared = np.array(r_squared_log).mean()\n print('K folds finished')\n print('Final validation score, MSE: {0: .2f} | R_squared: {1: .5f}'.format(final_mse, final_r_squared))\n\n\ndef main():\n # Reading data\n train_df, test_df, num_examples = read_data(PATH, TRAIN, TEST)\n\n # Preprocessing\n (train_data, target), (test_data, ID) = preprocess(train_df, test_df, FEATURE_DROP)\n\n # Define model & build graph\n model = Mercedez(batch_size=num_examples, layers=LAYERS, learning_rate=LEARNING_RATE)\n model.build_graph()\n\n if SUBMIT:\n infer_train(MAX_ITER, DROPOUT, model, train_data, target, test_data, ID)\n\n else:\n # Run K-Folds Validation\n k_folds(model, FOLDS, train_data, target)\n\nif __name__ == '__main__':\n main()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"445531030","text":"import requests, itertools, sys\nfrom multiprocessing import Pool\nfrom optparse import OptionParser\n\ndef run(parser):\n \"\"\"\n kicks off the show\n \"\"\"\n options = get_options(parser)\n\n urls = get_urls(options)\n\n if 
options.parallel > 1:\n p = Pool(options.parallel)\n p.map(make_request, urls)\n else: \n for url in urls:\n make_request(url)\n\ndef init_parser():\n \"\"\"\n Adds appropriate options and returns parser object\n \"\"\"\n parser = OptionParser(\"Usage: -t hostname -f dictionary\")\n parser.add_option(\"-f\",\"--file\",\\\n dest=\"filename\",\\\n help=\"the wordlist to be used\")\n parser.add_option(\"-t\",\"--hostname\",\\\n dest=\"target\",\\\n help=\"the site to bust\")\n parser.add_option(\"-p\",\"--port\",\\\n dest=\"port\",\\\n help=\"the port\",\\\n default=80)\n parser.add_option(\"-l\",\"--parallel\",\\\n dest=\"parallel\",\\\n help=\"number of processes to spawn\",\\\n default=1,\\\n type='int')\n parser.add_option(\"-e\",\"--extension\",\\\n dest=\"extensions\",\\\n help=\"extension to append to the wordlist,\\nformat is -e=php,js,html\",\\\n type='string',\\\n default=[\"html\",\"js\",\"php\",\"jpg\"],\\\n action='callback',\\\n callback=parse_extensions)\n return parser\n\ndef parse_extensions(options, opt, value, parser):\n \"\"\"\n callback to turn -e a,b,c into ['a','b','c'] for extensions\n \"\"\"\n args = []\n if value:\n args = value.split(',')\n\n args = list(filter(lambda x: len(x) > 0, args))\n\n if len(args) > 0:\n setattr(parser.values, options.dest, args)\n\n\ndef get_options(parser):\n \"\"\"\n Error checking, returns the appropriate options\n \"\"\"\n (options, args) = parser.parse_args()\n options.multi_test=False\n\n if not options.filename:\n parser.error(\"Filename required for dictionary file\")\n\n if not options.target:\n if len(args) == 0:\n parser.error(\"Target required\")\n else:\n options.multi_test=True\n options.target=args[0]\n\n return options\n\ndef get_urls(options):\n \"\"\"\n takes the options and returns the urls to test\n \"\"\"\n try:\n filename = open(options.filename ,'r')\n except FileNotFoundError as e:\n pexit(\"File not found\")\n\n urls = []\n for line in filename.readlines():\n line = line.replace(\"\\n\",\"\")\n if len(line) > 0:\n url = \"http://\" + options.target + \":\" + str(options.port) + \"/\" + line \n urls.append(url)\n\n new_urls = []\n for ext in options.extensions:\n for url in urls:\n new_urls.append(url + \".\" + ext)\n urls = new_urls + urls\n\n return urls\n\ndef pprint(response, url):\n \"\"\"\n pretty prints output\n \"\"\" \n sys.stdout.write(\"%s %s\\n\" % (url, response.status_code))\n\ndef pexit(string):\n \"\"\"\n Print string and exits\n \"\"\"\n sys.stdout.write(string + \"\\n\")\n exit()\n\ndef make_request(url): \n \"\"\" \n Checks the url for existence\n \"\"\"\n try:\n r = (True, requests.get(url))\n except Exception as e:\n r = (False, e)\n if r[0] and r[1].status_code != 404:\n pprint(r[1], url)\n return r\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} {"seq_id":"559862241","text":"# step1 Digital tube corresponding to a single number\n# get a string of digits and draw the corresponding tubes\n# get the system's time to draw these tubes\n'''\n******\n* *\n******\n* *\n******\n'''\nimport time\nimport turtle # draw a seven-segment nixie tube\ndef drawgap():\n turtle.penup()\n turtle.fd(5)\ndef drawLine(draw):\n drawgap()\n turtle.pendown() if draw else turtle.penup()\n turtle.fd(40)\n drawgap()\n turtle.right(90)\ndef drawDigit(digit): #draw the seven segments according to the digit\n drawLine(True) if digit in [2,3,4,5,6,8,9] else drawLine(False)\n drawLine(True) if digit 
in [0,1,3,4,5,6,7,8,9] else drawLine(False)\n drawLine(True) if digit in [0,2,3,5,6,8,9] else drawLine(False)\n drawLine(True) if digit in [0,2,6,8] else drawLine(False)\n turtle.left(90)\n drawLine(True) if digit in [0,4,5,6,8,9] else drawLine(False)\n drawLine(True) if digit in [0,2,3,5,6,7,8,9] else drawLine(False)\n drawLine(True) if digit in [0,1,2,3,4,7,8,9] else drawLine(False)\n turtle.left(180)\n turtle.penup() #Determine the position for drawing subsequent numbers\n turtle.fd(20) #Determine the position for drawing subsequent numbers\ndef drawDate(date): #the format is \"%Y-%m=%d+\"\n turtle.pencolor(\"red\")\n for i in date:\n if i=='-':\n turtle.write('年',font=(\"Arial\",18,\"normal\"))\n turtle.pencolor(\"green\")\n turtle.fd(40)\n elif i=='=':\n turtle.write('月',font=(\"Arial\",18,\"normal\"))\n turtle.pencolor(\"blue\")\n turtle.fd(40)\n elif i=='+':\n turtle.write('日',font=(\"Arial\",18,\"normal\"))\n else:\n drawDigit(eval(i))\ndef main():\n turtle.setup(800,350,200,200)\n turtle.penup()\n turtle.fd(-300)\n turtle.pensize(5)\n drawDate(time.strftime(\"%Y-%m=%d+\",time.gmtime()))\n turtle.hideturtle()\n turtle.done()\nmain()","sub_path":"fifth_course/function/Ss_digital_tube.py","file_name":"Ss_digital_tube.py","file_ext":"py","file_size_in_byte":1893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"553800996","text":"#!/usr/bin/env python3\n\"\"\"\nmax_Avalam agent.\nCopyright (C) 2015, Joachim de Droogh and Maxime Istasse\n\nThis program is free software; you can redistribute it and/or modify\nit under the terms of the GNU General Public License as published by\nthe Free Software Foundation; version 2 of the License.\n\nThis program is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\nGNU General Public License for more details.\n\nYou should have received a copy of the GNU General Public License\nalong with this program; if not, see .\n\n\"\"\"\n\nimport max_avalam\nimport time\nimport max_minimax\nimport random\nimport math\nimport collections\n\n\nclass Defer:\n \"\"\"\n A context manager to defer actions.\n \"\"\"\n def __init__(self, *actions):\n self.actions = list(actions)\n\n def __enter__(self):\n return self\n\n def __call__(self, action):\n self.actions.append(action)\n\n def __exit__(self, exc_type, exc_val, exc_tb):\n while len(self.actions) > 0:\n self.actions.pop()()\n\n\ndef pos(val):\n return val if val > 0 else 0\n\n\ndef neg(val):\n return val if val < 0 else 0\n\n\ndef board_evaluation(ev_function):\n return ev_function\n\n\ndef action_evaluation(ev_function):\n return ev_function\n\n\n# ###### ####### # ###### ######\n# # # # # # # # # # # ###### # # ## #\n# # # # # # # # # # # # # # # # #\n# ###### # # # # ###### # # ##### # # # # #\n# # # # # ####### # # # # # # # ###### # ###\n# # # # # # # # # # # # # # # # # ###\n# ###### ####### # # # # ###### ###### ## # # ###### ###\n\n\n@board_evaluation\ndef towers_difference(board):\n diff = 0\n for i, row in enumerate(board.m):\n for j, tower in enumerate(row):\n if tower > 0:\n diff += 1\n elif tower < 0:\n diff -= 1\n\n return diff\n\n\n@board_evaluation\ndef blocked_towers_difference(board):\n diff = 0\n for i, row in enumerate(board.m):\n for j, tower in enumerate(row):\n if tower == 0:\n continue\n if next(board.get_tower_actions(i, j), None) is None:\n diff += tower//abs(tower)\n\n return diff\n\n\n@board_evaluation\ndef five_towers_difference(board):\n diff = 0\n for i, row in enumerate(board.m):\n for j, tower in enumerate(row):\n if abs(tower) == 5:\n diff += tower // abs(tower)\n\n return diff\n\n\n# # ##### ####### ### ####### # #\n# # # # # # # # # ## # ###### # # ## #\n# # # # # # # # # # # # # # # # #\n# # # # # # # # # # # ##### # # # # #\n# ####### # # # # # # # # # # # ###### # ###\n# # # # # # # # # # ## # # # # # # ###\n# # # ##### # ### ####### # # ###### ## # # ###### ###\n\n\n@action_evaluation\ndef moved_tower(board, action):\n ai, aj, _, _ = action\n tower, _ = board.towers(action)\n return tower // abs(tower)\n\n\n@action_evaluation\ndef build_or_steal_untouchable(board, action):\n \"\"\"Returns 2 if the current action steals a 1, 1 otherwise\n \"\"\"\n ai, aj, bi, bj = action\n a, b = board.towers(action)\n board.play_action(action)\n ret = next(board.get_tower_actions(bi, bj), None) is None\n board.cancel()\n\n if ret:\n return 2 if a > 0 > b else 1\n return 0\n\n\n@action_evaluation\ndef build_five(board, action):\n _, _, r = board.towers(action, True)\n return r // 5\n\n\n@action_evaluation\ndef build_unstealable(board, action):\n with Defer() as defer:\n _, _, bi, bj = action\n\n board.play_action(action)\n defer(board.cancel)\n\n b = board.m[bi][bj]\n for _, _, ai, aj in board.get_tower_actions(bi, bj):\n a = board.m[ai][aj]\n if a*b < 0:\n return 0\n\n return b // abs(b)\n\n\n@action_evaluation\ndef isolating_tower(board, action):\n # If component isolated from the beginning: nope\n if board.isolated_component(action[:2]) != 0:\n return 0\n\n # Not isolated, and then isolated?\n with Defer() as defer:\n board.play_action(action)\n defer(board.cancel)\n\n return board.isolated_component(action[2:])\n\n\n@action_evaluation\ndef isolating_components(board, action, detail=False):\n if board.isolated_component(action[:2]) != 0:\n return 0\n ai, aj, _, _ = action\n 
neighbors = [a[2:] for a in board.get_tower_actions(ai, aj)]\n board.play_action(action)\n\n visited = set()\n isolated = [0, 0] if detail else 0\n for n in neighbors:\n if n in visited:\n continue\n i = board.isolated_component(n, visited)\n\n if not detail:\n isolated += i\n elif i > 0:\n isolated[0] += i\n elif i < 0:\n isolated[1] -= i\n\n board.cancel()\n return tuple(isolated) if detail else isolated\n\n\n@action_evaluation\ndef action_in_isolated(board, action): # TODO Winning isolation? TODO???\n a, b = board.towers(action)\n if a * b < 0:\n return 0\n return board.isolated_component(action[:2])\n\n\n@action_evaluation\ndef make_other_loose_tower(board, action):\n a, b = board.towers(action)\n return 1 if b < 0 else -1\n\n# TODO Detect when isolating a component, and when playing into one?\n# TODO Favorise leaving a cross?\n# TODO consider avoiding the other player to isolate his towers?\n\n# ###### ####### ## ## ######## #### ######\n# ## ## ## ## ### ## ## ## ## ##\n# ## ## ## #### ## ## ## ##\n# ## ## ## ## ## ## ###### ## ## ####\n# ## ## ## ## #### ## ## ## ##\n# ## ## ## ## ## ### ## ## ## ##\n# ###### ####### ## ## ## #### ######\n\n\ndef eval_board(board, check_end=True):\n \"\"\"\n evaluates a board using our criterias\n \"\"\"\n if check_end and board.is_finished():\n return board.get_score() * 1e10\n\n score = 0\n\n for i, row in enumerate(board.m):\n for j, tower in enumerate(row):\n if tower == 0:\n continue\n if tower > 0:\n score += 3\n elif tower < 0:\n score -= 3\n if tower != 0 and next(board.get_tower_actions(i, j), None) is None:\n score += 2 * tower // abs(tower)\n if abs(tower) == 5:\n score += tower // abs(tower)\n\n # score += towers_difference(board) * 3\n # score += blocked_towers_difference(board) * 2\n # score += five_towers_difference(board) * 1\n\n return score\n\n\ndef eval_action(b, a, rnd=True, check_end=True):\n \"\"\"\n evaluates an action using our criterias\n \"\"\"\n score = 0\n\n if check_end and b.play_action(a).is_finished():\n b.cancel()\n return b.get_score() * 1e10\n elif check_end:\n b.cancel()\n\n x, y = b.towers(a)\n\n score += make_other_loose_tower(b, a)\n score += build_or_steal_untouchable(b, a) * 5\n score += build_five(b, a) * 3\n\n return score + (random.random() / 2 if rnd else 0)\n\n\n\"\"\"\nHere, we'll generate the time and depths boundaries for the steps\n\"\"\"\n\n\ndef gen_time_depth(min, max, *, time_rule, depth_rule):\n l = list()\n for i in range(min, max+1):\n l.append((time_rule(i), depth_rule(i)))\n return l\n\n\ntimes_and_depths = [(0.1, 1)]\n\n\ndef fact(x):\n if x == 0:\n return 1\n else:\n return x * fact(x-1)\n\n# exponent\nx = 2\n# Limited by time\ntimes_and_depths += gen_time_depth(1, 10,\n depth_rule=lambda s: s,\n time_rule=lambda s: fact(s),\n )\n\ndef prec():\n return sum([t[0] for i, t in enumerate(times_and_depths)])\n\n\ndef ap(x):\n times_and_depths.append(x)\n\nap((prec()*1.5, 12)) # 11\nap((prec()*1.5, 12)) # 12\nap((prec()*1.5, 14)) # 13\nap((prec()*1.5, 14)) # 14\nap((prec()*1.5, 16)) # 15\nap((prec()*1.4, 16)) # 16\nap((prec()*1.3, 10)) # 17\nap((prec()*1.2, 8)) # 18\nap((prec()*1.1, 6)) # 19\nap((prec()*1, 6)) # 20\nap((prec()*0.9, 6)) # 21\n\n\n# Limited by depth\ntimes_and_depths += gen_time_depth(22, 30,\n depth_rule=lambda s: 4,\n time_rule=lambda s: 10**s\n )\n\ntimes_and_depths += gen_time_depth(31, 48,\n depth_rule=lambda s: 2,\n time_rule=lambda s: 10**s\n )\n\ndepths = [depth for time, depth in times_and_depths]\ntimes = [time for time, depth in times_and_depths]\n# Make it cumulative, 
because we want to weight this step, but consider the weight of the future ones for the time left\ntimes = [t / sum(times[:i+1:2]) if i % 2 == 0 else\n t / sum(times[1:i+1:2]) for i, t in enumerate(times)]\n\n\n# Consider plotting for comprehension\n\"\"\"\nx = [i for i in range(48, -1, -1)]\npyplot.plot(x, [times[x] for x in x])\npyplot.plot(x, [depths[x] for x in x])\npyplot.show()\nexit()\n\"\"\"\n\n# ### ###### ######## ## ## ######## ######\n# ## ## ## ## ## ### ## ## ## ##\n# ## ## ## ## #### ## ## ##\n# ## ## ## #### ###### ## ## ## ## ######\n# ######### ## ## ## ## #### ## ##\n# ## ## ## ## ## ## ### ## ## ##\n# ## ## ###### ######## ## ## ## ######\n\n\ndef max_steps_left(board):\n \"\"\"\n The number of movable towers left (approximately)\n This function even has a bias, but we have selected the weight on its basis so we can't remove it.\n \"\"\"\n movable = 0\n for i, row in enumerate(board.m):\n for j, cell in enumerate(row):\n if cell != 0 and next(board.get_tower_actions(i, j), None) is not None:\n movable += 1\n\n return movable\n\n\nclass Agent:\n \"\"\"This is the skeleton of an agent to play the max_Avalam game.\"\"\"\n\n def __init__(self, name=\"Tr1st1n0z0r\"):\n self.name = name\n\n def successors(self, state):\n \"\"\"The successors function must return (or yield) a list of\n pairs (a, s) in which a is the action played to reach the\n state s; s is the new state, i.e. a triplet (b, p, st) where\n b is the new board after the action a has been played,\n p is the player to play the next move and st is the next\n step number.\n \"\"\"\n board, player, step = state\n actions = [(action, eval_action(board, action))\n for action in board.get_actions()]\n actions.sort(key=lambda x: x[1], reverse=self.reverse)\n for action, score in actions:\n nboard = board.clone()\n try:\n nboard.play_action(action)\n except:\n # Should not happen anymore, but if it happens, we would like to know the configuration in which we had\n # an exception\n print(nboard, action)\n nstate = (nboard, -player, step+1)\n yield (action, nstate)\n\n def cutoff(self, state, depth):\n \"\"\"The cutoff function returns true if the alpha-beta/max_minimax\n search has to stop; false otherwise.\n \"\"\"\n board, player, step = state\n if self.left_round < 1 and depth != 1:\n raise TimeoutError('Time left: %d' % self.left_round)\n if board.is_finished() or depth >= self.current_depth:\n return True\n return False\n\n def evaluate(self, state):\n \"\"\"The evaluate function must return an integer value\n representing the utility function of the board.\n \"\"\"\n board, player, step = state\n return eval_board(board)\n\n @property\n def left_round(self):\n return self.total_round - (time.time() - self.begin_round)\n\n def play(self, board, player, step, time_left):\n \"\"\"This function is used to play a move according\n to the board, player and time left provided as input.\n It must return an action representing the move the player\n will perform.\n \"\"\"\n max_steps = max_steps_left(board)\n self.max_steps = max_steps\n self.begin_round = time.time()\n self.total_round = time_left * times[max_steps]\n print(\"still %d steps, so %.2f seconds for this step\" % (max_steps, self.total_round))\n\n new_board = max_avalam.Board(board.get_percepts(player == max_avalam.PLAYER2))\n state = (new_board, player, step)\n\n last_durations = []\n ret = None\n st = None\n for i in range(depths[max_steps]):\n self.current_depth = i+1\n # We shouldn't compute a far iteration if too costly, or odd, except if it is near the end or 
ends it\n if len(last_durations) >= 2:\n est = max(last_durations[-1], last_durations[-1]*(last_durations[-1]/last_durations[-2]))\n print(\"next step would be computable in %.2f seconds\" % (last_durations[-1]*(last_durations[-1]/last_durations[-2])))\n # Not enough time in this round? (correcting factor for more than 22 steps left)\n if self.left_round < est or (max_steps >= 22 and est >= time_left / 3):\n break\n # We try to skip odd, but take the last ones into account\n if self.current_depth % 2 == 1 and max_steps_left(st[0]) > 10:\n last_durations.append(est)\n continue\n t = time.time()\n try:\n if self.current_depth > 2:\n previous_ret = ret\n score, ret, st = max_minimax.search(state, self, score=True)\n if st[0].is_finished():\n print(\"REACHED THE END. Score: %d\" % st.get_score())\n break\n elif self.current_depth > 2 and self.current_depth % 2 == 1:\n ret = previous_ret\n except TimeoutError:\n print(\"cut %d\" % (step))\n break\n last_durations.append(time.time()-t)\n print(\"depth %d computed in %f seconds\" % (i+1, last_durations[-1]))\n\n # printing the result of the selected move\n print(ret, '%d %d => %d' % new_board.towers(ret, True))\n return ret\n\nif __name__ == \"__main__\":\n max_avalam.agent_main(Agent())\n","sub_path":"avalam/AvalamFramework/max_contest_agent.py","file_name":"max_contest_agent.py","file_ext":"py","file_size_in_byte":14636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"463001181","text":"from django.conf import settings\nfrom django.contrib.auth.decorators import login_required\nfrom django.http import Http404, HttpResponse\nfrom django.shortcuts import get_object_or_404\nfrom main.models import Project\nfrom utils.decorators import render_to\n\n\n@login_required\n@render_to('main/index.html')\ndef index(request):\n owned_projects = Project.objects.filter(owner=request.user)\n projects = Project.objects.filter(users=request.user).exclude(pk__in=[o.pk for o in owned_projects])\n return {\n 'owned_projects': owned_projects,\n 'projects': projects\n }\n\n\n@login_required\n@render_to('main/support.html')\ndef support(request, uuid):\n project = get_object_or_404(Project, uuid=uuid)\n\n if not project.has_access(request.user):\n raise Http404\n\n return {\n 'APP_ID': uuid,\n 'SESSION_COOKIE_NAME': settings.SESSION_COOKIE_NAME,\n 'CLIENT_ID_COOKIE_NAME': settings.CLIENT_ID_COOKIE_NAME\n }\n\n\n@login_required\ndef test_error(request):\n if request.user.is_staff and request.user.is_active:\n raise Exception('Test Sentry')\n return HttpResponse('Hi!')\n","sub_path":"support_site/main/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1119,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"460119006","text":"import cv2\nimport glob\nimport os\nimport re\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\ndef parse_label_info(label_file):\n sd = re.compile(\"([\\w\\d.]+)\")\n label_format =[\"obj_type\", \"truncated\", \"occluded\", \"angle\", \"left\", \"top\", \"right\", \"bottom\", \"height\", \"width\", \"length\", \"x\", \"y\", \"z\", \"rotation\"]\n label_list = list()\n while 1:\n d = label_file.readline()\n if d == \"\":\n break\n result = sd.findall(d)\n object_type = result[0]\n label_info = {label: value for label, value in zip(label_format, result)}\n for label in label_info:\n if label != \"obj_type\":\n label_info[label] = float(label_info[label])\n\n label_list.append(label_info)\n\n return 
label_list\n\n\ndef draw_sample(img_path, label_root_path):\n img_list = glob.glob(f\"{img_path}\\*.png\")\n for img_path in img_list:\n img = cv2.imread(img_path)\n img_name = os.path.basename(img_path)\n img_name = img_name[:-4]\n label_path = f\"{label_root_path}\\{img_name}.txt\"\n label_file = open(label_path, \"r\", newline=\"\")\n label_list = parse_label_info(label_file)\n\n for label_info in label_list:\n if label_info[\"obj_type\"] ==\"DontCare\":\n obj_color = (255,0,0)\n else:\n obj_color = (0,255,0)\n\n cv2.rectangle(img, (int(label_info[\"left\"]), int(label_info[\"top\"])), (int(label_info[\"right\"]), int(label_info[\"bottom\"])), obj_color)\n cv2.imshow(\"img\",img)\n cv2.waitKey(45)\n\n\ndef visualize_detections(image, boxes, classes, scores, color=(255, 0, 0)):\n\n \"\"\"Visualize Detections\"\"\"\n for box, _cls, score in zip(boxes, classes, scores):\n x1, y1, x2, y2 = box\n cv2.rectangle(image, (x1,y1), (x2,y2), (255,0,0))\n cv2.putText(image, _cls, (x1, y1), 2, 0.5, color, 1,)\n cv2.imshow(\"img\", image)\n cv2.waitKey(45)\n return image\n\n\n\n","sub_path":"Utils/Drawer.py","file_name":"Drawer.py","file_ext":"py","file_size_in_byte":1936,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"423945998","text":"import requests\nimport json\nimport traceback\n\n\ndef showAllBankUsers(expCode):\n try:\n print('\\n/api/bankUser/accounts/allId\\n')\n print('args = ' + str(basic_auth))\n print('expected :\\nusers and status code {}'.format(expCode))\n r = requests.get(host_url + '/api/user/util/allBankUsers', auth=(basic_auth))\n txt = r.text\n bankUsers = json.loads(txt)\n print('response : ')\n print(bankUsers)\n print('show showAllBankUsers status code ' + str(r.status_code))\n for i in bankUsers:\n if i['username'] == username:\n return int(i[\"id\"])\n except:\n traceback.print_exc()\n\ndef showBalance(accountId, expCode):\n try:\n print('/api/bankUser/atm/balance{}\\n')\n print('args = ' + str(basic_auth) + ' accID= ' + str(accountId))\n print('expected :\\nbalance and status code {}'.format(expCode))\n r = requests.get(host_url + \"/api/bankUser/atm/balance{}\".format(accountId), auth=(basic_auth))\n balance = requests.get(host_url + \"/api/bankUser/atm/balance{}\".format(accountId), auth=(basic_auth)).text\n print('response : ')\n print('balance ' + balance)\n print('show balance status code ' + str(r.status_code))\n except:\n traceback.print_exc()\n\n\ndef depo(accountId, deposit, expCode):\n try:\n dto = {\n \"accountId\": accountId,\n \"atmId\": 1,\n \"toAdd\": deposit\n }\n print('/api/bankUser/atm/deposit\\n')\n print('args = ' + str(basic_auth) + 'accID= ' + str(accountId) + str(dto))\n print('expected :\\nstatus code {}'.format(expCode))\n r = requests.post(host_url + '/api/bankUser/atm/deposit', json=dto, auth=(basic_auth))\n print('response : ')\n print('deposit status code ' + str(r.status_code))\n if int(deposit) < 0:\n showBalance(accountId, expCode=400)\n else:\n showBalance(accountId, expCode=200)\n except:\n traceback.print_exc()\n\n\ndef transf(accountId, transfer, expCode):\n try:\n dto = {\n \"amount\": transfer,\n \"fromAccountId\": accountId,\n \"toAccountId\": 2\n }\n print('/api/bankUser/atm/transfer\\n')\n print('args = ' + str(basic_auth) + 'accID= ' + str(accountId) + str(dto))\n print('expected :\\nstatus code {}'.format(expCode))\n r = requests.post(host_url + '/api/bankUser/atm/transfer', json=dto, auth=(basic_auth))\n print('transfer status code ' + str(r.status_code))\n if int(transfer) 
< 0:\n showBalance(accountId, expCode=400)\n else:\n showBalance(accountId, expCode=200)\n except:\n traceback.print_exc()\n\n\ndef withdrw(accountId, withdraw, expCode):\n try:\n dto = {\n \"accountId\": accountId,\n \"atmId\": 1,\n \"toSub\": withdraw\n }\n print('/api/bankUser/atm/withdraw\\n')\n print('args = ' + str(basic_auth) + 'accID= ' + str(accountId) + str(dto))\n print('expected :\\nstatus code {}'.format(expCode))\n r = requests.post(host_url + '/api/bankUser/atm/withdraw', json=dto, auth=(basic_auth))\n print('withdrw status code ' + str(r.status_code))\n if int(withdraw) < 0:\n showBalance(accountId, expCode=400)\n else:\n showBalance(accountId, expCode=200)\n except:\n traceback.print_exc()\n\n\ndef addcash(addcsh, expCode):\n try:\n dto = {\n \"atmId\": 1,\n \"toAdd\": addcsh\n }\n print('/api/collector/atmCash/add\\n')\n print('args = ' + str(notbasic_auth) + ' ' + str(dto))\n print('expected :\\nstatus code {}'.format(expCode))\n r = requests.post(host_url + '/api/collector/atmCash/add', json=dto, auth=(notbasic_auth))\n print('add status code ' + str(r.status_code))\n except:\n traceback.print_exc()\n\n\ndef createAtm(expCode):\n try:\n print('/api/collector/atmCash/add\\n')\n print('args = ' + str(notbasic_auth))\n print('expected :\\nstatus code {}'.format(expCode))\n r = requests.post(host_url + '/api/collector/atm', auth=(notbasic_auth))\n print('create status code ' + str(r.status_code))\n except:\n traceback.print_exc()\n\n\ndef deleteAtm(id,expCode):\n try:\n print('/api/collector/atm/{}\\n')\n print('args = ' + str(notbasic_auth) + 'id- ' + str(id))\n print('expected :\\nstatus code {}'.format(expCode))\n r = requests.delete(host_url + '/api/collector/atm/{}'.format(id), auth=(notbasic_auth))\n print('delete status code ' + str(r.status_code))\n except:\n traceback.print_exc()\n\n\ndef showallAtm(expCode):\n try:\n print('/api/collector/atm/allId\\n')\n print('args = ' + str(notbasic_auth))\n print('expected :\\nshow atms and status code {}'.format(expCode))\n r = requests.get(host_url + '/api/collector/atm/allId', auth=(notbasic_auth))\n print('show all status code ' + str(r.status_code))\n print('atms ' + str(r.text))\n except:\n traceback.print_exc()\n\n\ndef registr(new_acc_pass, new_acc_usrnm, expCode):\n try:\n dto = {\n \"password\": new_acc_pass,\n \"username\": new_acc_usrnm\n }\n print('/api/reg/bankUser\\n')\n print('args = ' + str(dto))\n print('expected :\\nstatus code {}'.format(expCode))\n r = requests.post(host_url + '/api/reg/bankUser', json=dto)\n print('registration status code ' + str(r.status_code))\n except:\n traceback.print_exc()\n\n\ndef showallBankIds():\n try:\n print('/api/bankUser/accounts/allId\\n')\n print('args = ' + str(basic_auth))\n print('expected :\\nids and status code 200')\n r = requests.get(host_url + '/api/bankUser/accounts/allId', auth=(basic_auth))\n print('response : ')\n print('show bankAccountsOfCurrentUser status code ' + str(r.status_code))\n print('ids ' + str(r.text))\n except:\n traceback.print_exc()\n\n\nhost_url = 'https://intense-savannah-92593.herokuapp.com'\nusername = 'postgres3'\npasswd = '12345678'\ncollector_username = 'collector'\ncollector_passwd = '12345678'\nbasic_auth = username, passwd\nnotbasic_auth = collector_username, collector_passwd\n\nprint('Адекватные параметры')\naccountId = showAllBankUsers(expCode=200)\nprint('Адекватные параметры')\nshowBalance(accountId, expCode=200)\nshowBalance(accountId=100000, expCode=403)\nprint('Неадекватные параметры')\nshowBalance(accountId=-1000, 
expCode=400)\nshowBalance(accountId=-0.5, expCode=400)\nprint('Адекватные параметры')\ndepo(accountId, deposit=10000, expCode=200)\ndepo(accountId, deposit=1000000000000000000000000000000000000000000000000000000000000000000000000000000000, expCode=200)\ndepo(accountId=1000000, deposit=10000000000000000000000000000000000000000000000000000000000000000000000000, expCode=403)\nprint('Неадекватные параметры')\ndepo(accountId, deposit=-10000000000000000000000000000000000000000000, expCode=400)\ndepo(accountId=-1000000, deposit=10000000000000000000000000000000000000000000000000000000, expCode=400)\nprint('Адекватные параметры')\ntransf(accountId=1, transfer=10000, expCode=200)\ntransf(accountId=1, transfer=1000000000000000000000000000000000000000000000000000000000000000000000000000000000, expCode=200)\ntransf(accountId=1000000, transfer=10000000000000000000000000000000000000000000000000000000000000000000000000, expCode=403)\nprint('Неадекватные параметры')\ntransf(accountId=1, transfer=-10000000000000000000000000000000000000000000, expCode=400)\ntransf(accountId=-1000000, transfer=10000000000000000000000000000000000000000000000000000000, expCode=400)\nprint('Адекватные параметры')\nwithdrw(accountId=1,withdraw=10000, expCode=200)\nwithdrw(accountId=1, withdraw=1000000000000000000000000000000000000000000000000000000000000000000000000000000, expCode=200)\nwithdrw(accountId=1000000, withdraw=10000000000000000000000000000000000000000000000000000000000000000000000000, expCode=403)\nprint('Неадекватные параметры')\nwithdrw(accountId=1, withdraw=-10000000000000000000000000000000000000000000, expCode=400)\nwithdrw(accountId=-1000000, withdraw=10000000000000000000000000000000000000000000000000000000, expCode=400)\nprint('Адекватные параметры')\naddcash(addcsh=10000, expCode=200)\naddcash(addcsh=1000000000000000000000000000000000000000000000000000000000000000000000000000000, expCode=200)\naddcash(addcsh=pow(100, 100), expCode=400)\nprint('Неадекватные параметры')\naddcash(addcsh=-10000000000000000000000000000000000000000000, expCode=400)\nprint('Аадекватные параметры')\ncreateAtm(expCode=201)\nprint('Аадекватные параметры')\ndeleteAtm(id=1, expCode=200)\ndeleteAtm(id=1000123, expCode=204)\nprint('Неадекватные параметры')\ndeleteAtm(id=-1, expCode=400)\ndeleteAtm(id=-1000, expCode=400)\nprint('Аадекватные параметры')\nshowallAtm(expCode=200)\nprint('Аадекватные параметры')\nregistr(new_acc_usrnm='qwe231', new_acc_pass='qwe123qwe', expCode=201)\nregistr(new_acc_usrnm='qwe1', new_acc_pass='qwe123qwe', expCode=409)\nprint('Неадекватные параметры')\nregistr(new_acc_usrnm='-436543624632080931254авпвыарвапы09324=-324=324=-0325-0=fdsf$#@$%#@qwe1',\n new_acc_pass='qwe21-308r0-23idxr-m23фывпавыфп-irmm-c03euirn-0e31uirx-0ewmur-za081,2=832=-18r2193078r21123qwe', expCode=400)\nshowallBankIds()","sub_path":"RGZ/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":9720,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"326003744","text":"\"\"\"\n 题目描述:\n 给定一个只包括 '(',')','{','}','[',']' 的字符串 s ,判断字符串是否有效。\n 有效字符串需满足:\n 1、左括号必须用相同类型的右括号闭合。\n 2、左括号必须以正确的顺序闭合。\n\"\"\"\n\n\n# 定义一个栈(顺序栈)\nclass Stack:\n def __init__(self):\n self.stack = []\n\n def isEmpty(self): # 判断是否为空\n return bool(self.stack) # False为空,true不为空\n\n def push(self, value): # 入栈\n self.stack.append(value)\n\n def pop(self): # 出栈\n if self.stack:\n return self.stack.pop()\n else:\n raise LookupError('stack is empty')\n\n def size(self):\n return len(self.stack)\n\n def peek(self):\n if 
self.stack:\n return self.stack[self.size() - 1]\n else:\n raise LookupError('stack is empty')\n\n def top(self):\n if self.stack:\n return self.stack[-1]\n else:\n raise LookupError('stack is empty')\n\n\n# Self-devised approach\ndef way_one(s: str):\n braStack = Stack()\n braDict = {'(': ')', '{': '}', '[': ']'} # Define a dict of bracket key-value pairs\n for item in s:\n if item == '(' or item == '{' or item == '[':\n braStack.push(item)\n else:\n # NB: isEmpty() above is truthy when the stack is NOT empty\n if not braStack.isEmpty():\n return False\n leftBra = braStack.pop()\n if item != braDict[leftBra]:\n return False\n # The stack must have no elements left\n if not braStack.isEmpty():\n return True\n else:\n return False\n\n","sub_path":"_210318/Valid_Bracket.py","file_name":"Valid_Bracket.py","file_ext":"py","file_size_in_byte":1605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"179047430","text":"from .common_imports import *\n\nclass SimulationSync(object):\n def __init__(self, previous, DEBUG=False, PLOT=False):\n \"\"\"Constructor of SimulationSync. Used for overall simulation control and debug.\"\"\"\n \n self.DEBUG = DEBUG\n \n self.PLOT = PLOT\n \n if self.DEBUG[\"all\"] or self.DEBUG[\"SimulationSync\"]:\n print('Running SimulationSync...')\n \n # String with the followed path for the whole simulation\n self.simulation_path = \"\"\n \n # Stores previous object, from where given object was called\n self.previous = previous\n \n # Dictionary with all information on the message frames\n # self.message_dict = {\n # \"n_frames\": 0,\n # \"type\": [],\n # \"send_iterations\": []\n # }\n self.message_dict = {}\n \n \n def appendToSimulationPath(self, next_path):\n \"\"\"Append next_path to self.simulation_path\"\"\"\n \n # If simul sync is enabled\n if self.DEBUG[\"all\"] or self.DEBUG[\"SimulationSync\"]:\n # Append previous info\n next_path = f\"Called << {next_path} >> From << {self.previous} >>\"\n \n if self.simulation_path != \"\":\n # if \"@\" not in next_path:\n # self.simulation_path = f\"{self.simulation_path}\\n>>> {next_path}\"\n # else:\n # self.simulation_path = f\"{self.simulation_path}\\n>>>\\t{next_path}\"\n self.simulation_path = f\"{self.simulation_path}\\n> {next_path};\"\n else:\n self.simulation_path = f\"> {next_path};\"\n \n def showSimulationPath(self):\n \"\"\"Returns value of self.simulation_path\"\"\"\n \n # If simul sync is not enabled\n if not self.DEBUG[\"all\"] and not self.DEBUG[\"SimulationSync\"]:\n return \"<< SimulationSync DEBUG is disabled! 
>>\"\n \n tabular_data = self.simulation_path.replace('\\n', '')\\\n .replace('<', '').replace('>', '')\\\n .replace('Called', '').strip()\n tabular_data = [line for line in tabular_data.split(';')]\n tabular_data = [[item.strip() for item in line.split('From')] for line in tabular_data]\n \n tabular_data = tabulate(tabular_data, headers=['Called', 'From'])\n \n return f\"\"\"\n************************* SIMULATION PATH *************************\n\n{tabular_data}\n\n*******************************************************************\n\"\"\"\n\n def setSimulationPath(self, simulation_path):\n \"\"\"Set new value for self.simulation_path\"\"\"\n \n self.simulation_path = simulation_path\n \n def getSimulationPath(self):\n \"\"\"Get new value for self.simulation_path\"\"\"\n \n return self.simulation_path\n \n def getDebug(self, module = \"all\"):\n \"\"\"Returns value of self.DEBUG\"\"\"\n \n return self.DEBUG[module]\n\n def setDebug(self, DEBUG, module = \"all\"):\n \"\"\"Set new value for self.DEBUG\"\"\"\n \n self.DEBUG = DEBUG[module]\n \n def getPlot(self, module = \"all\"):\n \"\"\"Returns value of self.PLOT\"\"\"\n \n return self.PLOT[module]\n\n def setPlot(self, PLOT, module = \"all\"):\n \"\"\"Set new value for self.PLOT\"\"\"\n \n self.PLOT = PLOT[module]\n \n def getPrevious(self):\n \"\"\"Returns value of self.previous\"\"\"\n \n return self.previous\n\n def setPrevious(self, previous):\n \"\"\"Set new value for self.previous\"\"\"\n \n self.previous = previous\n \n def showMessageDict(self):\n \"\"\"Returns value of self.message_dict\"\"\"\n \n # If simul sync is not enabled\n if not self.DEBUG[\"all\"] and not self.DEBUG[\"SimulationSync\"]:\n return \"<< SimulationSync DEBUG is disabled! >>\"\n \n all_descriptions = ['TX', 'RX', '# packets',\\\n 'Data type', 'BER (%)',\\\n '# bit error', 'Total #bits']\n tabular_data = []\n # tabular_data.append(all_descriptions)\n for index in range(0, len(self.message_dict[\"tx_info\"])):\n sub_list = [\n str(self.message_dict[\"tx_info\"][index]),\n str(self.message_dict[\"rx_info\"][index]),\n self.message_dict[\"packets\"][index],\n self.message_dict[\"type\"][index],\n f'{self.message_dict[\"BER\"][index]} %',\n f'{self.message_dict[\"NBER\"][index]} bits',\n f'{self.message_dict[\"n_bits\"][index]} bits'\n \n ]\n # WRONG CALCULATION FOR IMAGES!!!!!!!!!!\n \n tabular_data.append(sub_list)\n \n # tabular_data = tabulate(tabular_data, headers=['TX', 'RX', 'Type', 'BER', 'NBER'])\n tabular_data = tabulate(tabular_data, headers = all_descriptions)\n \n return f\"\"\"\n************************* RUN SUMMARY *************************\n\nNumber of frames sent: << {self.message_dict[\"n_frames\"]} >>\n\nFrames detail:\n\n{tabular_data}\n\n*******************************************************************\n\"\"\"\n # return self.message_dict\n\n def setMessageDict(self, message_dict):\n \"\"\"Set new value for self.message_dict\"\"\"\n \n self.message_dict = message_dict\n \n def getMessageDict(self):\n \"\"\"Get new value for self.message_dict\"\"\"\n \n return self.message_dict\n \n def appendToMessageDict(self, key, value):\n \"\"\"Set new value for self.message_dict\"\"\"\n \n # If simul sync is enabled\n if self.DEBUG[\"all\"] or self.DEBUG[\"SimulationSync\"]:\n \n # Create empty list, if does not exist yet\n if key not in self.message_dict.keys():\n self.message_dict[key] = []\n \n # Add to list\n if isinstance(value, list):\n self.message_dict[key] += value\n else:\n self.message_dict[key].append(value)\n \n # 
print(\"\\n\\nappendToMessageDict....\")\n # print(self.message_dict[key])\n # print(key)\n # print(value)\n # print()\n ","sub_path":"VLC_devel/build/lib/vlcPhy/SimulationSync.py","file_name":"SimulationSync.py","file_ext":"py","file_size_in_byte":6212,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"654366732","text":"import multiprocessing\nfrom multiprocessing import Process, Queue\n\n\nfrom rational_explorer import *\nfrom reactive_explorer import *\nfrom datetime import datetime\n\n\nclass MultiProcessExplore:\n\n def __init__(self):\n self.board_sizes: List[int] = [5, 10, 15, 20, 25]\n # self.board_sizes: List[int] = [5, 10]\n self.lock = multiprocessing.Lock()\n self.queue: Queue = Queue()\n self.output = list(np.full(len(Output), 0))\n self.num_caves: int = 20\n\n def explore(self) -> None:\n for size in self.board_sizes:\n processes: List[Process] = []\n self.lock.acquire()\n print(f\"Board: {size}\")\n self.lock.release()\n for i in range(0, self.num_caves):\n processes.append(multiprocessing.Process(target=self._run_explorer, args=(deepcopy(size), i,),\n name=f\"rational_explorer_process_{i}\"))\n for process in processes:\n process.start()\n self.lock.acquire()\n print(f\"Processes 0-{self.num_caves - 1} started for board size: {size}\")\n self.lock.release()\n for process in processes:\n pop = self.queue.get()\n if len(pop) == len(Output):\n for i in range(len(pop)):\n self.output[i] += pop[i]\n\n\n process.join()\n self.lock.acquire()\n print(f\"Processes 0-{self.num_caves - 1} joined for board size: {size}\\n\")\n print(\"Board size: \" + str(size) + \"x\" + str(size))\n print(\"Number of runs: \", self.num_caves)\n print(\"Rational: Number of times gold found: \", self.output[Output.RAT_GOLD])\n print(\"Rational: Number of times died: \", self.output[Output.RAT_DEATHS])\n print(\"Rational: Success rate: \", self.output[Output.RAT_GOLD] / self.num_caves)\n print(\"Rational: Deaths from old age: \", self.output[Output.RAT_OLD] / self.num_caves)\n print(\"Rational: Deaths from pit: \", self.output[Output.RAT_PIT] / self.num_caves)\n print(\"Rational: Deaths from wumpus: \", self.output[Output.RAT_WUMPUS] / self.num_caves)\n print(\"Rational: Average actions taken: \", self.output[Output.RAT_ACTIONS] / self.num_caves)\n print(\"Rational: Average cells explored: \", self.output[Output.RAT_CELLS_EXPLORED] / self.num_caves)\n print()\n print(\"Reactive: Number of times gold found: \", self.output[Output.REA_GOLD])\n print(\"Reactive: Number of times died: \", self.output[Output.REA_DEATHS])\n print(\"Reactive: Success rate: \", self.output[Output.REA_GOLD] / self.num_caves)\n print(\"Reactive: Deaths from old age: \", self.output[Output.REA_OLD] / self.num_caves)\n print(\"Reactive: Deaths from pit: \", self.output[Output.REA_PIT] / self.num_caves)\n print(\"Reactive: Deaths from wumpus: \", self.output[Output.REA_WUMPUS] / self.num_caves)\n print(\"Reactive: Average actions taken: \", self.output[Output.REA_ACTIONS] / self.num_caves)\n print(\"Reactive: Average cells explored: \", self.output[Output.REA_CELLS_EXPLORED] / self.num_caves)\n\n print(\"\\n\\n\")\n self.output = list(np.full(len(Output), 0))\n self.lock.release()\n\n def _run_explorer(self, board_size: int, process_number: int) -> None:\n \"\"\"\n Used to create and run a rationalExplorer on a Board of size board_size.\n :return:\n \"\"\"\n start_time = datetime.now()\n board: Board = Board(board_size)\n board.generate_board()\n rational_explorer: 
RationalExplorer = RationalExplorer(board)\n reactive_explorer: ReactiveExplorer = ReactiveExplorer(deepcopy(board))\n while not rational_explorer.is_dead and not rational_explorer.has_gold:\n rational_explorer.act()\n while not reactive_explorer.is_dead and not reactive_explorer.has_gold:\n reactive_explorer.act()\n self.lock.acquire()\n\n x_rat: int = rational_explorer.location[0]\n y_rat: int = rational_explorer.location[1]\n\n x_rea: int = reactive_explorer.location[0]\n y_rea: int = reactive_explorer.location[1]\n\n self.queue.put([\n 1 if rational_explorer.is_dead else 0,\n 1 if rational_explorer.has_gold else 0,\n 1 if rational_explorer.board.grid[x_rat][y_rat][CellContent.WUMPUS] else 0,\n 1 if rational_explorer.board.grid[x_rat][y_rat][CellContent.PIT] else 0,\n 1 if rational_explorer.max_age <= rational_explorer.actions_taken else 0,\n rational_explorer.actions_taken,\n\n 1 if reactive_explorer.is_dead else 0,\n 1 if reactive_explorer.has_gold else 0,\n 1 if reactive_explorer.board.grid[x_rea][y_rea][CellContent.WUMPUS] else 0,\n 1 if reactive_explorer.board.grid[x_rea][y_rea][CellContent.PIT] else 0,\n 1 if reactive_explorer.max_age <= reactive_explorer.actions_taken else 0,\n reactive_explorer.actions_taken,\n len(rational_explorer.safe_cells),\n len(reactive_explorer.safe_cells)\n ])\n end_time = datetime.now()\n\n print(\"Finished cave\", process_number, \"in\", end_time - start_time,\n \" \" + (\"X\" if rational_explorer.is_dead else \"G\"),\n \"/\", (\"X\" if reactive_explorer.is_dead else \"G\"),\n \" \" + str(rational_explorer.actions_taken),\n \"/\", str(reactive_explorer.actions_taken),\n \" \" + str(len(rational_explorer.safe_cells)),\n \"/\", str(len(reactive_explorer.safe_cells))\n )\n self.lock.release()\n return\n\n\nif __name__ == '__main__':\n # Parallelism:\n multi_process_explore: MultiProcessExplore = MultiProcessExplore()\n multi_process_explore.explore()\n # num_gold = 0\n # num_deaths = 0\n # num_wump = 0\n # num_pit = 0\n # num_old = 0\n # start = datetime.now()\n # b = Board(5)\n # b.generate_board()\n # e = RationalExplorer(b)\n # print(b)\n # while not e.is_dead and not e.has_gold:\n # print(e)\n # e.act()\n # if e.is_dead:\n # num_deaths += 1\n # if e.has_gold:\n # num_gold += 1\n #\n # x = e.location[0]\n # y = e.location[1]\n #\n # if e.board.grid[x][y][CellContent.WUMPUS]:\n # num_wump += 1\n # if e.board.grid[x][y][CellContent.PIT]:\n # num_pit += 1\n # if e.max_age <= e.actions_taken:\n # num_old += 1\n #\n # end = datetime.now()\n # print(e)\n\n\n\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"276715975","text":"import cv2\nfrom config import cascadePath\nimport math\n\nclass Detector:\n\n\tdef __init__(self):\n\t\tself.face_cascade = cv2.CascadeClassifier(cascadePath+'haarcascade_frontalface_default.xml')\n\t\tself.eye_cascade = cv2.CascadeClassifier(cascadePath+'haarcascade_eye.xml')\n\t\tself.faceBB = None\n\t\tself.eyesBB = {'left':None, 'right':None}\n\n\tdef resetEyesBB(self):\n\t\tself.eyesBB = {'left':None, 'right':None}\n\n\tdef isSameFaceBB(self,bb1,bb2):\n\t\tx1,y1,w1,h1 = bb1\n\t\tx2,y2,w2,h2 = bb2\n\t\ttolerance = 5\n\t\treturn math.fabs(w1-w2) < tolerance and math.fabs(h1-h2) < tolerance and math.fabs(x1-x2) < tolerance and math.fabs(y1-y2) < tolerance\n\n\tdef isSameEyeBB(self,bb1,bb2):\n\t\tx1,y1,w1,h1 = bb1\n\t\tx2,y2,w2,h2 = bb2\n\t\ttoleranceX = 10\n\t\ttoleranceY = 
10\n\t\ttoleranceW = 10\n\t\ttoleranceH = 10\n\n\t\treturn math.fabs(w1-w2) < toleranceW and math.fabs(h1-h2) < toleranceH and math.fabs(x1-x2) < toleranceX and math.fabs(y1-y2) < toleranceY\n\n\tdef getFace(self, frame):\n\t\t#Find faces\n\t\tfaces = self.face_cascade.detectMultiScale(\n\t\t\tframe,\n\t\t\tscaleFactor=1.1,\n\t\t\tminNeighbors=5,\n\t\t\tminSize=(50, 50),\n\t\t\tmaxSize=(110, 110),\n\t\t\tflags = cv2.cv.CV_HAAR_SCALE_IMAGE\n\t\t)\n\n\t\t#Skip frame if no face\n\t\tif len(faces) != 1:\n\t\t\treturn None\n\t\t\n\t\tfaceBB = faces[0]\n\n\t\t#Save face for first frame\n\t\tif self.faceBB is None:\n\t\t\tself.faceBB = faceBB\n\t\t#Check if similar bounding box\n\t\telif self.isSameFaceBB(faceBB,self.faceBB):\n\t\t\t#Same-ish BB, load\n\t\t\tfaceBB = self.faceBB\n\t\telse:\n\t\t\t#New BB, save and skip frame\n\t\t\tself.faceBB = faceBB\n\t\t\treturn None\n\n\t\treturn faceBB\n\n\t#Returns left eye then right (on picture)\n\tdef getEyes(self, face):\n\t\t\n\t\t#Find eyes in the face\n\t\teyes = self.eye_cascade.detectMultiScale(\n\t\t\tface,\n\t\t\tscaleFactor=1.1,\n\t\t\tminNeighbors=5,\n\t\t\tminSize=(60, 60),\n\t\t\tmaxSize=(100, 100),\n\t\t\tflags = cv2.cv.CV_HAAR_SCALE_IMAGE\n\t\t)\n\n\t\tif len(eyes) != 2:\n\t\t\treturn None\n\n\t\tleftEyeX = 999\n\t\tleftEye = None\n\t\trightEye = None\n\n\t\t#Find left and right eyes\n\t\tfor ex,ey,ew,eh in eyes:\n\t\t\t#New left eye\n\t\t\tif ex < leftEyeX:\n\t\t\t\tif leftEye is not None:\n\t\t\t\t\trightEye = leftEye\n\t\t\t\t\tleftEye = (ex,ey,ew,eh)\n\t\t\t\telse:\n\t\t\t\t\tleftEye = (ex,ey,ew,eh)\n\t\t\t\t\tleftEyeX = ex\n\t\t\telse:\n\t\t\t\tif leftEye is not None:\n\t\t\t\t\trightEye = (ex,ey,ew,eh)\n\n\t\t#Stabilization\n\t\teyesBB = {'left':leftEye,'right':rightEye}\n\n\t\tfor side in ['left','right']:\n\n\t\t\t#Save first frame BB\n\t\t\tif self.eyesBB[side] is None:\n\t\t\t\tself.eyesBB[side] = eyesBB[side]\n\t\t\t#Load if similar BB\n\t\t\telif self.isSameEyeBB(eyesBB[side],self.eyesBB[side]):\n\t\t\t\teyesBB[side] = self.eyesBB[side]\n\t\t\t#Changed the Bounding Box\n\t\t\telse:\n\t\t\t\t#New BB, save and skip frame\n\t\t\t\tself.eyesBB[side] = eyesBB[side] \n\t\t\t\treturn None\n\t\t\t\n\t\t#Get BB for cropping\n\t\txLeft,yLeft,wLeft,hLeft = eyesBB['left']\n\t\txRight,yRight,wRight,hRight = eyesBB['right']\n\n\t\tfocusOnCenter = False\n\n\t\tif focusOnCenter:\n\t\t\t# #Focus on the center of the eye\n\t\t\twLeftNew = int(wLeft*0.75)\n\t\t\thLeftNew = int(hLeft*0.75)\n\t\t\txLeftNew = int(xLeft+float(wLeft)*0.5-float(wLeftNew)*0.5)\n\t\t\tyLeftNew = int(yLeft+float(hLeft)*0.55-float(hLeftNew)*0.45)\n\n\t\t\txLeft, yLeft, wLeft, hLeft = xLeftNew, yLeftNew, wLeftNew, hLeftNew\n\n\t\t\t# #Focus on the center of the eye\n\t\t\twRightNew = int(wRight*0.75)\n\t\t\thRightNew = int(hRight*0.75)\n\t\t\txRightNew = int(xRight+float(wRight)*0.5-float(wRightNew)*0.5)\n\t\t\tyRightNew = int(yRight+float(hRight)*0.55-float(hRightNew)*0.45)\n\n\t\t\txRight, yRight, wRight, hRight = xRightNew, yRightNew, wRightNew, hRightNew\n\n\t\tleftEyeImage = face[yLeft:yLeft+hLeft, xLeft:xLeft+wLeft]\n\t\trightEyeImage = face[yRight:yRight+hRight, xRight:xRight+wRight]\n\n\t\treturn leftEyeImage, rightEyeImage\n\n\n\n\n\n\n\n\n\n\n","sub_path":"detector.py","file_name":"detector.py","file_ext":"py","file_size_in_byte":3575,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"156546303","text":"#!/usr/bin/python3.4\n\n\"\"\" Based on OpenWeatherMap \"\"\"\n\n# Standard import\nimport sys\nimport 
time\nimport traceback\nfrom datetime import datetime\nfrom datetime import timedelta\nfrom datetime import timezone\nimport pymysql\n\n# Proprietary import\nsys.path.append('../utils/')\nimport config\nimport config_private\nfrom loggerfactory import LoggerFactory\nfrom loopingthread import LoopingThread\n\n\n\n\nclass MPPTMonitoringThread(LoopingThread):\n\n def initialize(self):\n self.log=LoggerFactory.get_file_logger(config.log_filename,\"MPPTMonitoringThread\",config.log_level)\n self.log.info(\"Initializing\")\n self.connection=pymysql.connect(host=config_private.db_host,port=config_private.db_port,user=config_private.db_user,passwd=config_private.db_password,db=config_private.db_name)\n self.cursor1=self.connection.cursor() # Scans each suitable MPPT_DATA_STATUS entries in a loop\n self.cursor2=self.connection.cursor() # Do other DB activities related to the content of cursor1\n self.cursor3=self.connection.cursor() # Used to get status descriptions\n self.log.info(\"Successfully initialized\")\n\n def call(self):\n self.log.info(\"Beginning a new run\")\n query=\"delete from MPPT_DATA_STATUS where timestampmyrtc else myrtc-mytstamp\n if delta>timedelta(seconds=config.monitoring_maxtimediff):\n alarms.append(\"MPPT RTC drift is \"+str(delta)+\" (\"+str(myrtc)+\")\")\n if fault!=0:\n alarms.append(\"MPPT FAULT is \"+self.get_status_description(\"fault\",fault))\n if running!=1:\n alarms.append(\"MPPT RUNNING is \"+self.get_status_description(\"running\",running))\n if case_temperature!=0:\n alarms.append(\"MPPT CASE_TEMPERATURE is \"+self.get_status_description(\"case_temperature\",case_temperature))\n if pv_voltage!=0:\n alarms.append(\"MPPT PV_VOLTAGE is \"+self.get_status_description(\"pv_voltage\",pv_voltage))\n if pv_overcurrent!=0:\n alarms.append(\"MPPT PV_OVERCURRENT is \"+self.get_status_description(\"pv_overcurrent\",pv_overcurrent))\n if load_overcurrent!=0:\n alarms.append(\"MPPT LOAD_OVERCURRENT is \"+self.get_status_description(\"load_overcurrent\",load_overcurrent))\n if battery_voltage!=0:\n alarms.append(\"MPPT BATTERY_VOLTAGE is \"+self.get_status_description(\"battery_voltage\",battery_voltage))\n if battery_temperature!=0:\n alarms.append(\"MPPT BATTERY_VOLTAGE is \"+self.get_status_description(\"battery_temperature\",battery_temperature))\n if battery_inner_resistance!=0:\n alarms.append(\"MPPT BATTERY_INNER_RESISTANCE is \"+self.get_status_description(\"battery_inner_resistance\",battery_inner_resistance))\n if battery_identification!=0:\n alarms.append(\"MPPT BATTERY_IDENTIFICATION is \"+self.get_status_description(\"battery_identification\",battery_identification))\n if short_pv!=0:\n alarms.append(\"MPPT SHORT_PV is \"+self.get_status_description(\"short_pv\",short_pv))\n if short_load!=0:\n alarms.append(\"MPPT SHORT_LOAD is \"+self.get_status_description(\"short_load\",short_load))\n if short_charging_mosfet!=0:\n alarms.append(\"MPPT SHORT_CHARGING_MOSFET is \"+self.get_status_description(\"short_charging_mosfet\",short_charging_mosfet))\n if short_load_mosfet!=0:\n alarms.append(\"MPPT SHORT_LOAD_MOSFET is \"+self.get_status_description(\"short_load_mosfet\",short_load_mosfet))\n if short_antireverse_mosfet!=0:\n alarms.append(\"MPPT SHORT_ANTIREVERSE_MOSFET is \"+self.get_status_description(\"short_antireverse_mosfet\",short_antireverse_mosfet))\n if short_any_mosfet!=0:\n alarms.append(\"MPPT SHORT_ANY_MOSFET is \"+self.get_status_description(\"short_any_mosfet\",short_any_mosfet))\n alarmsummary=\" -- \".join(alarms)\n # Recording alarm summary to 
database \n if len(alarms)>0:\n self.log.error(\"We have alarms from MPPT: \"+str(alarms))\n self.log.info(\"Saving alarm summary to database\")\n query1=\"update MPPT_DATA_STATUS set keep=1 where timestamp like '\"+str(timestamp)+\"'\"\n query2=\"insert into ALARMS (timestamp,component,alarmsummary,delivered,acked) values ('\"+str(timestamp)+\"','MPPT','\"+alarmsummary+\"',0,0)\"\n else:\n self.log.info(\"No alarm summary to be saved\")\n query1=\"update MPPT_DATA_STATUS set keep=0 where timestamp like '\"+str(timestamp)+\"'\"\n query2=\"\"\n try:\n self.cursor2.execute(\"start transaction\")\n self.log.debug(query1)\n self.cursor2.execute(query1)\n if query2!=\"\":\n self.log.debug(query2)\n self.cursor2.execute(query2)\n self.cursor2.execute(\"commit\")\n self.log.debug(\"Commit done\")\n except Exception as e:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n self.log.error(\"Unable to update alarms/status into database \"+str(e)+\" - TBINFO:\"+str(traceback.extract_tb(exc_tb)))\n self.cursor2.execute(\"rollback\")\n self.log.debug(\"Rollback done\")\n self.log.info(\"Done this run\")\n\n def cleanup(self):\n self.log.info(\"Cleaning up\")\n self.cursor1.close()\n self.cursor2.close()\n self.cursor3.close()\n self.connection.close()\n self.log.info(\"Successfully cleaned up\")\n\n def get_status_description(self,field,status_id):\n description=\"UNKNOWN\"\n query=\"select status_description from MPPT_DESCRIPTION where field='\"+str(field)+\"' and status_id=\"+str(status_id)\n self.log.debug(query)\n try:\n self.cursor3.execute(query)\n description=self.cursor3.fetchone()[0]\n except Exception as e:\n exc_type, exc_obj, exc_tb = sys.exc_info()\n self.log.error(\"Unable to get status description from database \"+str(e)+\" - TBINFO:\"+str(traceback.extract_tb(exc_tb)))\n return str(status_id)+\" (\"+description+\")\"\n\n\n\n\nif __name__ == '__main__':\n m=MPPTMonitoringThread(180,\"MPPTMonitoringThread\")\n m.initialize()\n m.call()\n m.cleanup()\n\n","sub_path":"solarplant/mpptmonitoring.py","file_name":"mpptmonitoring.py","file_ext":"py","file_size_in_byte":7658,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"61304760","text":"\n# coding: utf-8\n\n# In[1]:\n\n\nimport pandas as pd\n\nDS = pd.read_csv(\"https://archive.ics.uci.edu/ml/machine-learning-databases/adult/adult.data\", header=None, names=['age', \n 'workclass', 'fnlwgt', 'education', 'education-num',\n 'marital-status', 'occupation', 'relationship', 'race',\n 'sex', 'capital-gain', 'capital-loss', 'hours-per-week',\n 'native-country', 'Salary'])\n\n\n# In[2]:\n\n\nimport sqlalchemy\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy import Column, Integer, String\nfrom sqlalchemy.orm import sessionmaker\nfrom sqlalchemy import func\n\nengine = create_engine('sqlite:///:memory:', echo=False)\nBase = declarative_base()\n\nclass Adult(Base):\n __tablename__ = 'adult'\n\n id = Column(Integer, primary_key=True, autoincrement=True)\n age = Column(Integer)\n education_num = Column(Integer)\n marital_status = Column(String)\n occupation = Column(String)\n race = Column(String)\n sex = Column(String)\n hours_per_week = Column(String)\n native_country = Column(String)\n \n def __repr__(self):\n return \"\"\"<Adult(age='%s', education_num='%s', marital_status='%s', occupation='%s', race='%s', sex='%s', hours_per_week='%s', native_country='%s')>\"\"\" % (str(self.age), str(self.education_num), \n self.marital_status, self.occupation, self.race, self.sex, self.hours_per_week, self.native_country)\n \nBase.metadata.create_all(engine)\nSession = 
sessionmaker(bind=engine)\nsession = Session()\n\n\n# In[3]:\n\n\nfor i in range(50):\n a = Adult(age=int(DS.iloc[i]['age']), education_num=DS.iloc[i]['education-num'], \n marital_status=DS.iloc[i]['marital-status'].strip(),\n occupation=DS.iloc[i]['occupation'].strip(), race=DS.iloc[i]['race'].strip(), sex=DS.iloc[i]['sex'].strip(), \n hours_per_week=str(DS.iloc[i]['hours-per-week']), native_country=DS.iloc[i]['native-country'].strip())\n session.add(a)\n \nsession.commit()\n\nrows = session.query(func.count('*')).select_from(Adult).scalar()\n\nprint(\"Total rows inserted into db: %d\" % (rows))\n\n\n# In[4]:\n\n\na = session.query(Adult).filter_by(sex='Male').first()\n\ndbid = a.id\nprint(\"Primary Key: %d\" % (int(dbid)))\nprint(\"Hours Per Week Before Update: %d\" % (int(a.hours_per_week)))\n\na.hours_per_week = 55\n\nsession.commit()\n\na = session.query(Adult).filter_by(id=dbid).first()\nprint(\"Hours Per Week After Update: %d\" % (int(a.hours_per_week)))\n\n\n# In[5]:\n\n\na = session.query(Adult).filter_by(id=2).first()\n\ndbid = a.id\nprint(\"Primary Key: %d\" % (int(dbid)))\nprint(\"Hours Per Week Before Update: %d\" % (int(a.hours_per_week)))\n\na.hours_per_week = 60\n\nsession.commit()\n\na = session.query(Adult).filter_by(id=dbid).first()\nprint(\"Hours Per Week After Update: %d\" % (int(a.hours_per_week)))\n\n\n# In[6]:\n\n\na = session.query(Adult).filter_by(id=2).first()\nsession.delete(a)\nsession.commit()\n\na = session.query(Adult).filter_by(id=2).first()\n\nrows = session.query(func.count('*')).select_from(Adult).scalar()\nprint(\"Total rows remaining in db: %d\" % (rows))\n\nprint(a)\n\n\n# In[7]:\n\n\na = session.query(Adult).filter_by(id=1).first()\nsession.delete(a)\nsession.commit()\n\na = session.query(Adult).filter_by(id=1).first()\n\nrows = session.query(func.count('*')).select_from(Adult).scalar()\nprint(\"Total rows remaining in db: %d\" % (rows))\n\nprint(a)\n\n\n# In[8]:\n\n\nsession.query(Adult).filter(Adult.hours_per_week.in_(['30'])).all()\n\n\n# In[9]:\n\n\nsession.query(Adult).filter(Adult.sex.in_(['Female'])).all()\n\n\n# In[10]:\n\n\nfrom sqlalchemy import func\n\nsession.query(func.count(Adult.sex), Adult.sex).group_by(Adult.sex).all()\n\n\n# In[11]:\n\n\nsession.query(func.count('*')).select_from(Adult).scalar()\n\n","sub_path":"Session 13 Assignment.py","file_name":"Session 13 Assignment.py","file_ext":"py","file_size_in_byte":3929,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"628166322","text":"#!/usr/bin/env python\n\n''' Certain tracks in the VEGI browser, such as ChainD (orthology chain depth) and sncRNAS (sRNA),\nhave a strange stutter where every other base has height 0. This script fills in these single base\ngaps with the value from the previous base. 
'''\n\nfrom __future__ import print_function\nimport sys\nimport array\nimport argparse\n\n\ndef parse_cl():\n parser = argparse.ArgumentParser()\n parser.add_argument('infile', nargs='?', type=argparse.FileType('r'), default=sys.stdin,\n help='BedGraph input file [default: STDIN]')\n parser.add_argument('outfile', nargs='?', type=argparse.FileType('w'), default=sys.stdout,\n help='BedGraph output file [default: STDOUT]')\n return parser.parse_args()\n\n \ndef read_values(infile):\n values = {}\n for line in infile:\n if '#' == line[0] or 'track' == line[0:5]: # ignore comment lines\n print('WARNING: Ignoring line: '+line, sys.stderr)\n continue\n (chrom, start, end, val) = (line.split())[0:4]\n start = int(start); end = int(end); val = float(val)\n if chrom not in values:\n values[chrom] = array.array('f') # float\n if len(values[chrom]) < end:\n values[chrom].extend([0]*(end-len(values[chrom])))\n for i in range(start,end):\n values[chrom][i] = values[chrom][i]+val\n return values\n\ndef scan(values):\n LENGTH = 100\n results = {}\n for chrom in values:\n results[chrom] = array.array('f')\n results[chrom].extend([0]*(len(values[chrom])))\n for i in range(1,len(values[chrom])):\n if values[chrom][i] == 0:\n results[chrom][i] = values[chrom][i-1]\n else:\n results[chrom][i] = values[chrom][i]\n return results\n\n\ndef compress_and_write(results, outfile):\n for chrom in results:\n a = results[chrom]\n i = 0\n x = a[i]\n for j in range(len(a)):\n if x != a[j] or len(a)-1 == j:\n #if args.subtract_1: x=x-1\n if x > 0:\n print('%s\\t%i\\t%i\\t%.1f' % (chrom,i,j,x), file=outfile)\n i = j\n x = a[i]\n\n\n\n\n\nif __name__ == '__main__':\n args = parse_cl()\n values = read_values(args.infile)\n results = scan(values)\n compress_and_write(results, args.outfile)","sub_path":"PIATEA/fix_bedgraph_stutter.py","file_name":"fix_bedgraph_stutter.py","file_ext":"py","file_size_in_byte":2377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"234227841","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon May 29 05:54:31 2017\n\n@author: shubham\n\"\"\"\nimport train\nimport time\n#%%With AUgmentation\n\n#FOR 256x256\nimport imp\nimp.reload(train)\n#DATASET AND MODEL NAMES\ntim = [time.time()]\ntrain_subjects =[1,3,4,5,6,7,8,9,10]\nval_subjects = [2]\ndef get_file_list (subjects,axis):\n prefix = 'LESS_BG/'+'axis'+str(axis)+'/subject-'\n l=[]\n for item in subjects:\n l.append(prefix+str(item)+'-axis-'+str(axis)+'_lbg.h5')\n return l\n \nt = get_file_list(train_subjects,axis=0)\nv=get_file_list(val_subjects,axis=0)\nm='Y6_axis0_fold2.h5'\n\nprint('Training on:',t)\nprint('Validating on:',v)\n#DATASET AND MODEL NAMES\nnames={'training_dataset_file_names': t ,\n 'validation_dataset_file_names': v,\n 'output_model_file_name': m}\n##TRAINING PARAMS\nepochs=70\nbatch_size=8\n##NETWORK PARAMS\nnet_params={'lrate':1e-1,\n 'momentum':0.95,\n 'loss':'mixed',\n 'num_of_inputs': 2,\n 'device': '/gpu:1'}\n\n##AUGMENTATION PARAMS\n##DEFAULT PARAMS\nargs_dict={'featurewise_center':False,\n 'samplewise_center':False,\n 'featurewise_std_normalization':False,\n 'samplewise_std_normalization':False,\n 'zca_whitening':False,\n 'rotation_range':10.0,\n 'width_shift_range':0.2,\n 'height_shift_range':0.2,\n 'shear_range':None,#shear angle in radians\n 'zoom_range':[0.9,1.05],\n 'channel_shift_range':0.,\n 'fill_mode':'nearest',\n 'cval':0.,\n 'horizontal_flip':True,\n 'vertical_flip':True,\n 'rescale':None,\n 'preprocessing_function':None,\n 
'data_format':None,\n 'seed':1,\n 'num_of_input_branches':net_params['num_of_inputs'],\n 'max_q_size':1,\n 'elastic_deformation':False}\n\ntrain.train_with_gen (names,batch_size=batch_size,epochs=epochs,interval=1,\n net_params=net_params,generator_args=args_dict)\n\n\ntim.append(time.time())\n#%%AXIS1\nimp.reload(train)\nt = get_file_list(train_subjects,axis=1)\nv=get_file_list(val_subjects,axis=1)\n\nprint('Training on:',t)\nprint('Validating on:',v)\nm='Y6_axis1_fold2.h5'\nprint(m*10)\nprint('THIS IS AN EXPERIMENTAL RUN TO SEE THE EFFECT OF ENSEMBLING. \\\n MODEL USED IS UNET LIKE MODEL NOT THE DENSE ONE.')\n\n#DATASET AND MODEL NAMES\nnames={'training_dataset_file_names': t ,\n 'validation_dataset_file_names': v,\n 'output_model_file_name': m}\n\ntrain.train_with_gen (names,batch_size=batch_size,epochs=epochs,interval=1,\n net_params=net_params,generator_args=args_dict)\n\ntim.append(time.time())\n#%%AXIS2\nimp.reload(train)\nt = get_file_list(train_subjects,axis=2)\nv=get_file_list(val_subjects,axis=2)\n\nprint('Training on:',t)\nprint('Validating on:',v)\nm='Y6_axis2_fold2.h5'\nprint(m*10)\n\n#DATASET AND MODEL NAMES\nnames={'training_dataset_file_names': t ,\n 'validation_dataset_file_names': v,\n 'output_model_file_name': m}\n#\ntrain.train_with_gen (names,batch_size=batch_size,epochs=epochs,interval=1,\n net_params=net_params,generator_args=args_dict)\n\n\ntim.append(time.time())\n\nprint(tim)","sub_path":"run_fold2.py","file_name":"run_fold2.py","file_ext":"py","file_size_in_byte":3228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"91844883","text":"import argparse\r\nimport numpy as np\r\nimport os\r\nimport cv2\r\nfrom skimage import io\r\nfrom skimage import measure\r\nfrom skimage import transform\r\nfrom skimage import color\r\n\r\n# Compute the mean-squared error between two images\r\ndef MSE(srcpath, dstpath, gray2rgb = False, scale = 256):\r\n scr = io.imread(srcpath)\r\n dst = io.imread(dstpath)\r\n if gray2rgb:\r\n dst = np.expand_dims(dst, axis = 2)\r\n dst = np.concatenate((dst, dst, dst), axis = 2)\r\n if scale != (0, 0):\r\n scr = cv2.resize(scr, scale)\r\n dst = cv2.resize(dst, scale)\r\n mse = measure.compare_mse(scr, dst)\r\n return mse\r\n\r\n# Compute the normalized root mean-squared error (NRMSE) between two images\r\ndef NRMSE(srcpath, dstpath, gray2rgb = False, scale = 256, mse_type = 'Euclidean'):\r\n scr = io.imread(srcpath)\r\n dst = io.imread(dstpath)\r\n if gray2rgb:\r\n dst = np.expand_dims(dst, axis = 2)\r\n dst = np.concatenate((dst, dst, dst), axis = 2)\r\n if scale != (0, 0):\r\n scr = cv2.resize(scr, scale)\r\n dst = cv2.resize(dst, scale)\r\n nrmse = measure.compare_nrmse(scr, dst, norm_type = mse_type)\r\n return nrmse\r\n\r\n# Compute the peak signal to noise ratio (PSNR) for an image\r\ndef PSNR(srcpath, dstpath, gray2rgb = False, scale = 256):\r\n scr = io.imread(srcpath)\r\n dst = io.imread(dstpath)\r\n if gray2rgb:\r\n dst = np.expand_dims(dst, axis = 2)\r\n dst = np.concatenate((dst, dst, dst), axis = 2)\r\n if scale != (0, 0):\r\n scr = cv2.resize(scr, scale)\r\n dst = cv2.resize(dst, scale)\r\n psnr = measure.compare_psnr(scr, dst)\r\n return psnr\r\n\r\n# Compute the mean structural similarity index between two images\r\ndef SSIM(srcpath, dstpath, gray2rgb = False, scale = 256, RGBinput = True):\r\n scr = io.imread(srcpath)\r\n dst = io.imread(dstpath)\r\n if gray2rgb:\r\n dst = np.expand_dims(dst, axis = 2)\r\n dst = np.concatenate((dst, dst, dst), axis = 2)\r\n if scale != (0, 
0):\r\n scr = cv2.resize(scr, scale)\r\n dst = cv2.resize(dst, scale)\r\n ssim = measure.compare_ssim(scr, dst, multichannel = RGBinput)\r\n return ssim\r\n\r\ndef get_files(path):\r\n # read a folder, return the complete path\r\n ret = []\r\n for root, dirs, files in os.walk(path):\r\n for filespath in files:\r\n if os.path.join(root, filespath)[-3:] == 'jpg':\r\n ret.append(os.path.join(root, filespath))\r\n return ret\r\n\r\ndef get_jpgs(path):\r\n # read a folder, return the image name\r\n ret = []\r\n for root, dirs, files in os.walk(path):\r\n for filespath in files:\r\n if filespath[-3:] == 'jpg':\r\n ret.append(filespath)\r\n return ret\r\n \r\n# read a txt file, dropping the trailing EOL of each line\r\ndef text_readlines(filename):\r\n # Try to read a txt file and return a list. Return [] if there was a mistake.\r\n try:\r\n file = open(filename, 'r')\r\n except IOError:\r\n error = []\r\n return error\r\n content = file.readlines()\r\n # This for loop deletes the EOL (like \\n)\r\n for i in range(len(content)):\r\n content[i] = content[i][:len(content[i])-1]\r\n file.close()\r\n return content\r\n\r\n# save a list to a txt\r\ndef text_save(content, filename, mode = 'a'):\r\n # Try to save a list variable in txt file.\r\n file = open(filename, mode)\r\n for i in range(len(content)):\r\n file.write(str(content[i]) + '\\n')\r\n file.close()\r\n\r\n# Traditional accuracy indices for a dataset\r\ndef Dset_Accuracy(refpath_imglist, basepath_imglist, gray2rgb = False, scale = (0, 0)):\r\n # Define the lists saving the accuracy\r\n nrmselist = []\r\n psnrlist = []\r\n ssimlist = []\r\n nrmseratio = 0\r\n psnrratio = 0\r\n ssimratio = 0\r\n\r\n # Compute the accuracy\r\n for i in range(len(refpath_imglist)):\r\n # Full imgpath\r\n refimgpath = refpath_imglist[i]\r\n imgpath = basepath_imglist[i]\r\n print(refimgpath)\r\n print(imgpath)\r\n # Compute the traditional indices\r\n nrmse = NRMSE(refimgpath, imgpath, gray2rgb, scale, 'Euclidean')\r\n psnr = PSNR(refimgpath, imgpath, gray2rgb, scale)\r\n ssim = SSIM(refimgpath, imgpath, gray2rgb, scale, True)\r\n nrmselist.append(nrmse)\r\n psnrlist.append(psnr)\r\n ssimlist.append(ssim)\r\n nrmseratio = nrmseratio + nrmse\r\n psnrratio = psnrratio + psnr\r\n ssimratio = ssimratio + ssim\r\n print('The %dth image: nrmse: %f, psnr: %f, ssim: %f' % (i, nrmse, psnr, ssim))\r\n nrmseratio = nrmseratio / len(refpath_imglist)\r\n psnrratio = psnrratio / len(refpath_imglist)\r\n ssimratio = ssimratio / len(refpath_imglist)\r\n\r\n return nrmselist, psnrlist, ssimlist, nrmseratio, psnrratio, ssimratio\r\n \r\nif __name__ == \"__main__\":\r\n\r\n # Create argument parser\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument('--refpath', type = str, default = 'D:\\\\dataset\\\\Video\\\\test\\\\input_2dataset\\\\videvo', help = 'define reference path')\r\n parser.add_argument('--basepath', type = str, default = 'D:\\\\dataset\\\\Video\\\\test\\\\input\\\\videvo-gray', help = 'define imgpath')\r\n parser.add_argument('--gray2rgb', type = bool, default = False, help = 'whether an input is grayscale')\r\n parser.add_argument('--scale', type = tuple, default = (256, 256), help = 'whether the input needs resizing')\r\n parser.add_argument('--savelist', type = bool, default = False, help = 'whether the results should be saved')\r\n opt = parser.parse_args()\r\n print(opt)\r\n\r\n # Read all names\r\n refpath_imglist = get_files(opt.refpath)\r\n basepath_imglist = get_files(opt.basepath)\r\n a = get_jpgs(opt.refpath)\r\n b = get_jpgs(opt.basepath)\r\n assert a == b, 'the two datasets 
contain unpaired images, which is wrong'\r\n nrmselist, psnrlist, ssimlist, nrmseratio, psnrratio, ssimratio = Dset_Accuracy(refpath_imglist, basepath_imglist, gray2rgb = opt.gray2rgb, scale = opt.scale)\r\n print('The overall results: nrmse: %f, psnr: %f, ssim: %f' % (nrmseratio, psnrratio, ssimratio))\r\n\r\n # Save the files\r\n if opt.savelist:\r\n text_save(nrmselist, \"./nrmselist.txt\")\r\n text_save(psnrlist, \"./psnrlist.txt\")\r\n text_save(ssimlist, \"./ssimlist.txt\")\r\n ","sub_path":"simple metrics (PSNR SSIM MSE NRMSE)/NRMSE_PSNR_SSIM_bypath.py","file_name":"NRMSE_PSNR_SSIM_bypath.py","file_ext":"py","file_size_in_byte":6177,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"418921784","text":"#!/usr/bin/python\n# -*- coding:utf-8 -*-\n\n#File: cmd_spec.py\n#Description:\n# 1. ont cli cmd;\n# 2. usable over telnet/ssh/serial;\n#Date: 2018-07\n#Auth: zhang bin\n\n############################################################\n# Global Cli #\n############################################################\n#Echo: generic prompt string;\nprompt_disable = \">\"\nprompt = '#'\n#Echo: configuration terminal mode;\nprompt_config_terminal = 'config)#'\nprompt_config = \")#\"\nprompt_error_list = ['unknown']\n#Echo: marker for successful command execution;\nprompt_execSuccess = 'Completed: 100| Completed: 90| [OK]| success'\n\n#Command: go back one level\nexit = 'exit'\n#Command: return to privileged mode\nend = 'end'\n#Command: enter privileged mode\nenable = 'enable'\n############################################################\n# Show Cli #\n############################################################\n#Command: show the whole running configuration\n\n#Command: show system information (version number, etc.)\nshowSystemInfo = 'show system information'\n#Command: show the MAC address table\nshowMacTableVlan = 'show mac-address %s'\n\n############################################################\n# system cli #\n############################################################\nconfig_terminal = 'config terminal'\n\n\n############################################################\n# ont config: profile & auth #\n############################################################\npattern_ont_id = '0/\\d{1,2}/\\d{1,3}'\n\n\nshow_deploy_dba = 'show running-config deploy-profile-dba'\nshow_deploy_vlan = 'show running-config deploy-profile-vlan'\nshow_deploy_line = 'show running-config deploy-profile-line'\nshow_deploy_rule = 'show running-config deploy-profile-rule'\npatten_deploy_id = 'aim\\s(\\d{1,4})'\n\nget_ont_unauth_id_by_pon = 'show ont-find list interface gpon {}' #.format(0/4)\nget_ont_unauth_id_by_sn = 'show ont-find list sn string-hex {}'#.format(0/4)\nget_ont_auth_id_by_sn = 'show ont brief sn string-hex'#.format(sn)\n\naim_id = 'aim {0}' #create\naim_id_name = 'aim {0} name {1}' #create\ndel_aim = 'delete aim {0}' #delete\nactive = 'active' #activate\n\n# dba profile config\ndeploy_profile_dba = 'deploy profile dba'\ndba_type1 = 'type1 fix {0}'\ndba_type2 = 'type2 assured {0}'\ndba_type3 = 'type3 assured {0} max {1}'\ndba_type4 = 'type4 max {0}'\ndba_type5 = 'type5 fix {0} assured {1} max {2}'\n\n# vlan profile config\ndeploy_profile_vlan = 'deploy profile vlan'\npvid = 'default vlan {0}'\ntrans_old_to_new_vlan = 'translate old-vlan {0} new-vlan {1}'\ntrans_old_to_new_vlan_pbit = 'translate old-vlan {0} {1} new-vlan {2} {3}'\n\n# traffic profile config\ndeploy_profile_traffic = 'deploy profile traffic'\n\n# line profile config\ndeploy_profile_line = 'deploy profile line'\ndevice_type = 'device type {0}'\nadd_tcont = 'tcont {0}'\ndba_bind_tcont = 'tcont {0} profile dba {1}'\ngem_bind_tcont = 'gem {0} tcont {1} vlan-profile {2}'\ngem_bind_traffic = ''\ngem_bind_vlan = 'gem {0} 
tcont {1} vlan-profile {2}'\nmapping_mode = ''\nmaping_gem_vlan = ''\nmaping_gem_pbit = ''\nmaping_gem_vlan_pbit = ''\nmaping_gem_port = ''\n\n# ont auth config\ndeploy_profile_rule = 'deploy profile rule'\nont_auth_aim_id = \"aim {0}\" # aim 0/4/103\nont_auth_aim_id_name = \"aim {0} name {1}\" # aim 0/4/103\nont_auth_sn = 'permit sn string-hex {0} line {1} default line {2}' #sn,line_profile,line_profile\nont_auth_loid = 'permit sn string-hex {0} line {1} default line {2}'#sn,line_profile,line_profile\nont_auth_password = 'permit sn string-hex {0} line {1} default line {2}'#sn,line_profile,line_profile\nont_auth_loid_password = 'permit sn string-hex {0} line {1} default line {2}'#sn,line_profile,line_profile\n\n############################################################\n# show ont config /info #\n############################################################\nshow_ont_profile = ''\nshow_ont_brif_online = ''\nshow_ont_logging = ''\nshow_ont_brif_id = 'show ont brief {}'#.format(ont_id)\nshow_ont_brif_sn = 'show ont brief sn string-hex {}'\nshow_ont_info = 'show ont info {}'#.format(ont_id)\npatten_ont_version = 'Software\\s*Version\\s*:\\s*(\\S*)'\n\n\n############################################################\n# ont mgmt #\n############################################################\n\n\n#Command: upgrade the ont-image via ftp\nupload_ont_file = 'load ont-image ftp inet {0} {1} {2} {3}' # ftp,file,user,password.\nont_upgrade_auto = 'ont upgrade auto-reboot {}' #.format(ont_id)\nont_upgrade_manual = 'ont upgrade manual-reboot {}' #.format(ont_id)\nshow_ont_upd_status = 'show ont upgrade-status image {}' #.format(ont_id)\n\nont_reboot = 'ont reboot {}'#.format(ont_id)\nont_reboot_sn = 'ont reboot sn string-hex {}'\n\n\nont_deactive = 'ont deactive {}' #.format(ont_id)\nont_active = 'ont active {}' #.format(ont_id)\n\n############################################################\n# Vlan interface #\n############################################################\n\n\n############################################################\n# Switchport interface #\n############################################################\n\n","sub_path":"OltLib/cmd_spec.py","file_name":"cmd_spec.py","file_ext":"py","file_size_in_byte":5348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"419986575","text":"import pytest\n\n\ndef find_consecutive_runs(num_list):\n index_list = []\n\n for index in range(2, len(num_list)):\n window = num_list[index-2:index+1]\n difference = window[1] - window[0]\n if abs(difference) != 1 or window[2] - window[1] != difference:\n continue\n index_list.append(index-2)\n\n return index_list or None\n\n\n@pytest.mark.parametrize('input_list,expected_output', [\n ([1, 2, 3, 5, 10, 9, 8, 9, 10, 11, 7, 8, 7], [0, 4, 6, 7]),\n ([1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1], None),\n ([1, 2, 3, 4, 5, 7, 9, 22, 87, 88, 89, 88, 87], [0, 1, 2, 8, 10]),\n ([9, 8, 7, 2, 5, 12, 183, 183, 184, 185, 186, 185, 185], [0, 7, 8]),\n ([1, 2, 3], [0]),\n ([1, 2, 1, 2, 1, 2, 3], [4]),\n ([3, 2, 1], [0]),\n ([3, 2, 1, 2, 3], [0, 2]),\n ([], None),\n ([1], None)\n])\n\n\ndef test_find_consecutive_runs(input_list, expected_output):\n assert find_consecutive_runs(input_list) == expected_output\n","sub_path":"samples/fcr54.py","file_name":"fcr54.py","file_ext":"py","file_size_in_byte":946,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"338975889","text":"from pathlib import Path\nimport argparse\nfrom shutil import copy\nfrom random import 
sample\n\n\ndef main(data_dir, backup_dir, percentage, save):\n # temporary:\n data = Path(data_dir) #from user\n classes = data / 'classes.txt'\n out = Path(save) #from user\n train_txt = out / 'train.txt'\n test_txt = out / 'test.txt'\n obj_names = out / 'obj.names'\n obj_data = out / 'obj.data'\n backup = Path(backup_dir) # from user\n\n if not out.exists(): out.mkdir()\n\n #copying classes.txt to obj.names\n if obj_names.exists(): obj_names.unlink()\n copy(classes, obj_names)\n\n # Get the number of classes\n clss = len(classes.read_text().split('\\n'))\n\n all_imgs = list(data.glob(\"*.jpg\"))\n # run this script in the root folder!\n all_imgs = sample(all_imgs, len(all_imgs))\n\n # percentage split\n percent = percentage # from user\n percent /= 100\n split_at = int(len(all_imgs) * percent) #141 #126 #15\n\n # output checked\n train_imgs = all_imgs[:split_at]\n test_imgs = all_imgs[split_at:]\n\n # making train & test txts\n\n if train_txt.exists(): train_txt.unlink()\n train_txt.write_text('\\n'.join(map(str, train_imgs)))\n\n if test_txt.exists(): test_txt.unlink()\n test_txt.write_text('\\n'.join(map(str, test_imgs)))\n\n write_text = \"classes = {}\\ntrain = {}\\nvalid = {}\\nnames = {}\\nbackup = {}\".format(\n str(clss), str(train_txt.absolute()), \n str(test_txt.absolute()), str(obj_names.absolute()), \n str(backup.absolute()))\n\n if obj_data.exists(): obj_data.unlink()\n obj_data.write_text(write_text)\n\n print('Done! :)')\n\nif __name__ == '__main__':\n # Parser\n parser = argparse.ArgumentParser()\n parser.add_argument('-d', '--data', type=str, required=True, \n help=\"Path to folder containing train & test folders.\")\n parser.add_argument('-o', '--out', type=str, required=True, \n help=\"Path to backup folder (written to the 'backup' field of obj.data).\")\n parser.add_argument('-s', '--save', type=str, required=True, \n help=\"Path where obj folder will be saved.\")\n parser.add_argument('-p', '--percent', type=int, required=True, \n help=\"Percentage of images to put in the training split.\")\n args = parser.parse_args()\n\n main(args.data, args.out, args.percent, args.save)","sub_path":"Object detection/make_obj_3_6.py","file_name":"make_obj_3_6.py","file_ext":"py","file_size_in_byte":2328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"24985126","text":"from .general import genEquation\r\nimport sympy as sp\r\nimport re\r\n\r\n\r\ndef createMatrix(args=''):\r\n vector = re.findall(r\"[\\w\\.\\-\\+\\*\\/\\(\\)]+\", args)\r\n if len(vector) != 9:\r\n return None\r\n return sp.Matrix([vector[:3], vector[3:6], vector[6:9]])\r\n\r\n\r\ndef check_matrix(args):\r\n mat = createMatrix(args)\r\n # a.norm() ||a||\r\n # a.cross(b) axb\r\n # a.dot(b) a*b\r\n\r\n if mat is None:\r\n return {'result' : 'Podana macierz ma złą długość. Obsługiwane są macierzy o rozmiarach 3x3',\r\n 'error' : True }\r\n\r\n if mat.is_symbolic():\r\n return {\r\n 'operations': genEquation(\"R = \", mat),\r\n 'result': 'Dana macierz jest macierzą symboliczną. 
Nie możemy sprawdzić jej ortonormalności.'\r\n }\r\n\r\n if mat:\r\n a = mat.col(0)\r\n b = mat.col(1)\r\n c = mat.col(2)\r\n\r\n bad = 0\r\n list_of_out = []\r\n\r\n if not sp.simplify(a.norm()) == 1:\r\n list_of_out.append(\"Wektor a nie jest wektorem znormalizowanym do dlugości jednostkowej.\")\r\n bad = 1\r\n if not sp.simplify(b.norm()) == 1:\r\n list_of_out.append(\"Wektor b nie jest wektorem znormalizowanym do dlugości jednostkowej.\")\r\n bad = 1\r\n if not sp.simplify(c.norm()) == 1:\r\n list_of_out.append(\"Wektor c nie jest wektorem znormalizowanym do dlugości jednostkowej.\")\r\n bad = 1\r\n\r\n if not sp.simplify(a.dot(b)) == 0:\r\n list_of_out.append(\"a ∙ b != 0\")\r\n bad = 1\r\n if not sp.simplify(b.dot(c)) == 0:\r\n list_of_out.append(\"b ∙ c != 0 \")\r\n bad = 1\r\n if not sp.simplify(c.dot(a)) == 0:\r\n list_of_out.append(\"c ∙ a != 0\")\r\n bad = 1\r\n\r\n if not sp.simplify(a.cross(b)) == c:\r\n list_of_out.append(\"a x b != c\")\r\n bad = 1\r\n if not sp.simplify(b.cross(c)) == a:\r\n list_of_out.append(\"b x c != a \")\r\n bad = 1\r\n if not sp.simplify(c.cross(a)) == b:\r\n list_of_out.append(\"c x a != b\")\r\n bad = 1\r\n\r\n det = sp.simplify(mat.det())\r\n inverted = \"\"\r\n\r\n if det != 1:\r\n list_of_out.append(\"Wyznacznik jest równy {}, a musi być równy 1.\".format(det))\r\n bad = 1\r\n else:\r\n if not bad:\r\n inverted = genEquation(\"R^{-1} = R^{T} =\", mat.transpose())\r\n\r\n a1, a2 = \"\", \".\"\r\n if bad:\r\n a1, a2 = \"nie\", \", dlatego że:\"\r\n\r\n return {\r\n 'operations': genEquation(\"R = \", mat) + genEquation(\"a = \", a, \";\", \"b = \", b, \";\", \"c = \", c),\r\n 'msg': list_of_out,\r\n 'result': \"Podana macierz {} jest macierzą ortonormalną{}\".format(a1, a2),\r\n 'inverted': inverted}\r\n\r\n return None\r\n","sub_path":"project/calcul/checkMat.py","file_name":"checkMat.py","file_ext":"py","file_size_in_byte":2834,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"309613854","text":"# Copyright (c) 2016-present, Facebook, Inc.\n#\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\n# pyre-strict\n\n\nfrom typing import Callable, Iterable, List, Optional\n\nfrom .inspect_parser import extract_qualified_name\nfrom .model import CallableModel, Model\nfrom .model_generator import Configuration, ModelGenerator\nfrom .view_generator import DjangoUrls, django_urls_from_configuration, get_all_views\n\n\nclass ExitNodeGenerator(ModelGenerator):\n def __init__(\n self,\n django_urls: Optional[DjangoUrls] = None,\n whitelisted_views: Optional[List[str]] = None,\n ) -> None:\n self.django_urls: Optional[\n DjangoUrls\n ] = django_urls or django_urls_from_configuration()\n self.whitelisted_views: List[\n str\n ] = whitelisted_views or Configuration.whitelisted_views\n\n def gather_functions_to_model(self) -> Iterable[Callable[..., object]]:\n django_urls = self.django_urls\n if django_urls is None:\n return []\n return get_all_views(django_urls)\n\n def compute_models(\n self, functions_to_model: Iterable[Callable[..., object]]\n ) -> Iterable[Model]:\n exit_nodes = set()\n\n for view_function in functions_to_model:\n qualified_name = extract_qualified_name(view_function)\n if qualified_name in self.whitelisted_views:\n continue\n try:\n model = CallableModel(\n returns=\"TaintSink[ReturnedToUser]\", callable_object=view_function\n )\n exit_nodes.add(model)\n except ValueError:\n pass\n\n return 
sorted(exit_nodes)\n","sub_path":"tools/generate_taint_models/get_exit_nodes.py","file_name":"get_exit_nodes.py","file_ext":"py","file_size_in_byte":1753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"431190403","text":"import numpy as np\nfrom engine.strategy import Strategy\n\n\nclass MovingAverageCrossStrategy(Strategy):\n\n def __init__(self, bars, events, portfolio):\n super().__init__(bars, events, portfolio)\n self.strategy_name = 'ma_cross'\n self.short_window = 10\n self.long_window = 100\n self.bought = self._calculate_initial_bought()\n\n def _calculate_initial_bought(self):\n bought = {}\n for s in self.symbol_list:\n bought[s] = 'OUT'\n return bought\n\n def handle_bar(self):\n for symbol in self.symbol_list:\n bars = self.bars.get_latest_bars_values(\n symbol, \"close\", N=self.long_window)\n if bars is not None and bars != []:\n short_sma = np.mean(bars[-self.short_window:])\n long_sma = np.mean(bars[-self.long_window:])\n\n if short_sma > long_sma and self.bought[symbol] == \"OUT\":\n self.order_percent(symbol, 0.1, 'LONG')\n self.bought[symbol] = 'LONG'\n\n elif short_sma < long_sma and self.bought[symbol] == \"LONG\":\n self.order_percent(symbol, 0.1, 'SHORT')\n self.bought[symbol] = 'OUT'\n","sub_path":"engine/strategies/ma_cross.py","file_name":"ma_cross.py","file_ext":"py","file_size_in_byte":1224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"29456281","text":"import argparse\nimport os\nimport yaml\n\nfrom hugin.flowcell_monitor import FlowcellMonitor\n\nCONFIG = {}\nDEFAULT_CONFIG = os.path.join(os.environ['HOME'], '.hugin/config.yaml')\n\ndef monitor_flowcells():\n parser = argparse.ArgumentParser(description=\"A script that will monitor specified run folders and update a Trello board as the status of runs changes\")\n parser.add_argument('--config', default=DEFAULT_CONFIG, action='store', help=\"Config file with e.g. Trello credentials and options\")\n args = parser.parse_args()\n\n assert os.path.exists(args.config), \"Could not locate config file {}\".format(args.config)\n\n with open(args.config) as config:\n CONFIG.update(yaml.load(config) or {})\n\n\n flowcell_monitor = FlowcellMonitor(CONFIG)\n flowcell_monitor.update_trello_board()\n\n\nif __name__ == \"__main__\":\n monitor_flowcells()\n\n\n","sub_path":"scripts/monitor_flowcells.py","file_name":"monitor_flowcells.py","file_ext":"py","file_size_in_byte":853,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"66739658","text":"from selenium import webdriver\n\n\nclass CssSelectors():\n\n def findElementByCssSelectors(self):\n\n baseURL = \"https://letskodeit.teachable.com/p/practice\"\n\n # Instantiation of Chrome driver\n driver = webdriver.Chrome()\n\n # Opening provided URL\n driver.get(baseURL)\n\n # Finding element by css selectors:\n # Syntax: tag[attribute='value']\n # '#' symbol for elements with 'id' attribute - '#displayed-text'\n # '.' 
symbol for elements with 'class' attribute - '.displayed-text'\n # To pick one of multiple matching elements, combine tag and id - 'input#displayed-text' (in our\n # case it is the 'input' element whose 'id' is equal to 'displayed-text')\n elementByCssSelectors = driver.find_element_by_css_selector(\"input[id='displayed-text']\")\n\n if elementByCssSelectors is not None:\n print(\"The element with provided 'css selector' exists on the page\")\n\nchromeObject = CssSelectors()\nchromeObject.findElementByCssSelectors()","sub_path":"CSSSelectors/find_element_by_css_selectors.py","file_name":"find_element_by_css_selectors.py","file_ext":"py","file_size_in_byte":1030,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"37221745","text":"import json\nimport os\nimport shutil\n\nfrom virtool_cli.utils import create_otu_path\n\nOTU_KEYS = [\n \"_id\",\n \"name\",\n \"abbreviation\",\n \"schema\",\n \"taxid\"\n]\n\nISOLATE_KEYS = [\n \"id\",\n \"source_type\",\n \"source_name\",\n \"default\"\n]\n\nSEQUENCE_KEYS = [\n \"_id\",\n \"accession\",\n \"definition\",\n \"host\",\n \"sequence\"\n]\n\n\ndef run(src_path: str, output: str):\n \"\"\"\n Divide a reference.json file from Virtool into a src tree.\n\n Parameters:\n src_path (str): Path to a reference.json file\n output (str): Path to where the src tree should be generated\n\n \"\"\"\n\n shutil.rmtree(output, ignore_errors=True)\n os.mkdir(output)\n\n with open(src_path, \"r\") as export_handle:\n data = json.load(export_handle)\n\n for otu in data[\"otus\"]:\n\n otu_path = build_otu(output, otu)\n\n isolates = otu.pop(\"isolates\")\n\n for isolate in isolates:\n\n isolate_path = build_isolate(otu_path, isolate)\n\n sequences = isolate.pop(\"sequences\")\n\n for sequence in sequences:\n build_sequence(isolate_path, sequence)\n\n with open(os.path.join(output, \"meta.json\"), \"w\") as f:\n json.dump({\n \"data_type\": data[\"data_type\"],\n \"organism\": data[\"organism\"]\n }, f)\n\n\ndef build_otu(output: str, otu: dict) -> str:\n \"\"\"\n Creates a directory for all OTUs that begin with a particular\n letter if it doesn't already exist. 
Generates a directory for a\n given OTU and copies key information about it to an otu.json file.\n\n Parameters:\n output (str): Path to where the src tree should be generated\n otu (dict): Dictionary of an OTU\n\n Returns:\n otu_path (str): Path to a newly generated OTU directory\n\n \"\"\"\n lower_name = otu[\"name\"].lower()\n first_letter = lower_name[0]\n\n try:\n os.mkdir(os.path.join(output, first_letter))\n except FileExistsError:\n pass\n\n otu_path = create_otu_path(lower_name, output, first_letter)\n os.mkdir(otu_path)\n\n with open(os.path.join(otu_path, \"otu.json\"), \"w\") as f:\n if \"schema\" not in otu:\n otu[\"schema\"] = list()\n\n json.dump({key: otu.get(key) for key in OTU_KEYS}, f, indent=4)\n\n return otu_path\n\n\ndef build_isolate(otu_path: str, isolate: dict) -> str:\n \"\"\"\n Creates a directory for a given isolate and generates\n an isolate.json file with key information about it.\n\n Parameters:\n otu_path (str): A path to a specified OTU\n isolate (dict): A dictionary with information on one of the\n OTU's isolates\n\n Returns:\n isolate_path: A path to a newly generated isolate directory\n\n \"\"\"\n isolate_path = os.path.join(otu_path, isolate[\"id\"])\n os.mkdir(isolate_path)\n\n with open(os.path.join(isolate_path, \"isolate.json\"), \"w\") as f:\n json.dump({key: isolate[key] for key in ISOLATE_KEYS}, f, indent=4)\n\n return isolate_path\n\n\ndef build_sequence(isolate_path: str, sequence: dict):\n \"\"\"\n Generates a JSON file for one of the isolate's sequences\n\n Parameters:\n isolate_path (str): A path to a specified isolate\n sequence (dict): A dictionary containing information on one of the \n isolates' sequences\n \n \"\"\"\n with open(os.path.join(isolate_path, \"{}.json\".format(sequence[\"_id\"])), \"w\") as f:\n json.dump({key: sequence[key] for key in SEQUENCE_KEYS}, f, indent=4)\n","sub_path":"virtool_cli/divide.py","file_name":"divide.py","file_ext":"py","file_size_in_byte":3506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"270244439","text":"\"\"\"Unit test for treadmill.appcfg.features.docker\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport unittest\n\nimport mock\n\nfrom treadmill.appcfg import features\n\n\nclass AppCfgDockerFeatureTest(unittest.TestCase):\n \"\"\"Test for docker feature\n \"\"\"\n\n @mock.patch('treadmill.appcfg.features.docker._get_user_uid_gid',\n mock.Mock(return_value=(1, 1)))\n @mock.patch('treadmill.subproc.resolve', mock.Mock(return_value='foo'))\n @mock.patch('treadmill.appcfg.features.docker._get_docker_registry',\n mock.Mock(return_value='foo:5050'))\n def test_docker_feature(self):\n \"\"\"test apply dockerd feature\n \"\"\"\n manifest = {\n 'services': [],\n 'system_services': [],\n 'features': ['docker'],\n 'proid': 'foo',\n }\n\n tm_env = mock.Mock(\n cell='testcell',\n zkurl='zookeeper://foo@foo:123',\n apps_dir='apps',\n root='/var/tmp/treadmill',\n )\n\n self.assertTrue(features.feature_exists('docker'))\n\n feature_mod = features.get_feature('docker')(tm_env)\n feature_mod.configure(manifest)\n self.assertEqual(len(manifest['services']), 1)\n self.assertEqual(manifest['services'][0]['name'], 'dockerd')\n self.assertTrue(manifest['services'][0]['root'])\n\n\nif __name__ == '__main__':\n 
unittest.main()\n","sub_path":"tests/appcfg/docker_feature_test.py","file_name":"docker_feature_test.py","file_ext":"py","file_size_in_byte":1500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"33534816","text":"import soundfile as sf\r\nimport os\r\nimport numpy as np\r\nimport math\r\nimport random\r\n\r\n\r\n# Add noise data to the clean speech data\r\ndef Get_Data(Voice, noise, Vn, snr):\r\n Data, Fs = sf.read(Voice)\r\n data, fs = sf.read(noise)\r\n print(Fs)\r\n print(fs)\r\n if fs == Fs and len(Data)<=len(data):\r\n Average_Energy = np.sum(Data ** 2) / len(Data)\r\n average_energy = np.sum(data ** 2) / len(data)\r\n k = math.sqrt(Average_Energy / average_energy / 10 ** (snr * 0.1))\r\n print(Average_Energy, average_energy, k)\r\n num = random.randint(8000,len(data)-len(Data)-8000)\r\n Data_new = Data + data[num:len(Data)+num] * k\r\n print(Vn)\r\n sf.write(Vn, Data_new, 8000)\r\n return Data, data, Data_new\r\n else:\r\n print('error: fs!=Fs or len(Voice)>len(noise)')\r\n return 0, 0, 0\r\n\r\n\r\ndef main(Voicedir, Noisedir, snr):\r\n # Iterate over the noise files\r\n for files in os.listdir(Noisedir):\r\n print(Noisedir)\r\n Noise_wav = os.path.join(Noisedir, files)\r\n print(Noise_wav)\r\n new_dir = Noisedir + '_' + files.split('.')[0] + '_' + str(snr)\r\n print(new_dir)\r\n # print('*',new_dir)\r\n\r\n # The three values are: 1. parent directory Rootdir 2. all folder names (without path) 3. all file names (without path)\r\n # Iterate over the clean speech files\r\n for Parent, Dirnames, Filenames in os.walk(Voicedir):\r\n print(Voicedir)\r\n print(Parent)\r\n print(Dirnames)\r\n print(Filenames)\r\n for Dirname in Dirnames:\r\n Dirs = os.path.join(Parent, Dirname)\r\n print(Dirs)\r\n for Files in os.listdir(Dirs):\r\n Voice_wav = os.path.join(Dirs, Files)\r\n\r\n # Noisedir: noise parent directory; files: noise file name\r\n # Noise_wav: noise file\r\n\r\n # Parent/Voicedir: speech parent directory; Dirname: speech folder name\r\n # Dirs: speech folder path; Files: speech file name\r\n # Voice_wav: speech file\r\n\r\n # new_dir: noisy speech parent directory\r\n # path: noisy speech directory\r\n # VN: noisy speech file\r\n\r\n path = os.path.join(new_dir, Dirname)\r\n if not os.path.exists(path):\r\n os.makedirs(path)\r\n VN = os.path.join(path, Files)\r\n\r\n print(Voice_wav, Noise_wav, VN, snr)\r\n\r\n Voice_Data, noise_data, Voice_noise = Get_Data(Voice_wav, Noise_wav, VN, snr)\r\n\r\n\r\nif __name__ == '__main__':\r\n SNR = [0, 5, 10, 20, 30] # signal-to-noise ratios\r\n # In Python 3 the slash direction is not a worry, but a backslash needs \\\\ to remove its special escape meaning, or prefix the string with r''\r\n voicedir = r'G:\\\\DSP_python\\\\mix_wav\\\\wav' # clean speech directory\r\n noisedir = r'G:\\\\DSP_python\\\\mix_wav\\\\noise' # noise directory\r\n for i in SNR:\r\n main(voicedir, noisedir, i)\r\n print(i)\r\n\r\n# Under the clean speech folder, create subfolders first, then put the .wav speech files inside\r\n# Pure noise .wav files go directly in the noise folder","sub_path":"mix_wav/add_n.py","file_name":"add_n.py","file_ext":"py","file_size_in_byte":3099,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"500426999","text":"\"\"\"Tests of builtins (in pytd/builtins/__builtins__.pytd).\n\nFile 3/3. Split into parts to enable better test parallelism.\n\"\"\"\n\nimport unittest\n\n\nfrom pytype import abstract\nfrom pytype import utils\nfrom pytype.tests import test_inference\n\n\nclass BuiltinTests2(test_inference.InferenceTest):\n \"\"\"Tests for builtin methods and classes.\"\"\"\n\n def testSuperAttribute(self):\n ty = self.Infer(\"\"\"\n x = super.__name__\n \"\"\")\n self.assertTypesMatchPytd(ty, \"\"\"\n x = ... # type: str\n \"\"\")\n\n def testSlice(self):\n ty = self.Infer(\"\"\"\n x1 = [1,2,3][1:None]\n x2 = [1,2,3][None:2]\n x3 = [1,2,3][None:None]\n x4 = [1,2,3][1:3:None]\n \"\"\")\n self.assertTypesMatchPytd(ty, \"\"\"\n from typing import List\n x1 = ... # type: List[int]\n x2 = ... 
# type: List[int]\n x3 = ... # type: List[int]\n x4 = ... # type: List[int]\n \"\"\")\n\n def testImportExternalFunction(self):\n ty = self.Infer(\"\"\"\n from __builtin__ import next\n v = next(iter([1, 2, 3]))\n \"\"\")\n self.assertTypesMatchPytd(ty, \"\"\"\n v = ... # type: int\n \"\"\")\n\n def testAddStrAndBytearray(self):\n ty = self.Infer(\"\"\"\n v = \"abc\" + bytearray()\n \"\"\")\n self.assertTypesMatchPytd(ty, \"\"\"\n v = ... # type: bytearray\n \"\"\")\n\n def testImplicitTypeVarImport(self):\n ty, errors = self.InferAndCheck(\"v = \" + abstract.T)\n self.assertTypesMatchPytd(ty, \"\"\"\n from typing import Any\n v = ... # type: Any\n \"\"\")\n self.assertErrorLogIs(errors, [(1, \"name-error\")])\n\n def testExplicitTypeVarImport(self):\n self.assertNoErrors(\"\"\"\n from __builtin__ import _T\n _T\n \"\"\")\n\n def testClassOfType(self):\n ty = self.Infer(\"\"\"\n v = int.__class__\n \"\"\")\n self.assertTypesMatchPytd(ty, \"\"\"\n from typing import Type\n v = ... # type: Type[type]\n \"\"\")\n\n def testExceptionMessage(self):\n ty = self.Infer(\"\"\"\n class MyException(Exception):\n def get_message(self):\n return self.message\n \"\"\", deep=True)\n self.assertTypesMatchPytd(ty, \"\"\"\n class MyException(Exception):\n def get_message(self) -> str\n \"\"\")\n\n def testIterItems(self):\n ty = self.Infer(\"\"\"\n lst = list({\"a\": 1}.iteritems())\n \"\"\")\n self.assertTypesMatchPytd(ty, \"\"\"\n from typing import List, Tuple\n lst = ... # type: List[Tuple[str, int]]\n \"\"\")\n\n def testSuper(self):\n with utils.Tempdir() as d:\n d.create_file(\"foo.pyi\", \"\"\"\n from typing import Type\n def f(x: type): ...\n def g(x: Type[super]): ...\n \"\"\")\n ty = self.Infer(\"\"\"\n from __future__ import google_type_annotations\n from typing import Any, Type\n import foo\n def f(x): ...\n def g(x: object): ...\n def h(x: Any): ...\n def i(x: type): ...\n def j(x: Type[super]): ...\n f(super)\n g(super)\n h(super)\n i(super)\n j(super)\n foo.f(super)\n foo.g(super)\n v = super\n \"\"\", pythonpath=[d.path], deep=True)\n self.assertTypesMatchPytd(ty, \"\"\"\n from typing import Any, Type\n foo = ... # type: module\n def f(x) -> None: ...\n def g(x: object) -> None: ...\n def h(x: Any) -> None: ...\n def i(x: type) -> None: ...\n def j(x: Type[super]) -> None: ...\n v = ... # type: Type[super]\n \"\"\")\n\n @unittest.skip(\"broken\")\n def testClear(self):\n ty = self.Infer(\"\"\"\\\n x = {1, 2}\n x.clear()\n y = {\"foo\": 1}\n y.clear()\n \"\"\")\n self.assertTypesMatchPytd(ty, \"\"\"\n from typing import Dict, Set\n x = ... # type: Set[nothing]\n y = ... # type: Dict[nothing, nothing]\n \"\"\")\n\n def testCmp(self):\n ty = self.Infer(\"\"\"\n if not cmp(4, 4):\n x = 42\n \"\"\")\n self.assertTypesMatchPytd(ty, \"\"\"\n x = ... # type: int\n \"\"\")\n\n def testRepr(self):\n ty = self.Infer(\"\"\"\n if repr(\"hello world\"):\n x = 42\n \"\"\")\n self.assertTypesMatchPytd(ty, \"\"\"\n x = ... 
# type: int\n \"\"\")\n\n\nif __name__ == \"__main__\":\n test_inference.main()\n","sub_path":"pytype/tests/test_builtins3.py","file_name":"test_builtins3.py","file_ext":"py","file_size_in_byte":4099,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"222976697","text":"import os\n\nfrom setuptools import setup, find_packages\n\nhere = os.path.abspath(os.path.dirname(__file__))\nwith open(os.path.join(here, 'README.txt')) as f:\n README = f.read()\nwith open(os.path.join(here, 'CHANGES.txt')) as f:\n CHANGES = f.read()\n\nrequires = [\n 'httplib2',\n 'lxml',\n 'pyramid',\n ]\n\nsetup(name='PyXadapterlib',\n version='1.9',\n description='X-road client/server library',\n long_description=README + '\\n\\n' + CHANGES,\n classifiers=[\n \"Programming Language :: Python\",\n ],\n author='Ahti Kelder',\n author_email='',\n url='',\n keywords='web wsgi xroad x-tee',\n packages=find_packages(),\n include_package_data=True,\n zip_safe=False,\n install_requires=requires,\n )\n","sub_path":"pyxadapterlib/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"360448221","text":"### \n#\n# Electripy, HAL library for Python\n#\n# Library was originally created for WeIO, www.we-io.net and than forked as a\n# separate project in order to promote concept of using interpreted languages\n# in microcontrolers.\n# \n# This library is common effort of their original creators\n# Uros PETREVSKI, Drasko DRASKOVIC and 8devices team\n#\n# ___ ___ __ ___ __ __ \n# |__ | |__ / ` | |__) | |__) \\ / \n# |___ |___ |___ \\__, | | \\ | | | \n#\n# Hardware Abstraction Layer Library\n# for Python\n#\n# The MIT License (MIT)\n# \n# Copyright (c) 2014 Nodesign.net, Uros PETREVSKI, Drasko DRASKOVIC\n# \n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n# \n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n# \n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n#\n# Authors : \n# Uros PETREVSKI \n# Drasko DRASKOVIC \n#\n###\n\nclass Wirings():\n #################################\n # Pins and Global definitions\n #################################\n HIGH = 1\n LOW = 0\n\n INPUT_PULLUP = 4\n INPUT_PULLDOWN = 2\n INPUT_HIGHZ = 0\n INPUT_ADC = 5\n OUTPUT = 1\n PWM0_OUTPUT = 6\n PWM1_OUTPUT = 7\n OUTPUT_PWM = 8\n\n # This is remapping of UPER pinouts to WeIO pinouts\n # UPER FUNC WEIO\n PINS = []\n PINS.append(20) #RX 0\n PINS.append(19) #TX 1\n\n PINS.append(13) #MOSI 0 2\n PINS.append(12) #MISO 0 3\n PINS.append(14) #SCK 0 4\n\n PINS.append(5) #MOSI 1 5\n PINS.append(11) #MISO 1 6\n PINS.append(4) #SCK 1 7\n\n ## !!!VERIFY i2c not correct!!!\n PINS.append(34) #SDA 8\n PINS.append(35) #SCL 9\n ##\n\n PINS.append(1) #GPIO 10\n PINS.append(21) #GPIO 11\n PINS.append(0) #GPIO 12\n PINS.append(18) #GPIO 13\n PINS.append(16) #GPIO 14\n PINS.append(27) #GPIO 15\n PINS.append(6) #GPIO 16\n PINS.append(3) #GPIO 17\n PINS.append(9) #GPIO 18\n PINS.append(29) #PWM 0 19\n PINS.append(28) #PWM 0 20\n PINS.append(22) #PWM 0 21\n PINS.append(7) #PWM 1 22\n PINS.append(17) #PWM 1 23\n PINS.append(2) #PWM 1 24\n PINS.append(33) #AD0 25\n PINS.append(32) #AD1 26\n PINS.append(31) #AD2 27\n PINS.append(30) #AD3 28\n PINS.append(26) #AD4 29\n PINS.append(25) #AD5 30\n PINS.append(24) #AD6 31\n PINS.append(23) #AD7 32\n\n # WeIO adc pins\n ADCS = []\n ADCS.append(25)\n ADCS.append(26)\n ADCS.append(27)\n ADCS.append(28)\n ADCS.append(29)\n ADCS.append(30)\n ADCS.append(31)\n ADCS.append(32)\n\n # WeIO pwm pins\n PWMS = []\n PWMS.append(19)\n PWMS.append(20)\n PWMS.append(21)\n PWMS.append(22)\n PWMS.append(23)\n PWMS.append(24)\n\n # Value is in microseconds\n PWM_PERIOD = 1000\n # This is the constant for the maximum PWM period limit\n PWM_PERIOD_LIMIT_CONST = 65535\n # make sure that this value is always less than PWM_PERIOD\n PWM_LIMIT = 255\n\n # Interrupt modes\n # HIGH and LOW were already declared\n # LOW 0 \n # HIGH 1\n CHANGE = 2\n RISING = 3\n FALLING = 4\n\n # number of hard interrupts\n HARD_INTERRUPTS = 8\n\n # interrupt types\n INTERRUPT_TYPE = []\n INTERRUPT_TYPE.append(\"LOW\")\n INTERRUPT_TYPE.append(\"HIGH\")\n INTERRUPT_TYPE.append(\"CHANGE\")\n INTERRUPT_TYPE.append(\"RISING\")\n INTERRUPT_TYPE.append(\"FALLING\")\n","sub_path":"boards/dummy/wirings.py","file_name":"wirings.py","file_ext":"py","file_size_in_byte":4302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"144450701","text":"# Segment tree again, with lazy propagation, plus a greedy pass\ndef build(x,tar,l,r,pos):\n global tree\n if tar==l and tar==r:\n tree[pos]=x\n return\n mid=(l+r)//2\n if tar<=mid:\n build(x,tar,l,mid,pos*2+1)\n else:\n build(x,tar,mid+1,r,pos*2+2)\n tree[pos]=min(tree[pos*2+1],tree[pos*2+2])\n return\n\ndef pushdown(pos):\n global tree,lazy\n lazy[pos*2+1]+=lazy[pos]\n lazy[pos*2+2]+=lazy[pos]\n tree[pos*2+1]+=lazy[pos]\n tree[pos*2+2]+=lazy[pos]\n lazy[pos]=0\n return\n\ndef query(tarl,tarr,l,r,pos):\n global tree,lazy\n if tarl==l and tarr==r:\n return tree[pos]\n mid=(l+r)//2\n if lazy[pos]!=0:\n pushdown(pos)\n if tarr<=mid:\n return query(tarl,tarr,l,mid,pos*2+1)\n elif tarl>=mid+1:\n return query(tarl,tarr,mid+1,r,pos*2+2)\n else:\n return 
min(query(tarl,mid,l,mid,pos*2+1),query(mid+1,tarr,mid+1,r,pos*2+2))\n\ndef update(x,tarl,tarr,l,r,pos):\n global tree,lazy\n if tarl==l and tarr==r:\n lazy[pos]+=x\n tree[pos]+=x\n return\n mid=(l+r)//2\n if lazy[pos]!=0:\n pushdown(pos)\n if tarr<=mid:\n update(x,tarl,tarr,l,mid,pos*2+1)\n elif tarl>=mid+1:\n update(x,tarl,tarr,mid+1,r,pos*2+2)\n else:\n update(x,tarl,mid,l,mid,pos*2+1)\n update(x,mid+1,tarr,mid+1,r,pos*2+2)\n return\n\nn,m=map(int,input().split())\ntree=[0 for i in range(0,4*n+1)]\nlazy=[0 for i in range(0,4*n+1)]\nfor i in range(0,n):\n build(int(input()),i+1,1,n,0)\nop=[]\nfor i in range(0,m):\n op.append(list(map(int,input().split())))\nop=sorted(op,key=lambda x:(x[1],-x[0]))\ncnt=0\nfor i in range(0,m):\n if query(op[i][0],op[i][1],1,n,0)>0:\n cnt+=1\n update(-1,op[i][0],op[i][1],1,n,0)\nprint(cnt)","sub_path":"Code/CodeRecords/2599/60670/290238.py","file_name":"290238.py","file_ext":"py","file_size_in_byte":1729,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"449165844","text":"#!/usr/bin/env python3\n# Name: Guillermo Uriarte (guriarte)\n# Group Members: None\n'''\nThis program takes a FASTA file supplied on the command line and finds the unique subsequences for each\nsequence in the file. This is achieved by first reading and parsing the file. Then combining the data and\nchecking against each potential sequence for uniqueness and redundancy. The output is then printed according\nto a specified format with each line representing a unique subsequence for a given sequence.\n'''\n\nimport sys\n\nclass SequenceSubstrings:\n #initialize input\n def __init__ (self, fname=''):\n self.fname = fname\n #dictionary to store sequences\n self.sequences = {}\n \n def doOpen (self):\n if self.fname == '':\n #if fname is empty use system input\n return sys.stdin\n else:\n return open(self.fname)\n \n def readFasta (self):\n #clear header and sequence\n header = ''\n sequence = ''\n \n with self.doOpen() as fileH:\n #clear header and sequence again\n header = ''\n sequence = ''\n #read each line of input file\n line = fileH.readline()\n #skip lines until the first header line\n while not line.startswith('>') :\n line = fileH.readline()\n #strip everything but header and store at header\n header = line[1:].rstrip()\n\n \n for line in fileH:\n if line.startswith ('>'):\n #store header, sequence clear\n header = line[1:].rstrip()\n sequence = ''\n else :\n #append the cleaned-up line to sequence\n sequence += ''.join(line.rstrip().split()).upper()\n #store as value in dictionary with header key\n self.sequences[header] = (sequence, self.substr(sequence))\n\n def substr(self, sequence):\n j=1\n #create set\n uniqueSub=set()\n while True: #loops infinitely\n #for i in the range of the start of sequence to j+1\n for i in range(len(sequence)-j+1):\n #add substring to set uniqueSub\n uniqueSub.add(sequence[i:i+j])\n #stop code once all sequences read\n if j==len(sequence):\n break\n #continue iteration by incrementing j\n j+=1\n return uniqueSub\n \n def union(self):\n #create dictionary\n self.unions = {}\n #for all keys in sequences\n for header in self.sequences:\n #Set values to empty sets for n keys where n = keys in sequences\n #key = header\n self.unions[header] = set()\n #for each item \n for otherHeader in self.sequences:\n #skip comparing a sequence with itself\n if otherHeader == header: continue\n #A = A | B for second element set in dictionary \n self.unions[header] |= self.sequences[otherHeader][1]\n \n def differences(self):\n for header 
in self.sequences:\n #store the unioned set at setunion\n setunion = self.unions[header]\n #get unique substrings for given header by subtracting total unioned set\n setdiff = self.sequences[header][1] - setunion\n #replace second element with new substring values\n self.sequences[header] = (self.sequences[header][0], setdiff)\n \n def rmRedundant(self):\n #for keys of dictionary\n for headers in self.sequences:\n #new set kick\n kick = set()\n #for i of second set of element in sequence dictionary\n for s1 in self.sequences[headers][1]:\n #for j of second set of element in sequence dictionary\n for s2 in self.sequences[headers][1]:\n #skip comparing a substring with itself\n if s1 == s2 : continue\n #if one substring exists within another\n if s1 in s2:\n #kick out s2 from the set\n kick.add(s2)\n #for elements of set\n for value in kick:\n #remove equal elements if it exists in second set for sequence dictionary\n self.sequences[headers][1].remove(value)\n \n def output(self):\n for headers in sorted(self.sequences):\n #create dictionary of index\n indexDict = {}\n #for i in set 2 of sequence dictionary\n for s1 in self.sequences[headers][1]:\n #set i to the index (first position) of a given element\n #key of i = s1\n i = self.sequences[headers][0].index(s1)\n indexDict[i] = s1\n #print keys\n print(headers)\n #print first set of elements for sequence dictionary\n print(self.sequences[headers][0])\n #sort the output\n for i in sorted(indexDict):\n #print dots depending on value of i\n #print content of indexDict[i]\n dots = \".\"*i\n print(dots + indexDict[i])\n \n#class initialization\nreader = SequenceSubstrings()\n\n# step 1\nreader.readFasta()\n\n# step 2\nreader.union()\nreader.differences()\n\n# step 3\nreader.rmRedundant()\nreader.output()\n\n \n \n \n \n \n","sub_path":"findUnique.py","file_name":"findUnique.py","file_ext":"py","file_size_in_byte":5565,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"600458834","text":"\r\n# program to print the two dimensional array or list\r\n\r\nrows = int(input(\"Enter row size \\n\"))\r\ncol = int(input(\"Enter column size \\n\"))\r\ntwo_d_array = [[0 for i in range(col)] for j in range(rows)]\r\nprint(\"Enter the Elements of the list\")\r\nfor i in range(rows):\r\n for j in range(col): # loop for creating the list elements during run time\r\n two_d_array[i][j] = int(input())\r\nprint(two_d_array)\r\n","sub_path":"FunctionalPrograms/TwoDList.py","file_name":"TwoDList.py","file_ext":"py","file_size_in_byte":409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"107961704","text":"# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport boto3\n\nfrom botocore import exceptions\nfrom jacket.db.extend import api as db_api\nfrom jacket.drivers.aws import exception_ex\nfrom jacket.i18n import _LE\nfrom oslo_log import log as logging\nfrom oslo_utils import excutils\n\nLOG = logging.getLogger(__name__)\n\n\nclass AwsClient(object):\n\n def __init__(self, *args, **kwargs):\n self._boto3client = None\n super(AwsClient, self).__init__(*args, **kwargs)\n\n def create_ec2_client(self, context=None):\n project_info = db_api.project_mapper_get(context, context.project_id)\n if not project_info:\n project_info = db_api.project_mapper_get(context,\n \"aws_default\")\n if not project_info:\n raise exception_ex.AccountNotConfig()\n\n username = project_info.pop('aws_access_key_id', None)\n password = project_info.pop(\"aws_secret_access_key\", None)\n region_name = project_info.pop(\"region\", None)\n kwargs = {}\n kwargs['aws_access_key_id'] = username\n kwargs['aws_secret_access_key'] = password\n kwargs['region_name'] = region_name\n return boto3.client('ec2', **kwargs)\n\n def create_resource_client(self, context=None):\n project_info = db_api.project_mapper_get(context, context.project_id)\n if not project_info:\n project_info = db_api.project_mapper_get(context,\n \"aws_default\")\n if not project_info:\n raise exception_ex.AccountNotConfig()\n\n username = project_info.pop('aws_access_key_id', None)\n password = project_info.pop(\"aws_secret_access_key\", None)\n region_name = project_info.pop(\"region\", None)\n kwargs = {}\n kwargs['aws_access_key_id'] = username\n kwargs['aws_secret_access_key'] = password\n kwargs['region_name'] = region_name\n return boto3.resource('ec2', **kwargs)\n\n def get_aws_client(self, context):\n if self._boto3client is None:\n try:\n ec2_client = self.create_ec2_client(context)\n resource_client = self.create_resource_client(context)\n self._boto3client = AwsClientPlugin(ec2_client,\n resource_client)\n except Exception:\n LOG.error(_LE('Create aws client failed.'))\n raise exception_ex.OsAwsConnectFailed\n\n return self._boto3client\n\n\nclass AwsClientPlugin(object):\n\n def __init__(self, ec2_client=None, res_client=None, **kwargs):\n self._ec2_client = ec2_client\n self._ec2_resource = res_client\n\n def create_tags(self, **kwargs):\n self._ec2_client.create_tags(**kwargs)\n\n def create_volume(self, **kwargs):\n vol = None\n try:\n vol = self._ec2_client.create_volume(**kwargs)\n waiter = self._ec2_client.get_waiter('volume_available')\n waiter.wait(VolumeIds=[vol['VolumeId']])\n except Exception as e:\n if vol:\n self.delete_volume(VolumeId=vol['VolumeId'])\n if isinstance(e, exceptions.ClientError):\n reason = e.response.get('Error', {}).get('Message', 'Unknown')\n LOG.error(_LE(\"Aws create volume failed! error_msg: %s\"),\n reason)\n raise exception_ex.ProviderCreateVolumeFailed(reason=reason)\n else:\n raise\n else:\n return vol\n\n def delete_volume(self, **kwargs):\n try:\n self._ec2_client.delete_volume(**kwargs)\n waiter = self._ec2_client.get_waiter('volume_deleted')\n waiter.wait(VolumeIds=[kwargs['VolumeId']])\n except Exception as e:\n if isinstance(e, exceptions.ClientError):\n reason = e.response.get('Error', {}).get('Message', 'Unknown')\n LOG.error(_LE(\"Aws delete volume failed! 
error_msg: %s\"),\n reason)\n raise exception_ex.ProviderDeleteVolumeFailed(reason=reason)\n else:\n raise\n\n def create_snapshot(self, **kwargs):\n snapshot = None\n try:\n snapshot = self._ec2_client.create_snapshot(**kwargs)\n waiter = self._ec2_client.get_waiter('snapshot_completed')\n waiter.wait(VolumeIds=[snapshot['SnapshotId']])\n except Exception as e:\n if snapshot:\n self.delete_snapshot(SnapshotId=snapshot['SnapshotId'])\n if isinstance(e, exceptions.ClientError):\n reason = e.response.get('Error', {}).get('Message', 'Unkown')\n LOG.error(_LE(\"Aws create snapshot failed! error_msg: %s\"),\n reason)\n raise exception_ex.ProviderCreateSnapshotFailed(reason=reason)\n else:\n raise\n else:\n return snapshot\n\n def describe_volumes(self, **kwargs):\n response = self._ec2_client.describe_volumes(**kwargs)\n volumes = response.get('Volumes', [])\n return volumes\n\n def describe_snapshots(self, **kwargs):\n response = self._ec2_client.describe_snapshots(**kwargs)\n snapshots = response.get('Snapshots', [])\n return snapshots\n\n def delete_snapshot(self, **kwargs):\n try:\n self._ec2_client.delete_snapshot(**kwargs)\n except Exception as e:\n if isinstance(e, exceptions.ClientError):\n reason = e.response.get('Error', {}).get('Message', 'Unkown')\n LOG.error(_LE(\"Aws delete snapshot failed! error_msg: %s\"),\n reason)\n raise exception_ex.ProviderDeleteSnapshotFailed(reason=reason)\n else:\n raise\n\n def create_instance(self, **kwargs):\n instance_ids = []\n try:\n response = self._ec2_client.run_instances(**kwargs)\n instances = response.get('Instances', [])\n for instance in instances:\n instance_ids.append(instance.get('InstanceId'))\n waiter = self._ec2_client.get_waiter('instance_running')\n waiter.wait(InstanceIds=instance_ids)\n return instance_ids\n except Exception:\n with excutils.save_and_reraise_exception():\n if instance_ids:\n self.delete_instances(InstanceIds=instance_ids)\n return instance_ids\n\n def start_instances(self, **kwargs):\n self._ec2_client.start_instances(**kwargs)\n instance_ids = kwargs.get('InstanceIds', [])\n if instance_ids:\n waiter = self._ec2_client.get_waiter('instance_running')\n waiter.wait(InstanceIds=instance_ids)\n\n def stop_instances(self, **kwargs):\n self._ec2_client.stop_instances(**kwargs)\n instance_ids = kwargs.get('InstanceIds', [])\n if instance_ids:\n waiter = self._ec2_client.get_waiter('instance_stopped')\n waiter.wait(InstanceIds=instance_ids)\n\n def delete_instances(self, **kwargs):\n self._ec2_client.terminate_instances(**kwargs)\n instance_ids = kwargs.get('InstanceIds', [])\n if instance_ids:\n waiter = self._ec2_client.get_waiter('instance_terminated')\n waiter.wait(InstanceIds=instance_ids)\n\n def describe_instances(self, **kwargs):\n instances = []\n response = self._ec2_client.describe_instances(**kwargs)\n reservations = response.get('Reservations', [])\n for reservation in reservations:\n instances.extend(reservation.get('Instances'))\n return instances\n\n def reboot_instances(self, **kwargs):\n self._ec2_client.reboot_instances(**kwargs)\n\n def detach_volume(self, **kwargs):\n self._ec2_client.detach_volume(**kwargs)\n volume_id = kwargs.get('VolumeId')\n if volume_id:\n volume_ids = [volume_id]\n waiter = self._ec2_client.get_waiter('volume_available')\n waiter.wait(VolumeIds=volume_ids)\n\n def attach_volume(self, **kwargs):\n self._ec2_client.attach_volume(**kwargs)\n volume_id = kwargs.get('VolumeId')\n if volume_id:\n volume_ids = [volume_id]\n waiter = self._ec2_client.get_waiter('volume_in_use')\n 
waiter.wait(VolumeIds=volume_ids)\n\n def describe_images(self, **kwargs):\n response = self._ec2_client.describe_images(**kwargs)\n volumes = response.get('Images', [])\n return volumes\n","sub_path":"drivers/aws/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":9095,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"389039517","text":"####################### QC rule for RNA Seq quality evaluation - Paired end #######################\n\n# Version: N/A (02/04/2020)\n\n# Niu Du (ndu [at] lji.org)\n# La Jolla Institute for Immunology (LJI)\n# La Jolla, CA USA\n\n# User environments\n# python 3.X \n# Load in threshold file from 'RNA_SEQ_QC_threshold.json'\n# Please use QC parameter table at /mnt/BioAdHoc/Groups/vd-ay/RNASeq_Workflow/Reference/Paired_end_QC_notes.csv for reference\n\nimport json\n\ndef RNA_QC(sample_tuple,dict_threshold):\n if sample_tuple.final_STAR_counts > dict_threshold['final_STAR_counts']:\n A = True\n else:\n A = False \n if sample_tuple.uniquely_mapped_reads_perc > dict_threshold['uniquely_mapped_reads_perc']:\n B = True\n else:\n B = False\n if sample_tuple.exonic_perc > dict_threshold['exonic_perc']:\n C = True\n else:\n C = False\n if sample_tuple.too_short_reads_perc < dict_threshold['too_short_reads_perc']:\n D = True\n else:\n D = False\n if sample_tuple.t_rRNA_counts_perc < dict_threshold['t_rRNA_counts_perc']:\n E = True\n else:\n E = False\n if sample_tuple.Total_genes > dict_threshold['Total_genes']:\n F = True\n else:\n F = False\n if float(sample_tuple.bias_5to3_prim) < dict_threshold['bias_5to3_prim']:\n G = True\n else:\n G = False\n \n \n \n if A and B and C and D and E and F and G:\n return '1.Good'\n elif B and C and D and E and F and G:\n return '2.Reseq'\n else:\n return '3.Manual QC'","sub_path":"QC_rule.py","file_name":"QC_rule.py","file_ext":"py","file_size_in_byte":1545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"90981575","text":"import cmath\nimport contextlib\nimport pytest\nimport threading\n\nfrom bionic.code_hasher import CodeHasher, TypePrefix\n\n\ndef test_code_hasher():\n def barray(value):\n return bytearray(value, \"utf8\")\n\n circular_dict_1_a = {\"k11\": \"v1\"}\n circular_dict_1_b = {\"k21\": \"v2\"}\n circular_dict_1_c = {\"k31\": \"v3\"}\n circular_dict_1_a[\"k12\"] = circular_dict_1_b\n circular_dict_1_b[\"k22\"] = circular_dict_1_c\n circular_dict_1_c[\"k32\"] = circular_dict_1_a\n\n circular_dict_2_a = {\"k11\": \"v1\"}\n circular_dict_2_b = {\"k21\": \"v2\"}\n circular_dict_2_c = {\"k31\": \"v3\"}\n circular_dict_2_a[\"k12\"] = circular_dict_2_b\n circular_dict_2_b[\"k22\"] = circular_dict_2_c\n circular_dict_2_c[\"k32\"] = circular_dict_2_b\n\n def f1():\n v = 10\n return v\n\n def f2():\n v = 20\n return v\n\n def f3():\n v = \"10\"\n return v\n\n def f4():\n return \"10\"\n\n def inc(x):\n return x + 1\n\n def dec(x):\n return x - 1\n\n def quadratic_eq(a, b, c):\n d = b ** 2 - 4 * a * c\n s1 = (-b - cmath.sqrt(d)) / (2 * a)\n s2 = (-b + cmath.sqrt(d)) / (2 * a)\n return (s1, s2)\n\n def logistic_reg(train_frame, random_seed, hyperparams_dict):\n from sklearn import linear_model\n\n m = linear_model.LogisticRegression(\n solver=\"liblinear\", random_state=random_seed, **hyperparams_dict\n )\n m.fit(train_frame.drop(\"target\", axis=1), train_frame[\"target\"])\n return m\n\n def a_lot_of_consts(train_frame, random_seed, hyperparams_dict):\n import logging\n\n docstring = \"\"\"\n This 
function uses a few constants and demonstrates that Bionic\n can hash all of them without any issues.\n \"\"\"\n logging.log(docstring) # Log these variables to avoid F841 errors.\n add_numbers = lambda x, y: x + y # noqa: E731\n four = add_numbers(2, 2)\n logging.log(four)\n seven = add_numbers(3, 4)\n logging.log(seven)\n\n a, b, c = (1, -30, 200)\n (s1, s2) = quadratic_eq(a, b, c)\n assert [s1, s2] == [10, 20]\n\n def f_with_defaults1(x=10, y=20):\n return x + y\n\n def f_with_defaults2(x=20, y=10):\n return x + y\n\n def f_with_defaults3(x=10.0, y=20):\n return x + y\n\n def f_docstring1():\n \"\"\"Docstring1\"\"\"\n pass\n\n def f_docstring2():\n \"\"\"Docstring2\"\"\"\n pass\n\n values = [\n b\"\",\n b\"123\",\n b\"None\",\n barray(\"bytearray\"),\n barray(\"anotherbytearray\"),\n None,\n \"\",\n \"None\",\n \"String1\",\n \"String2\",\n \"0\",\n \"1\",\n \"123\",\n \"1.23\",\n 0,\n 1,\n 123,\n 23,\n 1.23,\n 23.0,\n float(\"inf\"),\n float(\"-inf\"),\n float(\"nan\"),\n True,\n False,\n [],\n [1, 2, 3],\n [1, 2, \"3\"],\n [1, 2],\n (),\n (1, 2, 3),\n (1, 2, \"3\"),\n (1, 2),\n {},\n {1, 2, 3},\n {1, 2, \"3\"},\n {1, 2},\n {0: \"v1\", 1: None, \"2\": [\"value1\", \"value2\"]},\n {0: \"v1\", 1: None, \"2\": [\"value1\", \"value2\"], None: \"none_val\"},\n {\n 0: \"v1\",\n 1: {10: \"v2\", 20: {100: [200, 300]}},\n \"2\": [\"value1\", \"value2\"],\n None: \"none_val\",\n },\n circular_dict_1_a,\n circular_dict_2_a,\n f1,\n f2,\n f3,\n f4,\n inc,\n dec,\n lambda x: x * 2,\n lambda x: x / 2,\n lambda: None,\n quadratic_eq,\n logistic_reg,\n a_lot_of_consts,\n f_with_defaults1,\n f_with_defaults2,\n f_with_defaults3,\n f_docstring1,\n f_docstring2,\n ]\n\n values_with_complex_types = [\n lambda x=threading.Lock(): x,\n threading.Lock(),\n ]\n\n idx_by_hash_value = {}\n for idx, val in enumerate(values + values_with_complex_types):\n if idx >= len(values):\n ctx_mgr = pytest.warns(UserWarning, match=\"Found a constant\")\n else:\n ctx_mgr = contextlib.suppress()\n\n with ctx_mgr:\n hash_value = CodeHasher.hash(val)\n # Hashing again should return the same hash value.\n assert CodeHasher.hash(val) == hash_value\n assert (\n hash_value not in idx_by_hash_value\n ), f\"{values[idx]} and {values[idx_by_hash_value[hash_value]]} have the same hash\"\n idx_by_hash_value[hash_value] = idx\n\n\ndef test_complex_type_warning():\n val = threading.Lock()\n with pytest.warns(\n UserWarning,\n match=\"Found a constant\",\n ):\n assert CodeHasher.hash(val) == CodeHasher.hash(TypePrefix.DEFAULT)\n\n\ndef test_same_func_different_names():\n def f1():\n v = 10\n return v\n\n def f2():\n v = 10\n return v\n\n assert CodeHasher.hash(f1) == CodeHasher.hash(f1)\n assert CodeHasher.hash(f2) == CodeHasher.hash(f2)\n assert CodeHasher.hash(f1) == CodeHasher.hash(f2)\n","sub_path":"tests/test_code_hasher.py","file_name":"test_code_hasher.py","file_ext":"py","file_size_in_byte":5017,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"73957411","text":"from tweepy.streaming import StreamListener\nfrom tweepy import OAuthHandler\nfrom tweepy import Stream\nfrom tweepy import API\nfrom tweepy import Cursor\nimport pandas as pd\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\nimport credentials\n\n#### Twitter Client #####\n\nclass TwitterClient():\n\n def __init__(self, twitter_user=None):\n self.auth = TwitterAuthenticator().authenticate_twitter_app()\n self.twitter_client = API(self.auth)\n\n self.twitter_user = twitter_user\n\n def 
get_twitter_client_api(self):\n        return self.twitter_client\n\n\n    def get_user_timeline_tweets(self, num_tweets):\n        tweets = []\n        for tweet in Cursor(self.twitter_client.user_timeline, id = self.twitter_user).items(num_tweets):\n            tweets.append(tweet)\n        return tweets\n\n    def get_friend_list(self, num_friends):\n        friend_list = []\n        for friend in Cursor(self.twitter_client.friends, id= self.twitter_user).items(num_friends):\n            friend_list.append(friend)\n        return friend_list\n\n    def get_home_timeline_tweets(self, num_tweets):\n        home_tweets = []\n        for tweet in Cursor(self.twitter_client.home_timeline, id = self.twitter_user).items(num_tweets):\n            home_tweets.append(tweet)\n        return home_tweets\n\n\n\n\n#####Twitter Authenticator ###\nclass TwitterAuthenticator():\n\n    def authenticate_twitter_app(self):\n        auth = OAuthHandler(credentials.CONSUMER_KEY, credentials.CONSUMER_SECRET)\n        auth.set_access_token(credentials.API_TOKEN, credentials.API_TOKEN_SECRET)\n        return auth\n\n####Twitter Streamer####\n\nclass TwitterStreamer():\n    def __init__(self):\n        self.twitter_authenticator = TwitterAuthenticator()\n\n\n    def stream_tweets(self, fetched_tweets_filename, hash_tag_list):\n        \"\"\"\n        Class for Streaming and processing live tweets\n        \"\"\"\n        #This handles Twitter Authentication and connection to twitter streaming API\n        listener = TwitterListener(fetched_tweets_filename)\n\n        auth = self.twitter_authenticator.authenticate_twitter_app()\n\n        stream = Stream(auth, listener)\n        stream.filter(track=hash_tag_list)\n\n\n\nclass TwitterListener(StreamListener):\n\n    \"\"\"This is a basic listener class that just prints received tweets to stdout\"\"\"\n    def __init__(self, fetched_tweets_filename):\n        self.fetched_tweets_filename = fetched_tweets_filename\n\n    def on_data(self, data):\n        try:\n            print(data)\n            with open(self.fetched_tweets_filename, 'a') as tf:\n                tf.write(data)\n        except BaseException as e:\n            print('Error on data %s' % str(e))\n        return True\n\n    def on_error(self, status):\n        if status == 420:\n            #Returning False on_data method in case rate limit occurs.\n            return False\n        print(status)\n\n\nclass TweetAnalyzer():\n    \"\"\"\n    Functionality to analyze and categorize content from tweets.\n    \"\"\"\n    def tweets_to_dataframe(self, tweets):\n\n        df = pd.DataFrame(data=[tweet.text for tweet in tweets], columns=['Tweets'])\n        df['id'] = np.array([tweet.id for tweet in tweets])\n        df['len'] = np.array([len(tweet.text) for tweet in tweets])\n        df['created'] = np.array([tweet.created_at for tweet in tweets])\n        df['source'] = np.array([tweet.source for tweet in tweets])\n        df['likes'] = np.array([tweet.favorite_count for tweet in tweets])\n        df['retweet'] = np.array([tweet.retweet_count for tweet in tweets])\n\n        return df\nif __name__ == \"__main__\":\n    #Set Pandas Options to see all Columns of a data frame\n    pd.set_option('display.max_rows', 500)\n    pd.set_option('display.max_columns', 500)\n    pd.set_option('display.width', 1000)\n\n    tweet_analyzer = TweetAnalyzer()\n\n    twitter_client = TwitterClient()\n    api = twitter_client.get_twitter_client_api()\n\n    tweets = api.user_timeline(screen_name=\"baloise_ch\", count=370)\n    df = tweet_analyzer.tweets_to_dataframe(tweets)\n\n    #Average length over all tweets\n    print(np.mean(df['len']))\n\n    #Get Number of likes for the most liked tweets\n    print(np.max(df['likes']))\n\n    #Get the Number of Retweets for the most retweeted\n    print(np.max(df['retweet']))\n\n    #Time Series\n    #time_likes = pd.Series(data=df['likes'].values, index=df['created'])\n    #time_likes.plot(figsize=(16,4), color='r')\n    
#plt.show()\n    #Time Retweet\n    #time_likes = pd.Series(data=df['retweet'].values, index=df['created'])\n    #time_likes.plot(figsize=(16,4), color='r')\n    #plt.show()\n\n    time_likes = pd.Series(data=df['likes'].values, index=df['created'])\n    time_likes.plot(figsize=(16,4), color='r', label=\"likes\", legend = True, )\n\n    time_likes = pd.Series(data=df['retweet'].values, index=df['created'])\n    time_likes.plot(figsize=(16,4), color='g',label=\"retweets\", legend = True,)\n    plt.show()\n","sub_path":"twitter_app.py","file_name":"twitter_app.py","file_ext":"py","file_size_in_byte":4860,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"597910488","text":"\"\"\"scrumlab URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n    https://docs.djangoproject.com/en/2.1/topics/http/urls/\nExamples:\nFunction views\n    1. Add an import:  from my_app import views\n    2. Add a URL to urlpatterns:  path('', views.home, name='home')\nClass-based views\n    1. Add an import:  from other_app.views import Home\n    2. Add a URL to urlpatterns:  path('', Home.as_view(), name='home')\nIncluding another URLconf\n    1. Import the include() function: from django.urls import include, path\n    2. Add a URL to urlpatterns:  path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path\nfrom jedzonko.views import IndexView, MainView, RecipesView, SchedulesView, AddRecipeView, AddPlanView, RecipeDetails, \\\n    PlanDetails, PlanParticulars, ModifyRecipe, NewPlanDetails\n\n\n\n\nurlpatterns = [\n    path('admin/', admin.site.urls),\n    path('', IndexView.as_view(), name='landing_page'),\n    path('main/', MainView.as_view(), name='dashboard'),\n    path('recipe/list', RecipesView.as_view(), name='recipes'),\n    path('schedules/list', SchedulesView.as_view(), name='schedules'),\n    path('recipe/add', AddRecipeView.as_view(), name='add_recipe'),\n    path('plan/add', AddPlanView.as_view(), name='add_plan'),\n    path('recipe', RecipeDetails.as_view(), name='recipe_details'),\n    path('plan/', PlanParticulars.as_view(), name=\"plan_particulars\"),\n    path('recipe/modify/', ModifyRecipe.as_view(), name='modify_recipe'),\n    path('plan/add/details', PlanDetails.as_view(), name='plan_add_details'),\n    path('new_plan_details', NewPlanDetails.as_view(), name='new_plan_details'),\n]\n","sub_path":"scrumlab/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"449673483","text":"default_path = __file__\ndefault_mode = \"query\"\ndefault_host = \"localhost\"\ndefault_port = 9999\n\nquery_is_ext_ref = \"is:external-reference\"\nquery_is_exported = \"is:exported\"\n\n# Limits for workspace/symbol\nquery_limit_default = 10\nquery_limit_absolute = 250\n\n# LSP error codes\nerror = -32700\ninvalid_request = -32600\nmethod_not_found = -32601\ninvalid_params = -32602\ninternal_error = -32603\nserver_error_start = -32099\nserver_error_end = -32000\n\n# LSP trivia\nlsp_content_type = \"Content-Type: application/vscode-jsonrpc; charset=utf8\"\n\n\n# LSP kinds\nvsc_kind = {\n\t'file': 1,\n\t'module': 2,\n\t'namespace': 3,\n\t'package': 4,\n\t'class': 5,\n\t'method': 6,\n\t'property': 7,\n\t'field': 8,\n\t'constructor': 9,\n\t'enum': 10,\n\t'interface': 11,\n\t'function': 12,\n\t'variable': 13,\n\t'constant': 14,\n\t'string': 15,\n\t'number': 16,\n\t'array': 18,\n\t'boolean': 17\n}\n\n\n# Jedi kinds from string\njedi_kinds = 
{\n\n\t# Note: Omissions due to unknown mappings:\n\t# 'statement': 0,\n\t# 'keyword': 0,\n\n\t# Jedi type 'none' maps to vsc constant\n\t'none': vsc_kind['constant'],\n\t'type': vsc_kind['class'],\n\t'tuple': vsc_kind['class'],\n\n\t# 'dict' => 'class'\n\t'dict': vsc_kind['class'],\n\t'dictionary': vsc_kind['class'],\n\n\t# Classes / instances\n\t'class': vsc_kind['class'],\n\t'instance': vsc_kind['variable'],\n\n\t# Functions\n\t'function': vsc_kind['function'],\n\t'lambda': vsc_kind['function'],\n\t'generator': vsc_kind['function'],\n\t'method': vsc_kind['method'],\n\n\t# Builtins\n\t'builtin': vsc_kind['class'],\n\t'builtinfunction': vsc_kind['function'],\n\n\t# Scopes\n\t'file': vsc_kind['file'],\n\t'namespace': vsc_kind['namespace'],\n\t'module': vsc_kind['module'],\n\n\t# Regular Python types\n\t'funcdef': vsc_kind['function'],\n\t'property': vsc_kind['property'],\n\t'import': vsc_kind['module'],\n\n\t# Primitives (Python-style)\n\t'constant': vsc_kind['constant'],\n\t'variable': vsc_kind['variable'],\n\t'value': vsc_kind['variable'],\n\t'param': vsc_kind['variable'],\n\t'boolean': vsc_kind['boolean'],\n\t'int': vsc_kind['number'],\n\t'longlean': vsc_kind['number'],\n\t'float': vsc_kind['number'],\n\t'complex': vsc_kind['number'],\n\t'string': vsc_kind['string'],\n\t'unicode': vsc_kind['string'],\n\t'list': vsc_kind['array'],\n\n\t# Special Python types\n\t'xrange': vsc_kind['class'],\n\t'slice': vsc_kind['class'],\n\t'traceback': vsc_kind['class'],\n\t'frame': vsc_kind['class'],\n\t'buffer': vsc_kind['class'],\n\t'dictproxy': vsc_kind['class'],\n}\n","sub_path":"langserver/src/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":2350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"320201745","text":"from selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver import ActionChains\n\nimport csv\nimport threading\nimport time\ntry:\n    import tkinter as tk\n    from tkinter import messagebox as msg, filedialog\n    from tkinter import *\nexcept:\n    import Tkinter as tk\n    import tkMessageBox as msg\n    from Tkinter import *\n\nfrom GUI import *\n\ndef open_url(url, num_retries=5):\n    try:\n        driver = webdriver.Chrome()\n        driver.get(url)\n    except:\n        if num_retries > 0:\n            return open_url(url, num_retries-1)\n    return driver\n\nclass google_maps_scraper():\n\n    def __init__(self, master):\n\n        # self.keyword = keyword\n        self.start_url = 'https://www.google.com/maps/search/'\n\n        self.master = master\n        self.total_data = []\n        self.gui = main_GUI(master, self.total_data, self.endApplication)\n\n        self.running = 1\n        self.periodicCall()\n\n        self.i = 0\n\n        self.url_dict = []\n        self.company_names = []\n\n        #self.go_to_search()\n\n\n    def periodicCall(self):\n        if self.running == 0:\n            print('Exit')\n            import sys\n            #self.master.quit()\n            self.master.destroy()\n\n            sys.exit(1)\n        elif self.running == 1:\n            if self.gui.start == True:\n                self.keyword = self.gui.keyword\n                self.running = 2\n                self.thread1 = threading.Thread(target=self.go_to_search)\n                self.thread1.start()\n        elif self.running == 2:\n            self.gui.insert_data()\n\n\n        self.master.after(2000, self.periodicCall)\n\n    def endApplication(self):\n        self.running = 0\n\n    def go_to_search(self):\n\n        filename = self.keyword + '.csv'\n        self.output_file = open(filename, 'w', 
encoding='utf-8', newline='')\n        self.writer = csv.writer(self.output_file)\n        headers = ['Company Name', 'Address', 'Category', 'Star Rating', 'Number of Reviews',\n                   'Phone Number', 'Website']\n        self.writer.writerow(headers)\n\n        self.driver = open_url(self.start_url)\n\n        input = WebDriverWait(self.driver, 50).until(\n            EC.presence_of_element_located((By.XPATH, \"//input[@aria-label='Search Google Maps']\")))\n        search_btn = WebDriverWait(self.driver, 50).until(\n            EC.element_to_be_clickable((By.XPATH, \"//button[@aria-label='Search']\")))\n\n        input.send_keys(self.keyword)\n        time.sleep(2)\n        search_btn.click()\n        self.scraping_onepage()\n        self.running = 0\n\n    def scraping_onepage(self):\n\n        self.item_index = 0\n        self.skip_index = 0\n        while 1:\n            if len(self.company_names) >= 50 or self.skip_index >= 80:\n                break\n\n            print(self.item_index)\n            companies = WebDriverWait(self.driver, 50).until(\n                EC.presence_of_all_elements_located((By.XPATH, \"//h3[@class='section-result-title']\")))\n            self.item_cnt = len(companies)\n\n            if companies[self.item_index].text not in self.company_names:\n                self.skip_index = 0\n                self.company_names.append(companies[self.item_index].text)\n\n                self.org_url = self.driver.current_url\n\n                companies[self.item_index].click()\n                print(self.org_url)\n\n                self.scraping_onecompany()\n\n                self.driver.get(self.org_url)\n                time.sleep(5)\n            else:\n                self.skip_index += 1\n\n            self.item_index += 1\n\n            if self.item_index == self.item_cnt:\n                self.item_index = 0\n                #self.driver.delete_all_cookies()\n                next_btn = WebDriverWait(self.driver, 50).until(\n                    EC.element_to_be_clickable((By.XPATH, \"//button[@id='section-pagination-button-next']\")))\n                next_btn.click()\n                print('Next Button: Clicked Successfully')\n                time.sleep(5)\n                companies = WebDriverWait(self.driver, 50).until(\n                    EC.presence_of_all_elements_located((By.XPATH, \"//h3[@class='section-result-title']\")))\n                self.item_cnt = len(companies)\n\n                if self.item_cnt == 0:\n                    break\n        self.driver.quit()\n        self.output_file.close()\n\n\n    def scraping_onecompany(self):\n        i = 0\n        while 1:\n            if i > 200:\n                company_name = ''\n                break\n            else:\n                i += 1\n\n            company_name = WebDriverWait(self.driver, 20).until(\n                EC.presence_of_element_located(\n                    (By.XPATH, \"//h1[@class='section-hero-header-title']\"))\n            )\n            if company_name.text != '':\n                company_name = company_name.text\n                break\n\n\n        try:\n            rating = self.driver.find_element_by_css_selector('span.section-star-display').text\n        except:\n            rating = ''\n        try:\n            reviews = self.driver.find_element_by_css_selector('ul.section-rating-term-list').find_element_by_css_selector(\n                'button.widget-pane-link').text\n        except:\n            reviews = ''\n\n        section_infos = self.driver.find_elements_by_css_selector('div.section-info')\n\n        try:\n            address = self.driver.find_elements_by_css_selector('div.section-info')[0].text\n        except:\n            address = ''\n\n        website = ''\n        phone = ''\n\n\n        for section_info in section_infos:\n            try:\n                #website_lbl = section_info.find_element_by_xpath(\"//span[@aria-label='Website']\")\n                website_lbl = section_info.find_element_by_css_selector(\"span.section-info-icon.maps-sprite-pane-info-website\")\n                website = website_lbl.find_element_by_xpath('..').find_element_by_css_selector('span.section-info-text').text\n            except:\n                pass\n\n            try:\n                #phone_lbl = section_info.find_element_by_xpath(\"//span[@aria-label='Phone']\")\n                phone_lbl = section_info.find_element_by_css_selector(\"span.section-info-icon.maps-sprite-pane-info-phone\")\n                phone = 
phone_lbl.find_element_by_xpath('..').find_element_by_css_selector('span.section-info-text').text\n except:\n pass\n\n try:\n category = self.driver.find_element_by_css_selector('span.section-rating-term').text\n except:\n category = ''\n\n element = {\n 'company_name': company_name,\n 'address': address,\n 'category': category,\n 'star_rating': rating,\n 'review_numbers': reviews,\n 'phone': phone,\n 'websites': website\n }\n #if element not in self.total_data:\n print(element)\n self.total_data.append(element)\n # driver.execute_script(\"window.history.go(-1)\")\n row = [\n element['company_name'],\n element['address'],\n element['category'],\n element['star_rating'],\n element['review_numbers'],\n element['phone'],\n element['websites'],\n ]\n self.writer.writerow(row)\n\n def save_to_csv(self):\n filename = self.keyword + '.csv'\n self.output_file = open(filename, 'w', encoding='utf-8', newline='')\n self.writer = csv.writer(self.output_file)\n headers = ['Company Name', 'Address', 'Category', 'Star Rating', 'Number of Reviews',\n 'Phone Number', 'Website']\n self.writer.writerow(headers)\n for i, element in enumerate(self.total_data):\n row = [\n element['company_name'],\n element['address'],\n element['category'],\n element['star_rating'],\n element['review_numbers'],\n element['phone'],\n element['websites'],\n ]\n self.writer.writerow(row)\n self.output_file.close()\n\n\nif __name__ == '__main__':\n\n start_time = time.time()\n\n keywords = [\n 'solar sacramento ca',\n 'Solar San Diego Ca'\n ]\n\n root = tk.Tk()\n\n app = google_maps_scraper(root)\n root.mainloop()\n elapsed_time = time.time() - start_time\n print(elapsed_time)\n\n\n","sub_path":"Main_google_maps_scraper.py","file_name":"Main_google_maps_scraper.py","file_ext":"py","file_size_in_byte":8523,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"127756496","text":"\n# This Python 3 environment comes with many helpful analytics libraries installed\n# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python\n# For example, here's several helpful packages to load in \n\nimport numpy as np # linear algebra\nimport pandas as pd # data processing, CSV file I/O (e.g. 
pd.read_csv)\n\n# Input data files are available in the \"../input/\" directory.\n# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory\n\nimport os\nfor dirname, _, filenames in os.walk('/kaggle/input'):\n    for filename in filenames:\n        print(os.path.join(dirname, filename))\n\n# Any results you write to the current directory are saved as output.\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport seaborn as sb\nimport seaborn as sns\nplt.style.use('seaborn-whitegrid') \n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.metrics import r2_score\nimport os\nfor dirname, _, filenames in os.walk('/kaggle/input'):\n    for filename in filenames:\n        print(os.path.join(dirname, filename))\n\n\n### matplotlib inline\nprint(pd.__version__)\nprint(np.__version__)\n### time\ndf = pd.read_csv(\"../input/AB_NYC_2019.csv\")\ndf\nimport pandas as pd\nAB_NYC_2019 = pd.read_csv(\"../input/AB_NYC_2019.csv\")\nprint(\"number of rows:\",df.shape[0])\nprint(\"number of columns:\",df.shape[1])\ndf.info()\ndf.describe()\ndf.head()\ndf.tail()\nimport matplotlib.pyplot as plt\n### matplotlib inline\n\ndf.hist(bins=50, figsize=(20,10))\ndf[\"room_type\"].value_counts()\n# fill in missing values with 0\ndf = df.fillna(0)\ndf.isnull().sum()\ndf.dropna(how='any',inplace=True)\ndf.info() \nhostname_DF = AB_NYC_2019.loc[AB_NYC_2019.price>1500][['name','host_name', 'price']][:11].set_index('host_name').sort_values(by = 'price', ascending = False)\nprint(hostname_DF)\nhostname_DF = AB_NYC_2019.loc[AB_NYC_2019.price>1500][['host_name', 'price']][:11].set_index('host_name').sort_values(by = 'price', ascending = False).plot(kind = 'bar', figsize = (12,5))\nplt.xlabel('host names')\nplt.ylabel('price')\nprint(hostname_DF)\nAB_NYC_2019.groupby(['neighbourhood_group'])['price'].mean().plot(kind = 'bar', figsize=(12,5))\nAB_NYC_2019.groupby(['neighbourhood_group','room_type'])['price'].mean().sort_values(ascending = False)\nplt.figure(figsize=(12,8))\nplt.hist(AB_NYC_2019[\"reviews_per_month\"], rwidth=0.5,bins=np.arange(0, AB_NYC_2019[\"reviews_per_month\"].max() +20, 5))\nplt.figure(figsize=(12,8))\nplt.hist(AB_NYC_2019[\"availability_365\"], rwidth=0.5,bins=np.arange(0, AB_NYC_2019[\"availability_365\"].max() +12, 25))\nplt.figure(figsize=(12,8))\nytickrange = np.arange(0, 14000, 500) \nax = sns.countplot(x='room_type', hue=\"neighbourhood_group\", data=AB_NYC_2019)\nax.set_yticks(ytickrange)\n# select the quantitative features\nAB_NYC_2019_model = AB_NYC_2019[[\"price\",\"minimum_nights\", \"number_of_reviews\", \"reviews_per_month\", \"calculated_host_listings_count\",\"availability_365\"]]\nx=AB_NYC_2019_model[[\"price\",\"minimum_nights\", \"number_of_reviews\", \"reviews_per_month\",\"availability_365\"]]\ny=AB_NYC_2019_model[[\"calculated_host_listings_count\"]]\nx.head()\ny\n# drop columns that will not be used\ndf.drop(['host_id','latitude','longitude','neighbourhood','number_of_reviews','reviews_per_month'], axis=1, inplace=True)\n# Check the changes\ndf.head(5)\n# Encode the input variables\ndef Encode(df):\n    for column in df.columns[df.columns.isin(['neighbourhood_group', 'room_type'])]:\n        df[column] = df[column].factorize()[0]\n    return df\n\ndf_en = Encode(df.copy())\ndf_en.head(15)\n# Get the correlation between the different 
variables\ncorr = df_en.corr(method='kendall')\nplt.figure(figsize=(18,12))\nsns.heatmap(corr, annot=True)\ndf_en.columns\n# Define the independent and dependent variables\nx = df_en.iloc[:,[0,1,3,4,5]]\ny = df_en['price']\n# Get the training and test sets (split the sample 80/20)\nx_train,x_test,y_train,y_test=train_test_split(x,y,test_size=0.2,random_state=11)\nx_train.head()\ny_train.head()\nx_train.shape\n# split the sample 80/20\nx_train, x_test, y_train, y_test = train_test_split(x,y, test_size=0.2, random_state=11)\nx_train = x_train.fillna(0)\nclf_LR = LogisticRegression(C=1, tol=1e-3, solver=\"lbfgs\")\nclf_LR.fit(x_train, y_train)\nx_test = x_test.fillna(0)\ny_pred_LR = clf_LR.predict(x_test)\ncm = confusion_matrix(y_pred_LR, y_test)\nprint(accuracy_score(y_pred_LR, y_test))\n# Confusion matrix\ncm\nfrom sklearn.ensemble import RandomForestClassifier\n\nclf_RF = RandomForestClassifier()\nclf_RF.fit(x_train, y_train)\n# get the predictions\ny_pred_RF = clf_RF.predict(x_test)\n# model accuracy\ncm = confusion_matrix(y_pred_RF, y_test)\nprint(accuracy_score(y_pred_RF, y_test))\n# Confusion matrix\ncm\n","sub_path":"sources/umida-airbnb.py","file_name":"umida-airbnb.py","file_ext":"py","file_size_in_byte":5238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"}
+{"seq_id":"181917731","text":"import numpy as np\nimport torch\nfrom torch import nn\nfrom torch.nn import functional as F\nfrom torch_tools.modules import HiddenModule\n\n\ndef expand_to_bach(value, batch_size, target_type):\n    try:\n        assert value.shape[0] == batch_size, 'batch size is not equal to the tensor size'\n    except Exception:\n        value = value * torch.ones(batch_size, dtype=target_type)\n    return value.to(target_type)\n\n\ndef apply_modulated_conv_2d(input, style, mc2d, weight):\n    batch, in_channel, height, width = input.shape\n    style = mc2d.modulation(style).view(batch, 1, in_channel, 1, 1)\n    weight = mc2d.scale * weight * style\n\n    if mc2d.demodulate:\n        demod = torch.rsqrt(weight.pow(2).sum([2, 3, 4]) + 1e-8)\n        weight = weight * demod.view(batch, mc2d.out_channel, 1, 1, 1)\n\n    weight = weight.view(batch * mc2d.out_channel, in_channel, mc2d.kernel_size, mc2d.kernel_size)\n\n    if mc2d.upsample:\n        input = input.view(1, batch * in_channel, height, width)\n        weight = weight.view(\n            batch, mc2d.out_channel, in_channel, mc2d.kernel_size, mc2d.kernel_size\n        )\n        weight = weight.transpose(1, 2).reshape(\n            batch * in_channel, mc2d.out_channel, mc2d.kernel_size, mc2d.kernel_size\n        )\n        out = F.conv_transpose2d(input, weight, padding=0, stride=2, groups=batch)\n        _, _, height, width = out.shape\n        out = out.view(batch, mc2d.out_channel, height, width)\n        out = mc2d.blur(out)\n\n    elif mc2d.downsample:\n        input = mc2d.blur(input)\n        _, _, height, width = input.shape\n        input = input.view(1, batch * in_channel, height, width)\n        out = F.conv2d(input, weight, padding=0, stride=2, groups=batch)\n        _, _, height, width = out.shape\n        out = out.view(batch, mc2d.out_channel, height, width)\n\n    else:\n        input = input.view(1, batch * in_channel, height, width)\n        out = F.conv2d(input, weight, padding=mc2d.padding, groups=batch)\n        _, _, height, width = out.shape\n        out = out.view(batch, mc2d.out_channel, height, width)\n\n    return out\n\n\ndef make_deformable(model, layer, deformable_class, **kwargs):\n    model.style_gan2.convs[layer].conv = deformable_class(\n        model.style_gan2.convs[layer].conv, **kwargs)\n\n    deformable_conv = model.style_gan2.convs[layer].conv\n    
model.style_gan2.convs[layer].add_module('deformable_conv', deformable_conv)\n try:\n model.deformable_convs.append(deformable_conv)\n except AttributeError:\n model.deformable_convs = [deformable_conv]\n return model\n\n\nclass DeformableModulatedConv2d(nn.Module):\n def __init__(self, conv_to_deform):\n super(DeformableModulatedConv2d, self).__init__()\n\n for key, val in conv_to_deform.__dict__.items():\n setattr(self, key, val)\n self.shifts = torch.zeros_like(self.weight)\n\n def forward(self, x, style):\n return apply_modulated_conv_2d(x, style, self, self.weight + self.shifts)\n\n\nclass DeformableSubspaceModulatedConv2d(nn.Module):\n def __init__(self, conv_to_deform, basis_vectors, directions_count):\n super(DeformableSubspaceModulatedConv2d, self).__init__()\n\n for key, val in conv_to_deform.__dict__.items():\n setattr(self, key, val)\n self.basis_vectors = basis_vectors\n self.is_deformated = False\n\n assert self.basis_vectors[0].shape == self.weight.shape, \\\n f'unconsisted basis and weight {self.basis_vectors.shape[1:]} != {self.weight.shape}'\n\n basis_dim = len(self.basis_vectors)\n self.shifts_coords = nn.Parameter(torch.randn((directions_count, basis_dim)))\n\n def weight_shifts(self, batch):\n # expand scalar shift params if required\n basis_size = len(self.basis_vectors)\n batch_directions = expand_to_bach(self.batch_directions, batch, torch.long).cuda()\n batch_shifts = expand_to_bach(self.batch_shifts, batch, torch.float32).cuda()\n\n # deformation\n batch_weight_delta = torch.stack(batch * [self.basis_vectors], dim=0)\n # (batch_size, basis_size, *weight.shape)\n batch_basis_coefs = self.shifts_coords[batch_directions]\n # (batch_size, basis_size)\n\n batch_weight_delta = batch_weight_delta.view(batch, basis_size, -1)\n # (batch_size, basis_size, -1)\n batch_basis_coefs = batch_basis_coefs.unsqueeze(-1)\n # (batch_size, basis_size, -1)\n\n batch_weight_delta = (batch_weight_delta * batch_basis_coefs).sum(dim=1)\n # (batch_size, -1)\n batch_weight_delta = F.normalize(batch_weight_delta, p=2, dim=1)\n batch_weight_delta *= batch_shifts[:, None]\n # (batch_size, -1)\n\n batch_weight_delta = batch_weight_delta.view(batch, *self.weight.shape[-4:])\n # (batch_size, c_out, c_in, k_x, k_y)\n return batch_weight_delta\n\n def forward(self, x, style):\n weight = self.weight\n if self.is_deformated:\n weight = weight + self.weight_shifts(x.shape[0])\n return apply_modulated_conv_2d(x, style, self, weight)\n\n\nclass DeformableSVDModulatedConv2d(nn.Module):\n def __init__(self, conv_to_deform, directions_count):\n super(DeformableSVDModulatedConv2d, self).__init__()\n for key, val in conv_to_deform.__dict__.items():\n setattr(self, key, val)\n self.is_deformated = False\n\n weight_matrix = \\\n conv_to_deform.weight.cpu().detach().numpy().reshape((conv_to_deform.weight.shape[-4:]))\n c_out, c_in, k_x, k_y = weight_matrix.shape\n weight_matrix = np.transpose(weight_matrix, (2, 3, 1, 0))\n weight_matrix = np.reshape(weight_matrix, (k_x * k_y * c_in, c_out))\n\n u, s, vh = np.linalg.svd(weight_matrix, full_matrices=False)\n u = torch.FloatTensor(u).cuda()\n vh = torch.FloatTensor(vh).cuda()\n\n self.u = nn.Parameter(u, requires_grad=False)\n self.vh = nn.Parameter(vh, requires_grad=False)\n\n self.direction_to_eigenvalues_delta = nn.Parameter(\n torch.randn(directions_count, len(s)), requires_grad=True)\n\n def weight_shifts(self, batch):\n # expand scalar shift params if required\n batch_directions = expand_to_bach(self.batch_directions, batch, torch.long).cuda()\n batch_shifts 
= expand_to_bach(self.batch_shifts, batch, torch.float32).cuda()\n\n batch_eigenvalues_delta = self.direction_to_eigenvalues_delta[batch_directions]\n # (batch, len(s))\n batch_weight_delta = self.u @ torch.diag_embed(batch_eigenvalues_delta) @ self.vh\n\n c_out, c_in, k_x, k_y = self.weight.shape[-4:]\n batch_weight_delta = F.normalize(batch_weight_delta.view(batch, -1), dim=1, p=2)\n batch_weight_delta = batch_weight_delta.view(batch, k_x, k_y, c_in, c_out)\n batch_weight_delta = batch_weight_delta.permute(0, 4, 3, 1, 2)\n assert batch_weight_delta.shape == (batch, c_out, c_in, k_x, k_y)\n\n batch_weight_delta *= batch_shifts[:, None, None, None, None]\n return batch_weight_delta\n\n def forward(self, x, style):\n weight = self.weight\n if self.is_deformated:\n weight = weight + self.weight_shifts(x.shape[0])\n return apply_modulated_conv_2d(x, style, self, weight)\n","sub_path":"resources/_under_construction/week08_gans_part1/gans/models/StyleGAN2/weights_deformations.py","file_name":"weights_deformations.py","file_ext":"py","file_size_in_byte":7213,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"493424044","text":"import pandas as pd;\nfrom sklearn import linear_model as lm;\n\n\n# Reading data\ndf = pd.read_csv(\"carprices.csv\");\n# print(df);\n\n# Data Munging\ndummies = pd.get_dummies(df[\"Car Model\"]);\n# print(dummies);\n\nmerged = pd.concat([df,dummies],axis = \"columns\"); \n# or you can instead write df = pd.concat([df,dummies],axis = 1);\n# this behaves as if in (rows,columns) columns = 1\n# print(merged.head());\n\n# final is the munged data\n# we remove \"Mercedez Benz C class\" due to a problem called dummy variable trap, Read more form Google\nfinal = merged.drop([\"Car Model\",\"Mercedez Benz C class\"],axis =1);\n# print(final);\n\nX = final.drop([\"Sell Price($)\"], axis = 1);\n# print(X);\ny = final[\"Sell Price($)\"];\n# print (y);\n\n# Model training\nmodel = lm.LinearRegression();\nmodel.fit(X,y);\n\n# Using model to predict selling prices\nprediction = model.predict([[45000,4,0,0],[86000,7,0,1]]);\nprint(prediction);\n\n# Accuracy of my trained model in percent\nacc = model.score(X,y) * 100;\nprint(acc,\"%\");","sub_path":"ML/5_one_hot_encoding/Exercise/my_solution.py","file_name":"my_solution.py","file_ext":"py","file_size_in_byte":983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"368703003","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.5 (3350)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /Users/kieffer/workspace/fabio/build/lib.macosx-10.6-intel-3.5/fabio/utils/pilutils.py\n# Compiled at: 2020-04-03 09:02:03\n# Size of source mod 2**32: 4279 bytes\n\"\"\"Helper functions using Python Imaging Library (PIL)\n\"\"\"\n__authors__ = [\n 'Jérôme Kieffer', 'Jon Wright']\n__date__ = '25/06/2018'\n__license__ = 'MIT'\n__copyright__ = 'European Synchrotron Radiation Facility, Grenoble, France'\n__status__ = 'stable'\nimport logging, numpy\nlogger = logging.getLogger(__name__)\ntry:\n from PIL import Image\nexcept ImportError:\n Image = None\n\nPIL_TO_NUMPY = {'I;8': numpy.uint8, \n 'I;16': numpy.uint16, \n 'I;16B': numpy.uint16, \n 'I;16L': numpy.uint16, \n 'I;32': numpy.uint32, \n 'I;32L': numpy.uint32, \n 'I;32B': numpy.uint32, \n 'F;32F': numpy.float32, \n 'F;32BF': numpy.float32, \n 'F;64F': numpy.float64, \n 'F;64BF': numpy.float64, \n 'F': numpy.float32, \n '1': numpy.bool, \n 'I': 
numpy.int32, \n 'L': numpy.uint8, \n 'P': numpy.uint8}\nNUMPY_TO_PIL = {'float32': 'F', \n 'int32': 'F;32NS', \n 'uint32': 'F;32N', \n 'int16': 'F;16NS', \n 'uint16': 'F;16N', \n 'int8': 'F;8S', \n 'uint8': 'F;8'}\n\ndef get_numpy_array(pil_image):\n    \"\"\"\n    Returns a numpy array from a PIL image\n\n    :param PIL.Image pil_image: A PIL Image object\n    \"\"\"\n    dim1, dim2 = pil_image.size\n    if pil_image.mode in PIL_TO_NUMPY:\n        dtype = PIL_TO_NUMPY[pil_image.mode]\n    else:\n        dtype = numpy.float32\n        pil_image = pil_image.convert('F')\n    try:\n        if pil_image.mode == 'P':\n            data = numpy.asarray(pil_image.convert('RGB'), dtype)\n        else:\n            data = numpy.asarray(pil_image, dtype)\n    except Exception:\n        logger.debug('Backtrace', exc_info=True)\n        if hasattr(pil_image, 'tobytes'):\n            data = numpy.frombuffer(pil_image.tobytes(), dtype=dtype).copy()\n        else:\n            data = numpy.frombuffer(pil_image.tostring(), dtype=dtype).copy()\n        if numpy.dtype(dtype).itemsize > 1:\n            need_swap = False\n            need_swap |= numpy.little_endian and 'B' in pil_image.mode\n            need_swap |= not numpy.little_endian and pil_image.mode.endswith('L')\n            if need_swap:\n                data.byteswap(True)\n        data = data.reshape((dim2, dim1))\n\n    return data\n\n\ndef create_pil_16(numpy_array):\n    \"\"\"\n    Convert a numpy array to a Python Imaging Library 16 bit greyscale image.\n\n    :param numpy.ndarray numpy_array: A numpy array\n    \"\"\"\n    if Image is None:\n        raise ImportError('PIL is not installed')\n    size = numpy_array.shape[:2][::-1]\n    if numpy_array.dtype.name in NUMPY_TO_PIL:\n        mode2 = NUMPY_TO_PIL[numpy_array.dtype.name]\n        mode1 = mode2[0]\n    else:\n        raise RuntimeError('Unknown numpy type: %s' % numpy_array.dtype.type)\n    dats = numpy_array.tobytes()\n    pil_image = Image.frombuffer(mode1, size, dats, 'raw', mode2, 0, 1)\n    return pil_image","sub_path":"pycfiles/fabio-0.10.0-cp35-cp35m-macosx_10_6_intel/pilutils.cpython-35.py","file_name":"pilutils.cpython-35.py","file_ext":"py","file_size_in_byte":3022,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"}
+{"seq_id":"88377386","text":"newlist = [2,3,6,8,9,10]\nsecondlist = []\n\n\ndef even(newlist):\n    x = 0\n    while x < len(newlist):\n        if newlist[x] % 2 == 0:\n            thirdlist = newlist[x]\n            secondlist.append(thirdlist)\n            x = x + 1 \n            print(secondlist)\n        else:\n            x = x + 1 \n\neven(newlist)\nprint(secondlist)","sub_path":"only_evens.py","file_name":"only_evens.py","file_ext":"py","file_size_in_byte":319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"}
+{"seq_id":"202326214","text":"#!/usr/bin/env python3\n#-*- coding: utf-8 -*-\n# This program uses the Google text-to-speech engine.\n## ----------- prerequisites -----------------------\n# To install it run: sudo python3 -m pip install gtts\n# --------------------------------------------------\n# additionally you can install it on Debian this way:\n# sudo apt install python3-gtts* mpg321\n#### this means: python3-gtts-token/stable,now 1.1.1-1\n#### python3-gtts/stable,now 1.2.0-1\n#### mpg321 is a lightweight mp3 player, here run in -q quiet mode\nfrom gtts import gTTS\nimport os\nfrom time import sleep\nos.system('tput bold; tput setaf 7')\nminta = '''\n\tOlivier Nakache és Eric Toledano 2011-ben valamibe nagyon\n\tbeletaláltak az Életrevalókkal: az a film egy lecsúszott,\n\tkisstílű fekete munkakerülő és egy kőgazdag, sznob,\n\tfehér arisztokrata egymásra utaltságából költött csodálatosan\n\temberi és közben állati vicces mesét. 
Pedig igaz történet\n\tvolt az alapja: a proli srác ápolóként szegődött a nyaktól\n\tlefelé lebénult burzsuj mellé, és aztán persze mind a ketten\n\tkihozták egymásból a legjobbat, csak épp finoman szólva sem\n\tazokkal a módszerekkel, amik először eszünkbe jutnának.\n\tA filmből hollywoodi remake is készült, világszerte\n\t– Budapesten is – játsszák, és általában is\n\ta francia film egyik legendájává vált.\n'''\nprint('Write in, or paste from the clipboard Ctrl+Shift+V the text you want to hear.')\ntext_in = input('\n\tÍrd be, vagy másold be a szöveget amit fel kell olvasni:\n\t\\\nVan egy szöveg-minta, ha nem adnál meg semmit, csak ENTERT ütnél.')\nos.system('tput bold; tput setaf 2') # linux terminal green colored text\n# the \ on the end of the line ensures a line-break between the code\nprint(\"\tÉrdemes kipróbálni hogyan olvasná fel\n\t\\\negy másik nyelvű ember ezt a szöveget!!!\n\")\nnyelv = str(input('\n\tAdd meg a nyelvet amelyiken szólni fog.\n\t\\\n(pl.: hu-->(Magyar), fr-->(Francia), de-->(Német), en-->(English)\n\t \\\ntr-->(Török), hi-->(Hindi-India), vagy hu-->(Magyar):\t\n'))\nos.system('tput sgr0') # linux terminal end of green colored text\nif len(text_in) <=1:\n    text_in = minta\nif len(nyelv) <=1 or nyelv == '':\n    nyelv = 'hu'\nos.system('tput sgr0')\nprint(text_in)\nengine = gTTS(text = text_in, lang = nyelv)\nengine.save('kimenet.mp3')\n#os.system('mplayer -really-quiet kimenet.mp3')\nos.system('mpg321 -q kimenet.mp3')\nos.system('afplay kimenet.mp3')\ndef elegedett():\n    os.system('tput bold; tput setaf 7')\n    elegedett = int(input('\n\tEgy 1-től 10-ig tartó skálán értékeld az elégedettségedet:\n\t'))\n    if elegedett <= 5:\n        os.system('tput bold; tput setab 1')\n        print('\n\t Sajnálom, ezt tudja a Google szövegbeolvasója :\n')\n        os.system('tput sgr0')\n    elif elegedett >= 6 and elegedett <=8:\n        os.system('tput bold; tput setaf 5')\n        print('\n\tNa! 
Akkor egész jónak ítéled,\\n\\tennél jobb nem lesz egy darabig.\\n')\n os.system('tput sgr0')\n elif elegedett >= 9:\n os.system('tput bold; tput setaf 2')\n print('\\n\\tSzuper!\\n\\tIlyen értékelést, nem is vártam!\\n\\tKöszi!\\n')\n os.system('tput sgr0')\nelegedett()\n\n","sub_path":"fordit.py","file_name":"fordit.py","file_ext":"py","file_size_in_byte":3204,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"284562874","text":"import glob\nimport pandas as pd\n\ndef _csv_reader(f):\n row = pd.read_csv(f,header=None,squeeze=True, index_col=0)\n return row\n\npath = 'Files/results/**/*findex.csv'\nfL = glob.glob(path)\ndf = pd.read_csv(fL[0])\n\nfor i in range(1,len(fL)):\n\t dfnew = pd.read_csv(fL[i])\n\t df = pd.concat([df,dfnew])\n\ndf.to_csv('Files/findex.csv',index=False)\n\npath = 'Files/results/**/*globalpars.csv'\nfL = glob.glob(path)\ndf = pd.read_csv(fL[0])\n\nfor i in range(1,len(fL)):\n\t dfnew = pd.read_csv(fL[i])\n\t df = pd.concat([df,dfnew])\n\ndf.to_csv('Files/globalpars.csv',index=False)","sub_path":"scrape_output.py","file_name":"scrape_output.py","file_ext":"py","file_size_in_byte":572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"527158786","text":"\"\"\"\nActive Learning - choosing queries strategies\n(UncertaintySampling (Max Margin) - CMB Sampling : Combination of active learning algorithms\n (distance-based (DIST), diversity-based (DIV)))\n\n\"\"\"\n\nimport copy\nimport time\nimport codecs\nimport random\n\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport numpy as np\n\ntry:\n from sklearn.model_selection import train_test_split\nexcept ImportError:\n from sklearn.cross_validation import train_test_split\n\nfrom strategies.strategies import USampling, CMBSampling\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.metrics import accuracy_score\nfrom libact.base.dataset import Dataset\nfrom libact.models import LogisticRegression, SVM\nfrom libact.query_strategies import UncertaintySampling, RandomSampling, QueryByCommittee, HintSVM\n\n\ndef openfile_txt(filepath):\n with open(filepath, 'r', encoding='utf16') as f:\n file = f.read().split('\"\\n[')\n return file\n\n\ndef simulate_w4v(tweet_id):\n element_id = ids_list.index(tweet_id)\n vectorized = vectors_list[element_id]\n return vectorized\n\n\ndef get_vectors_list(filepath):\n vectors_list_x, ids_list_x = [], []\n with open(filepath, 'r', encoding='utf16') as f:\n file = f.read().split('\"\\n[')\n for line in file:\n parts = line.replace('\\n', '').replace(' ', ' ').replace(' ', ' ').replace(' ', ' ').split(\";\")\n vectors_list_x.append(parts[0])\n ids_list_x.append(parts[1].replace(' ', ''))\n return vectors_list_x, ids_list_x\n\n\ndef define_label(tweet_id):\n with open(pos_filepath, 'r', encoding='utf16') as f:\n next(f)\n for line in f.readlines():\n parts = line.split(\";\")\n tweets = parts[0].replace('\"', '')\n if tweet_id in tweets:\n label = 1\n break\n else:\n label = 0\n return label\n\n\ndef define_tweet_by_id(line_id):\n with open(csv_filepath, 'r', encoding='utf16') as fp:\n for i, line in enumerate(fp):\n if i == line_id:\n parts = line.split(\";\")\n tweet = parts[2]\n elif i > line_id:\n break\n return tweet\n\n\ndef randomize(X, y):\n permutation = np.random.permutation(y.shape[0])\n X2 = X[permutation]\n y2 = y[permutation]\n return X2, y2\n\n\ndef build_dataset(file):\n target, data = [], []\n for line in file:\n z = 
np.array(define_label(line[1].replace(' ', '')))\n target.append(z)\n x = np.fromstring(line[0].replace(']', '').replace('[', '').replace(' ', ' '), sep=' ')\n data.append(x)\n target = np.asarray(target)\n data = np.asarray(data)\n return target, data\n\n\ndef balance_dataset():\n file_pos_ids, file_neg_ids = [], []\n file_pos = openfile_txt(pos_filepath_txt)\n for line in file_pos:\n parts = line.replace('\\n', '').replace(' ', ' ').replace(' ', ' ').replace(' ', ' ').split(\";\")\n file_pos_ids.append(parts)\n pos_part = random.sample(file_pos_ids, 1000)\n\n file_neg = openfile_txt(neg_filepath_txt)\n for line in file_neg:\n parts = line.replace('\\n', '').replace(' ', ' ').replace(' ', ' ').replace(' ', ' ').split(\";\")\n file_neg_ids.append(parts)\n neg_part = random.sample(file_neg_ids, 1000)\n\n balanced_txt_file = pos_part+neg_part\n random.shuffle(balanced_txt_file)\n # print(balanced_txt_file)\n return balanced_txt_file\n\n\ndef simulate_human_decision(line_id):\n with open(csv_filepath, 'r', encoding='utf16') as fp:\n for i, line in enumerate(fp):\n if i == line_id:\n parts = line.split(\";\")\n tweet_id = parts[0]\n label = define_label(tweet_id)\n elif i > line_id:\n break\n return label\n\n\ndef split_train_test(file):\n target = build_dataset(file)\n n_labeled = 50\n\n X = target[1]\n y = target[0]\n print(np.shape(X))\n print(np.shape(y))\n\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, stratify=y)\n\n while (np.sum(y_train[:n_labeled]) < 25):\n X_rand, y_rand = randomize(X, y)\n X_train, X_test, y_train, y_test = train_test_split(X_rand, y_rand, test_size=0.2, stratify=y_rand)\n\n print(np.concatenate([y_train[:n_labeled], [None] * (len(y_train) - n_labeled)]))\n\n trn_ds = Dataset(X_train, np.concatenate([y_train[:n_labeled], [None] * (len(y_train) - n_labeled)]))\n tst_ds = Dataset(X_test, y_test)\n\n return trn_ds, tst_ds\n\n\ndef main():\n global pos_filepath, neg_filepath_txt, pos_filepath_txt, dataset_filepath, csv_filepath, vectors_list, ids_list\n dataset_filepath = \"/Users/dndesign/Desktop/active_learning/vecteurs_et_infos/vectors_2015.txt\"\n csv_filepath = \"/Users/dndesign/Desktop/active_learning/donnees/corpus_2015_id-time-text.csv\"\n pos_filepath = \"/Users/dndesign/Desktop/active_learning/donnees/oriane_pos_id-time-text.csv\"\n pos_filepath_txt = \"/Users/dndesign/Desktop/active_learning/vecteurs_et_infos/vectors_2015_pos.txt\"\n neg_filepath_txt = \"/Users/dndesign/Desktop/active_learning/vecteurs_et_infos/vectors_2015_neg.txt\"\n vectors_list, ids_list = get_vectors_list(dataset_filepath)\n\n timestr = time.strftime(\"%Y%m%d_%H%M%S\")\n text_file = codecs.open(\"task_\" + str(timestr) + \".txt\", \"w\", \"utf-8\")\n\n print(\"Loading data...\")\n text_file.write(\"Loading data...\\n\")\n # Open this file\n t0 = time.time()\n file = openfile_txt(dataset_filepath)\n num_lines = sum(1 for line in file)\n print(\"Treating \" + str(num_lines) + \" entries...\")\n text_file.write(\"Treating : %s entries...\\n\" % str(num_lines))\n\n # Number of queries to ask human to label\n quota = 100\n E_out1, E_out2, E_out3, E_out4, E_out6, E_out7 = [], [], [], [], [], []\n balanced_file = balance_dataset()\n trn_ds, tst_ds = split_train_test(balanced_file)\n\n # model = SVM(kernel='linear')\n # model = LogisticRegression()\n model = RandomForestClassifier()\n\n ''' UncertaintySampling (Least Confident)\n\n UncertaintySampling : it queries the instances about which \n it is least certain how to label\n\n Least Confident : it queries the 
instance whose posterior \n    probability of being positive is nearest 0.5\n    '''\n    qs = UncertaintySampling(trn_ds, method='lc', model=LogisticRegression(C=.01))\n    # model.train(trn_ds)\n    model.fit(trn_ds.format_sklearn()[0], trn_ds.format_sklearn()[1])\n    predicted = model.predict(tst_ds.format_sklearn()[0])\n    score = accuracy_score(tst_ds.format_sklearn()[1], predicted)\n    E_out1 = np.append(E_out1, 1 - score)\n    # E_out1 = np.append(E_out1, 1 - model.score(tst_ds))\n\n    ''' UncertaintySampling (Max Margin) \n\n    '''\n    trn_ds2 = copy.deepcopy(trn_ds)\n    qs2 = USampling(trn_ds2, method='mm', model=SVM(kernel='linear'))\n    # model.train(trn_ds2)\n    model.fit(trn_ds2.format_sklearn()[0], trn_ds2.format_sklearn()[1])\n    predicted = model.predict(tst_ds.format_sklearn()[0])\n    score = accuracy_score(tst_ds.format_sklearn()[1], predicted)\n    E_out2 = np.append(E_out2, 1 - score)\n    # E_out2 = np.append(E_out2, 1 - model.score(tst_ds))\n\n    ''' CMB Sampling \n    Combination of active learning algorithms (distance-based (DIST), diversity-based (DIV)) \n    '''\n    trn_ds3 = copy.deepcopy(trn_ds)\n    qs3 = CMBSampling(trn_ds3, model=SVM(kernel='linear'))\n    # model.train(trn_ds3)\n    model.fit(trn_ds3.format_sklearn()[0], trn_ds3.format_sklearn()[1])\n    predicted = model.predict(tst_ds.format_sklearn()[0])\n    score = accuracy_score(tst_ds.format_sklearn()[1], predicted)\n    E_out3 = np.append(E_out3, 1 - score)\n    # E_out3 = np.append(E_out3, 1 - model.score(tst_ds))\n\n    ''' Random Sampling \n    Random : it chooses a query at random\n    '''\n    trn_ds4 = copy.deepcopy(trn_ds)\n    qs4 = RandomSampling(trn_ds4, random_state=1126)\n    # model.train(trn_ds4)\n    model.fit(trn_ds4.format_sklearn()[0], trn_ds4.format_sklearn()[1])\n    predicted = model.predict(tst_ds.format_sklearn()[0])\n    score = accuracy_score(tst_ds.format_sklearn()[1], predicted)\n    E_out4 = np.append(E_out4, 1 - score)\n    # E_out4 = np.append(E_out4, 1 - model.score(tst_ds))\n\n    ''' QueryByCommittee (Vote Entropy)\n\n    QueryByCommittee : it keeps a committee of classifiers and queries \n    the instance on which the committee members disagree; it also examines \n    unlabeled examples and selects only those that are most informative \n    for labeling\n\n    Vote Entropy : a way of measuring disagreement \n\n    Disadvantage : it does not consider the committee members’ class \n    distributions. 
It also misses some informative unlabeled examples \n    to label \n    '''\n    trn_ds6 = copy.deepcopy(trn_ds)\n    qs6 = QueryByCommittee(trn_ds6, disagreement='vote',\n                           models=[LogisticRegression(C=1.0),\n                                   LogisticRegression(C=0.01),\n                                   LogisticRegression(C=100)],\n                           random_state=1126)\n    # model.train(trn_ds6)\n    model.fit(trn_ds6.format_sklearn()[0], trn_ds6.format_sklearn()[1])\n    predicted = model.predict(tst_ds.format_sklearn()[0])\n    score = accuracy_score(tst_ds.format_sklearn()[1], predicted)\n    E_out6 = np.append(E_out6, 1 - score)\n    # E_out6 = np.append(E_out6, 1 - model.score(tst_ds))\n\n    ''' QueryByCommittee (Kullback-Leibler Divergence)\n\n    QueryByCommittee : it examines unlabeled examples and selects only \n    those that are most informative for labeling\n\n    Disadvantage : it misses some examples on which committee members \n    disagree\n    '''\n    trn_ds7 = copy.deepcopy(trn_ds)\n    qs7 = QueryByCommittee(trn_ds7, disagreement='kl_divergence',\n                           models=[LogisticRegression(C=1.0),\n                                   LogisticRegression(C=0.01),\n                                   LogisticRegression(C=100)],\n                           random_state=1126)\n    # model.train(trn_ds7)\n    model.fit(trn_ds7.format_sklearn()[0], trn_ds7.format_sklearn()[1])\n    predicted = model.predict(tst_ds.format_sklearn()[0])\n    score = accuracy_score(tst_ds.format_sklearn()[1], predicted)\n    E_out7 = np.append(E_out7, 1 - score)\n    # E_out7 = np.append(E_out7, 1 - model.score(tst_ds))\n\n    with sns.axes_style(\"darkgrid\"):\n        fig = plt.figure()\n        ax = fig.add_subplot(1, 1, 1)\n\n    query_num = np.arange(0, 1)\n    p1, = ax.plot(query_num, E_out1, 'red')\n    p2, = ax.plot(query_num, E_out2, 'blue')\n    p3, = ax.plot(query_num, E_out3, 'green')\n    p4, = ax.plot(query_num, E_out4, 'orange')\n    p6, = ax.plot(query_num, E_out6, 'black')\n    p7, = ax.plot(query_num, E_out7, 'purple')\n    plt.legend(\n        ('Least Confident', 'Max Margin', 'Distance Diversity CMB', 'Random Sampling', 'Vote Entropy', 'KL Divergence'),\n        loc=4)\n    plt.ylabel('Error (1 - Accuracy)')\n    plt.xlabel('Number of Queries')\n    plt.title('Active Learning - Query choice strategies')\n    plt.ylim([0, 1])\n    plt.show(block=False)\n\n    for i in range(quota):\n        print(\"\\n#################################################\")\n        print(\"Query number \" + str(i) + \" : \")\n        print(\"#################################################\\n\")\n        text_file.write(\"\\n#################################################\\n\")\n        text_file.write(\"Query number %s : \" % str(i))\n        text_file.write(\"\\n#################################################\\n\")\n\n        ask_id = qs.make_query()\n        print(\"\\033[4mUsing Uncertainty Sampling (Least confident) :\\033[0m\")\n        print(\"Tweet :\" + define_tweet_by_id(ask_id), end='', flush=True)\n        print(\"Simulating human response : \" + str(simulate_human_decision(ask_id)) + \" \\n\")\n        text_file.write(\"Using Uncertainty Sampling (Least confident) :\\n\")\n        text_file.write(\"Tweet : %s \\n\" % str(define_tweet_by_id(ask_id)))\n        text_file.write(\"Simulating human response : %s \\n\\n\" % str(simulate_human_decision(ask_id)))\n        trn_ds.update(ask_id, simulate_human_decision(ask_id))\n        # model.train(trn_ds)\n        model.fit(trn_ds.format_sklearn()[0], trn_ds.format_sklearn()[1])\n        predicted = model.predict(tst_ds.format_sklearn()[0])\n        score = accuracy_score(tst_ds.format_sklearn()[1], predicted)\n        E_out1 = np.append(E_out1, 1 - score)\n        # E_out1 = np.append(E_out1, 1 - model.score(tst_ds))\n\n        ask_id = qs2.make_query()\n        print(\"\\033[4mUsing Uncertainty Sampling (Max Margin) :\\033[0m\")\n        print(\"Tweet :\" + define_tweet_by_id(ask_id), end='', flush=True)\n        print(\"Simulating human response 
: \" + str(simulate_human_decision(ask_id)) + \" \\n\")\n text_file.write(\"Using Uncertainty Sampling (Smallest Margin) :\\n\")\n text_file.write(\"Tweet : %s \\n\" % str(define_tweet_by_id(ask_id)))\n text_file.write(\"Simulating human response : %s \\n\\n\" % str(simulate_human_decision(ask_id)))\n trn_ds2.update(ask_id, simulate_human_decision(ask_id))\n # model.train(trn_ds2)\n model.fit(trn_ds.format_sklearn()[0], trn_ds.format_sklearn()[1])\n predicted = model.predict(tst_ds.format_sklearn()[0])\n score = accuracy_score(tst_ds.format_sklearn()[1], predicted)\n E_out2 = np.append(E_out2, 1 - score)\n # E_out2 = np.append(E_out2, 1 - model.score(tst_ds))\n\n ask_id = qs3.make_query()\n print(\"\\033[4mUsing CMB Distance-Diversity Sampling :\\033[0m\")\n print(\"Tweet :\" + define_tweet_by_id(ask_id), end='', flush=True)\n print(\"Simulating human response : \" + str(simulate_human_decision(ask_id)) + \" \\n\")\n text_file.write(\"Using Uncertainty Sampling (Entropy) :\\n\")\n text_file.write(\"Tweet : %s \\n\" % str(define_tweet_by_id(ask_id)))\n text_file.write(\"Simulating human response : %s \\n\\n\" % str(simulate_human_decision(ask_id)))\n trn_ds3.update(ask_id, simulate_human_decision(ask_id))\n # model.train(trn_ds3)\n model.fit(trn_ds.format_sklearn()[0], trn_ds.format_sklearn()[1])\n predicted = model.predict(tst_ds.format_sklearn()[0])\n score = accuracy_score(tst_ds.format_sklearn()[1], predicted)\n E_out3 = np.append(E_out3, 1 - score)\n # E_out3 = np.append(E_out3, 1 - model.score(tst_ds))\n\n ask_id = qs4.make_query()\n print(\"\\033[4mUsing Random Sampling :\\033[0m\")\n print(\"Tweet :\" + define_tweet_by_id(ask_id), end='', flush=True)\n print(\"Simulating human response : \" + str(simulate_human_decision(ask_id)) + \" \\n\")\n text_file.write(\"Using Random Sampling :\\n\")\n text_file.write(\"Tweet : %s \\n\" % str(define_tweet_by_id(ask_id)))\n text_file.write(\"Simulating human response : %s \\n\\n\" % str(simulate_human_decision(ask_id)))\n trn_ds4.update(ask_id, simulate_human_decision(ask_id))\n # model.train(trn_ds4)\n model.fit(trn_ds.format_sklearn()[0], trn_ds.format_sklearn()[1])\n predicted = model.predict(tst_ds.format_sklearn()[0])\n score = accuracy_score(tst_ds.format_sklearn()[1], predicted)\n E_out4 = np.append(E_out4, 1 - score)\n # E_out4 = np.append(E_out4, 1 - model.score(tst_ds))\n\n ask_id = qs6.make_query()\n print(\"\\033[4mUsing QueryByCommittee (Vote Entropy) :\\033[0m\")\n print(\"Tweet :\" + define_tweet_by_id(ask_id), end='', flush=True)\n print(\"Simulating human response : \" + str(simulate_human_decision(ask_id)) + \" \\n\")\n text_file.write(\"Using QueryByCommittee (Vote Entropy) :\\n\")\n text_file.write(\"Tweet : %s \\n\" % str(define_tweet_by_id(ask_id)))\n text_file.write(\"Simulating human response : %s \\n\\n\" % str(simulate_human_decision(ask_id)))\n trn_ds6.update(ask_id, simulate_human_decision(ask_id))\n # model.train(trn_ds6)\n model.fit(trn_ds.format_sklearn()[0], trn_ds.format_sklearn()[1])\n predicted = model.predict(tst_ds.format_sklearn()[0])\n score = accuracy_score(tst_ds.format_sklearn()[1], predicted)\n E_out6 = np.append(E_out6, 1 - score)\n # E_out6 = np.append(E_out6, 1 - model.score(tst_ds))\n\n ask_id = qs7.make_query()\n print(\"\\033[4mUsing QueryByCommittee (KL Divergence) :\\033[0m\")\n print(\"Tweet :\" + define_tweet_by_id(ask_id), end='', flush=True)\n print(\"Simulating human response : \" + str(simulate_human_decision(ask_id)) + \" \\n\")\n text_file.write(\"Using QueryByCommittee (KL Divergence) 
:\\n\")\n text_file.write(\"Tweet : %s \\n\" % str(define_tweet_by_id(ask_id)))\n text_file.write(\"Simulating human response : %s \\n\\n\" % str(simulate_human_decision(ask_id)))\n trn_ds7.update(ask_id, simulate_human_decision(ask_id))\n # model.train(trn_ds7)\n model.fit(trn_ds.format_sklearn()[0], trn_ds.format_sklearn()[1])\n predicted = model.predict(tst_ds.format_sklearn()[0])\n score = accuracy_score(tst_ds.format_sklearn()[1], predicted)\n E_out7 = np.append(E_out7, 1 - score)\n # E_out7 = np.append(E_out7, 1 - model.score(tst_ds))\n\n ax.set_xlim((0, i + 1))\n ax.set_ylim((0, max(max(E_out1), max(E_out2), max(E_out3), max(E_out4), max(E_out6), max(E_out7)) + 0.1))\n query_num = np.arange(0, i + 2)\n p1.set_xdata(query_num)\n p1.set_ydata(E_out1)\n p2.set_xdata(query_num)\n p2.set_ydata(E_out2)\n p3.set_xdata(query_num)\n p3.set_ydata(E_out3)\n p4.set_xdata(query_num)\n p4.set_ydata(E_out4)\n p6.set_xdata(query_num)\n p6.set_ydata(E_out6)\n p7.set_xdata(query_num)\n p7.set_ydata(E_out7)\n\n plt.draw()\n\n t2 = time.time()\n time_total = t2 - t0\n print(\"\\n\\n\\n#################################################\\n\")\n print(\"Execution time : %fs \\n\\n\" % time_total)\n text_file.write(\"\\n\\n\\n#################################################\\n\")\n text_file.write(\"Execution time : %fs \\n\" % time_total)\n text_file.close()\n input(\"Press any key to save the plot...\")\n plt.savefig('task_' + str(timestr) + '.png')\n\n print(\"Done\")\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"task_balanced_dataset.py","file_name":"task_balanced_dataset.py","file_ext":"py","file_size_in_byte":18100,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"472306256","text":"import sys\nfrom django.core.files.storage import FileSystemStorage\nfrom django.conf import settings\nfrom django.db import models\nfrom django.db.models import Avg\nfrom django.db.models.signals import post_save, pre_save\nfrom django.dispatch import receiver\n\nfrom django.utils.text import slugify\nfrom django.urls import reverse\nfrom sellers.models import SellerAccount\nimport os\nimport shutil\nfrom PIL import Image\nimport random\nfrom django.core.files import File\n\n\nfrom django.db import IntegrityError\nfrom django.shortcuts import render_to_response\n\n\nUser=settings.AUTH_USER_MODEL\n\n\ndef my_media_location(instance, filename):\n name_ = filename.split('.')\n nazwa = instance.slug\n koncowka = name_[1]\n nowanazwa = \"%s.%s\" % (nazwa, koncowka)\n return \"%s/%s\" % (instance.slug, nowanazwa)\n\n\ndef download_media_location(instance, filename):\n return \"%s/%s\" %(instance.slug, filename)\n\nclass Product(models.Model):\n seller=models.ForeignKey(SellerAccount, default=2, on_delete=models.CASCADE)\n media = models.ImageField(blank=True, null=True, upload_to=my_media_location)\n\n title = models.CharField(max_length=120)\n slug=models.SlugField(blank=True, unique=True)\n description = models.TextField(max_length=220)\n price = models.DecimalField(max_digits=7, decimal_places=2, default=9.99, blank=True)\n sale_price = models.DecimalField(max_digits=7, decimal_places=2, default=9.99, blank=True)\n sale_active = models.BooleanField(default=False)\n\n def get_absolute_url(self):\n view_name = \"products:detail_slug\"\n return reverse(view_name, kwargs={\"slug\":self.slug})\n\n\n\n def get_edit_url(self):\n view_name = \"sellers:product_edit\"\n return reverse(view_name, kwargs={\"pk\":self.id})\n\n\n\n def __str__(self):\n return 
self.title\n\n\n    def get_download(self):\n        view_name = \"products:download_slug\"\n        url = reverse(view_name, kwargs={\"slug\":self.slug})\n        return url\n\n    @property\n    def get_price(self):\n        if self.sale_price and self.sale_active:\n            return self.sale_price\n        return self.price\n\n\n\n    def get_averrat(self):\n        p = None\n        one = 0\n        rat = 0\n        try:\n            p = Product.objects.get(title=self)\n            one = p.productrating_set.aggregate(Avg('rating'))\n            rat = '{:.2f}'.format(one['rating__avg'])\n        except:\n            one = 0\n            rat = 0\n        return rat\n\n\n    def get_html_price(self):\n        price = self.get_price\n        if price == self.sale_price:\n            return self.price\n        elif price < self.sale_price:\n            return self.sale_price\n        else:\n            return \" %s %s\" %(self.sale_price, self.price)\n\n\n\ndef create_slug(instance, new_slug=None):\n    slug = slugify(instance.title)\n    if new_slug is not None:\n        slug = new_slug\n\n    qs = Product.objects.filter(slug=slug)\n    exists = qs.exists()\n    if exists:\n        new_slug = \"%s-%s\" %(slug, qs.first().id)\n        return create_slug(instance, new_slug=new_slug)\n    return slug\n\n\n\n\ndef product_pre_save_receiver(sender,instance,*args,**kwargs):\n    if not instance.slug:\n        instance.slug = create_slug(instance)\n\npre_save.connect(product_pre_save_receiver, sender=Product)\n\n\n\ndef create_new_thumb(media_path, instance, owner_slug, max_length, max_width):\n    filename = os.path.basename(media_path)\n    thumb = Image.open(media_path)\n    size = (max_length, max_width)\n    thumb.thumbnail(size, Image.ANTIALIAS)\n    temp_loc = \"%s/%s/tmp\" %(settings.MEDIA_ROOT, owner_slug)\n    if not os.path.exists(temp_loc):\n        os.makedirs(temp_loc)\n    temp_file_path = os.path.join(temp_loc, filename)\n    temp_image = open(temp_file_path, \"wb\")\n    thumb.save(temp_image)\n    thumb_data = open(temp_file_path, \"r+b\") # 'rb+'\n    thumb_file = File(thumb_data)\n\n    instance.media.save(filename, thumb_file)\n    shutil.rmtree(temp_loc, ignore_errors=True)\n    return True\n\n\n\ndef product_post_save_receiver(sender, instance, created, *args, **kwargs):\n    if instance.media:\n        try:\n            hd, hd_created = Thumbnail.objects.get_or_create(product=instance, type='hd')\n            sd, sd_created = Thumbnail.objects.get_or_create(product=instance, type='sd')\n            micro, micro_created = Thumbnail.objects.get_or_create(product=instance, type='micro')\n            hd_max=(500,500)\n            sd_max=(350,350)\n            micro_max=(150,150)\n            media_path = instance.media.path\n            owner_slug = instance.slug\n            if hd_created:\n                try:\n                    create_new_thumb(media_path, hd, owner_slug, hd_max[0], hd_max[1])\n                except:\n                    pass\n            if sd_created:\n                try:\n                    create_new_thumb(media_path, sd, owner_slug, sd_max[0], sd_max[1])\n                except:\n                    pass\n            if micro_created:\n                try:\n                    create_new_thumb(media_path, micro, owner_slug, micro_max[0], micro_max[1])\n                except:\n                    pass\n        except:\n            print('No media in Product instance')\n\npost_save.connect(product_post_save_receiver, sender=Product)\n\n\n\n\n\ndef thumbnail_location(instance, filename):\n    name_ = filename.split('.')\n    nazwa = instance.product.slug\n    koncowka = name_[-1]\n    nowanazwa = \"%s.%s\" % (nazwa, koncowka)\n    return \"%s/%s\" %(nazwa, nowanazwa)\n\n\nTHUMB_CHOICES=(\n    (\"hd\",\"HD\"),\n    (\"sd\", \"SD\"),\n    (\"micro\", \"Micro\"),\n    )\n\n\nclass Thumbnail(models.Model):\n    product = models.ForeignKey(\"Product\", on_delete=models.CASCADE)\n    user = models.ForeignKey(settings.AUTH_USER_MODEL, null=True, blank=True, on_delete=models.CASCADE) ##this is the uploading user\n    type = models.CharField(max_length=20, choices=THUMB_CHOICES, default='hd')\n    height = models.CharField(max_length=20, null=True, 
blank=True)\n width = models.CharField(max_length=20, null=True, blank=True)\n media = models.ImageField(\n height_field = \"height\",\n width_field = \"width\",\n blank=True, null=True,\n upload_to=thumbnail_location)\n\n def __str__(self):\n if self.media:\n return \"%s\" % (self.media.path)\n else:\n return \"%s\" %(\"empty\")\n\n @property\n def image_url(self):\n if self.media and hasattr(self.media, 'url'):\n return self.media.url\n\n\nclass MyProducts(models.Model):\n user=models.OneToOneField(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)\n products = models.ManyToManyField(Product, blank=True)\n\n def __str__(self):\n return \"%s\" %(self.products.count())\n\n\n class Meta:\n verbose_name = \"My Products\"\n verbose_name_plural = \"My Products\"\n\n\n\n\n\n\n\nclass ProductRating(models.Model):\n user=models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)\n product= models.ForeignKey(Product, on_delete=models.CASCADE)\n rating=models.IntegerField(null=True, blank=True)\n verified=models.BooleanField(default=False)\n\n def __str__(self):\n return \"%s\" %(self.rating)\n\n\nclass CuratedProducts(models.Model):\n section_name = models.CharField(max_length=120)\n slug = models.SlugField(blank=True, unique=True)\n products = models.ManyToManyField(Product, blank=True)\n active = models.BooleanField(default=True)\n\n def __str__(self):\n return self.section_name\n\n class Meta:\n verbose_name = \"Curated products\"\n verbose_name_plural = \"Curated products\"\n\n\n\n\ndef create_slug_curated(instance, new_slug=None):\n slug = slugify(instance.section_name)\n if new_slug is not None:\n slug = new_slug\n\n qs = CuratedProducts.objects.filter(slug=slug)\n exists = qs.exists()\n if exists:\n new_slug = \"%s-%s\" %(slug, qs.first().id)\n return create_slug(instance, new_slug=new_slug)\n return slug\n\n\n\ndef curated_pre_save_receiver(sender,instance,*args,**kwargs):\n if not instance.slug:\n instance.slug = create_slug_curated(instance)\n\npre_save.connect(curated_pre_save_receiver, sender=CuratedProducts)\n","sub_path":"products/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":8153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"379545728","text":"# -*- coding: utf-8 -*\nfrom flask.ext.wtf import Form\nfrom wtforms import StringField, SubmitField, TextAreaField\nfrom wtforms.validators import Required, Length, Email, Regexp, EqualTo\nfrom wtforms import ValidationError\nfrom ..models import User\nfrom flask.ext.pagedown.fields import PageDownField\n\nDEFAULT_NoteBook_Contents=\"\"\"### Miscellanise\n### Essential\n### 进阶\n### 收获\n### 不足\n### 批判本笔记\n\"\"\"\n\nDEFAULT_NoteSource_Contents=\"\"\"### Miscellanise\n### Essential\n### 进阶\n### 总结\n### 收获\n### 不足\n### 源码赏析\n* #### 项目结构 \n* #### 语句赏析 \n* #### 珠海拾遗\n\n### 批判本笔记\n\"\"\"\n\nclass NoteForm(Form):\n notename = StringField('Note Name', validators=[Required()])\n about_this = TextAreaField('brief introduction', validators=[Required()])\n body = PageDownField( validators=[Required()])\n submit = SubmitField('Submit')\n\n","sub_path":"NoteBook/note/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"629247093","text":"from unittest.mock import patch, Mock, PropertyMock, MagicMock\nfrom mixer.backend.django import mixer\nfrom django.test import TestCase\nfrom django.conf import settings\nfrom core.middleware import 
*\n\nclass AuthMiddlewareTests(TestCase):\n\n def setUp(self):\n self.request = Mock(path=\"/\", stratako_refresh_token=None)\n self.response = Mock()\n self.callback = MagicMock()\n self.callback.return_value = self.response\n self.mw = AuthenticationMiddleware(self.callback)\n self.user = mixer.blend(User, username=\"john\")\n \n\n @patch(\"core.middleware.User.from_token\")\n def test_middleware_assigns_from_output_to_request(self, mock_from):\n self.request.META = {\"HTTP_AUTHORIZATION\": \"Bearer 12345\"}\n response = self.mw(self.request)\n mock_from.assert_called_with(\"12345\")\n self.assertEqual(self.request.user, mock_from.return_value)\n \n\n @patch(\"core.middleware.User.from_token\")\n def test_middleware_does_nothing_if_no_stratako_refresh_token_flag(self, mock_from):\n response = self.mw(self.request)\n self.assertFalse(self.response.set_cookie.called)\n self.assertFalse(self.response.delete_cookie.called)\n \n\n @patch(\"core.middleware.User.from_token\")\n def test_middleware_deletes_stratako_refresh_token_if_false_flag(self, mock_from):\n self.request.stratako_refresh_token = False\n response = self.mw(self.request)\n self.assertFalse(self.response.set_cookie.called)\n self.response.delete_cookie.assert_called_with(\"stratako_refresh_token\")\n \n\n @patch(\"core.middleware.User.from_token\")\n def test_middleware_can_set_cookie(self, mock_from):\n self.request.stratako_refresh_token = \"ABCDEFGH\"\n response = self.mw(self.request)\n self.assertFalse(self.response.delete_cookie.called)\n self.response.set_cookie.assert_called_with(\n \"stratako_refresh_token\", value=\"ABCDEFGH\", httponly=True, max_age=31536000\n )","sub_path":"core/tests/test_middleware.py","file_name":"test_middleware.py","file_ext":"py","file_size_in_byte":1993,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"}{"seq_id":"525161764","text":"# Given an unsorted array of integers, find the length of the longest increasing subsequence. \n# \n# Example: \n# \n# Input: [10,9,2,5,3,7,101,18]\n# Output: 4 \n# Explanation: The longest increasing subsequence is [2,3,7,101], and its length is 4. \n# \n# Notes: \n# \n# \n# There may be more than one combination that forms a longest increasing subsequence; you only need to return the length. \n# Your algorithm should run in O(n^2) time. \n# \n# \n# Follow-up: can you reduce the time complexity to O(n log n)? 
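\n#\n# An equivalent O(n log n) sketch using the standard library's bisect module, shown for illustration only (it is not part of the original submission): keep a tails list where tails[k] is the smallest possible tail value of an increasing subsequence of length k + 1. For the sample input [10,9,2,5,3,7,101,18] the list evolves as [10] -> [9] -> [2] -> [2,5] -> [2,3] -> [2,3,7] -> [2,3,7,101] -> [2,3,7,18], so the answer is len(tails) == 4.\nimport bisect\n\ndef length_of_lis_bisect(nums):\n    tails = []\n    for x in nums:\n        pos = bisect.bisect_left(tails, x)  # first index with tails[pos] >= x\n        if pos == len(tails):\n            tails.append(x)  # x extends the longest increasing subsequence found so far\n        else:\n            tails[pos] = x  # x becomes a smaller tail for subsequences of length pos + 1\n    return len(tails)\n\nassert length_of_lis_bisect([10, 9, 2, 5, 3, 7, 101, 18]) == 4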
\n# Related Topics: Binary Search, Dynamic Programming\n\n\n# leetcode submit region begin(Prohibit modification and deletion)\nfrom typing import List  # needed for the List[int] annotation below\n\nclass Solution:\n def lengthOfLIS(self, nums: List[int]) -> int:\n # DP\n # if not nums: return 0\n # N = len(nums)\n # dp = [1] * N\n # for i in range(1, N):\n # for j in range(i):\n # if nums[j] < nums[i]:\n # dp[i] = max(dp[i], dp[j] + 1)\n # return max(dp)\n\n # Greedy + binary search\n N = len(nums)\n if N < 2: return N\n tails = [nums[0]]\n for i in range(1, N):\n if nums[i] > tails[-1]:\n tails.append(nums[i])\n else:\n left, right = 0, len(tails) - 1\n while left < right:\n mid = (left + right) >> 1\n if tails[mid] < nums[i]:\n left = mid + 1\n else:\n right = mid\n tails[left] = nums[i]\n return len(tails)\n\n\n# leetcode submit region end(Prohibit modification and deletion)\n","sub_path":"Week09/code/[300]最长上升子序列.py","file_name":"[300]最长上升子序列.py","file_ext":"py","file_size_in_byte":1556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"}{"seq_id":"596814961","text":"import math\nimport collections\n\ndef is_valid_sudoku_pythonic(partial_assignment):\n # The assignment is valid if no digit repeats within any row, column or region\n region_size = int(math.sqrt(len(partial_assignment)))\n identifiers = [key for i,row in enumerate(partial_assignment) for j,c in enumerate(row) if c != 0 for key in ((i,str(c)),(str(c),j),(i//region_size,j//region_size,str(c)))]\n return max(collections.Counter(identifiers).values(), default=0) <= 1\n\neasy = [[2,9,0,0,0,0,0,7,0],\n [3,0,6,0,0,8,4,0,0],\n [8,0,0,0,4,0,0,0,2],\n [0,2,0,0,3,1,0,0,7],\n [0,0,0,0,8,0,0,0,0],\n [1,0,0,9,5,0,0,6,0],\n [7,0,0,0,9,0,0,0,1],\n [0,0,1,2,0,0,3,0,6],\n [0,3,0,0,0,0,0,5,7]]\nans = is_valid_sudoku_pythonic(easy)\nprint(ans)","sub_path":"EPI/sudoku2.py","file_name":"sudoku2.py","file_ext":"py","file_size_in_byte":683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}{"seq_id":"402530636","text":"import re, time, urllib.request, ssl, logging, subprocess, os, pyautogui, datetime, sys\nfrom bs4 import BeautifulSoup\nfrom common.download_path import path\nfrom common.download_path_diarios import path as path_diarios\nfrom crawlerJus import crawlerJus\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.chrome.options import Options\n\nssl._create_default_https_context = ssl._create_unverified_context\n\nclass publicacoes_diarios_oficiais(crawlerJus):\n\tdef __init__(self, data=None):\n\t\toptions = Options()  # prepared for headless runs, but not yet passed to the Chrome drivers below\n\t\toptions.headless = True\n\t\tcrawlerJus.__init__(self)\n\t\thoje = datetime.date.today().strftime(\"%Y%m%d\")\n\t\tself.ano = hoje[:4]\n\t\tself.mes = hoje[4:6]\n\t\tself.dia = hoje[6:]\n\t\tif len(self.dia)==1:\n\t\t self.dia = \"0\" + self.dia\n\t\t# configure the log file only after dia/mes/ano have been set\n\t\tlogging.basicConfig(filename=self.cwd+self.dia+self.mes+self.ano+'.log',level=logging.INFO)\n\t\tself.diarios_a_baixar = [self.baixa_stf,self.baixa_ro,self.baixa_rr,self.baixa_pa,self.baixa_ma,self.baixa_to,self.baixa_pi,self.baixa_stj,self.baixa_trf1,\n\t\tself.baixa_trf5,self.baixa_go,self.baixa_rs,self.baixa_ac,self.baixa_trf4,self.baixa_df,self.baixa_sc,self.baixa_rn,self.baixa_trf3,self.baixa_pe,\n\t\tself.baixa_sp,self.baixa_ce,self.baixa_al,self.baixa_ms,self.baixa_am,self.baixa_pr,self.baixa_trt,self.baixa_es,self.baixa_ap,self.baixa_pb,self.baixa_se,\n\t\tself.baixa_mt,self.baixa_trf2]\t\t\n\t\tself.data = data\n\t\tif self.data:\n\t\t\tself.ano_pesquisar = self.data[:4]\n\t\t\tself.mes_pesquisar = self.data[4:6]\n\t\t\tself.dia_pesquisar = self.data[6:]\n\n\t\t# TODO: MG, RJ and BA are still missing\n\n\tdef baixaEsaj(self,inicio, fim, pag):\n\t\tXPathInicial = 
\"//*[@id=\\\"cadernosCad\\\"]/option[\"\n\t\tXPathFinal = \"]\"\n\t\tfor i in range(inicio,fim):\n\t\t\tdriver = webdriver.Chrome(self.chromedriver)\n\t\t\tdriver.get(pag)\n\t\t\tdriver.find_element_by_xpath(XPathInicial+str(int(i))+XPathFinal).click() \n\t\t\tdriver.find_element_by_xpath(\"//*[@id=\\\"download\\\"]\").click()\n\t\t\tdriver.close()\n\t\t\ttime.sleep(1)\n\n\tdef baixa_ac(self, todos=None):\n\t\tdriver = webdriver.Chrome(self.chromedriver)\n\t\tac_dje = \"http://diario.tjac.jus.br/edicoes.php\"\n\t\tif todos:\n\t\t\t# https://diario.tjac.jus.br/edicoes.php?Ano=2018&Mes=1\n\t\t\tcounter = 36\n\t\t\tdriver.get(ac_dje)\n\t\t\taux = True\n\t\t\twhile aux:\n\t\t\t\taux = int(input('Escolha mes e ano na página ou digite 0 para sair'))\n\t\t\t\tpag_ac = driver.page_source\n\t\t\t\tpag_ac_bs = BeautifulSoup(pag_ac,'html.parser')\n\t\t\t\tlink_ac_i = pag_ac_bs.findAll('a',attrs={'title':'Baixar'})\n\t\t\t\tfor l in link_ac_i:\n\t\t\t\t\tlink_ac_f = \"http://diario.tjac.jus.br\"+str(l['href'])\n\t\t\t\t\tself.baixa_html_pdf(link_ac_f,'TJAC_'+str(counter))\n\t\t\t\t\tsubprocess.Popen('mv %s/%s.pdf \"%s/Diarios_ac/dir_005/%s.pdf\"' % (os.getcwd(),'TJAC_'+str(counter),path_diarios,'TJAC_'+str(counter)), shell=True)\n\t\t\t\t\tcounter += 1\n\t\telse:\n\t\t\tpag_ac = self.baixa_pag(ac_dje)\n\t\t\tpag_ac_bs = BeautifulSoup(pag_ac,'html.parser')\n\t\t\tlink_ac_i = pag_ac_bs.find('a',attrs={'title':'Baixar'})\n\t\t\tlink_ac_f = \"http://diario.tjac.jus.br\"+str(link_ac_i['href'])\n\t\t\tdriver.get(link_ac_f)\n\t\t\ttime.sleep(5)\n\t\t\tsubprocess.Popen('cp %s/*.pdf %s/%s.pdf' % (path,path_diarios,'TJAC_'+str(self.dia)+str(self.mes)+str(self.ano)), shell=True)\n\t\t\tsubprocess.Popen('rm %s/*.pdf' % (path), shell=True)\n\n\tdef baixa_al(self):\n\t\tif self.data:\n\t\t\tfrom crawler_jurisprudencia_tjal import crawler_jurisprudencia_tjal\n\t\t\tc = crawler_jurisprudencia_tjal()\n\t\t\tc.download_diario_retroativo(data_especifica=self.dia_pesquisar+'/'+self.mes_pesquisar+'/'+self.ano_pesquisar)\n\t\telse:\n\t\t\tpag_al = \"http://www2.tjal.jus.br/cdje/index.do\"\n\t\t\tself.baixaEsaj(1,3,pag_al)\n\t\t\tsubprocess.Popen('mv %s/*.pdf %s/TJAL_%s.pdf' % (path,path_diarios,self.dia+self.mes+self.ano), shell=True)\n\n\tdef baixa_am(self):\n\t\tif self.data:\n\t\t\tfrom crawler_jurisprudencia_tjam import crawler_jurisprudencia_tjam\n\t\t\tc = crawler_jurisprudencia_tjam()\n\t\t\tc.download_diario_retroativo(data_especifica=self.dia_pesquisar+'/'+self.mes_pesquisar+'/'+self.ano_pesquisar)\n\t\telse:\n\t\t\tpag_am = \"http://consultasaj.tjam.jus.br/cdje/index.do\"\n\t\t\tbaixaEsaj(2,5,pag_am)\n\t\t\tsubprocess.Popen('mv %s/*.pdf %s/TJCE_%s.pdf' % (path,path_diarios,self.dia+self.mes+self.ano), shell=True)\n\n\tdef baixa_ap(self, ultimo_download=2765):\n\t\tdriver = webdriver.Chrome(self.chromedriver)\n\t\tdriver.get('http://tucujuris.tjap.jus.br/tucujuris/pages/consultar-dje/consultar-dje.html')\n\t\ttime.sleep(5)\n\t\tdriver.execute_script(\"document.getElementById('dje-%s').click()\" % (str(ultimo_download+1),))\n\t\tprint(driver.execute_script(\"document.getElementById('dje-%s')\" % (str(ultimo_download+1),)))\n\t\tdriver.execute_script(\"%s.download\" % (str(ultimo_download+1),))\n\t\ttime.sleep(5)\n\t\t# atualizar ultimo_download!!!\n\t\n\tdef baixa_ba(self, primeiro=1207, ultimo=2502):\n\t\tfrom crawler_jurisprudencia_tjba import crawler_jurisprudencia_tjba\n\t\tc = crawler_jurisprudencia_tjba()\n\t\tc.download_diario_retroativo(path_diarios, primeiro=primeiro, ultimo=ultimo)\n\n\tdef 
baixa_ce(self):\n\t\tif self.data:\n\t\t\tfrom crawler_jurisprudencia_tjce import crawler_jurisprudencia_tjce\n\t\t\tc = crawler_jurisprudencia_tjce()\n\t\t\tc.download_diario_retroativo(data_especifica=self.dia_pesquisar+'/'+self.mes_pesquisar+'/'+self.ano_pesquisar)\n\t\telse:\n\t\t\tpag_ce = \"http://esaj.tjce.jus.br/cdje/index.do\"\n\t\t\tbaixaEsaj(2,3,pag_ce)\n\t\t\tsubprocess.Popen('mv %s/*.pdf %s/TJCE_%s.pdf' % (path,path_diarios,self.dia+self.mes+self.ano), shell=True)\n\n\tdef baixa_df(self):\n\t\tdriver = webdriver.Chrome(self.chromedriver)\n\t\tdf_dje = \"https://dje.tjdft.jus.br/dje/djeletronico?visaoId=tjdf.djeletronico.comum.internet.apresentacao.VisaoDiarioEletronicoInternetPorData\"\n\t\tpag_df = self.baixa_pag(df_dje)\n\t\tpag_df_bs = BeautifulSoup(pag_df,'html.parser')\n\t\tlink_df_i = pag_df_bs.find('a',href=re.compile('https://dje.tjdft.jus.br/dje/jsp/dje/DownloadDeDiario.jsp'))\n\t\tlink_df_f = str(link_df_i['href'])\n\t\tdriver.get(link_df_f)\n\t\ttime.sleep(15)\n\t\tsubprocess.Popen('mv %s/*.PDF %s/TJDF_%s.pdf' % (path,path_diarios,str(self.dia)+str(self.mes)+str(self.ano)), shell=True)\n\n\tdef baixa_es(self):\n\t\tdiario_es = open(self.dia+self.mes+self.ano+\"es.txt\",\"a\",encoding='utf-8')\n\t\tdje_es = \"https://sistemas.tjes.jus.br/ediario/\"\n\t\tpag_es = self.baixa_pag(dje_es)\n\t\tpag_es_bs = BeautifulSoup(pag_es,'html.parser')\n\t\tlink_es_i = pag_es_bs.find_all('a')\n\t\tlink_es_f = []\n\t\tre_links_es = re.compile(r\"/ediario/index.php/component.+\")\n\t\tfor l in link_es_i:\n\t\t\taux_link = re.search(re_links_es,l['href'])\n\t\t\tif aux_link != None:\n\t\t\t\tlink_es_f.append(\"https://sistemas.tjes.jus.br\"+aux_link.group(0))\n\t\tfor l in link_es_f:\n\t\t\ttry:\n\t\t\t\tpag_aux = self.baixa_pag(l)\n\t\t\t\tsoup = BeautifulSoup(pag_aux, 'html.parser')\n\t\t\t\ttexto_es_i = soup.get_text()\n\t\t\t\ttexto_es_f = re.search(r\"Versão revista(.*?)O e-diário \\(Diário da Justiça Eletrônico\",texto_es_i,re.DOTALL)\n\t\t\t\tif texto_es_f != None:\n\t\t\t\t\ttexto_es_ff = re.sub(r\"Versão revista\",\"\",texto_es_f.group(0))\n\t\t\t\t\ttexto_es_ff = re.sub(r\"O e-diário \\(Diário da Justiça Eletrônico\",\"\",texto_es_ff)\n\t\t\t\t\tdiario_es.write(texto_es_ff)\n\t\t\t\t\tdiario_es.write(\"}}\")\n\t\t\texcept:\n\t\t\t\tpass\n\t\tdiario_es.close()\n\t\tsubprocess.Popen('mv %s/%s*.pdf %s' % (os.getcwd(),self.dia+self.mes+self.ano+\"es.txt\",path_diarios), shell=True)\n\n\tdef baixa_go(self):\n\t\tpastas = ['6235'] # 2018 e 2019 (nov)\n\t\tgo_dje = \"http://tjdocs.tjgo.jus.br/pastas/\"\n\t\tfor pasta in pastas:\n\t\t\tprint('Pasta ',pasta)\n\t\t\tpag_go = self.baixa_pag(go_dje+pasta)\n\t\t\tpag_go_bs = BeautifulSoup(pag_go, 'html.parser')\n\t\t\tlink_go_i = pag_go_bs.find_all('a',href=re.compile(r'/pastas/\\d+'))\n\t\t\tlinks_diarios_GO =[]\n\t\t\tfor i in link_go_i:\n\t\t\t\tlinks_diarios_GO.append(str(i['href']))\n\t\t\tlinks_diarios_GO = list(set(links_diarios_GO[1:]))\n\t\t\tfor link in links_diarios_GO:\n\t\t\t\tfor j in range(1,4):\n\t\t\t\t\tpag_diarios_mes = self.baixa_pag('http://tjdocs.tjgo.jus.br%s?page=%s' % (link,str(j)))\n\t\t\t\t\tpag_diarios_mes_bs = BeautifulSoup(pag_diarios_mes, 'html.parser')\n\t\t\t\t\tlinks_diarios = pag_diarios_mes_bs.find_all('a',href=re.compile(r'/documentos/\\d+'))\n\t\t\t\t\tfor link_d in links_diarios:\n\t\t\t\t\t\tlink_final = 'http://tjdocs.tjgo.jus.br'+link_d['href']+'/download'\n\t\t\t\t\t\tdriver = 
webdriver.Chrome(self.chromedriver)\n\t\t\t\t\t\tdriver.get(link_final)\n\t\t\t\t\t\ttime.sleep(10)\n\t\t\t\t\t\tdriver.close()\n\t\t\t\t\t\tsubprocess.Popen('mv %s/*.pdf \"%s/Diarios_go/\"' % (path,path_diarios), shell=True)\n\n\tdef baixa_ma(self):\n\t\t# if self.data:\n\t\t# \tfrom crawler_jurisprudencia_tjma import crawler_jurisprudencia_tjma\n\t\t# \tc = crawler_jurisprudencia_tjma()\n\t\t# \tc.download_diario_retroativo(data_especifica=self.dia_pesquisar+self.mes_pesquisar+self.ano_pesquisar)\n\t\t# \treturn\n\t\tma_dje = \"http://www.tjma.jus.br/inicio/diario\"\n\t\tdriver = webdriver.Chrome(self.chromedriver)\n\t\tdriver.get(ma_dje)\n\t\tdriver.find_element_by_xpath('//*[@id=\"btnConsultar\"]').click()\n\t\tdriver.find_element_by_xpath('//*[@id=\"table1\"]/tbody/tr[2]/td[3]/a[1]').click()\n\t\ttime.sleep(3)\n\t\tdriver.switch_to.window(driver.window_handles[-1])\n\t\turl = driver.current_url\n\t\tdriver.close()\n\t\tprint(url)\n\t\tresponse = urllib.request.urlopen(url,timeout=15)\n\t\tfile = open(str(self.dia+self.mes+self.ano)+\"MA.pdf\", 'wb')\n\t\ttime.sleep(1)\n\t\tfile.write(response.read())\n\t\tfile.close()\n\n\tdef baixa_mg(self):\n\t\tpass\n\n\tdef baixa_mt(self):\n\t\tdje_mt = \"http://www.tjmt.jus.br/dje\"\n\t\tpag_mt = self.baixa_pag(dje_mt)\n\t\tpag_mt_bs = BeautifulSoup(pag_mt,'html.parser')\n\t\tlink_mt_i = pag_mt_bs.find_all('div', attrs={\"class\":\"cadernos-ultima-edicao\"})\n\t\tlink_mt_f = re.findall(r'href=\"(.*?)\"',str(link_mt_i))\n\t\tfor l in range(1,len(link_mt_f)):\n\t\t\tlink_mt_f[l] = re.sub(r' ','%20',link_mt_f[l])\n\t\t\tlink_mt_f[l] = re.sub(r'ç','%C3%A7',link_mt_f[l])\n\t\t\tlink_mt_f[l] = re.sub(r'â','%C3%A2',link_mt_f[l])\n\t\t\tlink_mt_f[l] = re.sub(r'ª','%C2%AA',link_mt_f[l])\n\t\t\tresponse = urllib.request.urlopen(link_mt_f[l],timeout=30)\n\t\t\tfilename = 'TJMT_'+str(l)+'_'+self.dia+self.mes+self.ano+\".pdf\"\n\t\t\tfile = open(filename, 'wb')\n\t\t\tfile.write(response.read())\n\t\t\tfile.close()\n\t\t\ttime.sleep(5)\n\t\t\tsubprocess.Popen('mv %s/*.pdf %s/%s.pdf' % (os.getcwd(),path_diarios,filename), shell=True)\n\n\tdef baixa_ms(self):\n\t\tif self.data:\n\t\t\tfrom crawler_jurisprudencia_tjms import crawler_jurisprudencia_tjms\n\t\t\tc = crawler_jurisprudencia_tjms()\n\t\t\tc.download_diario_retroativo(data_especifica=self.dia_pesquisar+'/'+self.mes_pesquisar+'/'+self.ano_pesquisar)\n\t\telse:\n\t\t\tpag_ms = \"https://www.tjms.jus.br/cdje/index.do\"\n\t\t\tself.baixaEsaj(3,5,pag_ms)\n\t\t\tsubprocess.Popen('mv %s/*.pdf %s/TJCE_%s.pdf' % (path,path_diarios,self.dia+self.mes+self.ano), shell=True)\n\n\tdef baixa_pa(self):\n\t\tpa_dje = \"http://dje.tjpa.jus.br/\"\n\t\tdriver = webdriver.Chrome(self.chromedriver)\n\t\tdriver.get(pa_dje)\n\t\ttime.sleep(4)\n\t\tdriver.find_element_by_css_selector(\"a[ng-click='abrirPDFGrid(urlUltimoDiario)']\").click()\n\t\ttime.sleep(7)\n\t\tdriver.close()\t\n\n\tdef baixa_pb(self):\n\t\tpb_dje = \"https://app.tjpb.jus.br/dje/paginas/diario_justica/publico/buscas.jsf\"\n\t\tdriver = webdriver.Chrome(self.chromedriver)\n\t\tdriver.get(pb_dje)\n\t\tdriver.find_element_by_xpath(\"//*[@id=\\\"dje-pdf-recentes\\\"]/li[1]/a\").click()\n\t\ttime.sleep(15)\n\t\tsubprocess.Popen('mv %s/*.pdf %s/TJPB_%s.pdf' % (path,path_diarios,self.dia+self.mes+self.ano), shell=True)\n\n\tdef baixa_pe(self):\n\t\tdriver = webdriver.Chrome(self.chromedriver)\n\t\tpe_dje = \"https://www.tjpe.jus.br/dje/djeletronico?visaoId=tjdf.djeletronico.comum.internet.apresentacao.VisaoDiarioEletronicoInternetPorData\"\n\t\tpag_pe = 
self.baixa_pag(pe_dje)\n\t\tpag_pe_bs = BeautifulSoup(pag_pe,'html.parser')\n\t\tlink_pe_i = pag_pe_bs.find('a',attrs={\"class\":\"downloadPdf\"})\n\t\tlink_pe_f = str(link_pe_i['href'])\n\t\tdriver.get(link_pe_f)\n\t\ttime.sleep(15)\n\t\tsubprocess.Popen('mv %s/*.PDF %s/TJPE_%s.pdf' % (path,path_diarios,str(self.dia)+str(self.mes)+str(self.ano)), shell=True)\n\n\tdef baixa_pi(self):\n\t\tif self.data:\n\t\t\tfrom crawler_jurisprudencia_tjpi import crawler_jurisprudencia_tjpi\n\t\t\tc = crawler_jurisprudencia_tjpi()\n\t\t\tc.download_diario_retroativo(data_especifica=self.dia_pesquisar+self.mes_pesquisar+self.ano_pesquisar)\n\t\t\treturn\n\t\tpi_dje = \"http://www.tjpi.jus.br/site/modules/diario/Init.mtw\"\n\t\tpag_pi = self.baixa_pag(pi_dje)\n\t\tpag_pi_bs = BeautifulSoup(pag_pi,'html.parser')\n\t\tlink_pi_i = pag_pi_bs.find('a',href=re.compile(r'http://www\\.tjpi\\.jus\\.br/diarioeletronico/public.+'))\n\t\tlink_pi_f = str(link_pi_i['href'])\n\t\tresponse = urllib.request.urlopen(link_pi_f,timeout=30)\n\t\tfile7 = open(self.dia+self.mes+self.ano+\"PI.pdf\", 'wb')\n\t\ttime.sleep(2)\n\t\tfile7.write(response.read())\n\t\tfile7.close()\n\n\tdef baixa_pr(self):\n\t\tpag_pr = \"https://portal.tjpr.jus.br/e-dj/publico/diario/pesquisar/filtro.do\"\n\t\tdriver = webdriver.Chrome(self.chromedriver)\n\t\tdriver.get(pag_pr)\n\t\tif self.data:\n\t\t\tdriver.find_element_by_name(\"dataVeiculacao\").send_keys(self.dia_pesquisar+'/'+self.mes_pesquisar+'/'+self.ano_pesquisar)\n\t\telse:\n\t\t\tdriver.find_element_by_name(\"dataVeiculacao\").send_keys(self.dia+'/'+self.mes+'/'+self.ano)\n\t\tdriver.find_element_by_xpath(\"//*[@id=\\\"searchButton\\\"]\").click()\n\t\tdriver.find_element_by_xpath(\"//*[@id=\\\"diarioPesquisaForm\\\"]/fieldset/table[3]/tbody/tr/td[3]/a\").click()\n\t\ttime.sleep(5)\n\t\tif self.data:\n\t\t\tsubprocess.Popen('mv %s/*.pdf %s/TJPR_%s.pdf' % (path,path_diarios,self.dia_pesquisar+self.mes_pesquisar+self.ano_pesquisar), shell=True)\n\t\telse:\n\t\t\tsubprocess.Popen('mv %s/*.pdf %s/TJPR_%s.pdf' % (path,path_diarios,self.dia+self.mes+self.ano), shell=True)\n\n\tdef baixa_rj(self):\n\t\tpass\n\n\tdef baixa_rn(self):\n\t\tif self.data:\n\t\t\tano = self.ano_pesquisar\n\t\t\tmes = self.mes_pesquisar\n\t\t\tdia = self.dia_pesquisar \n\t\telse:\n\t\t\tano = self.ano\n\t\t\tmes = self.mes\n\t\t\tdia = self.dia\n\t\ttri = \"1tri\"\n\t\tif int(mes)>=4 and int(mes)<=6:\n\t\t\ttri=\"2tri\"\n\t\telif int(mes)>=7 and int(mes)<=9:\n\t\t\ttri=\"3tri\"\n\t\telif int(mes)>=10 and int(mes)<=12:\n\t\t\ttri=\"4tri\"\n\t\trn_dje = \"https://www.diario.tjrn.jus.br/\"\n\t\tdriver = webdriver.Chrome(self.chromedriver)\n\t\tdriver.get(rn_dje)\n\t\tdriver.find_element_by_xpath(\"/html/body/table/tbody/tr/td/table/tbody/tr[4]/td/table[2]/tbody/tr/td/div/a\").click()\n\t\tdriver.find_element_by_xpath(\"//*[@id=\\\"menu:formMenu:_id16\\\"]\").click()\n\t\tdriver.find_element_by_xpath(\"//*[@id=\\\"pesquisarEdicaoCompletaBean:pesquisa_:_id45\\\"]\").click()\n\t\ttime.sleep(3)\n\t\tlink_rn_f = \"https://www.diario.tjrn.jus.br/djonline/pages/repositoriopdfs/\"+ano+\"/\"+tri+\"/\"+ano+mes+dia+\"/\"+ano+mes+dia+\"_JUD.pdf\"\n\t\tdriver.switch_to.window(driver.window_handles[-1])\n\t\tdriver.get(link_rn_f)\n\t\ttime.sleep(5)\n\n\tdef baixa_ro(self):\n\t\tresponse = urllib.request.urlopen('https://portal.tjro.jus.br/diario-api/ultimo-diario.php',timeout=5)\n\t\tfile = open(self.dia+self.mes+self.ano+\"RO.pdf\", 'wb')\n\t\ttime.sleep(1)\n\t\tfile.write(response.read())\n\t\tfile.close()\n\t\t\n\tdef 
baixa_rr(self):\n\t\tif self.data:\n\t\t\tlink_final_rr = \"http://diario.tjrr.jus.br/dpj/dpj-\"+self.ano_pesquisar+self.mes_pesquisar+self.dia_pesquisar+\".pdf\"\n\t\t\tfile5 = open(self.dia_pesquisar+self.mes_pesquisar+self.ano_pesquisar+\"RR.pdf\", 'wb')\n\t\telse:\n\t\t\tlink_final_rr = \"http://diario.tjrr.jus.br/dpj/dpj-\"+self.ano+self.mes+self.dia+\".pdf\"\n\t\t\tfile5 = open(self.dia+self.mes+self.ano+\"RR.pdf\", 'wb')\n\t\tresponse = urllib.request.urlopen(link_final_rr,timeout=1)\n\t\ttime.sleep(1)\n\t\tfile5.write(response.read())\n\t\tfile5.close()\n\n\tdef baixa_rs(self):\n\t\tdriver = webdriver.Chrome(self.chromedriver)\n\t\trs_dje = \"http://www3.tjrs.jus.br/servicos/diario_justica/dj.php\"\n\t\tpag_rs = self.baixa_pag(rs_dje)\n\t\tpag_rs_bs = BeautifulSoup(pag_rs,'html.parser')\n\t\tn_edicao_rs = pag_rs_bs.find('select', attrs={\"name\":\"publicacao_edicao\"})\n\t\tn_edicao_rs_i = re.findall(r'Ed\\. \\d+',str(n_edicao_rs))\n\t\tn_edicao_rs_f = str(n_edicao_rs_i[0][4:])\n\t\tlista_rs = ['5','6','7','8']\n\t\tfor i in lista_rs:\n\t\t\tdriver.get(\"http://www3.tjrs.jus.br/servicos/diario_justica/download_edicao.php?tp=\"+i+\"&ed=\"+n_edicao_rs_f)\n\t\t\ttime.sleep(8)\n\t\t\tsubprocess.Popen('mv %s/*.pdf %s/%s.pdf' % (path,path_diarios,i+'_TJRS_'+str(self.dia)+str(self.mes)+str(self.ano)), shell=True)\n\n\tdef baixa_sc(self):\n\t\tsc_dje = \"http://busca.tjsc.jus.br/dje-consulta/#/main\"\n\t\tdriver = webdriver.Chrome(self.chromedriver)\n\t\tdriver.get(sc_dje)\n\t\ttime.sleep(1)\n\t\tdriver.find_element_by_xpath(\"//*[@id=\\\"div_diarios_publicados\\\"]/span[2]/span/div/ul/li/span/a\").click()\n\t\ttime.sleep(5)\n\t\tdriver.switch_to.window(driver.window_handles[-1])\n\t\tresponse = urllib.request.urlopen(driver.current_url,timeout=1)\n\t\tfile = open('TJSC_'+self.dia+self.mes+self.ano+\".pdf\", 'wb')\n\t\tfile.write(response.read())\n\t\tfile.close()\n\t\tsubprocess.Popen('mv %s/*.pdf %s' % (os.getcwd(),path_diarios), shell=True)\n\n\tdef baixa_se(self):\n\t\tdje_se = \"http://www.diario.tjse.jus.br/diario/internet/pesquisar.wsp?tmp.origem=EXTERNA\"\n\t\tpag_se = self.baixa_pag(dje_se)\n\t\tpag_se_bs = BeautifulSoup(pag_se,'html.parser')\n\t\tlink_se_i = pag_se_bs.find_all('a', attrs={\"href\":\"#\"})\n\t\tlink_se_f = \"http://www.diario.tjse.jus.br/diario/diarios/\"+link_se_i[-1].text.split(\"(\")[0]+\".pdf\"\n\t\tresponse = urllib.request.urlopen(link_se_f.replace('\\n',''),timeout=30)\n\t\tfile = open(self.dia+self.mes+self.ano+\"SE.pdf\", 'wb')\n\t\ttime.sleep(5)\n\t\tfile.write(response.read())\n\t\tfile.close()\n\t\tsubprocess.Popen('mv %s/*.pdf %s/TJSE_%s.pdf' % (os.getcwd(),path_diarios,self.dia+self.mes+self.ano), shell=True)\n\n\tdef baixa_sp(self):\n\t\tif self.data:\n\t\t\tfrom crawler_jurisprudencia_tjsp import crawler_jurisprudencia_tjsp\n\t\t\tc = crawler_jurisprudencia_tjsp()\n\t\t\tc.download_diario_retroativo(data_especifica=self.dia_pesquisar+'/'+self.mes_pesquisar+'/'+self.ano_pesquisar)\n\t\telse:\n\t\t\tpag_sp = \"http://www.dje.tjsp.jus.br/cdje/index.do\"\n\t\t\tself.baixaEsaj(2,7,pag_sp)\n\t\t\tsubprocess.Popen('mv %s/*.pdf %s/TJSP_%s.pdf' % (path,path_diarios,self.dia+self.mes+self.ano), shell=True)\n\n\tdef baixa_stf(self,ultimo_download=191):\n\t\tif self.data:\n\t\t\t# PROBLEMA! VERIFICAR SE O DOWNLOAD FOI FEITO. 
SE ELE FOI, ENTÃO ATUALIZAR ÚLTIMO DOWNLOAD\n\t\t\tlink_final_stf = (\"https://www.stf.jus.br/arquivo/djEletronico/DJE_%s_%s.pdf\" % (self.ano_pesquisar+self.mes_pesquisar+self.dia_pesquisar,ultimo_download))\n\t\t\tresponse = urllib.request.urlopen(link_final_stf,timeout=15)\n\t\t\tfile = open(self.dia_pesquisar+self.mes_pesquisar+self.ano_pesquisar+\"STF.pdf\", 'wb')\n\t\t\ttime.sleep(1)\n\t\t\tfile.write(response.read())\n\t\t\tfile.close()\n\t\telse:\n\t\t\tstf_dje = \"http://www.stf.jus.br/portal/diarioJustica/verDiarioAtual.asp\"\n\t\t\tpag_stf = self.baixa_pag(stf_dje)\n\t\t\tpag_stf_bs = BeautifulSoup(pag_stf, 'html.parser')\n\t\t\tpag_stf_bs_f = pag_stf_bs.find('th',text=re.compile(r\"DJ Nr. \\d+\"))\n\t\t\tn_diario_stf_i = str(pag_stf_bs_f.text)\n\t\t\tn_diario_stf_f = re.findall(r\"\\d+\",n_diario_stf_i)\n\t\t\tlink_final_stf = (\"https://www.stf.jus.br/arquivo/djEletronico/DJE_%s_%s.pdf\" % (self.ano+self.mes+self.dia,n_diario_stf_f[0]))\n\t\t\tresponse = urllib.request.urlopen(link_final_stf,timeout=1)\n\t\t\tfile = open(self.dia+self.mes+self.ano+\"STF.pdf\", 'wb')\n\t\t\ttime.sleep(1)\n\t\t\tfile.write(response.read())\n\t\t\tfile.close()\n\n\tdef baixa_stj(self):\n\t\tif self.data:\n\t\t\tlink_stj_f = \"https://ww2.stj.jus.br/docs_internet/processo/dje/zip/stj_dje_%s.zip\" % (self.data,)\n\t\t\tfile = open(self.dia_pesquisar+self.mes_pesquisar+self.ano_pesquisar+\"STJ.zip\", 'wb')\n\t\telse:\n\t\t\tlink_stj_f = \"https://ww2.stj.jus.br/docs_internet/processo/dje/zip/stj_dje_%s.zip\" % (self.ano+self.mes+self.dia,)\n\t\t\tfile = open(self.dia+self.mes+self.ano+\"STJ.zip\", 'wb')\n\t\tresponse = urllib.request.urlopen(link_stj_f,timeout=300)\n\t\ttime.sleep(2)\n\t\tfile.write(response.read())\n\t\tfile.close()\n\n\tdef baixa_to(self):\n\t\tlink_diario_TO_f = []\n\t\tto_dje = \"https://wwa.tjto.jus.br/diario/pesquisa\"\n\t\tpag_to = self.baixa_pag(to_dje)\n\t\tpag_to_bs = BeautifulSoup(pag_to,'html.parser')\n\t\tlink_to_i = pag_to_bs.find('a',href=re.compile('http://wwa\\.tjto\\.jus\\.br/diario/diariopublicado/.+'))\n\t\tlink_to_f = str(link_to_i['href'])\n\t\tlink_diario_TO_f.append(link_to_f)\n\t\tcont_to = 0\n\t\tfor lk in link_diario_TO_f: \n\t\t\tresponse = urllib.request.urlopen(lk,timeout=5)\n\t\t\tfile1 = open(self.dia+self.mes+self.ano+str(cont_to)+\"TO.pdf\", 'wb')\n\t\t\ttime.sleep(1)\n\t\t\tfile1.write(response.read())\n\t\t\tfile1.close()\n\t\t\tcont_to += 1\n\n\tdef baixa_trf1(self):\n\t\tpag_trf1 = \"https://edj.trf1.jus.br/edj/\"\n\t\tpag_trf1_ini = self.baixa_pag(pag_trf1)\n\t\tpag_trf1_ini_bs = BeautifulSoup(pag_trf1_ini,'html.parser')\n\t\tlinks_trf1_i = pag_trf1_ini_bs.find_all('a',href=re.compile(r\"/edj/handle/123/.+\"))\n\t\tlinks_trf1_i2 = []\n\t\tlinks_trf1_f = []\n\t\tfor ltrf1 in links_trf1_i[4:]:\n\t\t\tlinks_trf1_i2.append(\"https://edj.trf1.jus.br\"+str(ltrf1['href']))\n\t\tfor ltrf1_2 in links_trf1_i2:\n\t\t\taux_link_trf1 = self.baixa_pag(ltrf1_2)\n\t\t\taux_link_trf1_bs = BeautifulSoup(aux_link_trf1,'html.parser')\n\t\t\tlinks_trf1_aux = aux_link_trf1_bs.find('a',href=re.compile(r'/edj/bitstream/handle/123.+'))\n\t\t\tlinks_trf1_f.append(\"https://edj.trf1.jus.br\"+str(links_trf1_aux['href']))\n\t\t\tcont = 0\n\t\tfor lk in links_trf1_f: \n\t\t\tresponse = urllib.request.urlopen(lk,timeout=5)\n\t\t\tfile = open(str(cont)+self.dia+self.mes+self.ano+\"TRF1.pdf\", 'wb')\n\t\t\ttime.sleep(5)\n\t\t\tfile.write(response.read())\n\t\t\tfile.close()\n\t\t\tcont += 1\n\n\tdef baixa_trf2(self):\n\t\ttrf2_dje = 
\"http://dje.trf2.jus.br/DJE/Paginas/Externas/inicial.aspx\"\n\t\tdriver = webdriver.Chrome(self.chromedriver)\n\t\tdriver.get(trf2_dje)\n\t\tdriver.find_element_by_xpath(\"//*[@id=\\\"ctl00_ContentPlaceHolder_ctrInicial_ctrCadernosPorAreaJudicial_lkbCadJudTRF\\\"]\").click()\n\t\tdriver.find_element_by_xpath(\"//*[@id=\\\"ctl00_ContentPlaceHolder_ctrInicial_ctrCadernosPorAreaJudicial_lkbCadJudSJRJ\\\"]\").click()\n\t\tdriver.find_element_by_xpath(\"//*[@id=\\\"ctl00_ContentPlaceHolder_ctrInicial_ctrCadernosPorAreaJudicial_lkbCadJudSJES\\\"]\").click()\n\t\ttime.sleep(15)\n\t\tsubprocess.Popen('mv %s/*.pdf %s' % (path,path_diarios), shell=True)\n\t\t\n\tdef baixa_trf3(self):\n\t\ttrf3_dje = \"http://web.trf3.jus.br/diario/Consulta\"\n\t\tdriver = webdriver.Chrome(self.chromedriver)\n\t\tdriver.get(trf3_dje)\n\t\tfor i in range(2,3):\n\t\t\tdriver.find_element_by_xpath(\"//*[@id=\\\"botao-ultima\\\"]/a\").click()\n\t\t\tdriver.find_element_by_xpath(\"//*[@id=\\\"ultimaEdicao\\\"]/li[\"+str(i)+\"]/a\").click()\n\t\t\ttime.sleep(17)\n\t\t\tsubprocess.Popen('mv %s/*.pdf %s/TRF3_%s.pdf' % (path,path_diarios,str(i)+self.ano+self.mes+self.dia), shell=True)\n\t\tfor i in range(3,10):\n\t\t\tdriver.find_element_by_xpath(\"//*[@id=\\\"botao-ultima\\\"]/a\").click()\n\t\t\tdriver.find_element_by_xpath(\"//*[@id=\\\"ultimaEdicao\\\"]/li[\"+str(i)+\"]/a\").click()\n\t\t\ttime.sleep(10)\n\t\t\tsubprocess.Popen('mv %s/*.pdf %s/TRF3_%s.pdf' % (path,path_diarios,str(i)+self.ano+self.mes+self.dia), shell=True)\n\t\tdriver.close()\n\n\tdef baixa_trf4(self):\n\t\tif self.data:\n\t\t\tano = self.ano_pesquisar\n\t\t\tmes = self.mes_pesquisar\n\t\t\tdia = self.dia_pesquisar\n\t\telse:\n\t\t\tano = self.ano\n\t\t\tmes = self.mes\n\t\t\tdia = self.dia\n\t\ttry:\n\t\t\tlink_inicial = 'https://www2.trf4.jus.br/trf4/diario/download.php?arquivo=%2Fvar%2Fwww%2Fhtml%2Fdiario%2Fdocsa%2Fde_jud_{}1645{}_{}_a.pdf'\n\t\t\tmarcador = {'2018' : '01', '2017' : '01', '2016' : '02', '2015' : '02', '2014' : '02', '2013' : '01', '2012' : '06', '2011' : '01'}\n\t\t\tresponse = urllib.request.urlopen(link_inicial.format(ano+mes+dia, marcador[ano],ano+'_'+mes+'_'+dia),timeout=5)\n\t\t\tfile = open(dia+mes+ano+'.pdf', 'wb')\n\t\t\tfile.write(response.read())\n\t\t\tfile.close()\n\t\t\tsubprocess.Popen('mv %s/*.pdf %s/TRF4_%s' % (os.getcwd(),path_diarios,dia+mes+ano), shell=True)\n\t\texcept Exception as e:\n\t\t\tprint(e)\n\n\tdef baixa_trf5(self):\n\t\tfrom selenium.webdriver.support.ui import Select\n\t\torgaos_trf5 = ['TRIBUNAL REGIONAL FEDERAL DA 5ª REGIÃO','Seção Judiciária de Alagoas','Seção Judiciária do Ceará','Seção Judiciária da Paraíba','Seção Judiciária de Pernambuco',\\\n\t\t'Seção Judiciária do Rio Grande do Norte','Seção Judiciária do sergipe']\n\t\tdef trf5_baixa_diarios(orgao): \n\t\t\tdriver = webdriver.Chrome(self.chromedriver)\n\t\t\ttrf5_dje = \"https://www4.trf5.jus.br/diarioeletinternet/\"\n\t\t\tdriver.get(trf5_dje)\n\t\t\torg_trf5 = Select(driver.find_element_by_id(\"frmVisao:orgao\"))\n\t\t\tedicao_trf5_opt = Select(driver.find_element_by_id(\"frmVisao:edicao\"))\n\t\t\tano_trf5_opt = Select(driver.find_element_by_id(\"frmVisao:periodo\"))\n\t\t\torg_trf5.select_by_visible_text(orgao)\n\t\t\tedicao_trf5_opt.select_by_visible_text('Judicial')\n\t\t\tano_trf5_opt.select_by_visible_text(self.ano)\n\t\t\ttime.sleep(1)\n\t\t\tdriver.find_element_by_xpath(\"//*[@id=\\\"frmVisao:j_id48\\\"]\").click()\n\t\t\ttime.sleep(1)\n\t\t\tdriver.execute_script(\"return 
oamSubmitForm('frmPesquisa','frmPesquisa:tDiarios:0:j_id67','_blank',[]);\")\n\t\t\ttime.sleep(3)\n\t\t\tdriver.close()\n\t\tfor o in orgaos_trf5:\n\t\t\ttry:\n\t\t\t\ttrf5_baixa_diarios(o)\n\t\t\t\tsubprocess.Popen('mv %s/Diário.pdf \"%s/%s.pdf\"' % (path,path_diarios,o.replace(' ','')+str(self.dia)+str(self.mes)+str(self.ano)), shell=True)\n\t\t\texcept Exception as e:\n\t\t\t\tprint(e)\n\n\tdef baixa_trt(self):\n\t\tdje_trt = 'https://aplicacao.jt.jus.br/dejt.html'\n\t\tlinks_trt = [\n\t\t'https://aplicacao.jt.jus.br/Diario_J_TST.pdf',\n\t\t'https://aplicacao.jt.jus.br/Diario_J_01.pdf',\n\t\t'https://aplicacao.jt.jus.br/Diario_J_02.pdf',\n\t\t'https://aplicacao.jt.jus.br/Diario_J_03.pdf',\n\t\t'https://aplicacao.jt.jus.br/Diario_J_04.pdf',\n\t\t'https://aplicacao.jt.jus.br/Diario_J_05.pdf',\n\t\t'https://aplicacao.jt.jus.br/Diario_J_06.pdf',\n\t\t'https://aplicacao.jt.jus.br/Diario_J_07.pdf',\n\t\t'https://aplicacao.jt.jus.br/Diario_J_08.pdf',\n\t\t'https://aplicacao.jt.jus.br/Diario_J_09.pdf',\n\t\t'https://aplicacao.jt.jus.br/Diario_J_10.pdf',\n\t\t'https://aplicacao.jt.jus.br/Diario_J_11.pdf',\n\t\t'https://aplicacao.jt.jus.br/Diario_J_12.pdf',\n\t\t'https://aplicacao.jt.jus.br/Diario_J_13.pdf',\n\t\t'https://aplicacao.jt.jus.br/Diario_J_14.pdf',\n\t\t'https://aplicacao.jt.jus.br/Diario_J_15.pdf',\n\t\t'https://aplicacao.jt.jus.br/Diario_J_16.pdf',\n\t\t'https://aplicacao.jt.jus.br/Diario_J_17.pdf',\n\t\t'https://aplicacao.jt.jus.br/Diario_J_18.pdf',\n\t\t'https://aplicacao.jt.jus.br/Diario_J_19.pdf',\n\t\t'https://aplicacao.jt.jus.br/Diario_J_20.pdf',\n\t\t'https://aplicacao.jt.jus.br/Diario_J_21.pdf',\n\t\t'https://aplicacao.jt.jus.br/Diario_J_22.pdf',\n\t\t'https://aplicacao.jt.jus.br/Diario_J_23.pdf',\n\t\t'https://aplicacao.jt.jus.br/Diario_J_24.pdf'\n\t\t]\n\t\tresponse = urllib.request.urlopen(links_trt[0],timeout=1)\n\t\tfile = open('Diario_TST_%s.pdf' % (self.dia+self.mes+self.ano,), 'wb')\n\t\tfile.write(response.read())\n\t\tfile.close()\n\t\tsubprocess.Popen('mv %s/*.pdf %s/%s' % (os.getcwd(),path_diarios,'Diario_TST_%s.pdf' % (self.dia+self.mes+self.ano,)), shell=True)\n\t\tfor l in range(1,len(links_trt)):\n\t\t\tresponse = urllib.request.urlopen(links_trt[l],timeout=1)\n\t\t\tfilename = 'TRT_%s.pdf' % (str(l)+'_'+self.dia+self.mes+self.ano,)\n\t\t\tfile = open(filename, 'wb')\n\t\t\tfile.write(response.read())\n\t\t\tfile.close()\n\t\t\tsubprocess.Popen('mv %s/*.pdf %s/%s' % (os.getcwd(),path_diarios,filename), shell=True)\n\n\nif __name__ == '__main__':\n\t# publicacoes = publicacoes_diarios_oficiais(data='20190902')\n\t\n\t# publicacoes = publicacoes_diarios_oficiais()\n\t\n\t# publicacoes.baixa_ac(todos=True)\n\t\n\t# for d in publicacoes.diarios_a_baixar:\n\t# \td()\n\n\t# pub = publicacoes_diarios_oficiais()\n\t# pub.baixa_go()\n\n\tchromedriver = os.getcwd()+\"/chromedriver\"\n\tdatas_p = []\n\tfor i in range(1,32):\n\t\tdia = str(i)\n\t\tif len(dia) == 1:\n\t\t\tdia = '0'+str(i)\n\t\tfor j in range(1,13):\n\t\t\tmes = str(j)\n\t\t\tif len(mes) == 1:\n\t\t\t\tmes = '0'+str(j)\n\t\t\tfor k in range(2018,2020):\n\t\t\t\tdatas_p.append(dia+'/'+mes+'/'+str(k))\n\tfor data_p in datas_p[3:]:\n\t\tdriver = webdriver.Chrome(chromedriver)\n\t\tdriver.get('https://esaj.tjms.jus.br/cdje/index.do')\n\t\tfor i in range(2,4):\n\t\t\tdriver.execute_script(\"popup('/cdje/downloadCaderno.do?dtDiario=%s'+'&cdCaderno=%s&tpDownload=D','cadernoDownload');\" % (data_p, str(i)))\n\t\t\ttime.sleep(8)\n\t\t\tnome_pasta = data_p.replace('/','')\n\t\t\tsubprocess.Popen('mkdir 
\"%s/Diarios_ms/%s\"' % (path_diarios,nome_pasta), shell=True) \n\t\t\tsubprocess.Popen('mv %s/*.pdf \"%s/Diarios_ms/%s/\"' % (path,path_diarios,nome_pasta), shell=True)\n\t\tdriver.quit()","sub_path":"crawlers/diarios_download.py","file_name":"diarios_download.py","file_ext":"py","file_size_in_byte":27483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"3991605","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.5 (3350)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /home/edill/mc/lib/python3.5/site-packages/skxray/core/calibration.py\n# Compiled at: 2016-03-04 05:19:32\n# Size of source mod 2**32: 9030 bytes\n\"\"\"\nThis is the module for calibration functions and data\n\"\"\"\nfrom __future__ import absolute_import, division, print_function\nfrom collections import deque\nimport numpy as np, scipy.signal\nfrom .constants import calibration_standards\nfrom .feature import filter_peak_height, peak_refinement, refine_log_quadratic\nfrom .utils import angle_grid, radial_grid, pairwise, bin_edges_to_centers, bin_1D\n\ndef estimate_d_blind(name, wavelength, bin_centers, ring_average, window_size, max_peak_count, thresh):\n r\"\"\"\n Estimate the sample-detector distance\n\n Given a radially integrated calibration image return an estimate for\n the sample-detector distance. This function does not require a\n rough estimate of what d should be.\n\n For the peaks found the detector-sample distance is estimated via\n .. math ::\n\n D = \\frac{r}{\\tan 2\\theta}\n\n where :math:`r` is the distance in mm from the calibrated center\n to the ring on the detector and :math:`D` is the distance from\n the sample to the detector.\n\n Parameters\n ----------\n name : str\n The name of the calibration standard. Used to look up the\n expected peak location\n For valid options, see the name attribute on this function\n\n wavelength : float\n The wavelength of scattered x-ray in nm\n\n bin_centers : array\n The distance from the calibrated center to the center of\n the ring's annulus in mm\n\n ring_average : array\n The average intensity in the given ring of a azimuthally integrated\n powder pattern. In counts [arb]\n\n window_size : int\n The number of elements on either side of a local maximum to\n use for locating and refining peaks. Candidates are identified\n as a relative maximum in a window sized (2*window_size + 1) and\n the same window is used for fitting the peaks to refine the location.\n\n max_peak_count : int\n Use at most this many peaks\n\n thresh : float\n Fraction of maximum peak height\n\n Returns\n -------\n dist_sample : float\n The detector-sample distance in mm. 
This is the mean of the estimate\n from all of the peaks used.\n\n std_dist_sample : float\n The standard deviation of d computed from the peaks used.\n \"\"\"\n cal = calibration_standards[name]\n cands = scipy.signal.argrelmax(ring_average, order=window_size)[0]\n cands = filter_peak_height(ring_average, cands, thresh * np.max(ring_average), window=window_size)\n peaks_x, peaks_y = peak_refinement(bin_centers, ring_average, cands, window_size, refine_log_quadratic)\n tan2theta = np.tan(cal.convert_2theta(wavelength))\n slc = slice(0, np.min([len(tan2theta), len(peaks_x), max_peak_count]))\n d_array = peaks_x[slc] / tan2theta[slc]\n return (np.mean(d_array), np.std(d_array))\n\n\nestimate_d_blind.name = list(calibration_standards)\n\ndef refine_center(image, calibrated_center, pixel_size, phi_steps, max_peaks, thresh, window_size, nx=None, min_x=None, max_x=None):\n \"\"\"\n Refines the location of the center of the beam.\n\n This relies on being able to see the whole powder pattern.\n\n Parameters\n ----------\n image : ndarray\n The image\n\n calibrated_center : tuple\n (row, column) the estimated center\n\n pixel_size : tuple\n (pixel_height, pixel_width)\n\n phi_steps : int\n How many regions to split the ring into, should be >10\n\n max_peaks : int\n Number of rings to look it\n\n thresh : float\n Fraction of maximum peak height\n\n window_size : int, optional\n The window size to use (in bins) to use when refining peaks\n\n nx : int, optional\n Number of bins to use for radial binning\n\n min_x : float, optional\n The minimum radius to use for radial binning\n\n max_x : float, optional\n The maximum radius to use for radial binning\n\n Returns\n -------\n calibrated_center : tuple\n The refined calibrated center.\n \"\"\"\n if nx is None:\n nx = int(np.mean(image.shape) * 2)\n phi = angle_grid(calibrated_center, image.shape, pixel_size).ravel()\n r = radial_grid(calibrated_center, image.shape, pixel_size).ravel()\n I = image.ravel()\n phi_steps = np.linspace(-np.pi, np.pi, phi_steps, endpoint=True)\n out = deque()\n for phi_start, phi_end in pairwise(phi_steps):\n mask = (phi <= phi_end) * (phi > phi_start)\n out.append(bin_1D(r[mask], I[mask], nx=nx, min_x=min_x, max_x=max_x))\n\n out = list(out)\n ring_trace = []\n for bins, b_sum, b_count in out:\n mask = b_sum > 10\n avg = b_sum[mask] / b_count[mask]\n bin_centers = bin_edges_to_centers(bins)[mask]\n cands = scipy.signal.argrelmax(avg, order=window_size)[0]\n cands = filter_peak_height(avg, cands, thresh * np.max(avg), window=window_size)\n ring_trace.append(bin_centers[cands[:max_peaks]])\n\n tr_len = [len(rt) for rt in ring_trace]\n mm = np.min(tr_len)\n ring_trace = np.vstack([rt[:mm] for rt in ring_trace]).T\n mean_dr = np.mean(ring_trace - np.mean(ring_trace, axis=1, keepdims=True), axis=0)\n phi_centers = bin_edges_to_centers(phi_steps)\n delta = np.mean(np.diff(phi_centers))\n col_shift = np.sum(np.sin(phi_centers) * mean_dr) * delta / (np.pi * pixel_size[1])\n row_shift = np.sum(np.cos(phi_centers) * mean_dr) * delta / (np.pi * pixel_size[0])\n return tuple(np.array(calibrated_center) + np.array([row_shift, col_shift]))","sub_path":"pycfiles/scikit-xray-0.0.5.post0.linux-x86_64.tar/calibration.cpython-35.py","file_name":"calibration.cpython-35.py","file_ext":"py","file_size_in_byte":5750,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"293572457","text":"# Chapter 17: Manipulating Images\r\n\r\n# Pillow is a third-party Python module for interacting with image 
files.\r\n# The module has several functions that make it easy to crop, resize, and edit the content of an image.\r\n\r\n# Colors and RGBA Values\r\n# Computer programs often represent a color in an image as an RGBA value.\r\n# An RGBA value is a group of numbers that specify the amount of red, green, blue, and alpha (or transparency) in a color.\r\n\r\n# In Pillow, RGBA values are represented by a tuple of four integer values. For example, the color red is represented by (255, 0, 0, 255).\r\n# If a color has an alpha value of 0, it is invisible, and it doesn’t really matter what the RGB values are. After all, invisible red looks the same as invisible black.\r\n\r\n# Pillow offers the ImageColor.getcolor() function so you don’t have to memorize RGBA values for the colors you want to use.\r\n\r\nfrom PIL import ImageColor\r\nImageColor.getcolor('red', 'RGBA')\r\n# Output is (255, 0, 0, 255)\r\nImageColor.getcolor('RED', 'RGBA')\r\n# Output is (255, 0, 0, 255)\r\nImageColor.getcolor('Black', 'RGBA')\r\n# Output is (0, 0, 0, 255)\r\nImageColor.getcolor('chocolate', 'RGBA')\r\n# Output is (210, 105, 30, 255)\r\nImageColor.getcolor('CornflowerBlue', 'RGBA')\r\n# Output is (100, 149, 237, 255)\r\n\r\n# Coordinates and Box Tuples\r\n# Image pixels are addressed with x- and y-coordinates, which respectively specify a pixel’s horizontal and vertical location in an image.\r\n# The origin is the pixel at the top-left corner of the image and is specified with the notation (0, 0).\r\n\r\n# The first zero represents the x-coordinate, which starts at zero at the origin and increases going from left to right. The second zero represents the y-coordinate, which starts at zero at the origin and increases going down the image.\r\n\r\n# Manipulating Images with Pillow\r\n# Figure 17-3 is the image that will be used for all the interactive shell examples in this chapter. 
You can download it from http://nostarch.com/automatestuff/.\r\n\r\nfrom PIL import Image\r\ncatIm = Image.open('zophie.png')\r\n\r\n# Working with the Image Data Type\r\n# An Image object has several useful attributes that give you basic information about the image file it was loaded from: its width and height, the filename, and the graphics format (such as JPEG, GIF, or PNG).\r\n\r\nfrom PIL import Image\r\ncatIm = Image.open('zophie.png')\r\nprint (catIm.size)\r\n# Output is (816, 1088)\r\nwidth, height = catIm.size\r\nprint (width)\r\n# Output is 816\r\nprint (height)\r\n# Output is 1088\r\nprint (catIm.filename)\r\n# Output is 'zophie.png'\r\nprint (catIm.format)\r\n# Output is 'PNG'\r\nprint (catIm.format_description)\r\n# Output is 'Portable network graphics'\r\ncatIm.save('zophie.jpg')\r\n\r\n# Pillow also provides the Image.new() function, which returns an Image object—much like Image.open(), except the image represented by Image.new()’s object will be blank.\r\n\r\nfrom PIL import Image\r\nim = Image.new('RGBA', (100, 200), 'purple')\r\nim.save('purpleImage.png')\r\nim2 = Image.new('RGBA', (20, 20))\r\nim2.save('transparentImage.png')\r\n# Here we create an Image object for an image that’s 100 pixels wide and 200 pixels tall, with a purple background.\r\n\r\ncroppedIm = catIm.crop((335, 345, 565, 560))\r\ncroppedIm.save('cropped.png')\r\n# Cropping Images\r\n# Cropping an image means selecting a rectangular region inside an image and removing everything outside the rectangle.\r\n# The crop() method on Image objects takes a box tuple and returns an Image object representing the cropped image.\r\n# The cropping does not happen in place—that is, the original Image object is left untouched, and the crop() method returns a new Image object.\r\n\r\n# Copying and Pasting Images onto Other Images\r\n# The copy() method will return a new Image object with the same image as the Image object it was called on.\r\n# This is useful if you need to make changes to an image but also want to keep an untouched version of the original.\r\n\r\ncatIm = Image.open('zophie.png')\r\ncatCopyIm = catIm.copy()\r\n\r\n# The paste() method is called on an Image object and pastes another image on top of it.\r\n\r\nfaceIm = catIm.crop((335, 345, 565, 560))\r\nprint (faceIm.size)\r\n# Output is (230, 215)\r\ncatCopyIm.paste(faceIm, (0, 0))\r\ncatCopyIm.paste(faceIm, (400, 500))\r\ncatCopyIm.save('pasted.png')\r\n\r\n# Resizing an Image\r\n# The resize() method is called on an Image object and returns a new Image object of the specified width and height\r\n# It accepts a two-integer tuple argument, representing the new width and height of the returned image.\r\n\r\nwidth, height = catIm.size\r\nquartersizedIm = catIm.resize((int(width / 2), int(height / 2)))\r\nquartersizedIm.save('quartersized.png')\r\nsvelteIm = catIm.resize((width, height + 300))\r\nsvelteIm.save('svelte.png')\r\n\r\n# Rotating and Flipping Images\r\n# Images can be rotated with the rotate() method, which returns a new Image object of the rotated image and leaves the original Image object unchanged.\r\n# The argument to rotate() is a single integer or float representing the number of degrees to rotate the image counterclockwise.\r\n\r\ncatIm.rotate(90).save('rotated90.png')\r\ncatIm.rotate(180).save('rotated180.png')\r\ncatIm.rotate(270).save('rotated270.png')\r\n\r\n# You can also get a “mirror flip” of an image with the transpose() method. 
You must pass either Image.FLIP_LEFT_RIGHT or Image.FLIP_TOP_BOTTOM to the transpose() method.\r\n\r\ncatIm.transpose(Image.FLIP_LEFT_RIGHT).save('horizontal_flip.png')\r\ncatIm.transpose(Image.FLIP_TOP_BOTTOM).save('vertical_flip.png')\r\n\r\n# Changing Individual Pixels\r\n# The color of an individual pixel can be retrieved or set with the getpixel() and putpixel() methods.\r\n# These methods both take a tuple representing the x- and y-coordinates of the pixel.\r\n# The putpixel() method also takes an additional tuple argument for the color of the pixel. This color argument is a four-integer RGBA tuple or a three-integer RGB tuple.\r\n\r\nim = Image.new('RGBA', (100, 100))\r\nim.getpixel((0, 0))\r\n(0, 0, 0, 0)\r\nfor x in range(100):\r\n for y in range(50):\r\n im.putpixel((x, y), (210, 210, 210))\r\n\r\nfrom PIL import ImageColor\r\nfor x in range(100):\r\n for y in range(50, 100):\r\n im.putpixel((x, y), ImageColor.getcolor('darkgray', 'RGBA'))\r\nprint (im.getpixel((0, 0)))\r\n# Output is (210, 210, 210, 255)\r\nprint (im.getpixel((0, 50)))\r\n# Output is (169, 169, 169, 255)\r\nim.save('putPixel.png')\r\n\r\n# Drawing on Images\r\n# If you need to draw lines, rectangles, circles, or other simple shapes on an image, use Pillow’s ImageDraw module.\r\n\r\nfrom PIL import Image, ImageDraw\r\nim = Image.new('RGBA', (200, 200), 'white')\r\ndraw = ImageDraw.Draw(im)\r\n\r\n# Drawing Shapes\r\n# The following ImageDraw methods draw various kinds of shapes on the image. The fill and outline parameters for these methods are optional and will default to white if left unspecified.\r\n\r\n# Points\r\n# The point(xy, fill) method draws individual pixels. The xy argument represents a list of the points you want to draw.\r\n# The fill argument is the color of the points and is either an RGBA tuple or a string of a color name, such as 'red'. The fill argument is optional.\r\n\r\n# Lines\r\n# The line(xy, fill, width) method draws a line or series of lines. xy is either a list of tuples, such as [(x, y), (x, y), ...], or a list of integers, such as [x1, y1, x2, y2, ...].\r\n# Each point is one of the connecting points on the lines you’re drawing.\r\n# The optional fill argument is the color of the lines, as an RGBA tuple or color name.\r\n# The optional width argument is the width of the lines and defaults to 1 if left unspecified.\r\n\r\n# Rectangles\r\n# The rectangle(xy, fill, outline) method draws a rectangle. The xy argument is a box tuple of the form (left, top, right, bottom).\r\n# The left and top values specify the x- and y-coordinates of the upper-left corner of the rectangle, while right and bottom specify the lower-right corner.\r\n# The optional fill argument is the color that will fill the inside of the rectangle.\r\n# The optional outline argument is the color of the rectangle’s outline.\r\n\r\n# Ellipses\r\n# The ellipse(xy, fill, outline) method draws an ellipse. 
If the width and height of the ellipse are identical, this method will draw a circle.\r\n# The xy argument is a box tuple (left, top, right, bottom) that represents a box that precisely contains the ellipse.\r\n\r\n# Polygons\r\n# The polygon(xy, fill, outline) method draws an arbitrary polygon.\r\n# The xy argument is a list of tuples, such as [(x, y), (x, y), ...], or integers, such as [x1, y1, x2, y2, ...], representing the connecting points of the polygon’s sides.\r\n# The last pair of coordinates will be automatically connected to the first pair.\r\n# The optional fill argument is the color of the inside of the polygon, and the optional outline argument is the color of the polygon’s outline.\r\n\r\n# Example:\r\nfrom PIL import Image, ImageDraw\r\nim = Image.new('RGBA', (200, 200), 'white')\r\ndraw = ImageDraw.Draw(im)\r\ndraw.line([(0, 0), (199, 0), (199, 199), (0, 199), (0, 0)], fill='black')\r\ndraw.rectangle((20, 30, 60, 60), fill='blue')\r\ndraw.ellipse((120, 30, 160, 60), fill='red')\r\ndraw.polygon(((57, 87), (79, 62), (94, 85), (120, 90), (103, 113)), fill='brown')\r\nfor i in range(100, 200, 10):\r\n draw.line([(i, 0), (200, i - 100)], fill='green')\r\nim.save('drawing.png')\r\n\r\n# Drawing Text\r\n# The ImageDraw object also has a text() method for drawing text onto an image. The text() method takes four arguments: xy, text, fill, and font.\r\n\r\n# The xy argument is a two-integer tuple specifying the upper-left corner of the text box.\r\n# The text argument is the string of text you want to write.\r\n# The optional fill argument is the color of the text.\r\n# The optional font argument is an ImageFont object, used to set the type-face and size of the text. This is described in more detail in the next section.\r\n\r\n# Now that you’ve imported Pillow’s ImageFont module, you can call the ImageFont.truetype() function, which takes two arguments. The first argument is a string for the font’s TrueType file—this is the actual font file that lives on your hard drive.\r\n# The second argument to ImageFont.truetype() is an integer for the font size in points (rather than, say, pixels).\r\n\r\nfrom PIL import Image, ImageDraw, ImageFont\r\nimport os\r\nim = Image.new('RGBA', (200, 200), 'white')\r\ndraw = ImageDraw.Draw(im)\r\ndraw.text((20, 150), 'Hello', fill='purple')\r\nfontsFolder = 'FONT_FOLDER' # e.g. 'Library/Fonts'\r\narialFont = ImageFont.truetype(os.path.join(fontsFolder, 'arial.ttf'), 32)\r\ndraw.text((100, 150), 'Howdy', fill='gray', font=arialFont)\r\nim.save('text.png')\r\n\r\n# Practice Questions\r\n# 1. What is an RGBA value?\r\n\r\n # An RGBA value is a tuple of 4 integers, each ranging from 0 to 255. The four integers correspond to the amount of red, green, blue, and alpha (transparency) in the color.\r\n\r\n# 2. How can you get the RGBA value of 'CornflowerBlue' from the Pillow module?\r\n\r\n # A function call to ImageColor.getcolor('CornflowerBlue', 'RGBA') will return (100, 149, 237, 255), the RGBA value for that color.\r\n\r\n# 3. What is a box tuple?\r\n\r\n # A box tuple is a tuple value of four integers: the left edge x-coordinate, the top edge y-coordinate, the width, and the height, respectively.\r\n\r\n# 4. What function returns an Image object for, say, an image file named zophie.png?\r\n\r\n # Image.open('zophie.png')\r\n\r\n# 5. How can you find out the width and height of an Image object’s image?\r\n\r\n # imageObj.size is a tuple of two integers, the width and the height.\r\n\r\n# 6. 
What method would you call to get Image object for a 100×100 image, excluding the lower left quarter of it?\r\n\r\n # imageObj.crop((0, 50, 50, 50)). Notice that you are passing a box tuple to crop(), not four separate integer arguments.\r\n\r\n# 7. After making changes to an Image object, how could you save it as an image file?\r\n\r\n # Call the imageObj.save('new_filename.png') method of the Image object.\r\n\r\n# 8. What module contains Pillow’s shape-drawing code?\r\n\r\n # The ImageDraw module contains code to draw on images.\r\n\r\n# 9. Image objects do not have drawing methods. What kind of object does? How do you get this kind of object?\r\n\r\n # ImageDraw objects have shape-drawing methods such as point(), line(), or rectangle(). They are returned by passing the Image object to the ImageDraw.Draw() function.\r\n","sub_path":"Manipulating Images.py","file_name":"Manipulating Images.py","file_ext":"py","file_size_in_byte":12298,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"351227141","text":"# --------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for\n# license information.\n# --------------------------------------------------------------------------\n\nimport logging\nimport json\nfrom azure.iot.device.common.transport import pipeline_ops_base\nfrom azure.iot.device.common.transport.mqtt import pipeline_ops_mqtt\nfrom azure.iot.device.common.transport.mqtt import pipeline_events_mqtt\nfrom azure.iot.device.common.transport.pipeline_stages_base import PipelineStage\nfrom azure.iot.device.iothub.models import Message, MethodRequest\nfrom azure.iot.device.iothub.transport import constant\nfrom azure.iot.device.iothub.transport import pipeline_ops_iothub\nfrom azure.iot.device.iothub.transport import pipeline_events_iothub\nfrom . import mqtt_topic\n\nlogger = logging.getLogger(__name__)\n\n\nclass IotHubMQTTConverter(PipelineStage):\n \"\"\"\n PipelineStage which converts other Iot and IotHub operations into Mqtt operations. 
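\n\n For example (an illustrative mapping, assuming the standard IoT Hub MQTT topic conventions rather than anything defined in this file):\n\n SendTelemetry for device \"d1\" -> Publish to \"devices/d1/messages/events/\"\n EnableFeature(C2D_MSG) for \"d1\" -> Subscribe to \"devices/d1/messages/devicebound/#\"\n\n 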
This stage also\n converts mqtt pipeline events into Iot and IotHub pipeline events.\n \"\"\"\n\n def __init__(self):\n super(IotHubMQTTConverter, self).__init__()\n self.feature_to_topic = {}\n\n def _run_op(self, op):\n\n if isinstance(op, pipeline_ops_iothub.SetAuthProviderArgs):\n # if we get auth provider args from above, we save some, use some to build topic names,\n # and always pass it down because we know that the MQTT Provider stage will also want\n # to receive these args.\n self._set_topic_names(device_id=op.device_id, module_id=op.module_id)\n\n if op.module_id:\n client_id = \"{}/{}\".format(op.device_id, op.module_id)\n else:\n client_id = op.device_id\n\n username = \"{hostname}/{client_id}/?api-version=2018-06-30\".format(\n hostname=op.hostname, client_id=client_id\n )\n\n if op.gateway_hostname:\n hostname = op.gateway_hostname\n else:\n hostname = op.hostname\n\n self.continue_with_different_op(\n original_op=op,\n new_op=pipeline_ops_mqtt.SetConnectionArgs(\n client_id=client_id, hostname=hostname, username=username, ca_cert=op.ca_cert\n ),\n )\n\n elif isinstance(op, pipeline_ops_iothub.SendTelemetry) or isinstance(\n op, pipeline_ops_iothub.SendOutputEvent\n ):\n # Convert SendTelementry and SendOutputEvent operations into Mqtt Publish operations\n topic = mqtt_topic.encode_properties(op.message, self.telemetry_topic)\n self.continue_with_different_op(\n original_op=op,\n new_op=pipeline_ops_mqtt.Publish(topic=topic, payload=op.message.data),\n )\n\n elif isinstance(op, pipeline_ops_iothub.SendMethodResponse):\n # Sending a Method Response gets translated into an MQTT Publish operation\n topic = mqtt_topic.get_method_topic_for_publish(\n op.method_response.request_id, str(op.method_response.status)\n )\n payload = json.dumps(op.method_response.payload)\n self.continue_with_different_op(\n original_op=op, new_op=pipeline_ops_mqtt.Publish(topic=topic, payload=payload)\n )\n\n elif isinstance(op, pipeline_ops_base.EnableFeature):\n # Enabling a feature gets translated into an Mqtt subscribe operation\n topic = self.feature_to_topic[op.feature_name]\n self.continue_with_different_op(\n original_op=op, new_op=pipeline_ops_mqtt.Subscribe(topic=topic)\n )\n\n elif isinstance(op, pipeline_ops_base.DisableFeature):\n # Disabling a feature gets turned into an Mqtt unsubscribe operation\n topic = self.feature_to_topic[op.feature_name]\n self.continue_with_different_op(\n original_op=op, new_op=pipeline_ops_mqtt.Unsubscribe(topic=topic)\n )\n\n else:\n # All other operations get passed down\n self.continue_op(op)\n\n def _set_topic_names(self, device_id, module_id):\n \"\"\"\n Build topic names based on the device_id and module_id passed.\n \"\"\"\n self.telemetry_topic = mqtt_topic.get_telemetry_topic_for_publish(device_id, module_id)\n self.feature_to_topic = {\n constant.C2D_MSG: (mqtt_topic.get_c2d_topic_for_subscribe(device_id, module_id)),\n constant.INPUT_MSG: (mqtt_topic.get_input_topic_for_subscribe(device_id, module_id)),\n constant.METHODS: (mqtt_topic.get_method_topic_for_subscribe()),\n }\n\n def _handle_pipeline_event(self, event):\n \"\"\"\n Pipeline Event handler function to convert incoming Mqtt messages into the appropriate IotHub\n events, based on the topic of the message\n \"\"\"\n if isinstance(event, pipeline_events_mqtt.IncomingMessage):\n topic = event.topic\n\n if mqtt_topic.is_c2d_topic(topic):\n message = Message(event.payload)\n mqtt_topic.extract_properties_from_topic(topic, message)\n 
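# The raw MQTT payload has now been wrapped in a Message and the system
# properties encoded in the topic copied onto it; the converted C2D event
# is handed up the pipeline below.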
self.handle_pipeline_event(pipeline_events_iothub.C2DMessageEvent(message))\n\n elif mqtt_topic.is_input_topic(topic):\n message = Message(event.payload)\n mqtt_topic.extract_properties_from_topic(topic, message)\n input_name = mqtt_topic.get_input_name_from_topic(topic)\n self.handle_pipeline_event(\n pipeline_events_iothub.InputMessageEvent(input_name, message)\n )\n\n elif mqtt_topic.is_method_topic(topic):\n rid = mqtt_topic.get_method_request_id_from_topic(topic)\n method_name = mqtt_topic.get_method_name_from_topic(topic)\n method_received = MethodRequest(\n request_id=rid, name=method_name, payload=json.loads(event.payload)\n )\n self._handle_pipeline_event(pipeline_events_iothub.MethodRequest(method_received))\n\n else:\n logger.warning(\"Warning: dropping message with topic {}\".format(topic))\n\n else:\n # all other messages get passed up\n PipelineStage._handle_pipeline_event(self, event)\n","sub_path":"azure-iot-device/azure/iot/device/iothub/transport/mqtt/pipeline_stages_iothub_mqtt.py","file_name":"pipeline_stages_iothub_mqtt.py","file_ext":"py","file_size_in_byte":6479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"141572180","text":"line = []\r\nline1 = []\r\nf = open(\"toExpansion1.txt\",\"r\")\r\nf1 = open(\"abbreviation.txt\",\"r\")\r\nlines = f1.readlines()\r\nfor line in lines:\r\n line1.append(line.strip('\\n'))\r\nprint(line1)\r\ni = 0\r\nfor line in f:\r\n # while i < line.count(' '):\r\n for i in range(line.strip().count(' ') + 1):\r\n if line.strip().split(' ')[i] in line1:\r\n print(line.strip().split(' ')[i])\r\n # i = i + 1\r\n\r\n\r\n# fobj=open('1.txt','w+')\r\n# with open('toExpansion1.txt','r') as f:\r\n# for line in f:\r\n# with open('abbreviation.txt','r') as obj:\r\n# for strs in obj.readlines():\r\n# if line.strip() in strs.strip():\r\n# fobj.write(strs.strip()+\"\\n\")\r\n# fobj.close()","sub_path":"Expansion/toExpansion.py","file_name":"toExpansion.py","file_ext":"py","file_size_in_byte":754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"124375265","text":"#!/usr/local/bin/python3.5\n\nimport time, datetime\nfrom subprocess import call\n\nYear = 2016\nMonth = 1\nDay = 9\nHour = 1\nMinute = 1\nSecond = 1\n\nwhile True:\n Datetime = datetime.datetime(Year, Month, Day, Hour, Minute, Second)\n diff = Datetime - datetime.datetime.now()\n diff = str(diff)\n\n day_str, not_useful, time_str = diff.split()\n hour_str, minute_str, second = time_str.split(\":\")\n second_str, not_useful = second.split(\".\")\n\n years = int(day_str) / 365\n\n print(diff)\n\n year_final, year_fraction = divmod(years, 1)\n int_year_final = int(year_final)\n str_years_final = str(int_year_final)\n\n rounded_days = round(year_fraction * 365.25)\n int_days_final = int(rounded_days)\n str_days_final = str(int_days_final)\n\n Date0 = [str_years_final]\n Date1 = [str_days_final]\n Date2 = [hour_str]\n Date3 = [minute_str]\n Date4 = [second_str]\n\n if (int_year_final != 0):\n if str_years_final <= str(\"9\"):\n str_years_final = str(str_years_final[1])\n\n for str0 in Date0:\n if (str0 != str(0)):\n if (str(str_years_final) == str(\"1\")):\n print(\"-\" * 10)\n print(\"|\" + \" \" + str_years_final + \" \" + \"Year\" + \" \" + \"|\")\n print(\"-\" * 10)\n else:\n print(\"-\" * 13)\n print(\"|\" + \" \" + str_years_final + \" \" + \"Years\" + \" \" + \"|\")\n print(\"-\" * 13)\n\n if (int_days_final != 0):\n if str_days_final <= str(\"09\"):\n str_days_final = 
str(str_days_final[1])\n\n for str1 in Date1:\n if (str1 != str(0)):\n if (str(str_days_final) == str(\"1\")):\n print(\"-\" * 9)\n print(\"|\" + \" \" + str_days_final + \" \" + \"Day\" + \" \" + \"|\")\n print(\"-\" * 9)\n else:\n print(\"-\" * 12)\n print(\"|\" + \" \" + str_days_final + \" \" + \"Days\" + \" \" + \"|\")\n print(\"-\" * 12)\n\n if (int(hour_str) != 0):\n if hour_str <= str(\"09\"):\n hour_str = str(hour_str[1])\n\n for str2 in Date2:\n if (hour_str != str(0)):\n if (str(hour_str) == str(\"1\")):\n print(\"-\" * 10)\n print(\"|\" + \" \" + hour_str + \" \" + \"Hour\" + \" \" + \"|\")\n print(\"-\" * 10)\n else:\n print(\"-\" * 12)\n print(\"|\" + \" \" + hour_str + \" \" + \"Hours\" + \" \" + \"|\")\n print(\"-\" * 12)\n\n if (int(minute_str) != 0):\n if minute_str <= str(\"09\"):\n minute_str = str(minute_str[1])\n\n for str3 in Date3:\n if (minute_str != str(0)):\n if (str(minute_str) == str(\"1\")):\n print(\"-\" * 12)\n print(\"|\" + \" \" + minute_str + \" \" + \"Minute\" + \" \" + \"|\")\n print(\"-\" * 12)\n else:\n print(\"-\" * 14)\n print(\"|\" + \" \" + minute_str + \" \" + \"Minutes\" + \" \" + \"|\")\n print(\"-\" * 14)\n\n if (int(second_str) != 0):\n if second_str <= str(\"09\"):\n second_str = str(second_str[1])\n\n for str4 in Date4:\n if (str(second_str) == str(\"1\")):\n print(\"-\" * 13)\n print(\"|\" + \" \" + second_str + \" \" + \"Second\" + \" \" + \"|\")\n print(\"-\" * 13)\n else:\n print(\"-\" * 14)\n print(\"|\" + \" \" + second_str + \" \" + \"Seconds\" + \" \" + \"|\")\n print(\"-\" * 14)\n\n if (str_years_final == 0 and str_days_final == 0 and hour_str == 0 and minute_str == 0 and second_str == 0):\n break\n\n # Date0 = [str_years_final]\n # Date1 = [str_days_final]\n # Date2 = [hour_str]\n # Date3 = [minute_str]\n # Date4 = [second_str]\n\n time.sleep(1)\n","sub_path":"Python/Countdown Timer/Version 2/Countdown Timer v2.0.py","file_name":"Countdown Timer v2.0.py","file_ext":"py","file_size_in_byte":3260,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"305774908","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Jun 3 13:36:35 2018\n\n@author: colinpate\n\"\"\"\n\nimport os\nfrom PIL import Image\nimport numpy as np\n\nheight = 256\nwidth = 256\n\ndef main():\n file_dir = input(\"Test output directory?\")\n \n file_list = os.listdir(file_dir + \"\\\\images\")\n \n overall_se = 0\n pt_ct = 0\n \n for i in range(len(file_list)):\n filename = file_list[i]\n if filename[-11:] == \"targets.png\":\n pt_ct += width * height\n this_im_se = 0\n \n print(\"Pic \" + str(i) + \" out of \" + str(len(file_list)))\n targ_im = Image.open(file_dir + \"\\\\images\\\\\" + filename)\n targ_arr = np.reshape(np.asarray(targ_im), (height, width, 3))\n out_im = Image.open(file_dir + \"\\\\images\\\\\" + filename[:-11] + \"outputs.png\")\n out_arr = np.reshape(np.asarray(out_im), (height, width, 3))\n input_im = Image.open(file_dir + \"\\\\images\\\\\" + filename[:-11] + \"inputs.png\")\n in_arr = np.reshape(np.asarray(input_im), (height, width, 3))\n \n for x in range(width):\n for y in range(height):\n if in_arr[x,y,0] == 0:\n overall_se += (float(targ_arr[x,y,0]) - float(out_arr[x,y,0]))**2\n this_im_se += (float(targ_arr[x,y,0]) - float(out_arr[x,y,0]))**2\n print(\"This image MSE: \" + str(this_im_se / (width * height)))\n print(\"Final MSE: \" + str(overall_se / 
pt_ct))\nmain()","sub_path":"mse_calc.py","file_name":"mse_calc.py","file_ext":"py","file_size_in_byte":1498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"153535431","text":"import argparse\nimport configparser\nimport os\nimport pickle\nimport CPR_utils as util\n\nimport boto3\nimport cv2 as cv\n\ndef prepare(dataset_input_path):\n data = [] # List of images\n labels = [] # List of labels\n\n # Load all directory\n for root, dirs, files in os.walk(dataset_input_path):\n\n # Filter every folder\n for dir in dirs:\n print(\" Class : \\t \\t \" + dir)\n # Filter all files in the directory\n for filename in os.listdir(dataset_input_path + \"/\" + dir):\n # Make sure that our file is text\n if filename.endswith('.jpg'):\n img = cv.imread(dataset_input_path + \"/\" + dir + \"/\" + filename)\n gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)\n data.append(gray)\n labels.append(dir)\n\n # Save test data and labels\n data_path = \"../tmp/data.pickle\"\n labels_path = \"../tmp/labels.pickle\"\n pickle.dump(data, open(data_path, \"wb\"))\n pickle.dump(labels, open(labels_path, \"wb\"))\n\n print('Length data : ' + str(len(data)))\n print('Length labels : ' + str(len(labels)))\n print('Processs finished !')\n\n return [data_path, labels_path]\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description='Please provide amazon credentials')\n parser.add_argument('--conf', default=\"../conf/config.cfg\", help='the path of config.cfg')\n args = parser.parse_args()\n\n config = configparser.ConfigParser()\n config.read(args.conf)\n\n aws_access_key_id = config['AWS_access_credentials']['aws_access_key_id']\n aws_secret_access_key = config['AWS_access_credentials']['aws_secret_access_key']\n\n client = boto3.client('s3', aws_access_key_id=aws_access_key_id, aws_secret_access_key=aws_secret_access_key)\n\n dataset_input_path = '../dataset'\n if not os.path.exists(dataset_input_path):\n os.makedirs(dataset_input_path)\n\n paths = prepare(dataset_input_path)\n\n for f in paths:\n client.upload_file(f, config['buckets']['chardata'], os.path.basename(f))\n\n print(\"All character files in local have been prepared and uploaded to S3.\")","sub_path":"dags/PrepareCNNData.py","file_name":"PrepareCNNData.py","file_ext":"py","file_size_in_byte":2151,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"425515013","text":"import os\nimport cv2\n\ndef get_files(path):\n # read a folder, return the complete path\n ret = []\n for root, dirs, files in os.walk(path):\n for filespath in files:\n ret.append(os.path.join(root, filespath))\n return ret\n\ndef get_jpgs(path):\n # read a folder, return the image name\n ret = []\n for root, dirs, files in os.walk(path):\n for filespath in files:\n ret.append(filespath)\n return ret\n\nbasepath = 'F:\\\\submitted papers\\\\my papers\\\\CVPR NTIRE 2020 Spectral Reconstruction\\\\latex\\\\img'\nimglist = get_files(basepath)\nnamelist = get_jpgs(basepath)\n\nfor i in range(len(imglist)):\n img = cv2.imread(imglist[i])\n savename = os.path.join(basepath, namelist[i][:-4] + '.jpg')\n cv2.imwrite(savename, img, [int(cv2.IMWRITE_JPEG_QUALITY), 70])\n","sub_path":"png2jpg.py","file_name":"png2jpg.py","file_ext":"py","file_size_in_byte":805,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"387793988","text":"import transactions\nimport user\nimport stocks\nimport read_crypto_data\nimport get_crypto_data\nimport 
wallet_carrier\nfrom datetime import date, datetime\n\ndef getCountrySpecificStocks(country):\n countrySpecificStocks = []\n for market in stocks.getMarketsInCountry(country):\n countrySpecificStocks.extend(market.stocks)\n print(countrySpecificStocks)\n return countrySpecificStocks\n\n# Create stock markets\nnsd = stocks.Stock_Market('USA', 'NASDAQ')\nsp = stocks.Stock_Market('USA', 'S&P 500')\natg = stocks.Stock_Market('Greece', 'ATG')\n\n# Get Stock Data in a specific date range\nsp.getStockAttributes('TSLA',date(2020,2,2), date(2020,3,1)).printStock()\n\n# Create two users\nkypr = user.User('kypr','kyprianosmantis@gmail.com', '123456')\nkypr2 = user.User('kypr2','kyprianosmantis@gmail.com', '123456')\n\n# Create carriers (and add them to the Carrier Dictionary)\nwallet_carrier.Carrier('National Bank of Greece')\nwallet_carrier.Carrier('Piraeus Bank')\nwallet_carrier.Carrier('PayPal')\n\n# Create new carrier accounts and add them to \n# the corresponding carrier's accounts dictionary\ntempAcc = wallet_carrier.Account(1234567890, 'Qwerty!@34', 'National Bank of Greece', 1000)\nwallet_carrier.carrierDictionary[tempAcc.carrierName].accounts[tempAcc.ID] = tempAcc\ntempAcc = wallet_carrier.Account(4567890123, 'Qwerty!@34', 'Piraeus Bank', 2500)\nwallet_carrier.carrierDictionary[tempAcc.carrierName].accounts[tempAcc.ID] = tempAcc\ntempAcc = wallet_carrier.Account('kiprianosmantis@gmail.com', 'Qwerty!@34', 'PayPal', 4000)\nwallet_carrier.carrierDictionary[tempAcc.carrierName].accounts[tempAcc.ID] = tempAcc\n\n# Add payment methods to wallet\nkypr.wallet.addPaymentMethod('National Bank of Greece',(1234567890, 'Qwerty!@34'))\nkypr.wallet.addPaymentMethod('Piraeus Bank',(4567890123, 'Qwerty!@34'))\nkypr.wallet.addPaymentMethod('PayPal',('kiprianosmantis@gmail.com', 'Qwerty!@34'))\n#wallet_carrier.printAllAccountsInAllCarriers()\n\n# Buy 2 shares at Tesla and 3 shares at Plaisio (private transactions)\nkypr.buy({sp.stocks['TSLA']:(2, False),atg.stocks['PlAIS.AT']:(3, False)})\n\n# Show balance in accounts after payment\nkypr.wallet.printPaymentMethods()\n\n# Check user's Trading History\nprint('Trading History of user',kypr.username)\nfor transaction in kypr.getUserTransactions():\n transaction.printTransaction()\n\n# Create Crypto Market\ncm = read_crypto_data.Crypto_Market()\n\n# Print all crypto coins in the market\ncm.printAllCryptoNames()\n\n# Get Crypto Data in a specific date range\ncm.getCryptoAttributes('BTC', datetime(2021,1,1)).printCrypto()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"124975556","text":"# -*- coding: utf-8 -*-\n##############################################################################\n#\n# Copyright (C) 2015 Lorenzo Battistini \n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as published\n# by the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. 
If not, see .\n#\n##############################################################################\n\nimport openerp.tests.common as test_common\nfrom openerp import addons\n\n\nclass TestPecMessages(test_common.SingleTransactionCase):\n\n def getFile(self, filename):\n path = addons.get_module_resource('l10n_it_pec_messages',\n 'tests', 'data', filename)\n with open(path) as test_data:\n content = test_data.read()\n return content\n\n def setUp(self):\n super(TestPecMessages, self).setUp()\n self.thread_model = self.registry('mail.thread')\n self.message_model = self.registry('mail.message')\n self.mail_model = self.registry('mail.mail')\n self.fetchmail_model = self.registry('fetchmail.server')\n self.compose_msg_model = self.registry('mail.compose.message')\n\n def test_message_1(self):\n cr, uid = self.cr, self.uid\n msg = self.getFile('message1')\n context = {\n 'lang': 'en_US',\n 'tz': False,\n 'uid': 1,\n 'fetchmail_cron_running': True,\n 'server_type': u'imap',\n 'fetchmail_server_id': 1,\n }\n self.thread_model.message_process(\n cr, uid, None, msg, save_original=False, strip_attachments=False,\n context=context)\n msg_ids = self.message_model.search(\n cr, uid, [\n ('pec_msg_id', '=',\n 'opec275.20141107165200.03048.08.1.17@pec.aruba.it')])\n self.assertEqual(len(msg_ids), 1)\n msg = self.message_model.browse(cr, uid, msg_ids[0])\n self.assertEqual(msg.pec_type, 'posta-certificata')\n self.assertEqual(msg.direction, 'in')\n imap_server_id = self.ref('l10n_it_pec_messages.imap_pec_server')\n self.assertEqual(msg.server_id.id, imap_server_id)\n self.assertEqual(msg.email_from, 'thinkstudio@pec.it')\n self.assertEqual(\n msg.message_id,\n u'')\n self.assertFalse(msg.author_id)\n\n def test_message_2_with_partner(self):\n cr, uid = self.cr, self.uid\n msg_file = self.getFile('message2')\n accettazione_msg_file = self.getFile('message2_accettazione')\n consegna_msg_file = self.getFile('message2_consegna')\n fetch_context = {\n 'lang': 'en_US',\n 'tz': False,\n 'uid': 1,\n 'fetchmail_cron_running': True,\n 'server_type': u'imap',\n 'fetchmail_server_id': 1,\n }\n imap_server_id = self.ref('l10n_it_pec_messages.imap_pec_server')\n self.fetchmail_model.write(cr, uid, [imap_server_id], {\n 'force_create_partner_from_mail': True,\n })\n self.thread_model.message_process(\n cr, uid, None, msg_file, save_original=False,\n strip_attachments=False, context=fetch_context)\n msg_ids = self.message_model.search(\n cr, uid, [\n ('pec_msg_id', '=',\n 'opec275.20141127151216.06559.08.1.17@pec.aruba.it')])\n self.assertEqual(len(msg_ids), 1)\n msg = self.message_model.browse(cr, uid, msg_ids[0])\n self.assertEqual(msg.author_id.name, u'thinkstudio@pec.it')\n self.assertEqual(msg.email_from, 'thinkstudio@pec.it')\n context = {\n 'lang': 'en_US',\n 'search_disable_custom_filters': True,\n 'new_pec_mail': True,\n 'tz': False,\n 'uid': 1,\n 'show_pec_email': True,\n 'active_model': 'mail.message',\n 'reply_pec': True,\n 'default_composition_mode': 'reply',\n 'pec_messages': True,\n 'default_partner_ids': [msg.author_id.id],\n 'active_ids': msg_ids,\n 'active_id': msg_ids[0],\n }\n wizard_id = self.compose_msg_model.create(\n cr, uid, {'body': u'
replying to message2
'}, context=context)\n self.compose_msg_model.send_mail(cr, uid, [wizard_id], context=context)\n sent_msg_ids = self.registry('mail.mail').search(\n cr, uid, [('parent_id', '=', msg_ids[0])])\n self.assertEqual(len(sent_msg_ids), 1)\n sent_msg = self.mail_model.browse(cr, uid, sent_msg_ids[0])\n self.assertEqual(sent_msg.pec_type, 'posta-certificata')\n # setting message_id according to test data about\n # delivery and reception messages\n sent_msg.write({\n 'message_id': \"<1415985992.182905912399292.346704098667155-\"\n \"openerp-private@elbati-Vostro-3550>\"\n })\n\n # accettazione\n self.thread_model.message_process(\n cr, uid, None, accettazione_msg_file, save_original=False,\n strip_attachments=False, context=fetch_context)\n accettazione_msg_ids = self.message_model.search(\n cr, uid, [\n ('pec_msg_id', '=',\n 'opec275.20141114182632.23219.07.1.48@pec.aruba.it'),\n ('pec_type', '=', 'accettazione')])\n self.assertEqual(len(accettazione_msg_ids), 1)\n accettazione_msg = self.message_model.browse(\n cr, uid, accettazione_msg_ids[0])\n self.assertEqual(\n accettazione_msg.pec_msg_parent_id.id, sent_msg.mail_message_id.id)\n self.assertEqual(accettazione_msg.err_type, 'nessuno')\n # no delivery message received yet\n sent_msg.refresh()\n self.assertEqual(sent_msg.message_ok, False)\n\n # consegna\n self.thread_model.message_process(\n cr, uid, None, consegna_msg_file, save_original=False,\n strip_attachments=False, context=fetch_context)\n consegna_msg_ids = self.message_model.search(\n cr, uid, [\n ('pec_msg_id', '=',\n 'opec275.20141114182632.23219.07.1.48@pec.aruba.it'),\n ('pec_type', '=', 'avvenuta-consegna')])\n consegna_msg = self.message_model.browse(\n cr, uid, consegna_msg_ids[0])\n self.assertEqual(\n consegna_msg.pec_msg_parent_id.id, sent_msg.mail_message_id.id)\n self.assertEqual(consegna_msg.err_type, 'nessuno')\n self.assertEqual(len(consegna_msg_ids), 1)\n # delivery and reception messages received\n sent_msg.refresh()\n self.assertEqual(sent_msg.message_ok, True)\n","sub_path":"l10n_it_pec_messages/tests/test_pec_messages.py","file_name":"test_pec_messages.py","file_ext":"py","file_size_in_byte":7270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"62109217","text":"from lib.hand_eval import convert_string_to_int, score_best_five\nfrom Global import State\nfrom random import random\n\n\nclass Turn(object):\n\n @classmethod\n def get_action(cls, data):\n # GETACTION potSize numBoardCards [boardCards] [stackSizes] numActivePlayers [activePlayers] numLastActions [lastActions] numLegalActions [legalActions] timebank\n data = data.split()\n getaction = data.pop(0)\n potSize = int(data.pop(0))\n numBoardCards = int(data.pop(0))\n\n board_cards = []\n for _ in range(numBoardCards):\n board_cards.append(convert_string_to_int(data.pop(0)))\n\n stack1 = int(data.pop(0))\n stack2 = int(data.pop(0))\n stack3 = int(data.pop(0))\n\n numActivePlayers = int(data.pop(0))\n active1 = data.pop(0)\n active2 = data.pop(0)\n active3 = data.pop(0)\n\n numLastActions = int(data.pop(0))\n\n prev_actions = []\n for _ in range(numLastActions):\n prev_actions.append(data.pop(0))\n\n numLegalActions = int(data.pop(0))\n\n legal_actions = []\n for _ in range(numLegalActions):\n legal_actions.append(data.pop(0))\n\n if numLegalActions == 1:\n return legal_actions[0]\n\n State.timebank = float(data.pop(0))\n\n\n # These are the variables based on position\n seat = State.seat\n numActivePlayers = numActivePlayers\n score = 
score_best_five(board_cards + State.hole_cards)\n\n\n # The logic will be to consider the probability that we win and compute\n # First the pot odd, then the implied odds, then the fold equity\n\n # CHECK / BET\n # CALL / FOLD / RAISE\n\n\n\n # Case 1\n #######################################################################\n # Nobody else has acted\n\n # TODO: consider fold equity for betting and reverse pot odds\n if any([x for x in legal_actions if 'CHECK' in x]):\n # Determine if we should show strength and how much\n\n # If we have a hand, then bet, if we don't then do not\n bet_prob = 0\n\n # We bet if we have more than a pair\n if score[0] > 1:\n bet_prob = 1\n elif score[0] == 1:\n val_of_pair = score[1]\n # val_of_pair goes from 0 - 12\n bet_prob += .28\n bet_prob += val_of_pair * .06\n # scaled so always betting aces\n elif score[0] == 0:\n bet_prob += score[1][0] * .01\n\n r = random()\n if r < bet_prob:\n betting_action = [x for x in legal_actions if 'BET' in x]\n if not betting_action:\n return 'CHECK'\n b, lo, hi = betting_action[0].split(':')\n lo = int(lo)\n hi = int(hi)\n\n # BET\n if score[0] >= 4:\n # Max bet with a straight\n bet_amt = hi\n return 'RAISE:%d' % bet_amt\n\n if score[0] >= 2:\n bet_amt = max(min(int((.25 + random()) * hi * State.aggressiveness), hi), lo)\n return 'RAISE:%d' % bet_amt\n\n if score[0] >= 1:\n bet_amt = max(min(int((.05 * score[1]) * hi * State.aggressiveness), hi), lo)\n return 'RAISE:%d' % bet_amt\n\n bet_amt = lo\n return 'RAISE:%d' % bet_amt\n else:\n return 'CHECK'\n\n\n # Case 2\n #######################################################################\n # Need to decide if we should FOLD / CALL / RAISE\n # TODO: Consider if we are facing multiple bets. Tune this\n\n if any([x for x in legal_actions if 'CALL' in x]):\n # Compute pot odds\n call_action = [x for x in legal_actions if 'CALL' in x][0]\n call_amt = int(call_action.split(':')[-1])\n pot_size = potSize\n\n pot_odds = float(call_amt) / (2 * call_amt + potSize)\n\n\n # Determine what the odds of winning are by guessing\n guessed_win_prob = 0\n if score[0] == 0:\n guessed_win_prob = float(score[1][0] / 13) / 40\n\n if score[0] <= 2:\n # PAIR\n if score[0] == 1:\n guessed_win_prob += .05 * score[1]\n\n # TWO PAIR\n if score[0] == 2:\n guessed_win_prob += .7\n guessed_win_prob += .05 * score[1]\n\n if pot_odds < guessed_win_prob:\n return call_action\n\n return 'FOLD'\n\n\n betting_action = [x for x in legal_actions if 'RAISE' in x]\n if not betting_action:\n return call_action\n b, lo, hi = betting_action[0].split(':')\n lo = int(lo)\n hi = int(hi)\n\n # FULL HOUSE or better is always max raise\n if score[0] >= 6:\n return 'RAISE:%d' % hi\n\n # FLUSH\n if score[0] == 5:\n # If the kicker is high enough\n if score[1] >= 10:\n return 'RAISE:%d' % hi\n else:\n if random() < score[1] * .1:\n return 'RAISE:%d' % hi\n else:\n return call_action\n\n # STRAIGHT\n if score[0] == 4:\n # If the kicker is high enough\n if score[1] >= 10:\n return 'RAISE:%d' % hi\n return call_action\n\n\n # Otherwise we want to get the pot bigger\n if pot_odds < 2 * guessed_win_prob:\n return 'RAISE:%d' % hi\n\n return call_action\n\n\n return 'CHECK'\n","sub_path":"players/v0/Turn.py","file_name":"Turn.py","file_ext":"py","file_size_in_byte":5829,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"275427966","text":"#! 
/usr/bin/env python\n\nfrom pyfeyn.user import *\nfrom pyx import *\n\nfd = FeynDiagram()\n\nin1 = Point(-4, 2)\nin2 = Point(-4, -2)\nout1 = Point(4, -2)\nout2 = Point(4, 2)\nin_vtx = Vertex(-2, 0, mark=CIRCLE)\nout_vtx = Vertex(2, 0, mark=CIRCLE)\n\nfa1 = Fermion(in1, in_vtx).addArrow().addLabel(r\"\\Pelectron\")\nfa2 = Fermion(in_vtx, in2).addArrow().addLabel(r\"\\Ppositron\")\nbos = Photon(in_vtx, out_vtx).addLabel(r\"\\Pphoton/\\PZ\")\nfb1 = Fermion(out1, out_vtx).addArrow(0.2).addLabel(r\"\\APquark\")\nfb2 = Fermion(out_vtx, out2).addArrow(0.8).addLabel(r\"\\Pquark\")\nglu = Gluon(midpoint(out_vtx, out1), midpoint(out_vtx, out2))\nglu.invert().bend(0.5).addLabel(\"\\Pgluon\", displace=0.25)\n\nnumcopies = 10\nangle = 0.8\nc1 = fd.drawToCanvas()\nc2 = canvas.canvas()\nc2.insert(c1, [trafo.rotate(-numcopies*angle)])\n\nc = canvas.canvas()\nfor i in range(numcopies):\n trans = 1 - ((i+1)/float(numcopies))**8\n c.insert(c2, [trafo.rotate((i+1)*angle), color.transparency(trans)])\n c.insert(c1, [trafo.translate(0.1 - 0.01*(i+1), -5 + 0.05*(i+1)), color.transparency(trans)])\n\nc.writetofile(\"pyfeyn-test7.pdf\")\n","sub_path":"examples/pyfeyn-test7.py","file_name":"pyfeyn-test7.py","file_ext":"py","file_size_in_byte":1089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"287742371","text":"from django.shortcuts import render\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom blog.models import Feed\n\n\ndef about_view(request):\n\tcontext = {}\n\tfeeds = Feed.objects.all().order_by('-rating')[:4]\n\tcontext['feeds'] = feeds\n\treturn render(request, 'blog/home_page.html', context)\n\n\ndef detail_view(request, id_feed):\n\tcontext = {}\n\ttry:\n\t\tfeed = Feed.objects.get(id=id_feed)\n\texcept ObjectDoesNotExist:\n\t\tfeed = None\n\tcontext['feed'] = feed\n\treturn render(request, 'blog/detail_page.html', context)","sub_path":"blog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"224229687","text":"from glob import glob\nfrom collections import defaultdict\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport matplotlib as mpl\nimport click\nimport matplotlib.patches as mpatches\nimport os\n\ndef config_params(font_size=7):\n mpl.rcParams.update(mpl.rcParamsDefault)\n plt.rcParams['font.sans-serif'] = ['arial']\n plt.rcParams['font.size'] = font_size\n plt.rcParams['font.family'] = ['sans-serif']\n plt.rcParams['svg.fonttype'] = 'none'\n plt.rcParams['mathtext.fontset'] = 'custom'\n plt.rcParams['mathtext.cal'] = 'arial'\n plt.rcParams['mathtext.rm'] = 'arial'\n\n\ndef get_summary_results(path_results):\n\n all_stab_avg = glob(os.path.join(path_results,'avgStability_*'))\n all_stab_specific = glob(os.path.join(path_results, 'processesStabAvg_*'))\n all_err_avg = glob(os.path.join(path_results, 'avgReconstructionErrorPercentage_*'))\n\n results = defaultdict(dict)\n for f in all_stab_avg:\n number_signatures = int(f.split('_')[-1])\n with open(f, 'rt') as infile:\n for line in infile:\n results['stab_avg'][number_signatures] = float(line.rstrip())\n\n for f in all_err_avg:\n number_signatures = int(f.split('_')[-1])\n with open(f, 'rt') as infile:\n for line in infile:\n results['stab_err'][number_signatures] = float(line.rstrip())\n\n for f in all_stab_specific:\n number_signatures = int(f.split('_')[-1])\n df = pd.read_csv(f, sep='\\t')\n results['avg_specific'][number_signatures] = 
list(df.loc[0].values)\n\n return results\n\n\ndef plot_signatures(results, path_results):\n\n config_params(5)\n fig, ax = plt.subplots(1, 1, figsize=(3, 1.5))\n\n ax2 = ax.twinx()\n sorted_numbers = sorted(results['stab_avg'].keys())\n lines_avg = []\n lines_err = []\n xticks = []\n\n for signature in sorted_numbers:\n\n ax.scatter(signature, results['stab_avg'][signature], color='darkred', s = 8)\n ax.scatter([signature for _ in range(signature)], results['avg_specific'][signature], color='darkred', alpha=0.2, s = 8)\n ax2.scatter(signature, results['stab_err'][signature], color='orange', s = 8)\n lines_avg.append(results['stab_avg'][signature])\n lines_err.append(results['stab_err'][signature])\n xticks.append(signature)\n\n ax.set_ylabel('Stability')\n ax2.set_ylabel('Reconstruction Error')\n ax.plot(xticks, lines_avg, color='darkred')\n ax2.plot(xticks, lines_err, color='orange')\n ax.set_xlabel('Signatures Active')\n\n pop_a = mpatches.Patch(color='darkred', label='Average Signatures Stability')\n pop_b = mpatches.Patch(color='orange', label='Average Reconstruction Error')\n\n plt.legend(handles=[pop_a, pop_b], bbox_to_anchor=(1.1, 1.25))\n plt.savefig('{}/signatures_analysis.png'.format(path_results), dpi=600, bbox_inches='tight')\n plt.close()\n\n\n@click.command()\n@click.option('--path_results',\n type=click.Path(exists=True),\n help=\"Input data\",\n required=True)\ndef run(path_results):\n results = get_summary_results(path_results)\n plot_signatures(results, path_results)\n\nif __name__ == '__main__':\n run()\n","sub_path":"postprocessing/plot_select_signatures.py","file_name":"plot_select_signatures.py","file_ext":"py","file_size_in_byte":3170,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"472406907","text":"from django.core.mail import send_mail\nfrom django.shortcuts import render\n\n# Create your views here.\nfrom Test.models import profile, banerImage, portfolio_image\nfrom portfolio_Mz.settings import EMAIL_HOST_USER\n\n\ndef index(request):\n profiles = profile.objects.all()\n banerImages = banerImage.objects.all()\n portfolio_images = portfolio_image.objects.all()\n\n return render(request,'index.html',{'profiles':profiles,'banerImages':banerImages,'portfolio_images':portfolio_images})\n\n\ndef send_email(request):\n print('working')\n if request.method == 'POST':\n name = request.POST['name']\n email = request.POST['email']\n subject = request.POST['subject']\n number = request.POST['number']\n if name and email and subject and number:\n print(name, subject, email)\n print('email')\n email = send_mail(' from :{}'.format(email), 'Hey, it\\'s {}. 
Phone Number: {} '.format(name, number) + subject,\n EMAIL_HOST_USER, ['MijanHawlader746@gmail.com', ], fail_silently=False)\n print(email)\n\n return render(request, 'index.html')\n\n else:\n return render(request, 'index.html')","sub_path":"Test/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1211,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"358396356","text":"import RPi.GPIO as gpio\nimport time, os, sys\n\nclass Sides:\n def __init__(self, TRIG, ECHO):\n self.__TRIG = TRIG\n self.__ECHO = ECHO\n self.__distance = 0\n self.getSideDist()\n \n @property\n def Trig(self):\n return self.__TRIG\n \n @Trig.setter\n def Trig(self, value):\n self.__TRIG = value\n \n @property\n def Echo(self):\n return self.__ECHO\n \n @Echo.setter\n def Echo(self, value):\n self.__ECHO = value\n \n @property\n def Distance(self):\n return self.__distance\n \n @Distance.setter\n def Distance(self, value):\n self.__Distance = value\n \n def getSideDist(self):\n #This is a pre written method which checks an ultrasonic sensor that is connected to the pi\n #So the pins will be imported depending which sensor is running in the while loop further down\n gpio.setup(self.__TRIG,gpio.OUT)\n gpio.setup(self.__ECHO,gpio.IN)\n gpio.output(self.__TRIG, False)\n #print \"Waiting For Sensor To Settle\"\n time.sleep(0.2)\n gpio.output(self.__TRIG, True)\n time.sleep(0.00001)\n gpio.output(self.__TRIG, False)\n while gpio.input(self.__ECHO)==0:\n pulse_start = time.time()\n while gpio.input(self.__ECHO)==1:\n pulse_end = time.time()\n pulse_duration = pulse_end - pulse_start\n distance = pulse_duration * 17150\n distance = round(distance, 2)\n #print (\"Distance: \" + str(distance) + \"cm\")\n self.__distance = distance\n","sub_path":"robot/sides.py","file_name":"sides.py","file_ext":"py","file_size_in_byte":1401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"111545876","text":"import os\nimport cv2\n\ndirname = os.path.dirname(__file__)\n\n\nclass DIP:\n def reverse_rgb(self, img):\n b = img[:, :, 0].copy()\n g = img[:, :, 1].copy()\n r = img[:, :, 2].copy()\n\n img[:, :, 0] = r\n img[:, :, 1] = g\n img[:, :, 2] = b\n\n return img\n\n\nif __name__ == '__main__':\n img = cv2.imread(os.path.join(dirname, '../imori.jpg'))\n\n dip = DIP()\n img = dip.reverse_rgb(img)\n\n cv2.imwrite(os.path.join(dirname, 'imori_out.jpg'), img)\n","sub_path":"Question_01_10/custom_py/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"623635100","text":"# -*- coding: utf-8 -*-\nfrom apps.database.session import db\nfrom config import JsonConfig\n\n\ndef get_model(model):\n if JsonConfig.get_data('TESTING'):\n return model.test_model\n return model\n\n\nclass TestMixin:\n id = db.Column(db.Integer, primary_key=True, autoincrement=True)\n message = db.Column(db.String(120))\n\n def __init__(self, message=None):\n self.message = message\n\n def __repr__(self):\n return ''.format(self.id)\n\n\nclass TestTestModel(TestMixin, db.Model):\n __tablename__ = 'test_tests'\n __table_args__ = {'extend_existing': True}\n\n\nclass TestModel(TestMixin, db.Model):\n __tablename__ = 'tests'\n __table_args__ = {'extend_existing': True}\n\n test_model = TestTestModel\n\nTest = 
get_model(TestModel)\n","sub_path":"apps/database/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"151199699","text":"# Autor: Andrea Romo Ortega\r\n# Misión 7\r\n\r\n\r\n# dividir\r\ndef dividir(residuo, divisor):\r\n cociente = 0\r\n\r\n while residuo >= divisor:\r\n residuo = residuo - divisor\r\n\r\n cociente = cociente + 1\r\n\r\n print(\"El residuo es: \", residuo)\r\n\r\n print(\"El cociente es: \", cociente)\r\n\r\n\r\n# encontrar mayor\r\n\r\ndef encontrarMayor():\r\n\r\n num = int(input(\"Escribe un número positivo [ -1 para salir]: \"))\r\n\r\n mayor = num\r\n\r\n while num != -1:\r\n\r\n num = int(input(\"Escribe un número positivo [ -1 para salir]: \"))\r\n\r\n if num > mayor:\r\n\r\n mayor = num\r\n\r\n if mayor == -1:\r\n print (\"No hay mayor\")\r\n\r\n else:\r\n\r\n print (\"El mayor es \", mayor)\r\n\r\n\r\n\r\n\r\n\r\ndef main():\r\n\r\n salir = False\r\n\r\n while not salir:\r\n\r\n print (\"\"\"Menu\r\n 1. Calcular divisiones\r\n 2. Encontrar mayor\r\n 3. salir\r\n \"\"\")\r\n menu = int(input(\"Teclea tu opción\"))\r\n\r\n if menu == 1:\r\n\r\n\r\n residuo = int(input(\"Escribe el dividendo: \"))\r\n divisor = int(input(\"Escribe el divisor: \"))\r\n dividir(residuo, divisor)\r\n elif menu == 2:\r\n encontrarMayor()\r\n\r\n elif menu ==3:\r\n print(\"Hasta pronto :D\")\r\n salir = True\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\nmain()","sub_path":"Misión_7.py","file_name":"Misión_7.py","file_ext":"py","file_size_in_byte":1329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"416585190","text":"from functools import reduce\n\ndef euclidGCD(a, b):\n x,y, u,v = 0,1, 1,0\n while a != 0:\n q, r = b//a, b%a\n m, n = x-u*q, y-v*q\n b,a, x,y, u,v = a,r, u,v, m,n\n gcd = b\n return gcd, x, y\n\ndef factor(n):\n # slow step\n count = 2.0\n while(not (n / count).is_integer()):\n count += 1\n\n # divide by test values until primes are found\n return (int(count), int(n/count))\n\ndef computePrivate(p, q, e):\n totient = (p - 1) * (q - 1)\n\n gcd, d, b = modInv = euclidGCD(e, totient)\n\n return d % totient\n\ndef getPrivateKey(n, e):\n # get primes\n p,q = factor(n)\n\n # compute private key\n return computePrivate(p, q, e)\n\nif __name__ == '__main__':\n print(getPrivateKey(1037, 7))\n input(\"press enter to close\")\n","sub_path":"Numero4/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"544538637","text":"import numpy as np\r\nimport pygame\r\n\r\n#Custom object containing data for displaying an animated GIF\r\n#Created as python is not very friendly when it comes to interacting with animated GIFs\r\nclass AnimatedGif:\r\n def __init__(self, new_x, new_y, new_w, new_h, new_paths, new_fps=10):\r\n self.frameCounter = 0 #Index of frame array that contains the next image to be displayed\r\n self.x = new_x\r\n self.y = new_y\r\n self.w = new_w\r\n self.h = new_h\r\n self.fps = new_fps #FPS = Frames per Second\r\n self.time_dif = int(1000/self.fps)\r\n self.init_ticks = pygame.time.get_ticks()\r\n self.noOfFrames = len(new_paths)\r\n self.framePaths = np.array(new_paths) #Array of string, to store the file paths\r\n self.frames = np.array([None] * self.noOfFrames) #Array of pygame Surface objects, to store the actual graphical frames\r\n for n in range(self.noOfFrames):\r\n 
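# Decode and scale every frame once at construction time so that getCurFrame()
# only returns ready-made Surfaces, trading startup cost and memory for cheap
# per-frame playback.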
self.frames[n] = pygame.transform.scale(pygame.image.load(new_paths[n]), [self.w, self.h])\r\n\r\n #Return frame of the GIF to be displayed at any given moment, based on ticks (i.e. time) and fps of the gif\r\n #The GIF loops indefinitely\r\n def getCurFrame(self):\r\n self.frameCounter = int((pygame.time.get_ticks() - self.init_ticks) / self.time_dif) % self.noOfFrames\r\n return self.frames[self.frameCounter]\r\n","sub_path":"anigif/anigif.py","file_name":"anigif.py","file_ext":"py","file_size_in_byte":1368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"121205434","text":"import numpy as np\nimport pointClass\nfrom sklearn.cluster import DBSCAN\nfrom sklearn.preprocessing import StandardScaler\nfrom scipy.spatial import ConvexHull\nfrom math import sqrt\nimport codecs\nfrom operator import add\nimport json\nimport sys\nimport fspLib\n\ndef binCoordinate(strCord, fBinSize):\n return str(int(float(strCord)/fBinSize)*fBinSize)\n\nclass ScoreRecord:\n def __init__(self, record,score):\n self.lat = record.lat\n self.lon = record.lon\n self.text = record.text.replace(u'\\n', u' ')\n self.score = str((int(float(score)*10000.)*1.)/10000.)\n self.username = record.user\n self.dt = record.dt\n self.img = record.img\n self.cluster = -1\n\n def toDict(self):\n obj = {\n 'date': str(self.dt.date()),\n 'datetime':str(self.dt),\n 'sco': self.score,\n 'cap': self.text,\n 'usr': self.username,\n 'lon': self.lon,\n 'lat': self.lat,\n }\n if self.img is not None and len(self.img) > 0:\n obj['img'] = self.img\n return obj\n\nclass ScoreBin:\n def __init__(self, record=None):\n self.users = set([])\n self.lat = ''\n self.lon = ''\n self.dt = None\n self.records = []\n self.poly = []\n self.objPoly = None\n self.postsInHull = -1\n if record is not None:\n self.lat = record.lat\n self.lon = record.lon\n self.dt = record.dt\n self.records.append(record)\n self.users.add(record.username)\n\n def addRecord(self, record):\n self.records.append(record)\n self.users.add(record.username)\n if record.dt < self.dt:\n self.dt = record.dt\n\n def toDict(self):\n return {\n 'date': str(self.dt.date()),\n 'datetime': str(self.dt),\n 'lat': self.lat,\n 'lon': self.lon,\n 'nUnique': len(self.users),\n 'nTotal': len(self.records),\n 'poly': list(self.poly),\n 'background': self.postsInHull,\n 'posts': map(lambda x: x.toDict(), self.records)\n }\n\ndef assignToCluster(recordList, epsilon, nMin):\n lalo = []\n for obj in recordList:\n lalo.append([float(obj.lon), float(obj.lat)])\n\n X = StandardScaler().fit_transform(lalo)\n fitObj = StandardScaler().fit(lalo)\n laEps = epsilon/fitObj.std_[0]\n loEps = epsilon/fitObj.std_[1]\n fitEps = sqrt(laEps*laEps+loEps*loEps)\n db = DBSCAN(eps=fitEps, min_samples=nMin).fit(X)\n for ind in range(len(recordList)):\n recordList[ind].cluster = db.labels_[ind]\n\ndef createHull(cluster, bUseTime):\n loLa = set([])\n for point in cluster.records:\n loLa.add(str(point.lon)+\",\"+str(point.lat))\n loLa = map(lambda x: [float(x.split(\",\")[0]), float(x.split(\",\")[1])],loLa)\n if len(loLa) > 2:\n loLa = np.array(loLa)\n hull = ConvexHull(loLa)\n lClustPoints = []\n lPointObj = []\n for verts in hull.vertices:\n lClustPoints.append([loLa[verts,1], loLa[verts,0]])\n lPointObj.append(pointClass.Point(loLa[verts,0], loLa[verts,1]))\n if bUseTime:\n cluster.objPoly = pointClass.spaceTimePlane(lPointObj,[(cluster.dt.date(), cluster.dt.date())],\"Hull\")\n else:\n cluster.objPoly = pointClass.Polygon(lPointObj)\n cluster.poly = lClustPoints\n 
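# Fewer than three distinct coordinates cannot form a convex hull, so the
# fallback below builds a small fixed-offset box around the first point.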
else:\n lPointObj = []\n lClustPoints = []\n for lolaOff in [(-0.001,-0.001), (-0.001,0.001), (0.001,0.001), (0.001,-0.001),]:\n offset = [loLa[0][0]+lolaOff[0], loLa[0][1]+lolaOff[1]]\n lPointObj.append(pointClass.Point(offset[0], offset[1]))\n lClustPoints.append([offset[1], offset[0]])\n if bUseTime:\n cluster.objPoly = pointClass.spaceTimePlane(lPointObj,[(cluster.dt.date(), cluster.dt.date())],\"Hull\")\n else:\n cluster.objPoly = pointClass.Polygon(lPointObj)\n cluster.poly = lClustPoints\n\ndef makeTotalsArray(record, bc_lClustPoly, bUseTime):\n lRet = []\n for poly in bc_lClustPoly.value:\n if not bUseTime and poly.bPointInPoly(pointClass.Point(record.lon,record.lat)):\n lRet.append(1)\n elif bUseTime and poly.bEventInTime(pointClass.Point(record.lon,record.lat),record.dt.date()):\n lRet.append(1)\n else:\n lRet.append(0)\n return lRet\n\ndef totalTweets(bin, rdd):\n lPointObj = []\n for cPoint in bin.poly:\n lPointObj.append(pointClass.Point(cPoint[1], cPoint[0]))\n clusterPoly = pointClass.Polygon(lPointObj)\n bin.postsInHull = rdd.filter(lambda x: clusterPoly.bPointInPoly(pointClass.Point(x[\"longitude\"], x[\"latitude\"]))).count()\n\n\n\ndef scoreIndPosts(fileName,scoredRecords,outdir='scoreFiles'):\n \"\"\"\n Score posts and write to a file\n :param fileName:\n :param scoredRecords:\n \"\"\"\n fOut = codecs.open(outdir+\"/\"+fileName, encoding=\"utf-8\",mode=\"wb\")\n for term in scoredRecords:\n record = term[0][1]\n score = term[1]\n buffer = [record.lat,record.lon,record.text,score,record.user,record.dt,record.img]\n buffer = map(lambda x: x if type(x) == unicode else unicode(x),buffer) #convert floats / ints to unicode for writing\n buffer = map(lambda x: x.replace(u'\\n', u''),buffer)\n fOut.write(u'\\t'.join(buffer) + u'\\n')\n fOut.close()\n\n\n\ndef clusterByLocation(sc,records,scoredRecords,fClusterSearchEpsilon,nMinClusterPoints,nMinClusterUnique,fileName,outdir='scoreFiles'):\n \"\"\"\n Create clusters based out record locations, and collect total background activity for each cluster.\n :param sc:\n :param records:\n :param scoredRecords:\n :param fClusterSearchEpsilon:\n :param nMinClusterPoints:\n :param fileName:\n :param outdir:\n :return:\n \"\"\"\n\n # assign clusters and filter out non clustered records\n recordList = map(lambda term: ScoreRecord(term[0][1],term[1]),scoredRecords)\n assignToCluster(recordList, fClusterSearchEpsilon, nMinClusterPoints)\n recordList = filter(lambda x: x.cluster != -1, recordList)\n\n # collect records per cluster and filter out records that don't meet\n # min user threshold\n clustDict = {}\n for record in recordList:\n key = str(record.cluster)\n if key not in clustDict:\n clustDict[key] = ScoreBin(record)\n else:\n clustDict[key].addRecord(record)\n bins = clustDict.values()\n bins = filter(lambda x: len(x.users)>=nMinClusterUnique, bins)\n if len(bins) == 0:\n sys.exit(\"No clusters found, you need to relax cluster parameters\")\n\n\n lClustPoly = []\n for bin in bins:\n createHull(bin, False)\n if bin.objPoly is not None:\n lClustPoly.append(bin.objPoly)\n\n bc_lClustPoly = sc.broadcast(lClustPoly)\n lBackground = records.map(lambda x: makeTotalsArray(x, bc_lClustPoly, False)).reduce(lambda x, y: map(add, x, y))\n\n for i in range(len(bins)):\n bins[i].postsInHull = lBackground[i]\n\n bins = map(lambda x: x.toDict(), bins)\n writeDict = {\"type\":\"place\", \"clusters\":bins}\n with codecs.open(outdir+\"/\"+fileName, encoding=\"utf-8\",mode=\"wb\") as fOut:\n json.dump(writeDict, fOut)\n return writeDict\n\n\ndef 
clusterByDayAndLocation(sc,records,scoredRecords,fClusterSearchEpsilon,nMinClusterPoints,nMinClusterUnique,fileName,outdir='scoreFiles'):\n \"\"\"\n Cluster records by date, then by location. Collect background for each cluster.\n :param sc:\n :param records:\n :param scoredRecords:\n :param fClusterSearchEpsilon:\n :param nMinClusterPoints:\n :param fileName:\n :param outdir:\n :return:\n \"\"\"\n\n # collect records by date\n dateDict = {}\n for term in scoredRecords:\n score = term[1]\n rowObj = term[0][1]\n record = ScoreRecord(rowObj,score)\n recordDate = record.dt.date().isoformat()\n if recordDate not in dateDict:\n dateDict[recordDate] = [record]\n else:\n dateDict[recordDate].append(record)\n\n # for each date cluster records on that date\n bins = []\n for date,recordObjects in dateDict.iteritems():\n assignToCluster(recordObjects, fClusterSearchEpsilon, nMinClusterPoints)\n recordObjects = filter(lambda x: x.cluster != -1, recordObjects)\n clustDict = {}\n for record in recordObjects:\n key = str(record.cluster)\n if key not in clustDict:\n clustDict[key] = ScoreBin(record)\n else:\n clustDict[key].addRecord(record)\n bins.extend(clustDict.values())\n\n # filter out any clusters that don't meet min user requirements\n bins = filter(lambda x: len(x.users)>=nMinClusterUnique, bins)\n if len(bins) == 0:\n sys.exit(\"No clusters found, you need to relax cluster parameters\")\n\n\n lClustPoly = []\n for bin in bins:\n createHull(bin, True)\n if bin.objPoly is not None:\n lClustPoly.append(bin.objPoly)\n\n bc_lClustPoly = sc.broadcast(lClustPoly)\n lBackground = records.map(lambda x: makeTotalsArray(x, bc_lClustPoly, True)).reduce(lambda x, y: map(add, x, y))\n for i in range(len(bins)):\n bins[i].postsInHull = lBackground[i]\n\n writeDict = {\"type\":\"event\", \"dates\":{}}\n for bin in bins:\n key = str(bin.dt.date())\n if key not in writeDict[\"dates\"].keys():\n writeDict[\"dates\"][key] = {\"clusters\": [bin.toDict()]}\n else:\n writeDict[\"dates\"][key][\"clusters\"].append(bin.toDict())\n\n with codecs.open(outdir+\"/\"+fileName, encoding=\"utf-8\",mode=\"wb\") as fOut:\n json.dump(writeDict, fOut)\n\n return writeDict\n\ndef locationBasedOutput(bByDate, jobNm, vecAndPredictions, sNum, fBinSize, revLookup, bUseStopFilter, bc_lStopWords):\n nNonZeros = vecAndPredictions.count()\n nToTake = int(nNonZeros*sNum)\n if sNum>1.:\n nToTake=int(sNum)\n outCol = vecAndPredictions.sortBy(lambda x: x[1], False).take(nToTake)\n\n if bByDate == True:\n datesJson = {}\n for point in outCol:\n record = point[0][1]\n breakPoint = record[0].find(\"_\",9)\n offset = fBinSize/2.\n lat = float(record[0][9:breakPoint])\n if lat > 0:\n lat = lat + offset\n else:\n lat = lat - offset\n lon = float(record[0][breakPoint+1:])\n if lon > 0:\n lon = lon + offset\n else:\n lon = lon - offset\n sdate = record[0][:8]\n sdate = sdate[0:4]+\"-\"+sdate[4:6]+\"-\"+sdate[6:]\n if sdate not in datesJson.keys():\n datesJson[sdate] = {\"clusters\":[]}\n thisCluster = {\"nUnique\":5,\"background\":100, \"nTotal\":len(record[1]), \"lon\":lon, \"lat\":lat, \"date\":sdate, \"posts\":[], \"score\":point[1]}\n labeledP = point[0][0]\n tups = zip(labeledP.features.values, labeledP.features.indices)\n thisDict = set(map(lambda x: revLookup[x[1]], sorted(tups, key=lambda x: x[0], reverse=True)[:5]))\n thisCluster[\"dict\"] = list(thisDict)\n for post in record[1]:\n includePost = False\n for w in fspLib.wordBreak(post.text, bUseStopFilter, bc_lStopWords):\n if w in thisDict:\n includePost = True\n break\n if includePost:\n 
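# A post is attached to the cluster only if it contains at least one of the
# cluster's top-five dictionary terms.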
thisPost = {\"sco\":1,\"cap\":post.text,\"lon\":post.lon,\"lat\":post.lat,\"date\":post.dt.strftime(\"%Y-%m-%d\"),\"usr\":post.user,\"source\":post.source, \"datetime\": post.dt.strftime(\"%Y-%m-%d %H:%M:%S\")}\n thisCluster[\"posts\"].append(thisPost)\n thisCluster[\"poly\"] = [[lat+offset,lon+offset],[lat+offset,lon-offset],[lat-offset,lon-offset],[lat-offset,lon+offset]]\n datesJson[sdate][\"clusters\"].append(thisCluster)\n\n retDict = {\"type\":\"event\", \"dates\":datesJson}\n with codecs.open(\"scoreFiles/\"+jobNm, encoding=\"utf-8\",mode=\"wb\") as fOut:\n json.dump(retDict, fOut)\n return retDict\n else:\n clusterList = []\n for point in outCol:\n record = point[0][1]\n breakPoint = record[0].find(\"_\")\n offset = fBinSize/2.\n lat = float(record[0][:breakPoint])\n if lat > 0:\n lat = lat + offset\n else:\n lat = lat - offset\n lon = float(record[0][breakPoint+1:])\n if lon > 0:\n lon = lon + offset\n else:\n lon = lon - offset\n thisCluster = {\"nUnique\":5,\"background\":100, \"nTotal\":len(record[1]), \"lon\":lon, \"lat\":lat, \"posts\":[], \"score\":point[1]}\n labeledP = point[0][0]\n tups = zip(labeledP.features.values, labeledP.features.indices)\n thisDict = set(map(lambda x: revLookup[x[1]], sorted(tups, key=lambda x: x[0], reverse=True)[:5]))\n thisCluster[\"dict\"] = list(thisDict)\n nPosts = 0\n for post in record[1]:\n includePost = False\n for w in fspLib.wordBreak(post.text, bUseStopFilter, bc_lStopWords):\n if w in thisDict:\n includePost = True\n break\n if includePost:\n nPosts = nPosts + 1\n thisPost = {\"sco\":1,\"cap\":post.text,\"lon\":post.lon,\"lat\":post.lat,\"date\":post.dt.strftime(\"%Y-%m-%d\"),\"usr\":post.user,\"source\":post.source, \"datetime\": post.dt.strftime(\"%Y-%m-%d %H:%M:%S\")}\n thisCluster[\"posts\"].append(thisPost)\n if nPosts >= 100:\n break\n thisCluster[\"poly\"] = [[lat+offset,lon+offset],[lat+offset,lon-offset],[lat-offset,lon-offset],[lat-offset,lon+offset]]\n clusterList.append(thisCluster)\n\n retDict = {\"type\":\"place\", \"clusters\":clusterList}\n with codecs.open(\"scoreFiles/\"+jobNm, encoding=\"utf-8\",mode=\"wb\") as fOut:\n json.dump(retDict, fOut)\n\n return retDict\n\n\ndef locationBasedOutputV2(bByDate, jobNm, vecAndPredictions, sNum, revLookup, writeFileOutput, exempDict):\n nNonZeros = vecAndPredictions.count()\n nToTake = int(nNonZeros*sNum)\n if sNum>1.:\n nToTake=int(sNum)\n outCol = vecAndPredictions.sortBy(lambda x: x[1], False).take(nToTake)\n if bByDate:\n datesJson = {}\n for point in outCol:\n record = point[0][1]\n offset = record[4]/2.\n lat = float(record[1])\n if lat > 0:\n lat = lat + offset\n else:\n lat = lat - offset\n lon = float(record[2])\n if lon > 0:\n lon = lon + offset\n else:\n lon = lon - offset\n sdate = record[5].date().isoformat()\n if sdate not in datesJson.keys():\n datesJson[sdate] = {\"clusters\":[]}\n thisCluster = {\"nTotal\":record[3], \"lon\":lon, \"lat\":lat, \"score\":point[1]}\n labeledP = record[0]\n tups = zip(labeledP.features.values, labeledP.features.indices)\n thisDict = set(map(lambda x: revLookup[x[1]], sorted(tups, key=lambda x: x[0], reverse=True)[:25]))\n thisCluster[\"dict\"] = list(thisDict)\n thisCluster[\"poly\"] = [[lat+offset,lon+offset],[lat+offset,lon-offset],[lat-offset,lon-offset],[lat-offset,lon+offset]]\n datesJson[sdate][\"clusters\"].append(thisCluster)\n retDict = {\"type\":\"event\", \"dates\":datesJson, \"modelDict\": exempDict}\n\n else:\n clusterList = []\n for point in outCol:\n record = point[0][1]\n offset = record[4]/2.\n lat = 
float(record[1])\n if lat > 0:\n lat = lat + offset\n else:\n lat = lat - offset\n lon = float(record[2])\n if lon > 0:\n lon = lon + offset\n else:\n lon = lon - offset\n thisCluster = {\"nTotal\":record[3], \"lon\":lon, \"lat\":lat, \"score\":point[1]}\n labeledP = record[0]\n tups = zip(labeledP.features.values, labeledP.features.indices)\n thisDict = set(map(lambda x: revLookup[x[1]], sorted(tups, key=lambda x: x[0], reverse=True)[:25]))\n thisCluster[\"dict\"] = list(thisDict)\n thisCluster[\"poly\"] = [[lat+offset,lon+offset],[lat+offset,lon-offset],[lat-offset,lon-offset],[lat-offset,lon+offset]]\n clusterList.append(thisCluster)\n retDict = {\"type\":\"place\", \"clusters\":clusterList, \"modelDict\": exempDict}\n\n if writeFileOutput:\n with codecs.open(\"scoreFiles/\"+jobNm, encoding=\"utf-8\",mode=\"wb\") as fOut:\n json.dump(retDict, fOut)\n return retDict\n\n\n","sub_path":"geqe-ml/lib/clustering.py","file_name":"clustering.py","file_ext":"py","file_size_in_byte":16734,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"154930203","text":"from PIL import Image, ImageDraw, ImageFont\n\ndef addNum(image, num):\n\timg = Image.open(image)\n\tx,y = img.size\n\tfont = ImageFont.truetype(\"Monaco.otf\", x/4)\n\tdraw = ImageDraw.Draw(img)\n\tdraw.ellipse((x*0.7,0,x,x*0.3), outline=\"rgb(255,0,0)\", fill=\"rgb(255,0,0)\")\n\tdraw.text((x*0.78,0), str(num), font=font, fill=\"rgb(255,255,255)\")\n\tdel draw\n\n\timg.show()\n\nif __name__ == '__main__':\n\taddNum(\"andy.png\", 3)","sub_path":"0000/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"509987736","text":"from mpg123 import Mpg123, Out123\nimport asyncio\nimport aiohttp\n\nmp3 = Mpg123()\n\nout = Out123()\n\nasync def radio_streaming(mp3, out):\n async with aiohttp.ClientSession() as session:\n async with session.get('http://stream.radiometal.com:8010') as response:\n while True:\n chunk = await response.content.read(4096)\n if not chunk:\n break\n mp3.feed(chunk)\n for frame in mp3.iter_frames(out.start):\n out.play(frame)\n\nloop = asyncio.get_event_loop()\nloop.run_until_complete(radio_streaming(mp3, out))\n","sub_path":"examples/radio_aiohttp.py","file_name":"radio_aiohttp.py","file_ext":"py","file_size_in_byte":613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"233570755","text":"# Copyright 2022 Huawei Technologies Co., Ltd\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ============================================================================\n\"\"\"\nGPT-2 finetune and evaluation script for Language Modeling task.\n\"\"\"\nimport argparse\nimport math\nimport os\nimport time\nimport numpy as np\nfrom easydict import EasyDict as edict\n\nimport moxing\nimport mindspore.common.dtype as mstype\nfrom mindspore import context, Tensor, load_checkpoint, export\nfrom 
mindspore.nn.wrap.loss_scale import DynamicLossScaleUpdateCell\nfrom mindspore.nn import AdamWeightDecay, Lamb, Momentum\nfrom mindspore.train.model import Model\nfrom mindspore.train.callback import CheckpointConfig, ModelCheckpoint, TimeMonitor, LossMonitor\nfrom mindspore.train.serialization import load_param_into_net\n\nfrom src.gpt2_for_finetune import GPT2FinetuneCell, GPT2LM\nfrom src.utils.lr_schedule import GPT2LearningRate\nfrom src.dataset import create_language_model_dataset\nfrom src.utils.get_config_setting import get_train_setting, get_model_setting\nfrom src.GPT2_model import GPT2Config\n\ndef get_config(size_gpt2):\n '''\n GPT-2 finetune config and GPT-2 model config\n Args:\n size_gpt2: The size of gpt2 model.\n return:\n cfg: The gpt2 config.\n gpt2_net_cfg: The gpt2 network config.\n '''\n cfg = edict({\n 'gpt2_network': 'large',\n 'optimizer': 'Lamb',\n 'AdamWeightDecay': edict({\n 'learning_rate': 5e-5,\n 'end_learning_rate': 1e-7,\n 'power': 1.0,\n 'weight_decay': 0.01,\n 'decay_filter': lambda x: 'layernorm' not in x.name.lower() and 'bias' not in x.name.lower(),\n 'eps': 1e-6,\n }),\n 'Lamb': edict({\n 'learning_rate': 2e-5,\n 'end_learning_rate': 1e-7,\n 'power': 1.0,\n 'weight_decay': 0.01,\n 'decay_filter': lambda x: 'layernorm' not in x.name.lower() and 'bias' not in x.name.lower(),\n }),\n 'Momentum': edict({\n 'learning_rate': 2e-5,\n 'momentum': 0.9,\n }),\n })\n\n cfg.gpt2_network = size_gpt2\n\n if cfg.gpt2_network == 'small':\n gpt2_net_cfg = GPT2Config(\n batch_size=1,\n seq_length=1024,\n vocab_size=50257,\n d_model=768,\n num_hidden_layers=12,\n num_attention_heads=12,\n intermediate_size=3072,\n hidden_act=\"gelu\",\n hidden_dropout=0.1,\n attention_dropout=0.1,\n max_position_embeddings=1024,\n initializer_range=0.02,\n input_mask_from_dataset=True,\n summary_first_dropout=0.1,\n dtype=mstype.float32,\n compute_type=mstype.float16,\n )\n if cfg.gpt2_network == 'medium':\n gpt2_net_cfg = GPT2Config(\n batch_size=1,\n seq_length=1024,\n vocab_size=50257,\n d_model=1024,\n num_hidden_layers=24,\n num_attention_heads=16,\n intermediate_size=4096,\n hidden_act=\"gelu\",\n hidden_dropout=0.1,\n attention_dropout=0.1,\n max_position_embeddings=1024,\n initializer_range=0.02,\n input_mask_from_dataset=True,\n summary_first_dropout=0.1,\n dtype=mstype.float32,\n compute_type=mstype.float16,\n )\n if cfg.gpt2_network == 'large':\n gpt2_net_cfg = GPT2Config(\n batch_size=4,\n seq_length=1024,\n vocab_size=50257,\n d_model=1280,\n num_hidden_layers=36,\n num_attention_heads=20,\n intermediate_size=5120,\n hidden_act=\"gelu\",\n hidden_dropout=0.1,\n attention_dropout=0.1,\n max_position_embeddings=1024,\n initializer_range=0.02,\n input_mask_from_dataset=True,\n summary_first_dropout=0.1,\n dtype=mstype.float32,\n compute_type=mstype.float16,\n )\n\n return cfg, gpt2_net_cfg\n\ndef _get_last_ckpt(ckpt_dir):\n '''\n from ckpt path get ckpt name\n '''\n ckpt_files = [ckpt_file for ckpt_file in os.listdir(ckpt_dir)\n if ckpt_file.endswith('.ckpt')]\n if not ckpt_files:\n print(\"No ckpt file found.\")\n return None\n\n return os.path.join(ckpt_dir, sorted(ckpt_files)[-1])\n\ndef do_export(load_ckpt_path, save_air_path, gpt2_net_cfg):\n '''\n frozen to air\n '''\n out_url = \"/cache/output/\"\n if not os.path.exists(out_url):\n print('the problem is out_url here')\n os.makedirs(out_url, exist_ok=True)\n\n Load_checkpoint_path = _get_last_ckpt(load_ckpt_path)\n\n net = GPT2LM(config=gpt2_net_cfg,\n is_training=False,\n use_one_hot_embeddings=False)\n\n 
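# _get_last_ckpt above picked the lexicographically last .ckpt in the folder; log it before loading.\n    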
print(Load_checkpoint_path)\n load_checkpoint(Load_checkpoint_path, net=net)\n\n net.set_train(False)\n\n input_ids = Tensor(np.zeros([gpt2_net_cfg.batch_size, gpt2_net_cfg.seq_length]), mstype.int32)\n input_mask = Tensor(np.zeros([gpt2_net_cfg.batch_size, gpt2_net_cfg.seq_length]), mstype.int32)\n label_ids = Tensor(np.zeros([gpt2_net_cfg.batch_size, gpt2_net_cfg.seq_length]), mstype.int32)\n input_data = [input_ids, input_mask, label_ids]\n print(\"==================== Start exporting ==================\")\n print(\" | Ckpt path: {}\".format(Load_checkpoint_path))\n print(\" | Air path: {}\".format(save_air_path))\n export(net, *input_data, file_name=out_url+'gpt2', file_format=\"AIR\")\n moxing.file.copy_parallel(out_url, save_air_path)\n print(\"==================== Exporting finished ==================\")\n\ndef do_train(dataset=None,\n network=None,\n load_checkpoint_path=\"\",\n save_checkpoint_path=\"\",\n epoch_num=1,\n cfg=None,\n gpt2_net_cfg=None):\n \"\"\"\n Do train\n Args:\n dataset: the train dataset.\n network: the network with loss\n load_checkpoint_path: the file path which saved pretrained model checkpoint.\n save_checkpoint_path: the file path which will save finetuned model checkpoint.\n epoch_num: the number of epoch.\n cfg: The gpt2 config.\n gpt2_net_cfg: The gpt2 network config.\n \"\"\"\n if load_checkpoint_path == \"\":\n raise ValueError(\"Pretrain model missed, finetune task must load pretrain model!\")\n\n steps_per_epoch = dataset.get_dataset_size()\n\n # optimizer\n if cfg.optimizer == 'AdamWeightDecay':\n lr_schedule = GPT2LearningRate(learning_rate=cfg.AdamWeightDecay.learning_rate,\n end_learning_rate=cfg.AdamWeightDecay.end_learning_rate,\n warmup_steps=int(steps_per_epoch * epoch_num * 0.1),\n decay_steps=steps_per_epoch * epoch_num,\n power=cfg.AdamWeightDecay.power)\n params = network.trainable_params()\n\n decay_params = list(filter(cfg.AdamWeightDecay.decay_filter, params))\n other_params = list(filter(lambda x: not cfg.AdamWeightDecay.decay_filter(x), params))\n group_params = [{'params': decay_params, 'weight_decay': cfg.AdamWeightDecay.weight_decay},\n {'params': other_params, 'weight_decay': 0.0}]\n optimizer = AdamWeightDecay(group_params, lr_schedule, eps=cfg.AdamWeightDecay.eps)\n elif cfg.optimizer == 'Lamb':\n lr_schedule = GPT2LearningRate(learning_rate=cfg.Lamb.learning_rate,\n end_learning_rate=cfg.Lamb.end_learning_rate,\n warmup_steps=int(steps_per_epoch * epoch_num * 0.1),\n decay_steps=steps_per_epoch * epoch_num,\n power=cfg.Lamb.power)\n optimizer = Lamb(network.trainable_params(), lr_schedule)\n elif cfg.optimizer == 'Momentum':\n optimizer = Momentum(network.trainable_params(), cfg.Momentum.learning_rate, cfg.Momentum.momentum)\n else:\n raise Exception(\"Optimizer not supported. support: [AdamWeightDecay, Lamb, Momentum]\")\n\n # load checkpoint into network\n ckpt_config = CheckpointConfig(save_checkpoint_steps=steps_per_epoch, keep_checkpoint_max=1)\n prefix_name = \"gpt2_language_model_\" + str(cfg.gpt2_network) + \"_\" + str(cfg.optimizer) + \"_\" \\\n + str(epoch_num) + \"_bs\" + str(gpt2_net_cfg.batch_size)\n ckpoint_cb = ModelCheckpoint(prefix=prefix_name,\n directory=None if save_checkpoint_path == \"\" else save_checkpoint_path,\n config=ckpt_config)\n param_dict = load_checkpoint(load_checkpoint_path)\n\n final_param_dict = {}\n for name, _ in param_dict.items():\n final_param_dict['gpt2.gpt2.' 
+ name] = param_dict[name]\n final_param_dict['gpt2.dense1.weight'] = param_dict['gpt2_embedding_lookup.embedding_table']\n\n load_param_into_net(network, final_param_dict)\n print(\"Load pretrained parameter successfully!\\n\")\n\n update_cell = DynamicLossScaleUpdateCell(loss_scale_value=2 ** 32, scale_factor=2, scale_window=1000)\n netwithgrads = GPT2FinetuneCell(network, optimizer=optimizer, scale_update_cell=update_cell)\n netwithgrads.set_train(True)\n\n loss_cb = LossMonitor(per_print_times=1)\n model = Model(netwithgrads)\n callbacks = [TimeMonitor(dataset.get_dataset_size()), loss_cb, ckpoint_cb]\n\n print(\"==================== Starting Finetuning ====================\")\n model.train(epoch_num, dataset, callbacks=callbacks, dataset_sink_mode=False)\n print(\"==================== Finetuning Success ====================\")\n\n print(\"==================== Starting Exporting ====================\")\n do_export(save_checkpoint_path, save_checkpoint_path, gpt2_net_cfg)\n print(\"==================== Exporting Success ====================\")\n\ndef do_eval(dataset=None, network=None, metric=None, load_checkpoint_path=\"\", eval_type=None, gpt2_net_cfg=None):\n \"\"\"\n Do eval\n Args:\n dataset: the eval dataset.\n network: the network with loss.\n metric: the evaluation method.\n load_checkpoint_path: the file path which saved finetuned model checkpoint.\n eval_type: option for \"zero-shot\" or \"finetuned\"\n gpt2_net_cfg: The gpt2 network config.\n \"\"\"\n if load_checkpoint_path == \"\":\n raise ValueError(\"Finetune model missed, evaluation task must load finetune model!\")\n\n if metric.lower() == \"ppl\":\n print(\"Prepare to calculate the ppl score ...\")\n gpt2_loss = network(config=gpt2_net_cfg,\n is_training=True,\n use_one_hot_embeddings=False)\n gpt2_loss.set_train(False)\n param_dict = load_checkpoint(load_checkpoint_path)\n\n if eval_type == \"zero-shot\":\n final_param_dict = {}\n for name, _ in param_dict.items():\n final_param_dict['gpt2.gpt2.' 
+ name] = param_dict[name]\n            final_param_dict['gpt2.dense1.weight'] = param_dict['gpt2_embedding_lookup.embedding_table']\n            load_param_into_net(gpt2_loss, final_param_dict)\n            print(\"load pretrained parameter successfully!\\n\")\n        elif eval_type == \"finetuned\":\n            load_param_into_net(gpt2_loss, param_dict)\n            print(\"load finetuned parameter successfully!\\n\")\n        else:\n            raise ValueError(\"Evaluation type missing, eval_type should be [zero-shot, finetuned]\")\n\n        model = Model(gpt2_loss)\n        columns_list = [\"input_ids\", \"input_mask\", \"label_ids\"]\n        print(\"==================== [PPL] Testing ====================\")\n        num_data = 1\n        total_loss = 0.0\n        avg_loss = 0.0\n        for data in dataset.create_dict_iterator():\n            input_data = []\n            for i in columns_list:\n                input_data.append(data[i])\n            input_ids, input_mask, label_ids = input_data\n            loss = model.predict(input_ids, input_mask, label_ids)\n            loss = float(loss.asnumpy())\n            total_loss += loss\n            avg_loss = float(total_loss / num_data)\n            print(\" | Current Loss: {:.6f}\".format(avg_loss))\n            print(\" | Current PPL: {}\\n\\n\".format(math.exp(avg_loss)))\n            num_data += 1\n\n        print(\"\\n\\n\")\n        print(\"**************************************************************\")\n        print(\"Average Loss: {:.6f}\".format(avg_loss))\n        print(\"Average PPL: {:.6f}\".format(math.exp(avg_loss)))\n        print(\"********************** Testing Finished **********************\")\n    else:\n        raise ValueError(\"Metric method not supported, supported: [ppl]\")\n\n\ndef run_languagemodel():\n    \"\"\"\n    run Language Modeling task\n    \"\"\"\n    parser = argparse.ArgumentParser(description=\"Finetune and evaluate the language modeling task\")\n    parser.add_argument(\"--device_target\", type=str, default=\"Ascend\",\n                        help=\"Device type. Default: Ascend.\")\n    parser.add_argument(\"--device_id\", type=int, default=0,\n                        help=\"ID of target device.\")\n    parser.add_argument(\"--metric_method\", type=str, default=\"PPL\",\n                        help=\"The eval method including [PPL]. Default: PPL.\")\n    parser.add_argument(\"--do_train\", type=str, default=\"true\",\n                        help=\"Enable train. Default: true.\")\n    parser.add_argument(\"--do_eval\", type=str, default=\"false\",\n                        help=\"Enable evaluation. Default: false.\")\n    parser.add_argument(\"--eval_type\", type=str, default=\"finetuned\",\n                        help=\"The type of evaluation including [zero-shot, finetuned]. Default: finetuned.\")\n    parser.add_argument(\"--epoch_num\", type=int, default=1,\n                        help=\"Epoch number. Default: 1.\")\n    parser.add_argument(\"--train_data_shuffle\", type=str, default=\"true\",\n                        help=\"Enable train data shuffle. Default: true.\")\n    parser.add_argument(\"--eval_data_shuffle\", type=str, default=\"false\",\n                        help=\"Enable eval data shuffle. 
Default: false.\")\n parser.add_argument(\"--save_finetune_ckpt_path\", type=str, default=\"\",\n help=\"Save the finetuned checkpoint path.\")\n parser.add_argument(\"--load_pretrain_ckpt_path\", type=str, default=\"\",\n help=\"Load the checkpoint file path for train.\")\n parser.add_argument(\"--load_finetune_ckpt_path\", type=str, default=\"\",\n help=\"Load the checkpoint file path for evaluation.\")\n parser.add_argument(\"--train_data_file_path\", type=str, default=\"\",\n help=\"Data path, it is better to use absolute path\")\n parser.add_argument(\"--eval_data_file_path\", type=str, default=\"\",\n help=\"Data path, it is better to use absolute path\")\n parser.add_argument(\"--mindrecord_name\", type=str, default=\"\",\n help=\"The name of the mindrecord.\")\n parser.add_argument(\"--ckpt_name\", type=str, default=\"\",\n help=\"The name of the ckpt.\")\n parser.add_argument(\"--size_of_gpt2_network\", type=str, default=\"small\",\n help=\"The type of size including [small, medium, large]. Default: small\")\n args_opt = parser.parse_args()\n\n epoch_num = args_opt.epoch_num\n metric = args_opt.metric_method\n train_data_file_path = os.path.realpath(args_opt.train_data_file_path)\n save_finetune_ckpt_path = os.path.realpath(args_opt.save_finetune_ckpt_path)\n load_finetune_ckpt_path = os.path.realpath(args_opt.load_finetune_ckpt_path)\n load_pretrain_ckpt_path = os.path.realpath(args_opt.load_pretrain_ckpt_path)\n\n if args_opt.do_train.lower() == \"false\" and args_opt.do_eval.lower() == \"false\":\n raise ValueError(\"At least one of 'do_train' or 'do_eval' must be true\")\n if args_opt.do_train.lower() == \"true\" and args_opt.train_data_file_path == \"\":\n raise ValueError(\"'train_data_file_path' must be set when do finetune task\")\n if args_opt.do_eval.lower() == \"true\" and args_opt.eval_data_file_path == \"\":\n raise ValueError(\"'eval_data_file_path' must be set when do evaluation task\")\n\n if args_opt.size_of_gpt2_network in [\"small\", \"medium\", \"large\"]:\n config, gpt2_net_config = get_config(args_opt.size_of_gpt2_network)\n print(config.gpt2_network)\n else:\n raise Exception(\"Size not supported. 
support: [small, medium, large]\")\n\n    device_target = args_opt.device_target\n    if device_target == \"Ascend\":\n        context.set_context(mode=context.GRAPH_MODE,\n                            device_target=device_target,\n                            device_id=args_opt.device_id,\n                            max_call_depth=3000)\n        context.set_auto_parallel_context(parallel_mode=\"stand_alone\")\n        print(\" | Device: {} | Device id: {}\".format(device_target, args_opt.device_id))\n    else:\n        raise Exception(\"Device target error: only Ascend is supported.\")\n\n    gpt2_loss = GPT2LM(config=gpt2_net_config,\n                       is_training=True,\n                       use_one_hot_embeddings=False)\n\n    dst_url = '/cache/dataset'\n    mod_url = '/cache/module'\n    if not os.path.exists(dst_url):\n        os.makedirs(dst_url, exist_ok=True)\n    if not os.path.exists(mod_url):\n        os.makedirs(mod_url, exist_ok=True)\n\n    if args_opt.do_train.lower() == \"true\":\n        get_train_setting(config)\n        get_model_setting(config, gpt2_net_config)\n        print(\"==================== Start Loading Train Dataset ==================\")\n        print(\" | Train Dataset: {}\".format(train_data_file_path))\n        print(\" | Checkpoint: {}\".format(load_pretrain_ckpt_path))\n        moxing.file.copy_parallel(train_data_file_path, dst_url)\n        moxing.file.copy_parallel(load_pretrain_ckpt_path, mod_url)\n        dataset_path = os.path.join(dst_url, args_opt.mindrecord_name)\n        train_dataset = create_language_model_dataset(do_shuffle=(args_opt.train_data_shuffle.lower() == \"true\"),\n                                                      dataset_path=dataset_path)\n        pretrain_ckpt_path = os.path.join(mod_url, args_opt.ckpt_name)\n        do_train(train_dataset, gpt2_loss, pretrain_ckpt_path, save_finetune_ckpt_path,\n                 epoch_num, config, gpt2_net_config)\n\n    if args_opt.do_eval.lower() == \"true\":\n        get_model_setting(config, gpt2_net_config)\n        print(\"==================== Start Loading Evaluation Dataset ==================\")\n        print(\" | Eval Dataset: {}\".format(args_opt.eval_data_file_path))\n        print(\" | Checkpoint: {}\".format(load_finetune_ckpt_path))\n        moxing.file.copy_parallel(args_opt.eval_data_file_path, dst_url)\n        moxing.file.copy_parallel(load_finetune_ckpt_path, mod_url)\n        dataset_path = os.path.join(dst_url, args_opt.mindrecord_name)\n        eval_dataset = create_language_model_dataset(do_shuffle=(args_opt.eval_data_shuffle.lower() == \"true\"),\n                                                     dataset_path=dataset_path)\n        finetune_ckpt_path = os.path.join(mod_url, args_opt.ckpt_name)\n        do_eval(eval_dataset, GPT2LM, metric, finetune_ckpt_path, args_opt.eval_type, gpt2_net_config)\n\nif __name__ == \"__main__\":\n    print(\"Start Time: \\n\", time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime()))\n    run_languagemodel()\n    print(\"End Time: \\n\", time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime()))\n","sub_path":"research/nlp/gpt2/modelarts/start_train_language_model.py","file_name":"start_train_language_model.py","file_ext":"py","file_size_in_byte":19979,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"566364130","text":"import torch\nimport numpy as np\nimport os\nimport cv2\nfrom tensorboardX import SummaryWriter\nimport shutil\nimport pandas as pd\n\nclass DeepFeatures(torch.nn.Module):\n    '''\n    This class extracts, reads, and writes data embeddings using a pretrained deep neural network. 
Meant to work with\n    Tensorboard's Embedding Viewer (https://www.tensorflow.org/tensorboard/tensorboard_projector_plugin).\n    When using a 3 channel image input and a pretrained model from torchvision.models, please use the\n    following pre-processing pipeline:\n\n    transforms.Compose([transforms.Resize(imsize),\n                        transforms.ToTensor(),\n                        transforms.Normalize(mean=[0.485, 0.456, 0.406],\n                                             std=[0.229, 0.224, 0.225])]) ## As per torchvision docs\n\n    Args:\n        model (nn.Module): A Pytorch model that returns a (B,1) embedding for a length B batched input\n        imgs_folder (str): The folder path where the input data elements should be written to\n        embs_folder (str): The folder path where the output embeddings should be written to\n        tensorboard_folder (str): The folder path where the resulting Tensorboard log should be written to\n        experiment_name (str): The name of the experiment to use as the log name\n\n\n    '''\n\n    def __init__(self, model,\n                 imgs_folder,\n                 embs_folder,\n                 tensorboard_folder,\n                 experiment_name=None):\n\n        super(DeepFeatures, self).__init__()\n\n        self.step = 0\n\n        self.model = model\n        self.model.eval()\n\n        self.imgs_folder = imgs_folder\n        self.embs_folder = embs_folder\n        self.tensorboard_folder = tensorboard_folder\n\n        self.name = experiment_name\n\n        self.writer = None\n\n        if len(os.listdir(self.imgs_folder)) != 0:\n            # the images folder must be empty; clear out any leftover files\n            clear_folder(self.imgs_folder)\n\n\n        if len(os.listdir(self.embs_folder)) != 0:\n            # the embeddings folder must be empty; clear out any leftover files\n            clear_folder(self.embs_folder)\n\n        self.labels_to_folder, self.folder_to_labels = self.convert_to_dict(pd.read_csv(\"../labelling_images/labelled.csv\"))\n\n\n    def generate_embeddings(self, x):\n        '''\n        Generate embeddings for an input batched tensor\n\n        Args:\n            x (torch.Tensor) : A batched pytorch tensor\n\n        Returns:\n            (torch.Tensor): The output of self.model against x\n        '''\n        return (self.model(x))\n\n    def write_embeddings(self, x, labels, outsize=(28, 28)):\n        '''\n        Generate embeddings for an input batched tensor and write inputs and\n        embeddings to self.imgs_folder and self.embs_folder respectively.\n\n        Inputs and outputs will be stored in .npy format, with matching filenames\n        taken from the supplied labels for retrieval\n\n        Args:\n            x (torch.Tensor) : An input batched tensor that can be consumed by self.model\n            labels (list) : Labels whose entries are used as the matching output filenames\n            outsize (tuple(int, int)) : A tuple indicating the size that input data arrays should be\n            written out to\n\n        Returns:\n            (bool) : True if writing was successful\n\n        '''\n\n\n\n        # Generate embeddings\n        embs = self.generate_embeddings(x)\n\n        # Detach from graph\n        embs = embs.detach().cpu().numpy()\n\n        # Start writing to output folders\n        for i in range(len(embs)):\n            key = labels[i]\n            np.save(self.imgs_folder + r\"/\" + key + '.npy', tensor2np(x[i], outsize))\n            np.save(self.embs_folder + r\"/\" + key + '.npy', embs[i])\n\n        return (True)\n\n    def _create_writer(self, name):\n        '''\n        Create a TensorboardX writer object given an experiment name and assign it to self.writer\n\n        Args:\n            name (str): Optional, an experiment name for the writer, defaults to self.name\n\n        Returns:\n            (bool): True if the writer was created successfully\n\n        '''\n\n        if self.name is None:\n            name = 'Experiment_' + str(np.random.random())\n        else:\n            name = self.name\n\n        dir_name = os.path.join(self.tensorboard_folder,\n                                name)\n\n        if not os.path.exists(dir_name):\n            os.mkdir(dir_name)\n\n        else:\n            print(\"Warning: logfile already exists\")\n            print(\"logging directory: \" + str(dir_name))\n\n        logdir = dir_name\n        self.writer = SummaryWriter(logdir=logdir)\n        return 
(True)\n\n def create_tensorboard_log(self):\n\n '''\n Write all images and embeddings from imgs_folder and embs_folder into a tensorboard log\n '''\n\n if self.writer is None:\n self._create_writer(self.name)\n\n\n\n ## Read in\n all_embeddings = [np.load(os.path.join(self.embs_folder, p)) for p in os.listdir(self.embs_folder) if\n p.endswith('.npy')]\n all_images = [np.load(os.path.join(self.imgs_folder, p)) for p in os.listdir(self.imgs_folder) if\n p.endswith('.npy')]\n all_images = [np.moveaxis(a, 2, 0) for a in all_images] # (HWC) -> (CHW)\n all_names = [os.path.join(self.imgs_folder, p) for p in os.listdir(self.imgs_folder) if\n p.endswith('.npy')]\n all_names = [os.path.basename(os.path.normpath(path)).replace(\".npy\", \"\").split(\"_\")[0] for path in all_names]\n ## Stack into tensors\n all_embeddings = torch.Tensor(all_embeddings)\n all_images = torch.Tensor(all_images)\n all_names_with_labels = [(name, self.folder_to_labels[name]) for name in all_names]\n print(all_embeddings.shape)\n print(all_images.shape)\n self.writer.add_embedding(all_embeddings,metadata = all_names_with_labels, label_img=all_images, metadata_header = [\"Folder\",\"Label\"])\n\n def convert_to_dict(self, labelled_df):\n label_to_folder = {}\n folder_to_label = {}\n for index, row in labelled_df.iterrows():\n label, folder = row[\"label\"], str(row[\"folder\"])\n if not (label in label_to_folder.keys()):\n label_to_folder[label] = []\n\n label_to_folder[label].append(folder)\n folder_to_label[folder] = label\n\n return label_to_folder, folder_to_label\n\n\ndef tensor2np(tensor, resize_to=None):\n '''\n Convert an image tensor to a numpy image array and resize\n\n Args:\n tensor (torch.Tensor): The input tensor that should be converted\n resize_to (tuple(int, int)): The desired output size of the array\n\n Returns:\n (np.ndarray): The input tensor converted to a channel last resized array\n '''\n\n out_array = tensor.detach().cpu().numpy()\n out_array = np.moveaxis(out_array, 0, 2) # (CHW) -> (HWC)\n\n if resize_to is not None:\n out_array = cv2.resize(out_array, dsize=resize_to, interpolation=cv2.INTER_CUBIC)\n\n return (out_array)\n\n\n\n\ndef clear_folder(folder):\n for file in os.listdir(folder):\n file_path = os.path.join(folder, file)\n try:\n if os.path.isfile(file_path) or os.path.islink(file_path):\n os.unlink(file_path)\n elif os.path.isdir(file_path):\n shutil.rmtree(file_path)\n except Exception as e:\n print('Failed to delete %s. 
Reason: %s' % (file_path, e))","sub_path":"Dataset_CNN/ProjectorObject.py","file_name":"ProjectorObject.py","file_ext":"py","file_size_in_byte":7324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"256211885","text":"\"\"\"Common helpers, used in both web API and CLI applications.\"\"\"\n\nimport json\n\nimport requests\nfrom currency import currency\n\n\ndef calculate_amount(amount, input_currency, output_currency):\n \"\"\"\n Calculate the equivalent amount of foreign currency.\n :param float amount: Amount to convert\n :param str input_currency: ISO currency code\n :param str output_currency: ISO currency code\n :rtype: float\n :return: Equivalent amount\n \"\"\"\n return amount * get_rate(input_currency, output_currency)\n\n\ndef get_rate(input_currency, output_currency):\n \"\"\"\n Return specific exchange rate.\n :param str input_currency: ISO currency code\n :param str output_currency: ISO currency code\n :rtype: float\n :return: Rate\n \"\"\"\n rates = get_rates(input_currency) # Get all exchange rates against given currency\n\n return rates[output_currency] # Return specific exchange rate\n\n\ndef get_rates(input_currency):\n \"\"\"\n Return a dictionary of exchange rates against given currency available from https://exchangeratesapi.io.\n :param str input_currency: ISO currency code\n :rtype: dict\n :return: Dictionary of rates\n \"\"\"\n base_url = \"https://api.exchangeratesapi.io/latest?base=\"\n url = \"\".join([base_url, input_currency])\n\n r = requests.get(url)\n payload = r.json()\n\n return payload[\"rates\"]\n\n\ndef set_symbols():\n \"\"\"\n Return a dictionary of currency symbols available from external python library. (Symbol: ISO currency code).\n :param list curr: List of ISO currency codes\n :rtype: dict\n :return: Dictionary of currency symbols\n \"\"\"\n curr = get_currencies()\n symbols = {}\n for item in curr:\n symbols[item] = currency.symbol(item)\n s = inv_dct(symbols)\n return s\n\n\ndef get_currencies():\n \"\"\"\n Return a list of ISO currency codes.\n :rtype: list\n :return: List\n \"\"\"\n curr = []\n for key in get_rates(\"EUR\"):\n curr.append(key)\n curr.append(\"EUR\")\n curr.sort()\n return curr\n\n\ndef inv_dct(dct):\n \"\"\"\n Return an inverted dictionary. For example, from (ISO currency code: symbol) to (Symbol: ISO currency code).\n :param dict dct: Dictionary\n :rtype: dict\n :return: Inverted dictionary\n \"\"\"\n d = {}\n for key, value in dct.items():\n if value in d.keys():\n d[value] = \", \".join([d[value], key])\n else:\n d[value] = key\n return d\n\n\ndef convert(amount, input_currency, output_currency=\"ALL\"):\n \"\"\"\n Currency converter\n :param float amount:\n :param str input_currency:\n :param str output_currency:\n :rtype: dict\n :return: Dictionary\n \"\"\"\n\n all_symbols = set_symbols()\n\n if input_currency in all_symbols.keys():\n if \", \" in all_symbols[input_currency]:\n return (\n \"The input currency symbol is not unique. Please specify the ISO code.\"\n )\n else:\n input_currency = all_symbols[input_currency]\n\n if output_currency in all_symbols.keys():\n if \", \" in all_symbols[output_currency]:\n return (\n \"The output currency symbol is not unique. 
Please specify the ISO code.\"\n            )\n        else:\n            output_currency = all_symbols[output_currency]\n\n    results = {\"input\": {\"amount\": amount, \"currency\": input_currency}, \"output\": {}}\n\n    if output_currency == \"ALL\":\n        for currency in get_rates(input_currency):\n            results[\"output\"][currency] = calculate_amount(\n                amount, input_currency, currency\n            )\n    else:\n        results[\"output\"][output_currency] = calculate_amount(\n            amount, input_currency, output_currency\n        )\n\n    return results\n\n\ndef currencies_to_file():\n    \"\"\"\n    Write a dictionary of currency symbols to a file currencies.txt\n    :rtype: NoneType\n    :return: None\n    \"\"\"\n    with open(\"currencies.txt\", \"w+\") as file:\n        json.dump(set_symbols(), file, ensure_ascii=False, indent=2)\n","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3976,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"170455291","text":"import pygame\nfrom pygame.locals import *\nfrom Constantes import *\nfrom random import *\n\nclass loadMap:\n    \"\"\"Class that loads the map and its Rects and handles movement\"\"\"\n\n    def __init__(self, fichierMap):\n        self.fichierMap = fichierMap\n        self.map = 0\n        self.x = -1000\n        self.y = -1000\n\n    def recup(self):\n        #Read the map in from the .txt file\n        self.fichierMap = open('MAP.txt','r')\n        for i in range(len(map)):\n            tab = self.fichierMap.readline().split('\\t')\n            map[i] = tab\n            for j in range(len(map)):\n                map[i][j] = int(map[i][j])\n        self.fichierMap.close()\n\n    def loadTexture(self):\n        #Load every texture of the map\n        for i in range(nombreTexture):\n            textureText = ['textures/',str(i),'.png']\n            textureText = ''.join(textureText)\n            mapTexture[i] = pygame.image.load(textureText).convert_alpha()\n\n    def afficher(self, L, C):\n        #Draw the map around the player\n        for ligne in range(L-3, L+4):\n            for colonne in range(C-3, C+4):\n                for k in range(len(mapTexture)):\n                    if map[ligne][colonne] == k:\n                        fenetre.blit(mapTexture[k], (200 * colonne + self.x, 200 * ligne + self.y))\n\n    def move(self, direction, perso):\n        #Scroll the map according to the player's movements\n        if direction == 'droite':\n            if perso.inCar is True:\n                perso.acceleration(direction)\n                #perso.vitesse(self)\n            else:\n                self.x -= perso.speed\n        if direction == 'gauche':\n            if perso.inCar is True:\n                perso.acceleration(direction)\n                #perso.vitesse(self)\n            else:\n                self.x += perso.speed\n        if direction == 'haut':\n            if perso.inCar is True:\n                perso.acceleration(direction)\n                #perso.vitesse(self)\n            else:\n                self.y += perso.speed\n        if direction == 'bas':\n            if perso.inCar is True:\n                perso.acceleration(direction)\n                #perso.vitesse(self)\n            else:\n                self.y -= perso.speed\n\n    def loadRoad(self, L, C):\n        #Build all the Rects of our map\n        roadTrottoirRect = []\n        for ligne in range(L-3, L+4):\n            for colonne in range(C-3, C+4):\n                map[ligne][colonne] = int(map[ligne][colonne])\n                if 0 <= map[ligne][colonne] <= 21:\n                    roadTrottoirRect.append(pygame.Rect((200 * colonne + self.x), (200 * ligne + self.y), 200, 200))\n        return roadTrottoirRect\n\n    def onTheRoad(self, route, direction, perso):\n        #Check our character's collisions with the environment\n        for i in range(len(route)):\n            if direction == 'droite':\n                point = perso.rect.midright\n            elif direction == 'gauche':\n                point = perso.rect.midleft\n            elif direction == 'haut':\n                point = perso.rect.midtop\n            elif direction == 'bas':\n                point = perso.rect.midbottom\n            if route[i].collidepoint(point):\n                return True\n            perso.speedX, perso.speedY = 0, 
0\n\n    def collisionCar(self, direction, perso, carPNJList):\n        #NPC cars cannot be walked over\n        if len(carPNJList) >= 1:\n            for car in carPNJList:\n                i = carPNJList.index(car)\n                if direction == 'droite':\n                    point = perso.rect.midright\n                    point1 = perso.rect.topright\n                    point2 = perso.rect.bottomright\n                elif direction == 'gauche':\n                    point = perso.rect.midleft\n                    point1 = perso.rect.topleft\n                    point2 = perso.rect.bottomleft\n                elif direction == 'haut':\n                    point = perso.rect.midtop\n                    point1 = perso.rect.topright\n                    point2 = perso.rect.topleft\n                elif direction == 'bas':\n                    point = perso.rect.midbottom\n                    point1 = perso.rect.bottomleft\n                    point2 = perso.rect.bottomright\n                if perso.inCar is False:\n                    if car.rect.collidepoint(point):\n                        perso.carModel = car.model\n                        perso.i = i\n                        perso.carLife = car.life\n                        return False\n                else:\n                    if car.rect.collidepoint(point) or car.rect.collidepoint(point1) or car.rect.collidepoint(point2):\n                        if not (-10 < perso.speedX < 10 and -10 < perso.speedY < 10):\n                            car.life -= 1\n                        perso.speedX, perso.speedY = 0, 0\n                        return False\n        return True\n\n    def collisionPNJ(self, direction, perso, PNJList):\n        #NPCs cannot be walked over\n        if len(PNJList) >= 1:\n            for i in range(len(PNJList)):\n                if direction == 'droite':\n                    point = (perso.rect.right - 15, perso.rect.centery)\n                    point1 = (perso.rect.right - 15, perso.rect.top)\n                    point2 = (perso.rect.right - 15, perso.rect.bottom)\n                elif direction == 'gauche':\n                    point = (perso.rect.left + 15, perso.rect.centery)\n                    point1 = (perso.rect.left + 15, perso.rect.top)\n                    point2 = (perso.rect.left + 15, perso.rect.bottom)\n                elif direction == 'haut':\n                    point = (perso.rect.centerx, perso.rect.top + 15)\n                    point1 = (perso.rect.right, perso.rect.top + 15)\n                    point2 = (perso.rect.left, perso.rect.top + 15)\n                elif direction == 'bas':\n                    point = (perso.rect.centerx, perso.rect.bottom - 15)\n                    point1 = (perso.rect.right, perso.rect.bottom - 15)\n                    point2 = (perso.rect.left, perso.rect.bottom - 15)\n                if perso.inCar is False:\n                    if PNJList[i].rect.collidepoint(point):\n                        perso.cible = i\n                        return False\n                else:\n                    if PNJList[i].rect.colliderect(perso.rect):# or PNJList[i].rect.collidepoint(point1) or PNJList[i].rect.collidepoint(point2):\n                        print(PNJList[i])\n                        PNJList[i].life -= 4\n        return True\n\n    def collisionCarObj(self, direction, perso, carObjList):\n        #Driverless cars cannot be walked over\n        if len(carObjList) >= 1:\n            for i in range(len(carObjList)):\n                if direction == 'droite':\n                    point = perso.rect.midright\n                    point1 = perso.rect.topright\n                    point2 = perso.rect.bottomright\n                elif direction == 'gauche':\n                    point = perso.rect.midleft\n                    point1 = perso.rect.topleft\n                    point2 = perso.rect.bottomleft\n                elif direction == 'haut':\n                    point = perso.rect.midtop\n                    point1 = perso.rect.topright\n                    point2 = perso.rect.topleft\n                elif direction == 'bas':\n                    point = perso.rect.midbottom\n                    point1 = perso.rect.bottomleft\n                    point2 = perso.rect.bottomright\n                if perso.inCar is False:\n                    if carObjList[i].rect.collidepoint(point):\n                        perso.carModel = carObjList[i].model\n                        perso.i = i\n                        perso.carLife = carObjList[i].life\n                        return False\n                else:\n                    if carObjList[i].rect.collidepoint(point) or carObjList[i].rect.collidepoint(point1) or carObjList[i].rect.collidepoint(point2):\n                        if not (-10 < perso.speedX < 10 and -10 < perso.speedY < 10):\n                            carObjList[i].life -= 1\n                        perso.speedX, perso.speedY = 0, 0\n                        return False\n        return True\n\n    def collision(self, direction, perso, carPNJList, PNJList, carObjList):\n        if self.onTheRoad(self.loadRoad(perso.ligne, perso.colonne), direction, 
perso):\n            if self.collisionCar(direction, perso, carPNJList) is True:\n                if self.collisionPNJ(direction, perso, PNJList) is True:\n                    if self.collisionCarObj(direction, perso, carObjList) is True:\n                        return True\n","sub_path":"loadMap.py","file_name":"loadMap.py","file_ext":"py","file_size_in_byte":8686,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"313101626","text":"import os\r\nimport pandas as pd\r\ndef search(path):\r\n    order = []\r\n    parents = os.listdir(path)  # names of every file and folder under the path, returned as a list\r\n    for parent in parents:\r\n        child = os.path.join(path,parent)  # join the path components back into a full path\r\n        if os.path.isdir(child):  # if it is a directory, keep walking its files recursively\r\n            order.extend(search(child))\r\n        elif os.path.isfile(child):\r\n            if os.path.splitext(child)[1] == '.xlsx':  # split name and extension, keeping only '.xlsx' files\r\n                d = pd.read_excel(child,dtype=str)\r\n                order.append(d)\r\n    return order\r\n\r\ndef run(dir_list):\r\n    order_all = []\r\n    for i in dir_list:\r\n        path = r'D:\\MyData\\fanghui3\\Desktop\\京东数据\\订单\\%s'%i\r\n        df = search(path)\r\n        df = pd.concat(df)\r\n        df['客服账号名称']=i  # tag the rows with the customer-service account name\r\n        order_all.append(df)\r\n    return order_all\r\n\r\n\r\nif __name__ == '__main__':\r\n    dirs = os.listdir(r'D:\\MyData\\fanghui3\\Desktop\\京东数据\\订单')\r\n    df = pd.concat(run(dirs))\r\n    df.to_excel('汇总.xlsx',index=False)\r\n","sub_path":"chat_order/order.py","file_name":"order.py","file_ext":"py","file_size_in_byte":1188,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"395213872","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n\"\"\"\nDownload Baidu Tieba images page by page\n\"\"\"\nimport logging,requests,csv\nfrom bs4 import BeautifulSoup\nimport os\n\n# image downloader\nclass GetPicture:\n    def __init__(self):\n\n        # logging\n        self.log = logging\n        self.log.basicConfig(level=logging.INFO)\n        self.index = 1 # image counter\n\n    # fetch a page and parse it into a soup\n    def get_soup(self,url):\n        # request headers\n        header = {\n            'Accept': 'text/html,image/webp,image/*,*/*;q=0.8',\n            'Accept-Encoding': 'gzip,deflate,sdch,br',\n            'Accept-language': 'zh-CN,zh;q=0.8',\n            'Connection': 'keep-alive',\n            'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) Apple',\n        }\n        response = requests.get(url,headers = header)\n        response.raise_for_status()\n        response.encoding = 'utf-8'\n        soup = BeautifulSoup(response.text,features='html.parser')\n        return soup\n\n    # parse the HTML page\n    def get_picture(self,url):\n        # create a folder for the downloaded images\n        os.makedirs('Images', exist_ok=True)  # exist_ok needs python3\n        # index = 1 # image counter\n        soup = self.get_soup(url)\n        # log progress\n        # self.log.info('Start parsing the HTML page......')\n        all_img = soup.find_all('img', class_='BDE_Image')\n\n        for img in all_img:\n            self.log.info('Downloading image......')\n            img_url = img['src']\n            img_name = str(self.index)+'.jpg'\n            # download the image\n            img_content = requests.get(img_url).content\n            # save the image\n            with open(os.path.join('Images',img_name),mode='wb') as f:\n                f.write(img_content)\n            self.index += 1\n\n    def get_picture_by_pages(self,url,pageNum):\n        for page in range(1,pageNum+1):\n            self.log.info('Downloading images on page {0}......'.format(page))\n            page_url = url + '?pn='+str(page)\n            self.get_picture(page_url)\n\n\nif __name__ == '__main__':\n    url = 'http://tieba.baidu.com/p/2460150866'\n    getPic = GetPicture()\n    getPic.get_picture_by_pages(url,10)\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"GetBaiduPicture/GetPicture.py","file_name":"GetPicture.py","file_ext":"py","file_size_in_byte":2118,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"628203106","text":"\n\n#class header\nclass _PUBLISH():\n\tdef __init__(self):\n\t\tself.name = \"PUBLISH\"\n\t\tself.definitions = [u'to make 
information available to people, especially in a book, magazine, or newspaper, or to produce and sell a book, magazine, or newspaper: ']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'verbs'\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/verbs/_publish.py","file_name":"_publish.py","file_ext":"py","file_size_in_byte":423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"71549030","text":"from rest_framework import serializers\n\nfrom auth_ex.serializers import UserSerializer\nfrom .models import Task\n\n\nclass TaskSerializer(serializers.ModelSerializer):\n user = UserSerializer(read_only=True)\n\n class Meta:\n model = Task\n fields = (\n 'pk', 'date', 'title', 'jira_task_id', 'jira_description',\n 'jira_timelog', 'notes', 'user',\n )\n extra_kwargs = {\n 'user': {\n 'read_only': True,\n 'default': serializers.CurrentUserDefault(),\n }\n }\n","sub_path":"backend/tasks/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"156005151","text":"import os\nimport json\nimport shutil\nimport unittest\n\nfrom dtk.utils.core.DTKConfigBuilder import DTKConfigBuilder\nfrom dtk.generic.demographics import *\nfrom dtk.vector.larval_habitat import *\nfrom dtk.vector.study_sites import configure_site\n\nclass TestDemographics(unittest.TestCase):\n\n def setUp(self):\n self.cb = DTKConfigBuilder.from_defaults('MALARIA_SIM')\n self.input_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'input')\n\n def tearDown(self):\n pass\n\n def test_risk(self):\n tmpfile = os.path.join(self.input_path,'test_risk.json')\n shutil.copyfile(os.path.join(self.input_path,'test_nodes.json'), tmpfile)\n set_risk_mod(tmpfile,'UNIFORM_DISTRIBUTION',0.8,1.2)\n with open(tmpfile,'r') as f:\n j=json.loads(f.read())\n for n in j['Nodes']:\n self.assertEqual(n['IndividualAttributes']['RiskDistribution1'],0.8)\n self.assertEqual(n['IndividualAttributes']['RiskDistribution2'],1.2)\n self.assertEqual(n['IndividualAttributes']['RiskDistributionFlag'],1)\n os.remove(tmpfile)\n\n def test_immune_mod(self):\n tmpfile=os.path.join(self.input_path,'test_immune_mod.json')\n shutil.copyfile(os.path.join(self.input_path,'test_defaults.json'), tmpfile)\n set_immune_mod(tmpfile,'GAUSSIAN_DISTRIBUTION',1.0,0.3)\n with open(tmpfile,'r') as f:\n j=json.loads(f.read())\n n=j['Defaults']\n self.assertEqual(n['IndividualAttributes']['ImmunityDistribution1'],1.0)\n self.assertEqual(n['IndividualAttributes']['ImmunityDistribution2'],0.3)\n self.assertEqual(n['IndividualAttributes']['ImmunityDistributionFlag'],2)\n os.remove(tmpfile)\n\n def test_static(self):\n self.cb.set_param('Demographics_Filenames',[os.path.join(self.input_path,'test_defaults.json')])\n set_static_demographics(self.cb)\n outfile=os.path.join(self.input_path,'test_defaults.static.json')\n self.assertListEqual(self.cb.get_param('Demographics_Filenames'),[outfile])\n self.assertEqual(self.cb.get_param('Birth_Rate_Dependence'),'FIXED_BIRTH_RATE')\n with open(outfile,'r') as f:\n j=json.loads(f.read())\n mod_mortality = { \"NumDistributionAxes\": 2,\n \"AxisNames\": [ \"gender\", \"age\" ],\n \"AxisUnits\": [ \"male=0,female=1\", \"years\" ],\n \"AxisScaleFactors\": [ 1, 365 ],\n \"NumPopulationGroups\": [ 2, 1 ],\n \"PopulationGroups\": [ [ 0, 1 ], [ 0 ] 
],\n \"ResultUnits\": \"annual deaths per 1000 individuals\",\n \"ResultScaleFactor\": 2.74e-06,\n \"ResultValues\": [ [ 45 ], [ 45 ] ] }\n n=j['Defaults']\n self.assertDictEqual(n['IndividualAttributes']['MortalityDistribution'],mod_mortality)\n self.assertEqual(n['IndividualAttributes']['AgeDistribution1'],0.000118)\n self.assertEqual(n['IndividualAttributes']['AgeDistributionFlag'],3)\n self.assertEqual(n['NodeAttributes']['BirthRate'],0.12329)\n os.remove(outfile)\n\n def test_growing(self):\n self.cb.set_param('Demographics_Filenames',[os.path.join(self.input_path,'test_nodes.json')])\n set_growing_demographics(self.cb)\n outfile=os.path.join(self.input_path,'test_nodes.growing.json')\n self.assertListEqual(self.cb.get_param('Demographics_Filenames'),[outfile])\n self.assertEqual(self.cb.get_param('Birth_Rate_Dependence'),'POPULATION_DEP_RATE')\n with open(outfile,'r') as f:\n j=json.loads(f.read())\n mod_mortality = { \"NumDistributionAxes\": 2,\n \"AxisNames\": [ \"gender\", \"age\" ],\n \"AxisUnits\": [ \"male=0,female=1\", \"years\" ],\n \"AxisScaleFactors\": [ 1, 365 ],\n \"NumPopulationGroups\": [ 2, 5 ],\n \"PopulationGroups\": [ [ 0, 1 ],\n [ 0, 2, 10, 100, 2000 ] ],\n \"ResultUnits\": \"annual deaths per 1000 individuals\",\n \"ResultScaleFactor\": 2.74e-06,\n \"ResultValues\": [ [ 60, 8, 2, 20, 400 ],\n [ 60, 8, 2, 20, 400 ] ] }\n for n in j['Nodes']:\n self.assertDictEqual(n['IndividualAttributes']['MortalityDistribution'],mod_mortality)\n self.assertEqual(n['NodeAttributes']['BirthRate'],0.0001)\n os.remove(outfile)\n\n def test_study_static_site(self):\n configure_site(self.cb,'Sinazongwe.static')\n self.assertEqual(os.path.basename(self.cb.get_param('Demographics_Filenames')[0]),'Zambia_Sinamalima_single_node_demographics.static.json')\n\n def test_study_site(self):\n configure_site(self.cb,'Sinazongwe')\n self.assertEqual(os.path.basename(self.cb.get_param('Demographics_Filenames')[0]),'Zambia_Sinamalima_single_node_demographics.compiled.json')\n\n def test_habitat_overlay(self):\n set_habitat_multipliers(self.cb, 'single_test_guess_2.5arcmin',\n [ NodesMultipliers(nodes=[340461476],multipliers={'ALL_HABITATS':10.0}) ])\n overlay=self.cb.demog_overlays['single_test_guess_2.5arcmin']\n self.assertEqual(overlay['Metadata']['IdReference'],'Gridded world grump2.5arcmin')\n\n with open(os.path.join(self.input_path,'test_overlay.json')) as f:\n j=json.loads(f.read())\n self.assertListEqual(j['Nodes'],overlay['Nodes'])\n\n set_habitat_multipliers(self.cb, 'single_test_guess_30arcsec',\n [ NodesMultipliers(nodes=[1632117296],multipliers={'ALL_HABITATS':1.0}) ])\n self.assertEqual(self.cb.demog_overlays['single_test_guess_30arcsec']['Metadata']['IdReference'],'Gridded world grump30arcsec')\n\nif __name__ == '__main__':\n unittest.main()","sub_path":"test/test_demographics.py","file_name":"test_demographics.py","file_ext":"py","file_size_in_byte":5954,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"539716749","text":"movies = []\n\n\n# Add new movie to dictionary by user\ndef add_movie():\n title = input(\"Enter the movie title: \")\n director = input(\"Enter the movie director: \")\n year = input(\"Enter the movie release year: \")\n genre = input(\"What type of genre is this movie: \")\n\n movies.append({\n \"title\": title,\n \"director\": director,\n \"year\": year,\n \"genre\": genre\n })\n\n\n# Gives us a list of existing movies in the list\ndef show_movies():\n for movie in movies:\n print_movies(movie)\n\n\n# Print 
info about a movie\ndef print_movies(movie):\n    print(f'Title: {movie[\"title\"]}')\n    print(f'Director: {movie[\"director\"]}')\n    print(f'Release year: {movie[\"year\"]}')\n    print(f'Movie genre: {movie[\"genre\"]}')\n\n\n# finds a movie by a title provided by user\ndef find_movie():\n    title = input(\"Enter the movie title: \")\n    for movie in movies:\n        if movie[\"title\"] == title:\n            print_movies(movie)\n            break\n    else:\n        print(f'The movie {title} does not exist.')\n\n\nMENU_PROMPT = \"\\nEnter 'a' to add a movie, 'l' to see all movies, 'f' to find a movie by title or 'q' to quit: \"\n# dictionary mapping menu options to their actions\nuser_options = {\n    \"a\": add_movie,\n    \"l\": show_movies,\n    \"f\": find_movie\n}\n\n\n# Menu function with options for user to select\ndef menu():\n    selection = input(MENU_PROMPT)\n    while selection != \"q\":\n        if selection in user_options:\n            selected_function = user_options[selection]\n            selected_function()\n        else:\n            print(\"Unknown command. Please try again!\")\n        selection = input(MENU_PROMPT)\n\n\nmenu()\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"440971671","text":"import requests\nimport csv\nimport json\nfrom selenium import webdriver\nfrom selenium.webdriver.support.ui import Select\nfrom bs4 import BeautifulSoup\nimport time\n\nurl = \"https://data.nhi.gov.tw/Datasets/Download.ashx?rid=A21030000I-D50001-001&l=https://data.nhi.gov.tw/resource/mask/maskdata.csv\"\n\ncnt = 1\nerror_cnt = 0\ndic = []\ndemo_dic = []\naddress_dic = {}\ndemo_address_dic = {}\ntmp_dic = {}\n# print(rows[0])\ndef convert_string(full):\n    s = full\n    s = s.replace(\"F\",\"F\")\n    s = s.replace(\"C\",\"C\")\n    s = s.replace(\"-\",\"-\")\n    s = s.replace(\"(\",\"(\")\n    s = s.replace(\")\",\")\")\n    s = s.replace(\":\",\":\")\n    s = s.replace(\"0\",\"0\")\n    s = s.replace(\"1\",\"1\")\n    s = s.replace(\"2\",\"2\")\n    s = s.replace(\"3\",\"3\")\n    s = s.replace(\"4\",\"4\")\n    s = s.replace(\"5\",\"5\")\n    s = s.replace(\"6\",\"6\")\n    s = s.replace(\"7\",\"7\")\n    s = s.replace(\"8\",\"8\")\n    s = s.replace(\"9\",\"9\")\n    return s\n    \ndef get_coor(addr):\n    global cnt\n    cnt += 1\n    options = webdriver.ChromeOptions()\n    options.add_argument(\"headless\")\n    browser = webdriver.Chrome(executable_path='chromedriver',options=options)\n    browser.get(\"http://www.map.com.tw/\")\n    search = browser.find_element_by_id(\"searchWord\")\n    search.clear()\n    search.send_keys(addr)\n    browser.find_element_by_xpath(\"/html/body/form/div[10]/div[2]/img[2]\").click() \n    time.sleep(2)\n    iframe = browser.find_elements_by_tag_name(\"iframe\")[1]\n    browser.switch_to.frame(iframe)\n    cor_btn = browser.find_element_by_xpath(\"/html/body/form/div[4]/table/tbody/tr[3]/td/table/tbody/tr/td[2]\")\n    cor_btn.click()\n    coor = browser.find_element_by_xpath(\"/html/body/form/div[5]/table/tbody/tr[2]/td\")\n    cor = coor.text.strip().split(\" \")\n    lat = cor[-1].split(\":\")[-1]\n    log = cor[0].split(\":\")[-1]\n    browser.quit()\n    return (lat, log)\n\ndef update_data(row):\n    global address_dic\n    global dic\n    if not row[2] in address_dic:\n        address_dic[row[2]] = get_coor(row[2]) #return tuple\n        print(\"Find New\", row[2], address_dic[row[2]])\n        save_data()\n    \n    demo_address_dic[row[2]] = address_dic[row[2]]\n    dic.append({\n        \"id\":row[0],\n        \"name\":row[1],\n        \"address\":row[2],\n        \"phone\":row[3],\n        \"lat\": address_dic[row[2]][0],\n        \"lng\": address_dic[row[2]][1],\n        \"adult_count\":row[4],\n        
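# row[5] and row[6] of maskdata.csv hold the child mask count and the last-update timestamp\n        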
\"child_count\": row[5],\n \"updatetime\": row[6]\n })\n\ndef update_mask():\n global demo_address_dic\n r = requests.get(url)\n csv_data = csv.reader(r.text.splitlines(), delimiter=',')\n next(csv_data)\n rows = list(csv_data)\n for row in rows:\n row[2] = convert_string(row[2])\n update_data(row)\n\ndef load_data():\n with open('data.json', 'r') as json_file:\n global tmp_dic\n tmp_dic = json.load(json_file)\n with open('demo_address.json', 'r') as json_file:\n global demo_address_dic\n demo_address_dic = json.load(json_file)\n with open('address.json', 'r') as json_file:\n global address_dic\n address_dic = json.load(json_file)\n print(\"Data Loaded\")\n\ndef save_data():\n with open('data.json', 'w', encoding=\"utf-8\") as json_file:\n json.dump(dic, json_file, indent=4, ensure_ascii=False)\n with open('demo_address.json', 'w', encoding=\"utf-8\") as json_file:\n json.dump(demo_address_dic, json_file, indent=4, ensure_ascii=False)\n with open('address.json', 'w', encoding=\"utf-8\") as json_file:\n json.dump(address_dic, json_file, indent=4, ensure_ascii=False)\n print(\"Data Saved\")\n\ndef get_coor_google(addr):\n res = requests.get(\"https://www.google.com.tw/maps/place/\"+ addr, headers = headers)\n end1 = 0\n end2 = 0\n\n start1 = res.text.find(\"ll=\", end1)\n end1 = res.text.find(\"\\\"\", start1)\n ll1 = res.text[start1+3:end1].split(\",\")\n start2 = res.text.find(\"markers=\", end2)\n end2 = res.text.find(\"&\", start2)\n ll2 = res.text[start2+8:end2].split(\"%2C\")\n global error_cnt\n if len(ll1) == 2:\n error_cnt = 0\n print(ll1)\n return ll1\n elif len(ll2) == 2:\n error_cnt = 0\n print(ll2)\n return ll2\n else:\n error_cnt += 1\n return (0,0)\n\n\nload_data()\nori_address_dic = len(address_dic)\nori_data_dic = len(tmp_dic)\nprint(\"address_dic:\", ori_address_dic)\nprint(\"data_dic:\", ori_data_dic)\nupdate_mask()\n\n\n# get_coor_google(\"新北市平溪區公園街17號1樓\")\n\nto_del = []\nfor i in address_dic.keys():\n flag = 0\n for j in dic:\n if i == j[\"address\"]:\n flag = 1\n break\n if flag == 0:\n to_del.append(i)\n\nprint(to_del)\nfor i in to_del:\n try:\n address_dic.pop(i) \n except KeyError:\n print(\"Key not found\") \n\nprint(\"ori_address_dic:\", ori_address_dic)\nprint(\"ori_data_dic:\", ori_data_dic)\nprint(\"address_dic:\", len(address_dic))\nprint(\"data_dic:\", len(dic))\nsave_data()","sub_path":"project/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4892,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"208481649","text":"import re\nimport os\nimport configparser\n\nconfig_path = os.path.expanduser(\"~/.snippets.ini\")\nconfig = configparser.ConfigParser()\n\n\ndef get_ignored_list():\n path = os.path.expanduser(\"~/.snippets.ignored\")\n with open(path) as f:\n lines = f.readlines()\n lines = [l.strip() for l in lines if l.strip()]\n return lines\n\n\ndef match(path):\n ignored = get_ignored_list()\n for i in ignored:\n if i in path:\n return True\n else:\n return False\n\n\nextentions = {\n \"py\": \"python\",\n \"js\": \"javascript\",\n \"hs\": \"haskell\",\n \"sh\": \"sh\",\n \"el\": \"elisp\",\n}\n\n\ninline_comments = {\n \"python\": \"#\",\n \"sh\": \"#\",\n \"perl\": \"#\",\n \"haskell\": \"--\",\n \"javascript\": \"//\",\n \"elisp\": \";\",\n}\nRE = r\"^{}\\s*?TAGS?\\s*:(.*)\"\nRE_TEMPLATE = r\"%s tags: \"\nRE_TAGS = {key: re.compile(RE.format(val), re.I) for key, val in 
inline_comments.items()}\n","sub_path":"snippets/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":892,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"86879947","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Apr 15 15:31:46 2020\n\n@author: DavidOhlson\n\"\"\"\n\nimport numpy as np\nfrom astropy.io import fits\nfrom astropy.table import Table, join, vstack, Column\nimport matplotlib.pyplot as plt\nfrom astropy.coordinates import SkyCoord, Distance, match_coordinates_sky\nfrom astropy import units as u\n\n#%% read in data\n\nt_she = Table.read('data/she2017.fits')\n### she has columns 'RAJ2000', 'DEJ2000', and 'Dist'\nt_nsa = Table.read('data/nsa_v1_0_1.fits')\n### NSA has columns 'RA', 'DEC'\n\n#%% attempt to crossmatch catalogs based on RA and DEC\nra_she = np.array(t_she['RAJ2000'])\ndec_she = np.array(t_she['DEJ2000'])\ndist_she = np.array(t_she['Dist'])\nra_nsa = np.array(t_nsa['RA'])\ndec_nsa = np.array(t_nsa['DEC'])\nc = 299792.458\nH_0 = 73\ndist_nsa = np.array(t_nsa['ZDIST']* (c/H_0))\n\ncat_she = SkyCoord(ra=ra_she*u.degree, dec=dec_she*u.degree, distance=dist_she*u.Mpc)\ncat_nsa = SkyCoord(ra=ra_nsa*u.degree, dec=dec_nsa*u.degree, distance=dist_nsa*u.Mpc)\n\n### get NSA matches to She\nidx, d2d, d3d = cat_she.match_to_catalog_sky(cat_nsa)\n### can constrain matches by max separation\nmax_sep = 1.0 * u.arcsec\nsep_constraint = d2d < max_sep\nshe_matches = t_she[sep_constraint]\nnsa_matches = t_nsa[idx[sep_constraint]]\n\n\"\"\"\n### check that tables are matched and have same indexing\nplt.scatter(she_matches['RAJ2000'], nsa_matches['RA'])\nplt.show()\nplt.scatter(she_matches['DEJ2000'], nsa_matches['DEC'])\nplt.show()\nplt.scatter(she_matches['Dist'], nsa_matches['ZDIST']*(c/H_0))\nplt.show()\n\"\"\"\n\n#%% Combine tables\n\nshe_matches['NSAID'] = nsa_matches['NSAID']\nnsa_she = join(she_matches, nsa_matches, keys='NSAID', join_type='left')\n\nnsa_she.write('data/nsa_she_1.fits', format='fits', overwrite=True)\n","sub_path":"combine_table.py","file_name":"combine_table.py","file_ext":"py","file_size_in_byte":1725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"24176902","text":"#! 
/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# vim:fenc=utf-8\n#\n# Copyright © 2018 mhristof \n#\n# Distributed under terms of the MIT license.\nimport boto3\nfrom time import strftime\nimport os\n\n\ndef handler(event, context):\n bucket = 'mhristof0075'\n filename = os.path.join('/tmp', strftime(\"%Y-%m-%dT%H:%M:%S\"))\n\n s3 = boto3.client('s3')\n\n with open(filename, 'w') as out:\n out.write('this was a triumph\\n')\n\n s3.upload_file(filename, bucket, os.path.basename(filename))\n\n\nif __name__ == '__main__':\n handler(None, None)\n","sub_path":"rotate/rotate.py","file_name":"rotate.py","file_ext":"py","file_size_in_byte":584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"529337413","text":"import random\n\ndef jogar():\n\n print(\"************************************\")\n print(\"Bem vindo ao jogo de Adivinhação!\")\n print(\"************************************\")\n\n numero_secreto = round(random.randrange(1,101))\n\n total_de_tentativas = 0\n pontos = 1000\n\n print(\"Qual nível de dificuldade?\")\n print(\"(1) Fácil - (2) Médio - (3) Difícil\")\n\n nivel = int(input(\"Defina o nível: \"))\n\n if (nivel == 1):\n total_de_tentativas = 20\n elif (nivel == 2):\n total_de_tentativas = 10\n else:\n total_de_tentativas = 5\n\n for tentativa in range(1, total_de_tentativas +1):\n print(\"Tentativa {} de {}\".format(tentativa, total_de_tentativas))\n chute_str = input(\"Digite um número entre 1 e 100: \")\n print(\"Voce digitou \", chute_str)\n chute=int(chute_str)\n\n if (chute < 1 or chute > 100):\n print(\"Voce deve digitar um número entre 1 e 100!\")\n continue\n\n acertou = chute == numero_secreto\n maior = chute > numero_secreto\n menor = chute < numero_secreto\n\n\n if (acertou):\n print(\"Parabens! Você acertou e fez {} pontos!\".format(pontos))\n break\n else:\n if (maior):\n print(\"Voce erro! Seu chute esta acima do número correto\")\n if (tentativa == total_de_tentativas):\n print(\"O numero secreto era {}. Você fez {} pontos\".format(numero_secreto, pontos))\n elif (menor):\n print(\"Voce erro! Seu chute esta abaixo do número correto\")\n if (tentativa == total_de_tentativas):\n print(\"O numero secreto era {}. Você fez {} pontos\".format(numero_secreto,pontos))\n pontos_perdidos = abs(numero_secreto - chute)\n pontos = pontos - pontos_perdidos\n\n\n print(\"Fim de Jogo!\")\n\nif(__name__== \"__main__\"):\n jogar()","sub_path":"adivinhacao.py","file_name":"adivinhacao.py","file_ext":"py","file_size_in_byte":1914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"505984738","text":"# -*- coding: utf-8 -*-\n# @Author: dacarvalho@ua.pt\n# @Date: 2016-12-06 01:28:26\n# @Last Modified by: dacarvalho\n# @Last Modified time: 2016-12-06 11:16:40\n\ndef sumN(n):\n sum = 0\n for i in range(0,n):\n value = input(\"Add value: \")\n sum += value\n\n print(\"Total sum of entered values: {}\".format(sum))\n\nn = input(\"How many values to sum? 
\")\nsumN(n)\n","sub_path":"Solved Exercises/1st Guide/ex26.py","file_name":"ex26.py","file_ext":"py","file_size_in_byte":374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"630489378","text":"#!/usr/bin/python\n\nfrom __future__ import print_function\nfrom bcc import BPF\nfrom ctypes import *\n\nimport sys\nimport socket\nimport os\nimport struct\nimport dnslib\nimport argparse\n\n\ndef encode_dns(name):\n size = 255\n if len(name) > 255:\n raise Exception(\"DNS Name too long.\")\n b = bytearray(size)\n i = 0;\n elements = name.split(\".\")\n for element in elements:\n b[i] = struct.pack(\"!B\", len(element))\n i += 1\n for j in range(0, len(element)):\n b[i] = element[j]\n i += 1\n\n\n return (c_ubyte * size).from_buffer(b)\n\ndef add_cache_entry(cache, name):\n key = cache.Key()\n key.p = encode_dns(name)\n leaf = cache.Leaf()\n leaf.p = (c_ubyte * 4).from_buffer(bytearray(4))\n cache[key] = leaf\n\n\nparser = argparse.ArgumentParser(usage='For detailed information about usage,\\\n try with -h option')\nreq_args = parser.add_argument_group(\"Required arguments\")\nreq_args.add_argument(\"-i\", \"--interface\", type=str, required=True, help=\"Interface name\")\nreq_args.add_argument(\"-d\", \"--domains\", type=str, required=True,\n help='List of domain names separated by comma. For example: -d \"abc.def, xyz.mno\"')\nargs = parser.parse_args()\n\n# initialize BPF - load source code from http-parse-simple.c\nbpf = BPF(src_file = \"dns_matching.c\", debug=0)\n# print(bpf.dump_func(\"dns_test\"))\n\n#load eBPF program http_filter of type SOCKET_FILTER into the kernel eBPF vm\n#more info about eBPF program types\n#http://man7.org/linux/man-pages/man2/bpf.2.html\nfunction_dns_matching = bpf.load_func(\"dns_matching\", BPF.SOCKET_FILTER)\n\n\n#create raw socket, bind it to user provided interface\n#attach bpf program to socket created\nBPF.attach_raw_socket(function_dns_matching, args.interface)\n\n# Get the table.\ncache = bpf.get_table(\"cache\")\n\n# Add cache entries\nentries = [i.strip() for i in args.domains.split(\",\")]\nfor e in entries:\n print(\">>>> Adding map entry: \", e)\n add_cache_entry(cache, e)\n\nprint(\"\\nTry to lookup some domain names using nslookup from another terminal.\")\nprint(\"For exmaple: nslookup foo.bar\")\nprint(\"\\nBPF program will filter-in DNS packets which match with map entries.\")\nprint(\"Packets received by user space program will be printed here\")\nprint(\"\\nHit Ctrl+C to end...\")\n\nsocket_fd = function_dns_matching.sock\nsock = socket.fromfd(socket_fd, socket.PF_PACKET, socket.SOCK_RAW, socket.IPPROTO_IP)\nsock.setblocking(True)\n\nwhile 1:\n #retrieve raw packet from socket\n packet_str = os.read(socket_fd, 2048)\n packet_bytearray = bytearray(packet_str)\n\n ETH_HLEN = 14\n UDP_HLEN = 8\n\n #IP HEADER\n #calculate ip header length\n ip_header_length = packet_bytearray[ETH_HLEN] #load Byte\n ip_header_length = ip_header_length & 0x0F #mask bits 0..3\n ip_header_length = ip_header_length << 2 #shift to obtain length\n\n #calculate payload offset\n payload_offset = ETH_HLEN + ip_header_length + UDP_HLEN\n\n payload = packet_bytearray[payload_offset:]\n # pass the payload to dnslib for parsing\n dnsrec = dnslib.DNSRecord.parse(payload)\n print (dnsrec.questions, 
\"\\n\")\n","sub_path":"examples/networking/dns_matching/dns_matching.py","file_name":"dns_matching.py","file_ext":"py","file_size_in_byte":3038,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"553853211","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Sep 9 07:43:17 2018\n\n@author: suleman\n\"\"\"\n\nimport numpy as np #to use numpy arrays\nimport matplotlib.pyplot as plt #used to plot data\n\nfrom readData import readData\nfrom costFunction import costFunction\nfrom normalEquation import normalEquation\nfrom addBiaseVariableToX import addBiaseVariableToX \nfrom featureNormalization import featureNormalization\nfrom gradientDescent import gradientDescent\n\n#read data from a csv file \nx,y = readData()\n\n#normalize all the features of x input\nfeatureNormalization(x)\n\n#if there is only one feature then plot the data\nif x.shape[1] == 1:\n    print('There is only one feature, let us plot the data.')\n    #plotData(x,y) \n    \n#add the bias variable to the x \nx = addBiaseVariableToX(x)\n\n#initialize theta\ntheta = np.zeros(x.shape[1])\n\nprint(f'Initial cost is: {costFunction(x, y, theta)}')\n#for univariate_linear_regression_data.txt\n#alpha = 0.01\n#iterations = 700\n\n#for assign2data1.txt\nalpha = 0.01\niterations = 300\n\nprint('Running Gradient Descent')\ncost, theta = gradientDescent(x, y, theta, alpha, iterations)\nif x.shape[1] == 2:#if there is only one feature then draw graph of new theta value\n    plt.plot(x[:,1:2] , x.dot(theta), '-') \nprint(f'After Gradient Descent cost is: {costFunction(x, y, theta)}')\n\n#debug graph to see how cost decreases with respect to iterations\nprint('Drawing debugging graph')\nit = np.array(range(len(cost)))\nfig, debug = plt.subplots()\ndebug.plot(it,cost,'-') \ndebug.set(xlabel='number of iterations', ylabel='cost', title='Debugging')\n\n#calculate theta with normal equations which minimize the cost\nnormalizedTheta = normalEquation(x, y)","sub_path":"linearRegression/linearRegression.py","file_name":"linearRegression.py","file_ext":"py","file_size_in_byte":1703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"180677437","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport sys\nimport argparse\nimport re\nfrom mmap import ACCESS_READ, mmap\n\ndef hname(name):\n    name = re.sub('.*[\\\\/]', '', name)\n    return re.sub('\\.[^.]+$', '', name) + '.h'\n\n# https://stackoverflow.com/questions/3303312/how-do-i-convert-a-string-to-a-valid-variable-name-in-python\ndef vname(name):\n    name = re.sub('.*[\\\\/]', '', name)\n    return re.sub('\\W|^(?=\\d)', '_', name)\n\nparser = argparse.ArgumentParser(prog='bin2h.py')\nparser.add_argument('-i', '--input', metavar='FILE', help='input file', required=True);\nparser.add_argument('-o', '--output', metavar='HEADER.h', help='output file')\nparser.add_argument('-n', '--name', metavar='VARIABLE', help='variable name in C header')\nparser.add_argument('-u', '--unsigned', action='store_true', help='use unsigned char')\nparser.add_argument('-v', '--verbose', action='store_true', help='noisy output')\n\nargs = parser.parse_args()\n\nifile = args.input\nofile = args.output or hname(args.input)\nname = args.name or vname(args.input)\nunsigned = args.unsigned\nverbose = args.verbose\n\n\nsize = 0\n# https://stackoverflow.com/questions/1035340/reading-binary-file-and-looping-over-each-byte\nwith open(ifile, 'rb', 0) as f, mmap(f.fileno(), 0, access=ACCESS_READ) as s:\n    with open(ofile, 'w') as out:\n        if 
unsigned:\n            out.write('const unsigned char %s[] =\\n \"' % name)\n        else:\n            out.write('const char %s[] =\\n \"' % name)\n\n        for byte in s:\n            size = size + 1\n\n            v = hex(byte)[2:].zfill(2)  # iterating an mmap yields ints in Python 3\n            out.write('\\\\x%s' % v)\n\n            if size % 16 == 0:\n                out.write('\"\\n \"')\n\n        out.write('\"\\n;\\nconst unsigned int %s_len = %s;\\n' % (name, size))\n\nif verbose:\n    print(\"\"\"bin2h.py: '%s' (%s bytes) -> '%s' => %s[] + %s_len\"\"\" % (ifile, size, ofile, name, name))\n\n","sub_path":"src/bin2h.py","file_name":"bin2h.py","file_ext":"py","file_size_in_byte":1845,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"607621799","text":"import prometheus_client\n\nfrom flask import Flask\n\nfrom werkzeug.middleware.dispatcher import DispatcherMiddleware\n\nfrom app.helpers.middleware import setup_metrics\n\n\ndef index():\n    return \"Hello World!\"\n\n\ndef cpu():\n    \"\"\"Simple function to simulate a cpu-intensive task\"\"\"\n    for i in range(10000):\n        i ** i  # Exponential operations are REALLY cpu-intensive\n    return \"CPU-intensive task is complete!\"\n\n\ndef memory():\n    \"\"\"Simple function to simulate a memory-intensive task\"\"\"\n    d = {}\n    for i in range(10000000):\n        i = str(i)\n        i += \"xyz\"\n        d[i] = i\n    return \"Memory-intensive task is complete!\"\n\n\ndef create_app():\n    main_app = Flask(__name__)\n\n    main_app.add_url_rule(\"/\", \"index\", index)\n    main_app.add_url_rule(\"/cpu\", \"cpu\", cpu)\n    main_app.add_url_rule(\"/memory\", \"memory\", memory)\n    setup_metrics(main_app)\n\n    app = DispatcherMiddleware(\n        app=main_app.wsgi_app,\n        mounts={\"/metrics\": prometheus_client.make_wsgi_app()}\n    )\n\n    return app\n","sub_path":"app/flask_app.py","file_name":"flask_app.py","file_ext":"py","file_size_in_byte":1015,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"404987968","text":"# spriteEg1.py\r\n# A sprite is just a 2D object in a game, usually a character\r\n# A spritesheet is a sheet of all of the frames of animation that a character\r\n# uses in a game to walk or perform actions.\r\n#\r\n# To achieve simple animation in python we simply\r\n# display the frames of the picture in order.\r\n\r\nfrom pygame import *\r\n\r\ninit()\r\nsize = width, height = 800, 600\r\nscreen = display.set_mode(size)\r\nrunning = True\r\nmyClock = time.Clock()\r\n\r\nframe = 0\r\npics = []\r\nfor i in range(3):\r\n    pics.append(image.load(\"Will\\\\enemy\" + str(i) + \".png\"))\r\n    \r\n\r\nwhile running:\r\n    for evnt in event.get():  \r\n        if evnt.type == QUIT:\r\n            running = False\r\n\r\n    screen.fill((150,220,150))\r\n    screen.blit(pics[frame],(100,100))\r\n    frame += 1\r\n    if frame == 3:\r\n        frame = 0\r\n    display.flip()\r\n    myClock.tick(8)\r\n    \r\nquit()\r\n","sub_path":"enemySprite/stationaryEnemy1.py","file_name":"stationaryEnemy1.py","file_ext":"py","file_size_in_byte":857,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"429302290","text":"\n\n#class header\nclass _IBERIA():\n\tdef __init__(self,): \n\t\tself.name = \"IBERIA\"\n\t\tself.definitions = [u'the area of land that consists of Spain, Portugal, Andorra, Gibraltar, and part of France: ']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'nouns'\n\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn 
self.jsondata\n","sub_path":"xai/brain/wordbase/nouns/_iberia.py","file_name":"_iberia.py","file_ext":"py","file_size_in_byte":371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"482777670","text":"\ndef leibnizCalculation(numItems):\n\n\tpi = 0;\n\n\tfor i in range(numItems):\n\t\tpi += (float(4)/(2*i+1)) * ((-1)**i)\n\t\tprint(i, pi)\n\n\treturn pi\n\n\ndef leibnizCalculation2(numItems):\n\n\tpi = 0;\n\tsign = -1\n\n\tfor i in range(numItems):\n\t\tsign *= -1\n\t\tpi += (float(4)/(2*i+1)) * sign\n\t\tprint(i, pi)\n\t\t\n\treturn pi\n\ndef wallis(numItems):\n\n\tpi = 1;\n\n\tfor i in range(1, numItems + 1):\n\t\tnum = 2*i\n\t\tden1 = num - 1\n\t\tden2 = num + 1\n\t\tpi *= (num/den1) * (num/den2)\n\t\tprint(i, pi*2)\n\n\treturn 2*pi\n\ndef main():\n\tnumItems = 1000000000000\n\tpi = wallis(numItems)\n\n\tprint(pi);\n\nmain()\n","sub_path":"Lecture/Lecture 9/calculatepi.py","file_name":"calculatepi.py","file_ext":"py","file_size_in_byte":561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"446478130","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn\n\nn_grid = 200\nx = np.linspace(-5,5,n_grid)\ny = np.sin(x)\n\nh = x[1]-x[0]\ndy_dx = np.gradient(y, h)\n\ndy2_dx2 = np.gradient(dy_dx, h)\nplt.plot(x, y, label='y')\nplt.plot(x, dy_dx, label='dy_dx')\nplt.plot(x, dy2_dx2, label='dy2_dx2')\nplt.legend()\nplt.show()\n\n\n","sub_path":"dft.py","file_name":"dft.py","file_ext":"py","file_size_in_byte":321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"575795930","text":"import pb276\n\nimport time\n\nt1 = time.time()\n\n\nprint( 'test_function cython : \\t' , pb276.Primitive_integer_sided_triangles (10**4 ) )\n# print( 'test_function cython : \\t' , pb276.Ccombinations(3, [1,2,3,4,5,6] ) )\n\n\n\nt2 = time.time()\ncy = t2-t1\nprint('Completed in :', round((cy)*1000,6), 'ms')\n\n#\n# t1 = time.time()\n#\n#\n# print( '\\ntest_function Python : \\t' ,test_python.test_function(10**7) )\n#\n#\n# t2 = time.time()\n# py = t2-t1\n# print('Completed in :', round((py)*1000,6), 'ms')\n#\n# print('\\nCython is ' , round(py/cy,2) , 'x faster')\n\n\n\n\n\n\n","sub_path":"test/testing_pb276.py","file_name":"testing_pb276.py","file_ext":"py","file_size_in_byte":556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"103620079","text":"from flask import Flask, Blueprint, jsonify, request, send_file, current_app\nfrom models.administrador import Administrador\nfrom models.alumno import Alumno\nfrom models.apoderado import Apoderado\nfrom models.profesor import Profesor\nfrom models.direccion import Direccion\nfrom models.alerta import Alerta\nfrom flask_restful import Api, Resource, url_for\nfrom libs.to_dict import mongo_to_dict\nimport json\nfrom bson import json_util\nfrom PIL import Image\nfrom flask_restful import reqparse\nimport os\n\ndef init_module(api):\n api.add_resource(AdministradorItem, '/administradores/')\n api.add_resource(Administradores, '/administradores')\n api.add_resource(AdministradorImagenItem, '/administrador_imagen/')\n api.add_resource(AdministradorImagenDefault, '/administrador_imagen_default/')\n\ndef administradorEncriptacion():\n for administrador in Administrador.objects().all():\n administrador.encrypt_password(administrador.password)\n administrador.save()\nclass AdministradorItem(Resource):\n def __init__(self):\n self.reqparse = 
reqparse.RequestParser()\n self.reqparse.add_argument('auth-token', type = str, required=True, location='headers')\n super(AdministradorItem, self).__init__()\n\n def get(self, id):\n args = self.reqparse.parse_args()\n token = args.get('auth-token')\n alumno = Alumno.load_from_token(token)\n apoderado = Apoderado.load_from_token(token)\n administrador = Administrador.load_from_token(token)\n profesor = Profesor.load_from_token(token)\n if alumno == None and apoderado == None and administrador == None and profesor == None:\n return {'response': 'user_invalid'},401\n return Administrador.objects(id=id).first().to_dict()\n\n def delete(self, id):\n args = self.reqparse.parse_args()\n token = args.get('auth-token')\n alumno = Alumno.load_from_token(token)\n apoderado = Apoderado.load_from_token(token)\n administrador = Administrador.load_from_token(token)\n profesor = Profesor.load_from_token(token)\n if alumno == None and apoderado == None and administrador == None and profesor == None:\n return {'response': 'user_invalid'},401\n administrador = Administrador.objects(id=id).first()\n administrador.activo = False\n administrador.save()\n return{'Response':'borrado'}\n \n def put(self,id):\n args = self.reqparse.parse_args()\n token = args.get('auth-token')\n administrador = Administrador.load_from_token(token)\n administrador_editar = Administrador.objects(id=id).first()\n if administrador == None and administrador_editar == None:\n return {'response': 'user_invalid'},401\n data = request.data.decode()\n data = json.loads(data)\n administrador_editar.nombres = data['nombres']\n administrador_editar.apellido_paterno = data['apellido_paterno']\n administrador_editar.apellido_materno = data['apellido_materno']\n administrador_editar.telefono = data['telefono']\n administrador_editar.email = data['email']\n administrador_editar.rut = data['rut']\n direccion = Direccion(calle=data['calle'],\n numero=data['numero'],\n comuna=data['comuna'],\n cas_dep_of=data['cas_dep_of'])\n administrador_editar.direccion = direccion\n administrador_editar.save()\n return {'Response': 'exito',\n 'id': str(administrador_editar.id)}\n\n\n\nclass Administradores(Resource):\n def __init__(self):\n self.reqparse = reqparse.RequestParser()\n self.reqparse.add_argument('auth-token', type = str, required=True, location='headers')\n super(Administradores, self).__init__()\n\n def get(self):\n args = self.reqparse.parse_args()\n token = args.get('auth-token')\n alumno = Alumno.load_from_token(token)\n apoderado = Apoderado.load_from_token(token)\n administrador = Administrador.load_from_token(token)\n profesor = Profesor.load_from_token(token)\n if alumno == None and apoderado == None and administrador == None and profesor == None:\n return {'response': 'user_invalid'},401\n response = []\n for administrador in Administrador.objects(activo=True).all():\n if administrador.activo:\n response.append(administrador.to_dict())\n return response\n \n def post(self):\n args = self.reqparse.parse_args()\n token = args.get('auth-token')\n alumno = Alumno.load_from_token(token)\n apoderado = Apoderado.load_from_token(token)\n administrador = Administrador.load_from_token(token)\n profesor = Profesor.load_from_token(token)\n if alumno == None and apoderado == None and administrador == None and profesor == None:\n return {'response': 'user_invalid'},401\n data = request.data.decode()\n data = json.loads(data)\n administrador = Administrador()\n administrador.nombres = data['nombres']\n administrador.apellido_paterno = 
data['apellido_paterno']\n administrador.apellido_materno = data['apellido_materno']\n administrador.telefono = data['telefono']\n administrador.email = data['email']\n administrador.rut = data['rut']\n administrador.encrypt_password(data['rut'])\n direccion = Direccion(calle=data['calle'],\n numero=data['numero'],\n comuna=data['comuna'],\n cas_dep_of=data['cas_dep_of'])\n administrador.direccion = direccion\n administrador.save()\n return {'Response': 'exito',\n 'id': str(administrador.id)}\n\nclass AdministradorImagenItem(Resource):\n def post(self,id):\n imagen = Image.open(request.files['imagen'].stream).convert(\"RGB\")\n imagen.save(os.path.join(current_app.config.get(\"BASE_PATH\")+\"uploads/administradores\", str(id)+\".jpg\"))\n imagen.thumbnail((800, 800))\n imagen.save(os.path.join(current_app.config.get(\"BASE_PATH\")+\"uploads/administradores\", str(id)+'_thumbnail.jpg'))\n administrador = Administrador.objects(id=id).first()\n administrador.imagen = id\n administrador.save()\n return {'Response': 'exito','id': str(administrador.id)}\n \n def get(self,id):\n return send_file(current_app.config.get(\"BASE_PATH\")+'uploads/administradores/'+id+'_thumbnail.jpg')\n\nclass AdministradorImagenDefault(Resource):\n def get(self,id):\n administrador = Administrador.objects(id=id).first()\n imagen = Image.open(current_app.config.get(\"BASE_PATH\")+\"uploads/administradores/default_thumbnail.jpg\")\n imagen.thumbnail((800, 800))\n imagen.save(os.path.join(current_app.config.get(\"BASE_PATH\")+\"uploads/administradores\", str(id)+'_thumbnail.jpg'))\n administrador.imagen = str(administrador.id)\n administrador.save()\n return { 'Response':'exito'}","sub_path":"resources/administrador.py","file_name":"administrador.py","file_ext":"py","file_size_in_byte":6990,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"115547366","text":"#!/usr/bin/python3\n## Copyright 2015 Kevin Boxhoorn\n##\n## Licensed under the Apache License, Version 2.0 (the \"License\");\n## you may not use this file except in compliance with the License.\n## You may obtain a copy of the License at\n##\n## http://www.apache.org/licenses/LICENSE-2.0\n##\n## Unless required by applicable law or agreed to in writing, software\n## distributed under the License is distributed on an \"AS IS\" BASIS,\n## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n## See the License for the specific language governing permissions and\n## limitations under the License.\n##\n## main.py\n##\n## PySH entry point.\n\nimport os, pwd, re, subprocess, sys\nfrom argparse import ArgumentParser as argparser\nfrom os import chdir, getcwd\nfrom os.path import abspath, isdir\n\n## function to end pysh\ndef end(status):\n sys.exit(status)\n\n## TODO: -c on command line for non interactive\n## setup vars for interactive session\nhistory = []\n\n## cd function\ndef updatecwd(path):\n if isdir(path):\n chdir(path)\n else:\n print(\"{0}: no such file or directory\".format(path))\n\n## path formatters\ndef fmtpath(path):\n if \"HOME\" in os.environ:\n path = path.replace(os.environ[\"HOME\"], \"~\")\n return path\ndef unfmtpath(path):\n if \"HOME\" in os.environ:\n path = path.replace(\"~\", os.environ[\"HOME\"])\n return abspath(path)\n## environment variable substituter\ndef subenvvars(string):\n strvars = re.findall(r\"\\$\\w+\", string)\n for var in strvars:\n try:\n string = string.replace(var, os.environ[var.strip(\"$\")])\n except KeyError:\n continue\n return string\n\n## 
history management\ndef addtohist(cmd):\n    if type(cmd) == list:\n        history.append(\" \".join(cmd))\n    elif type(cmd) == str:\n        history.append(cmd)\n    else:\n        raise TypeError(\"command must be array or string\")\ndef clearhist():\n    global history\n    history = []\n\n## run command\ndef runcmd(cmdline):\n    ## substitute environment variables\n    cmdline = subenvvars(cmdline)\n\n    ## create cmd array and check if any input\n    cmd = cmdline.split()\n    if len(cmd) < 1: return\n    if cmd[0] == \"\": return\n    \n    ## inbuilt functions\n    \n    ## exit: ends pysh\n    if cmd[0] == \"exit\":\n        end(0)\n\n    ## cd: change current working directory\n    elif cmd[0] == \"cd\":\n        updatecwd(unfmtpath(cmd[1]))\n    ## export: set environment variables\n    elif cmd[0] == \"export\" or cmd[0] == \"setenv\":\n        if \"=\" in cmd[1]:\n            var, value = cmd[1].split(\"=\", 1)\n            os.environ[var] = value\n    ## printenv: print environment variables\n    elif cmd[0] == \"printenv\":\n        for var, value in os.environ.items():\n            print(\"{0}={1}\".format(var, value))\n\n    ## history: shows command history from this session\n    elif cmd[0] == \"history\":\n        for index, line in enumerate(history):\n            print(\"({0}) {1}\".format(index + 1, line))\n    ## !: runs previous command by index, e.g. !2\n    elif cmd[0][0] == \"!\":\n        histcmd = cmd[0][1:]\n        if len(histcmd) > 0:\n            try:\n                histindex = int(histcmd)\n                if histindex > 0:\n                    histindex -= 1\n                runcmd(history[histindex])\n            except ValueError:\n                if histcmd == \"!\":\n                    runcmd(history[-1])\n\n    ## TODO: implement\n    ## jobs: shows background tasks\n    elif cmd[0] == \"jobs\":\n        pass\n    ## kill: kills a background task or process\n    elif cmd[0] == \"kill\":\n        pass\n\n    ## not an inbuilt function, send to system\n    else:\n        try:\n            subprocess.call(cmd)\n        except FileNotFoundError:\n            print(\"{0}: command not found\".format(cmd[0]))\n            return\n\n## argument parsing\nparser = argparser(description = \"A shell made in Python, prioritizing speed and efficiency.\")\n\n## system information\nuser = pwd.getpwuid(os.getuid()).pw_name\nhostname = os.uname().nodename\n\n## main loop\nwhile True:\n    ## update current working directory\n    cwd = getcwd()\n\n    ## get user input or command\n    try:\n        cmdline = input(\"{0}({1}):{2}/ \".format(user, hostname, fmtpath(cwd)))\n    ## clean exit on ctrl-d\n    except (EOFError, KeyboardInterrupt):\n        sys.stdout.write(\"\\n\")\n        end(0)\n\n    ## run command and add to history\n    runcmd(cmdline)\n    addtohist(cmdline)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"68783440","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jul 17 16:26:07 2019\n\n@author: TMaysGGS\n\"\"\"\n\n'''Last updated on 11/15/2019 14:22'''\nimport sys\nimport time\nimport os\nimport cv2 \nimport random \nimport argparse \nimport pickle as pkl\nimport numpy as np\n\nsys.path.append(\"../\")\nimport utils\nfrom MTCNN_models import pnet \n\n'''Helper functions'''\n# P-Net Prediction\ndef pnet_prediction(img, PNet, thresholds):\n    \n    temp = img.copy() / 255. 
\n    orig_h, orig_w, orig_c = temp.shape \n    # Generate the list of scaling factors for the image pyramid\n    scales = utils.calculate_scales(temp)\n    PNet_outputs = []\n\n    t0 = time.time()\n    # Build the image pyramid and run prediction scale by scale\n    for scale in scales:\n        scale_h = int(orig_h * scale)\n        scale_w = int(orig_w * scale)\n        scaled_img = cv2.resize(temp, (scale_w, scale_h)) # OpenCV takes the width first\n        input_img = scaled_img.reshape(1, *scaled_img.shape) # reshape to (1, scale_h, scale_w, orig_c)\n        pred = PNet.predict(input_img) # pred is a list of 2 arrays with the shapes (1, ?, ?, 2) & (1, ?, ?, 4)\n        PNet_outputs.append(pred) \n    img_num = len(scales)\n    \n    rectangles_list = []\n    for i in range(img_num):\n        prob = PNet_outputs[i][0][0][:, :, 0] # \"face\" confidence, the (?, ?) map from the (1, ?, ?, 2) output above\n        roi = PNet_outputs[i][1][0] # bounding-box offset ratios, the (?, ?, 4) part of the (1, ?, ?, 4) output above\n        \n        out_h, out_w = prob.shape # each value is the confidence that a 12 x 12 window contains a face\n        out_side = max(out_h, out_w) # ???\n        \n        prob = np.swapaxes(prob, 0, 1) \n        roi = np.swapaxes(roi, 0, 2) # shape becomes (4, ?, ?)\n        rectangles = utils.pnet_detect_face(prob, roi, out_side, 1 / scales[i], orig_w, orig_h, thresholds[0])\n        rectangles_list.extend(rectangles) # each rectangle holds (num, x1, y1, x2, y2, score)\n    \n    rectangles_list = utils.NMS(rectangles_list, 0.7, 'iou') \n    \n    t1 = time.time()\n    print(\"Inference time for P-Net is \" + str(t1 - t0))\n\n    return rectangles_list\n\n'''Generating hard negative samples & saving''' \ndef main(args):\n    \n    IMG_SIZE = args.IMG_SIZE\n    print(\"IMG_SIZE:\", IMG_SIZE)\n    if IMG_SIZE != 12 and IMG_SIZE != 24 and IMG_SIZE != 48: \n        raise Exception(\"Image size wrong!\")\n    IMG_ROOT_DIR = args.IMG_ROOT_DIR \n    if args.DEBUG == 1: \n        DEBUG = True \n    else: \n        DEBUG = False \n\n    thresholds = [0.6, 0.6, 0.7]\n    \n    PNet = pnet(training = False)\n    PNet.load_weights('../Models/PNet.h5')\n    PNet.summary() \n    \n    RECORD_PATH = os.path.join(IMG_ROOT_DIR, 'wider_face_add_lm_10_10_info.pkl') \n    f = open(RECORD_PATH, 'rb') \n    info = pkl.load(f) \n    f.close()\n    if DEBUG: \n        info = info[: 100] \n    \n    neg_hard_save_dir = r'../Data/' + str(IMG_SIZE) + '/neg_hard' \n    if not os.path.exists(neg_hard_save_dir): \n        os.mkdir(neg_hard_save_dir)\n    \n    neg_hard_idx = 0 \n    neg_hard_list = [] \n    \n    for i in range(len(info)):\n        \n        pic_info = info[i]\n        \n        img_path = os.path.join(IMG_ROOT_DIR, 'JPEGImages', pic_info[0]) \n        img = cv2.imread(img_path) \n        height, width, channel = img.shape\n        \n        bboxes = np.array(pic_info[2]) # total bounding boxes in one picture \n        \n        if (i + 1) % 1000 == 0: \n            print(str(i + 1) + \" pics processed \") \n            print(str(neg_hard_idx) + \" hard negative samples generated. 
\") \n \n # Generate negative hard samples\n if IMG_SIZE == 12: \n pred_rectangles_list = pnet_prediction(img, PNet, thresholds) \n \n if args.AUGMENT_CONTROL > 0: \n random.shuffle(pred_rectangles_list) \n pred_rectangles_list = pred_rectangles_list[: args.AUGMENT_CONTROL] \n \n pred_rectangles = np.array(pred_rectangles_list) \n pred_boxes = utils.rect2square(pred_rectangles) \n \n for j in range(len(pred_boxes)):\n \n pred_box = pred_boxes[j] \n x1, y1, x2, y2 = np.array(pred_box[: 4]).astype(int)\n w = x2 - x1 + 1 \n h = y2 - y1 + 1 \n \n # Drop the box that exceeds the boundary or is too small \n if w < 20 or h < 20 or x1 < 0 or y1 < 0 or x2 > width - 1 or y2 > height - 1: \n continue\n \n crop_box = np.array([x1, y1, x2, y2])\n if bboxes.shape[0] == 0:\n iou = 0\n else:\n iou = utils.IoU(crop_box, bboxes)\n \n if np.max(iou) < 0.1: \n \n cropped_img = img[y1: y2, x1: x2]\n resized_img = cv2.resize(cropped_img, (IMG_SIZE, IMG_SIZE), interpolation = cv2.INTER_LINEAR)\n saving_path = os.path.join(neg_hard_save_dir, str(neg_hard_idx) + '.jpg')\n success = cv2.imwrite(saving_path, resized_img)\n if not success: \n raise Exception(\"Neg picture \" + str(neg_hard_idx) + \" saving failed. \") \n \n img_name = os.path.join('neg_hard', str(neg_hard_idx) + '.jpg')\n label = 0 \n roi = np.array([-1] * 4)\n landmark = np.array([-1] * 12)\n \n neg_hard_list.append([img_name, label, roi, landmark]) \n \n neg_hard_idx = neg_hard_idx + 1\n \n neg_hard_anno_path = r'../Data/' + str(IMG_SIZE) + '/neg_hard_record.pkl' \n \n file = open(neg_hard_anno_path, 'wb+') \n pkl.dump(neg_hard_list, file)\n file.close() \n \n print(\"File saving done. \") \n\ndef parse_arguments(argv):\n \n parser = argparse.ArgumentParser()\n parser.add_argument('--IMG_SIZE', type = int, help = 'The input image size', default = 12)\n parser.add_argument('--IMG_ROOT_DIR', type = str, help = 'The image data directory', default = r'../Data/wider_face_add_lm_10_10') \n parser.add_argument('--DEBUG', type = int, help = 'Debug mode or not', default = 0) \n parser.add_argument('--AUGMENT_CONTROL', type = int, help = 'Limit the number of augmented pics', default = 100) \n \n return parser.parse_args(argv)\n\nif __name__ == '__main__':\n \n main(parse_arguments(sys.argv[1:]))\n ","sub_path":"Data_Augmentation/02_WIDER_FACE_add_lm_annotation_hard_sample_generation.py","file_name":"02_WIDER_FACE_add_lm_annotation_hard_sample_generation.py","file_ext":"py","file_size_in_byte":6263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"229327685","text":"import numpy as np\nfrom PIL import Image\n\ninput_shape=(320,480,3)\n\npredicted = np.genfromtxt('predicted.csv', delimiter=',')\npredicted = np.reshape(predicted, input_shape)\n\nimg = np.zeros((input_shape), dtype=np.uint8)\n\nleft_color = [0, 70, 20]\nmiddle_color = [50, 0, 180]\nright_color = [200, 100, 0]\n\nfor x in range(input_shape[0]):\n for y in range(input_shape[1]):\n if predicted[x,y,0] == 1.0:\n img[x,y] = left_color\n elif predicted[x,y,1] == 1.0:\n img[x,y] = middle_color\n else:\n img[x,y] = right_color\n\nimg_file = Image.fromarray(img)\nimg_file.save('predicted.png')\n\nprint(\"done\")\n\n\n","sub_path":"classimages/color_predicted.py","file_name":"color_predicted.py","file_ext":"py","file_size_in_byte":644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"587369125","text":"\"\"\"\n\n\"\"\"\nimport torch\nfrom torch import log\nimport torch.nn as nn\nfrom 
torch.nn.modules.transformer import TransformerEncoder, TransformerEncoderLayer\nimport torch.nn.utils.rnn as rnn_utils\nimport torch.nn.functional as F\n\nclass _ATTR_NETWORK(nn.Module):\n    def __init__(self, vocab_obj, args, device):\n        super(_ATTR_NETWORK, self).__init__()\n\n        self.m_device = device\n        \n        self.m_vocab_size = vocab_obj.vocab_size\n        self.m_user_num = vocab_obj.user_num\n        self.m_item_num = vocab_obj.item_num\n\n        self.m_attr_embed_size = args.attr_emb_size\n        self.m_user_embed_size = args.user_emb_size\n        self.m_item_embed_size = args.item_emb_size\n\n        self.m_attn_head_num = args.attn_head_num\n        self.m_attn_layer_num = args.attn_layer_num\n\n        self.m_output_hidden_size = args.output_hidden_size\n\n        self.m_attn_linear_size = args.attn_linear_size\n\n        self.m_attr_embedding = nn.Embedding(self.m_vocab_size, self.m_attr_embed_size)\n        self.m_user_embedding = nn.Embedding(self.m_user_num, self.m_user_embed_size)\n        self.m_item_embedding = nn.Embedding(self.m_item_num, self.m_item_embed_size)\n\n        encoder_layers = TransformerEncoderLayer(self.m_attr_embed_size, self.m_attn_head_num, self.m_attn_linear_size)\n        self.m_attn = TransformerEncoder(encoder_layers, self.m_attn_layer_num)\n\n        self.m_gamma = args.gamma\n\n        self.m_output_attr_embedding_user = nn.Embedding(self.m_vocab_size, self.m_attr_embed_size*2)\n        self.m_output_attr_embedding_item = nn.Embedding(self.m_vocab_size, self.m_attr_embed_size*2)\n        \n        self.m_attr_tf_user = nn.Linear(1, 1)\n        self.m_attr_tf_item = nn.Linear(1, 1)\n        \n        self.f_init_weight()\n\n        self = self.to(self.m_device)\n\n    def f_init_weight(self):\n        initrange = 0.1\n        torch.nn.init.uniform_(self.m_output_attr_embedding_user.weight, -initrange, initrange)\n        torch.nn.init.uniform_(self.m_output_attr_embedding_item.weight, -initrange, initrange)\n\n        torch.nn.init.uniform_(self.m_attr_embedding.weight, -initrange, initrange)\n        # torch.nn.init.normal_(self.m_tag_item_embedding.weight, 0.0, 0.01)\n        torch.nn.init.uniform_(self.m_user_embedding.weight, -initrange, initrange)\n        torch.nn.init.uniform_(self.m_item_embedding.weight, -initrange, initrange)\n\n        torch.nn.init.uniform_(self.m_attr_tf_user.weight, -initrange, initrange)\n        torch.nn.init.uniform_(self.m_attr_tf_item.weight, -initrange, initrange)\n\n    def f_generate_mask(self, length):\n        max_len = length.max().item()\n        mask = torch.arange(0, max_len).expand(len(length), max_len).to(length.device)\n        mask = mask < length.unsqueeze(1)\n\n        mask = ~mask\n\n        return mask\n\n    def f_get_tf_weight_user(self, attr, tf):\n        \n        ## tf: batch_size*seq_len\n        attr_tf_weight = self.m_attr_tf_user(tf.unsqueeze(-1))\n\n        ## attr_tf_weight: batch_size*seq_len*1\n\n        return attr_tf_weight\n\n    def f_get_tf_weight_item(self, attr, tf):\n        \n        ## tf: batch_size*seq_len\n        attr_tf_weight = self.m_attr_tf_item(tf.unsqueeze(-1))\n\n        ## attr_tf_weight: batch_size*seq_len*1\n\n        return attr_tf_weight\n\n    def f_get_avg_attr_user(self, attr, attr_lens, attr_tf):\n        ### attr_user_embed: batch_size*seq_len*embed_size\n        attr_user_embed = self.m_attr_embedding(attr) \n\n        attr_user_mask = self.f_generate_mask(attr_lens)\n\n        ## item_attr_user_logits: batch_size*seq_len*1\n        attr_user_logits = self.f_get_tf_weight_user(attr, attr_tf)\n\n        masked_attr_user_embed = attr_user_logits*attr_user_embed*((~attr_user_mask).unsqueeze(-1))\n        # masked_attr_user_embed = attr_user_embed*((~attr_user_mask).unsqueeze(-1))\n\n        attr_user = masked_attr_user_embed.sum(1)/((~attr_user_mask).sum(1).unsqueeze(-1))\n\n        return attr_user, attr_user_mask\n\n    def f_get_avg_attr_item(self, attr, 
attr_lens, attr_tf):\n attr_item_embed = self.m_attr_embedding(attr)\n \n attr_item_mask = self.f_generate_mask(attr_lens)\n\n ### attr_item_logits: batch_size*attr_len_item*1\n attr_item_logits = self.f_get_tf_weight_item(attr, attr_tf)\n\n masked_attr_item_embed = attr_item_logits*attr_item_embed*((~attr_item_mask).unsqueeze(-1))\n # masked_attr_item_embed = attr_item_embed*((~attr_item_mask).unsqueeze(-1))\n\n attr_item = masked_attr_item_embed.sum(1)/((~attr_item_mask).sum(1).unsqueeze(-1))\n\n return attr_item, attr_item_mask\n\n def f_get_logits(self, embed, attr):\n logits = torch.matmul(embed, attr.unsqueeze(-1))\n logits = logits.squeeze(-1)\n\n return logits\n\n def forward(self, attr_item, attr_tf_item, attr_lens_item, item_ids, attr_user, attr_tf_user, attr_lens_user, user_ids, pos_targets, pos_lens, neg_targets, neg_lens):\n # print(\"===\"*10)\n\n \"\"\" item \"\"\"\n\n attr_attn_item, attr_item_mask = self.f_get_avg_attr_item(attr_item, attr_lens_item, attr_tf_item) \n\n ### user_x: batch_size*user_embed\n user_embed = self.m_user_embedding(user_ids)\n \n # \"\"\" user \"\"\"\n\n attr_attn_user, attr_user_mask = self.f_get_avg_attr_user(attr_user, attr_lens_user, attr_tf_user)\n \n item_embed = self.m_item_embedding(item_ids)\n\n # user_output = user_embed\n # item_output = item_embed\n\n user_output = torch.cat([user_embed, attr_attn_user], dim=-1)\n item_output = torch.cat([item_embed, attr_attn_item], dim=-1)\n\n # user_output = user_embed+item_attr_user_output*self.m_gamma\n # item_output = item_embed+user_attr_item_output*self.m_gamma\n \n neg_embed_user = self.m_output_attr_embedding_user(neg_targets)\n neg_embed_item = self.m_output_attr_embedding_item(neg_targets)\n\n ### user_item_output: batch_size*ouput_size\n ### neg_logits: batch_size*neg_num\n # neg_logits_user = torch.matmul(neg_embed_user, user_output.unsqueeze(-1))\n # neg_logits_user = neg_logits_user.squeeze(-1)\n\n neg_logits_user = self.f_get_logits(neg_embed_user, user_output)\n\n # neg_logits_item = torch.matmul(neg_embed_item, item_output.unsqueeze(-1))\n # neg_logits_item = neg_logits_item.squeeze(-1)\n\n neg_logits_item = self.f_get_logits(neg_embed_item, item_output)\n\n # print(\"neg_lens\", neg_lens)\n # exit()\n neg_mask = self.f_generate_mask(neg_lens)\n neg_mask = ~neg_mask\n\n ### targets: batch_size*pos_num\n\n pos_embed_user = self.m_output_attr_embedding_user(pos_targets)\n pos_embed_item = self.m_output_attr_embedding_item(pos_targets)\n\n ### user_item_output: batch_size*ouput_size\n ### neg_logits: batch_size*neg_num\n\n pos_logits_user = self.f_get_logits(pos_embed_user, user_output)\n\n pos_logits_item = self.f_get_logits(pos_embed_item, item_output)\n\n pos_logits = pos_logits_user+pos_logits_item\n neg_logits = neg_logits_user+neg_logits_item\n\n pos_mask = self.f_generate_mask(pos_lens)\n pos_mask = ~pos_mask\n\n logits = torch.cat([pos_logits, neg_logits], dim=-1)\n\n mask = torch.cat([pos_mask, neg_mask], dim=-1)\n\n new_targets = torch.cat([torch.ones_like(pos_targets), torch.zeros_like(neg_targets)], dim=1)\n\n new_targets = new_targets*mask\n\n return logits, mask, new_targets\n\n def f_eval_forward(self, attr_item, attr_tf_item, attr_lens_item, item_ids, attr_user, attr_tf_user, attr_lens_user, user_ids):\n\n \"\"\" item \"\"\"\n\n attr_attn_item, attr_item_mask = self.f_get_avg_attr_item(attr_item, attr_lens_item, attr_tf_item) \n\n ### user_x: batch_size*user_embed\n user_embed = self.m_user_embedding(user_ids)\n \n # ### weighted_user_attr_item_logits: 
batch_size*attr_len_item*output_size\n        \n        # print(\"attr_attn_item\", attr_attn_item.size())\n        # print(\"attr_item_mask\", attr_item_mask.size())\n\n        ### attr_item: batch_size*attr_size\n        ### attr_item_mask: batch_size*seq_len\n        \n        # \"\"\" user \"\"\"\n\n        attr_attn_user, attr_user_mask = self.f_get_avg_attr_user(attr_user, attr_lens_user, attr_tf_user)\n        \n        item_embed = self.m_item_embedding(item_ids)\n\n        # user_output = user_embed\n        # item_output = item_embed\n        \n        user_output = torch.cat([user_embed, attr_attn_user], dim=-1)\n        item_output = torch.cat([item_embed, attr_attn_item], dim=-1)\n\n        logits_user = torch.matmul(user_output, self.m_output_attr_embedding_user.weight.t())\n        logits_item = torch.matmul(item_output, self.m_output_attr_embedding_item.weight.t())\n\n        logits = logits_user+logits_item\n        \n        return logits","sub_path":"two_tower_bpr_attr_tf/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":8770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"209181038","text":"'''\nWrite a PowerPoint file\n'''\nimport win32com\nimport win32com.client\n\ndef makePPT(path):\n    ppt = win32com.client.Dispatch('PowerPoint.Application')\n    ppt.Visible = True\n    pptFile = ppt.Presentations.Add()\n    # Create a slide\n    page1 = pptFile.Slides.Add(1,1)\n    t1 = page1.Shapes[0].TextFrame.TextRange\n    t1.Text = \"sunck1\"\n    t2 = page1.Shapes[1].TextFrame.TextRange\n    t2.Text = \"sunck2\"\n    # the first argument is the slide index, the second is the layout type\n    page2 = pptFile.Slides.Add(2, 1)\n    t3 = page2.Shapes[0].TextFrame.TextRange\n    t3.Text = \"5108\"\n    t4 = page2.Shapes[1].TextFrame.TextRange\n    t4.Text = \"5208\"\n    page3 = pptFile.Slides.Add(3, 2)\n    t5 = page3.Shapes[0].TextFrame.TextRange\n    t5.Text = \"51081\"\n    t6 = page3.Shapes[1].TextFrame.TextRange\n    t6.Text = \"52081\"\n    # Save\n    pptFile.SaveAs(path)\n    pptFile.Close()\n    ppt.Quit()\npath = r'C:\\Users\\Administrator\\Desktop\\文件工作区\\workspace4\\5108.ppt'\nmakePPT(path)\n","sub_path":"python/python知识点/办公/写ppt.py","file_name":"写ppt.py","file_ext":"py","file_size_in_byte":935,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"614789562","text":"\"\"\"\nGets the list of users in an account as a list of 'dictionaries'\nUsage:\ntoken_string = '3AAABLblqZhC2YTAO******* Your integration key or access token ************ohF9DaLV-4GKbLuiM-0lmTtSq'\nuserList = list_users(token_string)\npprint(userList)\naccess elements of each 'dictionary' using list index + dictionary index\nprint(userList[0]['email'])\nprint(userList[0]['company'])\n\"\"\"\n\nimport requests\nfrom pprint import pprint # if needed for pprint\n\ndef list_users(api_token):\n    shard = 'na1'\n    baseUrl = 'https://api.' 
+ shard + '.echosign.com/api/rest/v5'\n usersUrl = baseUrl + '/users'\n headers = {\n 'Access-Token': api_token # Your access token or integration key here.\n }\n url = usersUrl\n return requests.get(url, headers=headers).json().get('userInfoList')\n","sub_path":"listUsers_V5.py","file_name":"listUsers_V5.py","file_ext":"py","file_size_in_byte":789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"23741712","text":"\nimport torch\n\nfrom models import BasicConv, RFB3, RFB4, SSDDetectionHead\n\n\ndef test_basic_conv():\n\n x = torch.rand(4, 16, 12, 12)\n in_planes = x.shape[1]\n out_planes = in_planes * 2\n kernel_size = 3\n conv = BasicConv(in_planes, out_planes, kernel_size, stride=1, padding=0, dilation=1, with_relu=True)\n y = conv(x)\n assert isinstance(y, torch.Tensor) and y.shape[1] == out_planes\n\n\ndef test_rfb3():\n\n x = torch.rand(4, 16, 12, 12)\n in_planes = x.shape[1]\n out_planes = in_planes * 2\n rfb = RFB3(in_planes, out_planes, stride=1, scale=0.1, visual=1)\n y = rfb(x)\n assert isinstance(y, torch.Tensor) and y.shape[1] == out_planes\n\n\ndef test_rfb4():\n\n x = torch.rand(4, 16, 12, 12)\n in_planes = x.shape[1]\n out_planes = in_planes * 2\n rfb = RFB4(in_planes, out_planes, stride=1, scale=0.1)\n y = rfb(x)\n assert isinstance(y, torch.Tensor) and y.shape[1] == out_planes\n\n\ndef test_ssd_detection_head():\n\n inputs = [\n torch.rand(4, 32, 16, 16),\n torch.rand(4, 64, 8, 8),\n torch.rand(4, 128, 4, 4),\n torch.rand(4, 128, 2, 2),\n ]\n\n in_planes_list = [x.shape[1] for x in inputs]\n num_anchors_list = [6 for _ in inputs]\n num_classes = 12\n head = SSDDetectionHead(in_planes_list, num_anchors_list, num_classes)\n loc, conf = head(inputs)\n assert isinstance(loc, torch.Tensor) and loc.shape[-1] == 4\n assert isinstance(conf, torch.Tensor) and conf.shape[-1] == num_classes\n","sub_path":"tests/test_models.py","file_name":"test_models.py","file_ext":"py","file_size_in_byte":1470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"924857","text":"from flask import Flask,render_template,jsonify,abort,request\r\nfrom flask_sqlalchemy import SQLAlchemy\r\nfrom datetime import datetime\r\n\r\nimport enum\r\nimport requests\r\nimport re\r\nimport json\r\nfrom date_validate import *\r\napp = Flask(__name__)\r\n\r\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///rides.db'\r\ndb = SQLAlchemy(app)\r\nhttp_count_rides=0\r\nclass Ride(db.Model):\r\n r_id = db.Column(db.Integer, primary_key=True)\r\n time= db.Column(db.String,nullable=False)\r\n src = db.Column(db.Integer,nullable=False)\r\n dest = db.Column(db.Integer,nullable=False)\r\n created_by = db.Column(db.String, nullable=False)\r\n\r\n def __repr__(self):\r\n return '\\n{\"rideID\": %d ,\"username\": %s, \"timestamp\" : %s}' % (self.r_id,self.created_by,self.time)\r\n\r\nclass ride_user(db.Model):\r\n\r\n r_id = db.Column(db.Integer, nullable=False,primary_key=True)\r\n username = db.Column(db.String, nullable=False,primary_key=True)\r\n\r\n\r\n def __repr__(self):\r\n return \"\\n{RideId : %d , User : %s}\" % (self.r_id,self.username)\r\n\r\n\r\n\r\ndef is_present_areanumber(num):\r\n f=open(\"AreaNameEnum.csv\",\"r\")\r\n H={}\r\n i=0\r\n for line in f:\r\n if i==0: # first line heading of the attribute\r\n i=i+1\r\n else:\r\n l=line.split(',') #values of the place in integer\r\n H[int(l[0])]=int(l[0])\r\n f.close()\r\n return (num in 
H)\r\n\r\n\r\n\r\n@app.route(\"/api/v1/rides/\",methods=[\"POST\",\"GET\",\"DELETE\",\"PUT\"])\r\ndef Ridelistings(rideId):\r\n #Join an existing ride\r\n global http_count_rides\r\n http_count_rides =http_count_rides + 1\r\n if request.method == \"POST\":\r\n flag = Ride.query.filter_by(r_id=rideId).first()\r\n #uflag= User.query.filter_by(username=request.get_json()[\"username\"]).first()\r\n header = {'origin':'34.200.182.72'}\r\n uflag=requests.get(\"http://loadbalancer-584558139.us-east-1.elb.amazonaws.com/api/v1/users\",json={},headers=header)\r\n does_exists = ride_user.query.filter(ride_user.r_id==rideId,ride_user.username==request.get_json()[\"username\"]).first()\r\n u = request.get_json()[\"username\"]\r\n #if flag is None or u not in uflag.text or does_exists is not None:\r\n #print(u not in uflag.text)\r\n #print(u)\r\n if flag is None or u not in uflag.text or does_exists is not None:\r\n return {},400\r\n else:\r\n shared = ride_user(r_id = rideId , username = request.get_json()[\"username\"])\r\n db.session.add(shared)\r\n db.session.commit()\r\n return {},200\r\n\r\n elif request.method == \"DELETE\":\r\n\r\n try:\r\n flag = Ride.query.filter_by(r_id=rideId).first()\r\n if flag is None:\r\n return {},405\r\n else:\r\n db.session.delete(flag)\r\n ride_user.query.filter_by(r_id=rideId).delete()\r\n db.session.commit()\r\n return {},200\r\n except:\r\n return {},500\r\n\r\n #List all the details of a given ride\r\n elif request.method == \"GET\":\r\n try:\r\n flag = Ride.query.filter_by(r_id=rideId).first()\r\n if flag is None:\r\n return {},400\r\n r = db.session.execute('select r_id,created_by,time,src,dest from Ride where r_id = {}'.format(rideId))\r\n d = {}\r\n for i in r:\r\n d[\"rideId\"] = i[0]\r\n d[\"created_by\"]=i[1]\r\n d[\"Timestamp\"]=i[2]\r\n d[\"source\"]=i[3]\r\n d[\"destination\"]=i[4]\r\n\r\n\r\n res = db.session.execute('select username from ride_user where r_id = {}'.format(rideId))\r\n l = []\r\n for i in res:\r\n l.append(i[0])\r\n d[\"users\"]=l\r\n return jsonify(d),200\r\n except KeyError:\r\n return {},400\r\n except:\r\n return {},500\r\n\r\n else:\r\n return {},405\r\n\r\n#3 and 4th\r\n@app.route(\"/api/v1/rides\",methods=[\"POST\",\"GET\",\"DELETE\",\"PUT\"])\r\ndef rides():\r\n #Create a new ride\r\n global http_count_rides\r\n http_count_rides = http_count_rides + 1\r\n try:\r\n if request.method == \"POST\":\r\n uname = request.get_json()[\"created_by\"]\r\n #flag = User.query.filter_by(username=uname).first()\r\n header = {'origin':'34.200.182.72'}\r\n flag = requests.get(\"http://loadbalancer-584558139.us-east-1.elb.amazonaws.com/api/v1/users\",json={},headers=header)\r\n if uname not in flag.text:\r\n return jsonify({\"flag\":\"user doesnt exist\"}),400\r\n\r\n # use enum to check !!\r\n src = int(request.get_json()[\"source\"])\r\n dest = int(request.get_json()[\"destination\"])\r\n t=str(request.get_json()[\"timestamp\"])\r\n res=date_and_time_validate(t)\r\n print(res)\r\n if(res!=True): #either date or time is in invalid format\r\n return jsonify({\"flag\":\"date or time invalid\"}),400\r\n if ((is_present_areanumber(src)) and (is_present_areanumber(dest)) and (src != dest )):\r\n \"\"\"r = requests.post(\"http://3.233.19.138/write/newride\",json = request.get_json())\r\n if r.text == \"ok\":\r\n\r\n return {\"flag\":\"added\"},200\r\n else:\r\n return {\"flag\":\"not added\"},500\"\"\"\r\n r = Ride(src=request.get_json()[\"source\"],dest=request.get_json()[\"destination\"],created_by=request.get_json()[\"created_by\"],time=t)\r\n 
db.session.add(r)\r\n db.session.commit()\r\n return {\"flag\":\"added\"},201\r\n\r\n else:\r\n return jsonify({\"flag\":\"invalid input\"}),400\r\n\r\n #List all upcoming rides for a given source and destination\r\n elif request.method == \"GET\":\r\n tm=FormatTheDate(str(datetime.datetime.now()))\r\n src = int(request.args.get(\"source\"))\r\n dest = int(request.args.get(\"destination\"))\r\n if (( is_present_areanumber(src) ) and ( is_present_areanumber(dest)) and (src != dest )):\r\n query = db.session.execute('select r_id,created_by,time from Ride where src={} and dest={}'.format(src,dest))\r\n l = []\r\n for row in query:\r\n d = {}\r\n d[\"rideId\"]=row[0]\r\n d[\"username\"]=row[1]\r\n if(isupcoming(tm,row[2])): #only if the ride is upcoming\r\n d[\"time\"]=row[2] #insert it into the res list\r\n l.append(d)\r\n if len(l)==0:\r\n return {},204\r\n else:\r\n return jsonify(l),200\r\n else:\r\n return jsonify({\"flag\":\"either is not integer\"}),400\r\n\r\n else:\r\n return {},405\r\n except Exception as e:\r\n print(e)\r\n return {},500\r\n@app.route(\"/api/v1/rides/count\",methods=[\"POST\",\"GET\",\"DELETE\",\"PUT\"])\r\ndef rides_count():\r\n global http_count_rides\r\n http_count_rides = http_count_rides + 1\r\n try:\r\n if request.method == \"GET\":\r\n query=db.session.execute(\"select * from Ride\")\r\n count = 0\r\n for row in query:\r\n count = count + 1\r\n l=[]\r\n l.append(count)\r\n return jsonify(l),200\r\n else:\r\n return {},405\r\n except:\r\n return {},400\r\n\r\n@app.route(\"/api/v1/db/clear\",methods=[\"POST\",\"GET\",\"DELETE\",\"PUT\"])\r\ndef clearDB():\r\n if request.method == \"POST\":\r\n is_empty=not(bool(request.json))\r\n if(is_empty):\r\n Ride.query.delete() # Ride DB clearing\r\n ride_user.query.delete() # Ride DB Clearing\r\n db.session.commit()\r\n return {},200\r\n else:\r\n return {},400\r\n else:\r\n return {},405\r\n\r\n@app.route(\"/api/v1/db/read/\",methods=[\"GET\"])\r\ndef view(name):\r\n flag = rflag = Ride.query.filter_by(created_by=name).first()\r\n print(flag)\r\n if flag:\r\n return name\r\n\r\n ride_user.query.filter_by(username=name).delete()\r\n db.session.commit()\r\n\r\n return \"ok\"\r\n\r\n\r\n#HTTP requests count and reset\r\n@app.route(\"/api/v1/_count\",methods=[\"POST\",\"GET\",\"DELETE\",\"PUT\"])\r\ndef count():\r\n global http_count_rides\r\n if request.method==\"GET\":\r\n l=[]\r\n l.append(http_count_rides)\r\n return jsonify(l),200\r\n elif request.method==\"DELETE\":\r\n http_count_rides=0\r\n return {},200\r\n else:\r\n return {},405\r\n\"\"\"\r\n@app.route(\"/api/v1/_count\",methods=[\"POST\",\"GET\",\"DELETE\",\"PUT\"])\r\ndef reset():\r\n global http_count_rides\r\n if request.method==\"DELETE\":\r\n http_count_rides=0\r\n return {},200\r\n else:\r\n return {},405\r\n\"\"\"\r\nif __name__== '__main__':\r\n db.create_all()\r\n app.debug=True\r\n app.run(host='0.0.0.0',port=8000)","sub_path":"CC_Assignment_3/rides/rides.py","file_name":"rides.py","file_ext":"py","file_size_in_byte":10787,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"300881165","text":"from utilities import reader\n\n# Mode\nmode = 'Train' # Available modes: 'Train', 'Test', 'Classify'\n\n# Saving/loading paths\nmodel_name = 'crispr_5000_18_batch_stat' # A folder with this name will be created to store checkpoints and tensorboard data.\nmodel_path = 'models/' + model_name + '/' # Do not modify this unless you changed the internal structure of the repo.\nif mode == 'Classify':\n 
classification_results_file_path = 'crispr_5000_cls.txt' # This file will contain the classification results.\n\n# Specify the maximal word length. Shorter words will be\n# padded and the LSTM will stop when reaching the padding.\n# Setting a fixed n_steps increases computing performance.\nn_steps = 56\n\n# Specify positive and negative set text files.\n# If you run the Spaghetti Net in test or classification\n# mode, the training data will be loaded directly from\n# the model folder.\nif mode == 'Train':\n positive_text = reader.import_text(\n 'datasets/crispr/crispr_training_positive_set.txt')\n negative_text = reader.import_text(\n 'datasets/crispr/crispr_training_negative_set.txt')\n\nelif mode == 'Test':\n positive_text = reader.import_text(\n 'datasets/crispr/crispr_test_positive_set.txt')\n negative_text = reader.import_text(\n 'datasets/crispr/crispr_test_negative_set.txt')\n\nelse:\n classification_text = reader.import_text(\n 'datasets/crispr/crispr_words.txt')\n","sub_path":"run_parameters.py","file_name":"run_parameters.py","file_ext":"py","file_size_in_byte":1417,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"409269589","text":"\"\"\"\nUtilities for loading datasets\n\"\"\"\n\nimport os\nimport pandas as pd\n\nfrom ..utils.load_data import load_from_tsfile_to_dataframe\n\n__all__ = [\"load_gunpoint\",\n \"load_arrow_head\",\n \"load_italy_power_demand\",\n \"load_shampoo_sales\",\n \"load_longley\"]\n__author__ = ['Markus Löning', 'Sajay Ganesh']\n\nDIRNAME = 'data'\nMODULE = os.path.dirname(__file__)\n\n\n# time series classification datasets\n\ndef _load_dataset(name, split, return_X_y):\n \"\"\"\n Helper function to load datasets.\n \"\"\"\n\n if split in [\"TRAIN\", \"TEST\"]:\n fname = name + '_' + split + '.ts'\n abspath = os.path.join(MODULE, DIRNAME, name, fname)\n X, y = load_from_tsfile_to_dataframe(abspath)\n elif split == \"ALL\":\n X = pd.DataFrame()\n y = pd.Series()\n for split in [\"TRAIN\", \"TEST\"]:\n fname = name + '_' + split + '.ts'\n abspath = os.path.join(MODULE, DIRNAME, name, fname)\n result = load_from_tsfile_to_dataframe(abspath)\n X = pd.concat([X, pd.DataFrame(result[0])])\n y = pd.concat([y, pd.Series(result[1])])\n else:\n raise ValueError(\"Invalid split value\")\n\n # Return appropriately\n if return_X_y:\n return (X, y)\n else:\n X['class_val'] = pd.Series(y)\n return X\n\n\ndef load_gunpoint(split='TRAIN', return_X_y=False):\n \"\"\"\n Loads the GunPoint time series classification problem and returns X and y\n\n Parameters\n ----------\n split: str{\"ALL\", \"TRAIN\", \"TEST\"}, optional (default=\"TRAIN\")\n Whether to load the train or test partition of the problem. By default it loads the train split.\n return_X_y: bool, optional (default=False)\n If True, returns (features, target) separately instead of a single dataframe with columns for\n features and the target.\n\n Returns\n -------\n X: pandas DataFrame with m rows and c columns\n The time series data for the problem with m cases and c dimensions\n y: numpy array\n The class labels for each case in X\n\n Details\n -------\n Dimensionality: univariate\n Series length: 150\n Train cases: 50\n Test cases: 150\n Number of classes: 2\n\n This dataset involves one female actor and one male actor making a motion with their\n hand. The two classes are: Gun-Draw and Point: For Gun-Draw the actors have their\n hands by their sides. 
They draw a replicate gun from a hip-mounted holster, point it\n at a target for approximately one second, then return the gun to the holster, and\n their hands to their sides. For Point the actors have their gun by their sides.\n They point with their index fingers to a target for approximately one second, and\n then return their hands to their sides. For both classes, we tracked the centroid\n of the actor's right hands in both X- and Y-axes, which appear to be highly\n correlated. The data in the archive is just the X-axis.\n\n Dataset details: http://timeseriesclassification.com/description.php?Dataset=GunPoint\n \"\"\"\n name = 'GunPoint'\n return _load_dataset(name, split, return_X_y)\n\n\ndef load_italy_power_demand(split='TRAIN', return_X_y=False):\n \"\"\"\n Loads the ItalyPowerDemand time series classification problem and returns X and y\n\n Parameters\n ----------\n split: str{\"ALL\", \"TRAIN\", \"TEST\"}, optional (default=\"TRAIN\")\n Whether to load the train or test partition of the problem. By default it loads the train split.\n return_X_y: bool, optional (default=False)\n If True, returns (features, target) separately instead of a single dataframe with columns for\n features and the target.\n\n Returns\n -------\n X: pandas DataFrame with m rows and c columns\n The time series data for the problem with m cases and c dimensions\n y: numpy array\n The class labels for each case in X\n\n Details\n -------\n Dimensionality: univariate\n Series length: 24\n Train cases: 67\n Test cases: 1029\n Number of classes: 2\n\n The data was derived from twelve monthly electrical power demand time series from Italy and\n first used in the paper \"Intelligent Icons: Integrating Lite-Weight Data Mining and\n Visualization into GUI Operating Systems\". The classification task is to distinguish days\n from Oct to March (inclusive) from April to September.\n\n Dataset details: http://timeseriesclassification.com/description.php?Dataset=ItalyPowerDemand\n \"\"\"\n\n name = 'ItalyPowerDemand'\n return _load_dataset(name, split, return_X_y)\n\n\ndef load_arrow_head(split='TRAIN', return_X_y=False):\n \"\"\"\n Loads the ArrowHead time series classification problem and returns X and y.\n\n Parameters\n ----------\n split: str{\"ALL\", \"TRAIN\", \"TEST\"}, optional (default=\"TRAIN\")\n Whether to load the train or test partition of the problem. By default it loads the train split.\n return_X_y: bool, optional (default=False)\n If True, returns (features, target) separately instead of a single dataframe with columns for\n features and the target.\n\n Returns\n -------\n X: pandas DataFrame with m rows and c columns\n The time series data for the problem with m cases and c dimensions\n y: numpy array\n The class labels for each case in X\n\n Details\n -------\n Dimensionality: univariate\n Series length: 251\n Train cases: 36\n Test cases: 175\n Number of classes: 3\n\n The arrowhead data consists of outlines of the images of arrowheads. The shapes of the\n projectile points are converted into a time series using the angle-based method. The\n classification of projectile points is an important topic in anthropology. The classes\n are based on shape distinctions such as the presence and location of a notch in the\n arrow. The problem in the repository is a length normalised version of that used in\n Ye09shapelets. 
The three classes are called \"Avonlea\", \"Clovis\" and \"Mix\".\n\n    Dataset details: http://timeseriesclassification.com/description.php?Dataset=ArrowHead\n    \"\"\"\n\n    name = 'ArrowHead'\n    return _load_dataset(name, split, return_X_y)\n\n\n# forecasting datasets\n\ndef load_shampoo_sales(return_y_as_dataframe=False):\n    \"\"\"\n    Load the shampoo sales univariate time series forecasting dataset.\n\n    Parameters\n    ----------\n    return_y_as_dataframe: bool, optional (default=False)\n        Whether to return target series as series or dataframe, useful for high-level interface.\n        - If True, returns target series as pandas.DataFrame.\n        - If False, returns target series as pandas.Series.\n\n    Returns\n    -------\n    y : pandas Series/DataFrame\n        Shampoo sales dataset\n\n    Details\n    -------\n    This dataset describes the monthly number of sales of shampoo over a 3 year period.\n    The units are a sales count.\n\n    Dimensionality: univariate\n    Series length: 36\n    Frequency: Monthly\n    Number of cases: 1\n\n\n    References\n    ----------\n    ..[1] Makridakis, Wheelwright and Hyndman (1998) Forecasting: methods and applications,\n        John Wiley & Sons: New York. Chapter 3.\n    \"\"\"\n\n    name = 'ShampooSales'\n    fname = name + '.csv'\n    path = os.path.join(MODULE, DIRNAME, name, fname)\n    data = pd.read_csv(path, index_col=0)\n    data.index = pd.PeriodIndex(data.index, freq='M')\n    if return_y_as_dataframe:\n        # return nested pandas DataFrame with a single row and column\n        return pd.DataFrame(pd.Series([pd.Series(data.squeeze())]), columns=[name])\n    else:\n        # return nested pandas Series with a single row\n        return pd.Series([data.iloc[:, 0]], name=name)\n\n\ndef load_longley(return_X_y=False, return_y_as_dataframe=False):\n    \"\"\"\n    Load the Longley dataset for forecasting with exogenous variables.\n\n\n    Parameters\n    ----------\n    return_y_as_dataframe: bool, optional (default=False)\n        Whether to return target series as series or dataframe, useful for high-level interface.\n        - If True, returns target series as pandas.DataFrame.\n        - If False, returns target series as pandas.Series.\n    return_X_y: bool, optional (default=False)\n        If True, returns (features, target) separately instead of a single dataframe with columns for\n        features and the target.\n\n    Returns\n    -------\n    X: pandas.DataFrame\n        The exogenous time series data for the problem.\n    y: pandas.Series\n        The target series to be predicted.\n\n    Details\n    -------\n    This dataset contains various US macroeconomic variables from 1947 to 1962 that are known to be highly\n    collinear.\n\n    Dimensionality: multivariate, 6\n    Series length: 16\n    Frequency: Yearly\n    Number of cases: 1\n\n    Variable description:\n\n    TOTEMP - Total employment (y)\n    GNPDEFL - Gross national product deflator\n    GNP - Gross national product\n    UNEMP - Number of unemployed\n    ARMED - Size of armed forces\n    POP - Population\n    YEAR - Calendar year (index)\n\n    References\n    ----------\n    ..[1] Longley, J.W. (1967) \"An Appraisal of Least Squares Programs for the\n        Electronic Computer from the Point of View of the User.\" Journal of\n        the American Statistical Association. 
62.319, 819-41.\n (https://www.itl.nist.gov/div898/strd/lls/data/LINKS/DATA/Longley.dat)\n \"\"\"\n\n if return_y_as_dataframe and not return_X_y:\n raise ValueError(\"`return_y_as_dataframe` can only be set to True if `return_X_y` is True, \"\n \"otherwise y is given as a column in the returned dataframe and \"\n \"cannot be returned as a separate dataframe.\")\n\n name = 'Longley'\n fname = name + '.csv'\n path = os.path.join(MODULE, DIRNAME, name, fname)\n data = pd.read_csv(path, index_col=0)\n data = data.set_index('YEAR')\n data.index = pd.PeriodIndex(data.index, freq='Y')\n\n # Get target series\n yname = 'TOTEMP'\n y = data.pop(yname)\n y = pd.Series([y], name=yname)\n\n # Get feature series\n X = pd.DataFrame([pd.Series([data.iloc[:, i]]) for i in range(data.shape[1])]).T\n X.columns = data.columns\n\n if return_X_y:\n if return_y_as_dataframe:\n y = pd.DataFrame(pd.Series([pd.Series(y.squeeze())]), columns=[yname])\n return X, y\n else:\n return X, y\n else:\n X[yname] = y\n return X\n","sub_path":"sktime/datasets/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":10424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"}
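Every loader in the sktime record above funnels through the same `_load_dataset(name, split, return_X_y)` call and hands back nested pandas objects. A minimal usage sketch, assuming sktime is installed and exposes the functions at the record's `sub_path`; the expected shapes follow the ArrowHead figures quoted in its docstring:

```python
# Hypothetical consumption of the loaders defined in the record above;
# the import path mirrors the record's sub_path (sktime/datasets/base.py).
from sktime.datasets.base import load_arrow_head

# return_X_y=True returns features and labels separately.
X, y = load_arrow_head(split='TRAIN', return_X_y=True)

print(X.shape)          # (36, 1): 36 train cases, univariate -> 1 column
print(y.shape)          # (36,): one class label per case

# Nested convention: each cell of X holds an entire pandas Series.
first_case = X.iloc[0, 0]
print(len(first_case))  # 251 time points, per the docstring
```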
+{"seq_id":"256920773","text":"import feedparser\nimport helpers as h\nfrom pylons import config\n\n__all__ = ['delicious', 'flickr', 'twitter']\n\ndef delicious():\n delicious_feed = feedparser.parse('http://feeds.delicious.com/v2/rss/%s?count=10' % config['delicious.username'])\n if len(delicious_feed.entries):\n items = []\n template = \"\"\"\n <div>\n <h2>Bookmarks</h2>\n <ul>\n %s\n </ul>\n </div>\n \"\"\"\n for entry in delicious_feed.entries[:7]:\n i = '<li>%s</li>'\n link = '<a href=\"%s\" title=\"%s\">%s</a> (%s)'\n link = link % (entry['guid'], entry['title'], entry['title'], entry.updated[:11])\n items.append(i % link)\n return template % '\\n'.join(items)\n else:\n return ''\n\ndef flickr():\n flickr_feed = feedparser.parse('http://api.flickr.com/services/feeds/photos_public.gne?id=%s@N00&lang=en-us&format=atom' % config['flickr.id'])\n if len(flickr_feed.entries):\n items = []\n template = \"\"\"\n <div>\n <h2>Public Flickr Stream</h2>\n <ul>\n %s\n </ul>\n </div>\n \"\"\"\n for entry in flickr_feed.entries[:12]:\n image = entry['enclosures'][0]['href']\n image = image.replace('m.jpg', 's.jpg')\n i = '<li>%s</li>' % h.link_to(\n h.literal('<img src=\"%s\" title=\"%s\" alt=\"%s\" />' % (image, entry['title'], entry['title'])),\n entry['link']\n )\n items.append(i)\n return template % '\\n'.join(items)\n else:\n return ''\n\ndef twitter():\n twitter_feed = feedparser.parse(\"http://twitter.com/statuses/user_timeline/%s.rss\" % config['twitter.user.screen_name'])\n if len(twitter_feed.entries):\n items = []\n template = \"\"\"\n <div>\n <h2>Twitter Updates (<a href=\"http://twitter.com/%s\">Follow</a>)</h2>\n <ul>\n %s\n </ul>\n </div>\n \"\"\"\n\n for entry in twitter_feed.entries[:4]:\n description = entry['description'].split(':', 1)[1]\n i = '<li>%s %s</li>' % (h.link_to(\n entry.updated[:14],\n entry['guid']),\n h.auto_link(description)\n )\n items.append(i)\n return template % (config['twitter.user.screen_name'], '\\n'.join(items))\n else:\n return ''","sub_path":"wurdig/lib/feed_display.py","file_name":"feed_display.py","file_ext":"py","file_size_in_byte":2797,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"}
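All three widgets in the feed_display record above repeat one parse-render-interpolate pattern: fetch a feed with `feedparser`, format at most N entries as `<li>` items, then splice them into a heading-plus-list template. A stripped-down sketch of that pattern with the Pylons helpers removed; the URL, heading, and markup here are illustrative:

```python
# Generic form of the feed widgets above; feedparser is the same
# third-party library, everything else is a placeholder.
import feedparser

def render_feed(url, heading, limit=5):
    feed = feedparser.parse(url)
    if not feed.entries:
        return ''
    # One <li> per entry, mirroring the template % '\n'.join(items) idiom.
    items = ['<li><a href="%s">%s</a></li>' % (e['link'], e['title'])
             for e in feed.entries[:limit]]
    return '<h2>%s</h2>\n<ul>\n%s\n</ul>' % (heading, '\n'.join(items))

print(render_feed('https://example.com/feed.rss', 'Latest posts'))
```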
+{"seq_id":"134116038","text":"import os\nimport sys\nimport subprocess\nimport logging\nimport time\n\nfrom fabric import Connection\n\nfrom . import settings\n\nlogger = logging.getLogger(__name__)\n\n\nCODE_PATH = \"/home/ubuntu/code\"\n\n\ndef run_powerbi(instance, run_id: str):\n \"\"\"Run PowerBI processing on the remote server\"\"\"\n run_id = run_id.lower()\n msg = \"Running PowerBI processing for run %s on AWS instance %s\"\n logger.info(msg, run_id, instance[\"InstanceId\"])\n with get_connection(instance) as conn:\n update_repo(conn)\n install_requirements(conn)\n pipeline_name = \"RunPowerBI\"\n pipeline_args = {\n \"run-id\": run_id,\n \"workers\": 15,\n }\n run_luigi_pipeline(conn, pipeline_name, pipeline_args)\n logger.info(\"PowerBI processing completed for %s\", run_id)\n\n\ndef run_full_model(instance, run_id: str, burn_in: int, use_latest_code: bool):\n \"\"\"Run full model job on the remote server\"\"\"\n run_id = run_id.lower()\n msg = \"Running full models for run %s burn-in %s on AWS instance %s\"\n logger.info(msg, run_id, burn_in, instance[\"InstanceId\"])\n with get_connection(instance) as conn:\n if use_latest_code:\n update_repo(conn)\n else:\n set_run_id(conn, run_id)\n\n install_requirements(conn)\n model_name, _, _ = read_run_id(run_id)\n pipeline_name = \"RunFullModels\"\n pipeline_args = {\n \"run-id\": run_id,\n \"FullModelRunTask-burn-in\": burn_in,\n \"FullModelRunTask-model-name\": model_name,\n \"workers\": 15,\n }\n run_luigi_pipeline(conn, pipeline_name, pipeline_args)\n logger.info(\"Full model runs completed for %s\", run_id)\n\n\ndef run_calibration(\n instance, model_name: str, num_chains: int, runtime: int, branch: str,\n):\n \"\"\"Run calibration job on the remote server\"\"\"\n msg = \"Running calibration %s with %s chains for %s seconds on AWS instance %s.\"\n logger.info(msg, model_name, num_chains, runtime, instance[\"InstanceId\"])\n with get_connection(instance) as conn:\n update_repo(conn, branch=branch)\n install_requirements(conn)\n run_id = get_run_id(conn, model_name)\n pipeline_name = \"RunCalibrate\"\n pipeline_args = {\n \"run-id\": run_id,\n \"num-chains\": num_chains,\n \"workers\": num_chains,\n \"CalibrationChainTask-model-name\": model_name,\n \"CalibrationChainTask-runtime\": runtime,\n }\n run_luigi_pipeline(conn, pipeline_name, pipeline_args)\n logger.info(\"Calibration completed for %s\", run_id)\n\n\ndef run_luigi_pipeline(conn: Connection, pipeline_name: str, pipeline_args: dict):\n \"\"\"Run a Luigi pipeline on the remote machine\"\"\"\n # TODO: Figure out a way to run non-local scheduler\n # while ensuring that we track the task from start tofinush\n # start_luigi_scheduler(conn, instance)\n logger.info(\"Running Luigi pipleine %s\", pipeline_name)\n pipeline_args_str = \" \".join([f\"--{k} {v}\" for k, v in pipeline_args.items()])\n cmd_str = f\"LUIGI_CONFIG_PATH=tasks/luigi.cfg ./env/bin/python -m luigi --module tasks --local-scheduler --logging-conf-file tasks/luigi-logging.ini {pipeline_name} {pipeline_args_str}\"\n with conn.cd(CODE_PATH):\n conn.run(cmd_str, echo=True)\n\n logger.info(\"Finished running Luigi 
pipleine %s\", pipeline_name)\n # upload_luigi_logs(conn, \"calibrate\", run_id)\n # Note: this log line is used by Buildkite so don't change it.\n\n\ndef upload_luigi_logs(conn: Connection, log_folder_name: str, run_id: str):\n \"\"\"Upload Luigi log files from remote server to S3\"\"\"\n logger.info(\"Uploading Luigi log files.\")\n src = \"/home/ubuntu/code/data/outputs/luigid/luigi-server.log\"\n dest = f\"{run_id}/logs/{log_folder_name}/luigi-server.log\"\n copy_s3(conn, src, dest)\n src = \"/home/ubuntu/code/data/outputs/remote/luigi-worker.log\"\n dest = f\"{run_id}/logs/{log_folder_name}/luigi-worker.log\"\n copy_s3(conn, src, dest)\n\n\ndef copy_s3(conn: Connection, src_path: str, dest_key: str):\n with conn.cd(CODE_PATH):\n conn.run(f\"./env/bin/aws s3 cp {src_path} s3://{settings.S3_BUCKET}/{dest_key}\", echo=True)\n\n\ndef start_luigi_scheduler(conn: Connection, instance):\n \"\"\"Start the Luigi scheduling server\"\"\"\n ip = instance[\"ip\"]\n url = f\"http://{ip}:8082/static/visualiser/index.html\"\n logger.info(\"Starting Luigi scheduling server\")\n log_dir = \"/home/ubuntu/code/data/outputs/luigid\"\n conn.run(f\"mkdir -p {log_dir}\")\n cmd_str = (\n \"/home/ubuntu/code/env/bin/luigid\"\n \" --background\"\n f\" --logdir {log_dir}\"\n \" --address 0.0.0.0\"\n \" --port 8082\"\n )\n conn.sudo(cmd_str, echo=True)\n logger.info(\"Started Luigi scheduling server\")\n logger.info(\"Luigi server available at %s\", url)\n\n\ndef read_run_id(run_id: str):\n \"\"\"Read data from run id\"\"\"\n parts = run_id.split(\"-\")\n git_commit = parts[-1]\n timestamp = parts[-2]\n model_name = \"-\".join(parts[:-2])\n return model_name, timestamp, git_commit\n\n\ndef set_run_id(conn: Connection, run_id: str):\n \"\"\"Set git to use the commit for a given run ID\"\"\"\n logger.info(\"Setting up repo using a run id %s\", run_id)\n conn.sudo(f\"chown -R ubuntu:ubuntu {CODE_PATH}\", echo=True)\n _, _, commit = read_run_id(run_id)\n with conn.cd(CODE_PATH):\n conn.run(\"git fetch --quiet\", echo=True)\n conn.run(f\"git checkout --quiet {commit}\", echo=True)\n\n logger.info(\"Done updating repo.\")\n\n\ndef get_run_id(conn: Connection, job_name: str):\n \"\"\"Get the run ID for a given job name name\"\"\"\n logger.info(\"Building run id.\")\n with conn.cd(CODE_PATH):\n git_commit = conn.run(\"git rev-parse HEAD\", hide=\"out\").stdout.strip()\n\n git_commit_short = git_commit[:7]\n timestamp = int(time.time())\n run_id = f\"{job_name}-{timestamp}-{git_commit_short}\"\n logger.info(\"Using run id %s\", run_id)\n return run_id\n\n\ndef update_repo(conn: Connection, branch: str = \"master\"):\n \"\"\"Update remote Git repo to use the latest code\"\"\"\n logger.info(\"Updating git repository to run the latest code.\")\n conn.sudo(f\"chown -R ubuntu:ubuntu {CODE_PATH}\", echo=True)\n with conn.cd(CODE_PATH):\n conn.run(\"git fetch --quiet\", echo=True)\n conn.run(f\"git checkout --quiet {branch}\", echo=True)\n conn.run(\"git pull --quiet\", echo=True)\n logger.info(\"Done updating repo.\")\n\n\ndef install_requirements(conn: Connection):\n \"\"\"Install Python requirements on remote server\"\"\"\n logger.info(\"Ensuring latest Python requirements are installed.\")\n with conn.cd(CODE_PATH):\n conn.run(\"./env/bin/pip install --quiet -r requirements.txt\", echo=True)\n logger.info(\"Finished installing requirements.\")\n\n\ndef get_connection(instance):\n ip = instance[\"ip\"]\n key_filepath = os.path.expanduser(f\"~/.ssh/{settings.EC2_KEYFILE}\")\n return Connection(host=ip, user=\"ubuntu\", 
connect_kwargs={\"key_filename\": key_filepath},)\n\n\nSSH_OPTIONS = {\n \"StrictHostKeyChecking\": \"no\",\n # https://superuser.com/questions/522094/how-do-i-resolve-a-ssh-connection-closed-by-remote-host-due-to-inactivity\n \"TCPKeepAlive\": \"yes\",\n \"ServerAliveInterval\": \"30\",\n}\nSSH_OPT_STR = \" \".join([f\"-o {k}={v}\" for k, v in SSH_OPTIONS.items()])\nSSH_KEY_STR = f\"-i ~/.ssh/{settings.EC2_KEYFILE}\"\nSSH_ARGS = f\"{SSH_OPT_STR} {SSH_KEY_STR}\"\n\n\ndef ssh_interactive(instance):\n ip = instance[\"ip\"]\n name = instance[\"name\"]\n logger.info(f\"Starting SSH session with instance {name}.\")\n cmd_str = f\"ssh {SSH_ARGS} ubuntu@{ip}\"\n logger.info(\"Entering ssh session with: %s\", cmd_str)\n subprocess.call(cmd_str, shell=True)\n","sub_path":"scripts/aws/infra/remote.py","file_name":"remote.py","file_ext":"py","file_size_in_byte":7643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"208260945","text":"class Node():\n\tdef __init__(self,key):\n\t\tself.l = None\n\t\tself.r = None\n\t\tself.val = key\ndef insert(root, val):\n\tif val > root.val:\n\t\tif not root.r:\n\t\t\troot.r = Node(val)\n\t\telse:\n\t\t\tinsert(root.r,val)\n\tif val <= root.val:\n\t\tif not root.l:\n\t\t\troot.l = Node(val)\n\t\telse:\n\t\t\tinsert(root.l,val)\ndef search(root, k):\n\tif root:\n\t\tif root.val == k:\n\t\t\treturn root.val\n\t\telif root.val < k:\n\t\t\tprint(root.val)\n\t\t\treturn search(root.l,k)\n\t\telse:\n\t\t\tprint(root.val)\n\t\t\treturn search(root.r,k)\ndef delete(root, k):\n\tif root:\n\t\tif root.val == k:\n\t\t\tif not root.l:\n\t\t\t\troot = root.r\n\t\t\telse:\n\t\t\t\tfar_temp, temp = root, root.l\n\t\t\t\twhile temp.r:\n\t\t\t\t\tfar_temp = temp\n\t\t\t\t\ttemp = temp.r\n\t\t\t\troot.val = temp.val\n\t\t\t\tif temp.l:\n\t\t\t\t\tfar_temp.r = temp.l\n\t\telif root.val < k:\n\t\t\treturn delete(root.l, k)\n\t\telse:\n\t\t\treturn delete(root.r, k)\ndef bfs(root,k):\n\tif root:\n\t\tif root.val == k:\n\t\t\treturn root\n\t\telse:\n\t\t\tprint(root.val)\n\t\t\tbfs(root.l, k)\n\t\t\tbfs(root.r, k)\ng = Node(10)\ninsert(g,7)\ninsert(g,19)\ninsert(g,12)\ninsert(g,21)\ninsert(g,9)\ndelete(g,9)\nbfs(g,9)\n\t\t\n","sub_path":"Tree/BST.py","file_name":"BST.py","file_ext":"py","file_size_in_byte":1043,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"224244250","text":"import utils\r\n\r\n\r\nwith utils.get_input(23) as f:\r\n muls = 0\r\n registers = dict()\r\n input = [i.strip().split(' ') for i in f.readlines()]\r\n # registers['a'] = 1\r\n # registers['b'] = 0\r\n # registers['c'] = 0\r\n # registers['d'] = 0\r\n # registers['e'] = 0\r\n # registers['f'] = 0\r\n # registers['g'] = 0\r\n # registers['h'] = 0\r\n pos = 0\r\n while 0 <= pos < len(input):\r\n i = input[pos]\r\n action = i[0]\r\n try:\r\n x = int(i[1])\r\n except ValueError:\r\n x = i[1]\r\n if type(x) is int:\r\n vx = x\r\n else:\r\n if x not in registers:\r\n registers[x] = 0\r\n vx = registers[x]\r\n if len(i) > 2:\r\n try:\r\n y = int(i[2])\r\n except ValueError:\r\n y = i[2]\r\n if type(y) is int:\r\n vy = y\r\n else:\r\n if y not in registers:\r\n registers[y] = 0\r\n vy = registers[y]\r\n print(pos, action, x, vx, y, vy)\r\n if action == 'set':\r\n registers[x] = vy\r\n if action == 'sub':\r\n registers[x] -= vy\r\n if action == 'mul':\r\n registers[x] *= vy\r\n muls += 1\r\n if action == 'jnz':\r\n if vx != 0:\r\n pos += vy\r\n continue\r\n pos += 
1\r\nprint(registers['h'])\r\n","sub_path":"23b.py","file_name":"23b.py","file_ext":"py","file_size_in_byte":1418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"75797258","text":"#Constants\r\nposition_dict = { \r\n 1 : 'Center'\r\n , 2 : 'Left Wing'\r\n , 3 : 'Right Wing'\r\n , 4 : 'Defense'\r\n , 5 : 'Goalie'\r\n}\r\n\r\nteam_dict = {\r\n 1: 'Boston Bruins'\r\n , 2: 'Buffalo Sabres'\r\n , 3: 'Calgary Flames'\r\n , 4: 'Chicago Blackhawks'\r\n , 5: 'Detroit Red Wings'\r\n , 6: 'Edmonton Oilers'\r\n , 7: 'Carolina Hurricanes'\r\n , 8: 'Los Angeles Kings'\r\n , 9: 'Dallas Stars'\r\n , 10: 'Montréal Canadiens'\r\n , 11: 'New Jersey Devils'\r\n , 12: 'New York Islanders'\r\n , 13: 'New York Rangers'\r\n , 14: 'Ottawa Senators'\r\n , 15: 'Philadelphia Flyers'\r\n , 16: 'Pittsburgh Penguins'\r\n , 17: 'Colorado Avalanche'\r\n , 18: 'San Jose Sharks'\r\n , 19: 'St. Louis Blues'\r\n , 20: 'Tampa Bay Lightning'\r\n , 21: 'Toronto Maple Leafs'\r\n , 22: 'Vancouver Canucks'\r\n , 23: 'Washington Capitals'\r\n , 24: 'Arizona Coyotes'\r\n , 25: 'Anaheim Ducks'\r\n , 26: 'Florida Panthers'\r\n , 27: 'Nashville Predators'\r\n , 28: 'Winnipeg Jets'\r\n , 29: 'Columbus Blue Jackets'\r\n , 30: 'Minnesota Wild'\r\n , 37: 'Vegas Golden Knights'\r\n}\r\n","sub_path":"espn_api/hockey/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":1114,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"249147317","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat May 30 11:49:41 2020\n\n@author: shijiliu\n\"\"\"\nimport torch\nimport numpy as np\nimport random\n\nfrom ddpg_ensemble import DDPG_ENSEMBLE\nfrom utils import MultiAgentReplayBuffer\n\n\nBUFFER_SIZE = int(1e6) # replay buffer size\nBATCH_SIZE = 128 # minibatch size\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\nclass MADDPG_ENSEMBLE():\n def __init__(self,number_agents, obs_dim, action_dim, buffer_maxlen = BUFFER_SIZE, num_sub_policy = 3, batch_size = BATCH_SIZE):\n self.num_agents = number_agents\n self.num_sub_policy = int(num_sub_policy)\n self.replay_buffer = [MultiAgentReplayBuffer(self.num_agents, buffer_maxlen) for _ in range(self.num_sub_policy)]\n self.agents = [DDPG_ENSEMBLE(number_agents, obs_dim, action_dim, i, num_sub_policy = self.num_sub_policy) for i in range(self.num_agents)]\n self.subpolicy_array = np.arange(self.num_sub_policy)\n self.batch_size = batch_size\n \n def get_actions(self, states):\n actions = []\n sub_policy_id = random.choice(self.subpolicy_array)\n for i in range(self.num_agents):\n action = self.agents[i].get_action(states[i],sub_policy_id)\n actions.append(action)\n return actions, sub_policy_id\n \n def update(self, sub_policy_id):\n #print(\"sub_policy_id == \", sub_policy_id)\n obs_batch, indiv_action_batch, indiv_reward_batch, next_obs_batch, \\\n global_state_batch, global_actions_batch, global_next_state_batch, done_batch = self.replay_buffer[sub_policy_id].sample(self.batch_size)\n \n \n for i in range(self.num_agents):\n obs_batch_i = obs_batch[i]\n indiv_action_batch_i = indiv_action_batch[i]\n indiv_reward_batch_i = indiv_reward_batch[i]\n next_obs_batch_i = next_obs_batch[i]\n #print(next_obs_batch_i)\n next_global_actions = []\n for agent in self.agents:\n next_obs_batch_i = torch.FloatTensor(next_obs_batch_i)\n #print(next_obs_batch_i)\n indiv_next_action = 
agent.actor[sub_policy_id].forward(next_obs_batch_i.to(device))\n indiv_next_action = [act for act in indiv_next_action]\n indiv_next_action = torch.stack(indiv_next_action)\n next_global_actions.append(indiv_next_action)\n next_global_actions = torch.cat([next_actions_i for next_actions_i in next_global_actions], 1)\n \n \n self.agents[i].learn(sub_policy_id,indiv_reward_batch_i, obs_batch_i, global_state_batch, global_actions_batch, global_next_state_batch, next_global_actions)","sub_path":"draft_1/maddpg_ensemble.py","file_name":"maddpg_ensemble.py","file_ext":"py","file_size_in_byte":2736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"81429610","text":"\n\nclass Person:\n fname = \"\"\n lname = \"\"\n dob = \"\"\n address = \"\"\n\n def __init__(self, fname, lname, dob, address):\n self.fname = fname\n self.lname = lname\n self.dob = dob\n self.address = address\n\n def getName(self):\n return \"{} {}\".format(self.fname, self.lname)\n\n def getInfo(self):\n msg = \"\\nName:\\t\\t{} {}\\nAddress:\\t{}\\nDate of birth:\\t{}\".format(self.fname, self.lname, self.address, self.dob)\n return msg\n\n\nclass Student(Person):\n parent = \"\"\n grade = 0\n\n def __init__(self, fname, lname, dob, address, parent, grade):\n self.fname = fname\n self.lname = lname\n self.dob = dob\n self.address = address\n self.parent = parent\n self.grade = grade\n\n def getInfo(self):\n msg = \"\\nName:\\t\\t{} {}\\nAddress:\\t{}\\nDate of birth:\\t{}\\nGrade:\\t\\t{}\\nParent:\\t\\t{}\".format(self.fname, self.lname, self.address, self.dob, self.grade, self.parent)\n return msg\n\n def getGrade(self):\n return self.grade\n\n \nclass Teacher(Person):\n subjects = []\n payGrade = 0\n\n def __init__(self, fname, lname, dob, address, subjects, paygrade):\n self.fname = fname\n self.lname = lname\n self.dob = dob\n self.address = address\n self.subjects = subjects\n self.paygrade = paygrade\n\n def getInfo(self):\n msg = \"\\nName:\\t\\t{} {}\\nAddress:\\t{}\\nDate of birth:\\t{}\\nSubjects:\\t{}\\nPaygrade:\\t{}\".format(self.fname, self.lname, self.address, self.dob, self.subjects, self.paygrade)\n return msg\n\n\nif __name__ == \"__main__\":\n student1 = Student(\"Maria\", \"van Veen\", \"01/01/2014\", \"123 Main Street\", \"Mom van Veen\", 6)\n print(student1.getInfo()) \n teacher1 = Teacher(\"Karin\", \"Dawson\", \"01/01/1970\", \"11 North Street\", [\"english\", \"math\"], 2)\n print(teacher1.getInfo())\n person1 = Person(\"Catharina\", \"van Veen\", \"01/01/1980\", \"30 South Street\")\n print(person1.getInfo())\n print()\n print(student1.getName())\n print(student1.getGrade())\n","sub_path":"Polymorphism.py","file_name":"Polymorphism.py","file_ext":"py","file_size_in_byte":2046,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"135901183","text":"import logging\nimport os\n\nimport pandas as pd\n\nfrom dalynator import get_input_args\nfrom dalynator.constants import UMASK_PERMISSIONS\nfrom dalynator.write_csv import sub_pub_for_cc\n\n\nos.umask(UMASK_PERMISSIONS)\nlogger = logging.getLogger(__name__)\n\nPK_SINGLE_YEAR_RISK = ['location_id', 'cause_id', 'rei_id', 'age_group_id',\n 'year_id', 'metric_id', 'measure_id', 'sex_id']\nPK_MULTI_YEAR_RISK = ['location_id', 'cause_id', 'rei_id', 'age_group_id',\n 'year_start_id', 'year_end_id', 'metric_id',\n 'measure_id', 'sex_id']\nPK_SINGLE_YEAR_NORISK = ['location_id', 'cause_id', 'age_group_id', 'year_id',\n 'metric_id', 'measure_id', 
'sex_id']\nPK_MULTI_YEAR_NORISK = ['location_id', 'cause_id', 'age_group_id',\n 'year_start_id', 'year_end_id', 'metric_id',\n 'measure_id', 'sex_id']\n\n\nclass ColumnstoreSorter(object):\n\n def __init__(self, args, table_class, n_years):\n \"\"\"See the parser+arg constructors for details about the expected\n args. table_class is expected to be in the set {eti, risk, summary}.\n n_years should be in the set {single_year, multi_year}\"\"\"\n\n if table_class not in [\"eti\", \"risk\", \"summary\"]:\n raise ValueError(\"table_class is expected to be in the \"\n \"set {eti, risk, summary}\")\n if n_years not in [\"single_year\", \"multi_year\"]:\n raise ValueError(\"n_years is expected to be in the \"\n \"set {single_year, multi_year}\")\n\n self.location_id = args.location_id\n\n if n_years == \"single_year\":\n self.year_postfixes = args.year_ids\n elif n_years == \"multi_year\":\n self.year_postfixes = [\n \"FILEPATH\".format(s, e)\n for s, e in zip(args.start_year_ids, args.end_year_ids)]\n self.measure_ids = args.measure_ids\n self.tool_name = args.tool_name\n\n self.n_years = n_years\n self.file_prefix = \"FILEPATH\".format(table_class)\n\n self.root_dir = sub_pub_for_cc(\n os.path.join(args.out_dir, \"draws\", str(self.location_id)))\n\n self.outfile_dir = os.path.join(self.root_dir, \"upload\")\n self.outfile_basename = \"FILEPATH\".format(\n loc=self.location_id, tc=table_class, ny=self.n_years)\n\n def _get_csv_list(self):\n file_paths = []\n for year_pf in self.year_postfixes:\n for meas in self.measure_ids:\n fd = os.path.join(self.root_dir, \"upload\", str(meas),\n self.n_years)\n fp = \"FILEPATH\".format(\n prefix=self.file_prefix, loc=self.location_id, yr=year_pf)\n file_paths.append(os.path.join(fd, fp))\n return file_paths\n\n def _read_csvs_to_dataframe(self, csv_list):\n df = pd.concat([pd.read_csv(f) for f in csv_list])\n return df\n\n def _sort_frame(self, df):\n if self.n_years == \"single_year\":\n if self.tool_name == \"burdenator\":\n sort_order = PK_SINGLE_YEAR_RISK\n elif self.tool_name == \"dalynator\":\n sort_order = PK_SINGLE_YEAR_NORISK\n elif self.n_years == \"multi_year\":\n if self.tool_name == \"burdenator\":\n sort_order = PK_MULTI_YEAR_RISK\n elif self.tool_name == \"dalynator\":\n sort_order = PK_MULTI_YEAR_NORISK\n return df.sort_values(sort_order)\n\n def reduce_to_meas_csvs(self):\n csvs = self._get_csv_list()\n df = self._read_csvs_to_dataframe(csvs)\n sorted_df = self._sort_frame(df)\n for measure_id in sorted_df.measure_id.unique():\n filepath = \"FILEPATH\".format(od=self.outfile_dir,\n m=measure_id,\n ob=self.outfile_basename)\n write_df = sorted_df.query(\"measure_id == {}\".format(measure_id))\n write_df.to_csv(filepath, index=False)\n\n\ndef main(cli_args=None):\n parser = get_input_args.construct_parser_cs_sort()\n args = get_input_args.construct_args_cs_sort(parser, cli_args=cli_args)\n\n # Handle different file permutations based on tool\n if args.tool_name == \"burdenator\":\n sorter = ColumnstoreSorter(args, \"risk\", \"single_year\")\n sorter.reduce_to_meas_csvs()\n\n sorter = ColumnstoreSorter(args, \"eti\", \"single_year\")\n sorter.reduce_to_meas_csvs()\n\n if args.start_year_ids is not None:\n sorter = ColumnstoreSorter(args, \"risk\", \"multi_year\")\n sorter.reduce_to_meas_csvs()\n\n sorter = ColumnstoreSorter(args, \"eti\", \"multi_year\")\n sorter.reduce_to_meas_csvs()\n\n elif args.tool_name == \"dalynator\":\n sorter = ColumnstoreSorter(args, \"summary\", \"single_year\")\n sorter.reduce_to_meas_csvs()\n\n if 
args.start_year_ids is not None:\n sorter = ColumnstoreSorter(args, \"summary\", \"multi_year\")\n sorter.reduce_to_meas_csvs()\n","sub_path":"gbd_2019/shared_code/central_comp/dalys_hale/dalynator/dalynator/dalynator/tasks/run_pipeline_cs_sort.py","file_name":"run_pipeline_cs_sort.py","file_ext":"py","file_size_in_byte":5063,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"84338675","text":"import os\nimport sys\nimport time\nimport asyncio\nimport logging\nimport argparse\n\nimport synapse.exc as s_exc\nimport synapse.common as s_common\nimport synapse.cortex as s_cortex\nimport synapse.telepath as s_telepath\n\nimport synapse.lib.cmdr as s_cmdr\nimport synapse.lib.output as s_output\nimport synapse.lib.msgpack as s_msgpack\nimport synapse.lib.version as s_version\nimport synapse.lib.encoding as s_encoding\n\nlogger = logging.getLogger(__name__)\n\nreqver = '>=0.2.0,<3.0.0'\n\ndef getItems(*paths):\n items = []\n for path in paths:\n if path.endswith('.json'):\n item = s_common.jsload(path)\n if not isinstance(item, list):\n item = [item]\n items.append((path, item))\n elif path.endswith('.jsonl'):\n with s_common.genfile(path) as fd:\n item = list(s_encoding.iterdata(fd, False, format='jsonl'))\n items.append((path, item))\n elif path.endswith(('.yaml', '.yml')):\n item = s_common.yamlload(path)\n if not isinstance(item, list):\n item = [item]\n items.append((path, item))\n elif path.endswith('.mpk') or path.endswith('.nodes'):\n genr = s_msgpack.iterfile(path)\n items.append((path, genr))\n else: # pragma: no cover\n logger.warning('Unsupported file path: [%s]', path)\n return items\n\nasync def addFeedData(core, outp, feedformat, debug=False, *paths, chunksize=1000, offset=0, viewiden=None):\n\n items = getItems(*paths)\n for path, item in items:\n\n bname = os.path.basename(path)\n\n tick = time.time()\n outp.printf(f'Adding items from [{path}]')\n\n foff = 0\n for chunk in s_common.chunks(item, chunksize):\n\n clen = len(chunk)\n if offset and foff + clen < offset:\n # We have not yet encountered a chunk which\n # will include the offset size.\n foff += clen\n continue\n\n await core.addFeedData(feedformat, chunk, viewiden=viewiden)\n\n foff += clen\n outp.printf(f'Added [{clen}] items from [{bname}] - offset [{foff}]')\n\n tock = time.time()\n\n outp.printf(f'Done consuming from [{bname}]')\n outp.printf(f'Took [{tock - tick}] seconds.')\n\n if debug:\n await s_cmdr.runItemCmdr(core, outp, True)\n\nasync def main(argv, outp=None):\n\n if outp is None: # pragma: no cover\n outp = s_output.OutPut()\n\n pars = makeargparser()\n opts = pars.parse_args(argv)\n\n if opts.offset:\n if len(opts.files) > 1:\n outp.printf('Cannot start from a arbitrary offset for more than 1 file.')\n return 1\n\n outp.printf(f'Starting from offset [{opts.offset}] - it may take a while'\n f' to get to that location in the input file.')\n\n if opts.test:\n async with s_cortex.getTempCortex(mods=opts.modules) as prox:\n await addFeedData(prox, outp, opts.format, opts.debug,\n chunksize=opts.chunksize,\n offset=opts.offset,\n *opts.files)\n\n elif opts.cortex:\n async with s_telepath.withTeleEnv():\n async with await s_telepath.openurl(opts.cortex) as core:\n try:\n s_version.reqVersion(core._getSynVers(), reqver)\n except s_exc.BadVersion as e:\n valu = s_version.fmtVersion(*e.get('valu'))\n outp.printf(f'Cortex version {valu} is outside of the feed tool supported range ({reqver}).')\n outp.printf(f'Please use a version of Synapse which supports {valu}; 
'\n f'current version is {s_version.verstring}.')\n return 1\n await addFeedData(core, outp, opts.format, opts.debug,\n chunksize=opts.chunksize,\n offset=opts.offset, viewiden=opts.view,\n *opts.files)\n\n else: # pragma: no cover\n outp.printf('No valid options provided [%s]', opts)\n return 1\n\n return 0\n\ndef makeargparser():\n desc = 'Command line tool for ingesting data into a cortex'\n pars = argparse.ArgumentParser('synapse.tools.feed', description=desc)\n\n muxp = pars.add_mutually_exclusive_group(required=True)\n muxp.add_argument('--cortex', '-c', type=str,\n help='Cortex to connect and add nodes too.')\n muxp.add_argument('--test', '-t', default=False, action='store_true',\n help='Perform a local ingest against a temporary cortex.')\n\n pars.add_argument('--debug', '-d', default=False, action='store_true',\n help='Drop to interactive prompt to inspect cortex after loading data.')\n pars.add_argument('--format', '-f', type=str, action='store', default='syn.nodes',\n help='Feed format to use for the ingested data.')\n pars.add_argument('--modules', '-m', type=str, action='append', default=[],\n help='Additional modules to load locally with a test Cortex.')\n pars.add_argument('--chunksize', type=int, action='store', default=1000,\n help='Default chunksize for iterating over items.')\n pars.add_argument('--offset', type=int, action='store', default=0,\n help='Item offset to start consuming data from.')\n pars.add_argument('--view', type=str, action='store', default=None,\n help='The View to ingest the data into.')\n pars.add_argument('files', nargs='*', help='json/yaml/msgpack feed files')\n\n return pars\n\nif __name__ == '__main__': # pragma: no cover\n s_common.setlogging(logger, 'DEBUG')\n asyncio.run(main(sys.argv[1:]))\n","sub_path":"synapse/tools/feed.py","file_name":"feed.py","file_ext":"py","file_size_in_byte":5803,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"40391244","text":"import os,glob,sys\nimport h5py\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nout_folder = sys.argv[1]\n\nflist = glob.glob(os.path.join(out_folder,'slc_*'))\nN = len(flist)\nDT = np.zeros(N)\nt = np.zeros(N)\n\ni=0\n\nfor fname in flist:\n with h5py.File(fname, 'r') as f:\n t[i] = f.attrs['t']\n rho = np.asarray(f['rho'])\n p = np.asarray(f['p'])\n\n T = p/rho\n DT[i] = np.sqrt((T**2).mean() - T.mean()**2) / T.mean()\n i+=1\n\nnp.save('temp_damp.npy',[t,DT])\n# plt.figure()\n# plt.semilogy(t, DT)\n# plt.show()\n","sub_path":"scripts/temp_damping.py","file_name":"temp_damping.py","file_ext":"py","file_size_in_byte":533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"478288612","text":"\"\"\"\nHomework this week, wsgi application of.\nOnline calculator that can perform several operations.\n\"\"\"\nimport os\nimport re\nimport traceback\nimport math\n\n\ndef add(*args):\n \"\"\"\n Returns a STRING with the sum of the arguments\n A call to /add/a/b yields a + b\n \"\"\"\n body = []\n total=0\n try:\n total = sum(map(int, args))\n body.append(\"Result is: {}\".format(total))\n except (ValueError, TypeError) as err:\n body = \"Unable to calculate a sum: please provide int values\" + str(err)\n return str(body)\n\n\ndef subtract(*args):\n \"\"\"\n Returns a STRING with the subtract of the arguments\n A call to /subtruct/a/b yields a + b\n \"\"\"\n body = []\n total = 0\n try:\n total = int(args[0]) - int(args[1])\n body.append(\"Result is: {}\".format(total))\n except 
(ValueError, TypeError) as err:\n body = \"Unable to calculate a subtract: please provide int values\" + str(err)\n return body\n\ndef multiply(*args):\n \"\"\"\n Returns a STRING with the multiply of the arguments\n A call to /multiply/a/b yields a + b\n \"\"\"\n body = []\n total = 0\n try:\n total = int(args[0]) * int(args[1])\n body.append(\"Result is: {}\".format(total))\n except (ValueError, TypeError) as err:\n body = \"Unable to calculate a multiply: please provide int values\" + str(err)\n return body\n\n\ndef divide(*args):\n \"\"\"\n Returns a STRING with the divide of the arguments\n A call to /divide/a/b yields a + b\n \"\"\"\n body = []\n total = 0\n try:\n total = int(args[0]) // int(args[1])\n body.append(\"Result is: {}\".format(total))\n except (ValueError, TypeError) as err:\n body = \"Unable to calculate a divide: please provide int values\" + str(err)\n return body\n\n\ndef root_path(*args):\n \"\"\"\n The index page at the root of the server shall include instructions\n on how to use the page.\n \"\"\"\n body = \"\"\"\n <h1>Instructions on how to use the page.</h1>\n <ul>\n <li>A call to: /add/a/b yields a + b</li>\n <li>A call to: /subtract/a/b yields a - b</li>\n <li>A call to: /multiply/a/b yields a * b</li>\n <li>A call to: /divide/a/b yields a / b</li>\n <li>A call to: / yields instruction index page</li>\n </ul>\n \"\"\"\n try:\n print(\"Instruction page\", body)\n except (ValueError, TypeError) as err:\n body = \"Unable to calculate a divide: please provide int values\" + str(err)\n return body\n\ndef resolve_path(path):\n \"\"\"\n Should return two values: a callable and an iterable of\n arguments.\n \"\"\"\n funcs = {\n '': root_path,\n 'add': add,\n 'subtract': subtract,\n 'multiply': multiply,\n 'divide': divide\n }\n\n path = path.strip('/').split('/')\n # print(\"after strip:\", path)\n func_name = path[0]\n args = path[1:]\n\n try:\n func = funcs[func_name]\n print (\"func: \", func);\n except KeyError:\n raise NameError\n return func, args\n\ndef application(environ, start_response):\n # application will invoke start_response(status, headers) and\n # Param: environ and start_response\n # return the body of the response in BYTE encoding.\n headers = [(\"Content-type\", \"text/html\")]\n try:\n path = environ.get('PATH_INFO', None)\n if path is None:\n raise NameError\n function, arguments = resolve_path(path)\n body = function(*arguments)\n status = \"200 OK\"\n except NameError as e:\n print(\"got name error\", e)\n status = \"404 Not Found\"\n body = \"<h1>Not Found</h1>\"\n except Exception:\n status = \"500 Internal Server Error\"\n body = \"<h1>Internal Server Error</h1>\"\n print(traceback.format_exc())\n finally:\n body = str(body);\n headers.append(('Content-length', str(len(body))))\n start_response(status, headers)\n return [body.encode('utf8')]\n\nif __name__ == '__main__':\n # wsgiref simple server creation\n from wsgiref.simple_server import make_server\n port = int(os.environ.get(\"PORT\", 8080))\n\n srv = make_server('0.0.0.0', port, application)\n srv.serve_forever()\n","sub_path":"calculator.py","file_name":"calculator.py","file_ext":"py","file_size_in_byte":4168,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"}
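The calculator record above wires a plain function table into the WSGI contract: `resolve_path` maps `PATH_INFO` to a callable plus string arguments, and `application` reports status through `start_response` before returning a byte-encoded body. It can be exercised without a server; only the import path (the record's `calculator.py`) is assumed here, the rest is test scaffolding:

```python
# In-process exercise of the WSGI app defined in the record above.
from calculator import application

captured = {}

def start_response(status, headers):
    # A WSGI server calls this once before iterating the response body.
    captured['status'] = status
    captured['headers'] = headers

body = application({'PATH_INFO': '/add/3/4'}, start_response)
print(captured['status'])       # 200 OK
print(b''.join(body).decode())  # ['Result is: 7'], since add() str()s a list
```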
    \"\n print(traceback.format_exc())\n finally:\n body = str(body);\n headers.append(('Content-length', str(len(body))))\n start_response(status, headers)\n return [body.encode('utf8')]\n\nif __name__ == '__main__':\n # wsgiref simple server creation\n from wsgiref.simple_server import make_server\n port = int(os.environ.get(\"PORT\", 8080))\n\n srv = make_server('0.0.0.0', port, application)\n srv.serve_forever()\n","sub_path":"calculator.py","file_name":"calculator.py","file_ext":"py","file_size_in_byte":4168,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"34173772","text":"\"\"\"\nMadelyn Reyes\nOcotober 26, 2015\nLists\n\"\"\"\n\"\"\"\n#list: container of objects\n\n#l = [ objects ]\n\n\nl = [1,2,0.5,'hello'] #can be any data type\n\n#objects, or elements in a list, are ordered from left to right.\n\nl[1] -> 1 #can use indexing to extract elements of the list\n\nl[2] -> 2\n\nl[3] -> 0.5\n\nl[4] -> 'Hello'\n\nlen() -> #to find length of a list\n\n#strings can be thought of as a list of characters\n\n\n\nl = [1,2,0.5,'hello']\n\nprint(l[1]) \n\nprint(len(l))\n\n\n\nfor j in range(len(l)):\n print(l[j])\n\nfor e in l:\n print(e)\n\n\nM = [1,2,3,4,5,6,7,8,9,10]\n\nnum = 0\nfor i in range(len(M)):\n num += M[i]\nprint(num)\n \ntotal = 0\nfor e in M:\n total += e\nprint(total)\n\nprint(M[10]) #list out of range\n\n\n\nM = [1,2,3,4,5,6,7,8,9,10]\n\nprint(M)\n\nM[0] = 100 #change the value of m at position 0 to be 100\n\nprint(M) #lists can be modified\n\n \n\nM = [11,2,3,4,5,6,7,8,9,10]\n\nfor i in range(len(M)):\n M[i] = M[i] + 5\n\nprint(M)\n\n\nM = [11,2,3,4,5,6,7,8,9,10]\n\nfor e in M: #this will just extract an element from the list\n e = e + 5 #if you want to save the result from a computation back to the list\n #you must use positioning \nprint(M)\n\nM = [11,2,3,4,5,6,7,8,9,10] #this will not work\n\nM = M + 5 #you cannot add an integer to a list\n\nprint(M)\n\n\n\"\"\"\n\nM = [5,6,7,8] #list concatenation\n\nM1 = [78,79]\n\nM2 = M + M1\n\nprint(M2)\n\n\nM = [5,6,7,8] #list concatenation\n\nM1 = [5,6,7,8]\n\nprint(M == M1)\n\nprint(M < M1) #compares each position at the corresponding index, and evaluates.\n\n#python ignores the rest of the list once it can evaluate something.\n\n\n\n\n\n\n\n\n","sub_path":"recent programming stuff/lists.py","file_name":"lists.py","file_ext":"py","file_size_in_byte":1569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"469525037","text":"'''\nCreated on Sep 2, 2018\n@author: Burkhard A. 
Meier\n\n\nSpace background image was downloaded from:\n--------------------------------------\nhttps://opengameart.org\nNo attribution required for this png file.\n\n'''\n\n\n\n\nimport pygame\nfrom pygame.locals import * \nfrom os import path \nimport numpy as np # import numpy module and alias as \"np\"\n \n\npygame.init() \npygame.display.set_caption('PyGame - Starships and Asteroids game') \n \nWIDTH, HEIGHT = 900, 550 # <== adjust size to your liking \ngame_surface = pygame.display.set_mode((WIDTH, HEIGHT)) \n \nfps_clock = pygame.time.Clock() # create clock instance\nFPS = 60 # frames per second\n\ndef run_3d_game(): \n bg_img = pygame.image.load(path.join('images', 'space_background.png')) \n game_surface.blit(bg_img, (0, 0))\n \n # game loop ------------------------------------------------------------\n run_game = True\n while run_game:\n fps_clock.tick(FPS) \n\n for event in pygame.event.get():\n if event.type == QUIT: \n run_game = False \n \n game_surface.blit(bg_img, (0, 0)) \n pygame.display.update()\n # End game loop --------------------------------------------------------\n pygame.quit()\n\nif __name__ == '__main__':\n run_3d_game()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"Section 4/Video1_2_numpy.py","file_name":"Video1_2_numpy.py","file_ext":"py","file_size_in_byte":1470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"108134284","text":"# -*- coding: utf-8 -*-\n# @Time : 2020/3/6 21:46\n# @Author : WuxieYaYa\n\ndef reverse(x):\n ans = ''\n x, res = abs(x), 0\n boundary = (1 << 31) - 1 if x > 0 else 1 << 31\n\n if x == 0:\n return 0\n\n \"\"\"\n while 1:\n if x == 0:\n break\n ans += str(x % 10)\n x //= 10\n result = int(ans) if x > 0 else -int(ans)\n return result if x <= boundary else res\n \"\"\"\n while x != 0:\n res = res * 10 + x % 10\n if res > boundary:\n return 0\n x //= 10\n return res if x > 0 else -res\n\"\"\"\n y, res = abs(x), 0\n # 则其数值范围为 [−2^31, 2^31 − 1]\n boundry = (1<<31) -1 if x>0 else 1<<31\n while y != 0:\n res = res*10 +y%10\n if res > boundry :\n return 0\n y //=10\n return res if x >0 else -res\n\n作者:boywithacoin_cn\n链接:https://leetcode-cn.com/problems/reverse-integer/solution/pythondan-chu-he-tui-ru-shu-zi-yi-chu-qian-jin-xin/\n\"\"\"\n\n\nif __name__ == '__main__':\n a = -2147483412\n reult = reverse(a)\n print(reult)\n","sub_path":"7.整数反转.py","file_name":"7.整数反转.py","file_ext":"py","file_size_in_byte":1083,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"448589552","text":"# Создать текстовый файл (не программно), сохранить в нем несколько строк,\n# выполнить подсчет количества строк, количества слов в каждой строке.\n#\n# counter = 0\n# with open('text777.txt', 'r') as file:\n# for line in file:\n# counter += 1\n# line_words = line.split()\n# print(line, line_words, 'Длина строки:', len(line_words))\n# print('Всего строк: ', counter)\n\n#----------------------------------------------------------------------------------------------------------------\n\nwith open('text777.txt', 'r') as f:\n my_line = f.readlines()\n print(my_line)\n for index, value in enumerate(my_line):\n number_of_words = len(value.split())\n print(f'Строка {index + 1} содержит {number_of_words}')","sub_path":"lesson_5/Homework_5.2 copy.py","file_name":"Homework_5.2 copy.py","file_ext":"py","file_size_in_byte":895,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} 
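The `reverse` function in the 7.整数反转.py record above guards the signed 32-bit range with bit-shift boundaries and peels off digits with `% 10` and `// 10`. The same arithmetic in isolation, using values from that range:

```python
# The two boundaries used by the overflow guard in the record above.
print((1 << 31) - 1)  # 2147483647: largest signed 32-bit integer
print(1 << 31)        # 2147483648: magnitude of the smallest, -2**31

# Digit-by-digit reversal of 123: res grows 3 -> 32 -> 321.
x, res = 123, 0
while x != 0:
    res = res * 10 + x % 10
    x //= 10
print(res)            # 321
```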
+{"seq_id":"198235060","text":"import numpy as np\r\nimport gym\r\n\r\n\r\nclass Agent(object):\r\n def __init__(self, env, render_mode: str=None):\r\n \"\"\"\r\n Args:\r\n env: environment\r\n render_mode:\r\n None: don't render\r\n human: render in a window\r\n \"\"\"\r\n self.env = env\r\n self.render_mode = render_mode\r\n\r\n def __repr__(self):\r\n return \"{}(env={}, render_mode={})\".format(\r\n self.__class__.__name__,\r\n self.env,\r\n self.render_mode\r\n )\r\n\r\n def _initial_state(self) -> np.ndarray:\r\n \"\"\"\r\n Reset env and return initial state\r\n \"\"\"\r\n state = self.env.reset()\r\n if self.render_mode is not None:\r\n self.env.render(mode=self.render_mode)\r\n\r\n return state\r\n\r\n def _next_state(self, action: int) -> tuple:\r\n \"\"\"\r\n Return next state based on the given action\r\n Args:\r\n action: the action to perform for some frame\r\n\r\n Returns:\r\n tuple of:\r\n - next state\r\n - reward as a result of the action\r\n - flag determining end of episode\r\n - additional info\r\n \"\"\"\r\n state, reward, done, info = self.env.step(action=action)\r\n if self.render_mode is not None:\r\n self.env.render(mode=self.render_mode)\r\n\r\n return state, reward, done\r\n\r\n__all__ = [Agent.__name__]\r\n\r\nif __name__ == \"__main__\":\r\n test = Agent(\"env\", None)\r\n print(test)\r\n","sub_path":"agents/agent.py","file_name":"agent.py","file_ext":"py","file_size_in_byte":1563,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"88058250","text":"# Quizzing Application Theming Utility\n# Beta 0.5 (No Adjustable UI Elements yet)\n\n\"\"\"\nCODE UNDER PRODUCTION\n\"\"\"\n\n# Boot time calculation\nimport qa_time\n\ntry: boot_start = qa_time.now() # Added try/except block to remove PEP8 warnings about variables before importing\nexcept Exception: pass\n\n# Generic python modules\nimport threading, os, sys, traceback, math, shutil, random\nimport tkinter as tk\nimport tkinter.messagebox as tkmsb\nimport tkinter.colorchooser as tkcolor\nfrom tkinter import font as tkfont\nfrom tkinter import filedialog as tkfd\nfrom time import sleep\nfrom tkinter import ttk\n\n# Custom modules\nimport qa_appinfo as qaai\nimport qa_theme as Theme\nimport qa_logging as log\nimport qa_fileIOHandler as QaF\nimport qa_fontPicker as QaFPA\nimport qa_globalFlags as QaFlagHandler\nimport qa_diagnostics as QaDiagnostics\n\n# Global variables\nlog_logref = log.Log()\nlog_varref = log.Variables()\napptitle = f\"Theming Utility v{qaai.versionData[qaai.VFKeys['v']]}\"\ncont = False\n\nglobal ui\n\ndef no_func(re=True) -> any:\n jsr_debug(f\"Called placeholder function no_func\")\n return re # Placeholder function\n\ndef check_theme_integ() -> bool:\n jsr_debug(f\"Called function 'check_theme_integ'\")\n integ = Theme.integ()\n jsr_debug(f\"Theme Integ: {integ}\")\n return integ\n\ndef reset_theme() -> None:\n jsr_debug(f\"Resetting theme file...\")\n\n global apptitle\n tkmsb.showinfo(apptitle, f\"Resetting theme file; press OK to continue.\")\n Theme.reset()\n jsr_debug(f\"Reset theme file...\")\n tkmsb.showinfo(apptitle, f\"Successfully reset theme file.\")\n\ntemp = QaFlagHandler.QAFlags()\n\ncheck_function_mapping = {\n temp.def_func: no_func,\n 'no_func': no_func,\n 'check_theme_integ': check_theme_integ\n}\n\ncheck_fail_function_mapping = {\n 'check_theme_integ': reset_theme,\n 'no_func': lambda: no_func(True),\n temp.def_func: lambda: no_func(True)\n}\n\nclass CrashHandler(threading.Thread):\n def __init__(self):\n centeral_flags_id 
= QaFlagHandler.QAFlags()\n\n self.id = centeral_flags_id.theme_crash_id\n self.unresolved_bool_id = centeral_flags_id.log_unr_id\n self.crash_information_id = centeral_flags_id.log_info_id\n self.crash_time_id = centeral_flags_id.log_time_id\n self.func_call_id = centeral_flags_id.log_function_id\n self.no_func_id = centeral_flags_id.def_func\n self.log_script_name = centeral_flags_id.theme_crash_timed_id\n\n self.thread = threading.Thread\n self.thread.__init__(self)\n self.start()\n\n def boot_check(self):\n global apptitle; global check_function_mapping; global check_fail_function_mapping\n\n try:\n\n if loadFlag(self.id, return_boolean=True): # If the flag exists\n check_b = loadFlag(self.id , return_boolean=False)\n jsr_debug(f\"Received check_b={check_b}\")\n\n if check_b[self.unresolved_bool_id]: # If the crash has not been dealt with\n test = \"Unknown\"\n jsr_debug(f\"Dealing with crash from time='{check_b[self.crash_time_id]}'\")\n confirm_check = tkmsb.askyesno(apptitle,\n f'An unresolved crash from \"{check_b[self.crash_time_id]}\" has been detected; would you like to run diagnostics now?\\n\\nCrash Information: {check_b[self.crash_information_id]}')\n\n if not confirm_check:\n jsr_debug(f\"User wishes to not run diagnostics; marking crash as RESOLVED.\")\n n = check_b\n n[self.unresolved_bool_id] = False\n setFlag(self.id, n)\n tkmsb.showinfo(apptitle, f\"Marked crash as 'RESOLVED'\")\n\n else:\n dmr = False # dmr = do not remove\n\n diagnostics = QaDiagnostics.Diagnostics()\n\n result = diagnostics.run_diagnostics(key=check_b[self.func_call_id])\n\n if result:\n tkmsb.showinfo(apptitle, f\"Passed requested test; removing boot error flag; press 'OK' to continue.\")\n test = True\n dmr = False # clear flag\n\n else:\n tkmsb.showerror(apptitle, f\"Failed requested test; running appropriate correction script; press 'OK' to continue.\")\n test = False\n corrections = QaDiagnostics.Corrections()\n try: corrections.run_correction(key=check_b[self.func_call_id])\n except Exception as e:\n dmr = True # Do not clear\n raise Exception.__class__(e)\n\n if not dmr:\n jsr_debug(f\"removing boot error flag\")\n removeFlag(self.id)\n tkmsb.showinfo(apptitle, f\"Cleared error flag.\")\n jsr_debug(f\"flag removed\")\n\n else:\n jsr_debug(f\"not removing boot error flag\")\n\n diag_data = QaDiagnostics.Data()\n\n self.log_event(time=qa_time.now(), info={\n 'crash_detected': check_b,\n 'user_ran_diagnostics': confirm_check,\n 'test_passed': test,\n 'appropriate_diagnostics': diag_data.diagnostics_function_mapping[check_b[self.func_call_id]].__name__,\n 'appropriate_failed_test_function': diag_data.correction_function_mapping[check_b[self.func_call_id]].__name__\n })\n\n global cont\n cont = True \n\n except Exception as e:\n self.log_crash(time=qa_time.now(), info=f\"{traceback.format_exc()}\")\n tkmsb.showerror(apptitle, f\"Unable to check/reset crash log; terminating application.\")\n jsr_debug(f\"Crash: {e}; {e.__class__.__name__}; {traceback.format_exc()}\")\n sys.exit(f\"{IOError}\")\n\n def log_event(self, time, info):\n\n jsr_debug(f\"Logging event; time = {time}, info = {info}\")\n main_thread_name = self.log_script_name + f\"{time}\"\n\n setFlag(main_thread_name, info)\n\n def log_crash(self, time, info, func_call=None):\n if func_call is None: func_call = self.no_func_id\n\n jsr_debug(f\"Logging the following crash: time = {time}, info = {info}\")\n\n setFlag(self.id, {\n self.unresolved_bool_id: True,\n self.crash_information_id: info,\n self.crash_time_id: f\"{time}\",\n 
self.func_call_id: f'{func_call}'} # Function may only be a return type bool function\n )\n\n\ndef setFlag(flag_id: str, flag_data: any, **flags) -> None:\n \"\"\"\n **QA_APPS_TU.setFlag**\n\n Custom function to save flags to external JSON file\n\n :param flag_id: Flag ID (dict key) [str]\n :param flag_data: Flag data (data to be stored/dict value) [any]\n :param flags: Flags (see 'Supported Flags' section for more information)\n :return: None\n\n ===============\n\n **Supported Flags**\n\n 1) *append*:\n * Type: boolean\n * Default: True\n * Information: Append new data or change existing flag data and *NOT* clear the entire file before doing so and therefore not removing all other flags...\n\n 2) *flags_filename*:\n * Type: str, bytes\n * Default: qa_appinfo.global_nv_flags_fn\n * Information: Name of file where flags are stored\n\n 3) *reload_nv_flags*:\n * Type: boolean\n * Default: True (*Highly Recommended*)\n * Information: Reload qa_globalFlags' internal JSON data variable and reset it to the new information present in the flags file; highly recommended to set to *True* otherwise the application may not work suitably or may work slower than optimal.\n\n ==============\n\n \"\"\"\n\n if not type(flag_id) is str: raise TypeError(f\"Invalid type {type(flag_id)} passed for arguement 'flag_id'; expected type {str}.\")\n\n Flags = {\n 'append': [True, (True, bool)],\n 'flags_filename': [qaai.global_nv_flags_fn, (qaai.global_nv_flags_fn, str, bytes)],\n 'reload_nv_flags': [True, (True, bool)]\n }\n\n Flags = flags_modifier(Flags, flags)\n\n temp: dict = {}\n for i in Flags: temp[i] = Flags[i][0]\n\n Flags = temp\n\n flag_io = QaFlagHandler.QAFlags()\n key = flag_io.SET\n\n flag_io.io(key,\n filename=Flags['flags_filename'],\n data={\n flag_id: flag_data\n },\n appendData=Flags['append'],\n reloadJSON=Flags['reload_nv_flags'])\n\n return None\n\ndef loadFlag(flag_id: str, **flags):\n \"\"\"\n **QA_APPS_TU.loadFlag**\n\n Custom function to retrieve flag data from external JSON file\n\n :param flag_id: Flag ID (Dict Key) [str]\n :param flags: Flags [str]\n :return: Union (bool, any)\n\n ==========\n\n **Supported Flags**\n\n 1) *return_boolean*\n * Type: bool\n * Default: True\n * Information:\n * If set to *True*, the function will return a boolean;\n * True = Flag is present in given file\n * False = Flag is not present in given file\n\n * If set to *False*, the function will return the value of the flag;\n * If the flag exists, the function returns the value; type *any*\n * If the flag does not exist, the function returns type *NoneType*\n\n 2) *filename*\n * Type: bytes, str\n * Default: qa_appinfo.global_nv_flags_fn\n * File to look for the flags in\n\n 3) *reload_nv_flags*\n * Type: bool\n * Default: True (Highly Recomended; although slower)\n * Information: If set to *True*, the flags handler will reload its internal variables with the latest information present in the requested file.\n\n ==========\n\n \"\"\"\n\n if not type(flag_id) is str: raise TypeError(f\"Invalid type {type(flag_id)} passed for arguement 'flag_id'; expected type {str}.\")\n\n Flags = {\n 'return_boolean': [True, (True, bool)],\n 'filename': [qaai.global_nv_flags_fn, (qaai.global_nv_flags_fn, str, bytes)],\n 'reload_nv_flags': [True, (True, bool)]\n }\n\n Flags = flags_modifier(Flags, flags)\n\n temp: dict = {}\n for i in Flags: temp[i] = Flags[i][0]\n\n Flags = temp\n\n jsr_debug(f\"Set flags to {Flags}\")\n jsr_debug(f\"Querying for flag {flag_id} in file {Flags['filename']}\")\n\n flagsIO = 
QaFlagHandler.QAFlags()\n key = flagsIO.GET\n\n result = flagsIO.io(Key=key,\n key=flag_id,\n filename=Flags['filename'],\n re_bool=Flags['return_boolean'],\n reloadJSON=Flags['reload_nv_flags'])\n\n jsr_debug(f\"Result of query: '{result}'\")\n\n return result\n\ndef removeFlag(flag_id: str, **flags) -> None:\n \"\"\"\n **QA_APPS_TU.removeFlag**\n\n Custom function to remove flags (entries) from external JSON file\n\n :param flag_id: Flag ID (Dict Key) [str]\n :param flags: Flags [dict]\n :return: None\n\n =========\n\n **Supported Flags**\n\n 1) *filename*:\n * Type: str, bytes\n * Default: qa_appinfo.global_nv_flags_fn\n * Information: File from which to remove the given entry\n\n =========\n\n \"\"\"\n\n if not type(flag_id) is str: raise TypeError(\n f\"Invalid type {type(flag_id)} passed for argument 'flag_id'; expected type {str}.\")\n\n Flags = {\n 'filename': [qaai.global_nv_flags_fn, (qaai.global_nv_flags_fn, str, bytes)]\n }\n\n Flags = flags_modifier(Flags, flags)\n\n jsr_debug(f\"Removing flag {flag_id} from file {Flags['filename']}\")\n\n flagsIO = QaFlagHandler.QAFlags()\n key = flagsIO.REMOVE\n\n flagsIO.io(Key=key,\n filename=Flags['filename'][0],\n key=flag_id)\n\n return None\n\ndef jsr_debug(_deb_data: str) -> None:\n \"\"\"\n **QA_APPS_TU.JSR_DEBUG**\n\n :param _deb_data: debug data (str)\n :return: None\n\n Custom function to manipulate custom module qa_logging to log data.\n \"\"\"\n\n # Global vars:\n global log_logref;\n global log_varref\n\n if not type(_deb_data) is str: raise IOError\n\n try:\n sc = __file__.replace(\"/\", \"\\\\\").replace(os.getcwd().strip(), \"\").strip(\"\\\\\").strip()\n except:\n sc = sys.argv[0].replace(os.getcwd().strip(), \"\").strip(\"\\\\\").strip()\n\n if not log_varref.genDebugFile(): log_logref.logFile_create(sc)\n log_logref.log(_deb_data, sc)\n\n\ndef boot_time(s, e, **flags):\n \"\"\"\n **QA_APPS_TU.BOOT_TIME**\n\n :param s: start (datetime.time)\n :param e: end (datetime.time)\n :param flags: flags (dict)\n :return: datetime.time\n\n Calculates and logs boot time\n\n Supported Flags:\n * log: bool = True/False - Log boot time to logs file?\n \"\"\"\n\n # flags\n # ..., log: bool = False/True - True will let the function log the boot time.\n Flags = {\n 'log' : [True, (True, bool)]\n }\n\n bt = qa_time.calcDelta(s, e)\n if Flags['log'][0]: jsr_debug(f'boot time: start - {s}, end - {e}, time - {bt}')\n return bt\n\ndef closeapp(ui, _code: str=\"0\"):\n ui.rm() # Close UI\n sys.exit(_code)\n\ndef error_handler(_e: str = 'No diagnostic information given', _ecode: str = 'Unknown exit code', _exit: bool = False,\n _showui: bool = True, ui_ref= None, **flags):\n \"\"\"\n **QA_APPS_TU.ERROR_HANDLER**\n\n :param _e: Error code / Diagnostic info (str)\n :param _ecode: Exit code (str)\n :param _exit: Exit? (bool)\n :param _showui: Show the UI? 
(bool)\n :param flags: flags (dict)\n :return: None\n\n Handles errors, logs to log file, exits (if requested), etc.\n\n Supported flags:\n * useCustomText (bool): to use all custom text or an altered pre-existing base.\n * customText (str): custom text to use if 'useCustomText' is set to True; default is 'An unknown error occured'\n * doNotLog (bool): do not log error\n \"\"\"\n try:\n # Flags\n Flags = {\n 'useCustomText': [False, (False, bool)],\n 'customText': ['An unknown error occurred', ('An unknown error occurred', str)],\n 'doNotLog': [False, (False, bool)],\n }\n\n Flags = flags_modifier(Flags, flags)\n\n jsr_debug(f\"Running error_handler with arguments: {_e, _ecode, _exit, _showui, flags}\")\n\n global apptitle\n\n # Variables\n e_use = _e\n base_no_e = 'An error occurred whilst running the application scripts; the error, however, was not significant enough to require a termination of the app.\\n\\nDiagnostic information:\\n{}'.format(\n e_use)\n base_e = 'An error occurred whilst running the application scripts; this error is severe and therefore requies the application to be terminated. If data was being saved it will be restored automatically.\\n\\nDiagnostic information:\\n{}'.format(\n e_use)\n base = base_e if _exit else base_no_e\n\n if not Flags['doNotLog'][0]: jsr_debug(\n f\"Error raised: _e: {_e}, _ecode: {_ecode} _exit: {_exit}, _showui: {_showui}, Flags: {Flags}\")\n\n # UI\n if _showui:\n if Flags['useCustomText'][0]:\n tkmsb.showerror(apptitle, Flags['customText'][0]) # Custom message\n else:\n tkmsb.showerror(apptitle, base) # Edited generic message\n\n if _exit: closeapp(_ecode) # Exit if requested\n\n except Exception as e:\n jsr_debug(f\"Unable to show error; more information: {e.__class__.__name__}: {e}: {traceback.format_exc()}\")\n ui_ref.root.quit()\n\ndef flags_modifier(_ref: dict, _flags: dict, __raiseError: bool = True):\n \"\"\"\n **QA_APPS_TU.FLAGS_MODIFIER**\n\n :param _ref: reference dictionary of flags (dict); Format: flag name: info; info: list = [current value, defaults]; defaults: tuple = (default value, )\n :param _flags: flags provided (**kwargs or similar)\n :param __raiseError: raise an error if the flag is not valid (default True)\n :return: altered _ref (dict)\n\n Custom function to take care of arguments similar to **kwargs\n \"\"\"\n\n # Name conversion\n Flags = _ref\n flags = _flags\n\n # Logging\n jsr_debug('Running function flags_modifier')\n jsr_debug(f\"jsr_conv: Flags before flag check: {Flags}; flags given: {flags}\")\n\n for i in flags:\n jsr_debug(f\"Checking flag {i}\")\n if i in Flags: # If it is a valid flag\n # Log\n jsr_debug(f\"Flag name is valid\")\n jsr_debug(f'datatype of flag: {type(flags[i])}; allowed data types: {Flags[i][1:]}')\n\n if type(flags[i]) in Flags[i][1][1::]: # If it is valid\n jsr_debug(f\"Flag data type is valid; resetting from {Flags[i][0]} to {flags[i]}\")\n Flags[i][0] = flags[i]\n\n elif __raiseError:\n raise IOError(\n \"Type {} unsupported for flag {}; supported types: {}\".format(type(flags[i]), i, Flags[i][1][1::]))\n\n else:\n jsr_debug(\n f\"Flag datatype {type(flags[i])} invalid; keeping original data ({Flags[i][0]})\") # If the Dt is not valid\n\n elif __raiseError:\n raise IOError(\"Flag name {} invalid\".format(i))\n\n else:\n jsr_debug(f\"Flag name invalid\") # If the name is not valid\n\n # Trailing logging\n jsr_debug(f\"jsr_conv: Flags after flag check: {Flags}\")\n\n return Flags\n\ndef jsr_conv(_d, **flags):\n \"\"\"\n **QA_APPS_TU.JSR_CONV**\n\n :param _d: raw data\n :param flags: 
flags (dict)\n :return: tuple/list (bool valid, data type, data)\n\n Custom function to convert data types (advanced functionality)\n If a data type is unsupported the function will return [None, None, None]; otherwise it will return [True/False, datatype, data]\n\n Supported flags (Optional):\n * dtypes (tuple): a tuple / list containing the valid data types; example: (str, bytes, list, tuple)\n * convertTo (type): the type to convert the data to (default is str)\n * multi_sep (str): the separator for converting a str or bytes object to a list\n * doNotStrip (bool): do not strip the data (trailing spaces, leading speaces, and newline characters) (defaults = bool)\n\n Supported data types (input):\n * list\n * tuple\n * str\n * bytes\n\n Supported data types (output):\n * list\n * str\n * bytes\n \"\"\"\n\n # Basic Logging:\n for i in flags: jsr_debug(f\"Flag {i}: {flags[i]} (type {type(flags[i])})\")\n\n # Variables\n type_multi = [list, tuple]\n type_single = [str, bytes, int] # All supported data types\n\n # Flags\n # Flags dictionary - contains all flags\n Flags = { # Flag name : [set_to, (default, *supported_data_types)]\n 'dtypes': [(str, bytes, list, tuple, int), ((str, bytes, list, tuple, int), tuple, list)],\n 'convertTo': [str, (str, type)],\n 'multi_sep': [\"\\n\", (\"\\n\", str)],\n 'doNotStrip': [False, (False, bool)]\n }\n\n # Flags dictionary - modify\n Flags = flags_modifier(Flags, flags)\n\n # Step 1: Get data type\n dtype = type(_d)\n jsr_debug(f\"Data type: {dtype}\")\n\n # Step 2: if the data type is the same, return\n if dtype == Flags['convertTo'][0]:\n jsr_debug(f\"The data type is the same; returning immediately\")\n return [True, dtype, _d]\n\n # Step 3: If the dtype is not valid or supported\n if not dtype in Flags['dtypes'][0] or (not dtype in type_multi and not dtype in type_single):\n jsr_debug(f\"Unable to convert '{_d}' (type {dtype}); returning [False, None, None]\")\n return [False, None, None]\n\n # Step 4: Convert to right category (single / multi)\n # Step 4.1: Find\n if Flags['convertTo'][0] in type_multi:\n to = 'multi'\n elif Flags['convertTo'][0] in type_single:\n to = 'single'\n else:\n jsr_debug(\"Unable to convert {_d} (type {dtype}) due to an internal error; returning [None, None, None]\")\n return [None, None, None] # Return triple none if unsupported data type\n\n jsr_debug(f\"to (category): {to}\")\n\n # Step 4.2: Convert (or skip)\n if (dtype in type_multi and to == \"multi\") or (dtype in type_single and to == \"single\"):\n jsr_debug(\"Correct category\")\n if to == 'single':\n\n jsr_debug(\"single => single\")\n\n if dtype is int:\n jsr_debug(f'converting from {int} to {Flags[\"convertTo\"][0]}')\n\n if Flags['convertTo'][0] is str:\n jsr_debug(f\"int => str conversion; returning {str(_d)}\")\n return [True, str, str(_d)]\n\n if Flags['convertTo'][0] is bytes:\n jsr_debug(f\"int => bytes conversion; returning {str(_d).encode('utf-8')}\")\n return [True, bytes, str(_d).encode('utf-8')]\n\n elif dtype is bytes: # OG data type\n\n if Flags['convertTo'][0] is str: # bytes => str\n jsr_debug(f\"bytes => str conversion\")\n if not Flags['doNotStrip'][0]: out = str(_d.decode('utf-8')).strip()\n else: out = str(_d.decode('utf-8'))\n return [True, Flags['convertTo'][0], out] if type(out) is Flags['convertTo'][0] else [None, None,\n None]\n\n else:\n jsr_debug(f\"Cannot convert from type bytes; returning [None, None, None]\")\n return [None, None, None]\n\n elif dtype is str: # OG data type\n\n if Flags['convertTo'][0] is bytes: # str => bytes\n 
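# (Editor's note, added for clarity and not present in the original: every str/bytes conversion in jsr_conv assumes UTF-8 encoding.)\n                    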
jsr_debug(f\"str => bytes conversion\")\n if not Flags['doNotStrip'][0]: out = bytes(_d.strip().encode('utf-8'))\n else: out = bytes(_d.encode('utf-8'))\n return [True, Flags['convertTo'][0], out] if type(out) is Flags['convertTo'][0] else [None, None,\n None]\n if Flags['convertTo'] is int:\n jsr_debug(f\"str => int converstion\")\n out = ''; allowed = [str(i) for i in range(10)]\n for i in _d.strip(): out += i if i in allowed else ''\n return[True, Flags['convertTo'][0], out] if type (out) is Flags['convertTo'] else [None, None, None]\n\n else:\n jsr_debug(f\"Cannot convert from type str; returning [None, None, None]\")\n return [None, None, None]\n\n else:\n error_handler(_e=\"Cannot convert from type {} to {}; data = {}; allowed data types = {}\".format(dtype,\n Flags[\n 'convertTo'][\n 0],\n _d,\n Flags[\n 'dtypes'][\n 0]),\n _ecode=\"IOError\", _exit=True)\n\n elif to == 'multi':\n\n jsr_debug(f\"Converting type multi => multi\")\n\n if dtype is tuple: # OG DType\n\n if Flags['convertTo'][0] is list: # tuple => list\n jsr_debug(f\"tuple => list\")\n out = list(i for i in _d)\n jsr_debug(f'output = {out}')\n return [True, Flags['convertTo'][0], out] if type(out) is Flags['convertTo'][0] else [False, None,\n None]\n\n else:\n error_handler(\n _e=\"Cannot convert from type {} to {}; data = {}; allowed data types = {}\".format(dtype, Flags[\n 'convertTo'][0], _d, Flags['dtypes'][0]), _ecode=\"IOError\", _exit=True)\n\n if dtype is list:\n\n if Flags['convertTo'][0] is tuple: # list => tuple\n jsr_debug(f\"list => tuple\")\n out = tuple(i for i in _d)\n jsr_debug(f\"output = {out}\")\n return [True, Flags['convertTo'][0], out] if type(out) is Flags['convertTo'][0] else [False, None,\n None]\n\n else:\n error_handler(_ecode=\"IOError\", _exit=True)\n\n else: # If conversion is needed\n\n if to == 'single': # If to be converted to a single type data type\n jsr_debug(f\"Converting from type multi => single\")\n\n output = \"\"\n if Flags['convertTo'][0] is bytes: output = bytes(output.encode('utf-8'))\n\n for i in _d:\n if Flags['convertTo'][0] is bytes:\n if type(i) is str:\n output += bytes(i.encode('utf-8')) # Convert str to bytes\n\n elif type(i) is bytes:\n output += i # If it is already bytes\n\n else:\n raise IOError(\"Unable to decode type {} for data {}\".format(type(i), i))\n\n elif Flags['convertTo'][0] is str:\n if type(i) is str:\n output += i # if it already a str\n\n elif type(i) is bytes:\n output += str(i.decode('utf-8')).strip() # If it is type bytes\n\n else:\n raise IOError(\"Unable to decode type {} for data {}\".format(type(i), i))\n\n jsr_debug(f\"Returning [True, {Flags['convertTo'][0]}, {output}]\")\n return [True, Flags['convertTo'][0], output]\n\n elif to == 'multi': # If to be converted to a multiple type data type; returns a list\n jsr_debug(f\"Converting from single => multi\")\n\n # Variables\n output = []\n t = \"\"\n\n # Variable setup\n if type(_d) is bytes:\n t = str(_d.decode('utf-8')).strip()\n elif type(_d) is str:\n t = _d.strip()\n else:\n raise IOError(\"Unable to decode type {} for data {}\".format(type(_d), _d))\n\n # Conversion\n output_str = t.split(Flags['multi_sep'][0])\n jsr_debug(f\"Converted data to str, then to list, and now converting back to original data.\")\n\n output = [];\n contTo = type(_d)\n og_dt = type(_d)\n if og_dt not in Flags['dtypes'][0] or (og_dt not in type_single and og_dt not in type_multi):\n output = output_str\n\n else: # If can convert\n\n BR_ERR = False;\n temp = []\n\n try:\n for i in output_str:\n jsr_debug(f\"i 
{output_str.index(i)}/{len(output_str)}\")\n if type(i) is Flags['convertTo'][0]:\n temp.append(i)\n jsr_debug(f\"Same DT; appended '{i}'\")\n\n else:\n jsr_debug(f\"Changing data types\")\n\n if type(i) is str:\n jsr_debug(f\"Converting from str to ...\")\n\n if contTo is bytes: # str => bytes\n jsr_debug(f\"Converting to {bytes}\")\n temp.append(i.strip().encode('utf-8'))\n\n else: # Unsupported data types\n jsr_debug(\"Cannot convert; raising error flag.\")\n BR_ERR = True\n break\n\n elif type(i) is bytes:\n jsr_debug(f\"Converting from bytes to ...\")\n\n if contTo is str: # bytes => str\n jsr_debug(f\"Converting to str\")\n temp.append(i.decode('utf-8').strip())\n\n else: # Unsupported data types\n jsr_debug(\"Cannot convert; raising error flag.\")\n BR_ERR = True\n break\n\n else: # Unsupported data types\n jsr_debug(\"Cannot convert; raising error flag.\")\n BR_ERR = True\n break\n\n except Exception as e:\n jsr_debug(f\"An error occurred whilst converting data; more info: {e}: {traceback.format_exc()}\")\n BR_ERR = True\n\n if BR_ERR:\n jsr_debug(f\"Error flag raised; returning str\")\n output = output_str\n\n else:\n jsr_debug(f\"Error flag not raised; returning converted list\")\n output = temp\n\n jsr_debug(f\"Returning [True, {type(output)}, {output}]\")\n return [True, type(output), output]\n\n else:\n jsr_debug(f'Returning [None, None, None]')\n return [None, None, None]\n\n return [False, None, None] # Edge case\n\ndef get_IOMode(data, **flags):\n \"\"\"\n **QA_APPS_TU.GET_IOMODE**\n\n :param data: data stream (any)\n :param flags: flags (dict)\n :return: data io info [mode (read write dict [read = 'r', write = 'w']), multi required (bool)] (list/tuple)\n\n Custom function that returns the appropriate IO mode ('wb', 'rb', 'r', etc)\n\n Supported data types:\n * str\n * bytes\n * list\n * tuple\n\n \"\"\"\n\n # Function variables\n Flags = {} # Default flags\n edge = [{'w': 'w', 'r': 'r'}, False] # Edge case return statement\n Flags = flags_modifier(Flags, flags) # Set flags\n supp = [bytes, str, list, tuple] # Supported data types\n\n # Step 1: Basic check(s)\n dt = type(data)\n if not dt in supp: raise TypeError(\"Type {} not supported for variable 'data'\".format(type(dt)))\n\n # Step 2: switch case (equivalent)\n if dt is bytes:\n return [{'w': 'wb', 'r': 'rb'}, False]\n elif dt is str:\n return [{'w': 'w', 'r': 'r'}, False]\n elif dt is list or dt is tuple:\n return [{'w': 'w', 'r': 'r'}, True]\n\n return edge # Edge case\n\ndef secure_save(_f: str, _d, **flags):\n \"\"\"\n **QA_APPS_TU.SECURE SAVE**\n\n :param _f: filename to save to (str)\n :param _d: data to save (str / bytes)\n :return: None\n\n **Supported Flags:**\n 1) encrypt: to encrypt file? 
(bool; default = False)\n 2) datatype: data type to convert to (type; default = bytes)\n 3) append: append the new data (bool; default = False)\n 4) appendSep: seperator for appending; str only; automatically converted to the right dt (str; default = \"\")\n\n Creates backup, attempts to save properly, if fails, restores to backup and processes data if required.\n \"\"\"\n\n # Flags\n Flags = {\n 'encrypt': [False, (False, bool)],\n 'datatype': [bytes, (bytes, type)],\n 'append': [False, (False, bool)],\n 'appendSep': ['', ('', str)]\n }\n\n Flags = flags_modifier(Flags, flags)\n\n # Data types\n _d_dts_supp = [str, bytes, list, tuple]\n _d_dts = [str, bytes]\n _d_final = Flags['datatype'][0]\n\n # Basic checks\n if not os.path.exists(_f): raise IOError(\"File {} does not exist.\".format(_f))\n if not type(_d) in _d_dts_supp: raise TypeError(f'Type {type(_d)} unsupported by function.')\n\n # Extra data variables\n data = None # No converted data (yet)\n BAK = None # No backup\n\n # Create backup\n try:\n BAK = open(_f, 'rb').read() # Read in bytes to allow maximum compatibility with minimal error inducing factors.\n except Exception as e: # Unsafe to continue with the saving process.\n try:\n raise IOError(\n f\"Cannot create auto-gen backup for file {_f}; it is deemed unsafe to proceed therefore the process will be terminated.\")\n except Exception as e:\n error_handler(_e=traceback.format_exc(), _ecode=e.__class__.__name__, _exit=True)\n\n # Save to file; if something goes wrong, restore to backup and exit\n try: # Try to save the new data\n\n # Set the data var\n\n if Flags['append'][0]:\n # Step 1: convert data to bytes\n\n try:\n E = None\n data = jsr_conv(_d, convertTo=bytes)\n\n # Checks\n if not data[0] or data[-1] is None: raise IOError(\n f\"Pass to error handler -- convertor failed to convert the data type for data from file {_f}\")\n\n # Reset data var to the actual data\n data = data[-1]\n jsr_debug(f\"Set data variable to {data}\")\n\n # Append-only code: read the data from the file (in bytes; decrypt if required)\n org = load_file(_f, outputDT=bytes)\n\n # Append\n data = (org + jsr_conv(Flags['appendSep'][0], convertTo=bytes, doNotStrip=True)[-1] + data) if len(jsr_conv(org, convertTo=bytes)[-1]) > 0 else data\n\n except Exception as e:\n E = f'An error occurred whilst running a conversion script; more information:\\n\\n{e}\\n{traceback.format_exc()}'\n\n else:\n try:\n if data is None:\n jsr_debug(f\"Unable to convert data for file {_f}\")\n E = 'Unable to convert the data to the correct type...'\n except Exception as e:\n error_handler(_e=traceback.format_exc(), _exit=True, _ecode=e.__class__.__name__)\n\n if not E is None: error_handler(useCustomText=True, customText=E)\n\n else:\n # Not appending\n # Altered data var + checks\n E = None\n try:\n data = jsr_conv(_d, dtypes=_d_dts, convertTo=_d_final) # [valid, type, data]\n\n # Checks\n if not data[0] or data[-1] is None: raise IOError(f\"Pass to error handler -- convertor failed to convert the data type for data from file {_f}\")\n\n # Reset data var to the actual data\n data = data[-1]\n\n except Exception as e:\n E = f'An error occurred whilst running a conversion script; more information:\\n\\n{e}\\n{traceback.format_exc()}'\n else:\n try:\n if data is None:\n jsr_debug(f\"Unable to convert data for file {_f}\")\n E = 'Unable to convert the data to the correct type...'\n except Exception as e:\n error_handler(_e=traceback.format_exc(), _exit=True, _ecode=e.__class__.__name__)\n\n if not E is None: 
error_handler(useCustomText=True, customText=E)\n\n # Data var set; appending/not appending operation are the same from now on...\n\n # Step 1: Clear the file\n open(_f, 'w').close()\n jsr_debug(f\"Cleared File {_f}\")\n\n # Step 2: Write the new data\n md = get_IOMode(data)\n if md[-1]:\n open(_f, md[0]['w']).writelines(data)\n else:\n open(_f, md[0]['w']).write(data)\n\n jsr_debug(\n f'Written {data} to {_f}. Mode info: {md}; 0:{md[0]}; 0_w:{md[0][\"w\"]}; 0_r:{md[0][\"r\"]}; 1:{md[-1]}')\n\n # Step 3: Encrypt (If requested) (appending and not appending)\n if Flags['encrypt'][0]:\n jsr_debug(f\"Encrypted file {_f}\")\n QaF.encrypt(_f)\n\n return # Return; do not run any extra code; save time\n\n except Exception as e1: # Restore to backup\n try: # Try to restore to backup\n jsr_debug(f\"Failed to save new data; attempting to restore backup to file {_f}\")\n\n # Backup\n open(_f, 'wb').write(BAK)\n\n # If successful\n jsr_debug(\"Successfully restored data to backup ({}) in file {}\".format(BAK, _f))\n jsr_debug(f\"Raising error {e1} - {traceback.format_exc()}\")\n try:\n raise e1.__class__(f'{e1}; {traceback.format_exc()}')\n except Exception as e1_2:\n error_handler(traceback.format_exc(), _ecode=e1_2.__class__.__name__, _exit=True)\n\n except Exception as e2: # Restoration failed\n jsr_debug(f\"Failed to restore to backup for file {_f}\")\n error_handler(_ecode=e2.__class__.__name__, _exit=True, useCustomText=True,\n customText=f\"Failed to restore to auto-gen backup for file {_f}; the file may no longer be accessible by Quizzing Application.\\n\\nDiagnostic Information: {traceback.format_exc()}\")\n\ndef bw_hex_choose(hex_code: str) -> str:\n hex_code = hex_code.replace('#', '').strip().lower()\n jsr_debug(f\"Querying whether if #000000 should be used or whether if #ffffff be used given #{hex_code}\")\n\n Int = 0 # <0 = black; >0 = white\n allowed = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f']\n conv: dict = {'a': 10, 'b': 11, 'c': 12, 'd': 13, 'e': 14, 'f': 15}; threshold = math.floor(conv['f']/2)\n conv_back = {}\n for k, v in conv.items(): conv_back[v] = k\n\n for i in hex_code:\n if i not in allowed: raise TypeError(f\"Hex code #{hex_code} invalid; character {i} is not one of {allowed}\")\n try:\n tmp = int(i)\n except Exception as e:\n tmp = conv[i]\n\n if tmp >= threshold: Int += 1\n else: Int -= 1\n\n if Int <= 0: # It's dark, return white\n jsr_debug(f\"Returning #ffffff\")\n return \"#ffffff\"\n else: # It's light, return black\n jsr_debug(f\"Returning #000000\")\n return \"#000000\"\n\ndef invert_hex(hex_code: str) -> str: # Warning; inefficient method of doing this; couldn't find a better way\n jsr_debug(f\"Inverting #{hex_code}\")\n hex_code = hex_code.replace(\"#\", '').strip().lower()\n if not len(hex_code) == 6: raise AttributeError(f\"Hex code #{hex_code} is invalid; expected 6 character str\")\n\n allowed: list = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'a', 'b', 'c', 'd', 'e', 'f']\n conv: dict = {'a': 10, 'b': 11, 'c': 12, 'd': 13, 'e': 14, 'f': 15}; max=conv['f']\n conv_back = {}\n for k, v in conv.items(): conv_back[v] = k\n final: str = ''\n\n for i in hex_code:\n if i not in allowed: raise TypeError(f\"Hex code #{hex_code} invalid; character {i} is not one of {allowed}\")\n try:\n tmp = max-int(i)\n except Exception as e:\n tmp = max-conv[i]\n\n final += f\"{tmp}\" if tmp < 10 else conv_back[tmp]\n\n jsr_debug(f\"Inverted hex #{hex_code} to #{final}\")\n\n return final\n\ndef load_file(_f, **flags):\n \"\"\"\n 
**QA_APPS_TU.LOAD_FILE**\n\n    :param _f: filename (str)\n    :param flags: flags (dict)\n    :return: file data\n\n    Loads data from a given file.\n    Default data type: str\n\n    1) **Supported Flags:**\n        * outputDT (type): the data type of the output\n        * dict_keyVal_sep (str): separator between a key and a value\n            - \"key value\" => {key: val}\n            - Example: \"ac #000000\" => {'ac': '#000000'}\n            - Default separator is a space (' ')\n            - The separator is automatically converted to the right data type\n        * skDec (bool): skip decryption (optional; decryption is attempted or skipped automatically)\n        * list_separator (str): list separator\n            - \"item1 item2\" => [item1, item2]\n            - Example: 'item1 item2' with separator ' ' => ['item1', 'item2']\n            - Default separator is a newline character\n            - The separator is automatically converted to the right data type\n\n    2) **Supported Output Data Types:**\n        * str\n        * bytes\n        * list\n        * dict\n    \"\"\"\n\n    # Variables\n    Flags = {\n        'outputDT': [str, (str, type)],\n        'dict_keyVal_sep': [\" \", (\" \", str)],\n        'skDec': [False, (False, bool)],\n        'list_separator': [\"\\n\", (\"\\n\", str)]\n    }\n    supp = [str, bytes, list, dict]  # Supported data types (output)\n\n    # Modify flags\n    Flags = flags_modifier(Flags, flags)\n\n    # Step 0: Basic checks\n    if not os.path.exists(_f): raise IOError(\"File {} does not exist.\".format(_f))\n    if Flags['outputDT'][0] not in supp: raise IOError(\"Type {} not supported by function load_file\".format(Flags['outputDT'][0]))\n\n    # Step 1: read raw (may need to decrypt)\n    raw = None\n    try:\n        if Flags['skDec'][0]: raise QaF.CRError(\"Do not decrypt file; passing\")  # Note the [0]: it reads the flag's value; testing the whole entry (a non-empty list) would always be truthy\n        raw = QaF.decrypt(_f, _owr=False)  # Custom python module qa_fileIOHandler\n        jsr_debug(f\"Read RAW data from {_f} using qa_fileIOHandler.decrypt()\")\n\n    except Exception:\n        raw = open(_f, 'rb').read()\n        jsr_debug(f\"Read RAW data from {_f} using open(_f, 'rb').read()\")\n\n    else:\n        jsr_debug(f\"Successfully read data from file {_f}\")\n\n    # Step 2: Convert to the right data type\n    if raw is None: raise IOError(\"Cannot load file {}\".format(_f))\n\n    dt = type(raw)\n    dt_request = Flags['outputDT'][0]  # Data types\n\n    # Configure flags that need to be adjusted (data types)\n    Conf = ['dict_keyVal_sep', 'list_separator']\n    for i in Conf:\n        __temp1 = Flags[i][0]\n        __to = jsr_conv(__temp1, convertTo=dt_request if dt_request is str or dt_request is bytes else str)\n        Flags[i][0] = __to[-1]  # Keep the converted data only (jsr_conv returns [valid, type, data])\n        jsr_debug(\"Changed Flags[{}] from {} to {}\".format(i, __temp1, __to[-1]))\n\n    if dt is dt_request:\n        jsr_debug(\"dt == dt_request; returning raw ({})\".format(raw))\n        return raw  # Immediate return statement\n\n    def Str(_d):\n        jsr_debug(\"Running load_file.Str() for data ({})\".format(_d))\n\n        if type(_d) is bytes:\n            return str(_d.decode('utf-8')).strip()\n        elif type(_d) is str:\n            return _d.strip()\n        else:\n            raise IOError(\"Type {} not supported by load_file.Str()\".format(type(_d)))\n\n    def Bytes(_d):\n        jsr_debug(\"Running load_file.Bytes() for data ({})\".format(_d))\n\n        if type(_d) is str:\n            return _d.strip().encode('utf-8')\n        elif type(_d) is bytes:\n            return _d\n        else:\n            raise IOError(\"Type {} not supported by load_file.Bytes()\".format(type(_d)))\n\n    def List(_d, _flags):\n        DT = type(_d)\n        if DT is str:  # str => list\n            return _d.strip().split(_flags['list_separator'][0])\n\n        elif DT is bytes:  # bytes => list\n            return Str(_d).split(_flags['list_separator'][0])\n\n        else:\n            raise TypeError(\"Unsupported conversion type {} to {}\".format(dt, list))\n\n    # Conversion\n    data = \"\"  # Output Variable\n\n    if dt is bytes and dt_request is str:  # bytes => str\n        data = Str(raw)\n\n    elif dt is str and dt_request is bytes:  # str => 
bytes\n        data = Bytes(raw)\n\n    elif dt_request is list:  # Union(str, bytes) => list\n        if dt is str:  # str => list\n            data = raw.strip().split(Flags['list_separator'][0])\n\n        elif dt is bytes:  # bytes => list\n            data = Str(raw).split(Flags['list_separator'][0])\n\n        else:\n            raise TypeError(\"Unsupported conversion type {} to {}\".format(dt, list))\n\n    elif dt_request is dict:  # Union(str, bytes) => dict\n        out = {}\n        if dt is str:  # str => dict\n            for i in List(raw, Flags):\n                if len(i.strip()) > 0:  # If there is any data in the given entry\n\n                    key = i.split(Flags['dict_keyVal_sep'][0])[0].strip()  # Get the key (note the [0]: the flag entry holds [value, defaults])\n                    val = i.replace(key, \"\", 1).strip()  # Remove the key from the text to get the value\n\n                    out[key] = val  # Set the value\n\n            data = out  # Set the data variable\n\n        elif dt is bytes:  # bytes => dict\n            for i in List(Str(raw), Flags):\n                if len(i.strip()) > 0:  # If there is any data in the given entry\n\n                    key = i.split(Flags['dict_keyVal_sep'][0])[0].strip()  # Get the key\n                    val = i.replace(key, \"\", 1).strip()  # Remove the key from the text to get the value\n\n                    out[key] = val  # Set the value\n\n            data = out  # Set the data variable\n\n        else:\n            raise TypeError(\"Unsupported conversion type {} to {}\".format(dt, dict))\n\n    return data  # Return the output\n\ndef int_ask(theme: dict, curr: str, ttl: str = \"Enter an Integer\"):\n\n    TK = tk.Tk()\n    root = tk.Frame(TK)\n    root.pack(fill=tk.BOTH, expand=True)\n    TK.config(bg=theme['bg'])\n    root.config(bg=theme['bg'])\n    TK.title(\"Picker\")\n    TK.protocol(\"WM_DELETE_WINDOW\", TK.destroy)\n\n    result = {'raw': f\"{curr}\"}  # Holds the entered value; filled in before the window is destroyed\n\n    entry = tk.Entry(root,\n                     justify=tk.LEFT,\n                     fg=theme['fg'],\n                     bg=theme['bg'],\n                     selectforeground=theme['hg'],\n                     selectbackground=theme['ac']\n                     )\n\n    lbl = tk.Label(root,\n                   text=ttl,\n                   bg=theme['bg'],\n                   fg=theme['fg'])\n\n    def submit_action():\n        result['raw'] = entry.get()  # Capture the value while the widget still exists\n        root.after(0, TK.destroy)\n\n    submit = tk.Button(root,\n                       text=\"Submit\",\n                       bd=theme['border'],\n                       fg=theme['fg'],\n                       bg=theme['bg'],\n                       activeforeground=theme['hg'],\n                       activebackground=theme['ac'],\n                       command=submit_action\n                       )\n\n    lbl.pack(fill=tk.BOTH, expand=True)\n    entry.pack(fill=tk.BOTH, expand=True, padx=10, pady=5)\n    submit.pack(fill=tk.BOTH, expand=True, padx=10, pady=5)\n\n    entry.delete(0, tk.END)\n    entry.insert(0, f\"{curr}\")\n\n    root.lift()\n\n    root.update()\n\n    ws = [root.winfo_width(), root.winfo_height()]\n    ws = [\n        ws[0] * 2 if ws[0] * 2 <= root.winfo_screenwidth() else ws[0],\n        # ws[1] * 2 if ws[1] * 2 <= root.winfo_screenheight() else ws[1]\n        ws[1]  # Don't change the height\n    ]  # Reuse ws to prevent the \"winfo\" functions from running too many times.\n\n    TK.geometry(f\"{ws[0]}x{ws[1]}\")\n\n    root.mainloop()\n\n    # The window has been destroyed by now, so read the captured value instead of touching the dead widgets\n    def keep_digits(string: str) -> int:\n        allowed = [str(i) for i in range(10)]\n        out = ''.join(c for c in string if c in allowed)\n        return int(out) if out else 0  # Fall back to 0 when no digits were entered\n\n    return keep_digits(result['raw'])\n\n# UI Definition\n\nclass UI(threading.Thread):\n\n    def __init__(self, title=\"Quizzing Application Theming Utility\", master=None):\n        # Initialization\n        self.t = threading.Thread\n        self.t.__init__(self)\n\n        # Run crash check\n        ch = CrashHandler()\n        ch.boot_check()\n\n        # Variable initialization\n        self.title = jsr_conv(title, convertTo=str)[-1] if jsr_conv(title, convertTo=str)[0] else \"Quizzing Application Theming Utility\"\n\n        # Main window\n        self.root = tk.Tk(); self.root.withdraw()\n\n        # Set window transform information\n        self.txy = {'x': 0, 'y': 1}  # Coordinate template\n        self.ss = (self.root.winfo_screenwidth(), self.root.winfo_screenheight())  # Screen size\n        self.ds = (700, 750)  # Default size\n        
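# (Editor's note, added for clarity and not present in the original: ds_ratio below expresses the 700x750 default against an assumed 1920x1080 reference screen; self.ws then falls back to these scaled sizes whenever the default does not fit the actual screen.)\n        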
self.ds_ratio = (\n 700/1920, # Width\n 750/1080 # Height\n )\n self.ws = [\n self.ds[self.txy['x']] if self.ds[self.txy['x']] < self.ss[self.txy['x']] else int(self.ss[self.txy['x']]*self.ds_ratio[0]),\n self.ds[self.txy['y']] if self.ds[self.txy['y']] < self.ss[self.txy['y']] else int(self.ss[self.txy['y']]*self.ds_ratio[1])\n ] # Window size (adjustable)\n self.sp = (int(self.ss[self.txy['x']] / 2 - self.ws[self.txy['x']] / 2),\n int(self.ss[self.txy['y']] / 2 - self.ws[self.txy['y']] / 2)) # Position on screen\n\n # Padding x and y\n self.padX = 20; self.padY = 20\n\n # Label frame ratios (after deducting padding)\n self.lbl_fr_r = {\n 'change': 0.75, # Change group gets 75%\n }; self.lbl_fr_r['preview'] = 1-self.lbl_fr_r['change'] # Calculate the preview group's width %%\n\n # UI Variables\n self.titleLbl = tk.Label(self.root, text=\"Theming Utility\") # Main Header Text\n self.change_btnGrp = tk.LabelFrame(self.root, text=\"Change Theme\") # Altering Group Label Frame\n self.save_refresh_btn = tk.Button(self.root, text=\"Refresh\") # Submit and Refresh Button\n self.io_btn_grp = tk.LabelFrame(self.root, text=\"IO\")\n self.io_import = tk.Button(self.io_btn_grp, text=\"Import Theme\")\n self.io_export = tk.Button(self.io_btn_grp, text=\"Export Theme\")\n self.restore_btn = tk.Button(self.root, text=\"Restore to Default\")\n\n # UI - Theme setting variables\n # self.theme_set_canv_frame = tk.Canvas(self.change_btnGrp)\n # self.theme_set_canv = tk.Frame(self.theme_set_canv_frame)\n # self.theme_set_canv_scbar = ttk.Scrollbar(self.theme_set_canv_frame)\n\n self.theme_set_canvas_new = tk.Canvas(self.change_btnGrp, borderwidth=0)\n self.theme_set_frame_new = tk.Frame(self.theme_set_canvas_new)\n self.theme_set_vsb = tk.Scrollbar(self.change_btnGrp, orient=tk.VERTICAL)\n\n # LB Frames - the containers need to have their own instances\n self.bg_lbf = tk.LabelFrame(self.theme_set_frame_new)\n self.fg_lbf = tk.LabelFrame(self.theme_set_frame_new)\n self.ac_lbf = tk.LabelFrame(self.theme_set_frame_new)\n self.hg_lbf = tk.LabelFrame(self.theme_set_frame_new)\n self.font_lbf = tk.LabelFrame(self.theme_set_frame_new)\n self.fsize_para_lbf = tk.LabelFrame(self.theme_set_frame_new)\n self.min_fsize_lbf = tk.LabelFrame(self.theme_set_frame_new)\n self.btn_fsize_lbf = tk.LabelFrame(self.theme_set_frame_new)\n self.border_lbf = tk.LabelFrame(self.theme_set_frame_new)\n\n # Buttons need to have their own instance\n self.bg_change_btn = tk.Button(self.bg_lbf)\n self.fg_change_btn = tk.Button(self.fg_lbf)\n self.ac_change_btn = tk.Button(self.ac_lbf)\n self.hg_change_btn = tk.Button(self.hg_lbf)\n self.font_change_btn = tk.Button(self.font_lbf)\n self.fsize_para_change_btn = tk.Button(self.fsize_para_lbf)\n self.min_fsize_change_btn = tk.Button(self.min_fsize_lbf)\n self.btn_fsize_change_btn = tk.Button(self.btn_fsize_lbf)\n self.border_change_btn = tk.Button(self.border_lbf)\n\n \tself.font_face_preview = tk.Label(self.font_lbf)\n\n self.theme_set_loop_vars = { # Title: [Info, command, Theme dict ID]\n 'Background Color': ['Background color of all application windows', lambda: self.change('bg'), 'bg'],\n 'Foreground Color': ['The color of all text throughout all applications', lambda: self.change('fg'), 'fg'],\n 'Accent Color': ['The accent color can be seen as the background of a pressed button',lambda: self.change('ac'), 'ac'],\n 'Highlight Color': ['The highlight color can be seen as the foreground of a pressed button',lambda: self.change('hg'), 'hg'],\n 'Font Face': ['The font face used throughout 
all applications',lambda: self.change('font'), 'font'],\n 'Font Size (P)': ['The font size used in paragraphs and regular text throughout all applications',\n lambda: self.change('fsize_para'),'fsize_para'],\n 'Font Size (Min)': ['The smallest font size possible in any application (paragraphs excluded)',\n lambda: self.change('min_fsize'),'min_fsize'],\n 'Font Size (Btn)': ['The font size used for (most) buttons throughout the application', lambda: self.change('btn_fsize'),'btn_fsize'],\n 'Button Border Size': ['The size of the line surrounding buttons', lambda: self.change('border'),'border']\n }\n\n # Mapper\n self.tsblkm: tuple = ( # Instance of the keys above ^, btn ref, lbf ref\n ('Background Color', self.bg_change_btn, self.bg_lbf),\n ('Foreground Color', self.fg_change_btn, self.fg_lbf),\n ('Accent Color', self.ac_change_btn, self.ac_lbf),\n ('Highlight Color', self.hg_change_btn, self.hg_lbf),\n ('Font Face', self.font_change_btn, self.font_lbf),\n ('Font Size (P)', self.fsize_para_change_btn, self.fsize_para_lbf),\n ('Font Size (Min)', self.min_fsize_change_btn, self.min_fsize_lbf),\n ('Font Size (Btn)', self.btn_fsize_change_btn, self.btn_fsize_lbf),\n ('Button Border Size', self.border_change_btn, self.border_lbf)\n )\n\n\n self.btns_change_set_commands = []\n\n self.adj_btns: list = []\n\n # Theme (loaded)\n th = Theme.Get()\n self.theme = th.get(\"themedict\")\n\n # Update variables\n self.theme_lbl = []\n self.theme_btn = []\n self.previews = {}\n\n # Last things in self.__init__\n self.start()\n\n # Calculate the boot time\n global boot_start\n boot_e = qa_time.now() # Boot end time (UI initialized yet not drawn...\n boot_time(boot_start, boot_e) # Calculate the boot time\n\n # Show the UI\n self.root.mainloop() # Run the UI\n\n def _on_mousewheel(self, event):\n \"\"\"\n Straight out of stackoverflow\n Article: https://stackoverflow.com/questions/17355902/tkinter-binding-mousewheel-to-scrollbar\n Change: added \"int\" around the first arg\n \"\"\"\n self.theme_set_canvas_new.yview_scroll(int(-1 * (event.delta / 120)), \"units\")\n\n def rm(self): self.root.after(0, self.root.destroy)\n\n def get_theme(self): return self.theme\n\n def update_ui(self):\n jsr_debug(f\"UI.update_ui: Updating the theme with the following theme: {self.theme}.\")\n jsr_debug(f\"Labels (or similar) to update_ui: {self.theme_lbl}; button-like: {self.theme_btn} and previews {self.previews}\")\n\n # Update root\n self.root.config(background=self.theme['bg'])\n\n # Change GRP\n self.theme_set_canvas_new.config(background=self.theme['bg'])\n self.theme_set_frame_new.config(background=self.theme['bg'])\n \n self.root.iconbitmap(\n qaai.icons_ico.get('tu')\n )\n\n jsr_debug(f\"UI.update_ui: Updated root theme\")\n\n # Theme the labels\n for i in self.theme_lbl:\n try:\n jsr_debug(f\"UI.update_ui: Setting theme for label {i}: {i.cget('text')}\")\n i.config(bg=self.theme['bg'], fg=self.theme['fg'])\n except Exception as e:\n error_handler(useCustomText=True,\n customText=f\"Unable to set theme as there was an error in the theme file; please restore the theme file by pressing the button in the bottom right-hand corner;\\n\\nMore diagnostic information: {e.__class__.__name__}: {e}: {traceback.format_exc()}\", ui_ref=self)\n\n # Theme the buttons\n for i in self.theme_btn:\n try:\n jsr_debug(f\"UI.update_ui: Setting theme for button {i}: {i.cget('text')}\")\n i.config(bg=self.theme['bg'],\n fg=self.theme['fg'],\n activebackground=self.theme['ac'],\n activeforeground=self.theme['hg'],\n 
highlightbackground=self.theme['border_color'],\n highlightcolor=self.theme['border_color'],\n highlightthickness=self.theme['border'],\n bd=self.theme['border'])\n\n except Exception as e:\n # error_handler(useCustomText=True,\n # customText=f\"Unable to set theme as there was an error in the theme file; please restore the theme file by pressing the button in the bottom right-hand corner;\\n\\nMore diagnostic information: {e.__class__.__name__}: {e}: {traceback.format_exc()}\", ui_ref=self)\n try: self.dsb_btns()\n except: pass\n\n jsr_debug(f\"Crash report: cannot set element theme for {i}; more information: {e}; {traceback.format_exc()}\")\n\n ch = CrashHandler()\n data = QaDiagnostics.Data()\n ch.log_crash(time=qa_time.now(), info=f\"{traceback.format_exc()}\", func_call=data.theme_integ_key)\n\n sleep(0.5)\n self.root.quit()\n self.t.join(self)\n\n self.restore_btn.config( # Invert Color to ensure that it can be seen\n bg=self.theme['bg'],\n # fg=f\"#{invert_hex(self.theme['bg'])}\"\n fg=bw_hex_choose(self.theme['bg'])\n )\n\n self.font_face_preview.config(text=f\"{self.theme['bg']}\")\n\n # Title LBL Font Face\n self.titleLbl.config(font=(\n \tself.theme['font'],\n \tint(self.titleLbl.cget('font').split(' ')[-1])\n ))\n\n # Previews\n for i in self.previews:\n d = self.previews[i]\n tmp_preview = i.config(\n text=self.theme[d],\n bd=0,\n highlightbackground=f'#{invert_hex(self.theme[d])}',\n background=self.theme[d],\n activebackground=self.theme[d],\n foreground=bw_hex_choose(self.theme[d]),\n activeforeground=bw_hex_choose(self.theme[d])\n )\n jsr_debug(f\"Set theme for preview {i} with dict key {d}\")\n\n def dsb_btns(self):\n for i in self.theme_btn:\n i.config(state=tk.DISABLED)\n\n def enb_btns(self):\n for i in self.theme_btn:\n i.config(state=tk.NORMAL)\n\n def run(self):\n # Event Handlers (temporary)\n def eventHandler(event):\n jsr_debug(f\"Resized to {event.width}x{event.height}\")\n\n # root_handler.root.bind('', eventHandler) # Remove when not debugging events to prevent delay\n\n # Root variables\n gem = f\"{self.ws[self.txy['x']]}x{self.ws[self.txy['y']]}+{self.sp[self.txy['x']]}+{self.sp[self.txy['y']]}\"\n ttl = self.title; ico = qaai.icons_ico['tu']\n\n # Root setup\n self.root.title(ttl) # Set title\n self.root.geometry(gem) # Set transformation\n self.root.iconbitmap(ico) # Set icon\n self.root.protocol(\"WM_DELETE_WINDOW\", lambda: closeapp(self))\n\n jsr_debug(f\"Using title {ttl}\") # Debug (title)\n jsr_debug(f\"Using geometry {gem}\") # Debug (size)\n jsr_debug(f\"Setting icon to '{ico}'\") # Debug (icon)\n # Element Placement + configuration\n\n # Label frame c and p width calculation\n w = int(self.ws[\n 0] - 3 * self.padX) # window size - 3* padx ( + + )\n c_w = int(w * self.lbl_fr_r['change']); p_w = int(w * self.lbl_fr_r['preview'])\n\n jsr_debug(f\"\"\"c_w + p_w values: \n cw = {c_w}, \n pw = {p_w}, \n total = {c_w + p_w}, \n expected {w} \n (cw:pw expected = {self.lbl_fr_r['change']}:{self.lbl_fr_r['preview']}; \n cw:pw actual = {c_w/(c_w+p_w)}:{1-(c_w/(c_w+p_w))})\"\"\")\n\n # Title Label\n self.titleLbl.pack(fill=tk.BOTH, expand=True)\n tfs = math.floor(self.ws[self.txy['x']]/len(self.titleLbl.cget('text'))) # Calculate the font size\n jsr_debug(f\"Using font face {self.theme['font']} and size {tfs} for title\")\n self.titleLbl.config(font=(self.theme['font'], tfs)) # Set font attributes\n self.theme_lbl.append(self.titleLbl)\n self.theme_lbl.append(self.change_btnGrp)\n\n # Change Theme - Label Frame\n # Canvas\n 
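# (Editor's note, added for clarity and not present in the original: the canvas/frame/scrollbar trio below follows the standard Tkinter scrollable-frame pattern: the widgets live in the inner frame, the frame is embedded in the canvas via create_window, and onFrameConfig refreshes the canvas scrollregion whenever the frame is resized.)\n        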
self.change_btnGrp.pack(fill=tk.BOTH, expand=1, padx=(self.padX, int(self.padX/2)), pady=self.padY, side=tk.LEFT)\n\n # Change Container + Frame + SCBar\n # self.theme_set_canvas_new = tk.Canvas\n # self.theme_set_frame_new = tk.Frame\n # self.theme_set_vsb = tk.Scrollbar\n\n self.theme_set_vsb.configure(command=self.theme_set_canvas_new.yview)\n self.theme_set_canvas_new.configure(\n yscrollcommand=self.theme_set_vsb.set,\n width=int(c_w-0.1*c_w)\n )\n\n self.theme_set_vsb.pack(fill=tk.Y, side=tk.RIGHT, expand=False)\n self.theme_set_canvas_new.pack(fill=tk.BOTH, expand=True, side=tk.LEFT)\n self.theme_set_canvas_new.create_window(\n (0,0),\n window=self.theme_set_frame_new,\n anchor=\"nw\",\n tags=\"self.theme_set_frame_new\"\n )\n\n self.theme_set_frame_new.config(width=self.theme_set_canvas_new.cget('width'))\n\n jsr_debug(f\"\"\"\nself.theme_set_frame_new.width = {self.theme_set_frame_new.cget('width')}\nself.theme_set_canvas_new.width = {self.theme_set_canvas_new.cget('width')}\n\"\"\")\n\n # IO\n self.io_btn_grp.pack(fill=tk.BOTH, expand=1, padx=self.padX, pady=(self.padY, int(self.padY / 2)))\n self.theme_lbl.append(self.io_btn_grp)\n\n self.io_import.pack(fill=tk.BOTH, expand=1, padx=int(self.padX/2), pady=(10, 5))\n self.io_export.pack(fill=tk.BOTH, expand=1, padx=int(self.padX/2), pady=(10, 5))\n\n self.theme_btn.append(self.io_import)\n self.theme_btn.append(self.io_export)\n\n self.io_import.config(command=self.io_import_func)\n self.io_export.config(command=self.io_export_func)\n\n # Submit + refresh Button\n self.save_refresh_btn.pack(fill=tk.BOTH, expand=1, padx=self.padX, pady=int(self.padY / 2))\n self.theme_btn.append(self.save_refresh_btn)\n\n # self.save_refresh_btn.config(command=self.save)\n self.save_refresh_btn.config(command=self.reload) # Button only used to refresh now.\n\n # Restore Button\n self.restore_btn.pack(fill=tk.BOTH, expand=1, padx=self.padX, pady=(int(self.padY / 2), self.padY))\n self.theme_btn.append(self.restore_btn)\n\n self.restore_btn.config(command=self.restore)\n\n # Set the widths\n self.change_btnGrp.config(width=c_w)\n self.save_refresh_btn.config(width=p_w)\n\n # Canvas Objects\n curr_ind: int = 0\n max_ind: int = len(self.theme_set_loop_vars)-1\n info_vars = self.theme_set_loop_vars\n\n def configute_chng_grp(info, key, root_handler, btn_inst, lbf_inst):\n\n py: tuple = (int(root_handler.padY / 2), int(root_handler.padY / 2)) if 0 < curr_ind < max_ind else ((root_handler.padY, int(root_handler.padY / 2)) if curr_ind == 0 else (int(root_handler.padY / 2), root_handler.padY))\n\n lbf_inst.config(text=key, width=int(\n #self.theme_set_frame_new.cget('width') - root_handler.padX\n root_handler.theme_set_frame_new.cget('width')\n ))\n lbf_inst.pack(fill=tk.BOTH, expand=True, padx=root_handler.padX, pady=py)\n\n jsr_debug(f\"Set width for {lbf_inst} ({lbf_inst.cget('text')}) to (expected: {root_handler.theme_set_frame_new.cget('width')}, actual: {lbf_inst.cget('width')})\")\n\n temp_lbl = tk.Label(lbf_inst, text=info.get(key)[0], anchor=tk.W)\n temp_lbl.pack(fill=tk.BOTH, expand=True)\n\n btn_inst.config(text=f'Set {key}', command=info.get(key)[-2])\n btn_inst.pack(fill=tk.BOTH, expand=True, side=tk.LEFT)\n\n jsr_debug(f\"Set command for button {btn_inst} to {info.get(key)[-2]}\")\n\n if info.get(key)[-1] in Theme.colorIDs:\n tmp_preview = tk.Button(lbf_inst,\n text=root_handler.theme[info[key][-1]].strip('{').strip('}')\n )\n tmp_preview.pack(fill=tk.BOTH, expand=True, side=tk.RIGHT)\n root_handler.previews[tmp_preview] = info[key][-1]\n\n 
else:\n                if key == 'font':\n                    root_handler.font_face_preview.pack(fill=tk.BOTH, expand=True, side=tk.RIGHT, padx=(0, int(root_handler.padX / 2)))\n\n                else:\n                    temp_lbl_2 = tk.Label(lbf_inst,\n                                          text='Set to: {}'.format(root_handler.theme.get(info[key][-1]))\n                                          )\n                    temp_lbl_2.pack(fill=tk.BOTH, expand=True, side=tk.RIGHT, padx=(0, int(root_handler.padX / 2)))\n                    root_handler.theme_lbl.append(temp_lbl_2)\n\n            root_handler.theme_lbl.append(lbf_inst)\n            root_handler.theme_lbl.append(temp_lbl)\n            root_handler.theme_btn.append(btn_inst)\n\n        # Change BTN Grps\n        for i in self.tsblkm:\n            configute_chng_grp(info_vars,  # Information Dictionary\n                               i[0],  # Key (info_vars key)\n                               self,  # Root Instance\n                               i[1],  # Button Instance\n                               i[-1]  # LBF Instance\n                               )\n\n        # Set the widths\n        self.change_btnGrp.config(width=c_w)\n        self.save_refresh_btn.config(width=p_w)\n\n        # Event Handler Binds\n        self.theme_set_frame_new.update()  # Update the Frame object\n        self.theme_set_frame_new.bind(\"<Configure>\", self.onFrameConfig)\n        self.theme_set_canvas_new.bind_all(\"<MouseWheel>\", self._on_mousewheel)\n\n        # Final code in UI.run\n        jsr_debug(\"UI.run: set root_handler.theme_lbl to {} and root_handler.theme.btn to {}.\".format(self.theme_lbl, self.theme_btn))\n\n        # Update the theme of the elements\n        self.root.deiconify()\n        self.update_ui()\n\n    def reload(self):\n        Theme.Get().refresh_theme()\n        self.theme = Theme.Get().get('theme')\n        self.update_ui()\n\n    def onFrameConfig(self, event):  # for the scrollbar\n        self.theme_set_canvas_new.configure(\n            scrollregion=self.theme_set_canvas_new.bbox(\"all\")\n        )\n\n    # Button Functions\n\n    def change(self, theme_key: str):  # Master handler of all changes\n        jsr_debug(f\"Running change command for key {theme_key}\")\n        new = None\n\n        if theme_key in Theme.colorIDs:\n            new = tkcolor.askcolor(color=self.theme.get(theme_key))[-1]\n            jsr_debug(f\"New color for id {theme_key}: {new}\")\n\n        elif theme_key == 'font':\n            new = QaFPA.FontDialog()['font']\n            jsr_debug(f\"New font: {new}\")\n\n        elif theme_key in Theme.intIDs:\n            id = theme_key\n            for i in self.theme_set_loop_vars:\n                if self.theme_set_loop_vars[i][-1] == theme_key:\n                    id = i\n                    break\n\n            new = int_ask(self.theme, self.theme[theme_key], f\"Set {id}\")\n            jsr_debug(f\"New int for theme id {id}: {new}\")\n\n        if new is None: return\n        if new == self.theme[theme_key]: return  # Save time; nothing changed\n        else:\n            self.theme[theme_key] = new\n            self.save()\n\n    def io_import_func(self):\n        jsr_debug(f\"Attempting to import theme\")\n\n        # Step 1: get the file\n        filename = tkfd.askopenfilename(filetypes=[(\"Quizzing Application Theme File\", f\"*.{Theme.extenstion}\")])\n        jsr_debug(f\"Selected file: {filename}\")\n\n        if filename.strip() == \"\":\n            error_handler(useCustomText=True, customText='No file selected; aborting.')\n            jsr_debug(\"Cancelled importing operation\")\n            return  # If no file is selected\n\n        # Step 2: Load data\n        theme = {}\n\n        try:\n            # Variables\n            raw: str = open(filename, 'r').read().strip()  # Raw (str)\n            raw_l: list = raw.split(\"\\n\")  # Raw (list)\n            sep = \" \"  # Separator\n            loaded = {}\n\n            # Handle\n            for i in raw_l:\n                if len(i.strip()) > 0:\n                    if not i.strip()[0] == \"#\":\n                        # If it is not a comment\n                        key = i.split(sep)[0].strip()\n                        val = i.replace(key, \"\", 1).strip()\n                        loaded[key] = val\n\n            valid = Theme.check_theme_integ(loaded, Theme.default)  # make sure all keys from default exist in the loaded dict\n\n            if not valid: raise IOError('Failed to validate file \"{}\"'.format(filename))\n            else: theme = loaded\n\n        except Exception as e:\n            error_handler(useCustomText=True,\n                          customText=f\"Cannot load theme 
data from file '{filename}'; aborting import process.\\n\\nDiagnostic information: {traceback.format_exc()}\")\n\n            return None\n\n        jsr_debug(f\"Theme file valid\")\n\n        # Step 2.5 (ish): Confirm\n        global apptitle\n        if not tkmsb.askyesno(apptitle, f\"Are you sure you want to overwrite your theme data? This action cannot be undone.\"):\n            jsr_debug(f\"User aborted theme import\")\n            return\n\n        jsr_debug(f\"User confirmed theme import; continuing\")\n\n        # Step 3: Set data (copy)\n        if not os.path.exists(Theme.theme_filename):  # If the theme file does not exist...\n            jsr_debug(f\"Theme file does not exist; copying...\")\n            shutil.copyfile(filename, Theme.theme_filename)  # Create and copy data\n\n        else:\n            jsr_debug(f\"Theme file exists; overwriting data...\")\n            open(Theme.theme_filename, 'wb').write(open(filename, 'rb').read())  # Overwrite data as it is valid\n\n        # Update\n        th = Theme.Get()\n        th.refresh_theme()\n        self.theme = th.get('theme')\n        self.update_ui()\n\n        jsr_debug(f\"Refreshed UI theme\")\n        tkmsb.showinfo(apptitle, \"Successfully imported and refreshed theme.\")\n\n    def io_export_func(self):\n        jsr_debug(f\"Exporting theme\")\n        global apptitle\n\n        export_filename_suggestion = f\"{random.randint(0, 9999999)}.{Theme.extenstion}\"\n        jsr_debug(f\"Suggested filename: {export_filename_suggestion}\")\n\n        allowed_file_types = [\n            ('Quizzing Application Theme File', f'*.{Theme.extenstion}')\n        ]\n\n        filename = tkfd.asksaveasfilename(filetypes=allowed_file_types, defaultextension=f'.{Theme.extenstion}', initialfile=export_filename_suggestion)\n\n        # If aborted / canceled\n        if filename is None or filename.strip() == \"\":\n            error_handler(useCustomText=True, customText=\"Canceled export operation.\")\n            jsr_debug(\"Cancelled exporting operation\")\n            return  # Abort: without this return the code would try to copy to an empty filename\n\n        jsr_debug(f\"Exporting theme data to '{filename}'\")\n\n        # Copy the file\n        try:  # Try\n            shutil.copyfile(Theme.theme_filename, filename)\n\n        except Exception as e:  # Failed\n            jsr_debug(f\"Cannot export file; more info: {e.__class__.__name__}: {e}: {traceback.format_exc()}\")\n            error_handler(_e=traceback.format_exc())\n\n        else:  # Success\n            jsr_debug(f\"Exported theme file to '{filename}'\")\n            tkmsb.showinfo(apptitle, f'Successfully exported theme to file \"{filename}\"')\n\n    def restore(self):\n        jsr_debug(f\"UI.restore (reset theme to default)\")\n        # Load the original theme into the in-memory variable\n        # Step 1: load the new data in qa_theme's memory\n        og_theme = \"{}\\\\{}\\\\{}\".format(os.getcwd().replace('/', '\\\\'), qaai.ftsFolder, qaai.themeFilename)\n        jsr_debug(f\"Reading theme from file: '{og_theme}'\")\n        # Theme.load_theme(file=og_theme)\n        # Step 2: refresh qa_theme's internal dictionary\n        g = Theme.Get()\n        g.refresh_theme(__loadFrom__=og_theme)\n        # Step 3: load the new theme into this app's theme variable\n        self.theme: dict = g.get('theme')\n        jsr_debug(f\"Restore routine: reset theme to {self.theme}\")\n\n        # Then call the save function that will overwrite the file and refresh the UI\n        # Step 4: Call self.save\n        self.save()  # Reads from self.theme, overwrites and refreshes the UI\n\n    def save(self):\n        jsr_debug(f\"Saving theme\")\n\n        self.save_theme()  # Save the theme\n        self.update_ui()  # Update the theme\n\n    def save_theme(self):\n        \"\"\"\n        :return: None\n\n        Please save the new theme to the theme dictionary before calling this function.\n        The appropriate dictionary is UI.theme (retrieved by UI.get_theme())\n        \"\"\"\n\n        # Load Variables\n        theme = self.get_theme()\n        theme_file = Theme.theme_filename\n\n        jsr_debug(f\"Saving the following theme: {theme}\")\n\n        # Step 1: Checks\n        if not 
Theme.check_theme_integ(theme, Theme.default):  # Check if the theme is valid\n            error_handler(useCustomText=True, customText=f\"Failed to save theme to file {theme_file}\")\n\n        if not os.path.exists(theme_file):\n            error_handler(useCustomText=True,\n                          customText=f\"Failed to save theme to file {theme_file} as the theme file does not exist.\")\n\n        # Step 2: Construct the data\n        # Credit >> Comments >> Data\n\n        out = \"\"  # Output\n\n        # Credit\n        out = f\"Credit {jsr_conv(self.theme['Credit'], convertTo=str)[-1]}\"\n        jsr_debug(f\"Set output data to '{out}'\")\n\n        # Comments\n        out += f\"\\n{jsr_conv(Theme.default_comment_header, convertTo=str)[-1]}\"\n        jsr_debug(f\"Set output data to '{out}'\")\n\n        # Data\n        for i in self.theme:\n            if not i.lower() == \"credit\":\n                jsr_debug(f\"Adding data for key {i}\")\n                out += f\"\\n{jsr_conv(i, convertTo=str)[-1]} {jsr_conv(self.theme[i], convertTo=str)[-1]}\"\n                jsr_debug(f\"Set output data to '{out}'\")\n\n        # Step 3: Save data\n        secure_save(theme_file, out)\n\nif not os.path.exists(Theme.theme_filename):  # If the theme file does not exist.\n    jsr_debug(f\"No theme file found; aborting.\")\n    error_handler(_exit=True,\n                  _ecode=\"No Theme File Found\",\n                  useCustomText=True,\n                  customText=f\"Unable to launch application as the theme file does not exist; please run the FTSRA utility.\\n\\nExpected location: {Theme.theme_filename}.\")\n#\n# tkmsb.showinfo(\n#     apptitle,\n#     f\"Note that only the title is affected by changes in font in this utility.\"\n# )\n\nui = UI(apptitle)\n","sub_path":"QA_apps_tu.py","file_name":"QA_apps_tu.py","file_ext":"py","file_size_in_byte":74696,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"268831150","text":"# -*- coding: utf-8 -*-\r\nfrom matplotlib.pylab import *\r\nfrom matplotlib.pylab import cm\r\nimport numpy as np\r\n\r\na = 1  # height\r\nb = 1  # width\r\nNx = 30  # number of intervals in x\r\nNy = 30  # number of intervals in y\r\n\r\ndx = b/Nx\r\ndy = a/Ny\r\n# dx and dy must be equal!\r\n\r\nif dx != dy:\r\n    print(\"ERROR: dx is not equal to dy\")\r\n    exit(-1)\r\n\r\n# Convenience function to compute the coordinates of point (i,j)\r\ncoords = lambda i,j:(dx*i,dy*j)\r\nx,y = coords(4,2)\r\n\r\n\r\ndef imshowbien(u):\r\n\timshow(u.T[Nx::-1,:],cmap=cm.coolwarm, interpolation=\"bilinear\")\r\n\tcbar=colorbar(extend='both', cmap=cm.coolwarm)\r\n\tticks = arange(0,35,5)\r\n\tticks_Text=[\"{}\".format(deg) for deg in ticks]\r\n\tcbar.set_ticks(ticks)\r\n\tcbar.set_ticklabels(ticks_Text)\r\n\tclim(0,30)\r\n\r\n\txlabel('b')\r\n\tylabel('a')\r\n\txTicks_N = arange(0,Nx+1,3)\r\n\tyTicks_N = arange(0,Ny+1,3)\r\n\txTicks = [coords(i,0)[0] for i in xTicks_N]\r\n\tyTicks = [coords(0,i)[1] for i in yTicks_N]\r\n\txTicks_Text=[\"{0:.2f}\".format(tick) for tick in xTicks]\r\n\tyTicks_Text=[\"{0:.2f}\".format(tick) for tick in yTicks]\r\n\r\n\txticks(xTicks_N, xTicks_Text, rotation='vertical')\r\n\tyticks(yTicks_N, yTicks_Text)\r\n\tmargins(0.2)\r\n\tsubplots_adjust(bottom=0.15) \r\n\r\n\r\nu_k = zeros((Nx+1, Ny+1), dtype=double)\r\nu_km1 = zeros((Nx+1, Ny+1), dtype=double)\r\n\r\n# Initial boundary condition\r\nu_k[:,:] = 20  # in every cell\r\n\r\n# Parameters\r\ndt = 0.01  # s\r\nK = 79.5  # m^2/s\r\nc = 450  # J/Kg*C\r\nrho = 7800  # Kg/m^3\r\n\r\nalpha = (K*dt)/(c*rho*(dx**2))\r\n\r\n# Time units\r\nminuto = 60\r\nhora = 60*minuto\r\ndia = 24*hora\r\n\r\n# NOTE: dt is redefined below, but alpha above was computed with the earlier dt = 0.01 s,\r\n# so the scheme advances with a smaller effective step than the time labels suggest.\r\ndt = 1*minuto\r\ndnext_t = 0.5*hora\r\n\r\nnext_t = 0\r\nframenum = 0\r\n\r\nT = 1*dia\r\nDays = 1*T  # How many days to simulate\r\n\r\n# Vectors to accumulate the temperature at the points of interest\r\nsup = zeros(int32(Days/dt))\r\nu_P1 = zeros(int32(Days/dt))\r\nu_P2 = zeros(int32(Days/dt))\r\nu_P3 = zeros(int32(Days/dt))\r\n\r\n\r\ndef truncate(n, decimals=0):\r\n    multiplier = 10**decimals\r\n    return int(n*multiplier)/multiplier\r\n\r\n# Time loop\r\nfor k in range(int32(Days/dt)):\r\n    t = dt*(k+1)\r\n    dias = truncate(t/dia,0)\r\n    horas = truncate((t-dias*dia)/hora,0)\r\n    minutos = truncate((t-dias*dia - horas*hora)/minuto,0)-1\r\n    titulo = \"k = {0:05.0f}\".format(k) + \" t = {0:02.0f}d {1:02.0f}h {2:02.0f}m \".format(dias,horas,minutos)\r\n\r\n    # Essential BCs; they must be re-applied on every iteration\r\n    u_k[0, :] = 20   # left\r\n    u_k[:, 0] = 20   # bottom\r\n    u_k[:, -1] = 0   # top\r\n    u_k[-1, :] = 0   # right\r\n    # (f(x+h) - f(x))/dx = something\r\n\r\n    for i in range(1, Nx):\r\n        for j in range(1, Ny):\r\n\r\n            # Finite-difference scheme for 2-D\r\n            # Laplacian\r\n            nabla_u_k = (u_k[i-1,j] + u_k[i+1,j] + u_k[i,j-1] + u_k[i,j+1] - 4*u_k[i,j]) / dx**2\r\n\r\n            # Forward Euler\r\n            u_km1[i,j] = u_k[i,j] + alpha*nabla_u_k\r\n    # Advance the solution to k+1 (copy, so u_k and u_km1 remain independent arrays)\r\n    u_k = u_km1.copy()\r\n\r\n    # Apply the BCs again, just in case\r\n    u_k[0, :] = 20  # left\r\n    u_k[:, 0] = 20  # bottom\r\n    u_k[:, -1] = 0  # top\r\n    u_k[-1, :] = 0  # right\r\n\r\n    u_P1[k] = u_k[int(Nx/2), int(Ny/2)]\r\n    u_P2[k] = u_k[int(Nx/2), int(3*Ny/4)]\r\n    u_P3[k] = u_k[int(3*Nx/4), int(3*Ny/4)]\r\n    sup[k] = u_k[int(Nx/2), -1]\r\n\r\n    # Plot every dnext_t\r\n    if t > next_t:\r\n        figure(1)\r\n        imshowbien(u_k)\r\n        title(titulo)\r\n        savefig(\"Caso_1_img/frame_{0:04.0f}.png\".format(framenum))\r\n        framenum += 1\r\n        next_t += dnext_t\r\n        close(1)\r\n\r\nfigure(2)\r\nplot(range(int32(Days/dt)), sup, label=\"surface\")\r\nplot(range(int32(Days/dt)), u_P1, label=\"P1\")\r\nplot(range(int32(Days/dt)), u_P2, label=\"P2\")\r\nplot(range(int32(Days/dt)), u_P3, label=\"P3\")\r\ntitle(\"Temperature evolution at the probe points\")\r\nlegend()\r\n\r\n\r\nshow()\r\n","sub_path":"Caso_1.py","file_name":"Caso_1.py","file_ext":"py","file_size_in_byte":3889,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"267419406","text":"'''\nsample application\n'''\n# load libraries\nimport json\nimport time\nimport copy\nimport base64\nfrom io import BytesIO\nfrom PIL import Image\nimport numpy as np\nfrom flask import Flask, render_template, request, Response\nfrom keras.models import load_model\nfrom keras.preprocessing.image import img_to_array\nimport tensorflow as tf\nimport websocket\n\n# generate application instance\napp = Flask(\"Yahoo Hack Day 2017\")\n\n# load model\nmodel = load_model(\"models/detect_model.hdf5\")\nmodel._make_predict_function()\ntf_graph = tf.get_default_graph()\n\n# define flags\nflag_alert = False\nflag_start_time = None\n\n# ng_image\nng_info = {\"time\": None, \"image\": None}\n\ndef fix_base64_to_np(b64_img):\n    '''\n    convert data\n    '''\n    str_img = b64_img.replace('data:image/jpeg;base64,', '')\n    # print(str_img)\n    buf = BytesIO(base64.b64decode(str_img))#.encode('utf-8')\n    img = Image.open(buf)\n    img = img.resize((100, 100))\n    img = img.convert('RGB')\n    data = np.array([np.asarray(img)])\n    # data = img_to_array(img)\n    # print(data.shape)\n    return data\n\n\ndef validate_images(images):\n    '''\n    if the car is near, this method returns True\n    '''\n    ## base64 convert\n    cnt = 0\n    predicts = []\n    for image in images:\n        data = fix_base64_to_np(image)\n        with tf_graph.as_default():\n            result = model.predict_classes(data)\n        # 
print(result)\n cnt += result\n if result == 0:\n predicts.append('near')\n elif result == 1:\n predicts.append('far')\n else:\n predicts.append('unknown')\n\n # if near, return true\n result_flag = cnt > len(images)\n return result_flag, predicts\n\n\n@app.route(\"/api/detect\", methods=[\"POST\"])\ndef detect():\n '''\n detection car\n '''\n request_json = request.json\n if request.method == 'POST':\n img = request_json.get('image')\n data = fix_base64_to_np(img)\n with tf_graph.as_default():\n pred = model.predict_classes(data)\n if pred == 0:\n msg = 'near'\n ng_info[\"time\"] = time.time()\n ng_info[\"image\"] = img\n elif pred == 1:\n msg = 'far'\n else:\n msg = pred\n body = json.dumps({\"predict\": msg, \"image\": img})\n response = Response(body, status=200, mimetype='application/json')\n else:\n body = json.dumps({\"message\": \"bad request.\"})\n response = Response(body, status=400, mimetype='application/json')\n\n return response\n\n\n\n@app.route(\"/api/reckless_driving/analyze\", methods=[\"GET\", \"POST\"])\ndef analyze():\n '''\n analyze driving\n '''\n # flag check\n global flag_alert\n global flag_start_time\n \n request_json = request.json\n\n if request.method == 'POST':\n # get images\n req_time = request_json.get('current_time')\n images = request_json.get('images')\n # print(request_json)\n # check\n result_flag, predicts = validate_images(images)\n # result_flag = False\n # images = [image+\"_ok\" for image in images]\n\n # alert check\n alert_signal = False\n reckless_level = 0\n if result_flag and flag_alert:\n diff_time = time.time() - flag_start_time\n # alert check: if spend over 10sec, this api return the alert signal.\n if diff_time > 10.0:\n alert_signal = True\n reckless_level = 5\n # flag time check\n elif flag_alert:\n flag_start_time = time.time()\n flag_alert = True\n # flag reset\n else:\n flag_start_time = None\n flag_alert = False\n\n body = json.dumps({\"status\": True, \"alert_signal\": alert_signal, \"reckless_level\": reckless_level, \"predicts\": predicts, \"current_time\": req_time, \"images\": images})\n response = Response(body, status=200, mimetype='application/json')\n else:\n body = json.dumps({\"message\": \"bad request.\"})\n response = Response(body, status=400, mimetype='application/json')\n flag_alert = False\n\n return response\n\n\n@app.route(\"/api/reckless_driving/report\", methods=[\"POST\"])\ndef report():\n '''\n report\n '''\n request_json = request.json\n\n if request.method == \"POST\":\n images = request_json.get(\"images\")\n images = [image+\"_ok\" for image in images]\n # images processing\n body = json.dumps({\"status\": True, \"images\": images})\n response = Response(body, status=200, mimetype=\"application/json\")\n else:\n body = json.dumps({\"message\": \"bad request\"})\n response = Response(body, status=400, mimetype=\"application/json\")\n\n return response\n\n\n@app.route(\"/api/ng_check\")\ndef ng_check():\n '''\n return ng_checklist\n '''\n access_time = time.time()\n if ng_info.get('time') is None or (access_time - ng_info.get('time')) > 60:\n body = {\"time\": None, \"image\": None, \"is_ng\": False}\n else:\n body = copy.deepcopy(ng_info)\n body['is_ng'] = True\n response = Response(json.dumps(body), status=200, mimetype=\"application/json\")\n\n return response\n\n\n@app.route(\"/test\")\ndef index():\n '''\n return testpage\n '''\n return render_template('test.html')\n\n\n@app.route(\"/test2\")\ndef test2():\n '''\n return detect test page\n '''\n return 
render_template('test2.html')\n\n\n@app.route(\"/sample\")\ndef sample():\n '''\n sample output\n '''\n return '''\n \n \n \n \n
        <h1>Hello, World!</h1>\n
    \n \n \n '''\n\nif __name__ == \"__main__\":\n '''\n app run\n '''\n app.run(host=\"0.0.0.0\", debug=True)","sub_path":"server/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":5756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"354029727","text":"\"\"\"\nManages the Sequence ability\n\n--\n\nAuthor : DrLarck\n\nLast update : 04/03/20 (DrLarck)\n\"\"\"\n\n# dependancies\nimport asyncio\n\n# util\nfrom utility.cog.combat_system.damage.calculator import Damage_calculator\nfrom utility.cog.character.ability.ability import Ability\n\nclass Sequence_1(Ability):\n \"\"\"\n Global sequence ability\n \"\"\"\n\n def __init__(self, client, ctx, caster, target, team_a, team_b):\n Ability.__init__(self, client, ctx, caster, target ,team_a, team_b)\n\n self.name = \"Sequence\"\n self.description = \"The unit performs a **Sequence** attack, inflicting physical damage.\"\n self.icon = \":punch:\"\n self.id = 1\n\n # targeting\n self.need_target = True\n self.target_enemy = True\n\n # damage\n self.damage.physical = 100\n\n async def set_tooltip(self):\n self.tooltip = f\"Inflicts **{int(self.caster.damage.physical_min):,}** - **{int(self.caster.damage.physical_max):,}** :punch: to the target.\"\n \n async def use(self):\n damager = Damage_calculator()\n damage = await self.get_damage()\n\n display = f\"__Move__ : {self.icon}`{self.name}`\\n\"\n\n display += await damager.inflict_damage(self.caster, self.target, damage)\n\n return(display)","sub_path":"utility/cog/character/ability/list/_1_sequence.py","file_name":"_1_sequence.py","file_ext":"py","file_size_in_byte":1261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"535435830","text":"\"\"\"\n 华为手机搜索测试用例\n\"\"\"\nimport time\nfrom selenium import webdriver\n\ndriver = webdriver.Chrome(executable_path=\"chromedriver.exe\")\ndriver.maximize_window() # 浏览器全屏\ndriver.get(\"http://132.232.44.158:9999/shopxo/\")\n\ndriver.find_element_by_id('search-input').send_keys(\"华为\")\ndriver.find_element_by_id('ai-topsearch').click()\ntime.sleep(3)\n\n# 稍微智能一点的\ndriver.implicitly_wait(6)\n\n# 判断title是否正确\ntitle = \"华为 - ShopXO企业��B2C电商系统提供商 - 演示站点\"\nassert driver.title == title\n\nprint(\"华为搜索测试用例执行成功!\")","sub_path":"SeleniumTest/demo2.py","file_name":"demo2.py","file_ext":"py","file_size_in_byte":602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"450896563","text":"from rest_framework import permissions\nfrom rest_framework import viewsets\nfrom prototype.models.unit import Unit, UnitSerializer\nfrom rest_framework.response import Response\nfrom rest_framework.authentication import TokenAuthentication, SessionAuthentication\n\nclass UnitViewSet(viewsets.ModelViewSet):\n queryset = Unit.objects.all()\n serializer_class = UnitSerializer\n permission_classes = (permissions.IsAuthenticated,)\n authentication_classes = (SessionAuthentication, TokenAuthentication,)\n\n def list(self, request):\n queryset_to_use = {}\n if 'course_id' in request.QUERY_PARAMS:\n queryset_to_use = Unit.objects.filter(course_id=request.QUERY_PARAMS['course_id'])\n elif request.user.is_superuser: # May want to change this to user.is_staff\n queryset_to_use = self.queryset\n\n serializer = UnitSerializer(queryset_to_use, many=True)\n return 
\n","sub_path":"prototype/views/unit.py","file_name":"unit.py","file_ext":"py","file_size_in_byte":942,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"296660828","text":"import Model\nimport json    # json and random are used below; import them explicitly instead of relying on utils\nimport random\nfrom Population import Population\nfrom PopulationGroup import PopulationGroup\nfrom utils import *\nfrom Virus import Virus\n\nMOBILITY_FACTOR_MIN = 0.1\nMOBILITY_FACTOR_MAX = 0.9\n\nINFECTION_RATE_FACTOR_MIN = 0.25\nINFECTION_RATE_FACTOR_MAX = 0.9\n\nMORTALITY_FACTOR_MIN = 0.1\nMORTALITY_FACTOR_MAX = 0.3\n\nSUSCEPTIBLE_BETA = 0.5\nSUSCEPTIBLE_DELTA = 0.1\n\n\nclass ModelGenerator:\n    def __init__(self):\n        # populations are filled in by setPopulations()\n        self.Populations = []\n\n    def setRandomParameters(self, nbPopulationsGroups=1, nbVariants=1):\n        with open('ModelConfigs.json', 'r') as f:\n            config = json.load(f)\n\n        for i in range(0, nbPopulationsGroups):\n            mobilityFactor = random.uniform(MOBILITY_FACTOR_MIN, MOBILITY_FACTOR_MAX)\n            config['PopulationsGroups'][i] = PopulationGroup(contactMatrix=[], mobilityFactor=mobilityFactor)\n\n        for j in range(0, nbVariants):\n            infectionRate = random.uniform(INFECTION_RATE_FACTOR_MIN, INFECTION_RATE_FACTOR_MAX)\n            mortality = random.uniform(MORTALITY_FACTOR_MIN, MORTALITY_FACTOR_MAX)\n            config['Virus'][j] = Virus(infectionRate, mortality, None)\n        self.setPopulations()\n\n    def setParameters(self, PopulationsGroups=[], Variants=[]):\n        with open('ModelConfigs.json', 'r') as f:\n            config = json.load(f)\n\n        for i in range(0, len(PopulationsGroups)):\n            config['PopulationsGroups'][i] = PopulationsGroups[i]\n\n        for i in range(0, len(Variants)):\n            config['Virus'][i] = Variants[i]\n        self.setPopulations()\n\n    def setPopulations(self):\n        with open('ModelConfigs.json', 'r') as f:\n            config = json.load(f)\n        variants = config['Virus']\n\n        for variant in variants:\n            name = variant.get('name')\n            self.Populations.append(Population(('infected_' + name), 2, [], []))\n\n        self.fillImportFlows()\n\n    def loadSettings(self):\n        with open('ModelConfigs.json', 'r') as f:\n            config = json.load(f)\n        self.populationsGroups = config['PopulationsGroups']\n        self.variants = config['Virus']\n\n","sub_path":"Scenario 1/GraphBuilder/ModelGenerator.py","file_name":"ModelGenerator.py","file_ext":"py","file_size_in_byte":2205,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"177863679","text":"import os\nimport codecs\n\nfrom bs4 import BeautifulSoup\n\ndef carregar_HTML(arquivo):\n    \"\"\"Load an HTML file and strip its tags\"\"\"\n    with open(arquivo, \"rb\") as arq:\n        texto = arq.read()\n\n    texto_bs = BeautifulSoup(texto, \"html.parser\")\n\n    # Remove js scripts\n    for script in texto_bs([\"script\", \"style\"]):\n        script.decompose()\n\n    texto = texto_bs.text\n    # break into lines and remove leading and trailing space on each\n    linhas = (linha.strip() for linha in texto.splitlines())\n    # break multi-headlines into a line each\n    pedacos = (frase.strip() for linha in linhas for frase in linha.split(\" \"))\n    # drop blank lines\n    texto = '\\n'.join(pedaco for pedaco in pedacos if pedaco)\n\n    #print(text)\n    return texto.lower()\n\n\ndef remover_stopwords(palavras, arquivo_stopwords):\n    \"\"\"Remove stopwords\"\"\"\n    with codecs.open(arquivo_stopwords, \"r\", encoding=\"utf-8\") as stopwords:\n        sw = stopwords.read()\n\n    stopwords_lista = sw.split()\n\n    palavras_novo = [palavra for palavra in palavras \n                     if palavra not in stopwords_lista]\n\n    return palavras_novo\n
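\n# Usage sketch (hypothetical file names):\n#   texto = carregar_HTML('pagina.html')\n#   termos = tokenize(texto, 'stopwords.txt')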
\"\"\"Tokenização\"\"\"\n pontuacao = \" .,-!#$%^&*();:\\n\\t\\\\\\\"|/?!\\{\\}[]<>+©\"\n for i in range(0, len(texto)):\n for j in range(0, len(pontuacao)):\n if texto[i] == pontuacao[j]:\n texto = texto.replace(pontuacao[j], \" \") \n\n termos = remover_stopwords(texto.split(), arquivo_stopwords)\n\n return termos","sub_path":"processa.py","file_name":"processa.py","file_ext":"py","file_size_in_byte":1501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"374146697","text":"from urllib import request\nfrom bs4 import BeautifulSoup\nfrom urllib.parse import quote_plus\nfrom youtube_dl import YoutubeDL\n\nYOUTUBE_URL = 'https://www.youtube.com'\nSEARCH_SUBSTR = '/results?search_query='\nWATCH_SUBSTR = '/watch?v='\nPLAYLIST_SUBSTR = '/playlist?list='\n\ndef get_videos_by_query_string( query ):\n soup = get_soup_by_url( YOUTUBE_URL + SEARCH_SUBSTR + quote_plus( query ) )\n\n results = []\n video_rows = soup.findAll( 'div', { 'class' : 'yt-lockup-video' } )\n\n for row in video_rows:\n image = row.find( 'img' )\n src_att = image['src']\n if '/yts/img/' in src_att:\n img_url = image['data-thumb']\n else:\n img_url = src_att\n\n title = row.find( 'a', { 'class' : 'yt-uix-tile-link' } )\n\n results.append( {\n 'title' : title.contents[0],\n 'watch_id' : row['data-context-item-id'],\n 'image_url' : img_url\n } )\n\n return results\n\ndef get_video_by_watch_id( watch_id ):\n soup = get_soup_by_url( YOUTUBE_URL + WATCH_SUBSTR + watch_id )\n\ndef get_playlist_videos_by_playlist_id( playlist_id ):\n soup = get_soup_by_url( YOUTUBE_URL + PLAYLIST_SUBSTR + playlist_id )\n\n results = []\n video_rows = soup.findAll( 'tr', { 'class' : 'pl-video' } )\n\n for row in video_rows:\n image = row.find( 'img' )\n results.append( {\n 'title' : row['data-title'],\n 'watch_id' : row['data-video-id'],\n 'image_url' : image['data-thumb']\n } )\n\n return results\n\ndef get_soup_by_url( url ):\n page = request.urlopen( url )\n return BeautifulSoup( page, 'html.parser' )\n\ndef download_video( watch_id ):\n options = {\n 'format': 'bestaudio/best',\n 'postprocessors': [ {\n 'key' : 'FFmpegExtractAudio',\n 'preferredcodec' : 'mp3',\n 'preferredquality' : '192',\n } ],\n 'logger' : My_Logger(),\n 'progress_hooks' : progress_hook,\n 'noplaylist' : True,\n }\n\n YoutubeDL( options ).download( YOUTUBE_URL + WATCH_SUBSTR + watch_id )\n\ndef download_playlist( playlist_id ):\n options = {\n 'format': 'bestaudio/best',\n 'postprocessors': [\n {\n 'key' : 'FFmpegExtractAudio',\n 'preferredcodec' : 'mp3',\n 'preferredquality' : '192',\n }\n ],\n 'logger' : My_Logger(),\n 'progress_hooks' : progress_hook\n }\n\n YoutubeDL( options ).download( YOUTUBE_URL + PLAYLIST_SUBSTR + playlist_id )\n\nclass My_Logger( object ):\n def debug( self, msg ):\n pass\n\n def warning( self, msg ):\n pass\n\n def error( self, msg ):\n print( msg )\n\ndef progress_hook( d ):\n if d['status'] == 'finished':\n print( 'Done downloading, now converting ...' 
)","sub_path":"youtube.py","file_name":"youtube.py","file_ext":"py","file_size_in_byte":2798,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"215315084","text":"#Create an emotions dict, where the keys are the names of different human emotions\n#and the values are the degree to which the emotion is being felt on a scale from 1 to 3.\n\nemotions_dictionary = {\n 'happy': 3,\n 'sad': 1,\n 'excited': 3\n}\n\n#Write a Person class with the following characteristics:\n\n#name (string)\n#emotions (dict)\n#Initialize an instance of Person using your emotions dict from exercise 1.\n\n\nclass Person:\n \"\"\"A class representing a person\"\"\"\n\n def __init__(self, name, emotions):\n self.name = name\n self.emotions = emotions\n\n def __str__(self):\n return \"Person: {} Person's Emotions: {}\".format(self.name, self.emotions)\n\n def how_do_we_feel(self): #instance method -\n for emotions, degree in self.emotions.items(): #for emotions we want a degree of them ranging 1-3 or low to high\n converted_degree = None # value of converted_degree starts at 'none' but still will receive value later\n if degree == 1:\n converted_degree == 'low'\n elif degree == 2:\n converted_degree == 'average'\n elif degree == 3:\n converted_degree == 'high'\n print(\"{} feel {} level of {}\".format(converted_degree, emotions))\n\nMike = Person('Mike', emotions_dictionary)\nprint(Mike)\n\n\n#Add an instance method to your class that displays a message describing how the\n#erson is feeling. Substitute the words \"high\", \"medium\", and \"low\" for the emotion levels 1, 2, and 3.\n","sub_path":"Feb19Fundamentals.py","file_name":"Feb19Fundamentals.py","file_ext":"py","file_size_in_byte":1481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"199657552","text":"from pyspark import SparkContext\nfrom pyspark.sql import SparkSession\nfrom pyspark.sql import functions as F\nfrom pyspark.sql import types as T\nfrom datetime import datetime, timedelta\nimport json\nimport numpy as np\nimport sys\n\ndef main(sc, spark):\n '''\n Transfer our code from the notebook here, however, remember to replace\n the file paths with the ones provided in the problem description.\n '''\n dfPlaces = spark.read.csv('/data/share/bdm/core-places-nyc.csv', header=True, escape='\"')\n dfPattern = spark.read.csv('/data/share/bdm/weekly-patterns-nyc-2019-2020/*', header=True, escape='\"')\n #dfPlaces = spark.read.csv('core-places-nyc.csv', header=True, escape='\"')\n #dfPattern = spark.read.csv('weekly-patterns-nyc-2019-2020-sample.csv', header=True, escape='\"')\n OUTPUT_PREFIX = sys.argv[1]\n CAT_CODES = {'445210', '722515', '445299', '445120', '452210', '311811', '722410', '722511', '445220', '445292', '445110', '445291', '445230', '446191', '446110', '722513', '452311'}\n CAT_GROUP = {'452311': 0, '452210': 0, '445120': 1, '722410': 2, '722511': 3, '722513': 4, '446191': 5, '446110': 5, '722515': 6, '311811': 6, '445299': 7, '445220': 7, '445292': 7, '445291': 7, '445230': 7, '445210': 7, '445110': 8}\n dfD = dfPlaces.filter(dfPlaces.naics_code.isin(CAT_CODES)).select(\"placekey\", \"naics_code\")\n udfToGroup = F.udf(lambda x: CAT_GROUP[x])\n\n dfE = dfD.withColumn('group', udfToGroup('naics_code'))\n dfF = dfE.drop('naics_code').cache()\n groupCount = dict(dfF.groupBy(\"group\").count().collect())\n\n def expandVisits(date_range_start, visits_by_day):\n visits_by_day = json.loads(visits_by_day)\n for i in range(len(visits_by_day)):\n 
overall_date = (datetime.strptime(date_range_start[:10], \"%Y-%m-%d\")+ timedelta(days=i))\n if overall_date.year != 2018:\n yield overall_date.year, overall_date.strftime('%Y-%m-%d')[5:10], visits_by_day[i]\n\n visitType = T.StructType([T.StructField('year', T.IntegerType()),\n T.StructField('date', T.StringType()),\n T.StructField('visits', T.IntegerType())])\n\n udfExpand = F.udf(expandVisits, T.ArrayType(visitType))\n\n dfH = dfPattern.join(dfF, 'placekey') \\\n .withColumn('expanded', F.explode(udfExpand('date_range_start', 'visits_by_day'))) \\\n .select('group', 'expanded.*')\n\n def computeStats(group, visits):\n add_zeros = groupCount[group] - len(visits)\n visits.extend([0]*add_zeros)\n median = np.median(visits) #get the median of values in a list in each row\n if median - np.std(visits) >=0:\n return (int(median), int(median - np.std(visits)), int(median + np.std(visits)))\n else:\n return (int(median), 0, int(median + np.std(visits)))\n\n\n statsType = T.StructType([T.StructField('median', T.IntegerType()),\n T.StructField('low', T.IntegerType()),\n T.StructField('high', T.IntegerType())])\n\n udfComputeStats = F.udf(computeStats, statsType)\n\n dfI = dfH.groupBy('group', 'year', 'date') \\\n .agg(F.collect_list('visits').alias('visits')) \\\n .withColumn('stats', udfComputeStats('group', 'visits'))\n\n dfJ = dfI \\\n .select('group', 'year', F.concat(F.lit(\"2020-\"), F.col('date')).alias('date'), 'stats.*')\\\n .sort('group', 'year', 'date') \\\n .cache()\n\n group_names = {'big_box_grocers': 0,\n 'convenience_stores': 1,\n 'drinking_places': 2,\n 'full_service_restaurants': 3,\n 'limited_service_restaurants': 4,\n 'pharmacies_and_drug_stores': 5,\n 'snack_and_retail_bakeries': 6,\n 'specialty_food_stores': 7,\n 'supermarkets_except_convenience_stores': 8}\n\n for filename,number in group_names.items():\n dfJ.filter('group='+str(number)) \\\n .drop('group').coalesce(1).write.csv(OUTPUT_PREFIX+'/'+filename,\n mode='overwrite', header=True)\n\n #!ls /content/ | grep -Ev \".csv|sample_data\"\n\nif __name__=='__main__':\n sc = SparkContext()\n spark = SparkSession(sc)\n main(sc, spark)\n","sub_path":"BDM_HW4_Ng.py","file_name":"BDM_HW4_Ng.py","file_ext":"py","file_size_in_byte":3975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"141276410","text":"import tornado.httpclient\nimport json\n\n# Test register\n\n\ndef service_call(URL, service_point, post_data):\n http_client = tornado.httpclient.HTTPClient()\n try: \n print(json.dumps(post_data))\n \n request = tornado.httpclient.HTTPRequest(URL + \"/\" + service_point, \n method='POST',\n body=json.dumps(post_data))\n response = http_client.fetch(request)\n return json.loads(str(response.body, encoding='utf-8'))\n \n except tornado.httpclient.HTTPError as e:\n # HTTPError is raised for non-200 responses; the response\n # can be found in e.response.\n print(\"Error: \" + str(e) + \" \" + str(e.response))\n except Exception as e:\n # Other errors are possible, such as IOError.\n print(\"Error: \" + str(e))\n \n http_client.close()\n\n\nURL = \"http://localhost:9110\"\n\n\"\"\"\nresponse = service_call(URL, \"register\", {'username' : 'yusuf', 'email': 'rer@fdfd.com', 'password': 'pass'})\n\nif response['status'] != 'success':\n print response['error']\n\n\"\"\"\n\nchannelName = 'nearbyTweetChannel'\nbrokerName = 'brokerF'\n\nresponse = service_call(URL, \"notifybroker\", {\n 'dataverseName': 'channels',\n 'channelName': channelName,\n 'channelExecutionTime': 
\"2016-09-20T13:29:06.390Z\",\n 'subscriptionIds': ['c0b62d66-e4b3-c2bd-73c1-4151a7e94429']\n})\n\nprint(json.dumps(response))\n\n","sub_path":"WebWithBroker/BADAsterixBackend.py","file_name":"BADAsterixBackend.py","file_ext":"py","file_size_in_byte":1432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"645801585","text":"x = open(\"file.txt\",\"r\")\na = x.read()\nb = x.readlines()\nc = x.readlines()\nprint(a)\nprint(b)\nprint(c)\nfor line in x.readline(): # read lines\n print (line)\nx.close()\n","sub_path":"disk2.py","file_name":"disk2.py","file_ext":"py","file_size_in_byte":169,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"61556042","text":"import xmlrpc.client as xmlrpclib\nimport logging\n_logger = logging.getLogger(__name__)\n\ndef connection(host, port, https_on):\n _logger.debug(\"Creating object_facade\")\n if https_on:\n url_template = \"https://%s/xmlrpc/%s\"\n object_facade = xmlrpclib.ServerProxy(url_template % (\n host, 'object'))\n else:\n url_template = \"http://%s:%s/xmlrpc/%s\"\n object_facade = xmlrpclib.ServerProxy(url_template % (\n host, port, 'object'))\n return object_facade\n\n\ndef authenticate_connection(host, port, user, user_pw, database, https_on):\n _logger.debug(\"Validating Connection to Odoo via XMLRPC\")\n if https_on:\n url_template = \"https://%s/xmlrpc/%s\"\n login_facade = xmlrpclib.ServerProxy(url_template % (\n host, 'common'))\n else:\n url_template = \"http://%s:%s/xmlrpc/%s\"\n login_facade = xmlrpclib.ServerProxy(url_template % (\n host, port, 'common'))\n try:\n user_id = login_facade.login(database, user, user_pw)\n if user_id:\n _logger.debug(\"Odoo Connection succed on XMLRPC user %s\", str(user_id))\n return user_id\n except Exception:\n _logger.debug(\"Odoo Connection can't return user_id\")\n return False","sub_path":"lib/odoo_xmlrpc.py","file_name":"odoo_xmlrpc.py","file_ext":"py","file_size_in_byte":1257,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"565913658","text":"def topKFrequent(words, k):\n count = {}\n for w in words:\n count[w] = count.get(w, 0) + 1\n \n # -count[k] will reverse the order\n sort = sorted(count, key=lambda k: (-count[k], k))\n \n return sort[:k]\n\n\n# this method is not giving answer in alphabatical order\ndef topKFrequent1(words, k):\n count = {}\n for w in words:\n count[w] = count.get(w, 0) + 1\n \n sort = sorted(count, key=lambda k:count[k], reverse=True)\n \n return sort[:k]\n\n\nprint(topKFrequent1([\"i\", \"love\", \"leetcode\", \"i\", \"love\", \"coding\"], 2))\nprint(topKFrequent1([\"i\", \"love\", \"leetcode\", \"i\", \"love\", \"coding\"], 3))","sub_path":"TopKFrequentWords.py","file_name":"TopKFrequentWords.py","file_ext":"py","file_size_in_byte":629,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"594837674","text":"'''\n* 리펙토링\n - 기능은 동일하나 최적의 코드로 변환 및 개선\n - \nstep01 단계 확장하기\n- 향후 완결 로직 : main page에서 소개하는 모든 책들의 상세 page의 목차 스크래핑\n- 권장 구조\n 1. 로직별 개별 함수로 개발 권장\n 2. 다수의 page를 스크래핑 해야 하는 관계로\n - 연속적인 크롤링 따라서 세션이라는 객체가 필요\n\n- 사고력 기르기\n 1. 연속적인 다수의 page 크롤링시 세션을 사용하는 사유?\n - 연결 유지\n - 서버의 리소스 자원을 절약할 수 있게 하는 매너있는 자세\n\n- 주요 API\n lxml.html : tree 기반의 html 문서를 다룰수 있게 지원해주는 api\n\n- 개발 방식\n 1. process\n 세��� 객체 생성 -> 크롤링 -> lxml을 사용하여 html에서 데이터 추출\n 2. 코드\n requests.Session()\n get()\n lxml.html.fromstring()\n\n- 용어정리\n1. 
1. session\n    - identifies and tracks which user it is from login to logout - state keeping\n    - a state-keeping technique\n    - the field that handles sessions most carefully : finance, by far\n2.\n'''\n\nimport requests\nimport lxml.html\nimport re\nimport time\n\ndef main():\n    session = requests.Session()\n    response = session.get(\"http://www.hanbit.co.kr/store/books/new_book_list.html\")\n    urls = scrape_list_page(response)\n\n    for url in urls:\n        time.sleep(1)\n        response = session.get(url)\n        bookInfo = scrape_detail_page(response)\n        print(bookInfo)\n        break\n\ndef scrape_list_page(response):\n    root = lxml.html.fromstring(response.content)\n    root.make_links_absolute(response.url)\n    for a in root.cssselect('.view_box .book_tit a'):\n        url = a.get('href')\n        yield url\n\n\n# function that scrapes the table of contents from the detail page\n'''\n    shape the detail info into a dict\n    key - url, title, price, content (table of contents)\n    value - cssselect()\n    this function returns a dict\n'''\ndef scrape_detail_page(response):\n    root = lxml.html.fromstring(response.content)\n    bookInfo = {\n        'url' : response.url,\n        'title' : root.cssselect(\".store_product_info_box h3\")[0].text_content(),\n        'price' : root.cssselect(\".pbr strong\")[0].text_content(),\n        'content' : [normalize_space(p.text_content()) for p in root.cssselect(\"#tabs_3 .hanbit_edit_view p\") if normalize_space(p.text_content()) != '']\n    }\n    return bookInfo\n\ndef scrape_detail_page2(response):\n    root = lxml.html.fromstring(response.content)\n    #price = root.cssselect('#container > div.store_view_wrap > div.store_payment_area > fieldset > label.payment_box.curr > p:nth-child(2) > span.pbr > strong')[0].text_content()\n    price = root.cssselect(\".pbr del\")[0].text_content()\n    print(price)\n\n    content = root.cssselect(\"#tabs_3 .hanbit_edit_view p\")\n    print(len(content))\n    print(content[0].text_content())\n    print(content[-1].text_content())\n    contents = [normalize_space(p.text_content()) for p in root.cssselect(\"#tabs_3 .hanbit_edit_view p\") if normalize_space(p.text_content()) != '']\n    print(contents)\n'''\n1. strip surplus whitespace around a string : strip()\n2. drop meaningless '' string data\n    : filter with an if condition\n    if(p.text_content() != '')\n
3. when strings contain runs of meaningless whitespace, collapse them into a single space\n    : re.sub(expression to fix, replacement expression, data)\n    re.sub('\\s+', ' ', data)\n\n'''\ndef normalize_space(s):\n    return re.sub(r'\\s+', ' ', s).strip()\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"03.crawler/step02crawler.py","file_name":"step02crawler.py","file_ext":"py","file_size_in_byte":3656,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"243711705","text":"# Import packages\nimport argparse\nimport numpy as np\nimport torch\nfrom torch import nn, optim\nimport torchvision\nfrom torchvision import datasets, transforms, models\nimport time\nimport copy\nfrom collections import OrderedDict\n\ndef arg_parser():\n    parser = argparse.ArgumentParser(description='Training script for a Deep Neural Network for Image Classification')\n\n    parser.add_argument('data_dir', type=str, help=\"data directory (required)\")\n\n    parser.add_argument('--save_dir', default='', type=str, help='directory to save the model checkpoint')\n    parser.add_argument('--arch', choices=['vgg16', 'densenet121'], default='vgg16', type=str, help='model architecture, options: vgg16, densenet121')\n    parser.add_argument('--learning_rate', default=0.001, type=float, help='learning rate')\n    parser.add_argument('--hidden_units', default=4096, type=int, help='number of hidden units')\n    parser.add_argument('--epochs', default=5, type=int, help='number of epochs for training')\n    parser.add_argument('--gpu', default=False, action='store_true', help='activate GPU support for training')\n\n    args = parser.parse_args()\n    return args\n\n\n# load data\ndef load_training_data(data_dir):\n    train_dir = data_dir + '/train'\n    valid_dir = data_dir + '/valid'\n\n    # TODO: Define your transforms for the training, validation, and testing sets\n    # dictionary form based on: https://pytorch.org/tutorials/beginner/transfer_learning_tutorial.html#load-data\n    data_transforms = {\n        'train': transforms.Compose([\n            transforms.RandomResizedCrop(224),\n            transforms.RandomRotation(30),\n            transforms.RandomHorizontalFlip(),\n            transforms.ToTensor(),\n            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n        ]),\n        'validation': transforms.Compose([\n            transforms.Resize(256),\n            transforms.CenterCrop(224),\n            transforms.ToTensor(),\n            transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\n        ]),\n    }\n\n    # create a dictionary containing all data folders\n    dir_dict = {'train':train_dir,\n                'validation': valid_dir}\n\n    # TODO: Load the datasets with ImageFolder\n    image_datasets = {x: datasets.ImageFolder(dir_dict[x], data_transforms[x])\n                      for x in ['train', 'validation']}\n\n    class_to_idx = image_datasets['train'].class_to_idx\n\n    print('image dataset loaded.')\n    return image_datasets, class_to_idx\n\n\n# build model\ndef build_model(arch, nof_hidden_layer, learning_rate):\n    # select pretrained model\n    if(arch=='vgg16'):\n        model = models.vgg16(pretrained=True)\n        nof_input_layer = 25088\n    elif(arch=='densenet121'):\n        model = models.densenet121(pretrained=True)\n        nof_input_layer = 1024\n        if(nof_hidden_layer>1000):\n            print('max. 1000 hidden units allowed for densenet121. number of hidden units reduced to: 512')\n            nof_hidden_layer=512\n    else:\n        print('no valid model arch selected... will use vgg16.')\n
        model = models.vgg16(pretrained=True)\n        nof_input_layer = 25088\n\n    # Freeze feature parameters\n    for param in model.parameters():\n        param.requires_grad = False\n\n    # Build classifier\n    classifier = nn.Sequential(OrderedDict([\n        ('fc1', nn.Linear(nof_input_layer, nof_hidden_layer)),\n        ('relu1', nn.ReLU()),\n        ('drop1', nn.Dropout(0.4)),\n        ('fc2', nn.Linear(nof_hidden_layer, 102)),\n        ('out', nn.LogSoftmax(dim=1))]))\n\n    model.classifier = classifier\n\n    # select criterion\n    criterion = nn.NLLLoss()\n\n    # Only train the classifier parameters, the feature parameters are frozen\n    optimizer = optim.Adam(model.classifier.parameters(), lr=learning_rate)\n\n    print('model {} built with {} hidden units and a learning rate of {}.'.format(arch,nof_hidden_layer,learning_rate))\n    return model, criterion, optimizer\n\n\n# define function for training the model\n# based on https://pytorch.org/tutorials/beginner/transfer_learning_tutorial.html#load-data\ndef train_model(model, image_datasets, criterion, optimizer, device, nof_epochs):\n    # TODO: Using the image datasets and the transforms, define the dataloaders\n    dataloaders = {x: torch.utils.data.DataLoader(image_datasets[x], batch_size=64, shuffle=True) \n                   for x in ['train', 'validation']}\n\n    # get dataset size\n    dataset_sizes = {x: len(image_datasets[x]) for x in ['train', 'validation']}\n    # save start time (time_epoch keeps one timestamp per epoch boundary)\n    time_epoch = []\n    time_epoch.append(time.time())\n\n    # move model parameters to \"device\" (GPU if available)\n    model.to(device)\n\n    # initialize variables for the best model\n    best_model_wts = copy.deepcopy(model.state_dict())\n    best_acc = 0.0\n\n    print('Start training of the model...')\n    print()\n    for epoch in range(nof_epochs):\n        print('Epoch {}/{}'.format(epoch + 1, nof_epochs))\n        print('-' * 10)\n\n        # each epoch has a training and validation phase\n        for phase in ['train', 'validation']:\n            if phase == 'train':\n                # set model mode to training (dropouts activated)\n                model.train()\n            else:\n                # change model mode to evaluation (no dropouts)\n                model.eval()\n\n            # initialize and reset variables\n            running_loss = 0.0\n            running_corrects = 0\n\n            # iterate over data from dataloader\n            for inputs, labels in dataloaders[phase]:\n                # move input and label tensors to the default device\n                inputs, labels = inputs.to(device), labels.to(device)\n\n                # zero the parameter gradients\n                optimizer.zero_grad()\n\n                # forward\n                with torch.set_grad_enabled(phase == 'train'):\n                    outputs = model(inputs)\n                    _, preds = torch.max(outputs, 1)\n                    loss = criterion(outputs, labels)\n\n                    # in training mode: perform backward + optimize\n                    if phase == 'train':\n                        loss.backward()\n                        optimizer.step()\n\n                # perform some statistics (calculate loss + correct)\n                running_loss += loss.item() * inputs.size(0)\n                running_corrects += torch.sum(preds == labels.data)\n\n            # calculate epoch loss\n            epoch_loss = running_loss / dataset_sizes[phase]\n            # calculate epoch accuracy\n            epoch_acc = running_corrects.double() / dataset_sizes[phase]\n\n            # print losses and accuracy per phase and epoch\n            print('{} loss: {:.4f} Acc: {:.4f}'.format(phase, epoch_loss, epoch_acc))\n\n            # deep copy the model\n            if phase == 'validation' and epoch_acc > best_acc:\n                best_acc = epoch_acc\n                best_model_wts = copy.deepcopy(model.state_dict())\n\n        # print timing results for epoch (store timestamps, not durations,\n        # so the per-epoch difference stays correct)\n        time_epoch.append(time.time())\n        epoch_duration = time_epoch[epoch + 1] - time_epoch[epoch]\n        print('epoch duration: {:.0f}m {:.0f}s'.format(epoch_duration // 60, epoch_duration % 60))\n        print()\n\n    # print timing results\n    time_elapsed = time.time() - time_epoch[0]\n
    print('Training complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))\n    # print best accuracy result\n    print('Best valid Acc: {:.4f}'.format(best_acc))\n\n    # load best model weights\n    model.load_state_dict(best_model_wts)\n    return model\n\n# define function to save checkpoint\ndef save_checkpoint(model, arch, filepath=''):\n    # check model architecture\n    if(arch=='vgg16'):\n        nof_input_layer = 25088\n    elif(arch=='densenet121'):\n        nof_input_layer = 1024\n    else:\n        nof_input_layer = 25088\n\n    # create full filename\n    filename = filepath + arch + '_checkpoint.pth'\n\n    # parameters\n    checkpoint_state = {'model_name':arch,\n                        'input_size': nof_input_layer,\n                        'output_size': 102,\n                        'model_state_dict': model.state_dict(),\n                        'class_to_idx' : model.class_to_idx,\n                        'classifier': model.classifier}\n    # save\n    torch.save(checkpoint_state, filename)\n\n    print('model checkpoint saved to file: {}'.format(filename))\n\ndef main():\n    # get input args\n    args = arg_parser()\n    # Check if GPU is available\n    if args.gpu:\n        if torch.cuda.is_available():\n            device = torch.device(\"cuda\")\n            print('calculation will be performed on: {}'.format(device))\n        else:\n            device = torch.device(\"cpu\")\n            print('no GPU available. calculation will be performed on: {}'.format(device))\n    else:\n        device = torch.device(\"cpu\")\n        print('calculation will be performed on: {}'.format(device))\n\n    # load image files\n    image_datasets, class_to_idx = load_training_data(args.data_dir)\n    # build model\n    model, criterion, optimizer = build_model(args.arch, args.hidden_units, args.learning_rate)\n    model.class_to_idx = class_to_idx\n    # train the model\n    model = train_model(model, image_datasets, criterion, optimizer, device, args.epochs)\n    # save model checkpoint\n    save_checkpoint(model, args.arch, args.save_dir)\n\nif __name__ == \"__main__\":\n    main()","sub_path":"3_image_classifier/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":9461,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"218580440","text":"import heapq\n\n\nclass Solution(object):\n    def lastStoneWeight(self, stones):\n        \"\"\"\n        :type stones: List[int]\n        :rtype: int\n        \"\"\"\n        stones = [-s for s in stones]\n        heapq.heapify(stones)\n        while len(stones) > 1:\n            a = heapq.heappop(stones)\n            b = heapq.heappop(stones)\n            if a != b:\n                heapq.heappush(stones, -abs(abs(a) - abs(b)))\n            print(stones)\n        return 0 if not stones else abs(stones[0])\n\n","sub_path":"src2/last-stone-weight/s.py","file_name":"s.py","file_ext":"py","file_size_in_byte":492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"290824222","text":"import numpy as np\n\nfrom josephson_circuit import *\nfrom static_problem import *\n\nimport matplotlib\nimport matplotlib.pyplot as plt\n\nmatplotlib.use(\"TkAgg\")\n\n\"\"\"\nEXAMPLE 3: Vortex with inductance: Flux quantization\n\n\"\"\"\n\n# Square array with screening\nN = 12\narray = SquareArray(N, N)\nLlist = [0.01, 0.1, 1, 10]\nc = (N-1)/2\nf = 0  # frustration\nn = np.zeros(array.face_count(), dtype=int)  # target vortex configuration\ncentre_face_idx = array.locate_faces(c, c)  # locating face idx at coordinate x=(N-1)/2, y=(N-1)/2\nn[centre_face_idx] = 1\n\nfor i, L in enumerate(Llist):\n    array.set_inductance_factors(L)\n    out, _, _ = StaticProblem(array, frustration=f, vortex_configuration=n).compute()\n    flux = out.get_flux()\n    flux_center = np.round(flux[array.locate_faces(c, c)], 4)\n    flux_sum = np.round(np.sum(flux), 4)\n
out.plot(show_face_quantity=True, face_quantity=\"flux\", face_quantity_logarithmic_colors=True,\n face_quantity_clim=[1E-10, 1], title=f\"beta_L={L}, flux centre {flux_center}, sum flux {flux_sum}\", arrow_color=[1, 1, 1])\n\n\nN = 30\narray = SquareArray(N, N)\nLlist = [0.01, 0.1, 1, 10]\nc = (N-1)/2\nf = 0 # frustration\nn = np.zeros(array.face_count(), dtype=int) # target vortex configuration\ncentre_face_idx = array.locate_faces(c, c) # locating face idx at coordinate x=(N-1)/2, y=(N-1)/2\nn[centre_face_idx] = 1\n\n\nLlist = 10 ** np.linspace(-2, 2, 21)\nPhi = np.zeros(len(Llist))\nfor i, L in enumerate(Llist):\n array.set_inductance_factors(L)\n out, _, _ = StaticProblem(array, vortex_configuration=n).compute()\n Phi[i] = np.sum(out.get_flux())\nplt.subplots()\nplt.semilogx(Llist, Phi, marker=\"o\", label=\"total flux\")\nplt.semilogx(Llist, Llist / (1 + Llist), label=\"beta_L / (1 + beta_L)\")\nplt.xlabel(\"beta_L\")\nplt.ylabel(\"total flux\")\n\nplt.legend()\nplt.show()\n","sub_path":"examples/daan_answer.py","file_name":"daan_answer.py","file_ext":"py","file_size_in_byte":1804,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"460950447","text":"\"\"\"\r\nMake extinction plots\r\nSam Geen, September 2020\r\n\"\"\"\r\n\r\nfrom startup import *\r\nimport extinctionplots, snaptime\r\n\r\nfrom pymses.utils import constants as C \r\n\r\nfrom scipy import interpolate\r\n\r\nAVtext = \"A$_{\\mathrm{V}}$\"\r\n\r\ndef plotagevisiblevsstellarmass(simnames,extinctionlimit=1,scalewithlifetime=False,luminositylimit=None):\r\n '''\r\n Plot the stellar age the star is visible versus its mass\r\n '''\r\n plt.clf()\r\n for simname in simnames:\r\n sim = hamusims[simname]\r\n simsnaps = {snap.OutputNumber():snap for snap in sim.Snapshots()}\r\n simtimes = {snap.OutputNumber():snaptime.Myr(snap) for snap in simsnaps.values()}\r\n # Get the stellar object tracks\r\n tracks, isnaps = extinctionplots.extinctionbyparticle(simname,wholesink=False)\r\n masses = tracks.keys()\r\n # Get ages each stellar object is visible at\r\n visibleages = []\r\n visiblemasses = []\r\n tcreateds = []\r\n # TODO:\r\n # - Get birth time of each star\r\n # - Turn each track into an interp1d function of stellar age vs extinction\r\n # - Find age where extinction == extinctionlimit\r\n # - Plot ages vs masses\r\n for mass in masses:\r\n # TODO: turn into probability plot based on los\r\n extinctions = np.array(tracks[mass][:,50])\r\n extinctions = NHtoAv(extinctions)\r\n snapnums = np.array(isnaps[mass])+1\r\n firstsnap = simsnaps[snapnums[0]]\r\n stellar = stellars.FindStellar(firstsnap)\r\n whichstar = np.where(stellar.mass == mass)\r\n tcreated = stellar.tcreated[whichstar][0]\r\n lifetime = stellar.lifetime[whichstar][0]\r\n ages = np.array([simtimes[num] for num in snapnums]) - tcreated\r\n # Get luminosities in the V band\r\n LVbands = np.array([singlestar.star_bandenergies(mass, ageins, 1.0) for ageins in ages*Myrins]).flatten()\r\n # Get extincted LV values\r\n LVextincted = np.array([L * 10.0**(-extinction/2.5) for L, extinction in zip(LVbands, extinctions)]).flatten()\r\n # Interpolate extinction track\r\n if len(ages) > 1:\r\n #agefunc = interpolate.interp1d(extinctions, ages)\r\n if extinctionlimit is not None:\r\n if extinctionlimit > extinctions.max():\r\n visibleage = 0.0\r\n elif extinctionlimit < extinctions.min():\r\n visibleage = lifetime\r\n else:\r\n visibleage = ages[np.where(extinctions < extinctionlimit)].min()\r\n if luminositylimit is not 
None:\r\n if luminositylimit > LVextincted.max():\r\n visibleage = lifetime\r\n elif luminositylimit < LVextincted.min():\r\n visibleage = 0.0\r\n else:\r\n mask = np.where(LVextincted > luminositylimit)\r\n #print(len(LVextincted),len(ages),len(mask),mask)\r\n #import pdb; pdb.set_trace()\r\n visibleage = ages[mask].min()\r\n if scalewithlifetime:\r\n visibleage /= lifetime\r\n visibleages.append(visibleage)\r\n visiblemasses.append(mass)\r\n tcreateds.append(tcreated)\r\n tcreateds = np.array(tcreateds)\r\n tcreateds -= tcreateds.min()\r\n tcreateds /= tcreateds.max()\r\n plt.scatter(visiblemasses, visibleages,c=tcreateds,cmap=\"copper\")\r\n plt.xlabel(\"Stellar Mass / Msun\")\r\n if scalewithlifetime:\r\n plt.ylabel(\"Fraction of star's age until it is visible\")\r\n else:\r\n plt.ylabel(\"Age star becomes visible / Myr\")\r\n plt.xscale(\"log\")\r\n plt.yscale(\"log\")\r\n visibleages = np.array(visibleages)\r\n if scalewithlifetime:\r\n plt.ylim([visibleages[visibleages > 0.0].min(),1.0])\r\n else:\r\n plt.ylim([visibleages[visibleages > 0.0].min(),visibleages.max()])\r\n #if len(visibleages) > 0:\r\n # plt.ylim([np.array(visibleages).min(),1.0])\r\n limtxt = \"\"\r\n if extinctionlimit is not None:\r\n limtxt = \"extinctionlimit_\"+str(extinctionlimit)\r\n if luminositylimit is not None:\r\n limtxt = \"luminositylimit_\"+str(luminositylimit)\r\n lifetxt = \"\"\r\n if scalewithlifetime:\r\n lifetxt = \"_scaledwithlifetime\"\r\n plt.savefig(\"../plots/visibleages_\"+limtxt+lifetxt+\"_\"+simnames[0]+\"_allsims.pdf\",bbox_inches='tight')\r\n\r\nif __name__==\"__main__\":\r\n luminositylimit = 10**4.5 * 2e33 # (limit / Lsolar) = 4.5 guestimate from Schootemeijer et al 2020\r\n for simnames in [[\"128_LEGO\"]]:#[imfsims, icsims]:\r\n for scalewithlifetime in [True, False][::-1]:\r\n plotagevisiblevsstellarmass(simnames,luminositylimit=luminositylimit,scalewithlifetime=scalewithlifetime)\r\n #plotagevisiblevsstellarmass(simnames,1,False) \r\n #plotagevisiblevsstellarmass(simnames,1,True) \r\n\r\n","sub_path":"Extinction/scripts/stellarextinctions.py","file_name":"stellarextinctions.py","file_ext":"py","file_size_in_byte":5014,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"337372967","text":"import abc\r\nimport numpy as np\r\nimport traceback\r\nfrom multiprocessing import Process, Pipe, current_process\r\n\r\nfrom marketenv import Wrapper\r\n\r\nclass Error(Exception):\r\n pass\r\n\r\nclass DeadProcessError(Error):\r\n pass\r\n\r\nclass CloudpickleWrapper(object):\r\n '''\r\n From OpenAI baselines\r\n Uses cloudpickle to serialize contents, otherwise multiprocessing \r\n tries to use pickle\r\n '''\r\n def __init__(self, x):\r\n self.x = x\r\n\r\n def __getstate__(self):\r\n import cloudpickle\r\n return cloudpickle.dumps(self.x)\r\n\r\n def __setstate__(self, ob):\r\n import pickle\r\n self.x = pickle.loads(ob)\r\n\r\ndef worker(remote, wrapped_env_fn):\r\n try:\r\n pid = current_process().pid\r\n env = wrapped_env_fn.x()\r\n remote.send((0, pid, None))\r\n except Exception as e:\r\n pid = current_process().pid\r\n remote.send((1, pid, traceback.format_exc()))\r\n remote.close()\r\n return\r\n try:\r\n while True:\r\n cmd, data = remote.recv()\r\n if cmd == 'step':\r\n obs, reward, done, info = env.step(data)\r\n if done:\r\n obs = env.reset()\r\n remote.send((0, pid, (obs, reward, done, info)))\r\n elif cmd == 'reset':\r\n obs = env.reset()\r\n remote.send((0, pid, obs))\r\n elif cmd == 'close':\r\n remote.close()\r\n break\r\n 
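           # every reply from this worker is a (status, pid, payload) tuple;\r\n           # status 0 means success, 1 means the payload is a traceback string\r\n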
            elif cmd == 'get_specs':\r\n                try:\r\n                    # not in the standard gym environment\r\n                    env_spec = env.env_spec\r\n                except AttributeError:\r\n                    env_spec = None\r\n                remote.send((0, pid, (env.observation_space,\r\n                                      env.action_space,\r\n                                      env.reward_range,\r\n                                      env_spec)))\r\n            elif cmd == 'getattr':\r\n                if hasattr(env, data):\r\n                    attr = getattr(env, data)\r\n                    if callable(attr):\r\n                        value = attr()\r\n                    else:\r\n                        value = attr\r\n                    remote.send((0, pid, value))\r\n                else:\r\n                    raise AttributeError('Environment has no '\r\n                                         + 'attribute \"{}\"'.format(data))\r\n            else:\r\n                raise NotImplementedError\r\n    except Exception as e:\r\n        remote.send((1, pid, traceback.format_exc()))\r\n        remote.close()\r\n    finally:\r\n        env.close()\r\n\r\nclass ParallelEnvironment(object):\r\n    '''\r\n    Based on OpenAI baselines, simplified for the market environment(s)\r\n    used here\r\n    '''\r\n    def __init__(self, env_fns):\r\n        self._env_fns = env_fns\r\n        self._n_envs = len(env_fns)\r\n\r\n        self._closed = False\r\n        self._waiting = False\r\n\r\n        # env remotes are kept in the main process\r\n        # process remotes are sent to the subprocesses\r\n        self._env_remotes, self._process_remotes = zip(*[Pipe() for _ in\r\n                                                         range(self._n_envs)])\r\n\r\n        self._ps = [Process(target = worker,\r\n                            args = (process_remote, CloudpickleWrapper(env_fn)))\r\n                    for (process_remote, env_fn)\r\n                    in zip(self._process_remotes, self._env_fns)]\r\n        for process in self._ps:\r\n            # if the main process crashes, we should not cause things to hang\r\n            process.daemon = True\r\n            process.start()\r\n\r\n        for remote in self._env_remotes:\r\n            _ = self._parse_response(remote.recv())\r\n\r\n        self._env_remotes[0].send(('get_specs', None))\r\n        o, a, r, s = self._parse_response(self._env_remotes[0].recv())\r\n        self.observation_space = o\r\n        self.action_space = a\r\n        self.reward_range = r\r\n        self.env_spec = s\r\n\r\n    @property\r\n    def closed(self):\r\n        return self._closed\r\n\r\n    def reset(self):\r\n        self._assert_is_ready()\r\n\r\n        for remote in self._env_remotes:\r\n            remote.send(('reset', None))\r\n        self._waiting = True\r\n\r\n        obs = np.stack([self._parse_response(remote.recv())\r\n                        for remote in self._env_remotes])\r\n        self._waiting = False\r\n\r\n        return obs\r\n\r\n    def step(self, actions):\r\n        self._assert_is_ready()\r\n\r\n        for action, remote in zip(actions, self._env_remotes):\r\n            remote.send(('step', action))\r\n        self._waiting = True\r\n\r\n        results = [self._parse_response(remote.recv())\r\n                   for remote in self._env_remotes]\r\n        self._waiting = False\r\n\r\n        obs, reward, terminal, info = zip(*results)\r\n        obs = np.stack(obs)\r\n        reward = np.asarray(reward)\r\n        terminal = np.asarray(terminal)\r\n        info = list(info)\r\n\r\n        return obs, reward, terminal, info\r\n\r\n    def close(self):\r\n        if self._closed:\r\n            return\r\n\r\n        self._closed = True\r\n        if self._waiting:\r\n            for remote in self._env_remotes:\r\n                remote.recv()\r\n\r\n        for remote in self._env_remotes:\r\n            remote.send(('close', None))\r\n            remote.close()\r\n\r\n        for process in self._ps:\r\n            process.join()\r\n\r\n    def get_env_attr(self, attr):\r\n        self._assert_is_ready()\r\n\r\n        for remote in self._env_remotes:\r\n            remote.send(('getattr', attr))\r\n        self._waiting = True\r\n\r\n        results = [self._parse_response(remote.recv())\r\n                   for remote in self._env_remotes]\r\n\r\n        self._waiting = False\r\n\r\n        return results\r\n\r\n    def _assert_is_ready(self):\r\n        assert (not self._closed), 'Environment is closed'\r\n        assert (not self._waiting), 'Waiting for responses from subprocesses'\r\n\r\n    def _parse_response(self, response):\r\n
        if response[0] == 0:\r\n            return response[2]\r\n        else:\r\n            for remote in self._env_remotes:\r\n                remote.send(('close', None))\r\n                remote.close()\r\n\r\n            for process in self._ps:\r\n                process.join()\r\n\r\n            self._closed = True\r\n            raise DeadProcessError('Error in child process '\r\n                                   + '{}'.format(response[1])\r\n                                   + ':\\n' + response[2])\r\n\r\n    def __enter__(self):\r\n        return self\r\n\r\n    def __exit__(self, *args):\r\n        self.close()\r\n\r\nclass VectorEnvWrapper(Wrapper):\r\n    def __init__(self, env):\r\n        super().__init__(env)\r\n\r\n    def step(self, action):\r\n        action = np.asarray(action).squeeze(0)\r\n        obs, reward, terminal, info = self.env.step(action)\r\n        if terminal:\r\n            obs = self.env.reset()\r\n        return np.expand_dims(obs, axis = 0), [reward], [terminal], [info]\r\n\r\n    def reset(self):\r\n        obs = self.env.reset()\r\n        return np.expand_dims(obs, axis = 0)","sub_path":"simplerl/agents/common/parallel_envs.py","file_name":"parallel_envs.py","file_ext":"py","file_size_in_byte":7292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"97351080","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\n    For the program description, see pages 18-24\n\"\"\"\n\nimport machine, time, gc\nimport ubinascii\nimport ujson\nfrom umqtt.simple import MQTTClient\nfrom machine import Pin\n\nled = Pin(2, Pin.OUT, value=1)\n\nconfig = {\n    'broker':'io.adafruit.com',\n    'user':'your account',\n    'key':'your AIO KEY',\n    'id' : 'room/' + ubinascii.hexlify(machine.unique_id()).decode(),\n    'topic' : b'poushen/feeds/led'\n}\n\ndef subCallback(topic, msg):\n    #obj = ujson.loads(msg)\n    value = msg.decode()\n    print('led:', value)\n    led.value(0) if value.upper() == 'ON' else led.value(1)\n\ndef main():\n    client = MQTTClient(client_id=config['id'],\n                        server=config['broker'],\n                        user=config['user'],\n                        password=config['key'])\n    client.set_callback(subCallback)\n    client.connect()\n    client.subscribe(config['topic'])\n\n    try:\n        while True:\n            client.check_msg()\n            time.sleep(2)\n    except KeyboardInterrupt:\n        print('bye')\n    except OSError as ex:\n        print(ex.args[0])\n        if ex.args[0] == -1:\n            print('woooops')\n            gc.collect()\n            time.sleep(1)\n            main()\n    # ---\n    finally:\n        client.disconnect()\n
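# NOTE: nothing calls main() in this file; on the board, import this module and\n# call main() (e.g. from main.py) so the MQTT subscribe loop actually starts.\n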
","sub_path":"lesson_15/diy18_4_aio.py","file_name":"diy18_4_aio.py","file_ext":"py","file_size_in_byte":1261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"361096190","text":" # -*- coding: utf-8 -*-\nfrom odoo.addons.importexcel.models.model_dict_folder.tool_tao_instance import get_key\nfrom odoo.osv import expression\nimport datetime\nfrom odoo import fields\nfrom odoo.exceptions import UserError\nfrom odoo.addons.downloadwizard.models.dl_models.dl_model import wrap_center_vert_border_style\nfrom odoo.tools.float_utils import float_compare, float_round\nfrom odoo.addons.importexcel.models.model_dict_folder.tool_tao_instance import BreakRowException\n\nCREATE_IN_M2X = False\ndef get_create_write_x2m(self,\n                         search_dict,\n                         write_dict={},\n                         MD={},\n                         exist_val=False,\n                         setting={},\n                         check_file=False,\n                         is_search=True,\n                         is_write=True,\n#                         main_call=False,\n                         x2m_fields=False,\n                         needdata=None,\n                         f_name_call=None\n                         ):\n#    x2m_fields = MD.get('x2m_fields')\n    if x2m_fields:\n        x2m_field = x2m_fields[0]\n        x2m_values = search_dict[x2m_field]\n        len_x2m_vals = len(x2m_values)\n#    try:\n#        len_x2m_vals = len(x2m_values)\n#    except TypeError:\n#        len_x2m_vals = 1\n    else:\n        len_x2m_vals = 1\n    instance_build_noti_dict = {}\n    for count_i, i in enumerate(range(0, len_x2m_vals)):\n        if x2m_fields:\n            x2m_obj = []\n            x2m_searched_obj = []\n            search_dict[x2m_field] = x2m_values[i]\n        obj, searched_obj, new_noti_dict = get_create_write(self,\n                                                            search_dict,\n                                                            write_dict=write_dict,\n                                                            MD=MD,\n                                                            exist_val=exist_val,\n                                                            setting=setting,\n                                                            check_file=check_file,\n                                                            is_search=is_search,\n                                                            is_write=is_write,\n                                                            needdata=needdata,\n                                                            f_name_call=f_name_call,\n                                                            is_x2m_fields=bool(x2m_fields)\n                                                            )\n\n        for k,v in new_noti_dict.items():\n            instance_build_noti_dict[k] = instance_build_noti_dict.get(k,0) + v\n        if x2m_fields:\n            x2m_obj.append(obj)\n            x2m_searched_obj.append(searched_obj)\n    if x2m_fields:\n        obj = x2m_obj\n        searched_obj = x2m_searched_obj\n\n    if not check_file and not obj:\n        raise UserError('not check_file and not obj')\n\n    return obj, searched_obj, instance_build_noti_dict\n\n\ndef get_create_write(\n                     self,\n                     search_dict,\n                     write_dict={},\n                     MD={},\n                     exist_val=None,\n                     setting={},\n                     check_file=False,\n                     is_search=True,\n                     is_write=True,\n#                     main_call=False,\n                     needdata=None,\n                     f_name_call=None,\n                     is_x2m_fields=False\n                     ):\n    new_noti_dict = {}\n    model_name = MD.get('model')\n    empty_object = self.env[model_name]\n\n#    is_search_default_when_not_check_file = False\n#    is_search = check_file or (exist_val == None or func_check_if_excel_is_same_existence)\n#    is_write = not check_file and (exist_val == None or st_is_allow_write_existence)\n\n    if is_search:\n        searched_obj = search_handle(self, MD, search_dict, model_name, setting, needdata)\n        new_noti_dict['search'] = 1\n    else:\n        searched_obj = empty_object\n\n    return_obj = exist_val if exist_val != None else searched_obj\n    if return_obj and len(return_obj) > 1:\n        if MD.get('get_first_one',False):\n            return_obj = return_obj[0]\n        else:\n            try:\n                mapped_name = return_obj.mapped('name')\n            except:\n                mapped_name = return_obj\n            raise UserError(u'len_return_obj > 1, mapped_name: %s,model_name: %s,search_dict:%s '%(str(mapped_name),model_name, search_dict))\n\n    if (exist_val == None and return_obj == empty_object) and not check_file:\n        create_obj = create_handle(self, search_dict, write_dict, MD, model_name, f_name_call, is_x2m_fields)\n        return_obj = create_obj\n        new_noti_dict['create'] = 1\n    elif return_obj and is_write:\n        write_handle(self, return_obj, MD, write_dict, new_noti_dict, f_name_call=f_name_call)\n    return return_obj, searched_obj, new_noti_dict\n\ndef search_handle(self, model_dict, search_dict, model_name, setting, needdata):\n    search_func = model_dict.get('search_func')\n    if search_func:\n        searched_obj = search_func(self, model_dict, setting, needdata)\n    else:\n        if search_dict:\n            pass\n        else:\n            raise UserError(u'There is no search dict, model_name: %s-MD: %s'%(model_name, model_dict))\n        if model_dict.get('inactive_include_search'):\n            domain_not_active = ['|',('active','=',True),('active','=',False)]\n        else:\n            domain_not_active = []\n        domain = []\n        has_none_val_search_field = False\n        for f_name in search_dict:\n            field_attr = model_dict['fields'][f_name]\n            val = search_dict[f_name]\n            f_name = get_key(field_attr, 'transfer_name') or f_name\n            operator_search = field_attr.get('operator_search','=')\n            tuple_in = (f_name, operator_search, val)\n            domain.append(tuple_in)\n        if not has_none_val_search_field:\n            domain = expression.AND([domain_not_active, domain])\n        searched_obj = self.env[model_name].search(domain)\n    return searched_obj\n\ndef create_handle(self, search_dict, write_dict, MD, model_name, f_name_call, is_x2m_fields):\n    search_dict_new = {}\n
    context = MD.get('context',{})\n    allow_create = MD.get('allow_create', True)\n    if not allow_create:\n        if getattr(self, 'BreakRowException_if_raise_allow_create'):\n            raise BreakRowException(u'Model %s with f_name_call:%s, name: \"%s\" may only be fetched (get), not created'%(model_name, f_name_call, MD['fields']['name']['val']))\n        else:\n            raise UserError(u'Model %s with f_name_call:%s, name: \"%s\" may only be fetched (get), not created; create it by hand or set the BreakRowException_if_raise_allow_create attribute to skip this message'%(model_name, f_name_call, MD['fields']['name']['val']))\n#    search_dict.update(write_dict)\n    write_dict.update(search_dict)\n    for f_name,val in write_dict.items():\n        field_attr = MD['fields'][f_name]\n        f_name = get_key(field_attr, 'transfer_name') or f_name\n        delete_when_return_x2m_create_if_false = field_attr.get('delete_when_return_x2m_create_if_false',False)\n        if not delete_when_return_x2m_create_if_false:\n            search_dict_new[f_name] = val\n    if is_x2m_fields and not CREATE_IN_M2X:\n#        for i in search_dict:\n#            delete_when_return_x2m_create_if_false = MD.get('delete_when_return_x2m_create_if_false',False)\n#            if delete_when_return_x2m_create_if_false:\n        return search_dict_new\n\n    created_object = self.env[model_name].sudo().with_context(**context).create(search_dict_new)\n    return_obj = created_object\n    return return_obj\n\n\ndef write_handle(self, return_obj, MD, write_dict, new_noti_dict, f_name_call=False):\n    write_dict_new = {}\n    writed_object = return_obj\n    for key_f_name, val in write_dict.items():\n        field_MD = MD['fields'][key_f_name]\n        if field_MD.get('val_goc') != False or field_MD.get('write_false', False):\n            f_name = get_key(field_MD, 'transfer_name') or key_f_name\n            is_write_this_field = field_MD['write_field']\n            if is_write_this_field:\n                orm_field_val = getattr(writed_object, f_name)\n                func_in_write_handle = field_MD.get('func_in_write_handle')\n                if func_in_write_handle:\n                    val = func_in_write_handle(orm_field_val,val)\n                is_x2m = field_MD.get('x2m_fields', False)\n                diff = check_diff_write_val_with_exist_obj(orm_field_val, val, field_MD, is_x2m)\n                if diff:\n                    if is_write_this_field:\n                        write_dict_new[f_name] = val\n    if write_dict_new:\n        writed_object.write(write_dict_new)\n        new_noti_dict['update'] = 1\n        if f_name_call =='main_call' or MD.get('print_wite',False):\n            print ('**write_dict_new:%s for obj:%s'%(write_dict_new, writed_object))\n    else:  # not updated\n        new_noti_dict['skipupdate'] = 1\n\n\ndef check_diff_write_val_with_exist_obj(orm_field_val, field_dict_val, field_MD, is_x2m):\n    field_type = field_MD.get('field_type')\n    is_write = False\n    if is_x2m:\n        pass\n#    elif isinstance(orm_field_val, datetime.date):\n#        converted_orm_val_to_dict_val = fields.Date.from_string(orm_field_val)\n#    elif isinstance(orm_field_val, datetime.datetime):\n#        converted_orm_val_to_dict_val = fields.Datetime.from_string(orm_field_val)\n    else:\n        try:\n            converted_orm_val_to_dict_val = getattr(orm_field_val, 'id', orm_field_val)\n            if converted_orm_val_to_dict_val == None:  # recordset.id == None when the recordset is empty\n                converted_orm_val_to_dict_val = False\n        except:\n            converted_orm_val_to_dict_val = orm_field_val\n\n    if '2many' in field_type:\n        is_write = True\n#        is_write = False\n#        if not all(field_MD['obj']):\n#            is_write = True\n#        else:\n#            for field_dict_val_item in field_MD['obj']:\n#                is_write_item = field_dict_val_item not in orm_field_val\n#                is_write = is_write or is_write_item\n    elif field_type=='float':\n
        is_write = float_compare(orm_field_val, field_dict_val, precision_rounding=0.01)  # 1 = different, 0 = same\n    else:\n        is_write = converted_orm_val_to_dict_val != field_dict_val\n    return is_write\n\n","sub_path":"importexcel/models/model_dict_folder/get_or_create_func.py","file_name":"get_or_create_func.py","file_ext":"py","file_size_in_byte":10760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"88320189","text":"from PIL import Image\nimport serial\nimport time\nimport struct\nimport os\n\nser1 = serial.Serial('COM3', 115200)\ntime.sleep(2)\n\nfor d in range(0, 50):\n    drc = sorted(os.listdir('.'))\n    for f in drc:\n        if f.endswith('.jpg'):\n            im = Image.open(f)\n            px = im.load()\n            penis = 0\n            for x in range(0, 8):\n                print('______')\n                for y in range(0, 8):\n                    if px[x, y] == (255, 255, 255):\n                        izlaz = 0\n                    elif px[x, y] == (0, 0, 0):\n                        izlaz = 1\n                    else:\n                        izlaz = 0\n                    kurac = abs(y - 7)\n                    penis += (izlaz * 2 ** kurac)\n                ser1.write(struct.pack('>B', penis))\n                penis = 0\n                time.sleep(0.01)\n","sub_path":"1. radna verzija -KOPIJA ZA DRKANJE/slike i python/test3.py","file_name":"test3.py","file_ext":"py","file_size_in_byte":660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"456218129","text":"#coding: utf-8\n'''\n1. Download the rst-format file from http://docutils.sourceforge.net/docs/user/rst/cheatsheet.txt\n2. Use this program to convert it to html, then open it with Chrome to check it\n3. Fetching document content from a url:\nimport urllib.request\nurl = 'http://example.com/'\nresp = urllib.request.urlopen(url)\ndata = resp.read()  # returns a 'bytes' object\ntext = data.decode('utf-8')  # decode the 'bytes' object into a 'string'\nReference:\nhttp://stackoverflow.com/questions/7243750/download-file-from-web-in-python-3\n'''\n# import the os module\nimport os\n'''\nBelow, fetch the document content from a web URL via urllib\n'''\n# import urllib.request\nimport urllib.request\n# import publish_string, which handles rst2html\nfrom docutils.core import publish_string\nrst檔案2url = \"http://www.southampton.ac.uk/~fangohr/computing/rst/rst.txt\"\n# save the image.png that will be used below\n影像檔案url = \"http://www.southampton.ac.uk/~fangohr/computing/rst/image.png\"\n# download the file above and save it as image.png\nwith open(\"image.png\", 'wb') as out_file:\n    result = urllib.request.urlopen(影像檔案url).read()  # returns a 'bytes' object\n    out_file.write(result)\n網址回覆 = urllib.request.urlopen(rst檔案2url)\n# get the content headers of the host serving the url\nprint(urllib.request.urlopen(rst檔案2url).headers)\n# get the bytes object\nbyte資料 = 網址回覆.read()\n# decode it to get the document content\n文件內容 = byte資料.decode('utf-8')\n# print the document content\n#print(文件內容)\n超文件內容 = publish_string(\n    source=文件內容,\n    writer_name='html',\n    settings = None,\n    settings_overrides={'output_encoding': 'unicode'}\n    )\n# save the hypertext content to a file\n檔案 = open(\"docutils_ex2.html\", \"w\")\n檔案.write(超文件內容)\n目前所在目錄 = os.getcwd()\nos.system(\"V:/tools/GoogleChromePortable/GoogleChromePortable.exe \"+目前所在目錄+\"/docutils_ex2.html\")\n# close the file\n檔案.close()\n'''\n# rst檔案1 = \"http://docutils.sourceforge.net/docs/user/rst/cheatsheet.txt\"\n# run rst2html.py with the rst file as input and the html file as output\n# convert the existing rst-format file 1.txt to 1.html with rst2html.py\nos.system(\"V:/SciTE/python.exe V:/SciTE/scripts/rst2html.py V:/misc/rst_test/1.txt v:/misc/rst_test/1.html\")\n所產生的超文件 = \"v:/misc/rst_test/1.html\"\n# open the generated html file with Chrome\nos.system(\"V:/tools/GoogleChromePortable/GoogleChromePortable.exe \"+所產生的超文件)\n'''\n","sub_path":"2013spring/c2/docutils_ex2.py","file_name":"docutils_ex2.py","file_ext":"py","file_size_in_byte":2445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"467449135","text":"import pygame as pg\r\nimport traceback\r\nimport datetime\r\nfrom os import path\r\n\r\nimport game\r\n\r\n\r\n
if __name__ == '__main__':\r\n    try:\r\n        g = game.Game()\r\n        g.run()\r\n    except Exception:\r\n        e = traceback.format_exc()\r\n        print(e)\r\n        # save the error in a text file\r\n        with open(path.join('..', 'errors.txt'), 'a') as f:\r\n            f.write(datetime.datetime.now().strftime('%d.%m.%Y %H:%M:%S')\r\n                    + '\\n')\r\n            f.write(e + '\\n')\r\n    pg.quit()\r\n","sub_path":"src/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":509,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"186074629","text":"import numpy as np\r\nimport cv2\r\nimport render_image\r\nimport pyautogui\r\nimport threading\r\nimport time\r\nimport Player\r\n\r\nsdThresh = 20\r\nfont = cv2.FONT_HERSHEY_SIMPLEX\r\ncontrols = [['Up', 'Left','Right'], ['w', 'a', 'd']]\r\nnumber_of_players = 2\r\n\r\ndef distMap(frame1, frame2):\r\n    \"\"\"outputs pythagorean distance between two frames\"\"\"\r\n    frame1_32 = np.float32(frame1)\r\n    frame2_32 = np.float32(frame2)\r\n    diff32 = frame1_32 - frame2_32\r\n    norm32 = np.sqrt(diff32[:,:,0]**2 + diff32[:,:,1]**2 + diff32[:,:,2]**2)/np.sqrt(255**2 + 255**2 + 255**2)\r\n    dist = np.uint8(norm32*255)\r\n    return dist\r\n\r\n\r\ndef capture_frame(cap):\r\n    _, frame = cap.read()\r\n    return render_image.slice(frame, number_of_players)\r\n\r\n\r\ndef player_initial_frames(frame1, frame2, player):\r\n\r\n    frame1s = render_image.slice(frame1, 3)\r\n\r\n    frame2s = render_image.slice(frame2, 3)\r\n\r\n    player.initial_frames = [(frame1s), (frame2s)]\r\n
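\r\n# distMap() of two identical frames is all zeros, so the stddev test in main()\r\n# below only fires when pixels actually change between frames.\r\n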
\r\n\r\ndef main():\r\n    cv2.namedWindow('frame')\r\n    # capture video stream from camera source. 0 refers to the first camera, 1 refers to the 2nd, and so on.\r\n    cap = cv2.VideoCapture(0)\r\n\r\n    frame1 = capture_frame(cap)\r\n    frame2 = capture_frame(cap)\r\n\r\n    players = []\r\n    for i in range(number_of_players):\r\n        players.append(Player.Player(1, controls[i][0], controls[i][1], controls[i][2], 'r', 0.5))\r\n    i = 0\r\n    for player in players:\r\n        player_initial_frames(frame1[i], frame2[i], player)\r\n        player.always_forward()  # maintains fixed forward speed\r\n        i += 1\r\n\r\n    while True:\r\n        _, frame3_all = cap.read()\r\n        frame3s = render_image.slice(frame3_all, number_of_players)\r\n        i = 0\r\n        for player in players:\r\n            frame3 = frame3s[i]\r\n            i += 1\r\n            frames = render_image.slice(frame3, 3)\r\n            frame_index = 0\r\n            # states = {'right': False, 'center': False, 'left': False}\r\n            for frame in frames:\r\n                frame1 = player.initial_frames[0][frame_index]\r\n                frame2 = player.initial_frames[1][frame_index]\r\n                rows, cols, _ = np.shape(frame)\r\n                dist = distMap(frame1, frame)\r\n\r\n                frame1 = frame2\r\n                frame2 = frame\r\n\r\n                # apply Gaussian smoothing\r\n                mod = cv2.GaussianBlur(dist, (9, 9), 0)\r\n\r\n                # apply thresholding\r\n                _, thresh = cv2.threshold(mod, 100, 255, 0)\r\n\r\n                # calculate st dev test\r\n                _, stDev = cv2.meanStdDev(mod)\r\n\r\n                cv2.putText(frame2, 'SPLIT', (70, 70), font, 1.5, (255, 255, 255), 1, cv2.LINE_AA)\r\n                if stDev > sdThresh:\r\n                    cv2.putText(frame2, 'SPLIT', (70, 70), font, 1.5, (0, 0, 100), 1, cv2.LINE_AA)\r\n                    if frame_index == 0:\r\n                        player.states['right'] = True\r\n                    if frame_index == 1:\r\n                        player.states['center'] = True\r\n                    if frame_index == 2:\r\n                        player.states['left'] = True\r\n                # TODO: Face Detection 2\r\n                player.handle_movement()\r\n                if cv2.waitKey(1) & 0xFF == 27:\r\n                    break\r\n                frame_index += 1\r\n            # cv2.imshow('dist', frame3)\r\n            # # dist = distMap(frame1, frame3)\r\n            # # mod = cv2.GaussianBlur(dist, (9, 9), 0)\r\n            # cv2.imshow('dist', mod)\r\n        height, width = frame3_all.shape[:2]\r\n        width /= number_of_players\r\n        for i in range(number_of_players):\r\n            cv2.line(frame3_all, (int(width / 3) + int(i*width), 0), (int(width / 3) + int(width*i), height), (0, 0, 0), 1)\r\n            cv2.line(frame3_all, (int(width / 3)*2 + int(i*width), 0), (int(width / 3)*2 + int(i*width), height), (0, 0, 0), 1)\r\n        if number_of_players > 1:\r\n            cv2.line(frame3_all, (int(width), 0), (int(width), height), (0, 0, 255), 3)\r\n        cv2.imshow('frame', frame3_all)\r\n\r\n    cap.release()\r\n    cv2.destroyAllWindows()\r\n\r\nif __name__ == '__main__':\r\n    main()","sub_path":"motion_detection.py","file_name":"motion_detection.py","file_ext":"py","file_size_in_byte":4087,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"356718068","text":"import tweepy as tw\nimport pandas as pd\nimport numpy as np\nimport sys; sys.path.insert(1, '../functions')\nimport dict_utilities as dict_u\n\ndef get_df_news(data_folder, news_to_read, format_cols):\n    '''Creates a dataframe from a \".parquet.gzip\" file\n    data_folder : directory of the parquet file\n    news_to_read : \"Bloomberg\" or \"Reuters\"\n    format_cols : temporary argument until we agree on the format of dataframe'''\n    df = pd.read_parquet(data_folder + 'financial_data' + news_to_read + '.parquet.gzip')\n    df = df.rename(columns = {'Article':'Text', 'Journalists':'Author'})\n    return df[format_cols]\n\ndef get_codes(codes_dir):\n    '''Returns ['API_Key', 'API_Secret_Key', 'Access_Token','Access_Secret_Token'] stored in codes_dir txt file'''\n    codes = []\n    f = open(codes_dir, \"r\")\n    for _ in range(4):\n        string = f.readline()\n        codes.append(string[string.find(':')+2:].strip())\n    f.close()\n    return codes\n\ndef search_twitter(search_word, date_since, nb_items, language, codes, format_cols, retweet=False):\n    '''Constructs a dataframe of tweets found all 
over twitter with matching search word\n    search_word : word to search in tweets\n    date_since : date from which to search (format YYYY-MM-DD)\n    nb_items : number of tweets to get\n    language : \"en\", \"fr\"...\n    codes : ['API_Key', 'API_Secret_Key', 'Access_Token','Access_Secret_Token'] (use get_codes)\n    format_cols : temporary argument until we agree on the format of dataframe'''\n\n    auth = tw.OAuthHandler(codes[0], codes[1])\n    auth.set_access_token(codes[2],codes[3])\n    api = tw.API(auth)\n\n    try:\n        api.verify_credentials()\n    except tw.TweepError:\n        print(\"Error during authentication\")\n\n    if not retweet:\n        search_word = search_word + \" -filter:retweets\"\n\n    tweets = tw.Cursor(api.search, q=search_word, lang=language, since=date_since, tweet_mode='extended').items(nb_items)\n\n    list_data = [[tweet.full_text, tweet.user.screen_name, tweet.created_at, search_word] for tweet in tweets]\n\n    tweet_df = pd.DataFrame(data=list_data, columns=[\"Text\", \"Author\", \"Date\", \"Search Word\"])\n\n    return tweet_df[format_cols]\n\ndef search_author(search_id, date_since, nb_items, language, codes, format_cols, retweet=False):\n    '''Constructs a dataframe of the last ~nb_items tweets found since date_since on account with id search_id\n    search_id : id of account to search from\n    date_since : date from which to search (format YYYY-MM-DD)\n    nb_items : number of tweets to get\n    language : \"en\", \"fr\"...\n    codes : ['API_Key', 'API_Secret_Key', 'Access_Token','Access_Secret_Token'] (use get_codes)\n    format_cols : temporary argument until we agree on the format of dataframe'''\n    \n    auth = tw.OAuthHandler(codes[0], codes[1])\n    auth.set_access_token(codes[2],codes[3])\n    api = tw.API(auth)\n    \n    try:\n        api.verify_credentials()\n    except tw.TweepError:\n        print(\"Error during authentication\")\n\n    tweets = tw.Cursor(api.user_timeline, screen_name = search_id, count = nb_items, include_rts = retweet, lang=language, since=date_since, tweet_mode='extended').items(nb_items)\n\n    list_data = [[tweet.full_text, tweet.user.screen_name, tweet.created_at] for tweet in tweets]\n\n    tweet_data = pd.DataFrame(data=list_data, columns=[\"Text\", \"Author\", \"Date\"])\n\n    return tweet_data[format_cols]\n\ndef add_tweets_to_dict (date_since, nb_items, language, codes, format_cols, dict_dir, retweet=False, from_words=[], from_ids=[]):\n    '''Adds tweets with matching search words across twitter and/or from specific accounts to the data dictionary with correct ticker\n    date_since : date from which to search (format YYYY-MM-DD)\n    nb_items : number of tweets to get\n    language : \"en\", \"fr\"...\n    codes : ['API_Key', 'API_Secret_Key', 'Access_Token','Access_Secret_Token'] (use get_codes)\n    format_cols : temporary argument until we agree on the format of dataframe\n    dict_dir : Directory of data dictionary\n    from_words contains words to search all across twitter;\n    from_ids contains the ids of Twitter accounts to search from'''\n    for search_word in from_words:\n        df = search_twitter(search_word, date_since, nb_items, language, codes, format_cols, retweet=retweet)\n        dict_u.add_to_dict(df, search_word, dict_dir, format_cols)\n    for search_id in from_ids:\n        df = search_author(search_id, date_since, nb_items, language, codes, format_cols, retweet=retweet)\n        dict_u.add_to_dict(df, search_id, dict_dir, format_cols)\n    \n    \ndef add_news_to_dict(search_words, data_folder, news_to_read, dict_dir, format_cols):\n    '''Adds news articles with matching search words from parquet file\n    data_folder : directory of the parquet file\n    news_to_read : \"Bloomberg\" 
or \"Reuters\"\n dict_dir : Directory of data dictionary\n format_cols : temporary argument until we agree on the format of dataframe'''\n source_df = get_df_news(data_folder, news_to_read, format_cols)\n for search_word in search_words:\n filtered_df = source_df[source_df[format_cols[0]].apply(lambda article : search_word.lower() in article.lower())]\n dict_u.add_to_dict(filtered_df, search_word, dict_dir, format_cols)\n \n \n \n","sub_path":"Yann/functions/data_utilities.py","file_name":"data_utilities.py","file_ext":"py","file_size_in_byte":5255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"104316494","text":"from __future__ import unicode_literals\n\n\nfrom django.shortcuts import render\nfrom django.shortcuts import render,redirect\nfrom django.http import HttpResponse\nfrom .models import *\nfrom .forms import CreateUserForm\nfrom django.contrib.auth.forms import UserCreationForm\nfrom django.contrib.auth import authenticate,login,logout\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required \nfrom .decorators import unauthenticated_user,allowed_users\n\n\nimport pandas as pd\n\n\nfrom .models import Profile, Comment\nfrom datetime import datetime\nfrom django.urls import reverse\nfrom django.db.models import Q\nfrom .forms import *\nfrom django.http import HttpResponse, HttpResponseRedirect, JsonResponse, Http404\nfrom django.template.loader import render_to_string\nfrom django.forms import modelformset_factory\n\n\n# Create your views here.\n\n# Registration Page\n@unauthenticated_user\ndef registerPage(request):\n form = CreateUserForm()\n if request.method == \"POST\":\n form = CreateUserForm(request.POST) \n if form.is_valid():\n form.save()\n user = form.cleaned_data.get('username')\n messages.success(request,'Account was created for '+ user) \n return redirect('login')\n \n context={'form':form}\n return render(request,'accounts/register.html',context)\n\n\n# Login Page\n@unauthenticated_user\ndef loginPage(request):\n if request.method == 'POST':\n username=request.POST.get('username')\n password=request.POST.get('password')\n \n user = authenticate(request,username=username, password = password)\n \n if user is not None:\n login(request,user)\n return redirect('home')\n else:\n messages.info(request,'Username or Password is incorrect ') \n \n context={}\n return render(request,'accounts/login.html',context)\n\ndef aboutus(request):\n return render(request,'accounts/aboutus.html')\n\n# Logout page\ndef logoutUser(request):\n logout(request)\n return redirect('login')\n\n\n\ndef sage(request):\n #post = get_object( id=id)\n comments = Comment.objects.filter( reply=None).order_by('-id')\n\n if request.method == 'POST':\n comment_form = CommentForm(request.POST or None)\n if comment_form.is_valid():\n content = request.POST.get('content')\n reply_id = request.POST.get('comment_id')\n comment_qs = None\n if reply_id:\n comment_qs = Comment.objects.get(id=reply_id)\n comment = Comment.objects.create(user=request.user, content=content)\n comment.save()\n #return HttpResponseRedirect(post.get_absolute_url())\n else:\n comment_form= CommentForm()\n\n context = {\n 'comments': comments,\n 'comment_form': comment_form,\n }\n if request.is_ajax():\n html = render_to_string('accounts/comments.html', context, request=request)\n return JsonResponse({'form': html})\n\n return render(request, 'accounts/home.html', context)\n\n\n\n# 
@allowed_users(allowed_roles=['admin'])\n@login_required(login_url='login', redirect_field_name=None)\ndef home(request):\n    return redirect('sage')\n\n\n# Profile View for the user's profile\n@login_required(login_url='login', redirect_field_name=None)\ndef profile(request):\n    profiledata = {\n        \"name\": request.user.username,\n        \"email\": request.user.email,\n        \"rank\": 1,\n        \"badge\": \"Great Sage\",\n    }\n    return render(request, \"accounts/profile.html\", {\n        \"data\": profiledata,\n    })\n\n# Fetch news from the database: csv file will be read and news will be fetched randomly\nraw_news = pd.read_csv(\"static/news_temp.csv\")\n@login_required(login_url='login', redirect_field_name=None)\ndef grabnews(request):\n    news_data = raw_news.sample(1)\n    news_data = news_data.iloc[0]\n    news_title = news_data.title\n    news_summary = news_data.summary\n    html = \"
\n    %s\n\n    %s\n
    \" % (news_title, news_summary)\n return HttpResponse(html)\n","sub_path":"ctzs/accounts/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4029,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"631138442","text":"import re\nfrom google.appengine.ext import db\nfrom models import USGSEarthquakeEvent\nfrom datetime import datetime\nimport urllib2\n\ndef tryUSGSFeed():\n url = 'http://earthquake.usgs.gov/earthquakes/catalogs/1day-M2.5.xml'\n try:\n response = urllib2.urlopen(url).read()\n except:\n response = False\n return response\n\ndef parsexmltext(text):\n return re.findall(r'M (\\d*[4-9]\\.\\d*), (.*California.*)(\\d\\d\\d\\d)-(\\d\\d)-(\\d\\d)T(\\d\\d):(\\d\\d):(\\d\\d).*(\\d*.\\d*) (-*\\d*\\.\\d*)',text)\n\ndef storeOrRejectDuplicate(match):\n def getEQParametersFromMatch(match):\n date = getDateFromMatch(match)\n mag = float(match[0])\n coord = getCoordFromMatch(match)\n location = match[1]\n return date,mag,location,coord\n \n date, mag, location , coord = getEQParametersFromMatch(match)\n \n if len(USGSEarthquakeEvent.all().filter('date =',date).filter('mag =', mag).filter('location =', location).filter('coord =', coord).fetch(1)) > 0:\n return\n USGSEarthquakeEvent(date=date,mag=mag,location=location,coord=coord).put()\n \ndef getDateFromMatch(match):\n return datetime(int(match[2]),int(match[3]),int(match[4]),int(match[5]),int(match[6]),int(match[7]))\n\ndef getCoordFromMatch(match):\n return db.GeoPt(match[8],match[9])\n \n\n\ndef main():\n urlResponse = tryUSGSFeed()\n if not urlResponse:\n return False\n newreadings = parsexmltext(urlResponse)\n \n for match in newreadings:\n storeOrRejectDuplicate(match)\n return True","sub_path":"updateEQDatabase.py","file_name":"updateEQDatabase.py","file_ext":"py","file_size_in_byte":1548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"84071306","text":"from random import randint\nfrom dice.virtual import Virtual\n\n\nclass User:\n def __init__(self, name, life, x, y):\n self.name = name\n self.life = life\n self.land_idx = 0\n self.virtual = Virtual(x, y)\n self.result = 0\n self.circle_text = \"\"\n # 무인도 변수\n self.countdown = []\n # 폭풍 변수\n self.countdown2 = []\n\n # 실제위치 메서드( game 변수목록 클래스, 회차 cnt, 1번째 2번째 유저 구분여부 idx) - 실제 위치\n # 주사위 돌리기, 땅 위치 값 계산 후 이동, 한바퀴 돌았을 시 idx값 조정, 주사위 값 반환\n def dice(self, game, count, idx):\n # 주사위 돌리기\n self.result = randint(1, 6)\n # 땅 위치 값 계산 후 이동\n self.land_idx += self.result\n # 한바퀴 돌았을 시 idx값 조정, life +1\n if self.land_idx > 17:\n self.land_idx -= len(game.land[idx])\n self.life += 1\n self.circle_text = \"한바퀴가 지났습니다. 
생명력 +1 !!\"\n # 한바퀴 돌고 두번째 이후 text를 비운다.\n else:\n self.circle_text = \"\"\n # 주사위 값 반환, 객체 변수로 만들어서 Game 클래스에서 주사위 값 사용\n return self.result\n\n # 실제, 가상위치 비교 및 이동\n def virtual_thread(self, land):\n # 실제, 가상위치 일치\n if self.land_idx == self.virtual.idx:\n return True, randint(1, 17)\n # 실제, 가상위치 불일치\n else:\n self.virtual.move_run(land)\n return False, 0\n\n\n","sub_path":"dice/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":1642,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"502203738","text":"#-*- coding:utf-8 -*-\nfrom __future__ import division\nimport re,sys,os\n\nimport cPickle as pickle\nimport numpy as np\nfrom optparse import OptionParser\n\nimport htsaminfo\n\n\ndef prepare_optparser():\n usage =\"\"\"usage: %s [options] \nThis file can read the result of HTseq result and then get the number of UMI.\nWith the count of UMI, the TPM value could be calculated for each gene in the\ngiven gene list as well as the ercc list.\n\nUsing -h or --help for more information\n\nPre-processing:\nsamtools view -o /data/Analysis/huboqiang/b2_bar.sam \\\\\n /date/dongji/project/Huyuqiong/task10_2015_11_25/mismatch0/01.Tophat/b2_bar2/accepted_hits.bam &&\\\\\npython /data/Analysis/huboqiang/software/anaconda/bin/htseq-count \\\\\n -s no -f sam -a 10 -o /data/Analysis/huboqiang/b2_bar.umi.sort.gene.sam \\\\\n /data/Analysis/huboqiang/b2_bar.sam \\\\\n /date/dongji/database/Database_RNA_v2/mm10/refGene.gtf \\\\\n >/data/Analysis/huboqiang/b2_bar.deseq.xls\n\nExample:\n python %s -g genelist.xls -e list_gene_ercc.xls input.sam\n \n \"\"\" % (sys.argv[0],sys.argv[0])\n\n description = \"TPM value for a given HTseq result file. \"\n \n optparser = OptionParser(version=\"%s v0.2 20141130\" % (sys.argv[0]),\n description=description,\n usage=usage,\n add_help_option=False\n )\n optparser.add_option(\n \"-g\", \"--geneList\", default=\"list_gene.xls\",\n help=\"\\nGene list with given order. [default: %default]\"\n )\n optparser.add_option(\n \"-e\", \"--erccList\", default=\"list_ercc.xls\",\n help=\"\\nERCC list with given order. 
[default: %default]\"\n )\n\n optparser.add_option(\"-h\",\"--help\", action=\"help\",\n help=\"\\nShow this help message and exit.\"\n )\n return optparser\n\n\ndef main():\n prepare_optparser()\n (options,args) = prepare_optparser().parse_args()\n try:\n input_sam = args[0]\n geneList = options.geneList\n erccList = options.erccList\n except IndexError:\n prepare_optparser().print_help()\n sys.exit(1)\n\n m_tpmInfo = htsaminfo.HTSeqSamInfo(input_sam, geneList, erccList)\n m_tpmInfo.s1_SamToUMI()\n m_tpmInfo.s2_outUMIList()\n m_tpmInfo.s3_UMI2TPM()\n\n\nif __name__ == '__main__':\n main()","sub_path":"04_tpm_hubq.py","file_name":"04_tpm_hubq.py","file_ext":"py","file_size_in_byte":2290,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"56237214","text":"#!/usr/bin/python\n#code By: Mr.D'HACK\n#code Date: 22/12/2019\n#Tools spam calling\n###################################\n# Color #\n###################################\nc=('\\033[1;36m')\nr=('\\033[1;31m')\ng=('\\033[1;32m')\nw=('\\033[1;37m')\n###################################\n# Kesalahan Module #\n###################################\ntry:\n\timport os, requests, time, json\nexcept ModuleNotFoundError:\n\tprint (\"\\nSepertinya module requests BELUM Di Install\")\n###################################\n# banner #\n###################################\nbanner = \"\"\"\n ___\n |<\\>\n \\___/\n { Code By }\n } Mr.D'HACK {\n <<<<<\\_/>>>>>\n\"\"\"\n###################################\n# input target #\n###################################\nprint (banner)\ntarget = input(\"[+] Notarget: \")\njlmh=int(input(\"[+] Jumlah Spam: \"))\n###################################\n# \ntry:\n\thenti_tanya=False\n\tforcecon=0\n\tprint(\"\\n%s[-] RESULT:%s\"%(r,w));time.sleep(1)\n\tfor i in range(jlmh):\n\t\tcout=1\n\t\tprint(f\"{'{'}{i+1}{'}'}\"+\"=\"*40+f\"{'{'}{i+1}{'}'}\")\n\t\tfor i in target:\n\t\t\tif i == '':\n\t\t\t\tcout+=1\n\t\t\t\tcontinue\n\t\t\tdt={'method':'CALL','countryCode':'id','phoneNumber':i,'templateID':'pax_android_production'}\n\t\t\tr1 = requests.post('https://api.grab.com/grabid/v1/phone/otp',data=dt,headers={'user-agent':'Mozilla/5.0 (Linux; Android 7.1.2; AFTMM Build/NS6264; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/59.0.3071.125 Mobile Safari/537.36'})\n\n\t\t\tif \"10074\" in r1.text:\n\t\t\t\tprint(f\"[!] Sepertinya Target Terkena Batas limit Spam , Tunggu 15 mnt untuk melanjutkan\")\n\t\t\t\tif henti_tanya == True:\n\t\t\t\t\tpass\n\t\t\t\telse:\n\t\t\t\t\tpil=input(\"[?] Kesalahan! Apa anda ingin menjeda sepama 1 mnt? [y/n] \")\n\t\t\t\t\tif pil.lower() == 'y':\n\t\t\t\t\t\tfor x in range(60):\n\t\t\t\t\t\t\ttry:\n\t\t\t\t\t\t\t\tprint(end=f\"\\r[!] Jeda {60-(x+1)} detik\",flush=True)\n\t\t\t\t\t\t\t\ttime.sleep(1)\n\t\t\t\t\t\t\texcept: break\n\t\t\t\t\t\tprint(\"\\n[+] Melanjutkan....\")\n\t\t\t\t\telif pil.lower() == 'f':\n\t\t\t\t\t\thenti_tanya=True\n\t\t\t\t\telse:\n\t\t\t\t\t\tforcecon+=1\n\t\t\t\t\t\tif forcecon >= 3:\n\t\t\t\t\t\t\tprint(f\"[!] 
{c}tekan F untuk menghentikan pertanyaan{w}\")\n\t\t\telif \"challengeID\" in r1.text:\n\t\t\t\tprint (\"[+] Spam Berhasil.\")\n\t\t\telse:\n\t\t\t\tprint (f\"[-] Spam Gagal.\")\n\t\t\ttime.sleep(10)\n\t\t\tcout+=1 \n\tprint(\"{end}\"+\"=\"*40+\"{end}\")\nexcept KeyboardInterrupt:\n\tprint(\"\\n%sBye2.......\"%(c)) \n","sub_path":"spam.py","file_name":"spam.py","file_ext":"py","file_size_in_byte":2544,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"396934872","text":"\"\"\" Implement class to handle QE electron scattering data. \"\"\"\n\nimport os\nimport pandas as pd\n\nfrom .utils import make_path\n\n\nclass QuasielasticData:\n \"\"\" Class to hold and manipulate QE electron scattering data.\"\"\"\n def __init__(self, element):\n data_file = make_path(element+'.dat', os.path.join('data', 'qe'))\n self.data = pd.read_csv(data_file,\n sep=r'\\s+',\n names=('Z', 'A', 'Energy', 'Angle', 'Peak',\n 'Data', 'StatUncertainty', 'citation'))\n self.element = element\n self.Z = self.data['Z'].values[0]\n self.A = self.data['A'].values[0]\n\n @property\n def citations(self):\n ''' Get all the experimental citations in the data set'''\n return self.data.citation.unique()\n\n @property\n def energies(self):\n ''' Get all the energy values in the data set'''\n return self.data.Energy.unique()\n\n @property\n def angles(self):\n ''' Get all the angle values in the data set'''\n return self.data.Angle.unique()\n\n @property\n def peaks(self):\n '''\n Get all the 4-momentum transfer at the top of the QE peak (x=1)\n values in the data set\n '''\n return self.data.Peak.unique()\n\n def get_data(self, energy, angle):\n \"\"\" Get the data for a specific energy and angle. \"\"\"\n mask_energy = self.data['Energy'] == energy\n mask_angle = self.data['Angle'] == angle\n data_tmp = self.data[mask_energy & mask_angle]\n data_omega = data_tmp['Peak'].values\n data_dsigma = data_tmp['Data'].values\n data_error = data_tmp['StatUncertainty'].values\n\n return data_omega, data_dsigma, data_error\n","sub_path":"src/nuchic/qe_data.py","file_name":"qe_data.py","file_ext":"py","file_size_in_byte":1769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"310922016","text":"import socket\nimport sys\n\n\ns = socket.socket(socket.AF_INET,socket.SOCK_STREAM)\ns.connect((sys.argv[1], int(sys.argv[2])))\ns.send(sys.argv[3])\ndata = s.recv(1024)\nrcv_file = open(sys.argv[3], 'wb')\nrcv_file.write(data)\nrcv_file.close()\ns.close()\nprint('Done!')\n","sub_path":"python3/socket_file_upload/clientDown.py","file_name":"clientDown.py","file_ext":"py","file_size_in_byte":261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"369891420","text":"# By Alberto Caro\n# Librerias utilizadas\n# Su instalacion en python\n# --------------------------------------------------\n# pip3 install bs4\n# pip3 install requests\n# pip3 install pandas\n# pip3 install numpy\n# pip3 install telepod\n# --------------------------------------------------\nfrom bs4 import BeautifulSoup as BFS\nimport requests as RQ, pandas as pd\nimport urllib2, numpy as np\nimport matplotlib.pyplot as plt\nimport telepot as Te, time as ti\n\n# --------------------------------------------------\n# Data Set COVID-19 de www.ecdc.europa.eu\n# European Centre for Disease Prevention and Control\n# By Alberto Caro S.\n# --------------------------------------------------\n\n# Arreglos Globales de datos relevantes 
infectados\n# --------------------------------------------------\naPais = [] # -> Pais de origen de personas infectadas\naContagiados = [] # -> Total de personas contagiadas por COVID\naMuertos = [] # -> Total de personas muertas por COVID\naConfirmados = [] # -> Casos Confirmados de personas infectadas COVID\naPeriodo = [] # -> Periodo ano 2021 semanas 9 y 10\n\n# Funcion que limpia los datos cuando los valores\n# utf-8 son mayores que el ordinal 128\n# y los convierte a String o Numerico\n# Se devuelve una Tupla(.)\n#---------------------------------------------------\ndef Clear_Data( sEle, cType ):\n sTRUCO = 'BAD DATA'; sAux = sEle\n if cType in ['S']:\n if len(sAux) == 1: return (False,'')\n for i in range(len(sAux)):\n if ord(sAux[i]) > 128:\n sAux = sTRUCO # Ahora String valido\n return (True,str(sAux)) # String no utf-8\n if cType in ['N']: \n if len(sAux) == 1: return (True,0)\n return (True,int(sAux)) # Ahora dato numerico\n\n return (False,'')\n\n# URL donde se encuentran los datos a obtener\n#---------------------------------------------------\nURL = 'https://www.ecdc.europa.eu/en/geographical-distribution-2019-ncov-cases'\n\n# Realizamos una peticion get al Servidor\n#---------------------------------------------------\nRes = RQ.get(URL)\n\n# Parseamos el contenido devuelto en Res\n# En web_info esta todo el contenido html del URL\n#---------------------------------------------------\nweb_info = BFS(Res.content,'html.parser')\n\n# El tag 'tbody tr' es donde se encuentran los datos\n# Esto se descubrio analizando la pagina consultada\n# mediante el Inspector del navegador Chrome\n#---------------------------------------------------\ndata = web_info.select('tbody tr')\n\n# Recorrimos todas las entradas de data y sacamos la\n# la informacion de interes\n#---------------------------------------------------\nfor xEle in data:\n #Procesamos pais de procedencia\n sLine = xEle.select('td')[1].get_text() \n tRet = Clear_Data(sLine,'S') # Se espera datos tipo String\n if tRet[0] : aPais.append(tRet[1]) \n\n #Procesamos total contagiados\n sLine = xEle.select('td')[2].get_text() \n tRet = Clear_Data(sLine,'N') # se espera dato tipo Numeric\n if tRet[0] : aContagiados.append(tRet[1]) \n\n #Procesamos total muertos\n sLine = xEle.select('td')[3].get_text() \n tRet = Clear_Data(sLine,'N') # sesLine espera dato tipo Numeric\n if tRet[0] : aMuertos.append(tRet[1]) \n \n #Procesamos total confirmados\n sLine = xEle.select('td')[4].get_text() \n tRet = Clear_Data(sLine,'N') # se espera dato tipo Numeric\n if tRet[0] : aConfirmados.append(tRet[1]) \n \n #Procesamos periodo del estudio\n sLine = xEle.select('td')[5].get_text()\n tRet = Clear_Data(sLine,'S') # se espera dato tipo String\n if tRet[0] : aPeriodo.append(tRet[1]) \n\n# Pasando los datos anteriores a un DataFrame Pandas\n# Armamos el dataframe desde los arreglos anteriores\n# Solo tomamos los primeros 215 datos. 
El dato en\n# posicion 216 -> Registra los Totales de cada columna\n#---------------------------------------------------\ndData = {\n 'P' : aPais, # Pais\n 'C' : aContagiados[:215], # Contagiados\n 'M' : aMuertos[:215], # Muertos\n 'F' : aConfirmados[:215] # Confirmados\n } \n\n# DataFrame que resume y ordena de manera matricial\n# toda la informacion anterior\n#---------------------------------------------------\ndf = pd.DataFrame(dData,columns = ['P','C','F','M'],index = range(len(aPais)))\n\n# Exportar datos DataFrame a Archivo CSV.003d\n#---------------------------------------------------\nsFile = 'data_covid_10_19_2021.csv'\n\ndf.to_csv(sFile)\n\n#---------------------------------------------------\n# Salida de los 10 primeros data\n#---------------------------------------------------\ndf.head(10)\n#---------------------------------------------------\n'''\n P C F M\n0 Algeria 124265 11173 3036\n1 Angola 21380 573 521\n2 Benin 6501 867 81\n3 Botswana 35009 6628 447\n4 Burkina_Faso 12378 396 144\n5 Burundi 2461 252 3\n6 Cameroon 38988 3274 588\n7 Cape_Verde 16101 701 156\n8 Central_African_Republic 5025 28 63\n9 Chad 4309 336 154\n----------------------------------------------------\n'''\n\n# Resumen estadistico dataframe contaminacion COVID\n#---------------------------------------------------\ndf.describe()\n#----------------------------------------------------\n'''\ncount 215.000000 215.000000 215.000000\nmean 559388.032558 27434.227907 12371.172093\nstd 2366985.265277 97332.612742 47135.416403\nmin 0.000000 0.000000 0.000000\n25% 3884.000000 149.000000 59.000000\n50% 38988.000000 1859.000000 622.000000\n75% 244211.500000 11176.000000 4447.000000\nmax 29495422.000000 932608.000000 535661.000000\n----------------------------------------------------\n'''\n\n# Graficamos los casos de Contaminados, Contagiados\n# y Muertos de Africa, Europa, Asia, America y Otros\n#---------------------------------------------------\nplt.close('all')\n\n# Graficos de Contagiados de COVID por pais\n# Al hacer ZOOM en Tools se detallan los paises\n#---------------------------------------------------\nInfo = pd.DataFrame(\n {\n 'Pais' : aPais,\n 'Contagiados': aContagiados[:215]\n })\nG = Info.plot(x = 'Pais', y = 'Contagiados',rot = 90 )\nplt.show()\n\n# Graficos de Confirmados de COVID por pais\n# Al hacer ZOOM en Tools se detallan los paises\n#---------------------------------------------------\nInfo = pd.DataFrame(\n {\n 'Pais' : aPais,\n 'Confirmados': aConfirmados[:215]\n })\nG = Info.plot(x = 'Pais', y = 'Confirmados',rot = 90 )\nplt.show()\n\n# Graficos de Muertos de COVID por pais\n# Al hacer ZOOM en Tools se detallan los paises\n#---------------------------------------------------\nInfo = pd.DataFrame(\n {\n 'Pais' : aPais,\n 'Muertos': aMuertos[:215]\n })\nG = Info.plot(x = 'Pais', y = 'Muertos',rot = 90 )\nplt.show()\n\n","sub_path":"codigos/ws_1.py","file_name":"ws_1.py","file_ext":"py","file_size_in_byte":7134,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"617482206","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# fitness_functions.py\n\n\"\"\"\nFitness functions for driving animat evolution.\n\"\"\"\n\nimport textwrap\nWRAPPER = textwrap.TextWrapper(width=80)\n\nfrom collections import OrderedDict, Counter\nfrom functools import wraps\nimport numpy as np\nfrom sklearn.metrics import mutual_info_score\nimport pyphi\n\nimport config\nimport constants as _\nfrom utils import unique_rows\n\n\n# Metadata associated with the 
available fitness functions.\nmetadata = OrderedDict()\n# Mapping from parameter values to descriptive names\nLaTeX_NAMES = {\n 'nat': 'Correct\\ Trials',\n 'mi': 'Mutual\\ Information',\n 'mi_wvn': 'Mutual\\ Information\\ (world\\ vs.\\ noise)',\n 'ex': 'Extrinsic\\ cause\\ information',\n 'ex_wvn': 'Extrinsic\\ cause\\ information\\ (world\\ vs.\\ noise)',\n 'sp': '\\sum\\\\varphi',\n 'sp_wvn': '\\sum\\\\varphi\\ (world\\ vs.\\ noise)',\n 'bp': '\\Phi',\n 'bp_wvn': '\\Phi\\ (world\\ vs.\\ noise)',\n 'mat': 'Matching'\n}\n\n\ndef _register(data_function=None):\n \"\"\"Register a fitness function to the directory.\n\n Also associates the function to data-gathering data_functions, if any.\n \"\"\"\n def wrapper(f):\n metadata[f.__name__] = {'doc': f.__doc__,\n 'data_function': data_function}\n return wrapper\n\n\ndef print_functions():\n \"\"\"Display a list of available fitness functions with their\n descriptions.\"\"\"\n for name, data in metadata.items():\n print('\\n' + name + '\\n ' + data['doc'])\n print('\\n' + WRAPPER.fill(\n 'NB: In order to make selection pressure more even, the fitness '\n 'function used in the selection algorithm is transformed so that it '\n 'is exponential, according to the formula F(R) = B^(S*R + A), where '\n 'R is one of the “raw” fitness values described above, and where B, '\n 'S, A are controlled with the FITNESS_BASE, FITNESS_EXPONENT_SCALE, '\n 'and FITNESS_EXPONENT_ADD parameters, respectively.'))\n print('')\n\n\n# Helper functions\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n# TODO document kwargs\ndef _avg_over_visited_states(shortcircuit=True, upto_attr=False,\n transform=False, n=None):\n \"\"\"A decorator that takes an animat and applies a function for every unique\n state the animat visits during a game (up to the given units only) and\n returns the average.\n\n The wrapped function must take an animat and state, and return a number.\"\"\"\n def decorator(func):\n @wraps(func)\n def wrapper(ind, **kwargs):\n # Short-circuit if the animat has no connections.\n if shortcircuit and ind.cm.sum() == 0:\n return 0.0\n upto = getattr(_, upto_attr) if upto_attr else False\n game = ind.play_game()\n sort = n is not None\n unique_states = unique_rows(game.animat_states, upto=upto,\n sort=sort)[:n]\n values = [func(ind, state, **kwargs) for state in unique_states]\n if transform:\n values = list(map(transform, values))\n return sum(values) / len(values)\n return wrapper\n return decorator\n\n\ndef _world_vs_noise(shortcircuit=True, upto_attr=False, transform=False,\n reduce=sum, n=None):\n \"\"\"A decorator that returns the difference between the sum of the given\n function applied to unique states visited in the world, and the same for\n noise.\n\n The wrapped function must take an animat and a state, and return a\n number.\"\"\"\n def decorator(func):\n @wraps(func)\n def wrapper(ind, **kwargs):\n # Short-circuit if the animat has no connections.\n if shortcircuit and ind.cm.sum() == 0:\n return 0.0\n upto = getattr(_, upto_attr) if upto_attr else False\n # Play the game and a scrambled version of it.\n world = ind.play_game().animat_states\n noise = ind.play_game(scrambled=True).animat_states\n sort = n is not None\n # Uniqify and flatten the world and noise state arrays.\n world = unique_rows(world, upto=upto, sort=sort)[:n]\n noise = unique_rows(noise, upto=upto, sort=sort)[:n]\n # Get a flat list of all the the states.\n combined = np.concatenate([world, noise])\n combined = combined.reshape(-1, 
combined.shape[-1])\n # Get unique world and noise states.\n all_states, unq_idx = unique_rows(combined, upto=upto,\n indices=True)\n all_states = list(map(tuple, all_states))\n # Compute the value for each unique state.\n values = {state: func(ind, state, **kwargs)\n for state in all_states}\n # Collect the world and noise values.\n world_values = [values[all_states[unq_idx[i]]]\n for i in range(len(world))]\n noise_values = [values[all_states[unq_idx[i]]]\n for i in range(len(world),\n len(world) + len(noise))]\n # Transform.\n if transform:\n world_values = transform(world_values)\n noise_values = transform(noise_values)\n # Reduce and take the difference.\n return reduce(world_values) - reduce(noise_values)\n return wrapper\n return decorator\n\n\ndef unq_concepts(constellations):\n \"\"\"Takes a list of constellations and returns the set of unique concepts in\n them.\"\"\"\n return set.union(*(set(C) for C in constellations))\n\n\ndef phi_sum(phi_objects):\n \"\"\"Takes a list of objects that have a ``phi`` attribute and returns the\n sum of those attributes.\"\"\"\n return sum(o.phi for o in phi_objects)\n\n\n# Natural fitness\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\ndef nat(ind):\n \"\"\"Natural: Animats are evaluated based on the number of game trials they\n successfully complete. For each task given in the ``TASKS`` parameter,\n there is one trial per direction (left or right) of block descent, per\n initial animat position (given by ``config.WORLD_WIDTH``).\"\"\"\n ind.play_game()\n return ind.correct\n_register()(nat)\n\n\n# Mutual information\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\ndef mutual_information(states):\n \"\"\"Get the sensor-motor mutual information for a group of trials.\"\"\"\n # The contingency matrix has a row for every sensors state and a column for\n # every motor state.\n contingency = np.zeros([_.NUM_SENSOR_STATES, _.NUM_MOTOR_STATES])\n # Get only the sensor and motor states.\n sensor_motor = np.concatenate([states[:, :, :config.NUM_SENSORS],\n states[:, :, -config.NUM_MOTORS:]], axis=2)\n # Count!\n for idx, state in _.SENSOR_MOTOR_STATES:\n contingency[idx] = (sensor_motor == state).all(axis=2).sum()\n # Calculate mutual information in nats.\n mi_nats = mutual_info_score(None, None, contingency=contingency)\n # Convert from nats to bits and return.\n return mi_nats * _.NAT_TO_BIT_CONVERSION_FACTOR\n\n\ndef mi(ind):\n \"\"\"Mutual information: Animats are evaluated based on the mutual\n information between their sensors and motor over the course of a game.\"\"\"\n game = ind.play_game()\n return mutual_information(game.animat_states)\n_register(data_function=mutual_information)(mi)\n\n\ndef mi_wvn(ind):\n \"\"\"Same as `mi` but counting the difference between world and noise.\"\"\"\n # Play the game and a scrambled version of it.\n world = ind.play_game().animat_states\n noise = ind.play_game(scrambled=True).animat_states\n return mutual_information(world) - mutual_information(noise)\n_register(data_function=mutual_information)(mi_wvn)\n\n\n# Extrinsic cause information\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\ndef extrinsic_causes(ind, state):\n \"\"\"Return the core causes of motors and hidden units whose purviews\n are subsets of the sensors.\"\"\"\n # TODO generate powerset once (change PyPhi to use indices in find_mice\n # purview restriction)?\n subsystem = ind.as_subsystem(state)\n hidden_and_motors = 
subsystem.indices2nodes(_.HIDDEN_MOTOR_INDICES)\n sensors = subsystem.indices2nodes(_.SENSOR_INDICES)\n mechanisms = tuple(pyphi.utils.powerset(hidden_and_motors))\n purviews = tuple(pyphi.utils.powerset(sensors))\n mice = [subsystem.core_cause(mechanism, purviews=purviews)\n for mechanism in mechanisms]\n return list(filter(lambda m: m.phi > 0, mice))\n\n\nex = _avg_over_visited_states(transform=phi_sum)(extrinsic_causes)\nex.__name__ = 'ex'\nex.__doc__ = \\\n \"\"\"Extrinsic cause information: Animats are evaluated based on the sum of φ\n for core causes that are “about” the sensors (the purview is a subset of\n the sensors). This sum is averaged over every unique state the animat\n visits during a game.\"\"\"\n_register(data_function=extrinsic_causes)(ex)\n\n\nex_wvn = _world_vs_noise(transform=unq_concepts,\n reduce=phi_sum)(extrinsic_causes)\nex_wvn.__name__ = 'ex_wvn'\nex_wvn.__doc__ = \\\n \"\"\"Same as `ex` but counting the difference between the sum of φ of unique\n concepts that appear in the world and a scrambled version of it.\"\"\"\n_register(data_function=extrinsic_causes)(ex_wvn)\n\n\n# Sum of small-phi\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\ndef all_concepts(ind, state):\n \"\"\"Return the constellation of all concepts.\"\"\"\n subsystem = ind.as_subsystem(state)\n return pyphi.compute.constellation(\n subsystem,\n mechanisms=_.HIDDEN_POWERSET,\n past_purviews=_.SENSORS_AND_HIDDEN_POWERSET,\n future_purviews=_.HIDDEN_AND_MOTOR_POWERSET)\n\n\n# The states only need to be considered unique up to the hidden units because\n# the subsystem is always the entire network (not the main complex), so there\n# are no background conditions.\nsp = _avg_over_visited_states(transform=phi_sum,\n upto_attr='HIDDEN_INDICES')(all_concepts)\nsp.__name__ = 'sp'\nsp.__doc__ = \\\n \"\"\"Sum of φ: Animats are evaluated based on the sum of φ for all the\n concepts of the animat's hidden units, or “brain”, averaged over the unique\n states the animat visits during a game, where uniqueness is considered up\n to the state of the hidden units (since the entire animat is the system, no\n background conditions need to be considered, and since the sensors lack\n incoming connections and the motors lack outgoing, the only possible\n concepts are therefore those whose mechanisms are a subset of the hidden\n units).\"\"\"\n_register(data_function=all_concepts)(sp)\n\n\nsp_wvn = _world_vs_noise(transform=unq_concepts,\n reduce=phi_sum,\n upto_attr='HIDDEN_INDICES')(all_concepts)\nsp_wvn.__name__ = 'sp_wvn'\nsp_wvn.__doc__ = \\\n \"\"\"Same as `sp` but counting the difference between the sum of φ of unique\n concepts that appear in the world and a scrambled version of it.\"\"\"\n_register(data_function=all_concepts)(sp_wvn)\n\n\n# Big-Phi\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\ndef main_complex(ind, state):\n \"\"\"Return the main complex of the individual.\"\"\"\n return pyphi.compute.main_complex(ind.network, state)\n\n# We compute only the N most-frequent states of those visited for performance\n# reasons. 
Ideally we would consider every unique state.\nNUM_BIG_PHI_STATES_TO_COMPUTE = None\n\nbp = _avg_over_visited_states(transform=lambda x: x.phi,\n upto_attr='SENSOR_HIDDEN_INDICES',\n n=NUM_BIG_PHI_STATES_TO_COMPUTE)(main_complex)\nbp.__name__ = 'bp'\nbp.__doc__ = \\\n \"\"\"ϕ: Animats are evaluated based on the ϕ-value of their brains, averaged\n over the {} most-common unique states the animat visits during a game\n (where uniqueness is considered up to the state of the sensors and hidden\n units).\"\"\".format(NUM_BIG_PHI_STATES_TO_COMPUTE)\n_register(data_function=main_complex)(bp)\n\n\nbp_wvn = _world_vs_noise(reduce=phi_sum,\n upto_attr='SENSOR_HIDDEN_INDICES',\n n=NUM_BIG_PHI_STATES_TO_COMPUTE)(main_complex)\nbp_wvn.__name__ = 'bp_wvn'\nbp_wvn.__doc__ = \\\n \"\"\"Same as `bp` but counting the difference between world and noise.\"\"\"\n_register(data_function=main_complex)(bp_wvn)\n\n\n# Matching\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\ndef matching(W, N, constellations):\n # Collect the constellations specified in the world.\n world_constellations = [constellations[state] for state in W]\n # Collect those specified in noise.\n noise_constellations = [constellations[state] for state in N]\n # Join the constellations for every state visited in the world and uniquify\n # the resulting set of concepts. Concepts should be considered the same\n # when they have the same φ, same mechanism, same mechanism state, and the\n # same cause and effect purviews and repertoires.\n world_concepts = unq_concepts(world_constellations)\n # Do the same for noise.\n noise_concepts = unq_concepts(noise_constellations)\n # Calculate and return the final value for matching: the difference in the\n # sum of small phi for the unique concepts specified when presented with\n # the world and that when presented with a scrambled world, weighted by\n # existence in the world.\n return (sum(c.phi for c in world_concepts) -\n sum(c.phi for c in noise_concepts))\n\n\ndef matching_weighted(W, N, constellations, complexes):\n world = np.array([\n sum(complexes[state].phi * c.phi for c in constellations[state])\n for state in W\n ])\n noise = np.array([\n sum(complexes[state].phi * c.phi for c in constellations[state])\n for state in N\n ])\n return world.sum() - noise.sum()\n\n\ndef matching_average_weighted(W, N, constellations, complexes):\n # Collect the constellations specified in the world.\n world_constellations = [constellations[state] for state in W]\n # Collect those specified in noise.\n noise_constellations = [constellations[state] for state in N]\n # Join the constellations for every state visited in the world and uniquify\n # the resulting set of concepts. 
Concepts should be considered the same\n # when they have the same φ, same mechanism, same mechanism state, and the\n # same cause and effect purviews and repertoires.\n world_concepts = unq_concepts(world_constellations)\n # Do the same for noise.\n noise_concepts = unq_concepts(noise_constellations)\n # Map concepts to the ϕ values.\n big_phis_w = {}\n for state in W:\n for c in constellations[state]:\n if c not in big_phis_w:\n big_phis_w[c] = []\n big_phis_w[c].append(complexes[state].phi)\n big_phis_n = {}\n for state in N:\n for c in constellations[state]:\n if c not in big_phis_n:\n big_phis_n[c] = []\n big_phis_n[c].append(complexes[state].phi)\n # Average the ϕ values.\n big_phis_w = {concept: np.mean(values)\n for concept, values in big_phis_w.items()}\n big_phis_n = {concept: np.mean(values)\n for concept, values in big_phis_n.items()}\n return (sum(c.phi * big_phis_w[c] for c in world_concepts) -\n sum(c.phi * big_phis_n[c] for c in noise_concepts))\n\n\ndef mat(ind):\n \"\"\"Matching: Animats are evaluated based on how well they “match” their\n environment. Roughly speaking, this captures the degree to which their\n conceptual structure “resonates” with statistical regularities in the\n world. This quantity is given by:\n\n ϕ * (Σφ'(W) - Σφ'(N)),\n\n where ϕ is just the animat's ϕ-value (averaged over the 5 most-common\n unique states that it visits during a game), Σφ'(W) is the sum of φ for\n each *unique* concept that the animat obtains when presented with a\n stimulus set from the world, and Σφ'(N) is the same but for a stimulus set\n that has been scrambled first in space and then in time.\"\"\"\n # Short-circuit if the animat has no connections.\n if ind.cm.sum() == 0:\n return (0, 0, 0)\n # Play the game and a scrambled version of it.\n world = ind.play_game().animat_states\n noise = ind.play_game(scrambled=True).animat_states\n # Since the motor states can't influence φ or ϕ, we set them to zero to\n # make uniqifying the states simpler.\n world[_.MOTOR_INDICES] = 0\n noise[_.MOTOR_INDICES] = 0\n # Get a flat list of all the the states.\n combined = np.concatenate([world, noise])\n combined = combined.reshape(-1, combined.shape[-1])\n # Get unique world and noise states and their counts, up to sensor and\n # hidden states (we care about the sensors since sensor states can\n # influence φ and ϕ as background conditions). 
The motor states are ignored\n # since now they're all zero.\n all_states = Counter(tuple(state) for state in combined)\n # Get the main complexes for each unique state.\n complexes = {\n state: pyphi.compute.main_complex(ind.network, state)\n for state in all_states\n }\n # TODO weight by frequency?\n # Existence is the mean of the ϕ values.\n big_phis, counts = zip(*[(complexes[state].phi, count)\n for state, count in all_states.items()])\n existence = np.average(big_phis, weights=counts)\n # Get the unique concepts in each constellation.\n constellations = {\n state: set(bm.unpartitioned_constellation)\n for state, bm in complexes.items()\n }\n # Get the set of unique states in each trial for world and noise.\n world = [set(tuple(state) for state in trial) for trial in world]\n noise = [set(tuple(state) for state in trial) for trial in noise]\n # Now we calculate the matching terms for many stimulus sets (each trial)\n # which are later averaged to obtain the matching value for a “typical”\n # stimulus set.\n # TODO weight each concept by average big phi of its states?\n raw_matching = np.mean([\n matching(W, N, constellations) for W, N in zip(world, noise)\n ])\n raw_matching_weighted = np.mean([\n matching_weighted(W, N, constellations, complexes)\n for W, N in zip(world, noise)\n ])\n raw_matching_average_weighted = np.mean([\n matching_average_weighted(W, N, constellations, complexes)\n for W, N in zip(world, noise)\n ])\n return (existence * raw_matching_average_weighted,\n existence * raw_matching_weighted,\n existence * raw_matching)\n_register(data_function=main_complex)(mat)\n","sub_path":"fitness_functions.py","file_name":"fitness_functions.py","file_ext":"py","file_size_in_byte":18831,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"281924262","text":"import curses\nimport pickle\nimport traceback\nimport glob\nimport os\nimport numpy as np\nimport modules.common_params.common as c\nimport modules.memory_classes.memory as m\nimport modules.queues.queue as q\nimport modules.organisms.organism as o\nfrom conf.config import Config, default_ancestors\nimport math\nimport json\n\nimport sys\n\nsys.modules['modules.memory'] = m\nsys.modules['modules.queue'] = q\nsys.modules['modules.organism'] = o\nsys.modules['modules.common'] = c\n\nfrom typing import Dict\nimport logging\n\nlogging.basicConfig(\n level=logging.INFO,\n format=\"%(levelname)s|%(filename)s|%(lineno)s| %(asctime)s - %(message)s\",\n datefmt=\"%Y-%m-%d %H:%M:%S\",\n filename='example.log',\n)\nlogger = logging.getLogger(__name__)\n\n\nclass Fungera:\n def __init__(self):\n self.timer = c.RepeatedTimer(\n c.config['autosave_rate'], self.save_state, (True,)\n )\n # print(c.config['random_seed'], c.config['simulation_name'], c.config['random_rate'])\n np.random.seed(c.config['random_seed'])\n if not os.path.exists('snapshots'):\n os.makedirs('snapshots')\n self.cycle = 0\n self.is_minimal = False\n self.purges = 0\n self.info_window = c.screen.derived(\n np.array([0, 0]), c.config['info_display_size'],\n )\n\n coords = np.array(c.config['memory_size']) // 2\n ip = np.copy(coords)\n if c.instructions_set_name == 'error_correction':\n ip = ip + 1\n genome_size = self.load_genome_into_memory(\n default_ancestors[c.instructions_set_name], coords\n )\n o.organism_class(coords, genome_size, ip=ip)\n self.update_info()\n if c.config['snapshot_to_load'] != 'new':\n self.load_state()\n\n self.information_per_site_tables = []\n self.entropy = 0.0\n\n def run(self):\n try:\n 
self.input_stream()\n except KeyboardInterrupt:\n curses.endwin()\n self.timer.cancel()\n except Exception:\n curses.endwin()\n self.timer.cancel()\n print(traceback.format_exc())\n\n def load_genome_into_memory(self, filename: str, address: np.array) -> np.array:\n with open(filename) as genome_file:\n genome = np.array([list(line.strip()) for line in genome_file])\n m.memory.load_genome(genome, address, genome.shape)\n return genome.shape\n\n def update_position(self, delta):\n m.memory.scroll(delta)\n q.queue.update_all()\n self.update_info()\n\n def update_info_full(self):\n self.info_window.erase()\n info = ''\n info += '[{}] \\n'.format(c.config['simulation_name'])\n info += 'Cycle : {}\\n'.format(self.cycle)\n info += 'Position : {}\\n'.format(list(m.memory.position))\n info += 'Total : {}\\n'.format(len(q.queue.organisms))\n info += 'Purges : {}\\n'.format(self.purges)\n info += 'Organism : {}\\n'.format(q.queue.index)\n info += q.queue.get_organism().info()\n self.info_window.print(info)\n\n def update_info_minimal(self):\n self.info_window.erase()\n info = ''\n info += 'Minimal mode '\n info += '[Running]\\n' if c.is_running else '[Paused]\\n'\n info += 'Cycle : {}\\n'.format(self.cycle)\n info += 'Total : {}\\n'.format(len(q.queue.organisms))\n if q.queue.organisms:\n entropy = self.get_entropy_score()\n\n info += f\"Entropy: {entropy}\\n\"\n self.entropy = entropy\n commands_distribution = self.get_commands_distribution()\n for i in commands_distribution:\n commands_distribution[i] = '{:.2e}'.format(commands_distribution[i])\n # info += f\"Commands distribution: {commands_distribution}\\n\"\n # info += f\"Organism sizes: {self.get_organism_sizes()[:3]}\\n\"\n else:\n info += \"Entropy: 0.0\"\n raise ValueError\n # info += f'{m.memory.memory_map[organism_bounds]}'\n self.info_window.print(info)\n\n def update_info(self):\n if not self.is_minimal:\n self.update_info_full()\n else:\n if self.cycle % c.config['cycle_gap'] == 0:\n self.update_info_minimal()\n\n def toogle_minimal(self, memory=None):\n self.is_minimal = not self.is_minimal\n self.update_info_minimal()\n m.memory.clear()\n m.memory = m.memory.toogle() if memory is None else memory.toogle()\n m.memory.update(refresh=True)\n q.queue.toogle_minimal()\n\n def save_state(self, from_timer=False):\n return_to_full = False\n if not self.is_minimal:\n if from_timer:\n return\n self.toogle_minimal()\n return_to_full = True\n\n filename = 'snapshots/{}_cycle_{}.snapshot'.format(\n c.config['simulation_name'].lower().replace(' ', '_'), self.cycle\n )\n if c.config['dump_full_snapshots']:\n with open(filename, 'wb') as f:\n state = {\n 'cycle': self.cycle,\n 'memory': m.memory,\n 'queue': q.queue,\n 'information_per_site': self.information_per_site_tables,\n 'entropy': self.entropy\n }\n pickle.dump(state, f)\n\n metrics = {\n 'cycle': self.cycle,\n 'information_per_site': self.information_per_site_tables,\n 'entropy': self.entropy,\n 'number_of_organisms': len(q.queue.organisms),\n 'commands_distribution': self.get_commands_distribution(),\n 'sizes': self.get_organism_sizes()\n }\n metrics_file = 'snapshots/{}_cycle_{}.snapshot'.format(\n c.config['simulation_name'].lower().replace(' ', '_'), self.cycle\n ) + '2'\n with open(metrics_file, 'wb') as mf:\n pickle.dump(metrics, mf)\n if not self.is_minimal or return_to_full:\n self.toogle_minimal()\n\n def load_state(self):\n return_to_full = False\n if not self.is_minimal:\n self.toogle_minimal()\n return_to_full = True\n try:\n if (\n c.config['snapshot_to_load'] == 'last'\n or 
c.config['snapshot_to_load'] == 'new'\n ):\n filename = max(glob.glob('snapshots/*.snapshot'), key=os.path.getctime)\n logger.info(filename)\n else:\n filename = c.config['snapshot_to_load']\n with open(filename, 'rb') as f:\n state = pickle.load(f)\n logger.info(\n f'{state.keys()}'\n )\n memory = state['memory']\n q.queue = state['queue']\n self.cycle = state['cycle']\n except Exception as e:\n print(e)\n logger.error(e, exc_info=True)\n pass\n\n if not self.is_minimal or return_to_full:\n self.toogle_minimal(memory)\n else:\n m.memory = memory\n self.update_info_minimal()\n\n def make_cycle(self):\n m.memory.update(refresh=True)\n if self.cycle % c.config['random_rate'] == 0 and c.config['use_mutations']:\n m.memory.cycle()\n if self.cycle % c.config['cycle_gap'] == 0:\n if m.memory.is_time_to_kill():\n q.queue.kill_organisms()\n self.purges += 1\n if not self.is_minimal:\n q.queue.update_all()\n self.cycle += 1\n self.update_info()\n\n @staticmethod\n def calculate_entropy(distribution, num_commands):\n entropy = 0\n for key in distribution:\n p = distribution[key]\n log_p = math.log(p, num_commands)\n entropy -= p * log_p\n return entropy\n\n def get_commands_distribution(self) -> Dict:\n organisms_commands = []\n for organism in q.queue.organisms:\n organism_commands = self.get_organism_commands(\n organism.start,\n organism.size\n )\n\n organisms_commands.append(organism_commands.flatten())\n try:\n organisms_commands = np.concatenate(organisms_commands)\n commands, counts = np.unique(organisms_commands, return_counts=True)\n command_counts = dict(zip(commands, counts))\n return command_counts\n except ValueError:\n logger.info(f'{organisms_commands}')\n raise ValueError\n\n def get_organism_sizes(self):\n sizes = []\n for organism in q.queue.organisms:\n sizes.append(str(organism.size))\n sizes = np.array(sizes)\n\n return np.unique(sizes, return_counts=True)\n\n def get_entropy_score(self):\n max_table_size = [max(q.queue.organisms, key=lambda x: x.size[0]).size[0],\n max(q.queue.organisms, key=lambda x: x.size[1]).size[1]]\n\n organisms_commands = []\n\n # Getting command tables\n for organism in q.queue.organisms:\n organisms_commands.append(self.get_organism_commands(\n organism.start,\n organism.size\n ))\n\n # Getting frequencies\n values_distributions = [[0 for j in range(max_table_size[1])] for i in range(max_table_size[0])]\n for i in range(max_table_size[0]):\n for j in range(max_table_size[1]):\n values = []\n for commands in organisms_commands:\n if i < commands.shape[0] and j < commands.shape[1]:\n values.append(commands[i][j])\n values = {x: values.count(x) / len(values) for x in values}\n values_distributions[i][j] = values\n\n per_site_entropy = np.zeros(max_table_size)\n for i in range(max_table_size[0]):\n for j in range(max_table_size[1]):\n per_site_entropy[i, j] = self.calculate_entropy(values_distributions[i][j], len(c.instructions))\n\n self.information_per_site_tables = 1 - np.array(per_site_entropy)\n return np.sum(per_site_entropy)\n # total_entropy = 0\n # information_tables = []\n # for organism_commands in organisms_commands:\n # entropy = 0\n # entropy_table = np.zeros(organism_commands.shape)\n # max_entropy_per_site = math.log(len(c.instructions), len(c.instructions))\n # information_per_site = max_entropy_per_site - entropy_table\n # for i in range(organism_commands.shape[0]):\n # for j in range(organism_commands.shape[1]):\n # p = values_distributions[i][j][organism_commands[i][j]]\n # entropy -= p * math.log(\n # p, len(c.instructions)\n # )\n # 
entropy_table[i, j] = -p * math.log(\n # p, len(c.instructions)\n # )\n #\n # total_entropy += entropy\n # information_tables.append(entropy_table)\n # information_tables = np.array(information_tables)\n # self.information_per_site_tables = information_tables\n # return total_entropy\n\n @staticmethod\n def get_organism_commands(start, size):\n return m.memory.memory_map[\n start[0]: start[0] + size[0],\n start[1]: start[1] + size[1],\n ]\n\n def input_stream(self):\n while True:\n key = c.screen.get_key()\n if key == ord(' '):\n c.is_running = not c.is_running\n if self.is_minimal:\n self.update_info_minimal()\n elif key == ord('c') and not c.is_running:\n q.queue.cycle_all()\n self.make_cycle()\n elif key == curses.KEY_DOWN and not self.is_minimal:\n self.update_position(c.config['scroll_step'] * c.deltas['down'])\n elif key == curses.KEY_UP and not self.is_minimal:\n self.update_position(c.config['scroll_step'] * c.deltas['up'])\n elif key == curses.KEY_RIGHT and not self.is_minimal:\n self.update_position(c.config['scroll_step'] * c.deltas['right'])\n elif key == curses.KEY_LEFT and not self.is_minimal:\n self.update_position(c.config['scroll_step'] * c.deltas['left'])\n elif key == ord('d') and not self.is_minimal:\n q.queue.select_next()\n self.update_info()\n elif key == ord('a') and not self.is_minimal:\n q.queue.select_previous()\n self.update_info()\n elif key == ord('m'):\n self.toogle_minimal()\n elif key == ord('p'):\n self.save_state()\n elif key == ord('l'):\n self.load_state()\n elif key == ord('k'):\n q.queue.kill_organisms()\n elif key == -1 and c.is_running:\n q.queue.cycle_all()\n self.make_cycle()\n elif len(q.queue.organisms) == 0:\n break\n\n\nif __name__ == '__main__':\n c.is_running = False\n Fungera().run()\n","sub_path":"fungera.py","file_name":"fungera.py","file_ext":"py","file_size_in_byte":13204,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"143943063","text":"# -*- coding: utf-8 -*-\nfrom openprocurement.auctions.core.utils import (\n json_view,\n context_unpack,\n opresource\n)\nfrom openprocurement.auctions.core.validation import (\n validate_file_update,\n validate_file_upload,\n validate_patch_document_data\n)\nfrom openprocurement.auctions.core.views.mixins import AuctionDocumentResource\n\nfrom openprocurement.auctions.core.interfaces import (\n IManager\n)\n\nfrom openprocurement.auctions.core.utils import (\n get_file\n)\n\n\n@opresource(name='geb:Auction Documents',\n collection_path='/auctions/{auction_id}/documents',\n path='/auctions/{auction_id}/documents/{document_id}',\n auctionsprocurementMethodType=\"geb\",\n description=\"Auction related binary files (PDFs, etc.)\")\nclass AuctionDocumentResource(AuctionDocumentResource):\n\n @json_view(permission='upload_auction_documents', validators=(validate_file_upload,))\n def collection_post(self):\n \"\"\"Auction Document Upload\"\"\"\n save = None\n\n manager = self.request.registry.queryMultiAdapter((self.request, self.context), IManager)\n\n applicant = self.request.validated.get('document', self.request.validated.get('file'))\n document = manager.create(applicant)\n\n if document:\n save = manager.save()\n\n if save:\n msg = 'Created auction document {}'.format(document.id)\n extra = context_unpack(self.request, {'MESSAGE_ID': 'auction_document_create'}, {'document_id': document['id']})\n self.LOGGER.info(msg, extra=extra)\n\n self.request.response.status = 201\n\n route = self.request.matched_route.name.replace(\"collection_\", \"\")\n 
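# The matched route name is the collection route (\"collection_...\"); stripping the prefix yields the single-document route used to build the Location header URL for the newly created document.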
locations = self.request.current_route_url(_route_name=route, document_id=document.id, _query={})\n self.request.response.headers['Location'] = locations\n return {'data': document.serialize(\"view\")}\n\n @json_view(permission='view_auction')\n def get(self):\n \"\"\"Auction Document Read\"\"\" # TODO rm black box\n document = self.request.validated['document']\n offline = bool(document.get('documentType') == 'x_dgfAssetFamiliarization')\n if self.request.params.get('download') and not offline:\n return get_file(self.request)\n document_data = document.serialize(\"view\")\n document_data['previousVersions'] = [\n i.serialize(\"view\")\n for i in self.request.validated['documents']\n if i.url != document.url or\n (offline and i.dateModified != document.dateModified)\n ]\n return {'data': document_data}\n\n @json_view(content_type=\"application/json\", permission='upload_auction_documents', validators=(validate_patch_document_data,))\n def patch(self):\n \"\"\"Auction Document Update\"\"\"\n save = None\n\n manager = self.request.registry.queryMultiAdapter((self.request, self.context), IManager)\n\n manager.change()\n save = manager.save()\n\n if save:\n extra = context_unpack(self.request, {'MESSAGE_ID': 'auction_document_patch'})\n msg = 'Updated auction document {}'.format(self.request.context.id)\n self.LOGGER.info(msg, extra=extra)\n return {'data': self.request.context.serialize(\"view\")}\n\n @json_view(permission='upload_auction_documents', validators=(validate_file_update,))\n def put(self):\n save = None\n\n manager = self.request.registry.queryMultiAdapter((self.request, self.context), IManager)\n\n document = manager.put()\n save = manager.save()\n\n if save:\n extra = context_unpack(self.request, {'MESSAGE_ID': 'auction_document_put'})\n msg = 'Updated auction document {}'.format(document.id)\n self.LOGGER.info(msg, extra=extra)\n return {'data': document.serialize(\"view\")}\n","sub_path":"openprocurement/auctions/geb/views/procedure_document.py","file_name":"procedure_document.py","file_ext":"py","file_size_in_byte":3915,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"59603338","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Apr 7 12:30:34 2019\n\n@author: Ismail Bourbie\n\"\"\"\n\ndef merge(left, right):\n result = []\n i, j = 0, 0\n \n ## comapre the smallest elemnt of the left list with the smallest element of the right list\n while i < len(left) and j < len(right):\n if left[i] < right[j]:\n result.append(left[i])\n i += 1\n else:\n result.append(right[j])\n j += 1\n \n \n ## if the right list is empty, append the left to result\n while (i < len(left)):\n result.append(left[i])\n i += 1\n \n ## if the left list is empty, append the right to result\n while (j < len(right)):\n result.append(right[j])\n j += 1\n return result\n\n\ndef merge_sort(L):\n if len(L) < 2:\n return L[:]\n else:\n middle = len(L)//2\n left = merge_sort(L[:middle])\n right = merge_sort(L[middle:]) \n return merge(left, right)\n \ntestList = [1, 4, 6, 3, 0, 2, 8, 7, 9, 5]\n\nprint(merge_sort(testList))","sub_path":"python files/courses/merge_sort.py","file_name":"merge_sort.py","file_ext":"py","file_size_in_byte":1056,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"186108113","text":"# -*- coding: utf-8 -*-\n\nimport sys, urllib, 
clr\nclr.AddReference('System')\nclr.AddReference('System.Windows')\nclr.AddReference('System.Windows.Forms')\nclr.AddReference('System.Drawing')\nclr.AddReference('RevitAPI')\nclr.AddReference('RevitAPIUI')\n\nfrom System.Windows.Forms import MessageBox, ProgressBar, Form\nfrom System.Windows.Forms import Application, Form, ProgressBar\n# from System.Threading import ThreadStart, Thread\n# from IronPython.Runtime.Calls import CallTarget0\n\nfrom Autodesk.Revit.DB import StorageType, ParameterType\nfrom RB_print_class import RB_print\n\nPRINTER = RB_print()\n\n# Converts an object to a string\ndef to_str(obj):\n    if isinstance(obj, str):\n        return obj\n    else:\n        return str(obj)\n\n# Prints a message\ndef echo(*arg):\n    if arg:\n        string = ''\n        for i in arg:\n            if isinstance(i, dict):\n                for k in i.keys():\n                    string += to_str(k) + ' : ' + to_str(i[k]) + '\\r\\n'\n            elif isinstance(i, list):\n                for k in i:\n                    string += to_str(k) + '\\r\\n'\n            else:\n                string += to_str(i)\n        PRINTER.Show(string)\n        # print(to_str(i))\n\ndef echo_arr(*arg):\n    if arg:\n        string = ''\n        for i in arg:\n            string = to_str(i) + '\\n\\r'\n        PRINTER.Show(to_str(string))\n        # print(to_str(i))\n\ndef echo_close():\n    PRINTER.Close()\n\n\n# Returns the parent folder at the given nesting level\n\n\ndef echo_dir(el):\n    line = ''\n    for i in el:\n        line += to_str(i) + '\\n\\r'\n    echo(line)\n\ndef to_feet(mm):\n    return mm*0.00328084\n\ndef to_mm(mm):\n    return round(mm/0.00328084, 3)\n\ndef get_parameter(el, parameter_name, is_dict=False, for_set=False):\n    is_mm_list = [ParameterType.Length, ParameterType.ReinforcementLength, ParameterType.BarDiameter]\n    all_parameters = list(el.Parameters)\n    val_to_ret = None\n    if el.GetTypeId():\n        all_parameters += list(el.Symbol.Parameters)\n    all_parameters = {i.Definition.Name: i for i in all_parameters}\n    if parameter_name in all_parameters.keys():\n        param = all_parameters[parameter_name]\n        if for_set:\n            return param\n        if param.StorageType == StorageType.Double:\n            if param.Definition.ParameterType in is_mm_list:\n                val_to_ret = to_mm(param.AsDouble())\n            else:\n                val_to_ret = param.AsDouble()\n        if param.StorageType == StorageType.Integer:\n            if param.Definition.ParameterType == ParameterType.YesNo:\n                val_to_ret = bool(param.AsInteger())\n            else:\n                val_to_ret = param.AsInteger()\n        if param.StorageType == StorageType.String:\n            val_to_ret = param.AsString()\n    if is_dict:\n        return {parameter_name: val_to_ret}\n    return val_to_ret\n    \ndef message(text):\n    text = to_str(text)\n    MessageBox.Show(text, 'RedBim')\n\nclass RB_Parameter_mixin:\n\n    def __init__(self):\n        super(RB_Parameter_mixin, self).__init__()\n\n    @classmethod\n    def get_parameter(cls, element, parameter):\n        if not hasattr(cls, \"_rb_parameters\"):\n            cls._rb_parameters = {}\n        cls._rb_parameters.setdefault(element.Id.IntegerValue, {})\n        cls._rb_parameters[element.Id.IntegerValue].setdefault(parameter, None)\n        if cls._rb_parameters[element.Id.IntegerValue][parameter] is None:\n            par = element.LookupParameter(parameter)\n            if not par and element.GetTypeId():\n                par = element.Symbol.LookupParameter(parameter)\n            if par:\n                cls._rb_parameters[element.Id.IntegerValue][parameter] = par\n        return cls._rb_parameters[element.Id.IntegerValue][parameter]\n\n\n\n__all__ = ['echo', 'echo_dir', 'to_str', 'echo_close', 'to_feet', 'to_mm', 'get_parameter', 'message', 
\"RB_Parameter_mixin\"]\n","sub_path":"common_scripts/system_scripts.py","file_name":"system_scripts.py","file_ext":"py","file_size_in_byte":3931,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"331854004","text":"\"\"\"\nModule to interface model pipeline to model tracker service.\n\"\"\"\n\nimport os\nimport subprocess\nimport tempfile\nimport sys\nimport pandas as pd\nimport json\nimport tarfile\nimport logging\n\nlogger = logging.getLogger('ATOM')\n\nfrom atomsci.ddm.utils import datastore_functions as dsf\n\nmlmt_supported = True\ntry:\n from atomsci.clients import MLMTClient\nexcept (ModuleNotFoundError, ImportError):\n logger.debug(\"Model tracker client not supported in your environment; will save models in filesystem only.\")\n mlmt_supported = False\n\nclass UnableToTarException(Exception):\n pass\n\nclass DatastoreInsertionException(Exception):\n pass\nclass MLMTClientInstantiationException(Exception):\n pass\n\n# *********************************************************************************************************************************\ndef save_model(pipeline, collection_name='model_tracker', log=True):\n \"\"\"Save the model.\n\n Save the model files to the datastore and save the model metadata dict to the Mongo database.\n\n Args:\n pipeline (ModelPipeline object): the pipeline to use\n collection_name (str): the name of the Mongo DB collection to use\n log (bool): True if logs should be printed, default False\n use_personal_client (bool): True if personal client should be used (i.e. for testing), default False\n\n Returns:\n None if insertion was successful, raises UnableToTarException, DatastoreInsertionException, MLMTClientInstantiationException\n or MongoInsertionException otherwise\n \"\"\"\n \n if pipeline is None:\n raise Exception('pipeline cannot be None.')\n\n if not mlmt_supported:\n print(\"Model tracker not supported in your environment; can save models in filesystem only.\")\n return\n\n # ModelPipeline.create_model_metadata() should be called before the call to save_model.\n # Get the metadata dictionary from the model pipeline.\n metadata_dict = pipeline.model_metadata\n model_uuid = metadata_dict['model_uuid']\n if model_uuid is None:\n raise ValueError(\"model_uuid is missing from pipeline metadata.\")\n\n #### Part 1: Save the model tarball ####\n model = pipeline.model_wrapper\n # best_model_dir is an absolute path.\n directory_to_tar = model.best_model_dir\n # Put tar file in a temporary directory that will automatically be destroyed when we're done\n with tempfile.TemporaryDirectory() as tmp_dir:\n tar_file = os.path.join(tmp_dir, 'model_{model_uuid}.tar.gz'.format(model_uuid=model_uuid))\n tar_flags = 'czf'\n # Change directory to model_dir so that paths in tarball are relative to model_dir.\n tar_command = 'tar -{tar_flags} {tar_file} -C {directory_to_tar} .'.format(tar_flags=tar_flags, tar_file=tar_file,\n directory_to_tar=directory_to_tar)\n try:\n subprocess.check_output(tar_command.split())\n except subprocess.CalledProcessError as e:\n pipeline.log.error('Command to create model tarball returned status {return_code}'.format(return_code=e.returncode))\n pipeline.log.error('Command was: \"{cmd}\"'.format(cmd=e.cmd))\n pipeline.log.error('Output was: \"{output}\"'.format(output=e.output))\n pipeline.log.error('stderr was: \"{stderr}\"'.format(stderr=e.stderr))\n raise UnableToTarException('Unable to tar {directory_to_tar}.'.format(directory_to_tar=directory_to_tar))\n title = '{model_uuid} 
model tarball'.format(model_uuid=model_uuid)\n        uploaded_results = dsf.upload_file_to_DS(\n            bucket=pipeline.params.model_bucket, title=title, description=title, tags=[],\n            key_values={'model_uuid' : model_uuid, 'file_category': 'ml_model'}, filepath=tmp_dir,\n            filename=tar_file, dataset_key='model_' + model_uuid + '_tarball', client=pipeline.ds_client,\n            return_metadata=True)\n        if uploaded_results is None:\n            raise DatastoreInsertionException('Unable to upload title={title} to datastore.'.format(title=title))\n    # Get the dataset_oid for actual metadata file stored in datastore.\n    model_dataset_oid = uploaded_results['dataset_oid']\n    # By adding dataset_oid to the dict, we can immediately find the datastore file associated with a model.\n    metadata_dict['model_parameters']['model_dataset_oid'] = model_dataset_oid\n\n\n    #### Part 2: Save the model metadata ####\n    mlmt_client = dsf.initialize_model_tracker()\n    mlmt_client.save_metadata(collection_name=collection_name,\n                              model_uuid=metadata_dict['model_uuid'],\n                              model_metadata=metadata_dict)\n    if log:\n        print('Successfully inserted into the database with model_uuid %s.' % model_uuid)\n\n# *********************************************************************************************************************************\ndef get_full_metadata(filter_dict, collection_name=None):\n    \"\"\"Retrieve relevant full metadata (including training run metrics) of models matching given criteria.\n\n    Args:\n        filter_dict (dict): dictionary to filter on\n\n        collection_name (str): Name of collection to search\n\n    Returns:\n        A list of matching full model metadata (including training run metrics) dictionaries. Raises MongoQueryException if the query fails.\n    \"\"\"\n    if not mlmt_supported:\n        print(\"Model tracker not supported in your environment; can load models from filesystem only.\")\n        return None\n\n    if filter_dict is None:\n        raise ValueError('Parameter filter_dict cannot be None.')\n    if collection_name is None:\n        raise ValueError('Parameter collection_name cannot be None.')\n    mlmt_client = dsf.initialize_model_tracker()\n\n    query_params = {\n        \"match_metadata\": filter_dict,\n    }\n\n    metadata_list = mlmt_client.model.query_model_metadata(\n        collection_name=collection_name,\n        query_params=query_params\n    ).result()\n    return list(metadata_list)\n\n# *********************************************************************************************************************************\ndef get_metadata_by_uuid(model_uuid, collection_name=None):\n    \"\"\"Retrieve model parameter metadata by model_uuid. The resulting metadata dictionary can\n    be passed to parameter_parser.wrapper(); it does not contain performance metrics or\n    training dataset metadata.\n\n    Args:\n        model_uuid (str): model unique identifier\n        collection_name(str): collection to search (optional, searches all collections if not specified)\n    Returns:\n        Matching metadata dictionary. 
Raises MongoQueryException if the query fails.\n    \"\"\"\n\n    if not mlmt_supported:\n        print(\"Model tracker not supported in your environment; can load models from filesystem only.\")\n        return None\n\n    mlmt_client = dsf.initialize_model_tracker()\n\n    if collection_name is None:\n        collection_name = get_model_collection_by_uuid(model_uuid, mlmt_client=mlmt_client)\n\n    exclude_fields = [\n        \"training_metrics\",\n        \"time_built\",\n        \"training_dataset.dataset_metadata\"\n    ]\n    return mlmt_client.get_model(collection_name=collection_name, model_uuid=model_uuid,\n                                 exclude_fields=exclude_fields)\n\n# *********************************************************************************************************************************\ndef get_full_metadata_by_uuid(model_uuid, collection_name=None):\n    \"\"\"Retrieve model parameter metadata for the given model_uuid and collection.\n    The returned metadata dictionary will include training run performance metrics and\n    training dataset metadata.\n\n    Args:\n        model_uuid (str): model unique identifier\n        collection_name(str): collection to search (optional, searches all collections if not specified)\n    Returns:\n        Matching metadata dictionary. Raises MongoQueryException if the query fails.\n    \"\"\"\n\n    if not mlmt_supported:\n        print(\"Model tracker not supported in your environment; can load models from filesystem only.\")\n        return None\n\n    mlmt_client = dsf.initialize_model_tracker()\n\n    if collection_name is None:\n        collection_name = get_model_collection_by_uuid(model_uuid, mlmt_client=mlmt_client)\n\n    return mlmt_client.get_model(collection_name=collection_name, model_uuid=model_uuid)\n\n# *********************************************************************************************************************************\ndef get_model_collection_by_uuid(model_uuid, mlmt_client=None):\n    \"\"\"Retrieve model collection given a uuid.\n\n    Args:\n        model_uuid (str): model uuid\n\n        mlmt_client: Ignored\n    Returns:\n        Matching collection name\n    Raises:\n        ValueError if there is no collection containing a model with the given uuid.\n    \"\"\"\n\n    if not mlmt_supported:\n        print(\"Model tracker not supported in your environment; can load models from filesystem only.\")\n        return None\n\n    mlmt_client = dsf.initialize_model_tracker()\n\n    collections = mlmt_client.collections.get_collection_names().result()\n    for col in collections:\n        if not col.startswith('old_'):\n            if mlmt_client.count_models(collection_name=col, model_uuid=model_uuid) > 0:\n                return col\n\n    raise ValueError('Collection not found for uuid: ' + model_uuid)\n\n# *********************************************************************************************************************************\ndef get_model_training_data_by_uuid(uuid):\n    \"\"\"Retrieve data used to train, validate, and test a model given the uuid\n\n    Args:\n        uuid (str): model uuid\n    Returns:\n        a tuple of dataframes containing training data, validation data, and test data including the compound ID, RDKIT SMILES, and response value\n    \"\"\"\n    if not mlmt_supported:\n        print(\"Model tracker not supported in your environment; can load models from filesystem only.\")\n        return None\n\n    model_meta = get_metadata_by_uuid(uuid)\n    response_col = model_meta['training_dataset']['response_cols']\n    smiles_col = model_meta['training_dataset']['smiles_col']\n    full_data = dsf.retrieve_dataset_by_dataset_oid(model_meta['training_dataset']['dataset_oid'], verbose=False)\n\n    # Pull split data and merge into initial dataset\n    split_meta = 
dsf.search_datasets_by_key_value('split_dataset_uuid', model_meta['splitting_parameters']['Splitting']['split_uuid'])\n split_oid = split_meta['dataset_oid'].values[0]\n split_data = dsf.retrieve_dataset_by_dataset_oid(split_oid, verbose=False)\n split_data['compound_id'] = split_data['cmpd_id']\n split_data = split_data.drop(columns=['cmpd_id'])\n full_data = pd.merge(full_data, split_data, how='inner', on=['compound_id'])\n\n train_data = full_data[full_data['subset'] == 'train'][['compound_id',smiles_col,*response_col]].reset_index(drop=True)\n valid_data = full_data[full_data['subset'] == 'valid'][['compound_id',smiles_col,*response_col]].reset_index(drop=True)\n test_data = full_data[full_data['subset'] == 'test'][['compound_id',smiles_col,*response_col]].reset_index(drop=True)\n\n return train_data, valid_data, test_data\n\n\n\n# *********************************************************************************************************************************\ndef export_model(model_uuid, collection, model_dir):\n \"\"\"\n Export the metadata (parameters) and other files needed to recreate a model\n from the model tracker database to a gzipped tar archive.\n\n Args:\n model_uuid (str): Model unique identifier\n\n collection (str): Name of the collection holding the model in the database.\n\n model_dir (str): Path to directory where the model metadata and parameter files will be written. The directory will\n be created if it doesn't already exist. Subsequently, the directory contents will be packed into a gzipped tar archive\n named model_dir.tar.gz.\n\n Returns:\n none\n \"\"\"\n if not mlmt_supported:\n print(\"Model tracker not supported in your environment; can load models from filesystem only.\")\n return\n\n ds_client = dsf.config_client()\n metadata_dict = get_metadata_by_uuid(model_uuid, collection_name=collection)\n\n # Get the tarball containing the saved model from the datastore, and extract it into model_dir.\n if 'ModelMetadata' in metadata_dict:\n # Convert old style metadata\n metadata_dict = convert_metadata(metadata_dict)\n\n if 'model_parameters' in metadata_dict:\n model_parameters = metadata_dict['model_parameters']\n model_dataset_oid = model_parameters['model_dataset_oid']\n else:\n raise Exception(\"Bad metadata for model UUID %s\" % model_uuid)\n\n os.makedirs(model_dir, exist_ok=True)\n\n # Unpack the model state tarball into a subdirectory of the new archive\n extract_dir = dsf.retrieve_dataset_by_dataset_oid(model_dataset_oid, client=ds_client, return_metadata=False,\n nrows=None, print_metadata=False, sep=False,\n tarpath='%s/best_model' % model_dir)\n\n # Download the transformers pickle file if there is one\n try:\n transformer_oid = model_parameters[\"transformer_oid\"]\n trans_fp = ds_client.open_dataset(transformer_oid, mode='b')\n trans_data = trans_fp.read()\n trans_fp.close()\n trans_path = \"%s/transformers.pkl\" % model_dir\n trans_out = open(trans_path, mode='wb')\n trans_out.write(trans_data)\n trans_out.close()\n del model_parameters['transformer_oid']\n model_parameters['transformer_key'] = 'transformers.pkl'\n\n except KeyError:\n # OK if there are no transformers\n pass\n\n # Save the metadata params\n meta_path = \"%s/model_metadata.json\" % model_dir\n with open(meta_path, 'w') as meta_out:\n json.dump(metadata_dict, meta_out, indent=4)\n\n # Create a new tarball containing both the metadata and the parameters from the retrieved model tarball\n new_tarpath = \"%s.tar.gz\" % model_dir\n tarball = tarfile.open(new_tarpath, mode='w:gz')\n 
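# arcname='.' places the contents of model_dir at the root of the archive\n    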
tarball.add(model_dir, arcname='.')\n    tarball.close()\n    print(\"Wrote model files to %s\" % new_tarpath)\n\n\n# *********************************************************************************************************************************\ndef convert_metadata(old_metadata):\n    \"\"\"\n    Convert model metadata from old format (with camel-case parameter group names) to new format.\n\n    Args:\n        old_metadata (dict): Model metadata in old format\n\n    Returns:\n        new_metadata (dict): Model metadata in new format\n    \"\"\"\n\n    model_metadata = old_metadata['ModelMetadata']\n    model_parameters = model_metadata['ModelParameters']\n    training_dataset = model_metadata['TrainingDataset'].copy()\n    new_metadata = {\n        \"model_uuid\": old_metadata['model_uuid'],\n        \"time_built\": old_metadata['time_built'],\n        \"training_dataset\": training_dataset,\n        \"training_metrics\": []\n    }\n\n    map_keys = [\n        (\"external_export_parameters\", \"ExternalExportParameters\"),\n        (\"dataset_metadata\", \"DatasetMetadata\"),\n    ]\n\n    for (nkey, okey) in map_keys:\n        value = training_dataset.pop(okey, None)\n        if value is not None:\n            training_dataset[nkey] = value\n\n    map_keys = [\n        (\"model_parameters\", 'ModelParameters'),\n        (\"ecfp_specific\", 'ECFPSpecific'),\n        (\"rf_specific\", 'RFSpecific'),\n        (\"autoencoder_specific\", 'AutoencoderSpecific'),\n        (\"descriptor_specific\", 'DescriptorSpecific'),\n        (\"nn_specific\", \"NNSpecific\"),\n        (\"xgb_specific\", \"xgbSpecific\"),\n        (\"umap_specific\", \"UmapSpecific\"),\n\n    ]\n    for (nkey, okey) in map_keys:\n        value = model_metadata.get(okey)\n        if value is not None:\n            new_metadata[nkey] = value\n\n    # Get rid of useless extra level in split params\n    split_params = model_metadata.get('SplittingParameters')\n    if split_params is not None:\n        splitting = split_params.get('Splitting')\n        if splitting is not None:\n            new_metadata['splitting_parameters'] = splitting\n\n    return new_metadata\n","sub_path":"atomsci/ddm/pipeline/model_tracker.py","file_name":"model_tracker.py","file_ext":"py","file_size_in_byte":16293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"98619223","text":"\nimport sys\nimport os\n\n# Third-party imports\nimport wx\ntry:\n    import win32gui\nexcept ImportError:\n    win32gui = None\n\n# Package imports\nfrom observer import Event\n\n#----------------------------------------------------------------------\n#\n# Config data\n#\n\nclass ConfigManager:\n\n    _instance = None\n\n    @staticmethod\n    def create(app_name, org_name):\n        \"\"\"Factory method to create the singleton ConfigManager\"\"\"\n        if ConfigManager._instance and ConfigManager._instance.app_name == app_name:\n            return ConfigManager._instance\n        ConfigManager._instance = ConfigManager(app_name, org_name)\n        return ConfigManager._instance\n\n    @staticmethod\n    def reset():\n        \"\"\"Reset the singleton instance; the caller must call create() again with the proper app_name\"\"\"\n        ConfigManager._instance = None\n    \n    @staticmethod\n    def singleton():\n        if ConfigManager._instance:\n            return ConfigManager._instance\n        else:\n            return ConfigManager.create(\"test\", \"Unknown\") # this eliminates the need to set up the ConfigManager ahead of time for unit tests\n    \n    def __init__(self, app_name, org_name):\n        self.app_name = app_name\n        self.cfg = wx.Config(app_name, org_name)\n        self.clear_config_state = Event(doc=\"\"\"\nTODO\"\"\")\n        self.save_config_state = Event(doc=\"\"\"\nTODO\"\"\")\n\n    @staticmethod\n    def __write(cfg, path, key, val, kind):\n        key = path + '/' + key\n        if kind == 
1:\n #print \"Write(%s, '%s')\" % (key, val)\n cfg.Write(key, val)\n elif kind == 2:\n #print \"WriteBool(%s, %s)\" % (key, val)\n cfg.WriteBool(key, val)\n elif kind == 3:\n #print \"WriteInt(%s, %d)\" % (key, val)\n cfg.WriteInt(key, val)\n elif kind == 4:\n #print \"WriteFloat(%s, %g)\" % (key, val)\n cfg.WriteFloat(key, val)\n\n def __migrate(self, src):\n (cont, key, idx) = src.GetFirstEntry()\n path = src.GetPath()\n ConfigManager.__write(self.cfg, path, key, src.Read(key), src.GetEntryType(key))\n while cont:\n (cont, key, idx) = src.GetNextEntry(idx)\n if -1 != idx:\n ConfigManager.__write(self.cfg, path, key, src.Read(key), src.GetEntryType(key))\n groups = []\n (cont, group, idx) = src.GetFirstGroup()\n if -1 != idx:\n groups.append(group)\n while cont:\n (cont, group, idx) = src.GetNextGroup(idx)\n if -1 != idx:\n groups.append(group)\n for group in groups:\n src.SetPath(group)\n self.__migrate(src)\n src.SetPath(path)\n\n def get_wx_config(self):\n return self.cfg\n\n def save_dialog_position(self, dialog_name, rect):\n path = \"DialogState/\" + dialog_name + \"/Position/\"\n self.cfg.Write(path + \"X\", unicode(rect[0]))\n self.cfg.Write(path + \"Y\", unicode(rect[1]))\n self.cfg.Write(path + \"W\", unicode(rect[2]))\n self.cfg.Write(path + \"H\", unicode(rect[3]))\n\n def save_dialog_zoom(self, dialog_name, max):\n path = \"DialogState/\" + dialog_name + \"/Position/\"\n self.cfg.Write(path + \"Max\", unicode(bool(max)))\n\n def get_dialog_position(self, dialog_name, default=None):\n path = \"DialogState/\" + dialog_name + \"/Position/\"\n if not default:\n default = (10, 10, 300, 300)\n x = int(self.cfg.Read(path + \"X\", unicode(default[0])))\n y = int(self.cfg.Read(path + \"Y\", unicode(default[1])))\n w = int(self.cfg.Read(path + \"W\", unicode(default[2])))\n h = int(self.cfg.Read(path + \"H\", unicode(default[3])))\n return [x, y, w, h]\n\n def get_dialog_zoom(self, dialog_name):\n path = \"DialogState/\" + dialog_name + \"/Position/\"\n m = self.cfg.Read(path + \"Max\", u\"False\") == \"True\"\n return m\n\n def get_extra(self, name, dialog_name, default=\"\"):\n path = \"DialogState/\" + dialog_name + \"/\" + name\n return self.cfg.Read(path, default)\n\n def get_extra_boolean(self, name, dialog_name, default=False):\n if default:\n default_str = u\"1\"\n else:\n default_str = u\"0\"\n path = \"DialogState/\" + dialog_name + \"/\" + name\n return not (self.cfg.Read(path, default_str) == u\"0\")\n\n def get_extra_integer(self, name, dialog_name, default=0):\n default_str = str(default)\n return int(self.get_extra(name, dialog_name, default_str))\n\n def save_extra(self, name, value, dialog_name):\n path = \"DialogState/\" + dialog_name + \"/\" + name\n self.cfg.Write(path, unicode(value))\n\n def save_extra_boolean(self, name, value, dialog_name):\n assert value == True or value == False\n if value == True:\n value_str = \"1\"\n else:\n value_str = \"0\" \n path = \"DialogState/\" + dialog_name + \"/\" + name\n self.cfg.Write(path, unicode(value_str))\n\n def save_extra_integer(self, name, value, dialog_name):\n assert type(value) == type(0)\n self.save_extra(name, unicode(value), dialog_name)\n \n def clear_all(self):\n return self.cfg.DeleteAll()\n\n def clear_dialog(self, dialog_name):\n self.cfg.DeleteGroup(\"DialogState/\" + dialog_name)\n\n def get_next_user_number(self, name):\n name = \"Counter/\" + name\n counter = int(self.cfg.Read(name, u\"0\"))\n counter += 1\n self.cfg.Write(name, unicode(counter))\n return counter\n\n def flush(self):\n 
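# Ask wx.Config to write any cached values through to permanent storage\n        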
self.cfg.Flush()\n\n#----------------------------------------------------------\n\n\nclass DialogPositionMixin:\n    def __init__(self, dialog_name, default_dimension=(100, 100, 300, 300), extra_names=[]):\n        self.dialog_name = dialog_name\n        self.default_dimension = default_dimension\n        self.extra_names = extra_names\n        self.__remember()\n        wx.CallAfter(self.set_sizes)\n        wx.CallAfter(self.reg_events)\n        # event registrations need to happen after the call to set_sizes\n        # because the MOVE event is raised immediately, which would overwrite \n        # the saved window position before it has been set\n\n    def reg_events(self):\n        wx.EVT_MAXIMIZE(self, self.OnMaximise)\n        wx.EVT_ICONIZE(self, self.OnMinimise)\n        wx.EVT_MOVE(self, self.OnMove)\n        wx.EVT_SIZE(self, self.OnSize)\n    \n    def set_sizes(self):\n        x, y, w, h = ConfigManager.singleton().get_dialog_position(self.dialog_name, self.default_dimension)\n        if win32gui:\n            desktop = win32gui.GetDesktopWindow()\n            (dleft,dtop,dwidth,dheight) = win32gui.GetWindowRect(desktop)\n        else:\n            dwidth, dheight = wx.DisplaySize()\n        if ((x > dwidth) or (y > dheight)):\n            x = (dwidth - w) / 2\n            y = (dheight - h) / 2\n        self.SetDimensions(x, y, w, h)\n        self.Maximize(ConfigManager.singleton().get_dialog_zoom(self.dialog_name))\n\n    def OnSaveConfig(self, ignore=None):\n        self.__save_dialog_position()\n\n    def OnClearPreferences(self, ignore=None):\n        self.SetDimensions(self.default_dimension[0],\n                           self.default_dimension[1],\n                           self.default_dimension[2],\n                           self.default_dimension[3])\n        ConfigManager.singleton().save_dialog_position(self.dialog_name, self.default_dimension)\n\n    def OnMaximise(self, event):\n        self.__save_dialog_position()\n        event.Skip()\n\n    def OnMinimise(self, event):\n        if event.Iconized():\n            # we're being minimized.\n            self.__save_dialog_position()\n        event.Skip()\n    \n    def OnMove(self, event):\n        self.__save_dialog_position()\n        event.Skip()\n    \n    def OnSize(self, event):\n        self.__save_dialog_position()\n        event.Skip()\n\n    def __save_dialog_position(self):\n        # don't save the minimized or maximised position \n        if not self.IsMaximized() and not self.IsIconized(): \n            ConfigManager.singleton().save_dialog_position(self.dialog_name, self.GetRect())\n        ConfigManager.singleton().save_dialog_zoom(self.dialog_name, self.IsMaximized())\n\n    # Use these methods to get and save other dialog state, e.g. 
splitter or list column positions.\n # Note that the value and the default must be strings - that is all that the wx.Config can save.\n # In your derived class, override the OnSaveConfig to save these extra values.\n def get_extra(self, name, default=\"\"):\n return ConfigManager.singleton().get_extra(name, self.dialog_name, default)\n\n def save_extra(self, name, value):\n ConfigManager.singleton().save_extra(name, value, self.dialog_name)\n\n def __remember(self):\n ConfigManager.singleton().save_config_state.add_observer(self.OnSaveConfig)\n ConfigManager.singleton().clear_config_state.add_observer(self.OnClearPreferences)\n\n def forget(self):\n ConfigManager.singleton().save_config_state.remove_observer(self.OnSaveConfig)\n ConfigManager.singleton().clear_config_state.remove_observer(self.OnClearPreferences)\n\n#\n# -----------------------------------------------------------\n#\n\ndef top_level_parent(dlg):\n parent = dlg.GetParent()\n if parent:\n while True:\n if parent.IsTopLevel():\n break\n parent = parent.GetParent()\n return parent\n\ndef centre_and_run_modal_dialog(dlg):\n parent = top_level_parent(dlg)\n if parent:\n px, py = parent.GetPositionTuple()\n pw, ph = parent.GetSizeTuple()\n w, h = dlg.GetSizeTuple()\n x = (pw - w) / 2 + px\n y = (ph - h) / 2 + py\n if x < 10:\n x = 10\n if y < 10:\n y = 10\n screen_x = wx.SystemSettings.GetMetric(wx.SYS_SCREEN_X)\n screen_y = wx.SystemSettings.GetMetric(wx.SYS_SCREEN_Y) \n if x > screen_x - 5 or y > screen_y - 5:\n x = (screen_x - w) / 2\n y = (screen_y - h) /2\n dlg.SetPosition((x, y))\n result = dlg.ShowModal()\n dlg.Destroy()\n return result\n\n\n#----------------------------------------------------------------------\n\nclass TextEntryDialog(wx.TextEntryDialog):\n def get(parent, message, title):\n dlg = TextEntryDialog(parent, message, title)\n result = None\n if dlg.ShowModal() == wx.ID_OK:\n result = dlg.GetValue()\n dlg.Destroy()\n return result\n get = staticmethod(get)\n\n def __init__(self, parent, message, title):\n wx.TextEntryDialog.__init__(self, parent, message, title)\n","sub_path":"wxpythonutils/dialogutils.py","file_name":"dialogutils.py","file_ext":"py","file_size_in_byte":10616,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"116760172","text":"#!/usr/bin/env python\n# encoding: utf-8\n\n\"\"\"\nGiven a string containing only digits, restore it by returning all possible valid IP address combinations.\n\nFor example:\nGiven \"25525511135\",\n\nreturn [\"255.255.11.135\", \"255.255.111.35\"]. (Order does not matter)\n\"\"\"\n\nclass Solution(object):\n def restoreIpAddresses(self, s):\n \"\"\"\n :type s: str\n :rtype: List[str]\n \"\"\"\n def helper(i, n):\n if i == len(s) and n == 0:\n yield ''\n elif i >= len(s) or n == 0:\n yield False\n else:\n for j in xrange(1, 4):\n if int(s[i:i+j]) <= 255:\n for sub in helper(i + j, n - 1):\n if sub is not False:\n yield s[i:i+j] + ('.' 
+ sub if sub else '')\n                    if s[i] == '0':\n                        break\n        if not s:\n            return []\n        return list(helper(0, 4))\n\n\nclass Solution(object):\n    def restoreIpAddresses(self, s):\n        \"\"\"\n        :type s: str\n        :rtype: List[str]\n        \"\"\"\n        def isValid(i, j):\n            if i < j <= l and ((s[i] == '0' and i + 1 == j) or (s[i] != '0' and int(s[i:j]) <= 255)):\n                return True\n            else:\n                return False\n\n        result = []\n        l = len(s)\n        for i in xrange(1, 4):\n            for j in xrange(i + 1, i + 4):\n                for k in xrange(j + 1, l):\n                    if isValid(0, i) and isValid(i, j) and isValid(j, k) and isValid(k, l):\n                        result.append('.'.join([s[:i], s[i:j], s[j:k], s[k:]]))\n        return result\n","sub_path":"Python/93-RestoreIPAddress/restoreIpAddress.py","file_name":"restoreIpAddress.py","file_ext":"py","file_size_in_byte":1665,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"126747289","text":"import csv\nfrom .models import Student, Staff, Club, Officer\nfrom random import randint\n\n\ndef load_students(filename):\n    with open(filename, 'r') as f:\n        for student in csv.DictReader(f):\n            gpa_req = True if float(student['cum_gpa']) >= 3.0 or (student['acad_prog'] in ['UGRD', 'NDEGU']\n                                                                   and float(student['cum_gpa']) >= 2.0) else False\n            Student.objects.create(emplid=student['emplid'],\n                                   first_name=student['first_name'],\n                                   last_name=student['last_name'],\n                                   email=student['email'],\n                                   school=student['cu_school1'],\n                                   semester=student['strm'],\n                                   gpa_req=gpa_req,\n                                   type=student['acad_prog'],\n                                   cum_credits=int(float(student['tot_cumulative'])))\n\n\ndef load_staff(filename):\n    with open(filename, 'r') as f:\n        for staff in csv.DictReader(f):\n            Staff.objects.create(first_name=staff['first_name'],\n                                 last_name=staff['last_name'],\n                                 email=staff['email'])\n\n\ndef load_clubs(filename, grad_clubs):\n    gc_emails = []\n    eboard = {\n        'P': \"President's Citymail\",\n        'VP': \"VP Citymail\",\n        'TR': \"Treasurer's Citymail\",\n        'S': \"Secretary's Citymail\"\n    }\n\n    with open(grad_clubs, 'r') as f:\n        for club in csv.reader(f):\n            gc_emails.append(club[1].lower())\n\n    ids = set()\n    with open(filename, mode='r', encoding='utf-8-sig') as f:\n\n        for club in csv.DictReader(f):\n            c_type = 'G' if club['Club Email'].lower() in gc_emails else 'U'\n            try:\n                adv = Staff.objects.get(\n                    email=club[\"Faculty Advisor's CCNY Email\"].lower())\n            except:\n                adv = None\n\n            new_club = Club.objects.create(id=generate_id(ids),\n                                           name=club['Club Name'],\n                                           email=club['Club Email'].lower(),\n                                           website=club['Club Website'],\n                                           description=bytes(club['Club Description'],\n                                                             'utf-8').decode('utf-8', 'ignore'),\n                                           type=c_type,\n                                           advisor=adv\n                                           )\n            for k, v in eboard.items():\n                try:\n                    std = Student.objects.get(email=club[v].lower())\n                except:\n                    std = None\n                Officer.objects.create(club=new_club,\n                                       position=k,\n                                       student=std)\n\n\ndef generate_id(rands):\n    while True:\n        new_id = randint(99, 999)\n        if new_id not in rands:\n            rands.add(new_id)\n            return int('181' + str(new_id))\n","sub_path":"sacred3.0/server/registration/loader.py","file_name":"loader.py","file_ext":"py","file_size_in_byte":3147,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
{"seq_id":"534125927","text":"import argparse\nimport os\nfrom dup_searcher import DuplicateSearcher\n\nap = argparse.ArgumentParser()\nap.add_argument(\"-d\", \"--directory\", required=True, help=\"Top level directory\")\nap.add_argument(\"-s\", \"--save\", required=False, help=\"save location where possible duplicates are stored for review; \\\n                    if unprovided, duplicates will be 
automatically deleted\")\nap.add_argument(\"-n\", \"--distance\", default=0.01, help=\"maximum chi-squared distance \\\n allowed between query and relevant histograms\")\nargs = vars(ap.parse_args())\n\n\n# ##### initialize ######\n\n# handle unclean directories\ndef clean_directories():\n print(\"[INFO] Cleaning directories...\")\n\n save_dirs = ()\n save_files = ()\n # gather directory info and delete hidden .DS_Store files\n for dirpath, dirnames, files in os.walk(args[\"save\"]):\n for file in files:\n if file == \".DS_Store\":\n os.remove(os.path.join(dirpath, file))\n print(\"[INFO] Deleting .DS_Store file in save directory...\")\n for dir in dirnames:\n for sub_dirpath, _, sub_files in os.walk(dir):\n for sub_file in sub_files:\n if sub_file == \".DS_Store\":\n os.remove(os.path.join(sub_dirpath, sub_file))\n print(\"[INFO] Deleting .DS_Store file in save directory...\")\n\n for dirpath, dirnames, files in os.walk(args[\"directory\"]):\n for file in files:\n if file == \".DS_Store\":\n os.remove(os.path.join(dirpath, file))\n print(\"[INFO] Deleting .DS_Store file in top directory...\")\n for dir in dirnames:\n for sub_dirpath, _, sub_files in os.walk(dir):\n for sub_file in sub_files:\n if sub_file == \".DS_Store\":\n os.remove(os.path.join(sub_dirpath, sub_file))\n print(\"[INFO] Deleting .DS_Store file in top directory...\")\n print()\n\n for _, dirnames, files in os.walk(args[\"save\"]):\n # check that the directory is empty\n if len(dirnames) > 0 or len(files) > 0:\n print(\"[INFO] Designated save directory is not empty.\")\n print(\" This could interfere with the saving of duplicates. Please\")\n print(\" clear the directory or terminate and designate another\")\n print(\" before proceeding.\")\n print()\n\n print(args[\"save\"])\n print(\"contains: \")\n for file in files:\n print(\" \" + file)\n for dir in dirnames:\n print()\n print(\" subdir: \" + dir)\n for _, _, sub_files in os.walk(dir):\n for sub_file in sub_files:\n print(\" \" + sub_file)\n\n try:\n input(\"Continue? [enter]\")\n except SyntaxError:\n pass\n\n\nclean_directories()\nsearcher = DuplicateSearcher(args[\"directory\"], args[\"save\"], args[\"distance\"])\n\nauto_del = False\nif not isinstance(args[\"save\"], str):\n auto_del = True\n\n# start processing\nprint(\"[INFO] Beginning histogram image comparison...\")\nprint(\" Auto-Delete: \" + str(auto_del))\nprint()\nif not auto_del:\n num_dups = searcher.gather_duplicates(\"save\")\n\n print()\n print()\n print(\"[INFO] Comparisons complete; ready to delete duplicates.\")\n print(\"[INFO] Please review the selected images in your given save directory.\")\n print(\" Delete an image from duplicates subdirectory to prevent it from being purged\")\n print(\" from the original dataset.\")\n print()\n print(\"Total Duplicates: \" + str(num_dups))\n print()\n\n try:\n input(\"Continue? [enter]\")\n except SyntaxError:\n pass\n searcher.delete_all_duplicates()\n\nelse:\n searcher.gather_duplicates(\"delete\")\n\nprint(\"[INFO] Terminating histogram comparison...\")\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3981,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"83318246","text":"# --------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. 
See License.txt in the project root for license information.\n# --------------------------------------------------------------------------------------------\n\n# AZURE CLI VM TEST DEFINITIONS\nimport json\nimport os\nimport platform\nimport tempfile\nimport time\nimport unittest\nimport mock\nimport uuid\n\nimport six\n\nfrom knack.util import CLIError\nfrom azure_devtools.scenario_tests import AllowLargeResponse, record_only\nfrom azure.cli.core.profiles import ResourceType\nfrom azure.cli.testsdk import (\n ScenarioTest, ResourceGroupPreparer, LiveScenarioTest, api_version_constraint,\n StorageAccountPreparer)\nTEST_DIR = os.path.abspath(os.path.join(os.path.abspath(__file__), '..'))\n\n# pylint: disable=line-too-long\n# pylint: disable=too-many-lines\n\nTEST_SSH_KEY_PUB = \"ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQCbIg1guRHbI0lV11wWDt1r2cUdcNd27CJsg+SfgC7miZeubtwUhbsPdhMQsfDyhOWHq1+ZL0M+nJZV63d/1dhmhtgyOqejUwrPlzKhydsbrsdUor+JmNJDdW01v7BXHyuymT8G4s09jCasNOwiufbP/qp72ruu0bIA1nySsvlf9pCQAuFkAnVnf/rFhUlOkhtRpwcq8SUNY2zRHR/EKb/4NWY1JzR4sa3q2fWIJdrrX0DvLoa5g9bIEd4Df79ba7v+yiUBOS0zT2ll+z4g9izHK3EO5d8hL4jYxcjKs+wcslSYRWrascfscLgMlMGh0CdKeNTDjHpGPncaf3Z+FwwwjWeuiNBxv7bJo13/8B/098KlVDl4GZqsoBCEjPyJfV6hO0y/LkRGkk7oHWKgeWAfKtfLItRp00eZ4fcJNK9kCaSMmEugoZWcI7NGbZXzqFWqbpRI7NcDP9+WIQ+i9U5vqWsqd/zng4kbuAJ6UuKqIzB0upYrLShfQE3SAck8oaLhJqqq56VfDuASNpJKidV+zq27HfSBmbXnkR/5AK337dc3MXKJypoK/QPMLKUAP5XLPbs+NddJQV7EZXd29DLgp+fRIg3edpKdO7ZErWhv7d+3Kws+e1Y+ypmR2WIVSwVyBEUfgv2C8Ts9gnTF4pNcEY/S2aBicz5Ew2+jdyGNQQ== test@example.com\\n\"\n\n\ndef _write_config_file(user_name):\n from datetime import datetime\n\n public_key = ('ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC8InHIPLAu6lMc0d+5voyXqigZfT5r6fAM1+FQAi+mkPDdk2hNq1BG0Bwfc88G'\n 'm7BImw8TS+x2bnZmhCbVnHd6BPCDY7a+cHCSqrQMW89Cv6Vl4ueGOeAWHpJTV9CTLVz4IY1x4HBdkLI2lKIHri9+z7NIdvFk7iOk'\n 'MVGyez5H1xDbF2szURxgc4I2/o5wycSwX+G8DrtsBvWLmFv9YAPx+VkEHQDjR0WWezOjuo1rDn6MQfiKfqAjPuInwNOg5AIxXAOR'\n 'esrin2PUlArNtdDH1zlvI4RZi36+tJO7mtm3dJiKs4Sj7G6b1CjIU6aaj27MmKy3arIFChYav9yYM3IT')\n config_file_name = 'private_config_{}.json'.format(datetime.utcnow().strftime('%H%M%S%f'))\n config = {\n 'username': user_name,\n 'ssh_key': public_key\n }\n config_file = os.path.join(TEST_DIR, config_file_name)\n with open(config_file, 'w') as outfile:\n json.dump(config, outfile)\n return config_file\n\n\nclass VMImageListByAliasesScenarioTest(ScenarioTest):\n\n def test_vm_image_list_by_alias(self):\n result = self.cmd('vm image list --offer ubuntu').get_output_in_json()\n self.assertTrue(len(result) >= 1)\n self.assertEqual(result[0]['publisher'], 'Canonical')\n self.assertTrue(result[0]['sku'].endswith('LTS'))\n\n\nclass VMUsageScenarioTest(ScenarioTest):\n\n def test_vm_usage(self):\n self.cmd('vm list-usage --location westus',\n checks=self.check('type(@)', 'array'))\n\n\nclass VMImageListThruServiceScenarioTest(ScenarioTest):\n\n @AllowLargeResponse()\n def test_vm_images_list_thru_services(self):\n result = self.cmd('vm image list -l westus --publisher Canonical --offer Ubuntu_Snappy_Core -o tsv --all').output\n assert result.index('15.04') >= 0\n\n result = self.cmd('vm image list -p Canonical -f Ubuntu_Snappy_Core -o tsv --all').output\n assert result.index('15.04') >= 0\n\n\nclass VMOpenPortTest(ScenarioTest):\n\n @ResourceGroupPreparer(name_prefix='cli_test_open_port')\n def test_vm_open_port(self, resource_group):\n\n self.kwargs.update({\n 'vm': 'vm1'\n })\n\n self.cmd('vm create -g {rg} -l westus -n {vm} --admin-username ubuntu --image Canonical:UbuntuServer:14.04.4-LTS:latest 
--admin-password @PasswordPassword1! --public-ip-address-allocation dynamic --authentication-type password --use-unmanaged-disk')\n\n # min params - apply to existing NIC (updates existing NSG)\n self.kwargs['nsg_id'] = self.cmd('vm open-port -g {rg} -n {vm} --port \"*\" --priority 900').get_output_in_json()['id']\n self.kwargs['nsg'] = os.path.split(self.kwargs['nsg_id'])[1]\n self.cmd('network nsg show -g {rg} -n {nsg}',\n checks=self.check(\"length(securityRules[?name == 'open-port-all'])\", 1))\n\n # apply to subnet (creates new NSG)\n self.kwargs['nsg'] = 'newNsg'\n self.cmd('vm open-port -g {rg} -n {vm} --apply-to-subnet --nsg-name {nsg} --port \"*\" --priority 900')\n self.cmd('network nsg show -g {rg} -n {nsg}',\n checks=self.check(\"length(securityRules[?name == 'open-port-all'])\", 1))\n\n\nclass VMShowListSizesListIPAddressesScenarioTest(ScenarioTest):\n\n @ResourceGroupPreparer(name_prefix='cli_test_vm_list_ip')\n def test_vm_show_list_sizes_list_ip_addresses(self, resource_group):\n\n self.kwargs.update({\n 'loc': 'westus',\n 'vm': 'vm-with-public-ip',\n 'allocation': 'dynamic'\n })\n # Expecting no results at the beginning\n self.cmd('vm list-ip-addresses --resource-group {rg}', checks=self.is_empty())\n self.cmd('vm create --resource-group {rg} --location {loc} -n {vm} --admin-username ubuntu --image Canonical:UbuntuServer:14.04.4-LTS:latest --admin-password testPassword0 --public-ip-address-allocation {allocation} --authentication-type password --use-unmanaged-disk')\n result = self.cmd('vm show --resource-group {rg} --name {vm} -d', checks=[\n self.check('type(@)', 'object'),\n self.check('name', '{vm}'),\n self.check('location', '{loc}'),\n self.check('resourceGroup', '{rg}')\n ]).get_output_in_json()\n self.assertEqual(4, len(result['publicIps'].split('.')))\n\n result = self.cmd('vm list --resource-group {rg} -d', checks=[\n self.check('[0].name', '{vm}'),\n self.check('[0].location', '{loc}'),\n self.check('[0].resourceGroup', '{rg}'),\n self.check('[0].powerState', 'VM running')\n ]).get_output_in_json()\n self.assertEqual(4, len(result[0]['publicIps'].split('.')))\n\n self.cmd('vm list-vm-resize-options --resource-group {rg} --name {vm}',\n checks=self.check('type(@)', 'array'))\n\n # Expecting the one we just added\n self.kwargs['rg_caps'] = resource_group.upper() # test the command handles name with casing diff.\n self.cmd('vm list-ip-addresses --resource-group {rg_caps}', checks=[\n self.check('length(@)', 1),\n self.check('[0].virtualMachine.name', '{vm}'),\n self.check('[0].virtualMachine.resourceGroup', '{rg}'),\n self.check('length([0].virtualMachine.network.publicIpAddresses)', 1),\n self.check('[0].virtualMachine.network.publicIpAddresses[0].ipAllocationMethod', self.kwargs['allocation'].title()),\n self.check('type([0].virtualMachine.network.publicIpAddresses[0].ipAddress)', 'string')\n ])\n\n\nclass VMSizeListScenarioTest(ScenarioTest):\n\n def test_vm_size_list(self):\n self.cmd('vm list-sizes --location westus',\n checks=self.check('type(@)', 'array'))\n\n\nclass VMImageListOffersScenarioTest(ScenarioTest):\n\n def test_vm_image_list_offers(self):\n self.kwargs.update({\n 'loc': 'westus',\n 'pub': 'Canonical'\n })\n\n result = self.cmd('vm image list-offers --location {loc} --publisher {pub}').get_output_in_json()\n self.assertTrue(len(result) > 0)\n self.assertFalse([i for i in result if i['location'].lower() != self.kwargs['loc']])\n\n\nclass VMImageListPublishersScenarioTest(ScenarioTest):\n\n @AllowLargeResponse()\n def 
test_vm_image_list_publishers(self):\n self.kwargs.update({\n 'loc': 'westus'\n })\n self.cmd('vm image list-publishers --location {loc}', checks=[\n self.check('type(@)', 'array'),\n self.check(\"length([?location == '{loc}']) == length(@)\", True),\n ])\n\n\nclass VMImageListSkusScenarioTest(ScenarioTest):\n\n def test_vm_image_list_skus(self):\n\n self.kwargs.update({\n 'loc': 'westus',\n 'pub': 'Canonical',\n 'offer': 'UbuntuServer'\n })\n\n result = self.cmd(\"vm image list-skus --location {loc} -p {pub} --offer {offer} --query \\\"length([].id.contains(@, '/Publishers/{pub}/ArtifactTypes/VMImage/Offers/{offer}/Skus/'))\\\"\").get_output_in_json()\n self.assertTrue(result > 0)\n\n\nclass VMImageShowScenarioTest(ScenarioTest):\n\n def test_vm_image_show(self):\n\n self.kwargs.update({\n 'loc': 'westus',\n 'pub': 'Canonical',\n 'offer': 'UbuntuServer',\n 'sku': '14.04.2-LTS',\n 'ver': '14.04.201503090'\n })\n\n self.cmd('vm image show --location {loc} --publisher {pub} --offer {offer} --sku {sku} --version {ver}', checks=[\n self.check('type(@)', 'object'),\n self.check('location', '{loc}'),\n self.check('name', '{ver}'),\n self.check(\"contains(id, '/Publishers/{pub}/ArtifactTypes/VMImage/Offers/{offer}/Skus/{sku}/Versions/{ver}')\", True)\n ])\n\n\nclass VMGeneralizeScenarioTest(ScenarioTest):\n\n @ResourceGroupPreparer(name_prefix='cli_test_generalize_vm')\n def test_vm_generalize(self, resource_group):\n\n self.kwargs.update({\n 'vm': 'vm-generalize'\n })\n\n self.cmd('vm create -g {rg} -n {vm} --admin-username ubuntu --image UbuntuLTS --admin-password testPassword0 --authentication-type password --use-unmanaged-disk')\n self.cmd('vm stop -g {rg} -n {vm}')\n # Should be able to generalize the VM after it has been stopped\n self.cmd('vm generalize -g {rg} -n {vm}', checks=self.is_empty())\n self.cmd('vm show -g {rg} -n {vm}').get_output_in_json()\n self.cmd('vm capture -g {rg} -n {vm} --vhd-name-prefix vmtest',\n checks=self.is_empty())\n\n\nclass VMVMSSWindowsLicenseTest(ScenarioTest):\n\n @ResourceGroupPreparer(name_prefix='cli_test_windows_license_type')\n def test_vm_vmss_windows_license_type(self, resource_group):\n\n self.kwargs.update({\n 'vm': 'winvm',\n 'vmss': 'winvmss'\n })\n self.cmd('vm create -g {rg} -n {vm} --image Win2012R2Datacenter --admin-username clitest1234 --admin-password Test123456789# --license-type Windows_Server --use-unmanaged-disk')\n self.cmd('vm show -g {rg} -n {vm}', checks=[\n self.check('licenseType', 'Windows_Server')\n ])\n self.cmd('vm update -g {rg} -n {vm} --license-type None', checks=[\n self.check('licenseType', 'None')\n ])\n self.cmd('vmss create -g {rg} -n {vmss} --image Win2012R2Datacenter --admin-username clitest1234 --admin-password Test123456789# --license-type Windows_Server --instance-count 1 --use-unmanaged-disk')\n self.cmd('vmss show -g {rg} -n {vmss}', checks=[\n self.check('virtualMachineProfile.licenseType', 'Windows_Server')\n ])\n\n\nclass VMCreateWithSpecializedUnmanagedDiskTest(ScenarioTest):\n\n @ResourceGroupPreparer(name_prefix='cli_test_vm_with_specialized_unmanaged_disk')\n def test_vm_create_with_specialized_unmanaged_disk(self, resource_group):\n\n self.kwargs.update({\n 'loc': 'westus'\n })\n\n # create a vm with unmanaged os disk\n self.cmd('vm create -g {rg} -n vm1 --image debian --use-unmanaged-disk --admin-username ubuntu --admin-password testPassword0 --authentication-type password')\n vm1_info = self.cmd('vm show -g {rg} -n vm1').get_output_in_json()\n self.kwargs['disk_uri'] = 
vm1_info['storageProfile']['osDisk']['vhd']['uri']\n\n        self.cmd('vm delete -g {rg} -n vm1 -y')\n\n        # create a vm by attaching the OS disk from the deleted VM\n        self.cmd('vm create -g {rg} -n vm2 --attach-os-disk {disk_uri} --os-type linux --use-unmanaged-disk',\n                 checks=self.check('powerState', 'VM running'))\n\n    @ResourceGroupPreparer(name_prefix='cli_test_vm_with_specialized_unmanaged_disk')\n    def test_vm_create_with_unmanaged_data_disks(self, resource_group):\n\n        self.kwargs.update({\n            'vm': 'vm1',\n            'vm2': 'vm2'\n        })\n\n        # create an unmanaged vm with 2 unmanaged disks\n        vm_create_cmd = 'vm create -g {rg} -n vm1 --image debian --use-unmanaged-disk --admin-username ubuntu --admin-password testPassword0 --authentication-type password'\n        self.cmd(vm_create_cmd)\n        self.cmd('vm unmanaged-disk attach -g {rg} --vm-name {vm} --new --size-gb 1')\n        self.cmd('vm unmanaged-disk attach -g {rg} --vm-name {vm} --new --size-gb 2')\n        vm1_info = self.cmd('vm show -g {rg} -n {vm}').get_output_in_json()\n        self.kwargs['disk_uri'] = vm1_info['storageProfile']['osDisk']['vhd']['uri']\n        self.kwargs['data_disk'] = vm1_info['storageProfile']['dataDisks'][0]['vhd']['uri']\n        self.kwargs['data_disk2'] = vm1_info['storageProfile']['dataDisks'][1]['vhd']['uri']\n\n        self.cmd('vm delete -g {rg} -n vm1 -y')\n\n        # create a vm by attaching the OS disk from the deleted VM\n        vm_create_cmd = ('vm create -g {rg} -n {vm2} --attach-os-disk {disk_uri} --os-type linux --use-unmanaged-disk '\n                         '--attach-data-disks {data_disk} {data_disk2} --data-disk-caching 0=ReadWrite 1=ReadOnly')\n        self.cmd(vm_create_cmd)\n        self.cmd('vm show -g {rg} -n {vm2} -d', checks=[\n            self.check('storageProfile.dataDisks[0].caching', 'ReadWrite'),\n            self.check('storageProfile.dataDisks[0].lun', 0),\n            self.check('storageProfile.dataDisks[1].caching', 'ReadOnly'),\n            self.check('storageProfile.dataDisks[1].lun', 1)\n        ])\n\n\nclass VMAttachDisksOnCreate(ScenarioTest):\n\n    @ResourceGroupPreparer()\n    def test_vm_create_by_attach_unmanaged_os_and_data_disks(self, resource_group):\n        # creating a vm\n        self.cmd('vm create -g {rg} -n vm1 --use-unmanaged-disk --image centos --admin-username centosadmin --admin-password testPassword0 --authentication-type password')\n        self.cmd('vm unmanaged-disk attach -g {rg} --vm-name vm1 --new --size-gb 2')\n        result = self.cmd('vm show -g {rg} -n vm1').get_output_in_json()\n        self.kwargs['os_disk_vhd'] = result['storageProfile']['osDisk']['vhd']['uri']\n        self.kwargs['data_disk_vhd'] = result['storageProfile']['dataDisks'][0]['vhd']['uri']\n\n        # delete the vm to end the VHDs' leases so they can be used to create a new vm through attaching\n        self.cmd('vm deallocate -g {rg} -n vm1')\n        self.cmd('vm delete -g {rg} -n vm1 -y')\n\n        # rebuild a new vm\n        self.cmd('vm create -g {rg} -n vm2 --attach-os-disk {os_disk_vhd} --attach-data-disks {data_disk_vhd} --os-type linux --use-unmanaged-disk',\n                 checks=self.check('powerState', 'VM running'))\n\n\nclass VMOSDiskSize(ScenarioTest):\n\n    @ResourceGroupPreparer(name_prefix='cli_test_os_disk_size')\n    def test_set_os_disk_size(self, resource_group):\n        # test unmanaged disk\n        self.kwargs.update({'sa': self.create_random_name(prefix='cli', length=12)})\n        self.cmd('vm create -g {rg} -n vm --image centos --admin-username centosadmin --admin-password testPassword0 --authentication-type password --os-disk-size-gb 75 --use-unmanaged-disk --storage-account {sa}')\n        result = self.cmd('storage blob list --account-name {sa} --container-name vhds').get_output_in_json()\n        
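# the OS disk blob should reflect the requested 75 GB size (contentLength is in bytes)\n        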
self.assertTrue(result[0]['properties']['contentLength'] > 75000000000)\n\n\nclass VMCreateAndStateModificationsScenarioTest(ScenarioTest):\n\n def _check_vm_power_state(self, expected_power_state):\n\n self.cmd('vm get-instance-view --resource-group {rg} --name {vm}', checks=[\n self.check('type(@)', 'object'),\n self.check('name', '{vm}'),\n self.check('resourceGroup', '{rg}'),\n self.check('length(instanceView.statuses)', 2),\n self.check('instanceView.statuses[0].code', 'ProvisioningState/succeeded'),\n self.check('instanceView.statuses[1].code', expected_power_state),\n ])\n\n @ResourceGroupPreparer(name_prefix='cli_test_vm_state_mod')\n def test_vm_create_state_modifications(self, resource_group):\n\n self.kwargs.update({\n 'loc': 'eastus',\n 'vm': 'vm-state-mod',\n 'nsg': 'mynsg',\n 'ip': 'mypubip',\n 'sa': self.create_random_name('clistorage', 15),\n 'vnet': 'myvnet'\n })\n\n # Expecting no results\n self.cmd('vm list --resource-group {rg}',\n checks=self.is_empty())\n self.cmd('vm create --resource-group {rg} --location {loc} --name {vm} --admin-username ubuntu --image UbuntuLTS --admin-password testPassword0 --authentication-type password --tags firsttag=1 secondtag=2 thirdtag --nsg {nsg} --public-ip-address {ip} --vnet-name {vnet} --storage-account {sa} --use-unmanaged-disk')\n\n # Expecting one result, the one we created\n self.cmd('vm list --resource-group {rg}', checks=[\n self.check('length(@)', 1),\n self.check('[0].resourceGroup', '{rg}'),\n self.check('[0].name', '{vm}'),\n self.check('[0].location', '{loc}'),\n self.check('[0].provisioningState', 'Succeeded')\n ])\n\n # Verify tags were set\n self.cmd('vm show --resource-group {rg} --name {vm}', checks=[\n self.check('tags.firsttag', '1'),\n self.check('tags.secondtag', '2'),\n self.check('tags.thirdtag', ''),\n ])\n self._check_vm_power_state('PowerState/running')\n\n self.cmd('vm user update -g {rg} -n {vm} -u foouser1 -p Foo12345')\n self.cmd('vm user delete -g {rg} -n {vm} -u foouser1')\n\n self.cmd('network nsg show --resource-group {rg} --name {nsg}', checks=[\n self.check('tags.firsttag', '1'),\n self.check('tags.secondtag', '2'),\n self.check('tags.thirdtag', ''),\n ])\n self.cmd('network public-ip show --resource-group {rg} --name {ip}', checks=[\n self.check('tags.firsttag', '1'),\n self.check('tags.secondtag', '2'),\n self.check('tags.thirdtag', ''),\n ])\n self.cmd('network vnet show --resource-group {rg} --name {vnet}', checks=[\n self.check('tags.firsttag', '1'),\n self.check('tags.secondtag', '2'),\n self.check('tags.thirdtag', ''),\n ])\n self.cmd('storage account show --resource-group {rg} --name {sa}', checks=[\n self.check('tags.firsttag', '1'),\n self.check('tags.secondtag', '2'),\n self.check('tags.thirdtag', ''),\n ])\n\n self.cmd('vm stop --resource-group {rg} --name {vm}')\n self._check_vm_power_state('PowerState/stopped')\n self.cmd('vm start --resource-group {rg} --name {vm}')\n self._check_vm_power_state('PowerState/running')\n self.cmd('vm restart --resource-group {rg} --name {vm}')\n self._check_vm_power_state('PowerState/running')\n self.cmd('vm deallocate --resource-group {rg} --name {vm}')\n self._check_vm_power_state('PowerState/deallocated')\n self.cmd('vm resize -g {rg} -n {vm} --size Standard_DS2_v2',\n checks=self.check('hardwareProfile.vmSize', 'Standard_DS2_v2'))\n self.cmd('vm delete --resource-group {rg} --name {vm} --yes')\n # Expecting no results\n self.cmd('vm list --resource-group {rg}', checks=self.is_empty())\n\n\nclass VMNoWaitScenarioTest(ScenarioTest):\n\n 
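# Exercises --no-wait creation followed by explicit 'vm wait' polling\n    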
@ResourceGroupPreparer(name_prefix='cli_test_vm_no_wait')\n    def test_vm_create_no_wait(self, resource_group):\n\n        self.kwargs.update({\n            'loc': 'westus',\n            'vm': 'vmnowait2'\n        })\n        self.cmd('vm create -g {rg} -n {vm} --admin-username user12 --admin-password testPassword0 --authentication-type password --image UbuntuLTS --no-wait --use-unmanaged-disk',\n                 checks=self.is_empty())\n        self.cmd('vm wait -g {rg} -n {vm} --custom \"instanceView.statuses[?code==\\'PowerState/running\\']\"',\n                 checks=self.is_empty())\n        self.cmd('vm get-instance-view -g {rg} -n {vm}',\n                 checks=self.check(\"length(instanceView.statuses[?code=='PowerState/running'])\", 1))\n        self.cmd('vm update -g {rg} -n {vm} --set tags.mytag=tagvalue1 --no-wait',\n                 checks=self.is_empty())\n        self.cmd('vm wait -g {rg} -n {vm} --updated',\n                 checks=self.is_empty())\n        self.cmd('vm show -g {rg} -n {vm}',\n                 checks=self.check(\"tags.mytag\", 'tagvalue1'))\n\n\nclass VMAvailSetScenarioTest(ScenarioTest):\n\n    @ResourceGroupPreparer()\n    def test_vm_availset(self, resource_group):\n\n        self.kwargs.update({\n            'availset': 'availset-test'\n        })\n        self.cmd('vm availability-set create -g {rg} -n {availset}', checks=[\n            self.check('name', '{availset}'),\n            self.check('platformFaultDomainCount', 2),\n            self.check('platformUpdateDomainCount', 5)  # server defaults to 5\n        ])\n\n        # create with explicit UD count\n        self.cmd('vm availability-set create -g {rg} -n avset2 --platform-fault-domain-count 2 --platform-update-domain-count 2', checks=[\n            self.check('platformFaultDomainCount', 2),\n            self.check('platformUpdateDomainCount', 2)\n        ])\n        self.cmd('vm availability-set delete -g {rg} -n avset2')\n\n        self.cmd('vm availability-set update -g {rg} --availability-set-name {availset} -n {availset} --set tags.test=success',\n                 checks=self.check('tags.test', 'success'))\n        self.cmd('vm availability-set list -g {rg}', checks=[\n            self.check('length(@)', 1),\n            self.check('[0].name', '{availset}')\n        ])\n        self.cmd('vm availability-set list-sizes -g {rg} -n {availset}',\n                 checks=self.check('type(@)', 'array'))\n        self.cmd('vm availability-set show -g {rg} -n {availset}',\n                 checks=[self.check('name', '{availset}')])\n        self.cmd('vm availability-set delete -g {rg} -n {availset}')\n        self.cmd('vm availability-set list -g {rg}',\n                 checks=self.check('length(@)', 0))\n\n\nclass VMExtensionScenarioTest(ScenarioTest):\n\n    @ResourceGroupPreparer(name_prefix='cli_test_vm_extension')\n    def test_vm_extension(self, resource_group):\n\n        user_name = 'foouser1'\n        config_file = _write_config_file(user_name)\n\n        self.kwargs.update({\n            'vm': 'myvm',\n            'pub': 'Microsoft.OSTCExtensions',\n            'ext': 'VMAccessForLinux',\n            'config': config_file,\n            'user': user_name\n        })\n\n        self.cmd('vm create -n {vm} -g {rg} --image UbuntuLTS --authentication-type password --admin-username user11 --admin-password testPassword0 --use-unmanaged-disk')\n\n        try:\n            self.cmd('vm extension list --vm-name {vm} --resource-group {rg}',\n                     checks=self.check('length([])', 0))\n            self.cmd('vm extension set -n {ext} --publisher {pub} --version 1.2 --vm-name {vm} --resource-group {rg} --protected-settings \"{config}\"')\n            self.cmd('vm get-instance-view -n {vm} -g {rg}', checks=[\n                self.check('*.extensions[0].name', ['VMAccessForLinux']),\n                self.check('*.extensions[0].typeHandlerVersion', ['1.4.7.1']),\n            ])\n            self.cmd('vm extension show --resource-group {rg} --vm-name {vm} --name {ext}', checks=[\n                self.check('type(@)', 'object'),\n                self.check('name', '{ext}'),\n                self.check('resourceGroup', '{rg}')\n            ])\n            self.cmd('vm extension delete 
--resource-group {rg} --vm-name {vm} --name {ext}')\n finally:\n os.remove(config_file)\n\n\nclass VMMachineExtensionImageScenarioTest(ScenarioTest):\n\n def test_vm_machine_extension_image(self):\n\n self.kwargs.update({\n 'loc': 'westus',\n 'pub': 'Microsoft.Azure.Diagnostics',\n 'ext': 'IaaSDiagnostics',\n 'ver': '1.6.4.0'\n })\n\n self.cmd('vm extension image list-names --location {loc} --publisher {pub}', checks=[\n self.check('type(@)', 'array'),\n self.check(\"length([?location == '{loc}']) == length(@)\", True),\n ])\n self.cmd('vm extension image list-versions --location {loc} -p {pub} --name {ext}', checks=[\n self.check('type(@)', 'array'),\n self.check(\"length([?location == '{loc}']) == length(@)\", True),\n ])\n self.cmd('vm extension image show --location {loc} -p {pub} --name {ext} --version {ver}', checks=[\n self.check('type(@)', 'object'),\n self.check('location', '{loc}'),\n self.check(\"contains(id, '/Providers/Microsoft.Compute/Locations/{loc}/Publishers/{pub}/ArtifactTypes/VMExtension/Types/{ext}/Versions/{ver}')\", True)\n ])\n\n\nclass VMExtensionImageSearchScenarioTest(ScenarioTest):\n\n @AllowLargeResponse()\n def test_vm_extension_image_search(self):\n # pick this specific name, so the search will be under one publisher. This avoids\n # the parallel searching behavior that causes incomplete VCR recordings.\n self.kwargs.update({\n 'pub': 'Test.Microsoft.VisualStudio.Services',\n 'image': 'TeamServicesAgentLinux1'\n })\n self.cmd('vm extension image list -l westus --publisher {pub} --name {image}', checks=[\n self.check('type(@)', 'array'),\n self.check(\"length([?name == '{image}']) == length(@)\", True)\n ])\n self.cmd('vm extension image list -l westus -p {pub} --name {image} --latest',\n checks=self.check('length(@)', 1))\n\n\nclass VMCreateUbuntuScenarioTest(ScenarioTest):\n\n @ResourceGroupPreparer(name_prefix='cli_test_vm_create_ubuntu')\n def test_vm_create_ubuntu(self, resource_group, resource_group_location):\n\n self.kwargs.update({\n 'username': 'ubuntu',\n 'vm': 'cli-test-vm2',\n 'image': 'UbuntuLTS',\n 'auth': 'ssh',\n 'ssh_key': TEST_SSH_KEY_PUB,\n 'loc': resource_group_location\n })\n self.cmd('vm create --resource-group {rg} --admin-username {username} --name {vm} --authentication-type {auth} --image {image} --ssh-key-value \\'{ssh_key}\\' --location {loc} --data-disk-caching ReadOnly --use-unmanaged-disk')\n\n self.cmd('vm show -g {rg} -n {vm}', checks=[\n self.check('provisioningState', 'Succeeded'),\n self.check('osProfile.adminUsername', '{username}'),\n self.check('osProfile.computerName', '{vm}'),\n self.check('osProfile.linuxConfiguration.disablePasswordAuthentication', True),\n self.check('osProfile.linuxConfiguration.ssh.publicKeys[0].keyData', '{ssh_key}')\n ])\n\n # test for idempotency--no need to reverify, just ensure the command doesn't fail\n self.cmd('vm create --resource-group {rg} --admin-username {username} --name {vm} --authentication-type {auth} --image {image} --ssh-key-value \\'{ssh_key}\\' --location {loc} --data-disk-caching ReadOnly --use-unmanaged-disk')\n\n\nclass VMMultiNicScenarioTest(ScenarioTest): # pylint: disable=too-many-instance-attributes\n\n @ResourceGroupPreparer(name_prefix='cli_test_multi_nic_vm')\n def test_vm_create_multi_nics(self, resource_group):\n\n self.kwargs.update({\n 'vnet': 'myvnet',\n 'subnet': 'mysubnet',\n 'vm': 'multinicvm1',\n 'ssh_key': TEST_SSH_KEY_PUB\n })\n\n self.cmd('network vnet create -g {rg} -n {vnet} --subnet-name {subnet}')\n for i in range(1, 5): # create four NICs\n 
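# sequential names (nic1..nic4) let the multi-NIC 'vm create' below list the\n # NICs in a fixed order\n 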
self.kwargs['nic'] = 'nic{}'.format(i)\n self.cmd('network nic create -g {rg} -n {nic} --subnet {subnet} --vnet-name {vnet}')\n\n self.cmd('vm create -g {rg} -n {vm} --image UbuntuLTS --nics nic1 nic2 nic3 nic4 --admin-username user11 --size Standard_DS3 --ssh-key-value \\'{ssh_key}\\' --use-unmanaged-disk')\n self.cmd('vm show -g {rg} -n {vm}', checks=[\n self.check(\"networkProfile.networkInterfaces[0].id.ends_with(@, 'nic1')\", True),\n self.check(\"networkProfile.networkInterfaces[1].id.ends_with(@, 'nic2')\", True),\n self.check(\"networkProfile.networkInterfaces[2].id.ends_with(@, 'nic3')\", True),\n self.check(\"networkProfile.networkInterfaces[3].id.ends_with(@, 'nic4')\", True),\n self.check('length(networkProfile.networkInterfaces)', 4)\n ])\n # cannot alter NICs on a running (or even stopped) VM\n self.cmd('vm deallocate -g {rg} -n {vm}')\n\n self.cmd('vm nic list -g {rg} --vm-name {vm}', checks=[\n self.check('length(@)', 4),\n self.check('[0].primary', True)\n ])\n self.cmd('vm nic show -g {rg} --vm-name {vm} --nic nic1')\n self.cmd('vm nic remove -g {rg} --vm-name {vm} --nics nic4 --primary-nic nic1', checks=[\n self.check('length(@)', 3),\n self.check('[0].primary', True),\n self.check(\"[0].id.contains(@, 'nic1')\", True)\n ])\n self.cmd('vm nic add -g {rg} --vm-name {vm} --nics nic4', checks=[\n self.check('length(@)', 4),\n self.check('[0].primary', True),\n self.check(\"[0].id.contains(@, 'nic1')\", True)\n ])\n self.cmd('vm nic set -g {rg} --vm-name {vm} --nics nic1 nic2 --primary-nic nic2', checks=[\n self.check('length(@)', 2),\n self.check('[1].primary', True),\n self.check(\"[1].id.contains(@, 'nic2')\", True)\n ])\n\n\nclass VMCreateNoneOptionsTest(ScenarioTest): # pylint: disable=too-many-instance-attributes\n\n @ResourceGroupPreparer(name_prefix='cli_test_vm_create_none_options', location='westus')\n def test_vm_create_none_options(self, resource_group):\n\n self.kwargs.update({\n 'vm': 'nooptvm',\n 'loc': 'eastus', # create in different location from RG\n 'quotes': '\"\"' if platform.system() == 'Windows' else \"''\",\n 'ssh_key': TEST_SSH_KEY_PUB\n })\n\n self.cmd('vm create -n {vm} -g {rg} --image Debian --availability-set {quotes} --nsg {quotes} --ssh-key-value \\'{ssh_key}\\' --public-ip-address {quotes} --tags {quotes} --location {loc} --admin-username user11 --use-unmanaged-disk')\n\n self.cmd('vm show -n {vm} -g {rg}', checks=[\n self.check('availabilitySet', None),\n self.check('length(tags)', 0),\n self.check('location', '{loc}')\n ])\n self.cmd('network public-ip show -n {vm}PublicIP -g {rg}',\n expect_failure=True)\n\n\nclass VMBootDiagnostics(ScenarioTest):\n\n @ResourceGroupPreparer(name_prefix='cli_test_vm_diagnostics')\n @StorageAccountPreparer(name_prefix='clitestbootdiag')\n def test_vm_boot_diagnostics(self, resource_group, storage_account):\n\n self.kwargs.update({\n 'vm': 'myvm',\n 'vm2': 'myvm2'\n })\n self.kwargs['storage_uri'] = 'https://{}.blob.core.windows.net/'.format(self.kwargs['sa'])\n\n self.cmd('storage account create -g {rg} -n {sa} --sku Standard_LRS -l westus')\n self.cmd('vm create -n {vm} -g {rg} --image UbuntuLTS --authentication-type password --admin-username user11 --admin-password testPassword0 --use-unmanaged-disk')\n\n self.cmd('vm boot-diagnostics enable -g {rg} -n {vm} --storage {sa}')\n self.cmd('vm show -g {rg} -n {vm}', checks=[\n self.check('diagnosticsProfile.bootDiagnostics.enabled', True),\n self.check('diagnosticsProfile.bootDiagnostics.storageUri', '{storage_uri}')\n ])\n\n # will uncomment after #302 gets 
addressed\n # self.cmd('vm boot-diagnostics get-boot-log -g {rg} -n {vm}')\n self.cmd('vm boot-diagnostics disable -g {rg} -n {vm}')\n self.cmd('vm show -g {rg} -n {vm}',\n checks=self.check('diagnosticsProfile.bootDiagnostics.enabled', False))\n\n # try enable it at the create\n self.cmd('vm create -g {rg} -n {vm2} --image debian --admin-username user11 --admin-password testPassword0 --boot-diagnostics-storage {sa} --use-unmanaged-disk')\n self.cmd('vm show -g {rg} -n {vm2}', checks=[\n self.check('diagnosticsProfile.bootDiagnostics.enabled', True),\n self.check('diagnosticsProfile.bootDiagnostics.storageUri', '{storage_uri}')\n ])\n\n\nclass VMSSExtensionInstallTest(ScenarioTest):\n\n @ResourceGroupPreparer(name_prefix='cli_test_vmss_extension')\n def test_vmss_extension(self):\n\n username = 'myadmin'\n config_file = _write_config_file(username)\n\n self.kwargs.update({\n 'vmss': 'vmss1',\n 'pub': 'Microsoft.Azure.NetworkWatcher',\n 'ext': 'NetworkWatcherAgentLinux',\n 'username': username,\n 'config_file': config_file\n })\n\n self.cmd('vmss create -n {vmss} -g {rg} --image UbuntuLTS --authentication-type password --admin-username admin123 --admin-password testPassword0 --instance-count 1 --use-unmanaged-disk')\n\n try:\n self.cmd('vmss extension set -n {ext} --publisher {pub} --version 1.4 --vmss-name {vmss} --resource-group {rg} --protected-settings \"{config_file}\"')\n self.cmd('vmss extension show --resource-group {rg} --vmss-name {vmss} --name {ext}', checks=[\n self.check('type(@)', 'object'),\n self.check('name', '{ext}'),\n self.check('publisher', '{pub}')\n ])\n finally:\n os.remove(config_file)\n\n\nclass DiagnosticsExtensionInstallTest(ScenarioTest):\n \"\"\"\n Note that this is currently only for a Linux VM. There's currently no test of this feature for a Windows VM.\n \"\"\"\n @ResourceGroupPreparer(name_prefix='cli_test_vm_vmss_diagnostics_extension')\n @StorageAccountPreparer()\n def test_diagnostics_extension_install(self, resource_group, storage_account):\n\n self.kwargs.update({\n 'vm': 'testdiagvm',\n 'vmss': 'testdiagvmss'\n })\n\n self.cmd('vmss create -g {rg} -n {vmss} --image UbuntuLTS --authentication-type password --admin-username user11 --admin-password TestTest12#$ --use-unmanaged-disk')\n self.cmd('vm create -g {rg} -n {vm} --image UbuntuLTS --authentication-type password --admin-username user11 --admin-password TestTest12#$ --use-unmanaged-disk')\n storage_sastoken = '123' # use junk keys, do not retrieve real keys which will get into the recording\n _, protected_settings = tempfile.mkstemp()\n with open(protected_settings, 'w') as outfile:\n json.dump({\n \"storageAccountName\": storage_account,\n \"storageAccountSasToken\": storage_sastoken\n }, outfile)\n self.kwargs['protected_settings'] = protected_settings.replace('\\\\', '\\\\\\\\')\n\n _, public_settings = tempfile.mkstemp()\n curr_dir = os.path.dirname(os.path.realpath(__file__))\n template_file = os.path.join(curr_dir, 'sample-public.json').replace('\\\\', '\\\\\\\\')\n with open(template_file) as data_file:\n data = json.load(data_file)\n data[\"StorageAccount\"] = storage_account\n with open(public_settings, 'w') as outfile:\n json.dump(data, outfile)\n self.kwargs['public_settings'] = public_settings.replace('\\\\', '\\\\\\\\')\n\n checks = [\n self.check('virtualMachineProfile.extensionProfile.extensions[0].name', \"LinuxDiagnostic\"),\n self.check('virtualMachineProfile.extensionProfile.extensions[0].publisher', \"Microsoft.Azure.Diagnostics\"),\n 
self.check('virtualMachineProfile.extensionProfile.extensions[0].settings.StorageAccount', '{sa}'),\n self.check('virtualMachineProfile.extensionProfile.extensions[0].typeHandlerVersion', '3.0')\n ]\n\n self.cmd(\"vmss diagnostics set -g {rg} --vmss-name {vmss} --settings {public_settings} --protected-settings {protected_settings}\", checks=checks)\n self.cmd(\"vmss show -g {rg} -n {vmss}\", checks=checks)\n\n # test standalone VM, we will start with an old version\n self.cmd('vm extension set -g {rg} --vm-name {vm} -n LinuxDiagnostic --version 2.3.9025 --publisher Microsoft.OSTCExtensions --settings {public_settings} --protected-settings {protected_settings}',\n checks=self.check('typeHandlerVersion', '2.3'))\n # see the 'diagnostics set' command upgrades to newer version\n self.cmd(\"vm diagnostics set -g {rg} --vm-name {vm} --settings {public_settings} --protected-settings {protected_settings}\", checks=[\n self.check('name', 'LinuxDiagnostic'),\n self.check('publisher', 'Microsoft.Azure.Diagnostics'),\n self.check('settings.StorageAccount', '{sa}'),\n self.check('typeHandlerVersion', '3.0')\n ])\n\n self.cmd(\"vm show -g {rg} -n {vm}\", checks=[\n self.check('resources[0].name', 'LinuxDiagnostic'),\n self.check('resources[0].publisher', 'Microsoft.Azure.Diagnostics'),\n self.check('resources[0].settings.StorageAccount', '{sa}')\n ])\n\n\nclass VMCreateExistingOptions(ScenarioTest):\n\n @ResourceGroupPreparer(name_prefix='cli_test_vm_create_existing')\n @StorageAccountPreparer()\n def test_vm_create_existing_options(self, resource_group, storage_account):\n\n self.kwargs.update({\n 'availset': 'vrfavailset',\n 'pubip': 'vrfpubip',\n 'vnet': 'vrfvnet',\n 'subnet': 'vrfsubnet',\n 'nsg': 'vrfnsg',\n 'vm': 'vrfvm',\n 'disk': 'vrfosdisk',\n 'container': 'vrfcontainer',\n 'ssh_key': TEST_SSH_KEY_PUB\n })\n\n self.cmd('vm availability-set create --name {availset} -g {rg} --platform-fault-domain-count 3 --platform-update-domain-count 3')\n self.cmd('network public-ip create --name {pubip} -g {rg}')\n self.cmd('network vnet create --name {vnet} -g {rg} --subnet-name {subnet}')\n self.cmd('network nsg create --name {nsg} -g {rg}')\n\n self.cmd('vm create --image UbuntuLTS --os-disk-name {disk} --vnet-name {vnet} --subnet {subnet} --availability-set {availset} --public-ip-address {pubip} -l \"West US\" --nsg {nsg} --use-unmanaged-disk --size Standard_DS2 --admin-username user11 --storage-account {sa} --storage-container-name {container} -g {rg} --name {vm} --ssh-key-value \\'{ssh_key}\\'')\n\n self.cmd('vm availability-set show -n {availset} -g {rg}',\n checks=self.check('virtualMachines[0].id.ends_with(@, \\'{}\\')'.format(self.kwargs['vm'].upper()), True))\n self.cmd('network nsg show -n {nsg} -g {rg}',\n checks=self.check('networkInterfaces[0].id.ends_with(@, \\'{vm}VMNic\\')', True))\n self.cmd('network nic show -n {vm}VMNic -g {rg}',\n checks=self.check('ipConfigurations[0].publicIpAddress.id.ends_with(@, \\'{pubip}\\')', True))\n self.cmd('vm show -n {vm} -g {rg}',\n checks=self.check('storageProfile.osDisk.vhd.uri', 'https://{sa}.blob.core.windows.net/{container}/{disk}.vhd'))\n\n\nclass VMCreateExistingIdsOptions(ScenarioTest):\n\n @ResourceGroupPreparer(name_prefix='cli_test_vm_create_existing_ids')\n @StorageAccountPreparer()\n def test_vm_create_existing_ids_options(self, resource_group, storage_account):\n from azure.cli.core.commands.client_factory import get_subscription_id\n from msrestazure.tools import resource_id, is_valid_resource_id\n\n subscription_id = 
self.get_subscription_id()\n\n self.kwargs.update({\n 'availset': 'vrfavailset',\n 'pubip': 'vrfpubip',\n 'vnet': 'vrfvnet',\n 'subnet': 'vrfsubnet',\n 'nsg': 'vrfnsg',\n 'vm': 'vrfvm',\n 'disk': 'vrfosdisk',\n 'container': 'vrfcontainer',\n 'ssh_key': TEST_SSH_KEY_PUB\n })\n\n self.cmd('vm availability-set create --name {availset} -g {rg} --platform-fault-domain-count 3 --platform-update-domain-count 3')\n self.cmd('network public-ip create --name {pubip} -g {rg}')\n self.cmd('network vnet create --name {vnet} -g {rg} --subnet-name {subnet}')\n self.cmd('network nsg create --name {nsg} -g {rg}')\n\n rg = self.kwargs['rg']\n self.kwargs.update({\n 'availset_id': resource_id(subscription=subscription_id, resource_group=rg, namespace='Microsoft.Compute', type='availabilitySets', name=self.kwargs['availset']),\n 'pubip_id': resource_id(subscription=subscription_id, resource_group=rg, namespace='Microsoft.Network', type='publicIpAddresses', name=self.kwargs['pubip']),\n 'subnet_id': resource_id(subscription=subscription_id, resource_group=rg, namespace='Microsoft.Network', type='virtualNetworks', child_type_1='subnets', name=self.kwargs['vnet'], child_name_1=self.kwargs['subnet']),\n 'nsg_id': resource_id(subscription=subscription_id, resource_group=rg, namespace='Microsoft.Network', type='networkSecurityGroups', name=self.kwargs['nsg'])\n })\n\n assert is_valid_resource_id(self.kwargs['availset_id'])\n assert is_valid_resource_id(self.kwargs['pubip_id'])\n assert is_valid_resource_id(self.kwargs['subnet_id'])\n assert is_valid_resource_id(self.kwargs['nsg_id'])\n\n self.cmd('vm create --image UbuntuLTS --os-disk-name {disk} --subnet {subnet_id} --availability-set {availset_id} --public-ip-address {pubip_id} -l \"West US\" --nsg {nsg_id} --use-unmanaged-disk --size Standard_DS2 --admin-username user11 --storage-account {sa} --storage-container-name {container} -g {rg} --name {vm} --ssh-key-value \\'{ssh_key}\\'')\n\n self.cmd('vm availability-set show -n {availset} -g {rg}',\n checks=self.check('virtualMachines[0].id.ends_with(@, \\'{}\\')'.format(self.kwargs['vm'].upper()), True))\n self.cmd('network nsg show -n {nsg} -g {rg}',\n checks=self.check('networkInterfaces[0].id.ends_with(@, \\'{vm}VMNic\\')', True))\n self.cmd('network nic show -n {vm}VMNic -g {rg}',\n checks=self.check('ipConfigurations[0].publicIpAddress.id.ends_with(@, \\'{pubip}\\')', True))\n self.cmd('vm show -n {vm} -g {rg}',\n checks=self.check('storageProfile.osDisk.vhd.uri', 'https://{sa}.blob.core.windows.net/{container}/{disk}.vhd'))\n\n\nclass VMUnmanagedDataDiskTest(ScenarioTest):\n\n @ResourceGroupPreparer(name_prefix='cli-test-disk')\n def test_vm_data_unmanaged_disk(self, resource_group):\n\n self.kwargs.update({\n 'loc': 'westus',\n 'vm': 'vm-datadisk-test',\n 'disk': 'd7'\n })\n\n self.cmd('vm create -g {rg} --location {loc} -n {vm} --admin-username ubuntu --image UbuntuLTS --admin-password testPassword0 --authentication-type password --use-unmanaged-disk')\n\n # check we have no data disk\n result = self.cmd('vm show -g {rg} -n {vm}',\n checks=self.check('length(storageProfile.dataDisks)', 0)).get_output_in_json()\n\n # get the vhd uri from VM's storage_profile\n blob_uri = result['storageProfile']['osDisk']['vhd']['uri']\n\n self.kwargs['vhd_uri'] = blob_uri[0:blob_uri.rindex('/') + 1] + 'd7.vhd'\n\n # now attach\n self.cmd('vm unmanaged-disk attach -g {rg} --vm-name {vm} -n {disk} --vhd {vhd_uri} --new --caching ReadWrite --size-gb 8 --lun 1')\n # check we have a data disk\n self.cmd('vm show -g {rg} -n {vm}', 
checks=[\n self.check('length(storageProfile.dataDisks)', 1),\n self.check('storageProfile.dataDisks[0].caching', 'ReadWrite'),\n self.check('storageProfile.dataDisks[0].lun', 1),\n self.check('storageProfile.dataDisks[0].diskSizeGb', 8),\n self.check('storageProfile.dataDisks[0].createOption', 'Empty'),\n self.check('storageProfile.dataDisks[0].vhd.uri', '{vhd_uri}'),\n self.check('storageProfile.dataDisks[0].name', '{disk}')\n ])\n\n # now detach\n self.cmd('vm unmanaged-disk detach -g {rg} --vm-name {vm} -n {disk}')\n\n # check we have no data disk\n self.cmd('vm show -g {rg} -n {vm}',\n checks=self.check('length(storageProfile.dataDisks)', 0))\n\n # now attach to existing\n self.cmd('vm unmanaged-disk attach -g {rg} --vm-name {vm} -n {disk} --vhd {vhd_uri} --caching ReadOnly', checks=[\n self.check('storageProfile.dataDisks[0].name', '{disk}'),\n self.check('storageProfile.dataDisks[0].createOption', 'Attach')\n ])\n\n\nclass VMCreateCustomDataScenarioTest(ScenarioTest):\n\n @ResourceGroupPreparer(name_prefix='cli_test_create_vm_custom_data')\n def test_vm_create_custom_data(self, resource_group):\n\n self.kwargs.update({\n 'deployment': 'azurecli-test-dep-vm-create-custom-data',\n 'username': 'ubuntu',\n 'loc': 'westus',\n 'image': 'UbuntuLTS',\n 'auth': 'ssh',\n 'vm': 'vm-name',\n 'custom_data': '#cloud-config\\nhostname: myVMhostname',\n 'ssh_key': TEST_SSH_KEY_PUB\n })\n\n self.cmd('vm create -g {rg} -n {vm} --admin-username {username} --authentication-type {auth} --image {image} --ssh-key-value \\'{ssh_key}\\' -l {loc} --custom-data \\'{custom_data}\\' --use-unmanaged-disk')\n\n # custom data is write only, hence we have no automatic way to cross check. Here we just verify VM was provisioned\n self.cmd('vm show -g {rg} -n {vm}',\n checks=self.check('provisioningState', 'Succeeded'))\n\n\n# region VMSS Tests\n\nclass VMSSCreateAndModify(ScenarioTest):\n\n @ResourceGroupPreparer(name_prefix='cli_test_vmss_create_and_modify')\n def test_vmss_create_and_modify(self):\n\n self.kwargs.update({\n 'vmss': 'vmss1',\n 'count': 5,\n 'new_count': 4\n })\n\n self.cmd('vmss create --admin-password testPassword0 --name {vmss} -g {rg} --admin-username myadmin --image Win2012R2Datacenter --instance-count {count} --use-unmanaged-disk')\n\n self.cmd('vmss show --name {vmss} -g {rg}', checks=[\n self.check('virtualMachineProfile.priority', None)\n ])\n\n self.cmd('vmss list',\n checks=self.check('type(@)', 'array'))\n\n self.cmd('vmss list --resource-group {rg}', checks=[\n self.check('type(@)', 'array'),\n self.check('length(@)', 1),\n self.check('[0].name', '{vmss}'),\n self.check('[0].resourceGroup', '{rg}')\n ])\n self.cmd('vmss list-skus --resource-group {rg} --name {vmss}',\n checks=self.check('type(@)', 'array'))\n self.cmd('vmss show --resource-group {rg} --name {vmss}', checks=[\n self.check('type(@)', 'object'),\n self.check('name', '{vmss}'),\n self.check('resourceGroup', '{rg}')\n ])\n result = self.cmd('vmss list-instances --resource-group {rg} --name {vmss} --query \"[].instanceId\"').get_output_in_json()\n self.kwargs['instance_ids'] = result[3] + ' ' + result[4]\n self.cmd('vmss update-instances --resource-group {rg} --name {vmss} --instance-ids {instance_ids}')\n self.cmd('vmss get-instance-view --resource-group {rg} --name {vmss}', checks=[\n self.check('type(@)', 'object'),\n self.check('type(virtualMachine)', 'object'),\n self.check('type(statuses)', 'array')\n ])\n\n self.cmd('vmss stop --resource-group {rg} --name {vmss}')\n self.cmd('vmss start --resource-group {rg} --name 
{vmss}')\n self.cmd('vmss restart --resource-group {rg} --name {vmss}')\n\n self.cmd('vmss scale --resource-group {rg} --name {vmss} --new-capacity {new_count}')\n self.cmd('vmss show --resource-group {rg} --name {vmss}', checks=[\n self.check('sku.capacity', '{new_count}'),\n self.check('virtualMachineProfile.osProfile.windowsConfiguration.enableAutomaticUpdates', True)\n ])\n\n result = self.cmd('vmss list-instances --resource-group {rg} --name {vmss} --query \"[].instanceId\"').get_output_in_json()\n self.kwargs['instance_ids'] = result[2] + ' ' + result[3]\n self.cmd('vmss delete-instances --resource-group {rg} --name {vmss} --instance-ids {instance_ids}')\n self.cmd('vmss get-instance-view --resource-group {rg} --name {vmss}', checks=[\n self.check('type(@)', 'object'),\n self.check('type(virtualMachine)', 'object'),\n self.check('virtualMachine.statusesSummary[0].count', self.kwargs['new_count'] - 2)\n ])\n self.cmd('vmss deallocate --resource-group {rg} --name {vmss}')\n self.cmd('vmss delete --resource-group {rg} --name {vmss}')\n self.cmd('vmss list --resource-group {rg}', checks=self.is_empty())\n\n\nclass VMSSCreateBalancerOptionsTest(ScenarioTest): # pylint: disable=too-many-instance-attributes\n\n @ResourceGroupPreparer(name_prefix='cli_test_vmss_create_none')\n def test_vmss_create_none_options(self, resource_group):\n self.kwargs.update({\n 'vmss': 'vmss1',\n 'ssh_key': TEST_SSH_KEY_PUB,\n 'quotes': '\"\"' if platform.system() == 'Windows' else \"''\"\n })\n self.cmd('vmss create -n {vmss} -g {rg} --image Debian --load-balancer {quotes} --admin-username ubuntu --ssh-key-value \\'{ssh_key}\\' --public-ip-address {quotes} --tags {quotes} --vm-sku Basic_A1 --use-unmanaged-disk')\n self.cmd('vmss show -n {vmss} -g {rg}', checks=[\n self.check('tags', {}),\n self.check('virtualMachineProfile.networkProfile.networkInterfaceConfigurations.ipConfigurations.loadBalancerBackendAddressPools', None),\n self.check('sku.name', 'Basic_A1'),\n self.check('sku.tier', 'Basic')\n ])\n self.cmd('vmss update -g {rg} -n {vmss} --set tags.test=success',\n checks=self.check('tags.test', 'success'))\n self.cmd('network public-ip show -n {vmss}PublicIP -g {rg}',\n expect_failure=True)\n\n @ResourceGroupPreparer(name_prefix='cli_test_vmss_create_w_ag')\n def test_vmss_create_with_app_gateway(self, resource_group):\n self.kwargs.update({\n 'vmss': 'vmss1',\n 'ssh_key': TEST_SSH_KEY_PUB\n })\n self.cmd(\"vmss create -n {vmss} -g {rg} --image Debian --admin-username clittester --ssh-key-value '{ssh_key}' --app-gateway apt1 --instance-count 5 --use-unmanaged-disk\",\n checks=self.check('vmss.provisioningState', 'Succeeded'))\n # spot check it is using gateway\n self.cmd('vmss show -n {vmss} -g {rg}', checks=[\n self.check('sku.capacity', 5),\n self.check('virtualMachineProfile.networkProfile.networkInterfaceConfigurations[0].ipConfigurations[0].applicationGatewayBackendAddressPools[0].resourceGroup', '{rg}')\n ])\n\n @ResourceGroupPreparer()\n def test_vmss_create_default_app_gateway(self, resource_group):\n self.kwargs.update({\n 'vmss': 'vmss1'\n })\n\n res = self.cmd(\"vmss create -g {rg} --name {vmss} --validate --image UbuntuLTS --disable-overprovision --instance-count 101 --single-placement-group false \"\n \"--admin-username ubuntuadmin --generate-ssh-keys --use-unmanaged-disk\").get_output_in_json()\n # Ensure generated template is valid. 
\"Quota Exceeding\" is expected on most subscriptions, so we allow that.\n self.assertTrue(not res['error'] or (res['error']['details'][0]['code'] == 'QuotaExceeded'))\n\n @ResourceGroupPreparer(name_prefix='cli_test_vmss_create_existing_lb')\n def test_vmss_existing_lb(self, resource_group):\n self.kwargs.update({\n 'vmss': 'vmss1',\n 'lb': 'lb1'\n })\n self.cmd('network lb create -g {rg} -n {lb} --backend-pool-name test')\n self.cmd('vmss create -g {rg} -n {vmss} --load-balancer {lb} --image UbuntuLTS --admin-username clitester --admin-password TestTest12#$ --use-unmanaged-disk')\n\n\nclass SecretsScenarioTest(ScenarioTest): # pylint: disable=too-many-instance-attributes\n\n @ResourceGroupPreparer(name_prefix='cli_test_vm_secrets')\n def test_vm_create_linux_secrets(self, resource_group, resource_group_location):\n\n self.kwargs.update({\n 'admin': 'ubuntu',\n 'loc': 'westus',\n 'image': 'UbuntuLTS',\n 'auth': 'ssh',\n 'ssh_key': TEST_SSH_KEY_PUB,\n 'vm': 'vm-name',\n 'secrets': json.dumps([{'sourceVault': {'id': 'id'}, 'vaultCertificates': []}]),\n 'vault': self.create_random_name('vmlinuxkv', 20)\n })\n\n # TODO: Re-enable when issue #5155 is resolved.\n # message = 'Secret is missing vaultCertificates array or it is empty at index 0'\n self.cmd('vm create -g {rg} -n {vm} --admin-username {admin} --authentication-type {auth} --image {image} --ssh-key-value \\'{ssh_key}\\' -l {loc} --secrets \\'{secrets}\\' --use-unmanaged-disk', expect_failure=True)\n\n vault_out = self.cmd('keyvault create -g {rg} -n {vault} -l {loc} --enabled-for-deployment true --enabled-for-template-deployment true').get_output_in_json()\n\n time.sleep(60)\n\n self.kwargs['policy_path'] = os.path.join(TEST_DIR, 'keyvault', 'policy.json')\n self.cmd('keyvault certificate create --vault-name {vault} -n cert1 -p @\"{policy_path}\"')\n self.kwargs['secret_out'] = self.cmd('keyvault secret list-versions --vault-name {vault} -n cert1 --query \"[?attributes.enabled].id\" -o tsv').output.strip()\n vm_format = self.cmd('vm secret format -s {secret_out}').get_output_in_json()\n self.kwargs['secrets'] = json.dumps(vm_format)\n\n self.cmd('vm create -g {rg} -n {vm} --admin-username {admin} --authentication-type {auth} --image {image} --ssh-key-value \\'{ssh_key}\\' -l {loc} --secrets \\'{secrets}\\' --use-unmanaged-disk')\n\n self.cmd('vm show -g {rg} -n {vm}', checks=[\n self.check('provisioningState', 'Succeeded'),\n self.check('osProfile.secrets[0].sourceVault.id', vault_out['id']),\n self.check('osProfile.secrets[0].vaultCertificates[0].certificateUrl', '{secret_out}')\n ])\n\n @ResourceGroupPreparer()\n def test_vm_create_windows_secrets(self, resource_group, resource_group_location):\n\n self.kwargs.update({\n 'admin': 'windowsUser',\n 'loc': 'westus',\n 'image': 'Win2012R2Datacenter',\n 'vm': 'vm-name',\n 'secrets': json.dumps([{'sourceVault': {'id': 'id'}, 'vaultCertificates': [{'certificateUrl': 'certurl'}]}]),\n 'vault': self.create_random_name('vmkeyvault', 20)\n })\n\n # TODO: Re-enable when issue #5155 is resolved.\n # message = 'Secret is missing certificateStore within vaultCertificates array at secret index 0 and ' \\\n # 'vaultCertificate index 0'\n self.cmd('vm create -g {rg} -n {vm} --admin-username {admin} --admin-password VerySecret!12 --image {image} -l {loc} --secrets \\'{secrets}\\' --use-unmanaged-disk', expect_failure=True)\n\n vault_out = self.cmd(\n 'keyvault create -g {rg} -n {vault} -l {loc} --enabled-for-deployment true --enabled-for-template-deployment true').get_output_in_json()\n\n 
time.sleep(60)\n\n self.kwargs['policy_path'] = os.path.join(TEST_DIR, 'keyvault', 'policy.json')\n self.cmd('keyvault certificate create --vault-name {vault} -n cert1 -p @\"{policy_path}\"')\n\n self.kwargs['secret_out'] = self.cmd('keyvault secret list-versions --vault-name {vault} -n cert1 --query \"[?attributes.enabled].id\" -o tsv').output.strip()\n self.kwargs['secrets'] = self.cmd('vm secret format -s {secret_out} --certificate-store \"My\"').get_output_in_json()\n\n self.cmd('vm create -g {rg} -n {vm} --admin-username {admin} --admin-password VerySecret!12 --image {image} -l {loc} --secrets \"{secrets}\" --use-unmanaged-disk')\n\n self.cmd('vm show -g {rg} -n {vm}', checks=[\n self.check('provisioningState', 'Succeeded'),\n self.check('osProfile.secrets[0].sourceVault.id', vault_out['id']),\n self.check('osProfile.secrets[0].vaultCertificates[0].certificateUrl', self.kwargs['secret_out']),\n self.check('osProfile.secrets[0].vaultCertificates[0].certificateStore', 'My')\n ])\n\n\nclass VMSSCreateLinuxSecretsScenarioTest(ScenarioTest):\n\n @ResourceGroupPreparer(name_prefix='cli_test_vmss_create_linux_secrets')\n @AllowLargeResponse()\n def test_vmss_create_linux_secrets(self, resource_group):\n self.kwargs.update({\n 'loc': 'westus',\n 'vmss': 'vmss1-name',\n 'secrets': json.dumps([{'sourceVault': {'id': 'id'}, 'vaultCertificates': []}]),\n 'vault': 'vmcreatelinuxsecret3334',\n 'secret': 'mysecret',\n 'ssh_key': TEST_SSH_KEY_PUB\n })\n\n vault_out = self.cmd('keyvault create -g {rg} -n {vault} -l {loc} --enabled-for-deployment true --enabled-for-template-deployment true').get_output_in_json()\n\n time.sleep(60)\n\n self.kwargs['policy_path'] = os.path.join(TEST_DIR, 'keyvault', 'policy.json')\n self.cmd('keyvault certificate create --vault-name {vault} -n cert1 -p @\"{policy_path}\"')\n\n self.kwargs['secret_out'] = self.cmd('keyvault secret list-versions --vault-name {vault} -n cert1 --query \"[?attributes.enabled].id\" -o tsv').output.strip()\n vm_format = self.cmd('vm secret format -s {secret_out}').get_output_in_json()\n self.kwargs['secrets'] = json.dumps(vm_format)\n\n self.cmd('vmss create -n {vmss} -g {rg} --image Debian --admin-username deploy --ssh-key-value \\'{ssh_key}\\' --secrets \\'{secrets}\\' --use-unmanaged-disk')\n\n self.cmd('vmss show -n {vmss} -g {rg}', checks=[\n self.check('provisioningState', 'Succeeded'),\n self.check('virtualMachineProfile.osProfile.secrets[0].sourceVault.id', vault_out['id']),\n self.check('virtualMachineProfile.osProfile.secrets[0].vaultCertificates[0].certificateUrl', '{secret_out}')\n ])\n\n\nclass VMSSCreateExistingOptions(ScenarioTest):\n\n @ResourceGroupPreparer(name_prefix='cli_test_vmss_create_existing_options')\n def test_vmss_create_existing_options(self):\n\n self.kwargs.update({\n 'vmss': 'vrfvmss',\n 'os_disk': 'vrfosdisk',\n 'container': 'vrfcontainer',\n 'sku': 'Standard_A3',\n 'vnet': 'vrfvnet',\n 'subnet': 'vrfsubnet',\n 'lb': 'vrflb',\n 'bepool': 'mybepool',\n 'ssh_key': TEST_SSH_KEY_PUB\n })\n\n self.cmd('network vnet create -n {vnet} -g {rg} --subnet-name {subnet}')\n self.cmd('network lb create --name {lb} -g {rg} --backend-pool-name {bepool}')\n\n self.cmd('vmss create --image CentOS --os-disk-name {os_disk} --admin-username ubuntu --vnet-name {vnet} --subnet {subnet} -l \"West US\" --vm-sku {sku} --storage-container-name {container} -g {rg} --name {vmss} --load-balancer {lb} --ssh-key-value \\'{ssh_key}\\' --backend-pool-name {bepool} --use-unmanaged-disk')\n self.cmd('vmss show --name {vmss} -g {rg}', checks=[\n 
self.check('sku.name', '{sku}'),\n self.check('virtualMachineProfile.storageProfile.osDisk.name', '{os_disk}'),\n self.check('virtualMachineProfile.storageProfile.osDisk.vhdContainers[0].ends_with(@, \\'{container}\\')', True)\n ])\n self.cmd('network lb show --name {lb} -g {rg}',\n checks=self.check('backendAddressPools[0].backendIpConfigurations[0].id.contains(@, \\'{vmss}\\')', True))\n self.cmd('network vnet show --name {vnet} -g {rg}',\n checks=self.check('subnets[0].ipConfigurations[0].id.contains(@, \\'{vmss}\\')', True))\n\n\nclass VMSSCreateExistingIdsOptions(ScenarioTest):\n\n @ResourceGroupPreparer(name_prefix='cli_test_vmss_create_existing_ids')\n def test_vmss_create_existing_ids_options(self, resource_group):\n\n from msrestazure.tools import resource_id, is_valid_resource_id\n subscription_id = self.get_subscription_id()\n\n self.kwargs.update({\n 'vmss': 'vrfvmss',\n 'os_disk': 'vrfosdisk',\n 'container': 'vrfcontainer',\n 'sku': 'Standard_A3',\n 'vnet': 'vrfvnet',\n 'subnet': 'vrfsubnet',\n 'lb': 'vrflb',\n 'bepool': 'mybepool',\n 'ssh_key': TEST_SSH_KEY_PUB\n })\n\n self.cmd('network vnet create -n {vnet} -g {rg} --subnet-name {subnet}')\n self.cmd('network lb create --name {lb} -g {rg} --backend-pool-name {bepool}')\n\n self.kwargs.update({\n 'subnet_id': resource_id(subscription=subscription_id, resource_group=resource_group, namespace='Microsoft.Network', type='virtualNetworks', child_type_1='subnets', name=self.kwargs['vnet'], child_name_1=self.kwargs['subnet']),\n 'lb_id': resource_id(subscription=subscription_id, resource_group=resource_group, namespace='Microsoft.Network', type='loadBalancers', name=self.kwargs['lb'])\n })\n\n assert is_valid_resource_id(self.kwargs['subnet_id'])\n assert is_valid_resource_id(self.kwargs['lb_id'])\n\n self.cmd('vmss create --image CentOS --os-disk-name {os_disk} --admin-username ubuntu --subnet {subnet_id} -l \"West US\" --vm-sku {sku} --storage-container-name {container} -g {rg} --name {vmss} --load-balancer {lb_id} --ssh-key-value \\'{ssh_key}\\' --backend-pool-name {bepool} --use-unmanaged-disk')\n self.cmd('vmss show --name {vmss} -g {rg}', checks=[\n self.check('sku.name', '{sku}'),\n self.check('virtualMachineProfile.storageProfile.osDisk.name', '{os_disk}'),\n self.check('virtualMachineProfile.storageProfile.osDisk.vhdContainers[0].ends_with(@, \\'{container}\\')', True)\n ])\n self.cmd('network lb show --name {lb} -g {rg}',\n checks=self.check('backendAddressPools[0].backendIpConfigurations[0].id.contains(@, \\'{vmss}\\')', True))\n self.cmd('network vnet show --name {vnet} -g {rg}',\n checks=self.check('subnets[0].ipConfigurations[0].id.contains(@, \\'{vmss}\\')', True))\n\n\nclass VMSSVMsScenarioTest(ScenarioTest):\n\n def _check_vms_power_state(self, *args):\n for iid in self.kwargs['instance_ids']:\n result = self.cmd('vmss get-instance-view --resource-group {{rg}} --name {{vmss}} --instance-id {}'.format(iid)).get_output_in_json()\n self.assertTrue(result['statuses'][1]['code'] in args)\n\n @ResourceGroupPreparer(name_prefix='cli_test_vmss_vms')\n def test_vmss_vms(self, resource_group):\n\n self.kwargs.update({\n 'vmss': 'vmss1',\n 'count': 2,\n 'instance_ids': []\n })\n\n self.cmd('vmss create -g {rg} -n {vmss} --image UbuntuLTS --authentication-type password --admin-username admin123 --admin-password TestTest12#$ --instance-count {count} --use-unmanaged-disk')\n\n instance_list = self.cmd('vmss list-instances --resource-group {rg} --name {vmss}', checks=[\n self.check('type(@)', 'array'),\n self.check('length(@)', 
'{count}'),\n self.check(\"length([].name.starts_with(@, '{vmss}'))\", self.kwargs['count'])\n ]).get_output_in_json()\n\n self.kwargs['instance_ids'] = [x['instanceId'] for x in instance_list]\n self.kwargs['id'] = self.kwargs['instance_ids'][0]\n\n self.cmd('vmss show --resource-group {rg} --name {vmss} --instance-id {id}', checks=[\n self.check('type(@)', 'object'),\n self.check('instanceId', '{id}')\n ])\n result = self.cmd('vmss list-instance-connection-info --resource-group {rg} --name {vmss}').get_output_in_json()\n # connection-info values are expected to look like 'ip:port'; inbound NAT front-end ports start at 50000\n self.assertTrue(result['instance 0'].split(':')[1].startswith('5000'))\n self.cmd('vmss restart --resource-group {rg} --name {vmss} --instance-ids *')\n self._check_vms_power_state('PowerState/running', 'PowerState/starting')\n self.cmd('vmss stop --resource-group {rg} --name {vmss} --instance-ids *')\n self._check_vms_power_state('PowerState/stopped')\n self.cmd('vmss start --resource-group {rg} --name {vmss} --instance-ids *')\n self._check_vms_power_state('PowerState/running', 'PowerState/starting')\n self.cmd('vmss deallocate --resource-group {rg} --name {vmss} --instance-ids *')\n self._check_vms_power_state('PowerState/deallocated')\n self.cmd('vmss delete-instances --resource-group {rg} --name {vmss} --instance-ids *')\n self.cmd('vmss list-instances --resource-group {rg} --name {vmss}')\n\n\nclass VMSSCustomDataScenarioTest(ScenarioTest):\n\n @ResourceGroupPreparer(name_prefix='cli_test_vmss_create_custom_data')\n def test_vmss_create_custom_data(self):\n\n self.kwargs.update({\n 'vmss': 'vmss-custom-data',\n 'ssh_key': TEST_SSH_KEY_PUB\n })\n\n self.cmd('vmss create -n {vmss} -g {rg} --image Debian --admin-username deploy --ssh-key-value \"{ssh_key}\" --custom-data \"#cloud-config\\nhostname: myVMhostname\" --use-unmanaged-disk')\n\n # custom data is write only, hence we have no automatic way to cross check. Here we just verify VM was provisioned\n self.cmd('vmss show -n {vmss} -g {rg}',\n checks=self.check('provisioningState', 'Succeeded'))\n\n\nclass VMSSNicScenarioTest(ScenarioTest):\n\n @ResourceGroupPreparer(name_prefix='cli_test_vmss_nics')\n def test_vmss_nics(self):\n\n self.kwargs.update({\n 'vmss': 'vmss1',\n 'iid': 0\n })\n\n self.cmd('vmss create -g {rg} -n {vmss} --authentication-type password --admin-username admin123 --admin-password PasswordPassword1! --image Win2012R2Datacenter --use-unmanaged-disk')\n\n self.cmd('vmss nic list -g {rg} --vmss-name {vmss}', checks=[\n self.check('type(@)', 'array'),\n self.check(\"length([?resourceGroup == '{rg}']) == length(@)\", True)\n ])\n nic_list = self.cmd('vmss nic list-vm-nics -g {rg} --vmss-name {vmss} --instance-id {iid}', checks=[\n self.check('type(@)', 'array'),\n self.check(\"length([?resourceGroup == '{rg}']) == length(@)\", True)\n ]).get_output_in_json()\n self.kwargs['nic'] = nic_list[0].get('name')\n self.cmd('vmss nic show --resource-group {rg} --vmss-name {vmss} --instance-id {iid} -n {nic}', checks=[\n self.check('type(@)', 'object'),\n self.check('name', '{nic}'),\n self.check('resourceGroup', '{rg}')\n ])\n\n\nclass VMSSCreateIdempotentTest(ScenarioTest):\n\n @ResourceGroupPreparer(name_prefix='cli_test_vmss_create_idempotent')\n def test_vmss_create_idempotent(self, resource_group):\n\n self.kwargs.update({'vmss': 'vmss1'})\n\n # run the command twice with the same parameters and verify it does not fail\n self.cmd('vmss create -g {rg} -n {vmss} --authentication-type password --admin-username admin123 --admin-password PasswordPassword1! 
--image UbuntuLTS --use-unmanaged-disk')\n self.cmd('vmss create -g {rg} -n {vmss} --authentication-type password --admin-username admin123 --admin-password PasswordPassword1! --image UbuntuLTS --use-unmanaged-disk')\n\n # still 1 vnet and 1 subnet inside\n self.cmd('network vnet list -g {rg}', checks=[\n self.check('length([])', 1),\n self.check('[0].name', self.kwargs['vmss'] + 'VNET'),\n self.check('[0].subnets[0].addressPrefix', '10.0.0.0/24'),\n self.check('length([0].subnets)', 1),\n ])\n\n\nclass VMSSILBTest(ScenarioTest):\n\n @ResourceGroupPreparer(name_prefix='cli_test_vmss_ilb')\n def test_vmss_with_ilb(self, resource_group):\n\n self.kwargs.update({'vmss': 'vmss1'})\n\n self.cmd('vmss create -g {rg} -n {vmss} --admin-username admin123 --admin-password PasswordPassword1! --image centos --instance-count 1 --public-ip-address \"\" --use-unmanaged-disk')\n # TODO: restore error validation when #5155 is addressed\n # with self.assertRaises(AssertionError) as err:\n self.cmd('vmss list-instance-connection-info -g {rg} -n {vmss}', expect_failure=True)\n # self.assertTrue('internal load balancer' in str(err.exception))\n\n\nclass VMLiveScenarioTest(LiveScenarioTest):\n\n @ResourceGroupPreparer(name_prefix='cli_test_vm_create_progress')\n def test_vm_create_progress(self, resource_group):\n from azure.cli.testsdk.utilities import force_progress_logging\n\n self.kwargs.update({'vm': 'vm123'})\n\n with force_progress_logging() as test_io:\n self.cmd('vm create -g {rg} -n {vm} --admin-username {vm} --admin-password PasswordPassword1! --image debian --use-unmanaged-disk')\n\n content = test_io.getvalue()\n # check the log lines have the expected format\n lines = content.splitlines()\n for line in lines:\n self.assertTrue(line.split(':')[0] in ['Accepted', 'Succeeded'])\n # spot check we do have some relevant progress messages coming out\n # (Note: the CLI's progress controller sleeps routinely before sampling the LRO response,\n # so it cannot promise that each resource's result will be displayed.)\n self.assertTrue(any(line.startswith('Accepted:') for line in lines))\n self.assertTrue(any(line.startswith('Succeeded:') for line in lines))\n\n\n# convert to ScenarioTest and re-record when issue #6006 is fixed\nclass VMLBIntegrationTesting(ScenarioTest):\n\n @ResourceGroupPreparer(name_prefix='cli_test_vm_lb_integration')\n def test_vm_lb_integration(self, resource_group):\n\n self.kwargs.update({\n 'lb': 'lb1',\n 'vm1': 'vm1',\n 'vm2': 'vm2',\n 'avset': 'av1'\n })\n # provision 2 web servers\n self.cmd('vm availability-set create -g {rg} -n {avset}')\n self.cmd('vm create -g {rg} -n {vm1} --image ubuntults --public-ip-address \"\" --availability-set {avset} --generate-ssh-keys --admin-username ubuntuadmin --use-unmanaged-disk')\n self.cmd('vm open-port -g {rg} -n {vm1} --port 80')\n self.cmd('vm create -g {rg} -n {vm2} --image ubuntults --public-ip-address \"\" --availability-set {avset} --generate-ssh-keys --admin-username ubuntuadmin --use-unmanaged-disk')\n self.cmd('vm open-port -g {rg} -n {vm2} --port 80')\n\n # provision 1 LB\n self.cmd('network lb create -g {rg} -n {lb}')\n\n # create LB probe and rule\n self.cmd('network lb probe create -g {rg} --lb-name {lb} -n probe1 --protocol Http --port 80 --path /')\n self.cmd('network lb rule create -g {rg} --lb-name {lb} -n rule1 --protocol Tcp --frontend-port 80 --backend-port 80')\n\n # add the 2 VMs' NICs into the backend pool\n self.cmd('network nic ip-config address-pool add -g {rg} --lb-name {lb} --address-pool {lb}bepool --nic-name {vm1}VMNic --ip-config-name ipconfig{vm1}')\n 
self.cmd('network nic ip-config address-pool add -g {rg} --lb-name {lb} --address-pool {lb}bepool --nic-name {vm2}VMNic --ip-config-name ipconfig{vm2}')\n\n # Create Inbound Nat Rules so each VM can be accessed through SSH\n self.cmd('network lb inbound-nat-rule create -g {rg} --lb-name {lb} -n inbound-nat-rule1 --frontend-port 50000 --backend-port 22 --protocol Tcp')\n self.cmd('network nic ip-config inbound-nat-rule add -g {rg} --lb-name {lb} --nic-name {vm1}VMNic --ip-config-name ipconfig{vm1} --inbound-nat-rule inbound-nat-rule1')\n self.cmd('network lb inbound-nat-rule create -g {rg} --lb-name {lb} -n inbound-nat-rule2 --frontend-port 50001 --backend-port 22 --protocol Tcp')\n self.cmd('network nic ip-config inbound-nat-rule add -g {rg} --lb-name {lb} --nic-name {vm2}VMNic --ip-config-name ipconfig{vm2} --inbound-nat-rule inbound-nat-rule2')\n\n\nclass VMSecretTest(ScenarioTest):\n\n @ResourceGroupPreparer(name_prefix='cli_test_vm_secrets')\n def test_vm_secret_e2e_test(self, resource_group, resource_group_location):\n self.kwargs.update({\n 'vm': 'vm1',\n 'vault': self.create_random_name('vmsecretkv', 20),\n 'cert': 'vm-secrt-cert',\n 'loc': resource_group_location\n })\n\n vault_result = self.cmd('keyvault create -g {rg} -n {vault} -l {loc} --enabled-for-disk-encryption true --enabled-for-deployment true').get_output_in_json()\n self.kwargs['policy_path'] = os.path.join(TEST_DIR, 'keyvault', 'policy.json')\n\n self.cmd('vm create -g {rg} -n {vm} --image rhel --generate-ssh-keys --admin-username rheladmin --use-unmanaged-disk')\n time.sleep(60) # ensure we don't hit the DNS exception (ignored under playback)\n\n self.cmd('keyvault certificate create --vault-name {vault} -n {cert} -p @\"{policy_path}\"')\n secret_result = self.cmd('vm secret add -g {rg} -n {vm} --keyvault {vault} --certificate {cert}', checks=[\n self.check('length([])', 1),\n self.check('[0].sourceVault.id', vault_result['id']),\n self.check('length([0].vaultCertificates)', 1),\n ]).get_output_in_json()\n self.assertTrue('https://{vault}.vault.azure.net/secrets/{cert}/'.format(**self.kwargs) in secret_result[0]['vaultCertificates'][0]['certificateUrl'])\n self.cmd('vm secret list -g {rg} -n {vm}')\n self.cmd('vm secret remove -g {rg} -n {vm} --keyvault {vault} --certificate {cert}')\n\n self.cmd('vm secret list -g {rg} -n {vm}',\n checks=self.check('length([])', 0))\n\n\n# endregion\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"src/command_modules/azure-cli-vm/azure/cli/command_modules/vm/tests/profile_2017_03_09/test_vm_commands.py","file_name":"test_vm_commands.py","file_ext":"py","file_size_in_byte":73639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"340476282","text":"import arrow\nimport requests\nfrom models import *\nimport config\n\nhashtags = [\"rf17\", \"rf2017\", \"roskilde\", \"orangefeeling\", \"rfrising\"]\n\ntime = arrow.utcnow().replace(minutes=-90)\ntime = time.to('Europe/Copenhagen').format('YYYY-MM-DD HH:mm:ss')\nprint(\"Loading from %s\\n\" % time)\ntime = \"2017-06-01 23:04:45\"\n\nurl = \"http://graflr.co/api/export-hashtags/\"\ntoken = config.api_token\ndb_eng = db_connect()\ndb_session = create_db_session(db_eng)\n# check_index()\n\nfor h in hashtags:\n print(\"\\n\\nGetting hashtag: %s\\n\" % h)\n req = url + h + \"?access_token=\" + token + \"&from=\" + time\n data = requests.get(req).json()\n for d in data:\n print(\"Loading %s\" % d.get(\"media_id\"))\n try:\n d[\"hashtags\"] = [i for i in 
d.get(\"media_text\").split() if i.startswith(\"#\")]\n except:\n d[\"hashtags\"] = []\n p = arrow.get(d.get(\"created_time\")).format(\"YYYY-MM-DDTHH:mm:ss+02:00\")\n insta = Insta(media_id=d.get(\"media_id\"),\n user_id=d.get(\"user_id\"),\n username=d.get(\"username\"),\n fullname=d.get(\"full_name\"),\n predicted_gender=d.get(\"predicted_gender\"),\n created_time=d.get(\"created_time\"),\n like_count=d.get(\"like_count\"),\n comment_count=d.get(\"comment_count\"),\n media_text=d.get(\"media_text\"),\n hashtags=d.get(\"hashtags\"),\n photo_url=d.get(\"photo_url\"),\n location_name=d.get(\"location_name\"),\n longitude=d.get(\"longitude\"),\n latitude=d.get(\"latitude\"),\n location_id=d.get(\"location_id\"),\n country=d.get(\"country\"))\n d[\"created_time\"] = p\n if d.get(\"latitude\"):\n loc = [float(d.get(\"longitude\")), float(d.get(\"latitude\"))]\n d[\"coordinates\"] = {\"text\": d.get(\"location_name\"), \"location\": loc}\n d[\"pin\"] = loc\n db_session.merge(insta)\n load_es(d)\n r = requests.get(d.get(\"photo_url\"), stream=True)\n if r.status_code == 200:\n with open(config.imgpath + d.get(\"media_id\") + \".png\", 'wb') as f:\n # with open(\"pics/%s.png\" % d.get(\"media_id\"), 'wb') as f:\n for chunk in r:\n f.write(chunk)\n\n \n if data:\n d = data[-1]\n r = requests.get(d.get(\"photo_url\"), stream=True)\n if r.status_code == 200:\n with open(config.imgpath + d.get(\"media_id\") + \".png\", 'wb') as f:\n # with open(\"pics/latest.png\", 'wb') as f:\n for chunk in r:\n f.write(chunk)\n db_session.commit()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2694,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"58682164","text":"# -*- encoding: utf-8 -*-\nimport sys\nfrom collections import deque\nr_input = sys.stdin.readline\n\nif __name__ == '__main__':\n N = int(r_input())\n r1, c1, r2, c2 = map(int, r_input().split())\n\n board = [[0] * N for _ in range(N)]\n\n board[r1][c1] = 1\n\n queue = deque([(r1, c1)])\n cnt = 0\n\n dx = [-2, -2, 0, 0, 2, 2]\n dy = [1, -1, 2, -2, 1, -1]\n\n while queue:\n for _ in range(len(queue)):\n loc = queue.popleft()\n\n if loc == (r2, c2):\n print(cnt)\n exit()\n\n for k in range(6):\n tmp_row = loc[0] + dx[k]\n tmp_col = loc[1] + dy[k]\n\n if 0 <= tmp_row < N and 0 <= tmp_col < N:\n if not board[tmp_row][tmp_col]:\n board[tmp_row][tmp_col] = 1\n queue.append((tmp_row, tmp_col))\n\n cnt += 1\n\n print(-1)\n","sub_path":"Algorithm/Baekjoon/16948 데스 나이트/16948.py","file_name":"16948.py","file_ext":"py","file_size_in_byte":903,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"382575307","text":"from celery import shared_task\nfrom time import sleep\nfrom datetime import datetime\nfrom .models import *\n\n@shared_task\ndef agent_a(task_a_pk):\n task_a = Task.objects.get(pk=task_a_pk)\n task_a.status = \"Executing\"\n task_a.save()\n print(\"Task A with ID\", task_a.task_id, \"assigned Executing status.\", '\\n')\n\n print(\"Agent A with task ID\", task_a.task_id, \"go to sleep for 3 seconds\", '\\n')\n sleep(3)\n\n new_task_b1 = Task.objects.create(task_type=\"B1\")\n new_task_b1.save()\n print(\"Created task with ID\", str(new_task_b1.task_id) + \":\", new_task_b1.task_type, '\\n')\n\n print(\"Agent B start processing task\", new_task_b1.task_type, \"with ID\", new_task_b1.task_id, '\\n')\n agent_b.delay(new_task_b1.pk)\n\n print(\"Agent B with task ID\", new_task_b1.task_id, \"go to sleep for 3 seconds\", 
'\\n')\n sleep(3)\n\n new_task_b2 = Task.objects.create(task_type=\"B2\")\n new_task_b2.save()\n agent_b.delay(new_task_b2.pk)\n print(\"Created task with ID\", str(new_task_b2.task_id) + \":\", new_task_b2.task_type, '\\n')\n\n task_a.status = \"Executed\"\n task_a.save()\n print(\"Task A with ID\", task_a.task_id, \"assigned Executed status.\", '\\n')\n\n return task_a.status\n\n@shared_task\ndef agent_b(task_b_pk):\n task_b = Task.objects.get(pk=task_b_pk)\n task_b.status = \"Executing\"\n task_b.save()\n print(\"Task B with ID\", task_b.task_id, \"assigned Executing status.\", '\\n')\n\n print(\"Agent B with task ID\", task_b.task_id, \"going to sleep for 5 seconds\", '\\n')\n sleep(5)\n\n print(\"Creating the result and its content.\", '\\n')\n result_content = '{current_time}, task #{task_id}'.format(\n current_time=datetime.now().time(),\n task_id=task_b.task_id)\n result = Result.objects.create(result_id=task_b, content=result_content)\n result.save()\n\n task_b.status = \"Executed\"\n task_b.save()\n print(\"Task B with ID\", task_b.task_id, \"assigned Executed status.\", '\\n')\n\n return task_b.status\n","sub_path":"app/TaskAPISchedulerEngine/task_processing_api/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":1992,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"617195560","text":"\nfrom static import Base_RM_Register\nfrom PRS_field import *\n\n\nclass RM_Register_PRS_SWPULSE(Base_RM_Register):\n def __init__(self, rmio, label):\n self.__dict__['zz_frozen'] = False\n super(RM_Register_PRS_SWPULSE, self).__init__(rmio, label,\n 0x400e6000, 0x000,\n 'SWPULSE', 'PRS.SWPULSE', 'write-only',\n \"\",\n 0x00000000, 0x00000FFF)\n\n self.CH0PULSE = RM_Field_PRS_SWPULSE_CH0PULSE(self)\n self.zz_fdict['CH0PULSE'] = self.CH0PULSE\n self.CH1PULSE = RM_Field_PRS_SWPULSE_CH1PULSE(self)\n self.zz_fdict['CH1PULSE'] = self.CH1PULSE\n self.CH2PULSE = RM_Field_PRS_SWPULSE_CH2PULSE(self)\n self.zz_fdict['CH2PULSE'] = self.CH2PULSE\n self.CH3PULSE = RM_Field_PRS_SWPULSE_CH3PULSE(self)\n self.zz_fdict['CH3PULSE'] = self.CH3PULSE\n self.CH4PULSE = RM_Field_PRS_SWPULSE_CH4PULSE(self)\n self.zz_fdict['CH4PULSE'] = self.CH4PULSE\n self.CH5PULSE = RM_Field_PRS_SWPULSE_CH5PULSE(self)\n self.zz_fdict['CH5PULSE'] = self.CH5PULSE\n self.CH6PULSE = RM_Field_PRS_SWPULSE_CH6PULSE(self)\n self.zz_fdict['CH6PULSE'] = self.CH6PULSE\n self.CH7PULSE = RM_Field_PRS_SWPULSE_CH7PULSE(self)\n self.zz_fdict['CH7PULSE'] = self.CH7PULSE\n self.CH8PULSE = RM_Field_PRS_SWPULSE_CH8PULSE(self)\n self.zz_fdict['CH8PULSE'] = self.CH8PULSE\n self.CH9PULSE = RM_Field_PRS_SWPULSE_CH9PULSE(self)\n self.zz_fdict['CH9PULSE'] = self.CH9PULSE\n self.CH10PULSE = RM_Field_PRS_SWPULSE_CH10PULSE(self)\n self.zz_fdict['CH10PULSE'] = self.CH10PULSE\n self.CH11PULSE = RM_Field_PRS_SWPULSE_CH11PULSE(self)\n self.zz_fdict['CH11PULSE'] = self.CH11PULSE\n self.__dict__['zz_frozen'] = True\n\n\nclass RM_Register_PRS_SWLEVEL(Base_RM_Register):\n def __init__(self, rmio, label):\n self.__dict__['zz_frozen'] = False\n super(RM_Register_PRS_SWLEVEL, self).__init__(rmio, label,\n 0x400e6000, 0x004,\n 'SWLEVEL', 'PRS.SWLEVEL', 'read-write',\n \"\",\n 0x00000000, 0x00000FFF)\n\n self.CH0LEVEL = RM_Field_PRS_SWLEVEL_CH0LEVEL(self)\n self.zz_fdict['CH0LEVEL'] = self.CH0LEVEL\n self.CH1LEVEL = RM_Field_PRS_SWLEVEL_CH1LEVEL(self)\n self.zz_fdict['CH1LEVEL'] = self.CH1LEVEL\n self.CH2LEVEL = RM_Field_PRS_SWLEVEL_CH2LEVEL(self)\n self.zz_fdict['CH2LEVEL'] = self.CH2LEVEL\n 
self.CH3LEVEL = RM_Field_PRS_SWLEVEL_CH3LEVEL(self)\n self.zz_fdict['CH3LEVEL'] = self.CH3LEVEL\n self.CH4LEVEL = RM_Field_PRS_SWLEVEL_CH4LEVEL(self)\n self.zz_fdict['CH4LEVEL'] = self.CH4LEVEL\n self.CH5LEVEL = RM_Field_PRS_SWLEVEL_CH5LEVEL(self)\n self.zz_fdict['CH5LEVEL'] = self.CH5LEVEL\n self.CH6LEVEL = RM_Field_PRS_SWLEVEL_CH6LEVEL(self)\n self.zz_fdict['CH6LEVEL'] = self.CH6LEVEL\n self.CH7LEVEL = RM_Field_PRS_SWLEVEL_CH7LEVEL(self)\n self.zz_fdict['CH7LEVEL'] = self.CH7LEVEL\n self.CH8LEVEL = RM_Field_PRS_SWLEVEL_CH8LEVEL(self)\n self.zz_fdict['CH8LEVEL'] = self.CH8LEVEL\n self.CH9LEVEL = RM_Field_PRS_SWLEVEL_CH9LEVEL(self)\n self.zz_fdict['CH9LEVEL'] = self.CH9LEVEL\n self.CH10LEVEL = RM_Field_PRS_SWLEVEL_CH10LEVEL(self)\n self.zz_fdict['CH10LEVEL'] = self.CH10LEVEL\n self.CH11LEVEL = RM_Field_PRS_SWLEVEL_CH11LEVEL(self)\n self.zz_fdict['CH11LEVEL'] = self.CH11LEVEL\n self.__dict__['zz_frozen'] = True\n\n\nclass RM_Register_PRS_ROUTEPEN(Base_RM_Register):\n def __init__(self, rmio, label):\n self.__dict__['zz_frozen'] = False\n super(RM_Register_PRS_ROUTEPEN, self).__init__(rmio, label,\n 0x400e6000, 0x008,\n 'ROUTEPEN', 'PRS.ROUTEPEN', 'read-write',\n \"\",\n 0x00000000, 0x00000FFF)\n\n self.CH0PEN = RM_Field_PRS_ROUTEPEN_CH0PEN(self)\n self.zz_fdict['CH0PEN'] = self.CH0PEN\n self.CH1PEN = RM_Field_PRS_ROUTEPEN_CH1PEN(self)\n self.zz_fdict['CH1PEN'] = self.CH1PEN\n self.CH2PEN = RM_Field_PRS_ROUTEPEN_CH2PEN(self)\n self.zz_fdict['CH2PEN'] = self.CH2PEN\n self.CH3PEN = RM_Field_PRS_ROUTEPEN_CH3PEN(self)\n self.zz_fdict['CH3PEN'] = self.CH3PEN\n self.CH4PEN = RM_Field_PRS_ROUTEPEN_CH4PEN(self)\n self.zz_fdict['CH4PEN'] = self.CH4PEN\n self.CH5PEN = RM_Field_PRS_ROUTEPEN_CH5PEN(self)\n self.zz_fdict['CH5PEN'] = self.CH5PEN\n self.CH6PEN = RM_Field_PRS_ROUTEPEN_CH6PEN(self)\n self.zz_fdict['CH6PEN'] = self.CH6PEN\n self.CH7PEN = RM_Field_PRS_ROUTEPEN_CH7PEN(self)\n self.zz_fdict['CH7PEN'] = self.CH7PEN\n self.CH8PEN = RM_Field_PRS_ROUTEPEN_CH8PEN(self)\n self.zz_fdict['CH8PEN'] = self.CH8PEN\n self.CH9PEN = RM_Field_PRS_ROUTEPEN_CH9PEN(self)\n self.zz_fdict['CH9PEN'] = self.CH9PEN\n self.CH10PEN = RM_Field_PRS_ROUTEPEN_CH10PEN(self)\n self.zz_fdict['CH10PEN'] = self.CH10PEN\n self.CH11PEN = RM_Field_PRS_ROUTEPEN_CH11PEN(self)\n self.zz_fdict['CH11PEN'] = self.CH11PEN\n self.__dict__['zz_frozen'] = True\n\n\nclass RM_Register_PRS_ROUTELOC0(Base_RM_Register):\n def __init__(self, rmio, label):\n self.__dict__['zz_frozen'] = False\n super(RM_Register_PRS_ROUTELOC0, self).__init__(rmio, label,\n 0x400e6000, 0x010,\n 'ROUTELOC0', 'PRS.ROUTELOC0', 'read-write',\n \"\",\n 0x00000000, 0x3F3F3F3F)\n\n self.CH0LOC = RM_Field_PRS_ROUTELOC0_CH0LOC(self)\n self.zz_fdict['CH0LOC'] = self.CH0LOC\n self.CH1LOC = RM_Field_PRS_ROUTELOC0_CH1LOC(self)\n self.zz_fdict['CH1LOC'] = self.CH1LOC\n self.CH2LOC = RM_Field_PRS_ROUTELOC0_CH2LOC(self)\n self.zz_fdict['CH2LOC'] = self.CH2LOC\n self.CH3LOC = RM_Field_PRS_ROUTELOC0_CH3LOC(self)\n self.zz_fdict['CH3LOC'] = self.CH3LOC\n self.__dict__['zz_frozen'] = True\n\n\nclass RM_Register_PRS_ROUTELOC1(Base_RM_Register):\n def __init__(self, rmio, label):\n self.__dict__['zz_frozen'] = False\n super(RM_Register_PRS_ROUTELOC1, self).__init__(rmio, label,\n 0x400e6000, 0x014,\n 'ROUTELOC1', 'PRS.ROUTELOC1', 'read-write',\n \"\",\n 0x00000000, 0x3F3F3F3F)\n\n self.CH4LOC = RM_Field_PRS_ROUTELOC1_CH4LOC(self)\n self.zz_fdict['CH4LOC'] = self.CH4LOC\n self.CH5LOC = RM_Field_PRS_ROUTELOC1_CH5LOC(self)\n self.zz_fdict['CH5LOC'] = self.CH5LOC\n self.CH6LOC = 
RM_Field_PRS_ROUTELOC1_CH6LOC(self)\n self.zz_fdict['CH6LOC'] = self.CH6LOC\n self.CH7LOC = RM_Field_PRS_ROUTELOC1_CH7LOC(self)\n self.zz_fdict['CH7LOC'] = self.CH7LOC\n self.__dict__['zz_frozen'] = True\n\n\nclass RM_Register_PRS_ROUTELOC2(Base_RM_Register):\n def __init__(self, rmio, label):\n self.__dict__['zz_frozen'] = False\n super(RM_Register_PRS_ROUTELOC2, self).__init__(rmio, label,\n 0x400e6000, 0x018,\n 'ROUTELOC2', 'PRS.ROUTELOC2', 'read-write',\n \"\",\n 0x00000000, 0x3F3F3F3F)\n\n self.CH8LOC = RM_Field_PRS_ROUTELOC2_CH8LOC(self)\n self.zz_fdict['CH8LOC'] = self.CH8LOC\n self.CH9LOC = RM_Field_PRS_ROUTELOC2_CH9LOC(self)\n self.zz_fdict['CH9LOC'] = self.CH9LOC\n self.CH10LOC = RM_Field_PRS_ROUTELOC2_CH10LOC(self)\n self.zz_fdict['CH10LOC'] = self.CH10LOC\n self.CH11LOC = RM_Field_PRS_ROUTELOC2_CH11LOC(self)\n self.zz_fdict['CH11LOC'] = self.CH11LOC\n self.__dict__['zz_frozen'] = True\n\n\nclass RM_Register_PRS_CTRL(Base_RM_Register):\n def __init__(self, rmio, label):\n self.__dict__['zz_frozen'] = False\n super(RM_Register_PRS_CTRL, self).__init__(rmio, label,\n 0x400e6000, 0x020,\n 'CTRL', 'PRS.CTRL', 'read-write',\n \"\",\n 0x00000000, 0x0000001F)\n\n self.SEVONPRS = RM_Field_PRS_CTRL_SEVONPRS(self)\n self.zz_fdict['SEVONPRS'] = self.SEVONPRS\n self.SEVONPRSSEL = RM_Field_PRS_CTRL_SEVONPRSSEL(self)\n self.zz_fdict['SEVONPRSSEL'] = self.SEVONPRSSEL\n self.__dict__['zz_frozen'] = True\n\n\nclass RM_Register_PRS_DMAREQ0(Base_RM_Register):\n def __init__(self, rmio, label):\n self.__dict__['zz_frozen'] = False\n super(RM_Register_PRS_DMAREQ0, self).__init__(rmio, label,\n 0x400e6000, 0x024,\n 'DMAREQ0', 'PRS.DMAREQ0', 'read-write',\n \"\",\n 0x00000000, 0x000003C0)\n\n self.PRSSEL = RM_Field_PRS_DMAREQ0_PRSSEL(self)\n self.zz_fdict['PRSSEL'] = self.PRSSEL\n self.__dict__['zz_frozen'] = True\n\n\nclass RM_Register_PRS_DMAREQ1(Base_RM_Register):\n def __init__(self, rmio, label):\n self.__dict__['zz_frozen'] = False\n super(RM_Register_PRS_DMAREQ1, self).__init__(rmio, label,\n 0x400e6000, 0x028,\n 'DMAREQ1', 'PRS.DMAREQ1', 'read-write',\n \"\",\n 0x00000000, 0x000003C0)\n\n self.PRSSEL = RM_Field_PRS_DMAREQ1_PRSSEL(self)\n self.zz_fdict['PRSSEL'] = self.PRSSEL\n self.__dict__['zz_frozen'] = True\n\n\nclass RM_Register_PRS_PEEK(Base_RM_Register):\n def __init__(self, rmio, label):\n self.__dict__['zz_frozen'] = False\n super(RM_Register_PRS_PEEK, self).__init__(rmio, label,\n 0x400e6000, 0x030,\n 'PEEK', 'PRS.PEEK', 'read-only',\n \"\",\n 0x00000000, 0x00000FFF)\n\n self.CH0VAL = RM_Field_PRS_PEEK_CH0VAL(self)\n self.zz_fdict['CH0VAL'] = self.CH0VAL\n self.CH1VAL = RM_Field_PRS_PEEK_CH1VAL(self)\n self.zz_fdict['CH1VAL'] = self.CH1VAL\n self.CH2VAL = RM_Field_PRS_PEEK_CH2VAL(self)\n self.zz_fdict['CH2VAL'] = self.CH2VAL\n self.CH3VAL = RM_Field_PRS_PEEK_CH3VAL(self)\n self.zz_fdict['CH3VAL'] = self.CH3VAL\n self.CH4VAL = RM_Field_PRS_PEEK_CH4VAL(self)\n self.zz_fdict['CH4VAL'] = self.CH4VAL\n self.CH5VAL = RM_Field_PRS_PEEK_CH5VAL(self)\n self.zz_fdict['CH5VAL'] = self.CH5VAL\n self.CH6VAL = RM_Field_PRS_PEEK_CH6VAL(self)\n self.zz_fdict['CH6VAL'] = self.CH6VAL\n self.CH7VAL = RM_Field_PRS_PEEK_CH7VAL(self)\n self.zz_fdict['CH7VAL'] = self.CH7VAL\n self.CH8VAL = RM_Field_PRS_PEEK_CH8VAL(self)\n self.zz_fdict['CH8VAL'] = self.CH8VAL\n self.CH9VAL = RM_Field_PRS_PEEK_CH9VAL(self)\n self.zz_fdict['CH9VAL'] = self.CH9VAL\n self.CH10VAL = RM_Field_PRS_PEEK_CH10VAL(self)\n self.zz_fdict['CH10VAL'] = self.CH10VAL\n self.CH11VAL = RM_Field_PRS_PEEK_CH11VAL(self)\n 
self.zz_fdict['CH11VAL'] = self.CH11VAL\n self.__dict__['zz_frozen'] = True\n\n\nclass RM_Register_PRS_CH0_CTRL(Base_RM_Register):\n def __init__(self, rmio, label):\n self.__dict__['zz_frozen'] = False\n super(RM_Register_PRS_CH0_CTRL, self).__init__(rmio, label,\n 0x400e6000, 0x040,\n 'CH0_CTRL', 'PRS.CH0_CTRL', 'read-write',\n \"\",\n 0x00000000, 0x5E307F07)\n\n self.SIGSEL = RM_Field_PRS_CH0_CTRL_SIGSEL(self)\n self.zz_fdict['SIGSEL'] = self.SIGSEL\n self.SOURCESEL = RM_Field_PRS_CH0_CTRL_SOURCESEL(self)\n self.zz_fdict['SOURCESEL'] = self.SOURCESEL\n self.EDSEL = RM_Field_PRS_CH0_CTRL_EDSEL(self)\n self.zz_fdict['EDSEL'] = self.EDSEL\n self.STRETCH = RM_Field_PRS_CH0_CTRL_STRETCH(self)\n self.zz_fdict['STRETCH'] = self.STRETCH\n self.INV = RM_Field_PRS_CH0_CTRL_INV(self)\n self.zz_fdict['INV'] = self.INV\n self.ORPREV = RM_Field_PRS_CH0_CTRL_ORPREV(self)\n self.zz_fdict['ORPREV'] = self.ORPREV\n self.ANDNEXT = RM_Field_PRS_CH0_CTRL_ANDNEXT(self)\n self.zz_fdict['ANDNEXT'] = self.ANDNEXT\n self.ASYNC = RM_Field_PRS_CH0_CTRL_ASYNC(self)\n self.zz_fdict['ASYNC'] = self.ASYNC\n self.__dict__['zz_frozen'] = True\n\n\nclass RM_Register_PRS_CH1_CTRL(Base_RM_Register):\n def __init__(self, rmio, label):\n self.__dict__['zz_frozen'] = False\n super(RM_Register_PRS_CH1_CTRL, self).__init__(rmio, label,\n 0x400e6000, 0x044,\n 'CH1_CTRL', 'PRS.CH1_CTRL', 'read-write',\n \"\",\n 0x00000000, 0x5E307F07)\n\n self.SIGSEL = RM_Field_PRS_CH1_CTRL_SIGSEL(self)\n self.zz_fdict['SIGSEL'] = self.SIGSEL\n self.SOURCESEL = RM_Field_PRS_CH1_CTRL_SOURCESEL(self)\n self.zz_fdict['SOURCESEL'] = self.SOURCESEL\n self.EDSEL = RM_Field_PRS_CH1_CTRL_EDSEL(self)\n self.zz_fdict['EDSEL'] = self.EDSEL\n self.STRETCH = RM_Field_PRS_CH1_CTRL_STRETCH(self)\n self.zz_fdict['STRETCH'] = self.STRETCH\n self.INV = RM_Field_PRS_CH1_CTRL_INV(self)\n self.zz_fdict['INV'] = self.INV\n self.ORPREV = RM_Field_PRS_CH1_CTRL_ORPREV(self)\n self.zz_fdict['ORPREV'] = self.ORPREV\n self.ANDNEXT = RM_Field_PRS_CH1_CTRL_ANDNEXT(self)\n self.zz_fdict['ANDNEXT'] = self.ANDNEXT\n self.ASYNC = RM_Field_PRS_CH1_CTRL_ASYNC(self)\n self.zz_fdict['ASYNC'] = self.ASYNC\n self.__dict__['zz_frozen'] = True\n\n\nclass RM_Register_PRS_CH2_CTRL(Base_RM_Register):\n def __init__(self, rmio, label):\n self.__dict__['zz_frozen'] = False\n super(RM_Register_PRS_CH2_CTRL, self).__init__(rmio, label,\n 0x400e6000, 0x048,\n 'CH2_CTRL', 'PRS.CH2_CTRL', 'read-write',\n \"\",\n 0x00000000, 0x5E307F07)\n\n self.SIGSEL = RM_Field_PRS_CH2_CTRL_SIGSEL(self)\n self.zz_fdict['SIGSEL'] = self.SIGSEL\n self.SOURCESEL = RM_Field_PRS_CH2_CTRL_SOURCESEL(self)\n self.zz_fdict['SOURCESEL'] = self.SOURCESEL\n self.EDSEL = RM_Field_PRS_CH2_CTRL_EDSEL(self)\n self.zz_fdict['EDSEL'] = self.EDSEL\n self.STRETCH = RM_Field_PRS_CH2_CTRL_STRETCH(self)\n self.zz_fdict['STRETCH'] = self.STRETCH\n self.INV = RM_Field_PRS_CH2_CTRL_INV(self)\n self.zz_fdict['INV'] = self.INV\n self.ORPREV = RM_Field_PRS_CH2_CTRL_ORPREV(self)\n self.zz_fdict['ORPREV'] = self.ORPREV\n self.ANDNEXT = RM_Field_PRS_CH2_CTRL_ANDNEXT(self)\n self.zz_fdict['ANDNEXT'] = self.ANDNEXT\n self.ASYNC = RM_Field_PRS_CH2_CTRL_ASYNC(self)\n self.zz_fdict['ASYNC'] = self.ASYNC\n self.__dict__['zz_frozen'] = True\n\n\nclass RM_Register_PRS_CH3_CTRL(Base_RM_Register):\n def __init__(self, rmio, label):\n self.__dict__['zz_frozen'] = False\n super(RM_Register_PRS_CH3_CTRL, self).__init__(rmio, label,\n 0x400e6000, 0x04C,\n 'CH3_CTRL', 'PRS.CH3_CTRL', 'read-write',\n \"\",\n 0x00000000, 0x5E307F07)\n\n self.SIGSEL = 
RM_Field_PRS_CH3_CTRL_SIGSEL(self)\n self.zz_fdict['SIGSEL'] = self.SIGSEL\n self.SOURCESEL = RM_Field_PRS_CH3_CTRL_SOURCESEL(self)\n self.zz_fdict['SOURCESEL'] = self.SOURCESEL\n self.EDSEL = RM_Field_PRS_CH3_CTRL_EDSEL(self)\n self.zz_fdict['EDSEL'] = self.EDSEL\n self.STRETCH = RM_Field_PRS_CH3_CTRL_STRETCH(self)\n self.zz_fdict['STRETCH'] = self.STRETCH\n self.INV = RM_Field_PRS_CH3_CTRL_INV(self)\n self.zz_fdict['INV'] = self.INV\n self.ORPREV = RM_Field_PRS_CH3_CTRL_ORPREV(self)\n self.zz_fdict['ORPREV'] = self.ORPREV\n self.ANDNEXT = RM_Field_PRS_CH3_CTRL_ANDNEXT(self)\n self.zz_fdict['ANDNEXT'] = self.ANDNEXT\n self.ASYNC = RM_Field_PRS_CH3_CTRL_ASYNC(self)\n self.zz_fdict['ASYNC'] = self.ASYNC\n self.__dict__['zz_frozen'] = True\n\n\nclass RM_Register_PRS_CH4_CTRL(Base_RM_Register):\n def __init__(self, rmio, label):\n self.__dict__['zz_frozen'] = False\n super(RM_Register_PRS_CH4_CTRL, self).__init__(rmio, label,\n 0x400e6000, 0x050,\n 'CH4_CTRL', 'PRS.CH4_CTRL', 'read-write',\n \"\",\n 0x00000000, 0x5E307F07)\n\n self.SIGSEL = RM_Field_PRS_CH4_CTRL_SIGSEL(self)\n self.zz_fdict['SIGSEL'] = self.SIGSEL\n self.SOURCESEL = RM_Field_PRS_CH4_CTRL_SOURCESEL(self)\n self.zz_fdict['SOURCESEL'] = self.SOURCESEL\n self.EDSEL = RM_Field_PRS_CH4_CTRL_EDSEL(self)\n self.zz_fdict['EDSEL'] = self.EDSEL\n self.STRETCH = RM_Field_PRS_CH4_CTRL_STRETCH(self)\n self.zz_fdict['STRETCH'] = self.STRETCH\n self.INV = RM_Field_PRS_CH4_CTRL_INV(self)\n self.zz_fdict['INV'] = self.INV\n self.ORPREV = RM_Field_PRS_CH4_CTRL_ORPREV(self)\n self.zz_fdict['ORPREV'] = self.ORPREV\n self.ANDNEXT = RM_Field_PRS_CH4_CTRL_ANDNEXT(self)\n self.zz_fdict['ANDNEXT'] = self.ANDNEXT\n self.ASYNC = RM_Field_PRS_CH4_CTRL_ASYNC(self)\n self.zz_fdict['ASYNC'] = self.ASYNC\n self.__dict__['zz_frozen'] = True\n\n\nclass RM_Register_PRS_CH5_CTRL(Base_RM_Register):\n def __init__(self, rmio, label):\n self.__dict__['zz_frozen'] = False\n super(RM_Register_PRS_CH5_CTRL, self).__init__(rmio, label,\n 0x400e6000, 0x054,\n 'CH5_CTRL', 'PRS.CH5_CTRL', 'read-write',\n \"\",\n 0x00000000, 0x5E307F07)\n\n self.SIGSEL = RM_Field_PRS_CH5_CTRL_SIGSEL(self)\n self.zz_fdict['SIGSEL'] = self.SIGSEL\n self.SOURCESEL = RM_Field_PRS_CH5_CTRL_SOURCESEL(self)\n self.zz_fdict['SOURCESEL'] = self.SOURCESEL\n self.EDSEL = RM_Field_PRS_CH5_CTRL_EDSEL(self)\n self.zz_fdict['EDSEL'] = self.EDSEL\n self.STRETCH = RM_Field_PRS_CH5_CTRL_STRETCH(self)\n self.zz_fdict['STRETCH'] = self.STRETCH\n self.INV = RM_Field_PRS_CH5_CTRL_INV(self)\n self.zz_fdict['INV'] = self.INV\n self.ORPREV = RM_Field_PRS_CH5_CTRL_ORPREV(self)\n self.zz_fdict['ORPREV'] = self.ORPREV\n self.ANDNEXT = RM_Field_PRS_CH5_CTRL_ANDNEXT(self)\n self.zz_fdict['ANDNEXT'] = self.ANDNEXT\n self.ASYNC = RM_Field_PRS_CH5_CTRL_ASYNC(self)\n self.zz_fdict['ASYNC'] = self.ASYNC\n self.__dict__['zz_frozen'] = True\n\n\nclass RM_Register_PRS_CH6_CTRL(Base_RM_Register):\n def __init__(self, rmio, label):\n self.__dict__['zz_frozen'] = False\n super(RM_Register_PRS_CH6_CTRL, self).__init__(rmio, label,\n 0x400e6000, 0x058,\n 'CH6_CTRL', 'PRS.CH6_CTRL', 'read-write',\n \"\",\n 0x00000000, 0x5E307F07)\n\n self.SIGSEL = RM_Field_PRS_CH6_CTRL_SIGSEL(self)\n self.zz_fdict['SIGSEL'] = self.SIGSEL\n self.SOURCESEL = RM_Field_PRS_CH6_CTRL_SOURCESEL(self)\n self.zz_fdict['SOURCESEL'] = self.SOURCESEL\n self.EDSEL = RM_Field_PRS_CH6_CTRL_EDSEL(self)\n self.zz_fdict['EDSEL'] = self.EDSEL\n self.STRETCH = RM_Field_PRS_CH6_CTRL_STRETCH(self)\n self.zz_fdict['STRETCH'] = self.STRETCH\n self.INV = 
RM_Field_PRS_CH6_CTRL_INV(self)\n self.zz_fdict['INV'] = self.INV\n self.ORPREV = RM_Field_PRS_CH6_CTRL_ORPREV(self)\n self.zz_fdict['ORPREV'] = self.ORPREV\n self.ANDNEXT = RM_Field_PRS_CH6_CTRL_ANDNEXT(self)\n self.zz_fdict['ANDNEXT'] = self.ANDNEXT\n self.ASYNC = RM_Field_PRS_CH6_CTRL_ASYNC(self)\n self.zz_fdict['ASYNC'] = self.ASYNC\n self.__dict__['zz_frozen'] = True\n\n\nclass RM_Register_PRS_CH7_CTRL(Base_RM_Register):\n def __init__(self, rmio, label):\n self.__dict__['zz_frozen'] = False\n super(RM_Register_PRS_CH7_CTRL, self).__init__(rmio, label,\n 0x400e6000, 0x05C,\n 'CH7_CTRL', 'PRS.CH7_CTRL', 'read-write',\n \"\",\n 0x00000000, 0x5E307F07)\n\n self.SIGSEL = RM_Field_PRS_CH7_CTRL_SIGSEL(self)\n self.zz_fdict['SIGSEL'] = self.SIGSEL\n self.SOURCESEL = RM_Field_PRS_CH7_CTRL_SOURCESEL(self)\n self.zz_fdict['SOURCESEL'] = self.SOURCESEL\n self.EDSEL = RM_Field_PRS_CH7_CTRL_EDSEL(self)\n self.zz_fdict['EDSEL'] = self.EDSEL\n self.STRETCH = RM_Field_PRS_CH7_CTRL_STRETCH(self)\n self.zz_fdict['STRETCH'] = self.STRETCH\n self.INV = RM_Field_PRS_CH7_CTRL_INV(self)\n self.zz_fdict['INV'] = self.INV\n self.ORPREV = RM_Field_PRS_CH7_CTRL_ORPREV(self)\n self.zz_fdict['ORPREV'] = self.ORPREV\n self.ANDNEXT = RM_Field_PRS_CH7_CTRL_ANDNEXT(self)\n self.zz_fdict['ANDNEXT'] = self.ANDNEXT\n self.ASYNC = RM_Field_PRS_CH7_CTRL_ASYNC(self)\n self.zz_fdict['ASYNC'] = self.ASYNC\n self.__dict__['zz_frozen'] = True\n\n\nclass RM_Register_PRS_CH8_CTRL(Base_RM_Register):\n def __init__(self, rmio, label):\n self.__dict__['zz_frozen'] = False\n super(RM_Register_PRS_CH8_CTRL, self).__init__(rmio, label,\n 0x400e6000, 0x060,\n 'CH8_CTRL', 'PRS.CH8_CTRL', 'read-write',\n \"\",\n 0x00000000, 0x5E307F07)\n\n self.SIGSEL = RM_Field_PRS_CH8_CTRL_SIGSEL(self)\n self.zz_fdict['SIGSEL'] = self.SIGSEL\n self.SOURCESEL = RM_Field_PRS_CH8_CTRL_SOURCESEL(self)\n self.zz_fdict['SOURCESEL'] = self.SOURCESEL\n self.EDSEL = RM_Field_PRS_CH8_CTRL_EDSEL(self)\n self.zz_fdict['EDSEL'] = self.EDSEL\n self.STRETCH = RM_Field_PRS_CH8_CTRL_STRETCH(self)\n self.zz_fdict['STRETCH'] = self.STRETCH\n self.INV = RM_Field_PRS_CH8_CTRL_INV(self)\n self.zz_fdict['INV'] = self.INV\n self.ORPREV = RM_Field_PRS_CH8_CTRL_ORPREV(self)\n self.zz_fdict['ORPREV'] = self.ORPREV\n self.ANDNEXT = RM_Field_PRS_CH8_CTRL_ANDNEXT(self)\n self.zz_fdict['ANDNEXT'] = self.ANDNEXT\n self.ASYNC = RM_Field_PRS_CH8_CTRL_ASYNC(self)\n self.zz_fdict['ASYNC'] = self.ASYNC\n self.__dict__['zz_frozen'] = True\n\n\nclass RM_Register_PRS_CH9_CTRL(Base_RM_Register):\n def __init__(self, rmio, label):\n self.__dict__['zz_frozen'] = False\n super(RM_Register_PRS_CH9_CTRL, self).__init__(rmio, label,\n 0x400e6000, 0x064,\n 'CH9_CTRL', 'PRS.CH9_CTRL', 'read-write',\n \"\",\n 0x00000000, 0x5E307F07)\n\n self.SIGSEL = RM_Field_PRS_CH9_CTRL_SIGSEL(self)\n self.zz_fdict['SIGSEL'] = self.SIGSEL\n self.SOURCESEL = RM_Field_PRS_CH9_CTRL_SOURCESEL(self)\n self.zz_fdict['SOURCESEL'] = self.SOURCESEL\n self.EDSEL = RM_Field_PRS_CH9_CTRL_EDSEL(self)\n self.zz_fdict['EDSEL'] = self.EDSEL\n self.STRETCH = RM_Field_PRS_CH9_CTRL_STRETCH(self)\n self.zz_fdict['STRETCH'] = self.STRETCH\n self.INV = RM_Field_PRS_CH9_CTRL_INV(self)\n self.zz_fdict['INV'] = self.INV\n self.ORPREV = RM_Field_PRS_CH9_CTRL_ORPREV(self)\n self.zz_fdict['ORPREV'] = self.ORPREV\n self.ANDNEXT = RM_Field_PRS_CH9_CTRL_ANDNEXT(self)\n self.zz_fdict['ANDNEXT'] = self.ANDNEXT\n self.ASYNC = RM_Field_PRS_CH9_CTRL_ASYNC(self)\n self.zz_fdict['ASYNC'] = self.ASYNC\n self.__dict__['zz_frozen'] = True\n\n\nclass 
RM_Register_PRS_CH10_CTRL(Base_RM_Register):\n def __init__(self, rmio, label):\n self.__dict__['zz_frozen'] = False\n super(RM_Register_PRS_CH10_CTRL, self).__init__(rmio, label,\n 0x400e6000, 0x068,\n 'CH10_CTRL', 'PRS.CH10_CTRL', 'read-write',\n \"\",\n 0x00000000, 0x5E307F07)\n\n self.SIGSEL = RM_Field_PRS_CH10_CTRL_SIGSEL(self)\n self.zz_fdict['SIGSEL'] = self.SIGSEL\n self.SOURCESEL = RM_Field_PRS_CH10_CTRL_SOURCESEL(self)\n self.zz_fdict['SOURCESEL'] = self.SOURCESEL\n self.EDSEL = RM_Field_PRS_CH10_CTRL_EDSEL(self)\n self.zz_fdict['EDSEL'] = self.EDSEL\n self.STRETCH = RM_Field_PRS_CH10_CTRL_STRETCH(self)\n self.zz_fdict['STRETCH'] = self.STRETCH\n self.INV = RM_Field_PRS_CH10_CTRL_INV(self)\n self.zz_fdict['INV'] = self.INV\n self.ORPREV = RM_Field_PRS_CH10_CTRL_ORPREV(self)\n self.zz_fdict['ORPREV'] = self.ORPREV\n self.ANDNEXT = RM_Field_PRS_CH10_CTRL_ANDNEXT(self)\n self.zz_fdict['ANDNEXT'] = self.ANDNEXT\n self.ASYNC = RM_Field_PRS_CH10_CTRL_ASYNC(self)\n self.zz_fdict['ASYNC'] = self.ASYNC\n self.__dict__['zz_frozen'] = True\n\n\nclass RM_Register_PRS_CH11_CTRL(Base_RM_Register):\n def __init__(self, rmio, label):\n self.__dict__['zz_frozen'] = False\n super(RM_Register_PRS_CH11_CTRL, self).__init__(rmio, label,\n 0x400e6000, 0x06C,\n 'CH11_CTRL', 'PRS.CH11_CTRL', 'read-write',\n \"\",\n 0x00000000, 0x5E307F07)\n\n self.SIGSEL = RM_Field_PRS_CH11_CTRL_SIGSEL(self)\n self.zz_fdict['SIGSEL'] = self.SIGSEL\n self.SOURCESEL = RM_Field_PRS_CH11_CTRL_SOURCESEL(self)\n self.zz_fdict['SOURCESEL'] = self.SOURCESEL\n self.EDSEL = RM_Field_PRS_CH11_CTRL_EDSEL(self)\n self.zz_fdict['EDSEL'] = self.EDSEL\n self.STRETCH = RM_Field_PRS_CH11_CTRL_STRETCH(self)\n self.zz_fdict['STRETCH'] = self.STRETCH\n self.INV = RM_Field_PRS_CH11_CTRL_INV(self)\n self.zz_fdict['INV'] = self.INV\n self.ORPREV = RM_Field_PRS_CH11_CTRL_ORPREV(self)\n self.zz_fdict['ORPREV'] = self.ORPREV\n self.ANDNEXT = RM_Field_PRS_CH11_CTRL_ANDNEXT(self)\n self.zz_fdict['ANDNEXT'] = self.ANDNEXT\n self.ASYNC = RM_Field_PRS_CH11_CTRL_ASYNC(self)\n self.zz_fdict['ASYNC'] = self.ASYNC\n self.__dict__['zz_frozen'] = True\n\n\n","sub_path":".closet/jython.configurator.efr32/1.0.0.201606231656-435/host_py_rm_studio_internal/host_py_rm_studio_internal_efr32xg1xfull/revA3/PRS_register.py","file_name":"PRS_register.py","file_ext":"py","file_size_in_byte":25080,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"228960256","text":"import numpy as np\n\n#XXX: Shamefully stolen from pyautodiff\n# https://github.com/jaberg/pyautodiff/blob/master/autodiff/fmin_scipy.py\n\ndef vector_from_args(args):\n args_sizes = [w.size for w in args]\n x_size = sum(args_sizes)\n x = np.empty(x_size, dtype='float64') # has to be float64 for fmin_l_bfgs_b\n i = 0\n for w in args:\n x[i: i + w.size] = w.flatten()\n i += w.size\n return x\n\ndef args_from_vector(x, orig_args):\n # unpack x_opt -> args-like structure `args_opt`\n rval = []\n i = 0\n for w in orig_args:\n rval.append(x[i: i + w.size].reshape(w.shape).astype(w.dtype))\n i += w.size\n return rval\n","sub_path":"cae/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":658,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"482053277","text":"from model.net.constrastive.text_encoder_finetune import TextEncoder\r\nfrom model.net.constrastive.vision_encoder_finetune import VisionEncoder\r\nfrom model.net.constrastive.audio_encoder_fintune import AudioEncoder\r\nimport 
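The generated PRS register classes above all follow the same freeze pattern: fields are installed while `zz_frozen` is written as `False` directly through `self.__dict__`, then the instance is locked. `Base_RM_Register` is not included in this record, so the `__setattr__` guard below is an assumption about how that flag is likely enforced; a minimal sketch:

```python
class FrozenAttrs(object):
    def __init__(self):
        self.__dict__['zz_frozen'] = False  # bypasses __setattr__ during setup
        self.zz_fdict = {}
        self.FIELD = object()               # stand-in for an RM_Field_* instance
        self.zz_fdict['FIELD'] = self.FIELD
        self.__dict__['zz_frozen'] = True   # lock the instance

    def __setattr__(self, name, value):
        # Assumed guard: reject new attributes once the instance is frozen.
        if self.__dict__.get('zz_frozen', False):
            raise AttributeError("instance is frozen, cannot set %r" % name)
        super(FrozenAttrs, self).__setattr__(name, value)


obj = FrozenAttrs()
try:
    obj.CH12LEVEL = 1   # rejected once zz_frozen is True
except AttributeError as err:
    print(err)
```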
torch\r\nimport config as default_config\r\nfrom torch import nn\r\nfrom model.decoder.classifier import BaseClassifier\r\nfrom util.metrics import cont_NTXentLoss\r\nimport numpy as np\r\nfrom util.common import check_dir\r\n\r\n\r\nclass projector(nn.Module):\r\n def __init__(self, input_dim, output_dim, dropout=0.5):\r\n super(projector, self).__init__()\r\n\r\n self.fc = nn.Sequential(\r\n nn.LayerNorm(input_dim),\r\n nn.Linear(input_dim, output_dim),\r\n # nn.ReLU(),\r\n # nn.Linear(output_dim, output_dim),\r\n nn.Tanh(),\r\n nn.Dropout(dropout)\r\n )\r\n\r\n def forward(self, x):\r\n x = self.fc(x)\r\n return x\r\n\r\n\r\nclass TVA_fusion(nn.Module):\r\n def __init__(self, name=None, encoder_fea_dim=None, drop_out=None, config=default_config):\r\n super(TVA_fusion, self).__init__()\r\n self.config = config\r\n self.text_encoder = TextEncoder(name=name, with_projector=False, config=config)\r\n self.vision_encoder = VisionEncoder(config=config)\r\n self.audio_encoder = AudioEncoder(config=config)\r\n if encoder_fea_dim is None:\r\n encoder_fea_dim = config.MOSI.downStream.encoder_fea_dim\r\n if drop_out is None:\r\n drop_out = config.MOSI.downStream.text_drop_out\r\n\r\n uni_fea_dim = int(encoder_fea_dim/2)\r\n\r\n self.T_simi_proj = projector(encoder_fea_dim, uni_fea_dim)\r\n self.V_simi_proj = projector(encoder_fea_dim, uni_fea_dim)\r\n self.A_simi_proj = projector(encoder_fea_dim, uni_fea_dim)\r\n\r\n self.T_dissimi_proj = projector(encoder_fea_dim, uni_fea_dim)\r\n self.V_dissimi_proj = projector(encoder_fea_dim, uni_fea_dim)\r\n self.A_dissimi_proj = projector(encoder_fea_dim, uni_fea_dim)\r\n\r\n hidden_size = [uni_fea_dim * 2, uni_fea_dim, int(uni_fea_dim / 2), int(uni_fea_dim / 4),\r\n ]\r\n\r\n self.TVA_decoder = BaseClassifier(input_size=uni_fea_dim * 6,\r\n hidden_size=hidden_size,\r\n output_size=1, drop_out=drop_out,\r\n name='TVARegClassifier', )\r\n\r\n self.mono_decoder = BaseClassifier(input_size=uni_fea_dim,\r\n hidden_size=hidden_size[2:],\r\n output_size=1, drop_out=drop_out,\r\n name='TVAMonoRegClassifier', )\r\n\r\n self.device = config.DEVICE\r\n self.criterion = torch.nn.MSELoss()\r\n self.model_path = config.MOSI.path.model_path + str(config.seed) + '/'\r\n check_dir(self.model_path)\r\n\r\n self.batch_size = config.MOSI.downStream.TVAExp_fusion.batch_size\r\n self.heat = config.MOSI.downStream.const_heat\r\n\r\n self.ntxent_loss = cont_NTXentLoss(temperature=self.heat)\r\n self.set_train()\r\n\r\n def forward(self, sample1, sample2, return_loss=True, return_emb=False, device=None):\r\n if device is None:\r\n device = self.device\r\n\r\n text1 = sample1['raw_text']\r\n vision1 = sample1['vision'].clone().detach().to(device).float()\r\n audio1 = sample1['audio'].clone().detach().to(device).float()\r\n label1 = sample1['regression_labels'].clone().detach().to(device).float() # .squeeze()\r\n label_T1 = sample1['regression_labels'].clone().detach().to(device).float() # .squeeze()\r\n label_V1 = sample1['regression_labels'].clone().detach().to(device).float() # .squeeze()\r\n label_A1 = sample1['regression_labels'].clone().detach().to(device).float() # .squeeze()\r\n key_padding_mask_V1, key_padding_mask_A1 = (sample1['vision_padding_mask'].clone().detach().to(device),\r\n sample1['audio_padding_mask'].clone().detach().to(device))\r\n\r\n x_t_embed = self.text_encoder(text1, device=device).squeeze()\r\n x_v_embed = self.vision_encoder(vision1, key_padding_mask=key_padding_mask_V1, device=device).squeeze()\r\n x_a_embed = self.audio_encoder(audio1, 
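The `projector` head defined above is a plain LayerNorm -> Linear -> Tanh -> Dropout stack; a quick shape check of an equivalent stack (the dimensions here are illustrative, not taken from the config):

```python
import torch
from torch import nn

# Same layer sequence as the projector module above, with example sizes.
proj = nn.Sequential(
    nn.LayerNorm(768), nn.Linear(768, 384), nn.Tanh(), nn.Dropout(0.5))
out = proj(torch.randn(4, 768))
print(out.shape)   # torch.Size([4, 384])
```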
key_padding_mask=key_padding_mask_A1, device=device).squeeze()\r\n\r\n x_t_simi1 = self.T_simi_proj(x_t_embed)\r\n x_v_simi1 = self.V_simi_proj(x_v_embed)\r\n x_a_simi1 = self.A_simi_proj(x_a_embed)\r\n x_t_dissimi1 = self.T_dissimi_proj(x_t_embed)\r\n x_v_dissimi1 = self.V_dissimi_proj(x_v_embed)\r\n x_a_dissimi1 = self.A_dissimi_proj(x_a_embed)\r\n\r\n x1_s = torch.cat((x_t_simi1, x_v_simi1, x_a_simi1), dim=-1)\r\n x1_ds = torch.cat((x_t_dissimi1, x_v_dissimi1, x_a_dissimi1), dim=-1)\r\n x1_all = torch.cat((x1_s, x1_ds), dim=-1)\r\n x1_sds = torch.cat((x_t_simi1, x_v_simi1, x_a_simi1, x_t_dissimi1, x_v_dissimi1, x_a_dissimi1,\r\n ), dim=0)\r\n label1_sds = torch.cat((label1, label1, label1, label_T1, label_V1, label_A1,), dim=0)\r\n x_sds = x1_sds\r\n label_sds = label1_sds\r\n x2 = None\r\n x = x1_all\r\n label_all = label1\r\n if sample2 is not None:\r\n text2 = sample2['raw_text']\r\n vision2 = sample2['vision'].clone().detach().to(device).float()\r\n audio2 = sample2['audio'].clone().detach().to(device).float()\r\n label2 = sample2['regression_labels'].clone().detach().to(device).float() # .squeeze()\r\n label_T2 = sample2['regression_labels'].clone().detach().to(device).float() # .squeeze()\r\n label_V2 = sample2['regression_labels'].clone().detach().to(device).float() # .squeeze()\r\n label_A2 = sample2['regression_labels'].clone().detach().to(device).float() # .squeeze()\r\n key_padding_mask_V2, key_padding_mask_A2 = (sample2['vision_padding_mask'].clone().detach().to(device),\r\n sample2['audio_padding_mask'].clone().detach().to(device))\r\n\r\n x_t_embed2 = self.text_encoder(text2, device=device).squeeze()\r\n x_v_embed2 = self.vision_encoder(vision2, key_padding_mask=key_padding_mask_V2, device=device).squeeze()\r\n x_a_embed2 = self.audio_encoder(audio2, key_padding_mask=key_padding_mask_A2, device=device).squeeze()\r\n\r\n x_t_simi2 = self.T_simi_proj(x_t_embed2)\r\n x_v_simi2 = self.V_simi_proj(x_v_embed2)\r\n x_a_simi2 = self.A_simi_proj(x_a_embed2)\r\n x_t_dissimi2 = self.T_dissimi_proj(x_t_embed2)\r\n x_v_dissimi2 = self.V_dissimi_proj(x_v_embed2)\r\n x_a_dissimi2 = self.A_dissimi_proj(x_a_embed2)\r\n\r\n x2_s = torch.cat((x_t_simi2, x_v_simi2, x_a_simi2), dim=-1)\r\n x2_ds = torch.cat((x_t_dissimi2, x_v_dissimi2, x_a_dissimi2), dim=-1)\r\n x2_all = torch.cat((x2_s, x2_ds), dim=-1)\r\n x2_sds = torch.cat((x_t_simi2, x_v_simi2, x_a_simi2, x_t_dissimi2, x_v_dissimi2, x_a_dissimi2,\r\n ), dim=0)\r\n label2_sds = torch.cat((label2, label2, label2, label_T2, label_V2, label_A2,), dim=0)\r\n x = torch.cat((x1_all, x2_all), dim=0)\r\n label_all = torch.cat((label1.squeeze(), label2.squeeze()), dim=0)\r\n x_sds = torch.cat((x1_sds, x2_sds), dim=0)\r\n label_sds = torch.cat((label1_sds, label2_sds), dim=0)\r\n\r\n if return_loss:\r\n pred = self.TVA_decoder(x)\r\n pred_mono = self.mono_decoder(x_sds)\r\n sup_const_loss = 0\r\n # sds_loss = 0\r\n if sample2 is not None:\r\n # [Ts,T1s,T2s,T3s,T4s,T5s,T6s,V1s,V2s,V3s,....]\r\n t1, p, t2, n = torch.tensor([0, 0, 7, 7, 14, 14,\r\n 0, 0, 1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6],\r\n device=device), \\\r\n torch.tensor([1, 2, 8, 9, 15, 16,\r\n 7, 14, 8, 15, 9, 16, 10, 17, 11, 18, 12, 19, 13, 20],\r\n device=device), \\\r\n torch.tensor([0, 0, 0, 0, 7, 7, 7, 7, 14, 14, 14, 14,\r\n 0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6],\r\n device=device), \\\r\n torch.tensor([3, 4, 5, 6, 10, 11, 12, 13, 17, 18, 19, 20,\r\n 21, 28, 35, 22, 29, 36, 23, 30, 37, 24, 31, 38, 25, 32, 39, 26, 33, 40, 27,\r\n 34, 41], device=device)\r\n\r\n 
indices_tuple = (t1, p, t2, n)\r\n pre_sample_label = torch.tensor([0, 0, 0, 1, 2, 3, 4, 0, 0, 0, 1, 2, 3, 4, 0, 0, 0, 1, 2, 3, 4,\r\n 5, 5, 5, 6, 7, 8, 9, 5, 5, 5, 6, 7, 8, 9, 5, 5, 5, 6, 7, 8, 9, ])\r\n for i in range(len(x1_all)):\r\n pre_sample_x = []\r\n for fea1, fea2 in zip([x_t_simi1, x_v_simi1, x_a_simi1, x_t_dissimi1, x_v_dissimi1, x_a_dissimi1, ],\r\n [x_t_simi2, x_v_simi2, x_a_simi2, x_t_dissimi2, x_v_dissimi2,\r\n x_a_dissimi2, ]):\r\n pre_sample_x.append(torch.cat((fea1[i].unsqueeze(0), fea2[6 * i:6 * (i + 1)]), dim=0))\r\n\r\n sup_const_loss += self.ntxent_loss(torch.cat(pre_sample_x, dim=0), pre_sample_label,\r\n indices_tuple=indices_tuple)\r\n\r\n sup_const_loss /= len(x1_all)\r\n\r\n pred_loss = self.criterion(pred.squeeze(), label_all)\r\n mono_task_loss = self.criterion(pred_mono.squeeze(), label_sds)\r\n\r\n loss = pred_loss + 0.1 * sup_const_loss + 0.01 * mono_task_loss\r\n if return_emb:\r\n return pred, x1_all, loss, pred_loss, sup_const_loss\r\n else:\r\n return pred, (x_t_embed, x_v_embed, x_a_embed), loss, pred_loss, sup_const_loss\r\n else:\r\n if return_emb:\r\n return x1_all\r\n else:\r\n return (x_t_embed, x_v_embed, x_a_embed)\r\n\r\n def save_model(self, name):\r\n # save all modules\r\n mode_path = self.model_path + 'TVA_fusion' + '_model.ckpt'\r\n\r\n print('model saved at:')\r\n print(mode_path)\r\n torch.save(self.state_dict(), mode_path)\r\n\r\n def load_model(self, name, load_pretrain=False):\r\n if load_pretrain:\r\n text_encoder_path = self.config.MOSI.path.encoder_path + name + '_text_encoder.ckpt'\r\n vision_encoder_path = self.config.MOSI.path.encoder_path + name + '_vision_encoder.ckpt'\r\n audio_encoder_path = self.config.MOSI.path.encoder_path + name + '_audio_encoder.ckpt'\r\n\r\n print('model loaded from:')\r\n print(text_encoder_path)\r\n print(vision_encoder_path)\r\n print(audio_encoder_path)\r\n self.text_encoder.load_state_dict(torch.load(text_encoder_path, map_location=self.device))\r\n # self.text_encoder.tokenizer.from_pretrained(self.config.SIMS.path.bert_en,do_lower_case=True)\r\n # self.text_encoder.extractor.from_pretrained(self.config.SIMS.path.bert_en)\r\n self.vision_encoder.load_state_dict(torch.load(vision_encoder_path, map_location=self.device))\r\n self.audio_encoder.load_state_dict(torch.load(audio_encoder_path, map_location=self.device))\r\n\r\n else:\r\n mode_path = self.model_path + 'TVA_fusion' + '_model.ckpt'\r\n\r\n print('model loaded from:')\r\n print(mode_path)\r\n self.load_state_dict(torch.load(mode_path, map_location=self.device))\r\n\r\n def set_train(self, train_module=None):\r\n if train_module is None:\r\n train_module = [False, False, True, True]\r\n\r\n for param in self.parameters():\r\n param.requires_grad = train_module[3]\r\n self.text_encoder.set_train(train_module=train_module[0:2])\r\n self.vision_encoder.set_train(train_module=train_module[2])\r\n self.audio_encoder.set_train(train_module=train_module[2])\r\n","sub_path":"MOSI/model/net/constrastive/TVA_fusion.py","file_name":"TVA_fusion.py","file_ext":"py","file_size_in_byte":12346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"544217296","text":"from common.TreeNode import TreeNode\n\n\nclass Codec:\n\n def serialize(self, root):\n \"\"\"Encodes a tree to a single string.\n\n :type root: TreeNode\n :rtype: str\n \"\"\"\n if not root:\n return \"n\"\n have_node = True\n q = [root]\n res = \"\"\n while have_node:\n have_node = False\n new_q = []\n for node in q:\n if not node:\n res += 
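`cont_NTXentLoss` is imported from `util.metrics` and is not shown in this record; the hand-built `indices_tuple` suggests the `(anchor1, positive, anchor2, negative)` index convention used by pytorch-metric-learning. A rough sketch of an NT-Xent-style loss over such explicit pairs, under that assumption:

```python
import torch
import torch.nn.functional as F

def ntxent_from_pairs(emb, indices_tuple, temperature=0.1):
    # Assumed convention: (anchor1, positive, anchor2, negative) index
    # tensors; cont_NTXentLoss itself is not shown in this record.
    a1, p, a2, n = indices_tuple
    emb = F.normalize(emb, dim=-1)
    sim = emb @ emb.t() / temperature
    total = emb.new_zeros(())
    for a, pos in zip(a1.tolist(), p.tolist()):
        neg = n[a2 == a]   # negatives tied to this anchor
        logits = torch.cat((sim[a, pos].view(1), sim[a, neg])).view(1, -1)
        total = total + F.cross_entropy(logits, torch.zeros(1, dtype=torch.long))
    return total / len(a1)

emb = torch.randn(8, 16)
idx = (torch.tensor([0, 0]), torch.tensor([1, 2]),
       torch.tensor([0, 0]), torch.tensor([3, 4]))
print(ntxent_from_pairs(emb, idx))
```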
\"n,\"\n else:\n res += (str(node.val) + \",\")\n if not have_node and (node.left or node.right):\n have_node = True\n new_q.append(node.left)\n new_q.append(node.right)\n q = new_q\n return res\n\n def deserialize(self, data):\n \"\"\"Decodes your encoded data to tree.\n\n :type data: str\n :rtype: TreeNode\n \"\"\"\n if data == \"n\":\n return None\n s = data[: -1].split(\",\")\n root = TreeNode(s[0])\n prev = [root]\n i = 1\n while i < len(s):\n new_prev = []\n for j in range(i, i + len(prev) * 2):\n if s[j] != \"n\":\n new_node = TreeNode(s[j])\n if (j - i) % 2 == 0:\n prev[(j - i) / 2].left = new_node\n else:\n prev[(j - i) / 2].right = new_node\n new_prev.append(new_node)\n i += len(prev) * 2\n prev = new_prev\n return root\n","sub_path":"tree/297_serialize_and_deserialize_binary_tree.py","file_name":"297_serialize_and_deserialize_binary_tree.py","file_ext":"py","file_size_in_byte":1552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"462702555","text":"# Basic Code is taken from https://github.com/ckmarkoh/GAN-tensorflow\r\n\r\nimport tensorflow as tf\r\nfrom tensorflow.examples.tutorials.mnist import input_data\r\nimport numpy as np\r\nfrom scipy.misc import imsave\r\nimport os\r\nimport shutil\r\nfrom PIL import Image\r\nimport time\r\nimport random\r\n\r\nfrom layers import *\r\n\r\nimg_layer = 1\r\n\r\n\r\ndef build_resnet_block(inputres, dim, change_dimension=False, block_stride=2, name=\"resnet\"):\r\n with tf.variable_scope(name):\r\n if change_dimension:\r\n short_cut_conv = general_conv3d(inputres, dim, 1, 1, 1, block_stride, block_stride, block_stride, 0.02, \"VALID\", \"sc\", do_relu=False)\r\n else:\r\n short_cut_conv = inputres\r\n out_res = tf.pad(inputres, [[0, 0], [1, 1], [1, 1], [1, 1], [0, 0]], \"REFLECT\")\r\n if change_dimension:\r\n out_res = general_conv3d(out_res, dim, 3, 3, 3, block_stride, block_stride, block_stride, 0.02, \"VALID\", \"c1\")\r\n else:\r\n out_res = general_conv3d(out_res, dim, 3, 3, 3, 1, 1, 1, 0.02, \"VALID\", \"c1\")\r\n out_res = tf.pad(out_res, [[0, 0], [1, 1], [1, 1], [1, 1], [0, 0]], \"REFLECT\")\r\n out_res = general_conv3d(out_res, dim, 3, 3, 3, 1, 1, 1, 0.02, \"VALID\", \"c2\", do_relu=False)\r\n return tf.nn.relu(out_res + short_cut_conv)\r\n\r\n\r\ndef build_generator(inputgen, dim, numofres = 6, name=\"generator\"):\r\n with tf.variable_scope(name):\r\n f = 7\r\n ks = 3\r\n pad_input = tf.pad(inputgen, [[0, 0], [ks, ks], [ks, ks], [ks, ks], [0, 0]], \"REFLECT\")\r\n o_c1 = general_conv3d(pad_input, dim, f, f, f, 1, 1, 1, 0.02, name=\"c1\")\r\n o_c2 = general_conv3d(o_c1, dim * 2, ks, ks, ks, 2, 2, 2, 0.02, \"SAME\", \"c2\")\r\n o_c3 = general_conv3d(o_c2, dim * 4, ks, ks, ks, 2, 2, 2, 0.02, \"SAME\", \"c3\")\r\n o_rb = o_c3\r\n for idd in range(numofres):\r\n o_rb = build_resnet_block(o_rb, dim * 4, name='r{0}'.format(idd))\r\n o_c4 = general_deconv3d(o_rb, [1, 64, 64, 64, dim * 2], dim * 2, ks, ks, ks, 2, 2, 2, 0.02, \"SAME\", \"c4\")\r\n o_c5 = general_deconv3d(o_c4, [1, 128, 128, 128, dim], dim, ks, ks, ks, 2, 2, 2, 0.02, \"SAME\", \"c5\")\r\n o_c5_pad = tf.pad(o_c5, [[0, 0], [ks, ks], [ks, ks], [ks, ks], [0, 0]], \"REFLECT\")\r\n o_c6 = general_conv3d(o_c5_pad, img_layer, f, f, f, 1, 1, 1, 0.02, \"VALID\", \"c6\", do_relu=False)\r\n out_gen = tf.nn.tanh(o_c6, \"t1\")\r\n return out_gen\r\n\r\n\r\ndef build_gen_discriminator(inputdisc, dim, name=\"discriminator\"):\r\n with tf.variable_scope(name):\r\n f = 4\r\n o_c1 = general_conv3d(inputdisc, dim, f, f, f, 2, 2, 2, 
0.02, \"SAME\", \"c1\", do_norm=False, relufactor=0.2)\r\n o_c2 = general_conv3d(o_c1, dim * 2, f, f, f, 2, 2, 2, 0.02, \"SAME\", \"c2\", relufactor=0.2)\r\n o_c3 = general_conv3d(o_c2, dim * 4, f, f, f, 2, 2, 2, 0.02, \"SAME\", \"c3\", relufactor=0.2)\r\n o_c4 = general_conv3d(o_c3, dim * 8, f, f, f, 1, 1, 1, 0.02, \"SAME\", \"c4\", relufactor=0.2)\r\n o_c5 = general_conv3d(o_c4, 1, f, f, f, 1, 1, 1, 0.02, \"SAME\", \"c5\", do_norm=False, do_relu=False)\r\n return o_c5\r\n\r\n\r\ndef build_feature_vgg_3d(in_x, dim, name=\"vgg_3d\"):\r\n with tf.variable_scope(name):\r\n ks = 3\r\n o_c0 = general_conv3d(in_x, dim, ks, ks, ks, 1, 1, 1, 0.2, \"SAME\", name=\"c0\", do_norm=True)\r\n o_p0 = tf.nn.max_pool3d(o_c0, [1, 3, 3, 3, 1], [1, 2, 2, 2, 1], padding='SAME')\r\n o_c1 = general_conv3d(o_p0, dim*2, ks, ks, ks, 1, 1, 1, 0.2, \"SAME\", name=\"c1\", do_norm=True)\r\n o_p1 = tf.nn.max_pool3d(o_c1, [1, 3, 3, 3, 1], [1, 2, 2, 2, 1], padding='SAME')\r\n o_c2 = general_conv3d(o_p1, dim*4, ks, ks, ks, 1, 1, 1, 0.2, \"SAME\", name=\"c2\", do_norm=True)\r\n o_p2 = tf.nn.max_pool3d(o_c2, [1, 3, 3, 3, 1], [1, 2, 2, 2, 1], padding='SAME')\r\n o_c3 = general_conv3d(o_p2, dim*4, ks, ks, ks, 1, 1, 1, 0.2, \"SAME\", name=\"c3\", do_norm=True)\r\n o_p3 = tf.nn.max_pool3d(o_c3, [1, 3, 3, 3, 1], [1, 2, 2, 2, 1], padding='SAME')\r\n o_c4 = general_conv3d(o_p3, dim*4, 3, 3, 3, 1, 1, 1, 0.2, \"SAME\", name=\"c4\", do_norm=True, do_relu=True)\r\n o_p4 = tf.nn.avg_pool3d(o_c4, [1, 3, 3, 3, 1], [1, 2, 2, 2, 1], padding='VALID')\r\n return o_p4\r\n\r\n\r\ndef build_feature_resnet18(inputgen, dim, name=\"res18\"):\r\n with tf.variable_scope(name):\r\n ks = 3\r\n oc_1 = general_conv3d(inputgen, dim * 1, ks, ks, ks, 2, 2, 2, 0.02, \"SAME\", name=\"oc1\")\r\n op_1 = tf.nn.max_pool3d(oc_1, ksize=[1, 3, 3, 3, 1], strides=[1, 2, 2, 2, 1], padding=\"SAME\", name=\"op1\")\r\n r1_1 = build_resnet_block(op_1, dim * 1, name=\"r11\")\r\n r1_2 = build_resnet_block(r1_1, dim * 1, name=\"r12\")\r\n r2_1 = build_resnet_block(r1_2, dim * 2, True, name=\"r21\")\r\n r2_2 = build_resnet_block(r2_1, dim * 2, name=\"r22\")\r\n r3_1 = build_resnet_block(r2_2, dim * 4, True, name=\"r31\")\r\n r3_2 = build_resnet_block(r3_1, dim * 4, name=\"r32\")\r\n r4_1 = build_resnet_block(r3_2, dim * 8, True, name=\"r41\")\r\n r4_2 = build_resnet_block(r4_1, dim * 8, name=\"r42\")\r\n return r4_2\r\n\r\n\r\ndef build_classifier_old(input, dim, order=(1, 2), name=\"discriminator\"):\r\n with tf.variable_scope(name, reuse=tf.AUTO_REUSE) as scope:\r\n ks = 3\r\n o_c0 = general_conv3d(input, dim, ks, ks, ks, 1, 1, 1, 0.2, \"SAME\", name=\"c0\", do_norm=True)\r\n o_p0 = tf.nn.max_pool3d(o_c0, [1, 3, 3, 3, 1], [1, 2, 2, 2, 1], padding='SAME')\r\n o_c1 = general_conv3d(o_p0, dim*2, ks, ks, ks, 1, 1, 1, 0.2, \"SAME\", name=\"c1\", do_norm=True)\r\n o_p1 = tf.nn.max_pool3d(o_c1, [1, 3, 3, 3, 1], [1, 2, 2, 2, 1], padding='SAME')\r\n o_c2 = general_conv3d(o_p1, dim*4, ks, ks, ks, 1, 1, 1, 0.2, \"SAME\", name=\"c2\", do_norm=True)\r\n o_p2 = tf.nn.max_pool3d(o_c2, [1, 3, 3, 3, 1], [1, 2, 2, 2, 1], padding='SAME')\r\n o_c3 = general_conv3d(o_p2, dim*4, ks, ks, ks, 1, 1, 1, 0.2, \"SAME\", name=\"c3\", do_norm=True)\r\n o_p3 = tf.nn.max_pool3d(o_c3, [1, 3, 3, 3, 1], [1, 2, 2, 2, 1], padding='SAME')\r\n o_c4 = general_conv3d(o_p3, dim*4, 3, 3, 3, 1, 1, 1, 0.2, \"SAME\", name=\"c4\", do_norm=True, do_relu=True)\r\n o_p4 = tf.nn.avg_pool3d(o_c4, [1, 3, 3, 3, 1], [1, 2, 2, 2, 1], padding='VALID')\r\n\r\n # means = tf.get_variable(\"means\", shape=o_p4.get_shape()[1::], 
dtype=tf.float32, initializer=tf.zeros_initializer())\r\n descs = []\r\n if 1 in order:\r\n descs.append(o_p4)\r\n if 2 in order:\r\n descs.append(tf.square(o_p4) - 1)\r\n if 3 in order:\r\n descs.append(tf.square(o_p4)*o_p4)\r\n descs = tf.nn.l2_normalize(tf.concat(descs, axis=-1), axis=-1)\r\n # descs = tf.nn.l2_normalize(tf.concat((o_p4, tf.square(o_p4) - 1), axis=-1), axis=-1)\r\n\r\n # weights = tf.Variable(tf.ones([1, 4, 5, 4, 1], dtype=np.float32), name=\"attention\", trainable=True)\r\n\r\n # feats = tf.reduce_mean(descs, axis=(1, 2, 3))\r\n feats = tf.nn.l2_normalize(tf.reshape(descs, (-1, descs.shape[1]*descs.shape[2]*descs.shape[3]*descs.shape[4])), axis=-1)\r\n logits, prob = fc_op(feats, \"fc_layer\", 2, activation=tf.nn.softmax)\r\n return logits, prob, [o_c0, o_c1, o_c2, o_c3, o_c4]\r\n\r\n\r\ndef build_classifier(input, dim, order=(1, 2), name=\"discriminator\"):\r\n with tf.variable_scope(name, reuse=tf.AUTO_REUSE) as scope:\r\n ks = 3\r\n o_c0 = general_conv3d(input, dim, ks, ks, ks, 1, 1, 1, 0.2, \"SAME\", name=\"c0\", do_norm=True)\r\n o_p0 = tf.nn.max_pool3d(o_c0, [1, 3, 3, 3, 1], [1, 2, 2, 2, 1], padding='SAME')\r\n o_c1 = general_conv3d(o_p0, dim*2, ks, ks, ks, 1, 1, 1, 0.2, \"SAME\", name=\"c1\", do_norm=True)\r\n o_p1 = tf.nn.max_pool3d(o_c1, [1, 3, 3, 3, 1], [1, 2, 2, 2, 1], padding='SAME')\r\n o_c2 = general_conv3d(o_p1, dim*4, ks, ks, ks, 1, 1, 1, 0.2, \"SAME\", name=\"c2\", do_norm=True)\r\n o_p2 = tf.nn.max_pool3d(o_c2, [1, 3, 3, 3, 1], [1, 2, 2, 2, 1], padding='SAME')\r\n o_c3 = general_conv3d(o_p2, dim*4, ks, ks, ks, 1, 1, 1, 0.2, \"SAME\", name=\"c3\", do_norm=True)\r\n o_p3 = tf.nn.max_pool3d(o_c3, [1, 3, 3, 3, 1], [1, 2, 2, 2, 1], padding='SAME')\r\n o_c4 = general_conv3d(o_p3, dim*4, 3, 3, 3, 1, 1, 1, 0.2, \"SAME\", name=\"c4\", do_norm=True, do_relu=True)\r\n o_p4 = tf.nn.avg_pool3d(o_c4, [1, 3, 3, 3, 1], [1, 2, 2, 2, 1], padding='VALID')\r\n\r\n if 0 in order:\r\n feats = tf.reduce_mean(o_p4, axis=(1, 2, 3))\r\n else:\r\n descs = []\r\n if 1 in order:\r\n descs.append(o_p4)\r\n if 2 in order:\r\n descs.append(tf.square(o_p4) - 1)\r\n if 3 in order:\r\n descs.append(tf.square(o_p4) * o_p4)\r\n descs = tf.nn.l2_normalize(tf.concat(descs, axis=-1), axis=-1)\r\n feats = tf.nn.l2_normalize(\r\n tf.reshape(descs, (-1, descs.shape[1] * descs.shape[2] * descs.shape[3] * descs.shape[4])), axis=-1)\r\n logits, prob = fc_op(feats, \"fc_layer\", 2, activation=tf.nn.softmax)\r\n return logits, prob, [o_c0, o_c1, o_c2, o_c3, o_c4]\r\n\r\n","sub_path":"DRCGAN/modelcls.py","file_name":"modelcls.py","file_ext":"py","file_size_in_byte":8968,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"586610885","text":"import argparse\n\nimport hdtop.config\nimport hdtop.const\nimport hdtop.main\n\n\n_SUBPARSERS = {\n \"start\": hdtop.main.setup_argparse,\n \"config\": hdtop.config.setup_argparse,\n}\n\n\ndef main():\n \"\"\"The 'real' entry point of this program\"\"\"\n # parse args\n parser = argparse.ArgumentParser(\n prog=hdtop.const.PROG_NAME, description=hdtop.const.DESCRIPTION\n )\n parser.add_argument(\n \"action\",\n default=\"start\",\n nargs=\"?\",\n choices=_SUBPARSERS,\n help=\"Action for the program\",\n )\n\n args, remain = parser.parse_known_args()\n\n # parse sub args\n subparser: argparse.ArgumentParser = _SUBPARSERS[args.action]()\n args = subparser.parse_args(remain, args)\n\n # action\n return 
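`build_classifier` pools the feature map and then builds first-, second-, and third-order descriptors (`x`, `x**2 - 1`, `x**3`) before L2-normalizing; a standalone NumPy restatement of just that descriptor step:

```python
import numpy as np

def order_descriptors(feat, order=(1, 2)):
    # Same descriptor construction as build_classifier above: concatenate the
    # requested order statistics along the channel axis, then L2-normalize.
    descs = []
    if 1 in order:
        descs.append(feat)
    if 2 in order:
        descs.append(np.square(feat) - 1)
    if 3 in order:
        descs.append(np.square(feat) * feat)
    d = np.concatenate(descs, axis=-1)
    return d / np.maximum(np.linalg.norm(d, axis=-1, keepdims=True), 1e-12)

x = np.random.randn(2, 8).astype(np.float32)
print(order_descriptors(x, order=(1, 2)).shape)   # (2, 16)
```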
args.func(args)\n\n\nexit(main())\n","sub_path":"hdtop/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":772,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"270808849","text":"#-*- coding: utf-8 -*-\n#File: agent.py\n\nimport paddle.fluid as fluid\nfrom paddle.fluid.param_attr import ParamAttr\nimport numpy as np\nfrom tqdm import tqdm\nimport math\n\nUPDATE_TARGET_STEPS = 200\n\n\nclass Model(object):\n def __init__(self, state_dim, action_dim, gamma):\n self.global_step = 0\n self.state_dim = state_dim\n self.action_dim = action_dim\n self.gamma = gamma\n self.exploration = 1.0\n\n self._build_net()\n\n def _get_inputs(self):\n return [fluid.layers.data(\\\n name='state', shape=[self.state_dim], dtype='float32'),\n fluid.layers.data(\\\n name='action', shape=[1], dtype='int32'),\n fluid.layers.data(\\\n name='reward', shape=[], dtype='float32'),\n fluid.layers.data(\\\n name='next_s', shape=[self.state_dim], dtype='float32'),\n fluid.layers.data(\\\n name='isOver', shape=[], dtype='bool')]\n\n def _build_net(self):\n state, action, reward, next_s, isOver = self._get_inputs()\n self.pred_value = self.get_DQN_prediction(state)\n self.predict_program = fluid.default_main_program().clone()\n\n action_onehot = fluid.layers.one_hot(action, self.action_dim)\n action_onehot = fluid.layers.cast(action_onehot, dtype='float32')\n\n pred_action_value = fluid.layers.reduce_sum(\\\n fluid.layers.elementwise_mul(action_onehot, self.pred_value), dim=1)\n\n targetQ_predict_value = self.get_DQN_prediction(next_s, target=True)\n best_v = fluid.layers.reduce_max(targetQ_predict_value, dim=1)\n best_v.stop_gradient = True\n\n target = reward + (1.0 - fluid.layers.cast(\\\n isOver, dtype='float32')) * self.gamma * best_v\n cost = fluid.layers.square_error_cost(\\\n input=pred_action_value, label=target)\n cost = fluid.layers.reduce_mean(cost)\n\n self._sync_program = self._build_sync_target_network()\n\n optimizer = fluid.optimizer.Adam(1e-3)\n optimizer.minimize(cost)\n\n # define program\n self.train_program = fluid.default_main_program()\n\n # fluid exe\n place = fluid.CUDAPlace(0)\n self.exe = fluid.Executor(place)\n self.exe.run(fluid.default_startup_program())\n\n def get_DQN_prediction(self, state, target=False):\n variable_field = 'target' if target else 'policy'\n # layer fc1\n param_attr = ParamAttr(name='{}_fc1'.format(variable_field))\n bias_attr = ParamAttr(name='{}_fc1_b'.format(variable_field))\n fc1 = fluid.layers.fc(input=state,\n size=256,\n act='relu',\n param_attr=param_attr,\n bias_attr=bias_attr)\n\n param_attr = ParamAttr(name='{}_fc2'.format(variable_field))\n bias_attr = ParamAttr(name='{}_fc2_b'.format(variable_field))\n fc2 = fluid.layers.fc(input=fc1,\n size=128,\n act='tanh',\n param_attr=param_attr,\n bias_attr=bias_attr)\n\n param_attr = ParamAttr(name='{}_fc3'.format(variable_field))\n bias_attr = ParamAttr(name='{}_fc3_b'.format(variable_field))\n value = fluid.layers.fc(input=fc2,\n size=self.action_dim,\n param_attr=param_attr,\n bias_attr=bias_attr)\n\n return value\n\n def _build_sync_target_network(self):\n vars = fluid.default_main_program().list_vars()\n policy_vars = []\n target_vars = []\n for var in vars:\n if 'GRAD' in var.name: continue\n if 'policy' in var.name:\n policy_vars.append(var)\n elif 'target' in var.name:\n target_vars.append(var)\n\n policy_vars.sort(key=lambda x: x.name.split('policy_')[1])\n target_vars.sort(key=lambda x: x.name.split('target_')[1])\n\n sync_program = 
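The hdtop entry point above parses the action with `parse_known_args` and hands the leftover argv to the chosen subparser, which attaches its handler via `set_defaults(func=...)`. A self-contained sketch of that two-stage dispatch; the `--interval` flag and the printing handler are hypothetical stand-ins for the real `setup_argparse` factories:

```python
import argparse

def _sub_start():
    # Hypothetical subcommand parser standing in for hdtop.main.setup_argparse.
    p = argparse.ArgumentParser(prog="hdtop start")
    p.add_argument("--interval", type=float, default=1.0)
    p.set_defaults(func=lambda a: print("start, interval=%s" % a.interval))
    return p

_SUBPARSERS = {"start": _sub_start}

parser = argparse.ArgumentParser(prog="hdtop")
parser.add_argument("action", default="start", nargs="?", choices=_SUBPARSERS)
args, remain = parser.parse_known_args(["start", "--interval", "0.5"])
args = _SUBPARSERS[args.action]().parse_args(remain, args)
args.func(args)   # -> "start, interval=0.5"
```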
fluid.default_main_program().clone()\n with fluid.program_guard(sync_program):\n sync_ops = []\n for i, var in enumerate(policy_vars):\n sync_op = fluid.layers.assign(policy_vars[i], target_vars[i])\n sync_ops.append(sync_op)\n sync_program = sync_program.prune(sync_ops)\n return sync_program\n\n def act(self, state, train_or_test):\n sample = np.random.random()\n if train_or_test == 'train' and sample < self.exploration:\n act = np.random.randint(self.action_dim)\n else:\n state = np.expand_dims(state, axis=0)\n pred_Q = self.exe.run(self.predict_program,\n feed={'state': state.astype('float32')},\n fetch_list=[self.pred_value])[0]\n pred_Q = np.squeeze(pred_Q, axis=0)\n act = np.argmax(pred_Q)\n self.exploration = max(0.1, self.exploration - 1e-6)\n return act\n\n def train(self, state, action, reward, next_state, isOver):\n if self.global_step % UPDATE_TARGET_STEPS == 0:\n self.sync_target_network()\n self.global_step += 1\n\n action = np.expand_dims(action, -1)\n self.exe.run(self.train_program, \\\n feed={'state': state, \\\n 'action': action, \\\n 'reward': reward, \\\n 'next_s': next_state, \\\n 'isOver': isOver})\n\n def sync_target_network(self):\n self.exe.run(self._sync_program)\n","sub_path":"fluid/DeepQNetwork/agent.py","file_name":"agent.py","file_ext":"py","file_size_in_byte":5663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"32588323","text":"from pydmfet import locints, sdmfet,oep,tools,dfet_ao\nfrom pyscf import gto, scf, ao2mo\nimport numpy as np\nfrom pyscf.tools import molden\nimport time\nfrom pydmfet import dfet_ao\nfrom pydmfet.dfet_ao import dfet\n\n\nbas ='ccpvdz'\ntemp = 0.01\n\n\nmol = gto.Mole()\nmol.atom = open('C4H8.xyz').read()\nmol.basis = bas\nmol.charge = 0\nmol.build(max_memory = 4000, verbose=4)\n\n\n#mf = scf.UKS(mol)\nmf = dfet_ao.scf.EmbedSCF(mol, 0.0, smear_sigma = temp)\nmf.xc = \"pbe,pbe\"\nmf.max_cycle = 50\n\nDMguess = None\nmf.scf(dm0=DMguess)\n\n\n#embedding calc\nmyInts = locints.LocalIntegrals( mf, range( mol.nao_nr() ), 'meta_lowdin' )\nmyInts.TI_OK = False\n\nnatoms = mol.natm\nimpAtom = np.zeros([natoms], dtype=int)\nfor i in range(5):\n impAtom[i] = 1\n\n\nghost_frag = 1-impAtom\nghost_env = 1-ghost_frag\n\nmol_frag = gto.Mole()\nmol_frag.atom = tools.add_ghost(mol.atom, ghost_frag)\nmol_frag.basis = bas\nmol_frag.charge = -1\nmol_frag.build(max_memory = 4000,verbose = 4)\n\nmol_env = gto.Mole()\nmol_env.atom = tools.add_ghost(mol.atom, ghost_env)\nmol_env.basis = bas\nmol_env.charge = 1\nmol_env.build(max_memory = 4000,verbose = 4)\n\n\naoslice = mol.aoslice_by_atom()\nimpurities = np.zeros([mol.nao_nr()], dtype = int)\nfor i in range(natoms):\n if(impAtom[i] == 1):\n impurities[aoslice[i,2]:aoslice[i,3]] = 1\n\nNe_frag = 16\nboundary_atoms = np.zeros([natoms])\nboundary_atoms2 = np.zeros([natoms])\n\nboundary_atoms[5] = 1\nboundary_atoms2[5] = -1\n\n#umat = np.loadtxt('umat.gz')\n\nparams = oep.OEPparams(algorithm = '2011', opt_method = 'L-BFGS-B', \\\n ftol = 1e-8, gtol = 1e-3,diffP_tol=1e-3, outer_maxit = 50, maxit = 100,l2_lambda = 0.0, oep_print = 0)\n\n#theDMFET = dfet.DFET(mf, mol_frag, mol_env,\\\n# boundary_atoms=boundary_atoms, boundary_atoms2=boundary_atoms2,umat=umat,\\\n# oep_params=params, smear_sigma=temp, ecw_method = 'hf',mf_method = mf.xc, plot_dens=True)\n\n\ntheDMFET = sdmfet.DMFET( mf, mol_frag, mol_env,myInts,impurities, impAtom, Ne_frag, \\\n\t\t\t boundary_atoms=boundary_atoms, boundary_atoms2=boundary_atoms2,\\\n umat = None, dim_imp =19, dim_bath=19, 
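The DQN agent above syncs the target network every `UPDATE_TARGET_STEPS` training calls and acts epsilon-greedily with a linear decay floored at 0.1. The policy itself reduces to a few lines; a NumPy sketch:

```python
import numpy as np

def epsilon_greedy(q_values, exploration, action_dim, rng=np.random):
    # Same decision rule as act() above: explore with probability
    # `exploration`, otherwise take the argmax over predicted Q-values.
    if rng.random() < exploration:
        return rng.randint(action_dim)
    return int(np.argmax(q_values))

exploration = 1.0
for step in range(5):
    a = epsilon_greedy(np.array([0.1, 0.9, 0.3]), exploration, 3)
    exploration = max(0.1, exploration - 1e-6)   # same linear decay as act()
    print(step, a, exploration)
```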
dim_big =None, smear_sigma = temp, \\\n\t\t\t oep_params=params,ecw_method='ccsd', mf_method = mf.xc,\\\n use_umat_ao=False)\n\n\n#umat = theDMFET.embedding_potential()\n#energy = theDMFET.correction_energy()\n\n\n","sub_path":"examples/research/C4H8.py","file_name":"C4H8.py","file_ext":"py","file_size_in_byte":2346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"173025765","text":"## This file is part of biopy.\n## Copyright (C) 2010 Joseph Heled\n## Author: Joseph Heled \n## See the files gpl.txt and lgpl.txt for copying conditions.\n#\n\n\"\"\" Calculate Bayesian statistics sucj as Heighst Posterior Density (HPD) and\nEffective Sample Size (Beast interpretation).\n\"\"\"\n\nfrom __future__ import division\n\nimport numpy\n\n__all__ = [\"hpd\", \"effectiveSampleSize\"]\n\ndef hpd(data, level) :\n \"\"\" The Highest Posterior Density (credible) interval of C{data} at level C{level}\n (0 < level < 1). \"\"\" \n \n d = list(data)\n d.sort()\n\n nData = len(data)\n nIn = int(round(level * nData))\n if nIn < 2 :\n raise RuntimeError(\"not enough data\")\n \n i = 0\n r = d[i+nIn-1] - d[i]\n for k in range(len(d) - (nIn - 1)) :\n rk = d[k+nIn-1] - d[k]\n if rk < r :\n r = rk\n i = k\n\n assert 0 <= i <= i+nIn-1 < len(d)\n \n return (d[i], d[i+nIn-1])\n\n\ndef effectiveSampleSize(data, stepSize = 1) :\n \"\"\" Effective sample size, as computed by BEAST Tracer.\"\"\"\n samples = len(data)\n\n assert len(data) > 1,\"no stats for short sequences\"\n \n maxLag = min(samples//3, 1000)\n\n gammaStat = [0,]*maxLag\n #varGammaStat = [0,]*maxLag\n\n varStat = 0.0;\n\n if type(data) != numpy.ndarray :\n data = numpy.array(data)\n\n normalizedData = data - data.mean()\n \n for lag in range(maxLag) :\n v1 = normalizedData[:samples-lag]\n v2 = normalizedData[lag:]\n v = v1 * v2\n gammaStat[lag] = sum(v) / len(v)\n #varGammaStat[lag] = sum(v*v) / len(v)\n #varGammaStat[lag] -= gammaStat[0] ** 2\n\n # print lag, gammaStat[lag], varGammaStat[lag]\n \n if lag == 0 :\n varStat = gammaStat[0]\n elif lag % 2 == 0 :\n s = gammaStat[lag-1] + gammaStat[lag]\n if s > 0 :\n varStat += 2.0*s\n else :\n break\n \n # standard error of mean\n # stdErrorOfMean = Math.sqrt(varStat/samples);\n\n # auto correlation time\n act = stepSize * varStat / gammaStat[0]\n\n # effective sample size\n ess = (stepSize * samples) / act\n\n return ess","sub_path":"RANS/dimension-reduced-geom-infmcmc/elliptic_dili/_Elliptic_KLcoeff/v0/bayesianStats.py","file_name":"bayesianStats.py","file_ext":"py","file_size_in_byte":1987,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"497061159","text":"#!/usr/bin/python\nimport urllib2\nimport re\nimport datetime\nimport csv\nimport getopt\nimport sys\nimport os\n\nre_tag = re.compile(']*>')\noptions = {\"path\": \"\"}\n\n\nclass Spider(object):\n def __init__(self):\n self.url = \"https://eresearch.fidelity.com/eresearch/conferenceCalls.jhtml?tab=dividends&begindate=%(date)s\"\n\n def td_date(self, td):\n date_data = re_tag.sub(\"\", td).replace(\"/\", \" \").split()\n date_data = date_data[-1:] + date_data[:-1]\n return \"-\".join(date_data)\n\n def deal_tr(self, tr):\n item_data = {}\n tds_data = re.findall('', tr, re.S)\n company_data = re.findall('', tr, re.S)[0].replace(\" \", \" \")\n item_data[\"company\"] = re_tag.sub(\"\", company_data)\n website_data = re.findall('', tr, re.S)\n if len(website_data) == 0:\n website_data = \"\"\n symbol_data = re_tag.sub(\"\", tds_data[0])\n 
symbol_data = re.sub(r'\\n|\\t', '', symbol_data)\n else:\n website_data = website_data[0]\n website_data = re.findall(\"\\(.*?\\)\", website_data, re.S)[0].split(',')[1]. \\\n replace('\\'', '').replace(')', '')\n symbol_data = re.findall('', tds_data[0], re.S)[0]\n\n item_data[\"website\"] = re_tag.sub(\"\", website_data)\n item_data[\"symbol\"] = re_tag.sub(\"\", symbol_data)\n item_data[\"dividend\"] = re_tag.sub(\"\", tds_data[1])\n item_data[\"anouncement_date\"] = self.td_date(tds_data[2])\n item_data[\"record_date\"] = self.td_date(tds_data[3])\n item_data[\"ex_date\"] = self.td_date(tds_data[4])\n item_data[\"pay_date\"] = self.td_date(tds_data[5])\n return item_data\n\n def deal_tbody(self, tr_arr_content):\n data_list = []\n for tr_date in tr_arr_content:\n data_list.append(self.deal_tr(tr_date))\n return data_list\n\n def generate_date(self):\n date_list = []\n year = datetime.datetime.now().year\n month = datetime.datetime.now().month\n day = datetime.datetime.now().day\n day_range = 30\n for i in range(day_range):\n cur_date = datetime.datetime(year, month, day) + datetime.timedelta(days=i)\n cur_date = cur_date.strftime('%m/%d/%Y')\n date_list.append(cur_date)\n return date_list\n\n def get_day_data(self, date):\n response = urllib2.urlopen(self.url % ({\"date\": date})).read()\n if \"No Dividends for this date\" in response:\n return []\n table_content = \\\n re.findall('',\n response, re.S)[0]\n body_content = re.findall('', table_content, re.S)[0]\n tr_arr_content = re.findall('', body_content, re.S)\n return self.deal_tbody(tr_arr_content)\n\n def create_csv_file(self):\n date_list = self.generate_date()\n file_name = os.path.join(options[\"path\"], datetime.datetime.now().strftime(\"%Y-%m-%d-%H-%M-%S.csv\"))\n with open(file_name, \"wb\") as csvfile:\n writer = csv.writer(csvfile)\n writer.writerow(\n [\"company\", \"website\", \"symbol\", \"dividend\", \"anouncement_date\", \"record_date\",\n \"ex_date\", \"pay_date\"])\n for date in date_list:\n print(\"Crawling date: %s\" % date)\n day_data = self.get_day_data(date)\n for line in day_data:\n writer.writerows([[line[\"company\"], line[\"website\"], line[\"symbol\"],\n line[\"dividend\"], line[\"anouncement_date\"],\n line[\"record_date\"], line[\"ex_date\"], line[\"pay_date\"]]])\n print(\"Save csv file in %s\" % file_name)\n\n\ndef printUsage():\n print ('''usage: spider -o ''')\n\n\ndef main():\n try:\n opts, args = getopt.getopt(sys.argv[1:], \"ho:\",[\"path=\"])\n except getopt.GetoptError:\n print('usage: spider -o ')\n sys.exit(-1)\n for opt, arg in opts:\n if opt == '-h':\n printUsage()\n exit(0)\n elif opt in (\"-o\", \"--output\"):\n options[\"path\"] = arg\n if not os.path.exists(options[\"path\"]):\n print(\"The path: %s is not exist\" % options[\"path\"])\n exit(-1)\n if options[\"path\"] == \"\":\n print(\"Please specify the output path\")\n printUsage()\n exit(-1)\n spider = Spider()\n spider.create_csv_file()\n\n\nif __name__ == '__main__':\n main()","sub_path":"spider/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":4592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"114901106","text":"from datetime import datetime\n\nfrom django.db import models\nfrom django.core import serializers\n\nfrom edc.core.crypto_fields.classes import FieldCryptor\n\nfrom ..models import ExportTransaction\n\n\nclass ExportHistoryManager(models.Manager):\n\n export_transaction_model = ExportTransaction\n# see 
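`generate_date` in the spider above builds the next 30 calendar dates; since `timedelta` already handles month and year rollover, the loop can be restated compactly (same `%m/%d/%Y` format the Fidelity URL expects):

```python
import datetime

def next_dates(day_range=30, fmt="%m/%d/%Y"):
    # Equivalent to generate_date() above: today plus 0..day_range-1 days.
    today = datetime.date.today()
    return [(today + datetime.timedelta(days=i)).strftime(fmt)
            for i in range(day_range)]

print(next_dates(3))   # e.g. ['07/30/2024', '07/31/2024', '08/01/2024']
```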
https://github.com/treyhunner/django-simple-history/ for some ideas to improve this\n# def __init__(self, model, instance=None):\n# super(ExportHistoryManager, self).__init__()\n# self.model = model\n# self.instance = instance\n#\n# def get_query_set(self):\n# if self.instance is None:\n# return super(ExportHistoryManager, self).get_query_set()\n# return super(ExportHistoryManager, self).get_query_set().filter(**{'instance_pk': self.instance.pk})\n\n def serialize_to_export_transaction(self, instance, change_type, using, encrypt=True, force_export=False):\n \"\"\"Serialize this instance to the export transaction model if ready.\n\n Be sure to inspect model property ready_to_export_transaction. ready_to_export_transaction can\n return True or False. If False, the tx will not be exported.\n\n if model method :func:`ready_to_export_transaction` has not been defined,\n export will proceed.\n\n .. note:: If change_type == 'D', entire tx is still sent.\"\"\"\n try:\n ready_to_export_transaction = force_export or instance.ready_to_export_transaction\n except AttributeError as attribute_error:\n if str(attribute_error).endswith(\"has no attribute 'ready_to_export_transaction'\"):\n ready_to_export_transaction = True\n else:\n raise\n if ready_to_export_transaction:\n if instance._meta.proxy_for_model: # if this is a proxy model, get to the main model\n instance = instance._meta.proxy_for_model.objects.get(id=instance.id)\n json_tx = serializers.serialize(\"json\", [instance, ], ensure_ascii=False, use_natural_keys=False)\n if encrypt:\n json_tx = FieldCryptor('aes', 'local').encrypt(json_tx)\n return ExportTransaction.objects.using(using).create(\n app_label=instance._meta.app_label,\n object_name=instance._meta.object_name,\n tx_pk=instance.id,\n export_change_type=change_type,\n exported=False,\n export_uuid=instance.export_uuid,\n status='new',\n tx=json_tx,\n timestamp=datetime.today().strftime('%Y%m%d%H%M%S%f'),\n )\n","sub_path":"edc/export/managers/export_history_manager.py","file_name":"export_history_manager.py","file_ext":"py","file_size_in_byte":2602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"102605234","text":"from django.shortcuts import render\nfrom reports import reports\nfrom catalog.catalog import get_returned_lent_tools\n\n\ndef show_carpenter_summary(request):\n carpenter_orders = reports.get_carpenter_orders(request.user)\n carpenter_orders_commission_total = reports.get_carpenter_order_totals_commission(request.user)\n assigned_orders = reports.get_assigned_orders(request.user)\n assigned_orders_commission_total = reports.get_assigned_order_totals_commission(request.user)\n total_commission = reports.total_commission(request.user)\n salary = reports.get_salary(request.user)\n gross_salary = reports.carpenter_gross_salary(request.user)\n orders_commission = float(0.02)\n assigned_order_commission = float(0.01)\n return render(request, 'reports/carpenter_report.html', locals())\n\n\ndef show_monthly_summary(request):\n # lending_fees(pending)\n # delivery_fees(pending)\n salary_totals = reports.get_salary_totals()\n orders_totals = reports.get_orders_totals()\n commission_totals = reports.get_commissions_grand_totals()\n damage_fee_totals = reports.get_total_tool_damage_fees()\n storage_fee_totals = reports.get_total_storage_fees()\n lending_fee_totals = reports.get_total_lending_fees()\n\n lent_tools = get_returned_lent_tools()\n orders_with_storage_fee = reports.get_orders_with_storage_fee()\n damaged_tools = 
reports.get_damaged_tools()\n carpenter_profiles = reports.get_carpenter_profiles()\n complete_orders = reports.get_all_complete_orders()\n carpenter_commission_totals = reports.get_carpenter_commissions_subtotals()\n temp_carpenter_commission_totals = reports.get_temp_carpenter_commissions_subtotals()\n return render(request, 'reports/monthly_report.html', locals())\n","sub_path":"furniture_palace/reports/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1747,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"268283596","text":"__author__ = 'tahsin'\r\nimport os\r\n\r\n\"\"\"\r\n This is the application starter. It can be used to run the application instead of running each step manually.\r\n\"\"\"\r\n\r\nif __name__ == '__main__':\r\n\r\n if (os.path.exists('testdata.json')):\r\n print('testdata.json removed!')\r\n os.remove('testdata.json')\r\n\r\n # run the scrapy spider\r\n # os.system('scrapy crawl katcr -o testdata.json > out.txt')\r\n os.system('scrapy crawl yobit -o testdata.json > out.txt')\r\n # print(sys.path)\r\n # sort data\r\n # os.system('python main.py')\r\n\r\n # start server\r\n # os.system('python -m SimpleHTTPServer')\r\n\r\n print(\"DONE!\")\r\n\r\n\r\n","sub_path":"yobit_spider/run-application.py","file_name":"run-application.py","file_ext":"py","file_size_in_byte":644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"211579775","text":"# -*- coding: utf-8 -*-\n# ------------------------------------------------------------------------------\n#\n# Copyright 2018-2019 Fetch.AI Limited\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# ------------------------------------------------------------------------------\n\"\"\"This module contains the stub connection.\"\"\"\n\nimport asyncio\nimport codecs\nimport logging\nimport re\nfrom asyncio import CancelledError\nfrom asyncio.tasks import Task\nfrom concurrent.futures.thread import ThreadPoolExecutor\nfrom contextlib import contextmanager\nfrom pathlib import Path\nfrom typing import AsyncIterable, IO, List, Optional, Union\n\nfrom aea.configurations.base import PublicId\nfrom aea.connections.base import Connection, ConnectionStates\nfrom aea.helpers import file_lock\nfrom aea.helpers.base import exception_log_and_reraise\nfrom aea.mail.base import Envelope\n\n\nlogger = logging.getLogger(__name__)\n\nINPUT_FILE_KEY = \"input_file\"\nOUTPUT_FILE_KEY = \"output_file\"\nDEFAULT_INPUT_FILE_NAME = \"./input_file\"\nDEFAULT_OUTPUT_FILE_NAME = \"./output_file\"\nSEPARATOR = b\",\"\n\nPUBLIC_ID = PublicId.from_str(\"fetchai/stub:0.8.0\")\n\n\ndef _encode(e: Envelope, separator: bytes = SEPARATOR):\n result = b\"\"\n result += e.to.encode(\"utf-8\")\n result += separator\n result += e.sender.encode(\"utf-8\")\n result += separator\n result += str(e.protocol_id).encode(\"utf-8\")\n result += separator\n result += e.message_bytes\n result += separator\n\n return result\n\n\ndef _decode(e: bytes, 
separator: bytes = SEPARATOR):\n split = e.split(separator)\n\n if len(split) < 5 or split[-1] not in [b\"\", b\"\\n\"]:\n raise ValueError(\n \"Expected at least 5 values separated by commas and last value being empty or new line, got {}\".format(\n len(split)\n )\n )\n\n to = split[0].decode(\"utf-8\").strip()\n sender = split[1].decode(\"utf-8\").strip()\n protocol_id = PublicId.from_str(split[2].decode(\"utf-8\").strip())\n # protobuf messages cannot be delimited as they can contain an arbitrary byte sequence; however\n # we know everything remaining constitutes the protobuf message.\n message = SEPARATOR.join(split[3:-1])\n message = codecs.decode(message, \"unicode-escape\").encode(\"utf-8\")\n\n return Envelope(to=to, sender=sender, protocol_id=protocol_id, message=message)\n\n\n@contextmanager\ndef lock_file(file_descriptor: IO[bytes]):\n \"\"\"Lock file in context manager.\n\n :param file_descriptor: file descriptio of file to lock.\n \"\"\"\n with exception_log_and_reraise(\n logger.error, f\"Couldn't acquire lock for file {file_descriptor.name}: {{}}\"\n ):\n file_lock.lock(file_descriptor, file_lock.LOCK_EX)\n\n try:\n yield\n finally:\n file_lock.unlock(file_descriptor)\n\n\ndef write_envelope(envelope: Envelope, file_pointer: IO[bytes]) -> None:\n \"\"\"Write envelope to file.\"\"\"\n encoded_envelope = _encode(envelope, separator=SEPARATOR)\n logger.debug(\"write {}: to {}\".format(encoded_envelope, file_pointer.name))\n write_with_lock(file_pointer, encoded_envelope)\n\n\ndef write_with_lock(file_pointer: IO[bytes], data: Union[bytes]) -> None:\n \"\"\"Write bytes to file protected with file lock.\"\"\"\n with lock_file(file_pointer):\n file_pointer.write(data)\n file_pointer.flush()\n\n\ndef _process_line(line: bytes) -> Optional[Envelope]:\n \"\"\"\n Process a line of the file.\n\n Decode the line to get the envelope, and put it in the agent's inbox.\n\n :return: Envelope\n :raise: Exception\n \"\"\"\n logger.debug(\"processing: {!r}\".format(line))\n envelope = None # type: Optional[Envelope]\n try:\n envelope = _decode(line, separator=SEPARATOR)\n except ValueError as e:\n logger.error(\"Bad formatted line: {!r}. {}\".format(line, e))\n except Exception as e: # pragma: nocover # pylint: disable=broad-except\n logger.exception(\"Error when processing a line. Message: {}\".format(str(e)))\n return envelope\n\n\nclass StubConnection(Connection):\n r\"\"\"A stub connection.\n\n This connection uses two files to communicate: one for the incoming messages and\n the other for the outgoing messages. 
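`lock_file` above wraps `aea.helpers.file_lock` in a context manager so concurrent readers and writers of the stub files are serialized. An equivalent sketch using the stdlib `fcntl` module instead of the aea helper (POSIX only, advisory locking):

```python
import fcntl
import tempfile
from contextlib import contextmanager

@contextmanager
def locked(fp):
    # Exclusive advisory lock for the duration of the with-block.
    fcntl.flock(fp, fcntl.LOCK_EX)
    try:
        yield
    finally:
        fcntl.flock(fp, fcntl.LOCK_UN)

with tempfile.NamedTemporaryFile() as fp:
    with locked(fp):
        fp.write(b"payload\n")
        fp.flush()
```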
Each line contains an encoded envelope.\n\n    The format of each line is the following:\n\n        TO,SENDER,PROTOCOL_ID,ENCODED_MESSAGE\n\n    e.g.:\n\n        recipient_agent,sender_agent,default,{\"type\": \"bytes\", \"content\": \"aGVsbG8=\"}\n\n    The connection detects new messages by watching the input file for new lines.\n\n    To post a message on the input file, you can use e.g.\n\n        echo \"...\" >> input_file\n\n    or:\n\n        #>>> fp = open(DEFAULT_INPUT_FILE_NAME, \"ab+\")\n        #>>> fp.write(b\"...\\n\")\n\n    Adding a message with a text editor is discouraged, since the outcome depends on the text editor used.\n    \"\"\"\n\n    connection_id = PUBLIC_ID\n\n    message_regex = re.compile(\n        (b\"[^\" + SEPARATOR + b\"]*\" + SEPARATOR) * 3 + b\".*,[\\n]?\", re.DOTALL\n    )\n\n    read_delay = 0.001\n\n    def __init__(self, **kwargs):\n        \"\"\"Initialize a stub connection.\"\"\"\n        super().__init__(**kwargs)\n        input_file: str = self.configuration.config.get(\n            INPUT_FILE_KEY, DEFAULT_INPUT_FILE_NAME\n        )\n        output_file: str = self.configuration.config.get(\n            OUTPUT_FILE_KEY, DEFAULT_OUTPUT_FILE_NAME\n        )\n        input_file_path = Path(input_file)\n        output_file_path = Path(output_file)\n        if not input_file_path.exists():\n            input_file_path.touch()\n\n        self.input_file = open(input_file_path, \"rb+\")\n        self.output_file = open(output_file_path, \"wb+\")\n\n        self.in_queue = None  # type: Optional[asyncio.Queue]\n\n        self._read_envelopes_task: Optional[Task] = None\n        self._write_pool = ThreadPoolExecutor(\n            max_workers=1, thread_name_prefix=\"stub_connection_writer_\"\n        )  # sequential writes only, but off the event loop thread\n\n    async def _file_read_and_trunc(self, delay: float = 0.001) -> AsyncIterable[bytes]:\n        \"\"\"\n        Generate input file read chunks and truncate data already read.\n\n        :param delay: float, delay on empty read.\n\n        :return: async generator yielding file read bytes.\n        \"\"\"\n        while True:\n            with lock_file(self.input_file):\n                data = self.input_file.read()\n                if data:\n                    self.input_file.truncate(0)\n                    self.input_file.seek(0)\n\n            if data:\n                yield data\n            else:\n                await asyncio.sleep(delay)\n\n    async def read_envelopes(self) -> None:\n        \"\"\"Read envelopes from input file, decode and put into in_queue.\"\"\"\n        assert self.in_queue is not None, \"Input queue not initialized.\"\n        assert self._loop is not None, \"Loop not initialized.\"\n\n        logger.debug(\"Read messages!\")\n        async for data in self._file_read_and_trunc(delay=self.read_delay):\n            lines = self._split_messages(data)\n            for line in lines:\n                envelope = _process_line(line)\n\n                if envelope is None:\n                    continue\n\n                logger.debug(f\"Add envelope {envelope}\")\n                await self.in_queue.put(envelope)\n\n    @classmethod\n    def _split_messages(cls, data: bytes) -> List[bytes]:\n        \"\"\"\n        Split binary data on messages.\n\n        :param data: bytes\n\n        :return: list of bytes\n        \"\"\"\n        return [m.group(0) for m in cls.message_regex.finditer(data)]\n\n    async def receive(self, *args, **kwargs) -> Optional[\"Envelope\"]:\n        \"\"\"Receive an envelope.\"\"\"\n        if self.in_queue is None:  # pragma: nocover\n            logger.error(\"Input queue not initialized.\")\n            return None\n\n        try:\n            return await self.in_queue.get()\n        except Exception:  # pylint: disable=broad-except\n            logger.exception(\"Stub connection receive error:\")\n            return None\n\n    async def connect(self) -> None:\n        \"\"\"Set up the connection.\"\"\"\n        if self.is_connected:\n            return\n\n        self._state.set(ConnectionStates.connecting)\n\n        try:\n            self._loop = asyncio.get_event_loop()\n            self.in_queue = asyncio.Queue()\n            self._read_envelopes_task = 
self._loop.create_task(self.read_envelopes())\n            self._state.set(ConnectionStates.connected)\n        except Exception:  # pragma: no cover\n            self._state.set(ConnectionStates.disconnected)\n            raise\n\n    async def _stop_read_envelopes(self) -> None:\n        \"\"\"\n        Stop the read envelopes task.\n\n        Cancel the task and wait for it to complete.\n        \"\"\"\n        if not self._read_envelopes_task:\n            return  # pragma: nocover\n\n        if not self._read_envelopes_task.done():\n            self._read_envelopes_task.cancel()\n\n        try:\n            await self._read_envelopes_task\n        except CancelledError:\n            pass  # task was cancelled, that was expected\n        except BaseException:  # pragma: nocover # pylint: disable=broad-except\n            logger.exception(\n                \"during envelope read\"\n            )  # do not re-raise the exception, because this runs during task stop\n\n    async def disconnect(self) -> None:\n        \"\"\"\n        Disconnect from the channel.\n\n        In this type of connection there's no channel to disconnect.\n        \"\"\"\n        if self.is_disconnected:\n            return\n\n        assert self.in_queue is not None, \"Input queue not initialized.\"\n\n        self._state.set(ConnectionStates.disconnecting)\n        await self._stop_read_envelopes()\n        self._write_pool.shutdown(wait=False)\n        self.in_queue.put_nowait(None)\n        self._state.set(ConnectionStates.disconnected)\n\n    async def send(self, envelope: Envelope) -> None:\n        \"\"\"\n        Send messages.\n\n        :return: None\n        \"\"\"\n        assert self.loop is not None, \"Loop not initialized.\"\n        await self.loop.run_in_executor(\n            self._write_pool, write_envelope, envelope, self.output_file\n        )\n","sub_path":"aea/connections/stub/connection.py","file_name":"connection.py","file_ext":"py","file_size_in_byte":10498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"332839732","text":"from time import sleep\nfrom threading import Thread\n\ndef sing():\n    for i in range(5):\n        print(\"Singing...%d\" % i)\n        sleep(1)\n\ndef dance():\n    for i in range(5):\n        print(\"Dancing...%d\" % i)\n        sleep(1)\n\n\ndef main():\n    t1 = Thread(target=sing)\n    t2 = Thread(target=dance)\n    t1.start()\n    t2.start()\n\nif __name__ == '__main__':\n    main()\n","sub_path":"15_多任务/zph_02_多任务-线程-demo.py","file_name":"zph_02_多任务-线程-demo.py","file_ext":"py","file_size_in_byte":379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"537496079","text":"import pytest\nfrom mock import MagicMock\n\nfrom menu_sun_integration.application.adapters.order_adapter import OrderAdapter\nfrom menu_sun_integration.infrastructure.serbom.presentations.order.serbom_order_post_request import \\\n    SerbomOrderPostOrderRequest\nfrom menu_sun_integration.infrastructure.serbom.translators.benjamin_order_translator import BenjaminOrderTranslator\nfrom menu_sun_integration.presentations.order.order_sqs_platform import OrderDetailSQSPlatform, \\\n    OrderItemDetailSQSPlatform, OrderAddressSQSPlatform, OrderCustomerSQSPlatform, OrderStatusSQSPlatform, \\\n    OrderSQSMessagePlatform\nfrom test.menu_sun_integration.infrastructure.stubs.FakeClient import FakeClient\n\n\nclass TestSerbomOrderTranslator:\n    @pytest.fixture\n    def order_raw_from_platform(self):\n        payload = {\n            \"ReceiptHandle\": \"AAAAA\",\n            \"Body\": {\n                \"order_id\": \"M299999999\",\n                \"total\": 31.80,\n                \"discount\": 0.02,\n                \"shipping\": 10.00,\n                \"subtotal\": 21.80,\n                \"shipping_address\": {\n                    \"name\": \"Shipping Address Name\",\n                    \"street\": \"Shipping Address Street\",\n                    \"number\": 1000,\n                    \"complement\": \"Shipping Address Complement\",\n                    \"reference\": \"Shipping Address Reference\",\n                    \"neighborhood\": \"Shipping 
Address Neighborhood\",\n \"state_code\": \"Shipping Address State Code\",\n \"city\": \"Shipping Address City\",\n \"country_code\": \"Shipping Address Country Code\",\n \"postcode\": \"Shipping Address Postcode\"\n },\n \"billing_address\": {\n \"name\": \"Billing Address Name\",\n \"street\": \"Billing Address Street\",\n \"number\": 1111,\n \"complement\": \"Billing Address Complement\",\n \"reference\": \"Billing Address Reference\",\n \"neighborhood\": \"Billing Address Neighborhood\",\n \"state_code\": \"Billing Address State Code\",\n \"city\": \"Billing Address City\",\n \"country_code\": \"Billing Address Country Code\",\n \"postcode\": \"Billing Address Postcode\"\n },\n \"items\": [\n {\n \"sku\": \"13201\",\n \"name\": \"Cerveja DUff's\",\n \"ean\": \"42040613\",\n \"ncm\": \"2203.00.00\",\n \"price\": 31.80,\n \"original_price\": 32.00,\n \"quantity\": 1\n },\n {\n \"sku\": \"13202\",\n \"name\": \"Cerveja Beck's\",\n \"ean\": \"42040613\",\n \"ncm\": \"2203.00.00\",\n \"price\": 31.80,\n \"original_price\": 32.00,\n \"quantity\": 1\n }\n ],\n \"customer\": {\n \"name\": \"Luke Skywalker\",\n \"document\": \"00005234000121\",\n \"email\": \"luke@starwars.com\",\n \"phone_number\": \"5511999999999\",\n },\n \"statuses\": [{\n \"status\": \"NEW\",\n \"comments\": \"\",\n \"updated_date\": \"2020-05-13T14:41:25\"\n }, {\n \"status\": \"APPROVED\",\n \"comments\": \"\",\n \"updated_date\": \"2020-04-13T14:41:25\"\n }],\n \"order_date\": \"2020-04-13T14:41:25\",\n \"delivery_date\": \"2020-04-14T14:41:25\",\n \"seller_code\": \"0810204\",\n \"payment_code\": \"2\",\n \"seller_id\": 1,\n \"integration_type\": \"NONE\"\n }\n }\n return payload\n\n def test_benjamin_order_translator(self, order_raw_from_platform):\n receipt_handle = order_raw_from_platform[\"ReceiptHandle\"]\n body = order_raw_from_platform.get(\"Body\", {})\n\n order_detail = OrderDetailSQSPlatform.from_dict(body)\n order_detail_items = [OrderItemDetailSQSPlatform.from_dict(item) for item in body.get('items', {})]\n order_detail.items = order_detail_items\n order_detail.shipping_address = OrderAddressSQSPlatform.from_dict(body.get(\"shipping_address\", {}))\n order_detail.billing_address = OrderAddressSQSPlatform.from_dict(body.get(\"billing_address\", {}))\n order_detail.customer = OrderCustomerSQSPlatform.from_dict(body.get(\"customer\", {}))\n order_detail.statuses = [OrderStatusSQSPlatform.from_dict(item) for item in body.get('statuses', {})]\n\n queue_message = OrderSQSMessagePlatform(receipt_handle=receipt_handle, body=order_detail)\n\n fake_client = FakeClient()\n fake_client.post_order = MagicMock(return_value=True)\n translator = BenjaminOrderTranslator()\n adapter = OrderAdapter(client=fake_client, translator=translator)\n adapter.send_to_seller(queue_message.body)\n\n request_to_serbom = translator.to_seller_send_format(order_detail)\n\n requested: SerbomOrderPostOrderRequest = fake_client.post_order.call_args_list[0][0][0]\n\n assert fake_client.post_order.called\n assert fake_client.post_order.call_count == 1\n assert request_to_serbom.order_id == requested.order_id\n assert request_to_serbom.document == requested.document\n assert request_to_serbom.payment_code == requested.payment_code\n assert request_to_serbom.order_date == requested.order_date\n assert request_to_serbom.delivery_date == requested.delivery_date\n assert request_to_serbom.document_supplier == requested.document_supplier\n for serbom_item_request, serbom_item_requested in zip(request_to_serbom.items, requested.items):\n assert 
serbom_item_request.name == serbom_item_requested.name\n assert serbom_item_request.sku == serbom_item_requested.sku\n assert serbom_item_request.quantity == serbom_item_requested.quantity\n","sub_path":"test/menu_sun_integration/infrastructure/serbom/translators/test_benjamin_order_translator.py","file_name":"test_benjamin_order_translator.py","file_ext":"py","file_size_in_byte":6197,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"407327219","text":"# Copyright (c) 2013 Rackspace, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n# implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom stevedore import driver\n\nfrom marconi.common import config\nfrom marconi.common import decorators\nfrom marconi.common import exceptions\nfrom marconi.openstack.common import log\nfrom marconi.queues import transport # NOQA\n\n\nPROJECT_CFG = config.project('marconi')\nCFG = config.namespace('drivers').from_options(\n transport='wsgi',\n storage='sqlite')\n\nLOG = log.getLogger(__name__)\n\n\nclass Bootstrap(object):\n \"\"\"Defines the Marconi bootstrapper.\n\n The bootstrap loads up drivers per a given configuration, and\n manages their lifetimes.\n \"\"\"\n\n def __init__(self, config_file=None, cli_args=None):\n PROJECT_CFG.load(filename=config_file, args=cli_args)\n log.setup('marconi')\n\n @decorators.lazy_property(write=False)\n def storage(self):\n LOG.debug(_(u'Loading Storage Driver'))\n try:\n mgr = driver.DriverManager('marconi.storage',\n CFG.storage,\n invoke_on_load=True)\n return mgr.driver\n except RuntimeError as exc:\n LOG.exception(exc)\n raise exceptions.InvalidDriver(exc)\n\n @decorators.lazy_property(write=False)\n def transport(self):\n LOG.debug(_(u'Loading Transport Driver'))\n try:\n mgr = driver.DriverManager('marconi.transport',\n CFG.transport,\n invoke_on_load=True,\n invoke_args=[self.storage])\n return mgr.driver\n except RuntimeError as exc:\n LOG.exception(exc)\n raise exceptions.InvalidDriver(exc)\n\n def run(self):\n self.transport.listen()\n","sub_path":"marconi/queues/bootstrap.py","file_name":"bootstrap.py","file_ext":"py","file_size_in_byte":2310,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"256642063","text":"from flask import render_template, redirect, url_for, request\nfrom flask_login import login_required\nfrom flask_babel import lazy_gettext as _\n\nfrom ..ext import db\nfrom ..profile import profile_bp\nfrom ..profile.forms import ProfileForm\nfrom ..models import User\nfrom ..utils.message import success\n\n\n@profile_bp.route('/', methods=['POST', 'GET'])\n@login_required\ndef details(user_id):\n user = User.query.get_or_404(user_id)\n form = None\n if request.method == 'POST':\n form = ProfileForm()\n if form.validate_on_submit():\n form.populate_obj(obj=user)\n db.session.add(user)\n db.session.commit()\n success(_('Your profile data has been saved'))\n return redirect(url_for('profile.details', user_id=user_id))\n if form is None:\n form = ProfileForm(obj=user)\n ctx = {\n 'user': 
user,\n 'form': form,\n }\n return render_template('profile/details.html', **ctx)\n","sub_path":"ecpapp/profile/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":974,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"378797851","text":"from django.contrib import admin\n\nfrom api_2007scape_tools.monsters.models import Monster\n\n\n@admin.register(Monster)\nclass MonsterAdmin(admin.ModelAdmin):\n model = Monster\n\n fields = [\n \"image_tag\",\n \"name\",\n \"combat_level\",\n \"hitpoints\",\n \"is_members\",\n \"attack_speed\",\n \"base_attack\",\n \"base_strength\",\n \"base_defence\",\n \"base_magic\",\n \"base_ranged\",\n \"offensive_stab\",\n \"offensive_slash\",\n \"offensive_crush\",\n \"offensive_magic\",\n \"offensive_ranged\",\n \"defensive_stab\",\n \"defensive_slash\",\n \"defensive_crush\",\n \"defensive_magic\",\n \"defensive_ranged\",\n \"melee_strength\",\n \"ranged_strength\",\n \"magic_damage\",\n \"accuracy\",\n ]\n\n list_display = [\"image_tag\", \"name\"]\n\n readonly_fields = (\"image_tag\",)\n","sub_path":"api_2007scape_tools/monsters/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":889,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"102611238","text":"import numpy as np\nimport skfuzzy as fuzz\n#from matplotlib import pyplot as plt\n\nclass Fuzzy:\n def __init__(self, cpu_value, mem_value, truput_value):\n self.cpu_val = cpu_value\n self.mem_val = mem_value\n self.truput_val = truput_value\n print(\"Cpu val \"+str(cpu_value)+\" Mem Val \"+str(mem_value)+\" Thruput Val \"+ str(truput_value))\n # all input valur in integer\n # input : Cpu Val, RAM mem val, throughput Value\n # cpu val range : 0 - 100%\n # RAM mem val range : 0 - 100%\n # throughput val range : normalized 0 - 100%\n # output : load = load window change\n # load val range = -0.6 - 0.4\n x_cpu = np.arange(0, 100, 1)\n x_mem = np.arange(0, 100, 1)\n x_truput = np.arange(0, 100, 1)\n x_load = np.arange(-0.6, 0.4, 0.01)\n\n\n # Membership functions\n cpu_verylow = fuzz.trapmf(x_cpu, [-20, -10, 10, 40 ])\n cpu_low = fuzz.trapmf(x_cpu, [-10, 20, 30, 60 ])\n cpu_medium = fuzz.trapmf(x_cpu, [15, 45, 55, 85 ])\n cpu_high = fuzz.trapmf(x_cpu, [40, 70, 80, 110 ])\n cpu_veryhigh = fuzz.trapmf(x_cpu, [60, 90, 105, 110 ])\n\n mem_verylow = fuzz.trapmf(x_mem, [-20, -10, 10, 40 ])\n mem_low = fuzz.trapmf(x_mem, [-10, 20, 30, 60 ])\n mem_medium = fuzz.trapmf(x_mem, [15, 45, 55, 85 ])\n mem_high = fuzz.trapmf(x_mem, [40, 70, 80, 110 ])\n mem_veryhigh = fuzz.trapmf(x_mem, [60, 90, 105, 110 ])\n\n truput_low = fuzz.trapmf(x_truput, [-40, -10, 20, 60 ])\n truput_medium = fuzz.trapmf(x_truput, [10, 40, 60, 90 ])\n truput_high = fuzz.trapmf(x_truput, [40, 80, 110, 140 ])\n\n load_extdec = fuzz.trapmf(x_load, [-0.7, -0.65, -0.55, -0.5 ])\n load_veryfastdec = fuzz.trimf(x_load, [ -0.6, -0.5, -0.4 ])\n load_fastdec = fuzz.trimf(x_load, [ -0.5, -0.4, -0.3 ])\n load_dec = fuzz.trimf(x_load, [ -0.4, -0.3, -0.2 ])\n load_smalldec = fuzz.trimf(x_load, [ -0.3, -0.2, -0.1 ])\n load_verysmalldec = fuzz.trimf(x_load, [ -0.2, -0.1, 0 ])\n load_nochange = fuzz.trimf(x_load, [-0.1, 0, 0.1 ])\n load_smallincrease = fuzz.trimf(x_load, [ 0, 0.1, 0.2 ])\n load_increase = fuzz.trimf(x_load, [ 0.1, 0.2, 0.3 ])\n load_fastincrease = fuzz.trimf(x_load, [ 0.2, 0.3, 0.4 ])\n load_veryfastincrease = fuzz.trapmf(x_load, [0.3, 0.35, 0.45, 0.5 ])\n\n # Input: score\n cpu_score = self.cpu_val \n mem_score = 
self.mem_val\n truput_score = self.truput_val\n\n cpu_verylow_degree = fuzz.interp_membership(\n x_cpu, cpu_verylow, cpu_score)\n cpu_low_degree = fuzz.interp_membership(\n x_cpu, cpu_low, cpu_score)\n cpu_medium_degree = fuzz.interp_membership(\n x_cpu, cpu_medium, cpu_score)\n cpu_high_degree = fuzz.interp_membership(\n x_cpu, cpu_high, cpu_score)\n cpu_veryhigh_degree = fuzz.interp_membership(\n x_cpu, cpu_veryhigh, cpu_score)\n\n mem_verylow_degree = fuzz.interp_membership(\n x_mem, mem_verylow, mem_score)\n mem_low_degree = fuzz.interp_membership(\n x_mem, mem_low, mem_score)\n mem_medium_degree = fuzz.interp_membership(\n x_mem, mem_medium, mem_score)\n mem_high_degree = fuzz.interp_membership(\n x_mem, mem_high, mem_score)\n mem_veryhigh_degree = fuzz.interp_membership(\n x_mem, mem_veryhigh, mem_score)\n\n thruput_low_degree = fuzz.interp_membership(\n x_truput, truput_low, truput_score)\n thruput_medium_degree = fuzz.interp_membership(\n x_truput, truput_medium, truput_score)\n thruput_high_degree = fuzz.interp_membership(\n x_truput, truput_high, truput_score)\n\n # Whole config\n #fig_scale_x = 2.0\n #fig_scale_y = 1.5\n #fig = plt.figure(figsize=(6.4 * fig_scale_x, 6.4 * fig_scale_y))\n #row = 3\n #col = 3 \n\n # =======================================\n # Mamdani (max-min) inference method:\n # * min because of logic 'and' connective.\n # 1) ed_degree <-> loadchange_ed\n # 2) vfd_degree <-> loadchange_vfd\n # 3) fd_degree <-> loadchange_fd\n # 4) dec_degree <-> loadchange_dec\n # 5) sd_degree <-> loadchange_sd\n # 6) vsd_degree <-> loadchange_vsd\n # 7) nc_degree <-> loadchange_nc\n # 8) si_degree <-> loadchange_si\n # 9) inc_degree <-> loadchange_inc\n # 10) fi_degree <-> loadchange_fi\n # 11) vfi_degree <-> loadchange_vfi\n\n # Apply Fuzzy Rule\n\n #slow decrease load window value change\n sd_degree1 = np.fmax(cpu_verylow_degree, mem_veryhigh_degree)\n #very slow decrease load window value change\n vsd_degree1 = np.fmax(cpu_verylow_degree, mem_high_degree)\n #increase load window value change\n inc_degree1 = np.fmax(cpu_verylow_degree, mem_medium_degree)\n #fast increase load window value change\n fi_degree1 = np.fmax(cpu_verylow_degree, mem_low_degree)\n #very fast increase load window value change\n vfi_degree1 = np.fmax(cpu_verylow_degree, mem_verylow_degree)\n\n #decrease load window value change\n dec_degree2 = np.fmax(cpu_low_degree, mem_veryhigh_degree)\n #slow decrease load window value change\n sd_degree2 = np.fmax(cpu_low_degree, mem_high_degree)\n #small increas load window value change\n si_degree2 = np.fmax(cpu_low_degree, mem_medium_degree)\n #increase load window value change\n inc_degree2 = np.fmax(cpu_low_degree, mem_low_degree)\n #fast increase load window value change\n fi_degree2 = np.fmax(cpu_low_degree, mem_verylow_degree)\n\n #fast decrease load window value change\n fd_degree3 = np.fmax(cpu_medium_degree, mem_veryhigh_degree)\n #decrease load window value change\n dec_degree3 = np.fmax(cpu_medium_degree, mem_high_degree)\n #no change load window value change\n nc_degree3 = np.fmax(cpu_medium_degree, mem_medium_degree)\n #small increase load window value change\n si_degree3 = np.fmax(cpu_medium_degree, mem_low_degree)\n #increase load window value change\n inc_degree3 = np.fmax(cpu_medium_degree, mem_verylow_degree)\n\n #very fast decrease load window value change\n vfd_degree4 = np.fmax(cpu_high_degree, mem_veryhigh_degree)\n #fast decrease load window value change\n fd_degree4 = np.fmax(cpu_high_degree, mem_high_degree)\n #decrease load window value 
change\n dec_degree4 = np.fmax(cpu_high_degree, mem_medium_degree)\n #slow decrease load window value change\n sd_degree4 = np.fmax(cpu_high_degree, mem_low_degree)\n #very small decrease load window value change\n vsd_degree4 = np.fmax(cpu_high_degree, mem_verylow_degree)\n\n #extremely decrease load window value change\n ed_degree5 = np.fmax(cpu_veryhigh_degree,mem_veryhigh_degree)\n #very fast decrease load window value change\n vfd_degree5 = np.fmax(cpu_veryhigh_degree, mem_high_degree)\n #fast decrease load window value change\n fd_degree5 = np.fmax(cpu_veryhigh_degree, mem_medium_degree)\n #decrease load window value change\n dec_degree5 = np.fmax(cpu_veryhigh_degree, mem_low_degree)\n #slow decrease load window value change\n sd_degree5 = np.fmax(cpu_veryhigh_degree, mem_verylow_degree)\n\n vfi_degree6 = thruput_low_degree\n inc_degree6 = thruput_medium_degree\n ed_degree6 = thruput_high_degree\n\n\n ed_degree = np.fmax(ed_degree6, ed_degree5)\n vfd_degree = np.fmax(vfd_degree5, vfd_degree4)\n fd_degree = np.fmax(fd_degree5, np.fmax(fd_degree4, fd_degree3)) \n dec_degree = np.fmax(dec_degree5, np.fmax(dec_degree4, np.fmax(dec_degree3, dec_degree2)))\n sd_degree = np.fmax(sd_degree5, np.fmax(sd_degree4, np.fmax(sd_degree2, sd_degree1)))\n vsd_degree = np.fmax(vsd_degree4, vsd_degree1)\n nc_degree = nc_degree3\n si_degree = np.fmax(si_degree3,si_degree2)\n inc_degree = np.fmax(inc_degree6, np.fmax(inc_degree3,\n np.fmax(inc_degree2,inc_degree1)))\n fi_degree = np.fmax(fi_degree2,fi_degree1)\n vfi_degree = np.fmax(vfi_degree6, vfi_degree1)\n\n # Apply IMPLICATION or ACTIVATION\n activation_extdec = np.fmin(ed_degree, load_extdec)\n activation_veryfastdec = np.fmin(vfd_degree, load_veryfastdec)\n activation_fastdec = np.fmin(fd_degree, load_fastdec)\n activation_dec = np.fmin(dec_degree, load_dec)\n activation_smalldec = np.fmin(sd_degree, load_smalldec)\n activation_verysmalldec = np.fmin(vsd_degree, load_verysmalldec)\n activation_nochange = np.fmin(nc_degree, load_nochange)\n activation_smallinc = np.fmin(si_degree, load_smallincrease)\n activation_increase = np.fmin(inc_degree, load_increase)\n activation_fastinc = np.fmin(fi_degree, load_fastincrease)\n activation_veryfastinc = np.fmin(vfi_degree, load_veryfastincrease)\n\n # AGGREGATION\n # Apply the rules:\n # * max for aggregation, like or the cases\n # aggregated1 = np.fmax(\n # activation_extdec,\n # np.fmax(activation_veryfastdec, activation_fastdec))\n # aggregated2 = np.fmax(\n # activation_dec,\n # np.fmax(activation_smalldec, activation_verysmalldec))\n # aggregated3 = np.fmax(\n # activation_nochange,\n # np.fmax(activation_smallinc, activation_increase))\n # aggregated4 = np.fmax(\n # activation_fastinc,\n # np.fmax(activation_veryfastinc, aggregated1))\n # aggregated5 = np.fmax(\n # aggregated2,\n # np.fmax(aggregated3, aggregated4))\n\n aggregated = np.fmax(activation_veryfastinc, np.fmax(activation_fastinc, \n np.fmax(activation_increase, np.fmax(activation_smallinc, \n np.fmax(activation_nochange, np.fmax(activation_verysmalldec, \n np.fmax(activation_smalldec, np.fmax(activation_dec, \n np.fmax( activation_fastdec, np.fmax(activation_extdec, activation_veryfastdec))))))))))\n\n # Defuzzification\n # skfuzzy.defuzz(x, mfx, mode)[source]\n # Parameters: \n # x : 1d array or iterable, length N\n\n # Independent variable.\n # mfx : 1d array of iterable, length N\n\n # Fuzzy membership function.\n # mode : string\n\n # Controls which defuzzification method will be used. 
* ‘centroid’: Centroid of area * ‘bisector’: bisector of area * ‘mom’ : mean of maximum * ‘som’ : min of maximum * ‘lom’ : max of maximum\n # Defuzzification of a membership function, returning a defuzzified value of the function at x, using various defuzzification methods.\n # Returns: u : float or int Defuzzified result.\n\n self.tip_centroid = fuzz.defuzz(x_load, aggregated, 'centroid')\n self.tip_bisector = fuzz.defuzz(x_load, aggregated, 'bisector')\n self.tip_mom = fuzz.defuzz(x_load, aggregated, \"mom\")\n self.tip_som = fuzz.defuzz(x_load, aggregated, \"som\")\n self.tip_lom = fuzz.defuzz(x_load, aggregated, \"lom\")\n\n # print(tip_centroid)\n # print(tip_bisector)\n # print(tip_mom)\n # print(tip_som)\n # print(tip_lom)\n\n def get_fuzzy(self):\n defuzz_val = self.tip_centroid\n print(\"Hasil deFuzzy = \"+ str(self.tip_centroid))\n # return float or int\n return defuzz_val\n","sub_path":"fuzzyMam3inCMT.py","file_name":"fuzzyMam3inCMT.py","file_ext":"py","file_size_in_byte":11551,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"283406827","text":"import subprocess\nimport sys\nsys.path.append('../')\n\n# Rasterizes the shapefile within the bounding coordinates of a tile\ndef rasterize(in_shape, out_tif, xmin, ymin, xmax, ymax, tr=None, ot=None, gainEcoCon=None, anodata=None):\n cmd = ['gdal_rasterize', '-co', 'COMPRESS=LZW',\n\n # Input raster is ingested as 1024x1024 pixel tiles (rather than the default of 1 pixel wide strips\n '-co', 'TILED=YES', '-co', 'BLOCKXSIZE=1024', '-co', 'BLOCKYSIZE=1024',\n '-te', str(xmin), str(ymin), str(xmax), str(ymax),\n '-tr', tr, tr, '-ot', ot, '-a', gainEcoCon, '-a_nodata',\n anodata, in_shape, out_tif]\n\n subprocess.check_call(cmd)\n\n return out_tif","sub_path":"carbon_pools/utilities.py","file_name":"utilities.py","file_ext":"py","file_size_in_byte":696,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"391539972","text":"from sympy import derive_by_array,symbols,integrate,parse_expr\nfrom PyQt5 import QtGui,QtWidgets\nfrom sys import argv\n\nclass ID(QtWidgets.QWidget):\n def __init__(self):\n super(ID, self).__init__()\n self.setFixedSize(600, 400)\n self.setWindowTitle(\"Integration And Differentiate\")\n self.setWindowIcon(QtGui.QIcon(\"1.jpg\"))\n self.setStyleSheet(\"\"\"background-color:rgba(255,100,150,0.1);\"\"\")\n self.setToolTip(\"Welcome \")\n self.setToolTipDuration(3000)\n\n self.inputt = QtWidgets.QLineEdit(self)\n self.inputt.setStyleSheet(\"\"\"\n border-radius:25px;\n border:3px groove green;\n background-color: transparent;\n color:crimson;\n width:574;\n height:50;\n font-size:30px;\n font-family: 'Brush Script MT', cursive;\n padding: 10px;\n \"\"\")\n self.inputt.setToolTip(\"Enter Your Equation\")\n self.setToolTipDuration(3000)\n \n self.label_hint = QtWidgets.QLabel(\"'X' Is The Variable Here !!\", self)\n self.label_hint.setStyleSheet(\"\"\"\n color: yellow;\n background-color: transparent;\n font-size:20px;\n font-family: 'Comic Sans MS', sans-serif;\n \"\"\")\n self.label_hint.move(180, 130)\n self.label_hint.setToolTip(\"Note\")\n self.label_hint.setToolTipDuration(3000)\n \n self.integrate = QtWidgets.QPushButton(\"Integrate\", self)\n self.integrate.setStyleSheet(\"\"\"\n background-color: #F7CAC9;\n color: violet;\n font-size:20px;\n font-family: 'Comic Sans MS', sans-serif;\n border-radius: 30px;\n border: 4px groove black;\n \"\"\")\n self.integrate.resize(150, 60)\n self.integrate.move(50, 180)\n 
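# layout note (added): Integrate, Clear and Differentiate share one button row at y = 180\n        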
self.integrate.setToolTip(\"Integration\")\n self.integrate.setToolTipDuration(3000)\n\n self.clear = QtWidgets.QPushButton(\"Clear\", self)\n self.clear.setStyleSheet(\"\"\"\n background-color: #F7CAC9;\n color: violet;\n font-size:20px;\n font-family: 'Comic Sans MS', sans-serif;\n border-radius: 30px;\n border: 4px groove black;\n \"\"\")\n self.clear.resize(150, 60)\n self.clear.move(215, 180)\n self.clear.setToolTip(\"Clear Screen\")\n self.clear.setToolTipDuration(3000)\n\n \n self.derive = QtWidgets.QPushButton(\"Differentiate\", self)\n self.derive.setStyleSheet(\"\"\"\n background-color: #F7CAC9;\n color: violet;\n font-size:20px;\n font-family: 'Comic Sans MS', sans-serif;\n border-radius: 30px;\n border: 4px groove black;\n \"\"\")\n self.derive.resize(150, 60)\n self.derive.move(400, 180)\n self.derive.setToolTip(\"Differentiation\")\n self.derive.setToolTipDuration(3000)\n \n self.result = QtWidgets.QLineEdit(self)\n self.result.setStyleSheet(\"\"\"\n border-radius:25px;\n border:3px groove orange;\n background-color: transparent;\n color:crimson;\n width:574;\n height:50;\n font-size:30px;\n font-family: 'Brush Script MT', cursive;\n padding: 10px;\n \"\"\")\n self.result.move(0, 300)\n self.result.setToolTip(\"Result Of Integration Or Differentiate\")\n self.result.setToolTipDuration(3000)\n self.result.setReadOnly(True)\n self._1()\n self._2()\n self._3()\n\n def _1(self):\n self.derive.clicked.connect(self.clicked1)\n\n def clicked1(self):\n inputt_string = self.inputt.text()\n if '^' in inputt_string:\n inputt_string = inputt_string.replace(\"^\",\"**\")\n x = symbols(\"x\")\n self.result.setText(str(derive_by_array(parse_expr(inputt_string),x)))\n\n def _2(self):\n self.integrate.clicked.connect(self.clicked2)\n\n def clicked2(self):\n inputt_string = self.inputt.text()\n if '^' in inputt_string:\n inputt_string = inputt_string.replace(\"^\",\"**\")\n x = symbols(\"x\")\n if(str(integrate(parse_expr(inputt_string),x)) != f\"Integral({inputt_string}, x)\"):\n self.result.setText(str(integrate(parse_expr(inputt_string),x)) + \" + C\")\n else:\n self.result.setText(\"Cannot Solve\")\n\n def _3(self):\n self.clear.clicked.connect(self.clicked3)\n\n def clicked3(self):\n self.inputt.setText(\"\")\n self.result.setText(\"\")\n\nif __name__ == '__main__':\n application = QtWidgets.QApplication(argv)\n win = ID()\n win.show()\n application.exec_()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"579822961","text":"#! 
/usr/bin/env python\n# coding=UTF-8\n\nfrom phoenixdb.avatica import AvaticaClient\n\nclass AmendedAvaticaClient(AvaticaClient):\n\n def __init__(self,url, version=None, max_retries=None):\n super(AmendedAvaticaClient,self).__init__(url, version,max_retries)\n\n def getCatalogs(self):\n request = {'request': 'getCatalogs'}\n return self._apply(request,'resultSet')\n\n def getSchemas(self, catalog=None, schemaPattern=None):\n request = {\n 'request': 'getSchemas',\n 'catalog': catalog,\n 'schemaPattern': schemaPattern,\n }\n return self._apply(request,'resultSet')\n\n def getTables(self, catalog=None, schemaPattern=None, tableNamePattern=None, typeList=None):\n request = {\n 'request': 'getTables',\n 'catalog': catalog,\n 'schemaPattern': schemaPattern,\n 'tableNamePattern': tableNamePattern,\n 'typeList': typeList,\n }\n return self._apply(request,'resultSet')\n\n def getColumns(self, catalog=None, schemaPattern=None, tableNamePattern=None, columnNamePattern=None):\n request = {\n 'request': 'getColumns',\n 'catalog': catalog,\n 'schemaPattern': schemaPattern,\n 'tableNamePattern': tableNamePattern,\n 'columnNamePattern': columnNamePattern,\n }\n return self._apply(request,'resultSet')\n\n def getTableTypes(self):\n request = {'request': 'getTableTypes'}\n return self._apply(request,'resultSet')\n\n def getTypeInfo(self):\n request = {'request': 'getTypeInfo'}\n return self._apply(request,'resultSet')\n","sub_path":"build/lib/phoenix/avatica.py","file_name":"avatica.py","file_ext":"py","file_size_in_byte":1656,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"72966082","text":"#!/usr/bin/env python\n\nimport rospy\nimport logging\nimport sys\nimport time\nimport math\nimport os\nfrom std_msgs.msg import Float64\nfrom std_msgs.msg import Float32\nfrom std_msgs.msg import Float32MultiArray\nfrom bosch_imu_emilk.msg import BOSCH_IMU_DATA\n#from imu_emilk.msg import mymessages\n\nrot_speed = 0\nlin_speed = 0\n\ndef cleanupOnExit():\n print(\"-- Cleaning up and quiting!\")\n clearData = Float32MultiArray()\n clearData.data = [int(0),int(0)]\n pub_speeds.publish(clearData) # Publishing 0 values on topic\n time.sleep(2) # give time for the publisher to send the message\n sys.exit(0)\n\ndef Get_angularspeed(data):\n global rot_speed\n rot_speed = data.data\t# Get control signal (angular velocity command) from PID node\n\ndef Get_linearspeed(msg):\n global lin_speed\n lin_speed = msg.data\n\ndef attachCommands():\n while not rospy.is_shutdown():\n \t# BUILD ARRAY message\n \tspeedmsg = Float32MultiArray()\n \tspeedmsg.data = [lin_speed,rot_speed]\n\n \tpub_speeds.publish(speedmsg)\t# Sends speed commands to ros2mbedserial node\n \trate.sleep()\n\nrospy.init_node('base_commands', anonymous = False)\nsubs_omega = rospy.Subscriber('angular_speed', Float64, Get_angularspeed)\nsubs_linspeed = rospy.Subscriber('linear_speed', Float64, Get_linearspeed)\npub_speeds = rospy.Publisher('base_control_sig', Float32MultiArray, queue_size=1)\nrate = rospy.Rate(100)\n\nif __name__ == '__main__':\n\trospy.on_shutdown(cleanupOnExit)\n\tattachCommands()\n","sub_path":"deniro-mobile-base/Emilk_Project/bosch_imu_emilk/src/Adafruit_Python_BNO055/base_commands_publisher.py","file_name":"base_commands_publisher.py","file_ext":"py","file_size_in_byte":1467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"558902978","text":"import numpy as np\nimport hierPHMM.domainCuts as dc\n\neps = 1e-18\n\ndef 
test_logdotexp():\n assert np.allclose(np.exp(dc.logdotexp(np.log(np.array([[1, 2], [3, 4]])),\n np.log(np.array(np.ones([2, 2]))), eps)),\n np.dot(np.array([[1, 2], [3, 4]]), np.ones([2, 2])))\n\ndef test_domainPerturb():\n test_tmat = np.log([[0.2, 0.2, 0.6],\n [0.1, 0.5, 0.4],\n [0.0, 0.5, 0.5]])\n test_Bind = np.array([False, True, True])\n test_logw = np.log([1., 9.])\n test_logv = np.log([1., 3.])\n newTmat = dc.domainPerturb(test_tmat, test_Bind, test_logw, test_logv, eps)\n assert np.allclose(np.exp(newTmat), np.array([[0.2, 0.2, 0.6], [0.1, 0.295, 0.605], [0., 0.2, 0.8]]))\n \n \ndef test_logmatpowexp():\n test_tmat = np.log([[0.2, 0.2, 0.6],\n [0.1, 0.5, 0.4],\n [0.0, 0.5, 0.5]])\n assert np.allclose(dc.logmatpowexp(test_tmat, 2, eps), np.log(np.dot(np.exp(test_tmat), np.exp(test_tmat))))\n \n \ndef test_reduceHidden():\n test_tmat = np.log(np.array([[1/4, 1/2, 1/4], [0., 1/2, 1/2], [1/4, 1/4, 1/2]]))\n test_emat = np.reshape(np.arange(15), [3, 5])\n test_startv = np.array([0., -np.inf, -np.inf])\n test_Oind = np.array([True, False, False])\n test_gaplim = 3\n tst_tmat, tst_emat, tst_startv = dc.reduceHidden(test_tmat, test_emat, test_startv, test_Oind, test_gaplim, eps)\n assert np.allclose(tst_tmat,\n np.array([[np.log(1/4 + 1/16 + 3/32 + 11/128)]]))\n assert np.allclose(tst_emat, np.array([np.arange(5)]))\n assert np.allclose(tst_startv, np.array([0.]))\n \n \ndef test_marginalLikelihood():\n test_xseq = 'AB'\n test_tmat = np.log(np.array([[0.2, 0.8], [0.7, 0.3]]))\n test_emat = np.log(np.transpose(np.array([[0.6, 0.4], [0.2, 0.8]])))\n test_startv = np.array([1, 0])\n test_alph = ['A', 'B']\n assert np.allclose(dc.marginalLikelihood(test_xseq, test_tmat, test_emat,\n test_startv, test_alph, eps),\n np.log(51/125))\n \n \ndef test_normlog():\n assert np.allclose(np.exp(dc.normlog(np.log([1, 2, 3]))), np.array([1/6, 1/3, 1/2]))\n\n \ndef test_forwardMarg():\n test_xseq = 'AB'\n test_tmat = np.log(np.array([[0.2, 0.8], [0.7, 0.3]]) + eps)\n test_emat = np.log(np.transpose(np.array([[0.6, 0.4], [0.2, 0.8]])) + eps)\n test_startv = np.array([1, 0])\n test_alph = ['A', 'B']\n marg = dc.forwardMarg(test_xseq, test_tmat, test_emat, test_startv, test_alph, eps)\n print(marg)\n print(np.log(np.array([[1, 0], [1/17, 16/17]]) + eps))\n assert np.allclose(np.exp(marg), np.exp(np.log(np.array([[1, 0], [1/17, 16/17]]) + eps)))\n \n\ndef test_samplePosterior():\n test_xseq = 'AB'\n test_tmat = np.log(np.array([[0.2, 0.8], [0.7, 0.3]]) + eps)\n test_emat = np.log(np.transpose(np.array([[0.6, 0.4], [0.2, 0.8]])) + eps)\n test_startv = np.array([1, 0])\n test_alph = ['A', 'B']\n np.random.seed(0)\n zdraw, condPs = dc.samplePosterior(test_xseq, test_tmat, test_emat, test_startv, test_alph, eps, True)\n assert np.allclose(np.exp(condPs), np.exp(dc.forwardMarg(test_xseq, test_tmat, test_emat,\n test_startv, test_alph, eps)))\n assert np.allclose(zdraw, np.array([0, 1]))\n \n \ndef test_states2alignments():\n tst_algns = dc.states2alignments('0ACDEFG1', '0AFDE1', \n [('t', 0), ('s', 0), ('n', 2),\n ('s', 2), ('s', 3), ('t', 1)])\n chk_algns = ('AC-DEFG', 'A-FDE--')\n assert tst_algns == chk_algns\n \n tst_algns = dc.states2alignments('0ACD1', '0AFDEGH1',\n [('t', 0), ('s', 0), ('s', 1),\n ('n', 2), ('n', 2), ('s', 2),\n ('n', 3), ('t', 1)])\n chk_algns = ('AC--D-', 'AFDEGH')\n assert tst_algns == 
chk_algns","sub_path":"hierPHMM/test_domainCuts.py","file_name":"test_domainCuts.py","file_ext":"py","file_size_in_byte":3956,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"19252834","text":"from neat_eo.metrics.core import confusion\n\n\ndef get(label, predicted, config=None):\n\n    tn, fn, fp, tp = confusion(label, predicted)\n    if tp == 0 and fp == 0 and fn == 0:\n        return float(\"NaN\")\n\n    # the early return above guarantees fp + fn + tp > 0 here\n    return float(tp / (fp + fn + tp))\n","sub_path":"neat_eo/metrics/IoU.py","file_name":"IoU.py","file_ext":"py","file_size_in_byte":357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"101212238","text":"#!/usr/bin/env python3\n\nfrom os import listdir, makedirs\nfrom os.path import expanduser, isdir, splitext, join, basename\nimport re\nimport numpy as np\nfrom scipy.stats import skew, kurtosis\nfrom pickle import loads\nfrom routines.functions import discarray\nfrom routines.functions import lacunarity, perimeter, area, get_glcm, grad\n# from segmentation import segmentate\nfrom thresh_segmentation import segmentate\nfrom adth_block_functions import BLOCK_FUNCTIONS\nfrom parameters import CLASSIFICATION_CATEGORIES\n\n\nMODELS = 'models'\nMODEL_NAME = 'adth_random_forests_classification_model.bin'\n# CLASSIFICATION_MODEL_SCALER = join(MODELS, f'{MODEL_NAME}_scaler')\nFEATURE_NAMES_NAME = 'adth_random_forests_classification_features.bin'\nCLASSIFICATION_MODEL = join(MODELS, MODEL_NAME)\nCLASSIFICATION_FEATURE_NAMES = join(MODELS, FEATURE_NAMES_NAME)\n\nwith open(CLASSIFICATION_MODEL, 'rb') as f:\n    classification_model = loads(f.read())\n    classification_model.n_jobs = -1\n\n# with open(CLASSIFICATION_FEATURE_NAMES, 'rb') as f:\n    # FEATURE_NAMES = loads(f.read())\n    # # print(FEATURE_NAMES)\n\nFEATURE_NAMES = ['std', 'skew', 'kurt', 'entropy', 'fgperoarea', 'psdfd',\n                 'bcfd', 'bclac', 'gradmax', 'gradmean', 'gradmeanomedian']\n\nFEATURES = [{'name': name, 'function': BLOCK_FUNCTIONS[name]}\n            for name in FEATURE_NAMES]\n\n# print(FEATURES)\n\ndef classify(img, proba=False):\n    def calculate_features(img):\n        n_functions = len(FEATURES)\n        feats = np.empty(n_functions)\n        segmented = segmentate(img)\n        glcm = None\n        segglcm = None\n        _grad = grad(img)\n\n        for i, feature in enumerate(FEATURES):\n            print(f\" Calculating: {feature['name']}\")\n            feats[i] = feature['function'](img, segmented, glcm, segglcm, _grad)\n            # print(feats[i])\n        return feats\n\n    # return calculate_features(img)\n    # Add verbose option\n    print(f\" Calculating block features.\")\n    x = calculate_features(img)[None, :]\n    print(f\" Dealing with NaNs.\")\n    x = np.where(np.isnan(x), -1, x)  # check if it's okay\n    x = np.where(~np.isfinite(x), -1, x)  # check if it's okay\n    # print(f\" Scaling data.\")\n    # X = segmentation_model_scaler.transform(X)\n    print(f\" Performing segmentation of block.\")\n    if proba:\n        y = classification_model.predict_proba(x)[0]\n    else:\n        y = classification_model.predict(x)\n    return y\n\n\nif __name__ == '__main__':\n    import netCDF4 as nc\n    import matplotlib.pyplot as plt\n    from random import shuffle\n    folder = '/mnt/hdd/home/tmp/los/data/classification_blocks'\n    files = [join(folder, f) for f in listdir(folder)]\n    shuffle(files)\n    for f in files:\n        img = discarray(f)\n        c = classify(img)\n\n        plt.suptitle(basename(f))\n        plt.title(f'{CLASSIFICATION_CATEGORIES[int(c)][\"name\"]}')\n        plt.imshow(img)\n
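        # plt.show() blocks until the figure window is closed, so each block is reviewed one at a time (descriptive note, not in the original)\n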
        plt.show()\n","sub_path":"old/adaptive_segmentation/adth_classification.py","file_name":"adth_classification.py","file_ext":"py","file_size_in_byte":2903,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"154020554","text":"import pygame\nfrom cell import Cell\nfrom colours import BLACK\n\nEMPTY = -1\nO = 0\nX = 1\n\niTuple = (-1,-2, 0, 0, 1, 2, 1,2, 1,2, 0,0, -1,-2, -1,-2)\njTuple = (-1,-2, -1,-2, -1,-2, 0,0, 1,2, 1,2, 1, 2, 0, 0)\n\nclass Board:\n    def __init__(self, s):\n        self.size = s\n        self.free = 9\n        self.table = [[Cell((0, 0), (s/3, s/3)),\n                       Cell((s/3, 0), (2*s/3, s/3)),\n                       Cell((2*s/3, 0), (s, s/3))],\n                      [Cell((0, s/3), (s/3, 2*s/3)),\n                       Cell((s/3, s/3), (2*s/3, 2*s/3)),\n                       Cell((2*s/3, s/3), (s, 2*s/3))],\n                      [Cell((0, 2*s/3), (s/3, s)),\n                       Cell((s/3, 2*s/3), (2*s/3, s)),\n                       Cell((2*s/3, 2*s/3), (s, s))]]\n\n    def drawBoard(self, scr):\n        '''\n        Draws a Tic Tac Toe Board\n        '''\n        w = 10\n        m = 17\n        x = self.size\n        pygame.draw.line(scr, BLACK, (x/3, 0+m), (x/3, x-m), w)\n        pygame.draw.line(scr, BLACK, (2*x/3, 0+m), (2*x/3, x-m), w)\n        pygame.draw.line(scr, BLACK, (0+m, x/3), (x-m, x/3), w)\n        pygame.draw.line(scr, BLACK, (0+m, 2*x/3), (x-m, 2*x/3), w)\n\n    def getFree(self):\n        return self.free\n\n    def findCell(self, pos):\n        '''\n        If 'pos' is valid, returns the 'i, j' position of the Cell\n        in the Board, otherwise it returns '-1, -1'\n        '''\n        for i in range(0, 3):\n            for j in range(0, 3):\n                if self.table[i][j].insideCell(pos):\n                    return (i, j)\n        return (-1, -1)\n\n    def drawPieces(self, n, pos, scr):\n        '''\n        Draws the 'n' piece in the 'scr' surface. This piece is contained\n        in the 'i, j' Cell.\n        '''\n        i = pos[0]\n        j = pos[1]\n        self.free -= 1\n        self.table[i][j].drawPiece(n, scr)\n\n    def checkVictory(self, pos):\n        '''\n        Returns True if the last player to play gets three in a row,\n        otherwise returns False\n        '''\n        i = pos[0]\n        j = pos[1]\n        # iTuple/jTuple hold (one step, two steps) offsets for the eight\n        # directions around (i, j); opposite directions sit 8 entries apart,\n        # so pairing x with x + 8 also catches lines where (i, j) is the middle cell\n        for x in range(0, 8, 2):\n            cont = 1\n            for start in (x, x + 8):\n                for d in (start, start + 1):\n                    ii = iTuple[d] + i\n                    jj = jTuple[d] + j\n                    if self.correctPosition(ii, jj) and self.samePiece(i, j, ii, jj):\n                        cont += 1\n                    else:\n                        break\n            if cont >= 3:\n                return True\n        return False\n\n    def correctPosition(self, i, j):\n        '''\n        Checks if the 'i, j' is inside the table\n        '''\n        return i >= 0 and i <= 2 and j >= 0 and j <= 2\n\n    def samePiece(self, i, j, ii, jj):\n        '''\n        Checks if the 'i, j' position contains a piece with the same type as\n        the 'ii, jj' position.\n        '''\n        return self.table[i][j].whichPiece() == self.table[ii][jj].whichPiece()\n\n    def searchEmpty(self):\n        '''\n        Return the (i, j) position of the first empty cell in the table\n        '''\n        for i in range(0, 3):\n            for j in range(0, 3):\n                if self.table[i][j].whichPiece() == EMPTY:\n                    return (i, j)\n\n    def victoryPosible(self):\n        '''\n        Returns True if a victory is possible, otherwise returns False\n        '''\n        pos = self.searchEmpty()\n        i = pos[0]\n        j = pos[1]\n        for x in range(0, 16):\n            if x%2 == 0:\n                aux = EMPTY\n            ii = iTuple[x] + i\n            jj = jTuple[x] + j\n            if self.correctPosition(ii, jj):\n                if aux == EMPTY:\n                    aux = self.table[ii][jj].whichPiece()\n                elif self.table[ii][jj].whichPiece() == aux:\n                    return True\n        return False\n","sub_path":"board.py","file_name":"board.py","file_ext":"py","file_size_in_byte":3709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"72944723","text":"import os\r\nimport hou\r\nfrom PySide2 import (QtWidgets, QtUiTools, QtGui, QtCore)\r\nfrom PySide2.QtCore import *\r\nfrom PySide2.QtGui import *\r\nfrom PySide2.QtWidgets import 
*\r\n\r\n\r\nclass HDRILink(QtWidgets.QWidget):\r\n def __init__(self):\r\n super(HDRILink, self).__init__()\r\n\r\n self.scriptpath = os.path.dirname(os.path.realpath(__file__))\r\n\r\n pathfile = open(self.scriptpath + \"/ARNO_HLtoH/Hdri_Path.txt\")\r\n txtcon = pathfile.readline()\r\n pathfile.close()\r\n self.oripath = txtcon.replace('\\\\','/')\r\n self.proj = self.oripath\r\n self.transition = \"0\";\r\n\r\n #load UI file\r\n loader = QtUiTools.QUiLoader()\r\n self.ui = loader.load(self.scriptpath+\"/UI.ui\")\r\n\r\n #get UI elements\r\n self.setproj = self.ui.findChild(QtWidgets.QPushButton, \"setproj\")\r\n self.folderlist = self.ui.findChild(QtWidgets.QComboBox, \"folderlist\")\r\n self.scenelist = self.ui.findChild(QtWidgets.QListWidget, \"scenelist\")\r\n self.label = self.ui.findChild(QtWidgets.QLabel, \"label_2\")\r\n self.height = self.ui.findChild(QtWidgets.QSpinBox, \"spinbox\")\r\n\r\n\r\n # creat connections\r\n self.setproj.clicked.connect(self.setproject)\r\n self.folderlist.activated.connect(self.Refresh)\r\n self.folderlist.activated.connect(self.CreateInterface)\r\n self.height.valueChanged.connect(self.changeHeight)\r\n #layout\r\n mainLayout = QtWidgets.QVBoxLayout()\r\n mainLayout.addWidget(self.ui)\r\n\r\n #add widgets to layout\r\n self.setLayout(mainLayout)\r\n\r\n # creat widgets\r\n self.Refresh()\r\n self.CreateInterface()\r\n\r\n #set icon size\r\n self.scenelist.setIconSize(QSize(150,75))\r\n self.label.setMaximumHeight(150)\r\n #set height\r\n heightfile = open(self.scriptpath + \"/ARNO_HLtoH/Height.txt\")\r\n heightS = heightfile.readline()\r\n try:\r\n height = int(heightS)\r\n except ValueError:\r\n height = 500\r\n self.scenelist.setMaximumHeight(height)\r\n self.scenelist.setMinimumHeight(height)\r\n self.height.setValue(height)\r\n\r\n heightfile.close()\r\n\r\n def changeHeight(self,data):\r\n self.scenelist.setMaximumHeight(data)\r\n self.scenelist.setMinimumHeight(data)\r\n f = open(self.scriptpath + \"/ARNO_HLtoH/Height.txt\", \"wt\")\r\n f.write(str(data))\r\n f.close()\r\n\r\n def setproject(self):\r\n setpath = hou.ui.selectFile(title=\"Set Project\",file_type=hou.fileType.Directory)\r\n newpath = os.path.dirname(setpath) +\"/\"\r\n\r\n if (newpath != \"/\"):\r\n self.proj = newpath\r\n f = open(self.scriptpath + \"/ARNO_HLtoH/Hdri_Path.txt\",\"wt\")\r\n f.write(newpath)\r\n f.close()\r\n\r\n #print self.proj\r\n self.Refresh()\r\n self.CreateInterface()\r\n\r\n def Refresh(self):\r\n if self.proj != self.transition and self.proj !=\"\":\r\n self.folderlist.clear()\r\n for folder in os.listdir(self.proj):\r\n self.folderlist.addItem(folder)\r\n\r\n self.transition = self.proj\r\n\r\n self.instexpath = self.proj + str(self.folderlist.currentText()) + \"/Thumbnails/\"\r\n self.texpath = self.proj + str(self.folderlist.currentText()) + \"/HDRIs/\"\r\n\r\n #print self.texpath\r\n\r\n def CreateInterface(self):\r\n self.scenelist.clear()\r\n\r\n try:\r\n for file in os.listdir(self.instexpath):\r\n if file.endswith('.jpg'):\r\n fn = file.split(\".\")\r\n del fn[-1]\r\n name = \".\".join(fn)\r\n #add icon\r\n instex0 = self.instexpath + file\r\n jpg0 = QtGui.QPixmap(instex0).scaled(300, 150)\r\n icon = QtGui.QIcon(jpg0)\r\n item = QListWidgetItem(icon, \"\")\r\n item.setText(name)\r\n self.scenelist.addItem(item)\r\n\r\n endfile = file\r\n\r\n instex1 = self.instexpath + endfile\r\n # print instex\r\n jpg1 = QtGui.QPixmap(instex1).scaled(500, 250)\r\n self.label.setPixmap(jpg1)\r\n except WindowsError:\r\n pass\r\n\r\n #connect list items to 
function\r\n self.scenelist.doubleClicked.connect(self.setTex)\r\n self.scenelist.clicked.connect(self.viewHdri)\r\n\r\n\r\n def viewHdri(self, item):\r\n texname = item.data()\r\n\r\n instex = self.instexpath + texname+\".jpg\"\r\n #print instex\r\n jpg = QtGui.QPixmap(instex).scaled(500, 250)\r\n self.label.setPixmap(jpg)\r\n\r\n def setTex(self,item):\r\n texname = item.data()\r\n\r\n for texture in os.listdir(self.texpath):\r\n j = texture.split(texname)\r\n if len(j)>=2:\r\n texname = texture\r\n\r\n path = self.texpath + texname\r\n node = hou.selectedNodes()[0]\r\n gen = node.parm('env_map')\r\n \r\n if(gen == None):\r\n gen = node.parm('ar_light_color_texture')\r\n \r\n if (gen == None):\r\n gen = node.parm('A_FILENAME')\r\n gen.set(path)\r\n gen = node.parm('A_FILENAME2')\r\n gen.set(path)\r\n\r\n gen.set(path)\r\n \r\n #print \"author:ARNO\"\r\n #print \"QQ:1245527422\"","sub_path":"HL.py","file_name":"HL.py","file_ext":"py","file_size_in_byte":5338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"269040121","text":"import io\nimport logging\nimport os\nimport platform\nimport re\nimport subprocess\nimport sys\nimport tempfile\nimport urllib.request\nimport zipfile\n\nfrom qhub.utils import timer, run_subprocess_cmd\nfrom qhub import constants\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass TerraformException(Exception):\n pass\n\n\ndef download_terraform_binary(version=constants.TERRAFORM_VERSION):\n os_mapping = {\n \"linux\": \"linux\",\n \"win32\": \"windows\",\n \"darwin\": \"darwin\",\n \"freebsd\": \"freebsd\",\n \"openbsd\": \"openbsd\",\n \"solaris\": \"solaris\",\n }\n\n architecture_mapping = {\n \"x86_64\": \"amd64\",\n \"i386\": \"386\",\n \"armv7l\": \"arm\",\n \"aarch64\": \"arm64\",\n }\n\n download_url = f\"https://releases.hashicorp.com/terraform/{version}/terraform_{version}_{os_mapping[sys.platform]}_{architecture_mapping[platform.machine()]}.zip\"\n filename_directory = os.path.join(tempfile.gettempdir(), \"terraform\", version)\n filename_path = os.path.join(filename_directory, \"terraform\")\n\n if not os.path.isfile(filename_path):\n logger.info(\n f\"downloading and extracting terraform binary from url={download_url} to path={filename_path}\"\n )\n with urllib.request.urlopen(download_url) as f:\n bytes_io = io.BytesIO(f.read())\n download_file = zipfile.ZipFile(bytes_io)\n download_file.extract(\"terraform\", filename_directory)\n\n os.chmod(filename_path, 0o555)\n return filename_path\n\n\ndef run_terraform_subprocess(processargs, **kwargs):\n terraform_path = download_terraform_binary()\n logger.info(f\" terraform at {terraform_path}\")\n if run_subprocess_cmd([terraform_path] + processargs, **kwargs):\n raise TerraformException(\"Terraform returned an error\")\n\n\ndef version():\n terraform_path = download_terraform_binary()\n logger.info(f\"checking terraform={terraform_path} version\")\n\n version_output = subprocess.check_output([terraform_path, \"--version\"]).decode(\n \"utf-8\"\n )\n return re.search(r\"(\\d+)\\.(\\d+).(\\d+)\", version_output).group(0)\n\n\ndef init(directory=None):\n logger.info(f\"terraform init directory={directory}\")\n with timer(logger, \"terraform init\"):\n run_terraform_subprocess([\"init\"], cwd=directory, prefix=\"terraform\")\n\n\ndef apply(directory=None, targets=None):\n targets = targets or []\n\n logger.info(f\"terraform apply directory={directory} targets={targets}\")\n command = [\"apply\", \"-auto-approve\"] + [\"-target=\" + _ for _ in targets]\n 
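# e.g. targets=[\"module.network\"] gives [\"apply\", \"-auto-approve\", \"-target=module.network\"] (illustrative module name)\n    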
with timer(logger, \"terraform apply\"):\n run_terraform_subprocess(command, cwd=directory, prefix=\"terraform\")\n\n\ndef output(directory=None):\n terraform_path = download_terraform_binary()\n\n logger.info(f\"terraform={terraform_path} output directory={directory}\")\n with timer(logger, \"terraform output\"):\n return subprocess.check_output(\n [terraform_path, \"output\", \"-json\"], cwd=directory\n ).decode(\"utf8\")[:-1]\n\n\ndef tfimport(addr, id, directory=None):\n logger.info(f\"terraform import directory={directory} addr={addr} id={id}\")\n command = [\"import\", addr, id]\n with timer(logger, \"terraform import\"):\n run_terraform_subprocess(\n command, cwd=directory, prefix=\"terraform\", strip_errors=True\n )\n\n\ndef destroy(directory=None):\n logger.info(f\"terraform destroy directory={directory}\")\n command = [\n \"destroy\",\n \"-auto-approve\",\n ]\n\n with timer(logger, \"terraform destroy\"):\n run_terraform_subprocess(command, cwd=directory, prefix=\"terraform\")\n\n\ndef rm_local_state(directory=None):\n logger.info(f\"rm local state file terraform.tfstate directory={directory}\")\n tfstate_path = \"terraform.tfstate\"\n if directory:\n tfstate_path = os.path.join(directory, tfstate_path)\n\n if os.path.isfile(tfstate_path):\n os.remove(tfstate_path)\n","sub_path":"qhub/provider/terraform.py","file_name":"terraform.py","file_ext":"py","file_size_in_byte":3866,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"330704514","text":"# -*- coding: utf8 -*-\n# python\n# ##### BEGIN GPL LICENSE BLOCK #####\n#\n# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,\n# MA 02110-1301, USA.\n#\n# ##### END GPL LICENSE BLOCK #####\n\n# \n\nbl_info = {\"name\": \"Artist Paint Popup\",\n \"author\": \"CDMJ, Spirou4D\",\n \"version\": (1, 0),\n \"blender\": (2, 76, 0),\n \"location\": \"\",\n \"description\": \"shortcut menu for Artist Panel addon\",\n \"warning\": \"\",\n \"wiki_url\": \"\",\n \"category\": \"Paint\"}\n\nimport bpy\nfrom bpy.types import AddonPreferences,\\\n Menu,\\\n Panel,\\\n UIList,\\\n Operator\nimport math\nimport os\nSEP = os.sep\n\n\nclass canvasPopup(Operator):\n bl_idname = \"artist_paint.popup\"\n bl_label = \"Artist Paint Popup\"\n bl_options = {'REGISTER', 'UNDO'}\n\n @classmethod\n def poll(cls, context):\n brush = context.tool_settings.image_paint.brush\n ob = context.active_object\n return (brush is not None and ob is not None)\n\n def check(self, context):\n return True\n\n def invoke(self, context, event):\n return context.window_manager.invoke_props_dialog(self,\n width=240)\n\n def execute(self, context):\n return {'FINISHED'}\n\n def draw(self, context):\n #\"ARTIST_PAINT_OT_popup\"\n _strAngle = str(context.scene.CustomAngle)\n tool_settings = context.tool_settings\n ipaint = context.tool_settings.image_paint\n\n layout = self.layout\n trunk = layout.column()\n trunk.separator()\n trunk.label(\"Objects Masking Tools\")\n box = trunk.box()\n col = box.column()\n col.operator(\"artist_paint.trace_selection\",\n text=\"Mask from Gpencil\",\n icon='OUTLINER_OB_MESH')\n\n row = col.row(align=True)\n row.operator(\"artist_paint.curve_2dpoly\",\n text=\"Make Vector Mask\",\n icon='PARTICLE_POINT')\n row.operator(\"artist_paint.curve_unwrap\",\n text=\"\",\n icon='OUTLINER_OB_MESH')\n col.operator(\"artist_paint.inverted_mask\",\n text=\"Mesh Mask Inversion\",\n icon='MOD_TRIANGULATE')\n\n col.prop(ipaint, \"use_stencil_layer\",\n text=\"Use stencil mask\")\n\n if ipaint.use_stencil_layer == True:\n col.template_ID(ipaint, \"stencil_image\")\n col.operator(\"image.new\", text=\"New\").\\\n gen_context = 'PAINT_STENCIL'\n col.prop(ipaint, \"invert_stencil\",\n text=\"Invert the mask\")\n trunk.separator() # empty line\n trunk.label(\"Mirrors / Rotations\")\n box = trunk.box()\n col = box.column(align=True)\n col.prop(context.scene, \"ArtistPaint_Bool01\",\n text=\"Canvas Frame Constraint\")\n row = col.row(align=True)\n row.operator(\"artist_paint.canvas_horizontal\",\n text=\"Flip Horizontal\", icon='ARROW_LEFTRIGHT')\n row.operator(\"artist_paint.canvas_vertical\",\n text=\"Flip Vertical\", icon='FILE_PARENT')\n row = col.row(align=True)\n col.separator() # empty line\n row = col.row(align=True)\n buttName_1 = \"Rotate \" + _strAngle + \"° CCW\"\n buttName_2 = \"-\" + buttName_1\n row.operator(\"artist_paint.rotate_ccw_15\",\n text=buttName_1, icon='TRIA_LEFT')\n row.operator(\"artist_paint.rotate_cw_15\",\n text=buttName_2, icon='TRIA_RIGHT')\n row = col.row(align=True)\n row.operator(\"artist_paint.rotate_ccw_90\",\n text=\"Rotate 90° CCW\", icon='PREV_KEYFRAME')\n row.operator(\"artist_paint.rotate_cw_90\",\n text=\"Rotate 90° CW\", icon='NEXT_KEYFRAME')\n col.operator(\"artist_paint.canvas_resetrot\",\n text=\"Reset Rotation\", icon='CANCEL')\n\n\ndef register():\n bpy.utils.register_module(__name__)\n\n km_list = ['Image Paint']\n for i in km_list:\n # 
bpy.context.window_manager.keyconfigs.default.keymaps\n sm = bpy.context.window_manager\n km = sm.keyconfigs.default.keymaps[i]\n kmi = km.keymap_items.new('artist_paint.popup', 'V', 'PRESS')\n\n\ndef unregister():\n bpy.utils.unregister_module(__name__)\n\n km_list = ['Image Paint']\n for i in km_list:\n sm = bpy.context.window_manager\n km = sm.keyconfigs.default.keymaps[i]\n for kmi in (kmi for kmi in km.keymap_items\n if (kmi.idname == \"artist_paint.popup\")):\n km.keymap_items.remove(kmi)\n\n\nif __name__ == \"__main__\":\n register()\n","sub_path":"scripts/addons_extern/paint_artist_popup.py","file_name":"paint_artist_popup.py","file_ext":"py","file_size_in_byte":5339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"44289610","text":"'''\r\n哈夫编码\r\n输入:\r\n正整数:表示编码字母的数量\r\n字符串:包含了n中字母的字符串\r\n\r\n输出:\r\n输出哈夫曼编码\r\n\r\n注意:\r\n因哈夫曼树的的构造不唯一,现约定如下:\r\n输入中首先给出了有多少个字符要编码,\r\n然后根据输入字符串获得每个字母按字典顺序的权值序列(每个字母的权值即该字母在字符串中出现的次数),\r\n\r\n造哈夫曼树时每次从权值序列选取(从左到右选取)两个最小的权值,\r\n若选取的值不相等,那么较小值的结点作为以他们值加和的新结点的左孩子,另一个则为右孩子,\r\n若选取的值相等,先选取的做左孩子,后选取的做右孩子,\r\n删除已选取的值的结点,并在权值序列最后(注意最后!)添加两个选取值加和的新结点。\r\n如此往复,直到权值序列表长度为1。\r\n\r\n我的看法:\r\n额,根据这里的题意:对于权值序列,最小的一个数应该是相对在左边,第二小的数应该是在相对位置的右边\r\n\r\n'''\r\nfrom collections import Counter\r\nfrom collections import deque\r\n\r\n\r\nclass TreeNode():\r\n def __init__(self, v, k = None):\r\n self.Value = v # 权重\r\n self.Key = k # 关键字\r\n self.Left = None\r\n self.Right = None\r\n\r\ndef PrintNode(node, t):\r\n if node != None:\r\n print(' '*t, end = \"\")\r\n if node.Key != None:\r\n print('%-2d:%c' % (node.Value, node.Key))\r\n else:\r\n print('%-4d' % node.Value)\r\n PrintNode(node.Left, t + 1)\r\n PrintNode(node.Right, t + 1)\r\n\r\n\r\ndef Decoding(node, di, s):\r\n if node != None:\r\n if node.Left == None and node.Right == None:\r\n di[node.Key] = s\r\n else:\r\n Decoding(node.Left, di, s + '0')\r\n Decoding(node.Right, di, s + '1')\r\n\r\n \r\n\r\ndef MakeTree(d): # d 是一个权重字典\r\n p = deque()\r\n for k in d:\r\n p.append(TreeNode(d[k], k))\r\n\r\n while len(p) > 1:\r\n firstMin = min(p, key = lambda x : x.Value)\r\n del p[p.index(firstMin)]\r\n secondMin = min(p, key = lambda x : x.Value)\r\n del p[p.index(secondMin)] \r\n\r\n newNode = TreeNode(firstMin.Value + secondMin.Value)\r\n newNode.Left = firstMin\r\n newNode.Right = secondMin\r\n p.append(newNode)\r\n\r\n # print(\"tree\")\r\n # PrintNode(p[0], 0)\r\n\r\n di = {}\r\n Decoding(p[0], di, '')\r\n # print(di)\r\n return di\r\n\r\n\r\ndef solve():\r\n #input()\r\n s = input()\r\n p = dict(Counter(s))\r\n # print(p)\r\n di = MakeTree(p)\r\n for a in s:\r\n print(di[a], end =\" \")\r\n \r\nsolve() \r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"accode/031.py","file_name":"031.py","file_ext":"py","file_size_in_byte":2697,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"554988262","text":"import csv\n\n\ndef get_value(s, emptyvalue=\"\", boolvalues=(\"true\", \"false\")):\n try:\n s = float(s)\n except ValueError:\n if s == emptyvalue:\n s = None\n elif s == boolvalues[0]:\n s = True\n elif s == boolvalues[1]:\n s = False\n return s\n\ndef reduce(items, f):\n sum = 0\n for x in items:\n sum = f(x, sum)\n return sum\n\ndef load_csv_data(fn):\n data = []\n with open(fn) as f:\n reader = csv.reader(f, delimiter=\",\", quotechar=\"'\")\n for row in reader:\n data.append(row)\n for i in range(len(data)):\n for j in range(len(data[i])):\n data[i][j] = get_value(data[i][j], 
emptyvalue=\"\", boolvalues=(\"true\", \"false\"))\n return data\n\n\ndata = load_csv_data(\"iris.csv\")\nfor i in range(len(data)):\n print(data[i])","sub_path":"TSVD/3.2.py","file_name":"3.2.py","file_ext":"py","file_size_in_byte":842,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"161768299","text":"\nimport numpy as np\n\n\ndef pad_tensor(vec, pad, axis):\n \"\"\"\n args:\n vec - tensor to pad\n pad - the size to pad to\n axis - dimension to pad\n\n return:\n a new tensor padded to 'pad' in dimension 'dim'\n \"\"\"\n\n pad_size = list(vec.shape)\n pad_size[axis] = pad - vec.shape[axis]\n return np.concatenate([vec, np.zeros(pad_size)], axis=axis)\n\n\ndef collate_fn(batch):\n \"\"\"Moves list inside of dict recursively.\n\n Can be used as input to batch iterator.\n\n Args:\n batch:\n\n Returns:\n\n \"\"\"\n def nested_batching(value, key, nested_batch):\n # recursively nesting the batch\n if isinstance(value, dict):\n if key not in nested_batch:\n nested_batch[key] = dict()\n return {k: nested_batching(v, k, nested_batch[key])\n for k, v in value.items()}\n else:\n if key not in nested_batch:\n nested_batch[key] = []\n nested_batch[key].append(value)\n return nested_batch[key]\n\n nested_batch = {}\n for elem in batch:\n assert isinstance(elem, dict)\n nested_batch = {key: nested_batching(value, key, nested_batch)\n for key, value in elem.items()}\n return nested_batch\n","sub_path":"padertorch/data/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"414935846","text":"import re\n\n\ndef extract_libraries(files):\n \"\"\"Extracts a list of imports that were used in the files\n\n Parameters\n ----------\n files : []string\n Full paths to files that need to be analysed\n\n Returns\n -------\n dict\n imports that were used in the provided files, mapped against the language\n \"\"\"\n\n res = []\n # regex to find imports like from foo.foo1 import bar; returns foo.foo1\n regex1 = re.compile(r\"from (.+) import\", re.IGNORECASE)\n\n # regex to find imports like import bar as foo and from foo.foo1 import bar; returns bar\n regex2 = re.compile(r\"import ([a-zA-Z0-9_-]+)(?:\\s| as)\", re.IGNORECASE)\n for f in files:\n with open(file=f, mode='r', errors='ignore') as fr:\n\n contents = ' '.join(fr.readlines())\n matches = regex1.findall(contents)\n matches.extend(regex2.findall(contents))\n\n if matches:\n res.extend(matches)\n return {\"Python\": res}\n","sub_path":"src/language/Python.py","file_name":"Python.py","file_ext":"py","file_size_in_byte":963,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"40797649","text":"from __future__ import unicode_literals\r\nimport youtube_dl\r\nimport os\r\nfrom sys import argv\r\n\r\nimport sys\r\nfrom shutil import copyfile\r\nfrom sys import exit\r\n\r\ndef get_desktop():\r\n\r\n\tplatform = sys.platform\r\n\tdesktop = None \r\n\tif platform == 'win32':\r\n\t\tdesktop = os.path.join(os.path.join(os.environ['USERPROFILE']), 'Desktop')\r\n\telif platform == 'linux1' or platform == 'linux2':\r\n\t\tdesktop = os.path.expanduser(\"~/Desktop/\")\r\n\telse:\r\n\t\tpass\r\n\r\n\treturn desktop\r\n\r\nget_desktop()\r\n\r\n# Download data and config\r\n\r\ndownload_options = {\r\n\t'format' : 'bestvideo[width>=1920]+bestaudio[ext=m4a]/best',\r\n\t'outtmpl':\tf'{get_desktop()}/%(playlist)s/%(playlist_index)s - 
%(title)s.%(ext)s',\r\n\t'nocheckcertificate': True,\r\n\r\n}\r\n\r\n# Song Directory\r\n\r\nif not os.path.exists('Downloads'):\r\n\tos.mkdir('Downloads')\r\nelse:\r\n\tos.chdir('Downloads')\r\n\r\n#copy file link to Download directory\r\n\r\nsource = f'{sys.path[0]}/{argv[1]}'\r\ntarget = f'{os.getcwd()}/{argv[1]}'\r\n\r\n# adding exception handling\r\ntry:\r\n copyfile(source, target)\r\nexcept IOError as e:\r\n print(\"Unable to copy file. %s\" % e)\r\n exit(1)\r\nexcept:\r\n print(\"Unexpected error:\", sys.exc_info())\r\n exit(1)\r\n\r\n\r\n\r\n# Download Individudal\r\n\r\nwith youtube_dl.YoutubeDL(download_options) as ydl:\r\n\twith open('./' + argv[1], 'r') as f:\r\n\r\n\t\tfor urls in f:\r\n\t\t\tif 'youtube' in urls:\r\n\t\t\t\tdownload_options.update({'postprocessors': [\r\n\t\t\t\t\t{'key': 'FFmpegMetadata'},]},\r\n\t\t\t\t\t)\r\n\t\t\ttry:\r\n\t\t\t\twith youtube_dl.YoutubeDL(download_options) as ydl:\r\n\t\t\t\t\tydl.download([urls])\r\n\t\t\texcept:\r\n\t\t\t\tprint('e')\r\n","sub_path":"Youtube/Downloader.py","file_name":"Downloader.py","file_ext":"py","file_size_in_byte":1548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"414614965","text":"#import threading\n#import time\n#\n#exitFlag = 0\n#\n#class myThread (threading.Thread):\n# def __init__(self, threadID, name, counter):\n# threading.Thread.__init__(self)\n# self.threadID = threadID\n# self.name = name\n# self.counter = counter\n# def run(self):\n# print (\"Starting \" + self.name)\n# print_time(self.name, self.counter, 5)\n# print (\"Exiting \" + self.name)\n#\n#def print_time(threadName, delay, counter):\n# while counter:\n# if exitFlag:\n# threadName.exit()\n# time.sleep(delay)\n# print (\"%s: %s\" % (threadName, time.ctime(time.time())))\n# counter -= 1\n#\n## Create new threads\n#thread1 = myThread(1, \"Thread-1\", 1)\n#thread2 = myThread(2, \"Thread-2\", 2)\n#\n## Start new Threads\n#thread1.start()\n#thread2.start()\n#thread1.join()\n#thread2.join()\n#print (\"Exiting Main Thread\")\n\n###from multiprocessing.dummy import Pool as ThreadPool\n###\n###def squareNumber(n):\n### return n ** 2\n###\n#### function to be mapped over\n###def calculateParallel(numbers, threads=4):\n### pool = ThreadPool(threads)\n### results = pool.map(squareNumber, numbers)\n### pool.close()\n### pool.join()\n### return results\n###\n###if __name__ == \"__main__\":\n### numbers = [1, 2, 3, 4, 5]\n### squaredNumbers = calculateParallel(numbers, 8)\n### for n in squaredNumbers:\n### print(n)\n\n#\n#import threading\n#from queue import Queue\n#import time\n#\n## lock to serialize console output\n#lock = threading.Lock()\n#\n#def do_work(a, b, c):\n# #time.sleep(.1) # pretend to do some lengthy work.\n# # Make sure the whole print completes or threads can mix up output in one line.\n# with lock:\n# print(threading.current_thread().name,a,b,c)\n#\n## The worker thread pulls an item from the queue and processes it\n#def worker(a):\n# while True:\n# items = q.get()\n# do_work(items[0], items[1], items[2])\n# q.task_done()\n#\n## Create the queue and thread pool.\n#q = Queue()\n#for i in range(4):\n# t = threading.Thread(target=worker, args=(q,))\n# t.daemon = True # thread dies when main thread (only non-daemon thread) exits.\n# t.start()\n#\n## stuff work items on the queue (in this case, just a number).\n#start = time.perf_counter()\n#for item in range(20):\n# q.put((item, item+1, item+5))\n#\n#q.join() # block until all tasks are done\n#\n## \"Work\" took .1 seconds per task.\n## 
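# ---------------------------------------------------------------------------
# (Added reference, not part of the original experiments.) The commented
# thread/queue variants in the record below are usually expressed most simply
# with concurrent.futures, which owns the queue and the worker threads:
#
# import time
# from concurrent.futures import ThreadPoolExecutor
#
# def do_work(a, b, c):
#     time.sleep(.1)            # pretend to do some lengthy work
#     return a + b + c
#
# with ThreadPoolExecutor(max_workers=4) as pool:
#     futures = [pool.submit(do_work, i, i + 1, i + 5) for i in range(20)]
#     results = [f.result() for f in futures]  # blocks until all tasks finish
# ---------------------------------------------------------------------------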
20 tasks serially would be 2 seconds.\n## With 4 threads should be about .5 seconds (contrived because non-CPU intensive \"work\")\n#print('time:',time.perf_counter() - start)\n\n\nimport linecache\nimport os;\n\n\n#class\nclass nline(object):\n pos = 0\n value = 0\n#path_mat = 'D:/MCC/TESIS/metabolomico/test_data/matrices'\n#path_porc = 'D:/MCC/TESIS/metabolomico/test_data/porcentaje'\n\npath_mat = 'C:/Users/vaneseinh/Documents/MCC/TESIS MCC - LastCode&Results/MetabolomicoG/test_data/matrices'\npath_porc = 'C:/Users/vaneseinh/Documents/MCC/TESIS MCC - LastCode&Results/MetabolomicoG/test_data/porcentaje'\n\nfile_matrix = 'C2SINUVD3'\n#file_porc= 'C2SINUVD0.txt'\n\nfile_matrix2 = 'C2SINUVD3'\n#file_porc2= 'C2SINUVD1.txt'\n\ndef is_number(s):\n try:\n float(s)\n return True\n except ValueError:\n return False\n\n\nline = linecache.getline(path_mat + '/' + file_matrix, 453 + 5)\nmin_range_accepted = 280\n\ntemp = line.split(\",\")\nprint (\"st es el len de temp \"+str(len(temp)))\n\nnew_line1 = []\nnew_line2 = []\ncount_elem1 = 0\ncount_elem2 = 0\n\nfor i in range(1 , len(temp)-1):\n esp = temp[i].strip()\n #print(\"pos i \"+str(i)+ \"val: \"+esp)\n if (is_number(esp) == True):\n if ((float(esp) > 0) and (float(esp) >= min_range_accepted)):\n count_elem1+=1\n line = nline()\n line.value = float(esp)\n line.pos = i\n new_line1.append(line)\nprint(\"fin for\")\n\nline = linecache.getline(path_mat + '/' + file_matrix2, 464 + 5)\n\ntemp = line.split(\",\")\nprint (\"st es el len de temp \"+str(len(temp)))\n\n\n\nfor i in range(1 , len(temp)-1):\n esp = temp[i].strip()\n if (is_number(esp) == True):\n if ((float(esp) > 0) and (float(esp) >= min_range_accepted)):\n count_elem2+=1\n line = nline()\n line.value = float(esp)\n line.pos = i\n new_line2.append(line)\n\nprint(\"fin for\")\n\nprint(\"count_elem1 \"+str(count_elem1) + \" --- \" + \"count_elem2 \"+str(count_elem2))\nnew_line1.sort(key = lambda x: x.value, reverse=True)\nnew_line2.sort(key = lambda x: x.value, reverse=True)\nline1 = []\nline2 = []\n\nif(count_elem1>=count_elem2):\n for i in range(0 , count_elem1 ):\n line = nline()\n line.value = new_line1[i].value\n line.pos = new_line1[i].pos\n line1.append(line)\n\n for i in range(0 , count_elem1 ):\n line = nline()\n line.value = new_line2[i].value\n line.pos = new_line2[i].pos\n line2.append(line)\nelse:\n for i in range(0 , count_elem2 ):\n line = nline()\n line.value = new_line1[i].value\n line.pos = new_line1[i].pos\n line1.append(line)\n\n for i in range(0 , count_elem2 ):\n line = nline()\n line.value = new_line2[i].value\n line.pos = new_line2[i].pos\n line2.append(line)\n\n\n\n\n\nprint('*****line1 results')\nfor i in range(0,len(line1)):\n #if (line1[i].value > 0):\n print(\"pos: \"+str(line1[i].pos) + \"--- num \"+ str(line1[i].value))\nprint(\"fin for\")\n\n\nprint('*****line2 results')\nfor i in range(0,len(line2)):\n #if (line2[i].value > 0):\n print(\"pos: \"+str(line2[i].pos) + \"--- num \"+ str(line2[i].value))\nprint(\"fin for\")\n\n","sub_path":"prueba.py","file_name":"prueba.py","file_ext":"py","file_size_in_byte":5533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"137123288","text":"from flask import Flask, render_template, request\nfrom werkzeug import secure_filename\nimport os\nimport modelpred as md\nimport removefilordir as rm\nimport wtforms as wtf\nimport json\nimport flask\nfrom flask_cors import CORS,cross_origin\n\napp = Flask(__name__)\nCORS(app)\napp.config['CORS_HEADERS'] = 
'Content-Type'\nmodel_ft_count=0\nclass Average(wtf.Form):\n filename = wtf.FileField(validators=\n [wtf.validators.InputRequired()])\n\n\nALLOWED_EXTENSIONS = set(['pdf'])\n\ndef allowed_file(filename):\n \"\"\"Does filename have the right extension?\"\"\"\n return '.' in filename and filename.rsplit('.', 1)[1] in ALLOWED_EXTENSIONS\n\nAPP_ROOT =\"/home/shubhi/JDproject/Files/\"\n\ndef result(filename):\n res = md.main1(APP_ROOT)\n \n return res\n\n\n\n\n@app.route('/', methods = ['GET', 'POST'])\n@cross_origin()\ndef index():\n global model_ft_count\n if(model_ft_count==0):\n print(\"Fitting model on training data ........\")\n md.model_fit()\n print(\"Fitting done ...\")\n model_ft_count=model_ft_count +1\n form = Average(request.form)\n filename = None # default\n if request.method == 'POST':\n rm.remove(APP_ROOT[:-1])\n # Save uploaded file on server if it exists and is valid\n if request.files:\n file1 = request.files[form.filename.name]\n print(file1)\n if file1 and allowed_file(file1.filename):\n rm.remove(APP_ROOT[:-1])\n\n filename = secure_filename(file1.filename)\n file1.save(os.path.join(APP_ROOT,\n filename))\n data ={\"filename\": filename , \"Predicted_Profile\": result(filename)}\n return flask.jsonify(data)\n #print(result1)\n #return result1\n else:\n result1 = None\n\n return render_template(\"upload1.html\", form=form, result=result1)\n\n\n\nif __name__ == '__main__':\n app.run(debug = True)\n \n \n\n","sub_path":"Api's/upload2.py","file_name":"upload2.py","file_ext":"py","file_size_in_byte":1922,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"288139637","text":"# mp3_mixture.py\n# ---------------\n# Licensing Information: You are free to use or extend this projects for\n# educational purposes provided that (1) you do not distribute or publish\n# solutions, (2) you retain this notice, and (3) you provide clear\n# attribution to the University of Illinois at Urbana-Champaign\n#\n# Created by Justin Lizama (jlizama2@illinois.edu) on 09/28/2018\n# Modified by Jaewook Yeom 02/02/2020\nimport sys\nimport argparse\nimport configparser\nimport copy\nimport numpy as np\n\nimport reader\nimport naive_bayes_mixture as nb\n\n\"\"\"\nThis file contains the main application that is run for Part 2 of this MP.\n\"\"\"\n\ndef compute_accuracies(predicted_labels, dev_set, dev_labels):\n yhats = predicted_labels\n accuracy = np.mean(yhats == dev_labels)\n tp = np.sum([yhats[i] == dev_labels[i] and yhats[i] == 1 for i in range(len(yhats))])\n precision = tp / np.sum([yhats[i] == 1 for i in range(len(yhats))])\n recall = tp / (np.sum([yhats[i] != dev_labels[i] and yhats[i] == 0 for i in range(len(yhats))]) + tp)\n f1 = 2 * (precision * recall) / (precision + recall)\n return accuracy, f1, precision, recall\n\n\ndef main(args):\n train_set, train_labels, dev_set, dev_labels = reader.load_dataset(args.training_dir,args.development_dir,args.stemming,args.lower_case)\n predicted_labels = nb.naiveBayesMixture(train_set, train_labels, dev_set, args.bigram_lambda, args.unigram_smoothing, args.bigram_smoothing, args.pos_prior)\n\n accuracy, f1, precision, recall = compute_accuracies(predicted_labels, dev_set, dev_labels)\n print(\"Accuracy:\",accuracy)\n print(\"F1-Score:\",f1)\n print(\"Precision:\",precision)\n print(\"Recall:\",recall)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description='CS440 MP3 Naive Bayes Mixture (Part 2)')\n\n parser.add_argument('--training', dest='training_dir', type=str, default = 
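# ---------------------------------------------------------------------------
# (Added note on upload2.py above, illustrative only.) In index(), result1 is
# only assigned on the non-POST and invalid-file paths, so a POST whose file
# fails allowed_file() (or that carries no file at all) reaches
# render_template() with result1 unbound and raises NameError. Initialising
# `result1 = None` next to `filename = None`, before the request-method
# branch, closes that gap.
# ---------------------------------------------------------------------------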
'../data/movies_review/train',\n help='the directory of the training data')\n parser.add_argument('--development', dest='development_dir', type=str, default = '../data/movies_review/dev',\n help='the directory of the development data')\n parser.add_argument('--stemming',dest=\"stemming\", type=bool, default=False,\n help='Use porter stemmer')\n parser.add_argument('--lower_case',dest=\"lower_case\", type=bool, default=False,\n help='Convert all word to lower case')\n parser.add_argument('--bigram_lambda',dest=\"bigram_lambda\", type=float, default = 0.5,\n help='Bigram Lambda Value - default 0.5')\n parser.add_argument('--unigram_smoothing',dest=\"unigram_smoothing\", type=float, default = 1.0,\n help='Unigram Laplace smoothing parameter - default 1.0')\n parser.add_argument('--bigram_smoothing',dest=\"bigram_smoothing\", type=float, default = 1.0,\n help='Bigram Laplace smoothing parameter - default 1.0')\n parser.add_argument('--pos_prior',dest=\"pos_prior\", type=float, default = 0.8,\n help='Positive prior, i.e. Num_positive_comments / Num_comments')\n args = parser.parse_args()\n main(args)\n","sub_path":"mp3/mp3_mixture.py","file_name":"mp3_mixture.py","file_ext":"py","file_size_in_byte":3166,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"428526446","text":"'''\nCreated on 2018年8月21日\n\n@author: Administrator\n'''\n\n\"\"\"\nasyncio\n\"\"\"\nimport asyncio\nimport threading\n\n@asyncio.coroutine\ndef hello(index):\n print(\"Hello, world %s:(%s)\" % (index,threading.currentThread()))\n # 异步调用asyncio.sleep(1)\n yield from asyncio.sleep(1)\n print(\"Hello, end %s:(%s) \" % (index,threading.currentThread()))\n\n# 获取EventLoop:\nloop = asyncio.get_event_loop()\n# 执行coroutine\nloop.run_until_complete(asyncio.wait([hello(1),hello(2)]))\n# loop.run_until_complete(hello(2))\nloop.close()\n\n","sub_path":"pythoning/src/asyncDemo/demo1.py","file_name":"demo1.py","file_ext":"py","file_size_in_byte":532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"398954065","text":"import ghttp as client, random\nimport time, sys\nfrom concurrent.futures import ThreadPoolExecutor, as_completed\n\nhh = [['Host', \"collector-pxu6b0qd2s.px-cloud.net\"], ['sec-ch-ua', '\"Chromium\";v=\"88\", \"Google Chrome\";v=\"88\", \";Not A Brand\";v=\"99\"'], ['sec-ch-ua-mobile', '?0'], [ 'user-agent', 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.104 Safari/537.36'], ['content-type', 'application/x-www-form-urlencoded'], [ 'accept', '*/*'], ['origin', 'https://www.walmart.com'], [ 'sec-fetch-site', 'cross-site'], [ 'sec-fetch-mode', 'cors'], ['sec-fetch-dest', 'empty'], ['referer', 'https://www.walmart.com/'], ['accept-language', 'en-US,en;q=0.9'], [\":path\", \"\"], [\":scheme\", \"\"], [\":authority\", \"\"], [\":method\", \"\"]]\n\ndef lol():\n try:\n s = client.Session(\"\", client.JA3Fingerprint(\"k\"), timeout=2)\n for _ in range(MULTIPLES):\n y = s.get(\"https://http2.golang.org/reqinfo\", headers=hh)\n #print(y.status)\n except Exception as e:\n print(\" exception:\", e)\n pass\nMULTIPLES = 5\nwith ThreadPoolExecutor(max_workers=200) as ex:\n NUM_TASKS = 1\n tasks = (ex.submit(lol) for _ in range(NUM_TASKS))\n start = time.time()\n for i, _ in enumerate(as_completed(tasks)):\n i = (i+1)*MULTIPLES\n sys.stderr.write(f\"\\r\\r\\r {i}\")\n print()\n print(NUM_TASKS*MULTIPLES, \"took\", time.time() - start, \"seconds\")\n\n# 
lol()","sub_path":"venetia-build/tls/example.py","file_name":"example.py","file_ext":"py","file_size_in_byte":1437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"293048590","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\n\ndef coord_equal_unity(transData, axis='x', length=1):\n if axis == 'x':\n a_coords = (0, length)\n b_coords = np.array([1, 0])\n a_col = 1\n b_col = 0\n else:\n a_coords = (length, 0)\n b_coords = np.array([0, 1])\n a_col = 0\n b_col = 1\n\n # transform data coordinates of a segment with a given length in axis a to display coordinates in axis a\n display_unity = transData.transform([(0, 0), a_coords])\n # get the length of the segment in display units\n a_display_unity = display_unity[1, a_col] - display_unity[0, a_col]\n # create a transform which will take from display to data coordinates\n inv = transData.inverted()\n # transform from display coordinates to data coordinates in axis b\n coords_unity = inv.transform([(0, 0), a_display_unity * b_coords])\n # get the length of the segment in data units\n b_coords_unity = coords_unity[1, b_col] - coords_unity[0, b_col]\n return b_coords_unity\n\n\ndef convert_display_to_data_coordinates(transData, length=10):\n # create a transform which will take from display to data coordinates\n inv = transData.inverted()\n # transform from display coordinates to data coordinates in x axis\n data_coords = inv.transform([(0, 0), (length, 0)])\n # get the length of the segment in data units\n x_coord_len = data_coords[1, 0] - data_coords[0, 0]\n # transform from display coordinates to data coordinates in y axis\n data_coords = inv.transform([(0, 0), (0, length)])\n # get the length of the segment in data units\n y_coord_len = data_coords[1, 1] - data_coords[0, 1]\n return x_coord_len, y_coord_len\n\n\n# arbitrary values\nxmin = -120\nxmax = 400\nymin = -0.5\nymax = 2.5\n\nplt.figure(0, figsize=(8, 7), frameon=False)\nax = plt.subplot(2, 1, 1)\nplt.xlim(xmin, xmax)\nplt.ylim(ymin, ymax)\n\ny_coords_unity = 1\ndisplay_unity = ax.transData.transform([(0, 0), (0, y_coords_unity)])\ny_display_unity = display_unity[1, 1] - display_unity[0, 1]\nprint(display_unity)\n\ninv = ax.transData.inverted()\ncoords_unity = inv.transform([(0, 0), (y_display_unity, 0)])\nx_coords_unity = coords_unity[1, 0] - coords_unity[0, 0]\n\n# plot a square\nplt.plot([0, x_coords_unity], [0, 0], 'k')\nplt.plot([0, x_coords_unity], [y_coords_unity, y_coords_unity], 'k')\n\nplt.plot([0, 0], [0, y_coords_unity], 'k')\nplt.plot([x_coords_unity, x_coords_unity], [0, y_coords_unity], 'k')\n\nplt.show()\n\n####################\n# test function 1 y\n\nplt.figure(1, figsize=(8, 7), frameon=False)\n\n\nax = plt.subplot(2, 1, 1)\nplt.xlim(xmin, xmax)\nplt.ylim(ymin, ymax)\n\ny_coords_unity = 3\nx_coords_unity = coord_equal_unity(ax.transData, axis='x', length=y_coords_unity)\n\n# plot a square\nplt.plot([0, x_coords_unity], [0, 0], 'k')\nplt.plot([0, x_coords_unity], [y_coords_unity, y_coords_unity], 'k')\n\nplt.plot([0, 0], [0, y_coords_unity], 'k')\nplt.plot([x_coords_unity, x_coords_unity], [0, y_coords_unity], 'k')\n\nplt.show()\n\n####################\n# test function 1 x\n\nplt.figure(2, figsize=(8, 7), frameon=False)\n\n\nax = plt.subplot(2, 1, 1)\nplt.xlim(xmin, xmax)\nplt.ylim(ymin, ymax)\n\nx_coords_unity = 10\ny_coords_unity = coord_equal_unity(ax.transData, axis='y', length=x_coords_unity)\n\n# plot a square\nplt.plot([0, x_coords_unity], [0, 0], 'k')\nplt.plot([0, x_coords_unity], 
[y_coords_unity, y_coords_unity], 'k')\n\nplt.plot([0, 0], [0, y_coords_unity], 'k')\nplt.plot([x_coords_unity, x_coords_unity], [0, y_coords_unity], 'k')\n\nplt.show()\n\n####################\n# test function 2\n\nplt.figure(2, figsize=(8, 7), frameon=False)\n\n\nax = plt.subplot(2, 1, 1)\nplt.xlim(xmin, xmax)\nplt.ylim(ymin, ymax)\n\ndisplay_length = 10\nx_coords, y_coords = convert_display_to_data_coordinates(ax.transData, length=display_length)\n\n# plot a square\nplt.plot([0, x_coords], [0, 0], 'k')\nplt.plot([0, x_coords], [y_coords, y_coords], 'k')\n\nplt.plot([0, 0], [0, y_coords], 'k')\nplt.plot([x_coords, x_coords], [0, y_coords], 'k')\n\nplt.show()\n\n\n\n","sub_path":"figuras/Pycharm_Matched_Filter_Report/display_coordinates_test.py","file_name":"display_coordinates_test.py","file_ext":"py","file_size_in_byte":3965,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"498456155","text":"##############################################################################\n#\n# Copyright (c) 2001, 2002 Zope Corporation and Contributors.\n# All Rights Reserved.\n#\n# This software is subject to the provisions of the Zope Public License,\n# Version 2.0 (ZPL). A copy of the ZPL should accompany this distribution.\n# THIS SOFTWARE IS PROVIDED \"AS IS\" AND ANY AND ALL EXPRESS OR IMPLIED\n# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS\n# FOR A PARTICULAR PURPOSE.\n#\n##############################################################################\n\"\"\"\n$Id$\n\"\"\"\n\nimport unittest\nimport logging\n\nfrom zope.component.tests.placelesssetup import PlacelessSetup\nfrom zope.component import getGlobalServices\nfrom zope.app.servicenames import EventPublication\n\nfrom zope.app.event import globalSubscribe, globalUnsubscribe, publish\nfrom zope.app.container.contained import ObjectAddedEvent\nfrom zope.app.event.globalservice import Logger, eventPublisher\nfrom zope.app.event.interfaces import IPublisher\n\nclass DopeyHandler(logging.Handler):\n\n def __init__(self):\n logging.Handler.__init__(self)\n self.results = []\n\n def emit(self, record):\n self.results.append(record)\n\nclass TestLogger1(PlacelessSetup,unittest.TestCase):\n\n eventlogger = Logger()\n\n def setUp(self):\n super(TestLogger1, self).setUp()\n getGlobalServices().defineService(EventPublication, IPublisher)\n getGlobalServices().provideService(EventPublication, eventPublisher)\n # futz a handler in for testing\n self.logger = logging.getLogger(\"Event.Logger\")\n self.oldlevel = self.logger.level\n self.oldprop = self.logger.propagate\n self.logger.propagate = False\n self.logger.setLevel(logging.DEBUG)\n self.handler = DopeyHandler()\n self.logger.addHandler(self.handler)\n # register a logger\n globalSubscribe(self.eventlogger)\n # send an event\n publish(None, ObjectAddedEvent(None, \"parent\", 'foo'))\n\n def tearDown(self):\n globalUnsubscribe(self.eventlogger)\n self.logger.removeHandler(self.handler)\n self.logger.setLevel(self.oldlevel)\n self.logger.propagate = self.oldprop\n PlacelessSetup.tearDown(self)\n\n def testLogger(self):\n # Test the logger logs appropriately\n results = self.handler.results\n self.assertEqual(len(results), 1)\n result = results[0]\n self.assertEqual(result.name, \"Event.Logger\")\n self.assertEqual(result.levelno, logging.INFO)\n self.assertEqual(\n result.getMessage(),\n \"zope.app.container.contained.ObjectAddedEvent: [\"\n 
\"('newName', 'foo'),\\n \"\n \"('newParent', 'parent'),\\n \"\n \"('object', None),\\n \"\n \"('oldName', None),\\n \"\n \"('oldParent', None)\"\n \"]\\n\"\n )\n self.assertEqual(result.exc_info, None)\n\nclass TestLogger2(TestLogger1):\n\n eventlogger = Logger(logging.CRITICAL)\n\n def testLogger(self):\n # Test the logger logs appropriately\n results = self.handler.results\n self.assertEqual(len(results), 1)\n result = results[0]\n self.assertEqual(result.name, \"Event.Logger\")\n self.assertEqual(result.levelno, logging.CRITICAL)\n self.assertEqual(\n result.getMessage(),\n \"zope.app.container.contained.ObjectAddedEvent: [\"\n \"('newName', 'foo'),\\n \"\n \"('newParent', 'parent'),\\n \"\n \"('object', None),\\n \"\n \"('oldName', None),\\n \"\n \"('oldParent', None)\"\n \"]\\n\"\n )\n self.assertEqual(result.exc_info, None)\n\ndef test_suite():\n return unittest.TestSuite([\n unittest.makeSuite(TestLogger1),\n unittest.makeSuite(TestLogger2),\n ])\n\nif __name__ == '__main__':\n unittest.main(defaultTest='test_suite')\n","sub_path":"Zope3/tags/ZopeX3-3.0.0a2/src/zope/app/event/tests/test_logger.py","file_name":"test_logger.py","file_ext":"py","file_size_in_byte":3976,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"638425025","text":"\"\"\"\r\n执行用时 :260 ms, 在所有 python3 提交中击败了59.91%的用户\r\n内存消耗 :12.9 MB, 在所有 python3 提交中击败了100.00%的用户\r\n\r\n设计链表的实现。可以选择单链表或者双链表。单链表中的节点应该具有两个\r\n属性:val和next。val是当前节点的值,next是指向下一个节点的指针/引用\r\n如果要使用双向链表,则还需要一个属性prev以指示链表中的上一个节点。\r\n\r\nget(index):获取链表中第index个节点。如果索引无效,则返回-1。\r\naddAtHead(val):在链表的第一个元素之前添加一个值为val的节点。插入后,\r\n新节点将成为链表的第一个节点。\r\naddAtTail(val):将值为val的节点追加到链表的最后一个元素。\r\naddAtIndex(index, val):在链表的第index个节点之前添加值为val的节点。\r\n如果index等于链表的长度,则该节点将附加到链表的末尾。如果Index大于链表\r\n长度,则不会插入节点。如果index小于0,则在头部插入节点。\r\ndeleteAtIndex(index):如果索引index有效,则删除链表中的第index个节点。\r\n\"\"\"\r\n\r\n\r\nclass Node(object):\r\n def __init__(self, x):\r\n self.val = x\r\n self.next = None\r\n\r\n\r\nclass MyLinkedList:\r\n def __init__(self):\r\n self.head = Node(0)\r\n\r\n def get(self, index):\r\n if index < 0:\r\n return -1\r\n node = self.head\r\n for _ in range(index + 1):\r\n if node.next is not None:\r\n node = node.next\r\n else:\r\n return -1\r\n return node.val\r\n\r\n def addAtHead(self, val):\r\n new = Node(val)\r\n new.next = self.head.next\r\n self.head.next = new\r\n\r\n def addAtTail(self, val):\r\n node = self.head\r\n while node.next is not None:\r\n node = node.next\r\n node.next = Node(val)\r\n\r\n def addAtIndex(self, index, val):\r\n node = self.head\r\n for _ in range(index):\r\n if node.next is not None:\r\n node = node.next\r\n else:\r\n return\r\n new = Node(val)\r\n new.next = node.next\r\n node.next = new\r\n\r\n def deleteAtIndex(self, index):\r\n if index < 0:\r\n return\r\n node = self.head\r\n for _ in range(index):\r\n if node.next is not None:\r\n node = node.next\r\n else:\r\n return\r\n if node.next is not None:\r\n node.next = node.next.next","sub_path":"链表/MyLinkedList.py","file_name":"MyLinkedList.py","file_ext":"py","file_size_in_byte":2496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"435822299","text":"#\n# @lc app=leetcode id=367 lang=python3\n#\n# [367] Valid Perfect Square\n#\n# https://leetcode.com/problems/valid-perfect-square/description/\n#\n# algorithms\n# Easy (41.49%)\n# Likes: 794\n# Dislikes: 166\n# Total Accepted: 209.5K\n# Total Submissions: 505.2K\n# Testcase Example: '16'\n#\n# Given a positive integer num, 
write a function which returns True if num is a\n# perfect square else False.\n# \n# Follow up: Do not use any built-in library function such as sqrt.\n# \n# \n# Example 1:\n# Input: num = 16\n# Output: true\n# Example 2:\n# Input: num = 14\n# Output: false\n# \n# \n# Constraints:\n# \n# \n# 1 <= num <= 2^31 - 1\n# \n# \n#\n\n# @lc code=start\nclass Solution:\n def isPerfectSquare(self, num: int) -> bool:\n if num == 0:\n return False\n l = 1\n r = num // 2 + 1\n while l < r:\n m = l + (r-l)//2\n mult = m*m\n if mult == num:\n return True\n elif mult > num:\n r = m\n else:\n l = m + 1\n return l*l == num\n# @lc code=end\n","sub_path":"python3/367.valid-perfect-square.336704590.ac.py","file_name":"367.valid-perfect-square.336704590.ac.py","file_ext":"py","file_size_in_byte":1057,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"458960730","text":"#!/bin/env python\nfrom scraper import Scrape\nfrom js_generator import JS_Generator\nfrom bot import Bot\n\ndef main():\n xs = 0\n while(True):\n print(\"\\n\\nEnter what you want to do:\")\n print(\" 1. Generate javascript\")\n print(\" 2. Start bot\")\n print(\" 0. Exit\")\n choice = int(input(\" > \"))\n\n if(choice == 0):\n exit()\n\n ccna_number = 0\n while(ccna_number > 4 or ccna_number < 1):\n print(\"\\n\\nWhich CCNA level (1-4)??\")\n ccna_number = int(input(\"> \"))\n\n chapter_number = 0\n while(chapter_number > 10 or chapter_number < 1):\n print(\"\\n\\nWhat chapter (1-10)??\")\n chapter_number = int(input(\"> \"))\n \n \n if ccna_number == 1:\n url = \"https://itexamanswers.net/ccna-1-v5-1-v6-0-chapter-{0}-exam-answers-100-full.html\".format(chapter_number)\n elif ccna_number == 2:\n url = \"https://itexamanswers.net/ccna-2-v5-0-3-v6-0-chapter-{0}-exam-answers-100-full.html\".format(chapter_number)\n elif ccna_number == 3:\n url = \"https://itexamanswers.net/ccna-3-v5-0-3-v6-0-chapter-{0}-exam-answers-100-full.html\".format(chapter_number)\n elif ccna_number == 4:\n url = \"https://itexamanswers.net/ccna-4-chapter-{0}-exam-answers-v5-0-3-v6-0-full-100.html\".format(chapter_number)\n \n exam = Scrape(url)\n\n if(choice == 1):\n JS_Generator(exam)\n elif(choice == 2):\n Bot(exam)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"244783678","text":"import socket\nimport os\nimport threading\nfrom time import sleep\nfrom threading import Thread\nimport _thread\n\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\nprint('Socket Preparado...')\n\ndef main():\n host = '192.168.43.31'\n port = 8888\n s.bind((host, port))\n print('Enlaze listo...')\n print('Escuchando...')\n s.listen(1)\n\ndef handle_client(client_socket):\n while True:\n data = client_socket.recv(1024).decode('utf-8')\n if not data: break\n print('Client says: ' + data)\n print('Sending: ' + data)\n client_socket.send(data.encode('utf-8'))\n client_socket.close()\n\nif __name__ == '__main__':\n main()\n while True:\n client_socket, addr = s.accept()\n os.system('cls')\n print('Conexion desde: '+str(addr)) \n _thread.start_new_thread(handle_client ,(client_socket,))\n s.close()","sub_path":"tcp_server/tcp_server_mul.py","file_name":"tcp_server_mul.py","file_ext":"py","file_size_in_byte":881,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"234838070","text":"# 一个函数可以接收另一个函数作为参数\nfrom 
functools import reduce\n\n\ndef sum(x, y, f):\n return x + y + f(x, y)\n\n\ndef is_odd(x):\n return x % 2 == 1\n\n\ndef odd_iter():\n n = 1\n while True:\n n = n + 2\n yield n\n\n\ndef not_division(n):\n return lambda x: x % n > 0\n\n\ndef primes():\n yield 2\n it = odd_iter() # 初始序列\n while True:\n n = next(it) # 返回序列的第一个数\n yield n\n it = filter(not_division(n), it) # 构造新序列\n\n\nif __name__ == '__main__':\n # 函数名也是变量\n a = abs\n print(a(-1)) # 1\n print(sum(1, 2, max)) # 5\n\n # map/reduce\n # map()函数接收两个参数,一个是函数,一个是Iterable,map将传入的函数依次作用到序列的每个元素,并把结果作为新的Iterator返回。\n # 由于结果r是一个Iterator,Iterator是惰性序列,因此通过list()函数让它把整个序列都计算出来并返回一个list。\n print(list(map(str, [1, 2, 3, 4, 5, 6]))) # ['1', '2', '3', '4', '5', '6']\n # reduce把一个函数作用在一个序列[x1, x2, x3, …]上,这个函数必须接收两个参数,reduce把结果继续和序列的下一个元素做累积计算\n print(reduce(max, [5, 6, 7, 3, 8, 2])) # 8\n # filter()函数用于过滤序列\n print(list(filter(is_odd, list(range(1, 11))))) # [1, 3, 5, 7, 9]\n\n # 求素数\n for n in primes():\n if n < 1000:\n print(n)\n else:\n break\n\n # sorted\n print(sorted([3, 4, 5, 6, 7, 1])) # [1, 3, 4, 5, 6, 7]\n # 还可以传入一个key函数来自定义排序\n print(sorted([3, 5, -7, -1, 2], key=abs)) # [-1, 2, 3, 5, -7]\n print(sorted(['E', 'b', 'A', 'c'], key=str.lower)) # ['A', 'b', 'c', 'E']\n print(sorted(['E', 'b', 'A', 'c'], key=str.lower, reverse=True)) # ['E', 'c', 'b', 'A']\n","sub_path":"cjh/04finctional_programming/01high_order_function.py","file_name":"01high_order_function.py","file_ext":"py","file_size_in_byte":1869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"39782613","text":"import os\n\ndef get_files():\n files_path=[]\n for i in os.listdir(os.getcwd()):\n if os.path.splitext(i)[1]=='.ipynb':\n files_path.append(i)\n return files_path\n\ndef count(files):\n line_of_code,blank,comments=0,0,0\n for filename in files:\n f=open(filename,'rb')\n for line in f:\n line=line.strip()\n line_of_code+=1\n if line=='':\n blank+=1\n elif line[0]=='#'or line[0]=='/':\n comments+=1\n f.close()\n return (line_of_code,blank,comments)\n\nif __name__=='__main__':\n files=get_files()\n print(files)\n lines=count(files)\n print('Line(s):%d,black line(s):%d,comments line(s):%d'%(lines[0],lines[1],lines[2]))\n","sub_path":"mission-master/recordNUM.py","file_name":"recordNUM.py","file_ext":"py","file_size_in_byte":740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"510869820","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Jul 11 15:43:43 2020\n\n@author: YYH19\n\"\"\"\n\nimport os,pdb\nimport pandas as pd\nfrom collections import defaultdict\nimport tensorflow as tf\nimport numpy as np\nimport random, math\nfrom time import time\nimport shutil\nimport multiprocessing as mp\nfrom numpy.random import seed\nimport utils\nseed(2020)\nfrom tensorflow import set_random_seed\nset_random_seed(2021)\n\n\n### parameters ###\ntest_flag = 1\nversion = 1\nrunid = 241\nlayer = 4\ndevice_id = 0\nlearning_rate = 0.0005\nepochs = 200\nstart_epoch = 100\ndimension = 32\nbatch_size = 1024 * 8 * 5\nlamda = 0.0005\ngama = 0.1\nalpha = 0.1\nuser_count = 55187\nitem_count = 9916 \nadd_count = 10000\npre_flag = 0\n\n\nbest_ckpt = '../../saved_models/pinterest_model/epoch_184_ndcg_0.06655656807767725.ckpt'\n\nif test_flag == 1:\n topk_u, topk_v = 3, 50\nelse:\n topk_u, topk_v = 3, 15\n\nbase_path = '../../saved_model/pinterest_model/runid_'+str(runid)+'/'\nif not os.path.exists(base_path):\n os.makedirs(base_path)\nif test_flag == 0:\n train_txt = 
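# ---------------------------------------------------------------------------
# Note on the line counter in recordNUM.py above: the file is opened in
# binary mode, so `line.strip()` yields bytes; `line == ''` can never be true
# in Python 3, `line[0] == '#'` compares an int to a str, and a blank line
# even raises IndexError when `line[0]` is evaluated on b''. A corrected
# sketch of count(), added here for illustration:
def count_lines(files):
    lines_of_code, blank, comments = 0, 0, 0
    for filename in files:
        # Text mode with errors ignored, so str comparisons work.
        with open(filename, 'r', errors='ignore') as f:
            for line in f:
                line = line.strip()
                lines_of_code += 1
                if line == '':
                    blank += 1
                elif line.startswith('#') or line.startswith('/'):
                    comments += 1
    return lines_of_code, blank, comments
# ---------------------------------------------------------------------------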
open(base_path+'loss.txt','a')\n shutil.copyfile('egln.py', base_path+'egln.py')\nevaluate_txt = open(base_path+'evaluate.txt','a')\nmodel_save_path = base_path + 'models/'\nif not os.path.exists(model_save_path):\n os.makedirs(model_save_path)\n\n\n### read data ###\nt1 = time()\ndata_path = '../../datasets/pinterest_data/'\nuser_items = np.load(data_path+'user_items.npy', allow_pickle=True).tolist()\ntraindata = np.load(data_path+'traindata.npy', allow_pickle=True).tolist()\nvaldata = np.load(data_path+'valdata.npy', allow_pickle=True).tolist()\ntestdata = np.load(data_path+'testdata.npy', allow_pickle=True).tolist()\nuserset = set(range(user_count))\nitemset = set(range(item_count))\nt2 = time()\nprint('load data cost time:',round(t2-t1, 4))\nprint('layer:',layer)\n\n\ndef get_train_adj_matrix(train_rating):\n '''\n get adjacent matrix of traindata#\n '''\n item_user_train = defaultdict(set)\n for key in train_rating.keys():\n for i in train_rating[key]:\n item_user_train[i].add(key)\n user_item_indexs, user_item_values = [], []\n item_user_indexs, item_user_values = [], []\n\n for x in train_rating.keys():\n for y in train_rating[x]:\n user_item_indexs.append([x, y])\n user_item_values.append(1.0)\n for x in item_user_train.keys():\n for y in item_user_train[x]:\n item_user_indexs.append([x, y])\n item_user_values.append(1.0)\n user_item_sparse_matrix = tf.SparseTensor(indices=user_item_indexs, values=user_item_values, dense_shape=[user_count, item_count])\n item_user_sparse_matrix = tf.SparseTensor(indices=item_user_indexs, values=item_user_values, dense_shape=[item_count, user_count])\n user_item_dense_matrix = tf.sparse_tensor_to_dense(user_item_sparse_matrix, default_value=0, validate_indices=False, name=None)\n user_item_indexs = np.reshape(np.array(user_item_indexs), [-1,2])\n all_user_list = np.reshape(user_item_indexs[:,0], [-1,1])\n all_item_list = np.reshape(user_item_indexs[:,1], [-1,1])\n return user_item_sparse_matrix, item_user_sparse_matrix, user_item_dense_matrix, all_user_list, all_item_list\nuser_item_adj_matrix, item_user_adj_matrix, adj_matrix_dense, all_user_list, all_item_list = get_train_adj_matrix(traindata)\n\n \ndef get_simi_matrix_old(user_matrix, item_matrix, w1, w2, adj_matrix, topk1, topk2, gama):\n\n user_rep = tf.matmul(user_matrix, w1)\n item_rep = tf.matmul(item_matrix, w2)\n user_emb1 = tf.nn.l2_normalize(user_rep, axis=1)\n item_emb1 = tf.nn.l2_normalize(item_rep, axis=1)\n sim_matrix = tf.nn.sigmoid(tf.matmul(user_emb1, tf.transpose(item_emb1))) #[m,n] \n\n loss_simi_adj = gama*tf.reduce_mean(tf.square(sim_matrix-adj_matrix)) \n user_topk = tf.nn.top_k(sim_matrix, topk1)\n user_topk_values = tf.reshape(user_topk.values,[-1])\n user_topk_columns = tf.cast(tf.reshape(user_topk.indices, [-1,1]), dtype=tf.int64)\n user_all_rows = np.reshape(np.arange(user_count), [-1,1])\n user_topk_rows = tf.reshape(tf.tile(user_all_rows,multiples= [1,topk1]), [-1,1])\n user_topk_indexs = tf.concat([user_topk_rows, user_topk_columns], 1)\n user_item_sparse_simi = tf.SparseTensor(indices=user_topk_indexs, values=user_topk_values, dense_shape=[user_count, item_count]) \n\n item_topk = tf.nn.top_k(tf.transpose(sim_matrix), topk2)\n item_topk_values = tf.reshape(item_topk.values, [-1])\n item_topk_columns = tf.cast(tf.reshape(item_topk.indices, [-1,1]), dtype=tf.int64)\n item_all_rows = np.reshape(np.arange(item_count), [-1,1])\n item_topk_rows = tf.reshape(tf.tile(item_all_rows,multiples= [1,topk2]), [-1,1])\n item_topk_indexs = tf.concat([item_topk_rows, 
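# ---------------------------------------------------------------------------
# utils.nor_sparse_matrix is used below but not shown in this excerpt. GCN
# propagation of this kind typically row-normalizes the (augmented) adjacency
# so each aggregation step averages neighbour embeddings. A dense numpy
# sketch of that operation, for illustration only (the project's sparse TF
# version is assumed to be equivalent in spirit):
import numpy as np

def row_normalize(adj):
    """Divide each row of `adj` by its row sum (all-zero rows stay zero)."""
    row_sum = adj.sum(axis=1, keepdims=True)
    row_sum[row_sum == 0.0] = 1.0   # avoid division by zero
    return adj / row_sum

A = np.array([[1.0, 1.0, 0.0],
              [0.0, 0.0, 0.0],
              [1.0, 1.0, 1.0]])
A_hat = row_normalize(A)            # each nonempty row now sums to 1
# ---------------------------------------------------------------------------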
item_topk_columns], 1)\n item_user_sparse_simi = tf.SparseTensor(indices=item_topk_indexs, values=item_topk_values, dense_shape=[item_count, user_count]) \n return user_item_sparse_simi, item_user_sparse_simi, loss_simi_adj, user_topk_indexs\n\n\n########################################### test part ############################################\ndef _init(_test_ratings, _all_ratings, _topk_list, _predictions):\n global test_ratings, all_ratings, topk_list, predictions\n test_ratings = _test_ratings\n all_ratings = _all_ratings\n topk_list = _topk_list\n predictions = _predictions\n\n\ndef get_one_performance(_uid):\n u = _uid\n metrics = {}\n pos_index = list(test_ratings[u])\n pos_length = len(test_ratings[u])\n neg_index = list(itemset-set(all_ratings[u]))\n pos_index.extend(neg_index) \n pre_one = predictions[u][pos_index] \n indices = utils.largest_indices(pre_one, topk_list[-1])\n indices=list(indices[0])\n for topk in topk_list:\n hit_value = 0\n dcg_value = 0 \n for idx in range(topk):\n ranking = indices[idx]\n if ranking < pos_length:\n hit_value += 1\n dcg_value += math.log(2) / math.log(idx+2) \n target_length = min(topk,pos_length)\n hr_cur = hit_value/target_length\n ndcg_cur = dcg_value/utils.get_idcg(target_length)\n metrics[topk] = {'hr': hr_cur, 'ndcg':ndcg_cur}\n return metrics\n\n\ndef evaluate(_testdata, _user_items, _topk_list):\n hr_topk_list = defaultdict(list)\n ndcg_topk_list = defaultdict(list)\n hr_out, ndcg_out = {}, {}\n user_matrix, item_matrix = sess.run([final_user_emb, final_item_emb])\n _predictions = np.matmul(user_matrix, item_matrix.T)\n test_users = _testdata.keys()\n with mp.Pool(processes=10, initializer=_init, initargs=(_testdata, _user_items, _topk_list, _predictions)) as pool:\n all_metrics = pool.map(get_one_performance, test_users)\n for i, one_metrics in enumerate(all_metrics):\n for topk in _topk_list:\n hr_topk_list[topk].append(one_metrics[topk]['hr'])\n ndcg_topk_list[topk].append(one_metrics[topk]['ndcg'])\n for topk in _topk_list:\n hr_out[topk] = np.mean(hr_topk_list[topk])\n ndcg_out[topk] = np.mean(ndcg_topk_list[topk])\n return hr_out, ndcg_out\n\n\ndef user_group_test(test_ratings, all_ratings, topk=10):\n '''\n# 用来比较不同spasity下user_group的测试结果\n '''\n user_matrix, item_matrix = sess.run([final_user_emb, final_item_emb])\n all_hr_list = defaultdict(list)\n all_ndcg_list = defaultdict(list)\n hr_out = {}\n ndcg_out = {}\n user_group = np.load(data_path+'user_group.npy', allow_pickle=True).tolist()\n ratings = user_matrix.dot(item_matrix.T)\n for group_idx in range(len(user_group)):\n group_data = user_group[group_idx]\n for u in group_data:\n if u in test_ratings.keys():\n pos_index = list(test_ratings[u])\n pos_length = len(test_ratings[u])\n neg_index = list(itemset-set(all_ratings[u]))\n pos_index.extend(neg_index) \n pre_one=ratings[u][pos_index] \n indices=utils.largest_indices(pre_one, topk)\n indices=list(indices[0]) \n hit_value = 0\n dcg_value = 0 \n for idx in range(topk):\n ranking = indices[idx]\n if ranking < pos_length:\n hit_value += 1\n dcg_value += math.log(2) / math.log(idx+2) \n target_length = min(topk, pos_length) \n all_hr_list[group_idx].append(hit_value/target_length)\n idcg_value = utils.get_idcg(target_length)\n all_ndcg_list[group_idx].append(dcg_value/idcg_value) \n for group_idx in range(len(user_group)):\n hr_out[group_idx] = round(sum(all_hr_list[group_idx])/len(all_hr_list[group_idx]), 5)\n ndcg_out[group_idx] = round(sum(all_ndcg_list[group_idx])/len(all_hr_list[group_idx]), 5)\n print('group_idx', 
group_idx, 'hr:', hr_out[group_idx], 'ndcg:',ndcg_out[group_idx])\n return hr_out, ndcg_out\n\n\n########################################### construct model ###########################################\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = str(device_id)\nuser_emb = tf.Variable(tf.random.normal([user_count, dimension], stddev=0.01, dtype=tf.float32), name='user_emb')\nitem_emb = tf.Variable(tf.random.normal([item_count, dimension], stddev=0.01, dtype=tf.float32), name='item_emb')\nsim_w1 = tf.Variable(tf.random.normal([dimension, 16], dtype=tf.float32), name='sim_w1')\nsim_w2 = tf.Variable(tf.random.normal([dimension, 16], dtype=tf.float32), name='sim_w2')\nbilinear_w = tf.Variable(tf.random_normal([64, 64], stddev=0.01), name='bilinear_w')\nbilinear_b = tf.Variable(tf.zeros([1]), name='bilinear_b') \nval_dict = {'user_emb':user_emb, 'item_emb':item_emb, \n 'sim_w1':sim_w1, 'sim_w2':sim_w2,\n 'bilinear_w':bilinear_w, 'bilinear_b':bilinear_b}\n\n\n################################### get adaptive adjacent matrix #######################################\nuser_item_simi_matrix, item_user_simi_matrix, loss_s, add_edges = \\\nget_simi_matrix_old(user_emb, item_emb, sim_w1, sim_w2, adj_matrix_dense, topk_u, topk_v, gama)\nadd_sparse_user_matrix = tf.sparse_add(user_item_adj_matrix, user_item_simi_matrix)\nadd_sparse_item_matrix = tf.sparse_add(item_user_adj_matrix, item_user_simi_matrix)\nuser_item_final_matrix = utils.nor_sparse_matrix(add_sparse_user_matrix)\nitem_user_final_matrix = utils.nor_sparse_matrix(add_sparse_item_matrix)\n\n\n################################### gcn model input feature matrix ######################################\ndef model_gcn_with_feature(_user_emb, _item_emb, _layer):\n all_user_emb, all_item_emb = [_user_emb], [_item_emb]\n for _ in range(_layer):\n tmp_user_emb = tf.sparse_tensor_dense_matmul(user_item_final_matrix, all_item_emb[-1]) + all_user_emb[-1]\n tmp_item_emb = tf.sparse_tensor_dense_matmul(item_user_final_matrix, all_user_emb[-1]) + all_item_emb[-1]\n all_user_emb.append(tmp_user_emb)\n all_item_emb.append(tmp_item_emb)\n return all_user_emb[-1], all_item_emb[-1]\nfinal_user_emb,final_item_emb = model_gcn_with_feature(user_emb, item_emb, layer)\n_shuffle_user_emb, _shuffle_item_emb = utils.shuffle_embedding(user_emb, item_emb, dimension, 16)\nshuffle_user_emb, shuffle_item_emb = model_gcn_with_feature(_shuffle_user_emb, _shuffle_item_emb, layer)\n\n\n################################## gcn model input adjacent matrix ######################################\ndef model_gcn_with_structure(_user_item_matrix, _item_user_matrix, _layer):\n all_user_emb, all_item_emb = [user_emb], [item_emb]\n for _ in range(_layer):\n tmp_user_emb = tf.sparse_tensor_dense_matmul(_user_item_matrix, all_item_emb[-1]) + all_user_emb[-1]\n tmp_item_emb = tf.sparse_tensor_dense_matmul(_item_user_matrix, all_user_emb[-1]) + all_item_emb[-1]\n all_user_emb.append(tmp_user_emb)\n all_item_emb.append(tmp_item_emb)\n return all_user_emb[-1], all_item_emb[-1]\nuser_item_cor_matrix, item_user_cor_matrix = utils.graph_random(user_count, item_count, add_count)\nadd_uv_cor_matrix = tf.sparse_add(user_item_cor_matrix, add_sparse_user_matrix)\nadd_vu_cor_matrix = tf.sparse_add(item_user_cor_matrix, add_sparse_item_matrix)\nnor_uv_cor_matrix = utils.nor_sparse_matrix(add_uv_cor_matrix)\nnor_vu_cor_matrix = utils.nor_sparse_matrix(add_vu_cor_matrix)\ncur_user_emb, cur_item_emb = model_gcn_with_structure(nor_uv_cor_matrix, nor_vu_cor_matrix, layer)\n\n\n########################################### 
rating loss #################################################\nu_input = tf.placeholder(\"int32\", [None, 1])\ni_input = tf.placeholder(\"int32\", [None, 1])\nj_input = tf.placeholder(\"int32\", [None, 1])\nua = tf.gather_nd(final_user_emb, u_input)\nvi = tf.gather_nd(final_item_emb, i_input)\nvj = tf.gather_nd(final_item_emb, j_input)\nRai = tf.reduce_sum(tf.multiply(ua, vi), 1, keepdims=True)\nRaj = tf.reduce_sum(tf.multiply(ua, vj), 1, keepdims=True)\nauc = tf.reduce_mean(tf.to_float((Rai-Raj)>0))\nbprloss = -tf.reduce_mean(tf.log(tf.clip_by_value(tf.nn.sigmoid(Rai-Raj),1e-9,1.0)))\nregulation = lamda * tf.reduce_mean(tf.square(ua)+tf.square(vi)+tf.square(vj)) \nloss_r = bprloss + regulation\n\n\n####################################### discriminator loss ##############################################\ndef make_discriminator_bilinear(_lo_emb, _gl_emb):\n '''\n input: _lo_emb[None,64], _gl_emb[None,64]\n output: label[None,1]\n '''\n emb_d1 = tf.matmul(_lo_emb, bilinear_w)\n emb_d2 = tf.multiply(emb_d1, _gl_emb)\n emb_d3 = tf.reduce_sum(emb_d2, 1, keepdims=True) + bilinear_b\n return emb_d3\n\n\ndef local_global_v1():\n '''\n pos:, neg:\n ''' \n pos_local_emb = tf.concat([tf.sigmoid(ua), tf.sigmoid(vi)], 1)\n neg_local_emb = tf.concat([tf.sigmoid(ua), tf.sigmoid(vj)], 1) \n# avg_global_emb = tf.reduce_mean(pos_local_emb, 0, keepdims=True)\n \n add_user_list, add_item_list = tf.split(add_edges, 2, axis=1)\n all_user_emb_1 = tf.gather_nd(final_user_emb, all_user_list) #[,32]\n all_item_emb_1 = tf.gather_nd(final_item_emb, all_item_list) #[,32]\n all_user_emb_2 = tf.gather_nd(final_user_emb, add_user_list)\n all_item_emb_2 = tf.gather_nd(final_item_emb, add_item_list)\n all_user_emb = tf.concat([all_user_emb_1, all_user_emb_2], 0)\n all_item_emb = tf.concat([all_item_emb_1, all_item_emb_2], 0)\n all_edge_emb = tf.concat([tf.sigmoid(all_user_emb), tf.sigmoid(all_item_emb)], 1) #[,64]\n avg_global_emb = tf.reduce_mean(all_edge_emb, 0, keepdims=True)\n \n get_shape = tf.reduce_sum(ua, 1, keepdims=True)\n global_emb = tf.tile(avg_global_emb, [batch_size, 1])\n one_label = tf.ones_like(get_shape, dtype=tf.float32)\n zero_label = tf.zeros_like(get_shape, dtype=tf.float32)\n real_predict = make_discriminator_bilinear(pos_local_emb, global_emb)\n fake_predict = make_discriminator_bilinear(neg_local_emb, global_emb)\n d_loss_all = tf.nn.sigmoid_cross_entropy_with_logits(logits=real_predict, labels=one_label) + \\\n tf.nn.sigmoid_cross_entropy_with_logits(logits=fake_predict, labels=zero_label) \n loss_d = alpha * tf.reduce_mean(d_loss_all)\n return loss_d\n\n\ndef local_global_v2():\n '''\n G'=(A,F')\n '''\n pos_local_emb = tf.concat([tf.sigmoid(ua), tf.sigmoid(vi)], 1)\n _ua = tf.gather_nd(shuffle_user_emb, u_input)\n _vi = tf.gather_nd(shuffle_item_emb, i_input)\n neg_local_emb = tf.concat([tf.sigmoid(_ua), tf.sigmoid(_vi)], 1)\n avg_global_emb = tf.reduce_mean(pos_local_emb, 0, keepdims=True) \n \n get_shape = tf.reduce_sum(ua, 1, keepdims=True)\n global_emb = tf.tile(avg_global_emb, [batch_size, 1])\n one_label = tf.ones_like(get_shape, dtype=tf.float32)\n zero_label = tf.zeros_like(get_shape, dtype=tf.float32)\n real_predict = make_discriminator_bilinear(pos_local_emb, global_emb)\n fake_predict = make_discriminator_bilinear(neg_local_emb, global_emb)\n d_loss_all = tf.nn.sigmoid_cross_entropy_with_logits(logits=real_predict, labels=one_label) + \\\n tf.nn.sigmoid_cross_entropy_with_logits(logits=fake_predict, labels=zero_label) \n loss_d = alpha * tf.reduce_mean(d_loss_all)\n return 
loss_d\n\n\ndef local_global_v3():\n '''\n G'=(A',F)\n '''\n pos_local_emb = tf.concat([tf.sigmoid(ua), tf.sigmoid(vi)], 1)\n _ua = tf.gather_nd(cur_user_emb, u_input)\n _vi = tf.gather_nd(cur_item_emb, i_input)\n neg_local_emb = tf.concat([tf.sigmoid(_ua), tf.sigmoid(_vi)], 1)\n avg_global_emb = tf.reduce_mean(pos_local_emb, 0, keepdims=True)\n \n get_shape = tf.reduce_sum(ua, 1, keepdims=True)\n global_emb = tf.tile(avg_global_emb, [batch_size, 1])\n one_label = tf.ones_like(get_shape, dtype=tf.float32)\n zero_label = tf.zeros_like(get_shape, dtype=tf.float32)\n real_predict = make_discriminator_bilinear(pos_local_emb, global_emb)\n fake_predict = make_discriminator_bilinear(neg_local_emb, global_emb)\n d_loss_all = tf.nn.sigmoid_cross_entropy_with_logits(logits=real_predict, labels=one_label) + \\\n tf.nn.sigmoid_cross_entropy_with_logits(logits=fake_predict, labels=zero_label) \n loss_d = alpha * tf.reduce_mean(d_loss_all)\n return loss_d\n\nif version == 1:\n loss_d = local_global_v1()\nif version == 2:\n loss_d = local_global_v2()\nif version == 3:\n loss_d = local_global_v3()\nloss = loss_r + loss_s +loss_d\nopt = tf.compat.v1.train.AdamOptimizer(learning_rate).minimize(loss)\n\n\n\n# start tensorflow session\ninit = tf.global_variables_initializer()\nsaver = tf.train.Saver(val_dict, max_to_keep=5)\nconfig = tf.ConfigProto()\nconfig.gpu_options.allow_growth = True\nsess = tf.compat.v1.Session(config=config)\nsess.run(init)\nif pre_flag == 1:\n saver.restore(sess, cur_ckpt)\n\n\n########################################### start train ################################################\nif test_flag == 0:\n print('start training...')\n valndcg = {}\n max_ndcg = 0\n for epoch in range(epochs):\n # train part\n t1 = time()\n t_train = utils.get_bpr_data(user_items, traindata, item_count, 1)\n indexs = np.arange(t_train.shape[0])\n np.random.shuffle(indexs)\n sum_auc,sum_loss1,sum_loss2,sum_loss3,sum_train = 0, 0, 0, 0, 0 \n for k in range(int(t_train.shape[0]/batch_size)+1):\n start_index = k*batch_size\n end_index = min(t_train.shape[0], (k+1)*batch_size)\n if end_index == t_train.shape[0]:\n start_index = end_index - batch_size\n triple_data = t_train[indexs[start_index:end_index]]\n u_list, i_list, j_list = triple_data[:,0], triple_data[:,1], triple_data[:,2]\n _auc,_loss1,_loss2,_loss3,_ = sess.run([auc,loss_s,loss_r,loss_d,opt], \\\n feed_dict={u_input:np.reshape(u_list,[-1,1]),\n i_input:np.reshape(i_list,[-1,1]), \n j_input:np.reshape(j_list,[-1,1])})\n sum_auc += _auc * len(u_list)\n sum_loss1 += _loss1 * len(u_list)\n sum_loss2 += _loss2 * len(u_list)\n sum_loss3 += _loss3 * len(u_list)\n sum_train += len(u_list) \n mean_auc = sum_auc/sum_train\n mean_loss1 = sum_loss1/sum_train\n mean_loss2 = sum_loss2/sum_train\n mean_loss3 = sum_loss3/sum_train\n mean_loss = mean_loss1+mean_loss2+mean_loss3\n print('epoch:{:d}, trainauc:{:.4f}, loss_s:{:.4f}, loss_r:{:.4f}, loss_d:{:.4f}, trainloss:{:.4f}'\n .format(epoch, mean_auc, mean_loss1, mean_loss2, mean_loss3, mean_loss))\n train_txt.write('epoch:{:d}, trainauc:{:.4f}, loss_s:{:.4f}, loss_r:{:.4f}, loss_d:{:.4f}, trainloss:{:.4f}'\n .format(epoch, mean_auc, mean_loss1, mean_loss2, mean_loss3, mean_loss)+ '\\n')\n t2 = time()\n \n #val part\n t_val = utils.get_bpr_data(user_items, valdata, item_count, 1)\n indexs = np.arange(t_val.shape[0])\n np.random.shuffle(indexs)\n sum_auc, sum_loss1, sum_loss2, sum_loss3, sum_train = 0, 0, 0, 0, 0 \n for k in range(int(t_val.shape[0]/batch_size)+1):\n start_index = k*batch_size\n end_index = 
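# ---------------------------------------------------------------------------
# (Added aside, illustrative only.) The BPR objective computed as loss_r
# earlier rewards ranking an observed item i above a sampled negative j for
# the same user; the same quantity in plain numpy:
#
# import numpy as np
# def bpr_loss(r_ui, r_uj):
#     # -log(sigmoid(r_ui - r_uj)), clipped like the TF version for stability
#     x = 1.0 / (1.0 + np.exp(-(r_ui - r_uj)))
#     return -np.mean(np.log(np.clip(x, 1e-9, 1.0)))
# ---------------------------------------------------------------------------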
min(t_val.shape[0], (k+1)*batch_size)\n if end_index == t_val.shape[0]:\n start_index = end_index - batch_size\n triple_data = t_val[indexs[start_index:end_index]]\n u_list, i_list, j_list = triple_data[:,0], triple_data[:,1], triple_data[:,2]\n _auc,_loss1,_loss2,_loss3 = sess.run([auc,loss_s,loss_r,loss_d], \\\n feed_dict={u_input:np.reshape(u_list,[-1,1]),\n i_input:np.reshape(i_list,[-1,1]), \n j_input:np.reshape(j_list,[-1,1])})\n sum_auc += _auc * len(u_list)\n sum_loss1 += _loss1 * len(u_list)\n sum_loss2 += _loss2 * len(u_list)\n sum_loss3 += _loss3 * len(u_list)\n sum_train += len(u_list) \n mean_auc = sum_auc/sum_train\n mean_loss1 = sum_loss1/sum_train\n mean_loss2 = sum_loss2/sum_train\n mean_loss3 = sum_loss3/sum_train\n mean_loss = mean_loss1+mean_loss2+mean_loss3\n print('epoch:{:d}, validauc:{:.4f}, loss_s:{:.4f}, loss_r:{:.4f}, loss_d:{:.4f}, validloss:{:.4f}'\n .format(epoch, mean_auc, mean_loss1, mean_loss2, mean_loss3, mean_loss))\n train_txt.write('epoch:{:d}, validauc:{:.4f}, loss_s:{:.4f}, loss_r:{:.4f}, loss_d:{:.4f}, validloss:{:.4f}'\n .format(epoch, mean_auc, mean_loss1, mean_loss2, mean_loss3, mean_loss)+ '\\n')\n\n if epoch >= start_epoch:\n _hr, _ndcg = evaluate(testdata, user_items, [5,10,15,20,25,30,35,40,45,50])\n valndcg[epoch] = _ndcg[10]\n max_ndcg = max(max_ndcg, _ndcg[10])\n if _ndcg[10] == max_ndcg:\n saver.save(sess, model_save_path+'epoch_'+str(epoch)+'_ndcg_'+str(_ndcg[10])+'.ckpt')\n best_ckpt = model_save_path+'epoch_'+str(epoch)+'_ndcg_'+str(_ndcg[10])+'.ckpt'\n t3 = time() \n print('hr@10:{:.5f}, ndcg@10:{:.5f}, train time:{:.4f}, test time:{:.4f}'.format(_hr[10], _ndcg[10], t2-t1, t3-t2), '\\n')\n train_txt.write('hr@10:{:.5f}, ndcg@10:{:.5f}, train time:{:.4f}, test time:{:.4f}'.format(_hr[10], _ndcg[10], t2-t1, t3-t2)+'\\n\\n')\n print('*****train over*****')\n train_txt.close()\n\n print('best ckpt is:', best_ckpt)\n saver.restore(sess, best_ckpt)\n _hr, _ndcg = evaluate(testdata, user_items, [5,10,15,20,25,30,35,40,45,50])\n for key in _hr.keys():\n print('topk:{:d}, hr{:.5f}, ndcg:{:.5f}'.format(key, _hr[key], _ndcg[key]))\n evaluate_txt.write('topk:{:d}, hr{:.5f}, ndcg:{:.5f}'.format(key, _hr[key], _ndcg[key]) + '\\n')\n evaluate_txt.close()\n\n\nif test_flag == 1:\n print('best ckpt is:', best_ckpt)\n saver.restore(sess, best_ckpt)\n _hr, _ndcg = evaluate(testdata, user_items, [5,10,15,20,25,30,35,40,45,50])\n for key in _hr.keys():\n print('topk:{:d}, hr{:.5f}, ndcg:{:.5f}'.format(key, _hr[key], _ndcg[key]))\n evaluate_txt.write('topk:{:d}, hr{:.5f}, ndcg:{:.5f}'.format(key, _hr[key], _ndcg[key]) + '\\n')\n evaluate_txt.write('\\n')\n _hr, _ndcg = user_group_test(testdata, user_items) \n for key in _hr.keys():\n print('topk:{:d}, hr{:.5f}, ndcg:{:.5f}'.format(key, _hr[key], _ndcg[key]))\n evaluate_txt.write('topk:{:d}, hr{:.5f}, ndcg:{:.5f}'.format(key, _hr[key], _ndcg[key]) + '\\n') \n evaluate_txt.write('\\n')\n evaluate_txt.close()\n","sub_path":"code/pinterest_code/egln.py","file_name":"egln.py","file_ext":"py","file_size_in_byte":23500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"175432500","text":"#!/usr/bin/env python3\n\"\"\"\n주가 [날짜, 종가, 시가, 고가, 저가, 거래량] --> to.xlsx\nOutput filename: ./input/[stock_name].xlsx\n\"\"\"\nimport pandas as pd\nimport pandas_datareader.data as web\n\nfrom datetime import datetime\n\nimport sys\nimport io\n\nsys.stdout = io.TextIOWrapper(sys.stdout.detach(), encoding = 'utf-8')\nsys.stderr = io.TextIOWrapper(sys.stderr.detach(), encoding = 
'utf-8')\n\ndef stock_list():\n    df = pd.read_excel('./input/상장법인목록.xlsx', dtype='str')  # master list of listed companies\n\n    return df\n\ndef get_url(company, stock_list):\n    code = stock_list.query(\"company=='{}'\".format(company))['code'].to_string(index = False)\n    url = 'http://finance.naver.com/item/sise_day.nhn?code={code}'.format(code = code)\n    print(\"Request URL = {}\".format(url))\n\n    return url\n\ndef get_stock_price(stock_name, stock_list):\n    url = get_url(stock_name, stock_list)\n    df = pd.DataFrame()\n\n    for page in range(1, 100): # loop over result pages of daily prices\n        pg_url = '{url}&page={page}'.format(url=url, page=page)\n        df = df.append(pd.read_html(pg_url, header=0)[0], ignore_index=True)\n\n    df = df[['날짜', '종가', '시가', '고가', '저가', '거래량']]\n    df = df.rename(columns={'날짜':'date', '종가':'close', '시가':'open',\n                            '고가':'high', '저가':'low', '거래량':'volume'})\n    stock_df = df.dropna()\n\n    return stock_df\n\ndef main():\n    df = stock_list()\n    company_list = df['company'][82]\n\n    sp = get_stock_price(company_list, df)\n    print(sp)\n\n    file_name = './input/{}.xlsx'.format(company_list)\n    sp = sp.astype('str')\n    sp.to_excel(file_name, index=False)\n    print('File created successfully. \\nfile_name : {}'.format(file_name))\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"get_stock_price.py","file_name":"get_stock_price.py","file_ext":"py","file_size_in_byte":1766,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"24539371","text":"\"\"\"Zurich Instruments LabOne Python API Example\n\nDemonstrate how to obtain demodulator data using ziDAQServer's blocking\n(synchronous) poll() command.\n\nThis example demonstrates that whilst poll() does indeed block for the specified\nrecording duration, it will not only return the data during the recording\nduration, but also data accumulated since subscribing (before poll was\ncalled). In other words, subscribed data is buffered by the data server and API\n(for a limited time) and this buffered data will also be returned by poll().\n\n\"\"\"\n\n# Copyright 2016 Zurich Instruments AG\n\nfrom __future__ import print_function\nimport time\nimport numpy as np\nimport zhinst.utils\n\n\ndef run_example(device_id, amplitude=0.5, do_plot=False):\n    \"\"\"Run the example: Connect to the device specified by device_id and obtain\n    demodulator data using ziDAQServer's blocking (synchronous) poll() command.\n\n    Requirements:\n\n      Hardware configuration: Connect signal output 1 to signal input 1 with a\n      BNC cable.\n\n    Arguments:\n\n      device_id (str): The ID of the device to run the example with. For\n        example, `dev2006` or `uhf-dev2006`.\n\n      amplitude (float, optional): The amplitude to set on the signal output.\n\n      do_plot (bool, optional): Specify whether to plot the polled data. 
Default\n        is no plot output.\n\n    Returns:\n\n      sample (dict of numpy arrays): The demodulator sample dictionary with the\n      additional demod R and demod phi fields calculated in the example.\n\n    Raises:\n\n      RuntimeError: If the device is not \"discoverable\" from the API.\n\n    See the \"LabOne Programming Manual\" for further help, available:\n      - On Windows via the Start-Menu:\n        Programs -> Zurich Instruments -> Documentation\n      - On Linux in the LabOne .tar.gz archive in the \"Documentation\"\n        sub-folder.\n\n    \"\"\"\n\n    apilevel_example = 6 # The API level supported by this example.\n    # Call a zhinst utility function that returns:\n    # - an API session `daq` in order to communicate with devices via the data server.\n    # - the device ID string that specifies the device branch in the server's node hierarchy.\n    # - the device's discovery properties.\n    err_msg = \"This example only supports instruments with demodulators.\"\n    (daq, device, props) = zhinst.utils.create_api_session(device_id, apilevel_example,\n                                                           required_devtype='.*LI|.*IA|.*IS',\n                                                           required_err_msg=err_msg)\n    zhinst.utils.api_server_version_check(daq)\n\n    # Create a base instrument configuration: disable all outputs, demods and scopes.\n    general_setting = [['/%s/demods/*/enable' % device, 0],\n                       ['/%s/demods/*/trigger' % device, 0],\n                       ['/%s/sigouts/*/enables/*' % device, 0],\n                       ['/%s/scopes/*/enable' % device, 0]]\n    if 'IA' in props['options']:\n        general_setting.append(['/%s/imps/*/enable' % device, 0])\n    daq.set(general_setting)\n    # Perform a global synchronisation between the device and the data server:\n    # Ensure that the settings have taken effect on the device before setting\n    # the next configuration.\n    daq.sync()\n\n    # Now configure the instrument for this experiment. The following channels\n    # and indices work on all device configurations. The values below may be\n    # changed if the instrument has multiple input/output channels and/or either\n    # the Multifrequency or Multidemodulator options installed.\n    out_channel = 0\n    out_mixer_channel = zhinst.utils.default_output_mixer_channel(props)\n    in_channel = 0\n    demod_index = 0\n    osc_index = 0\n    demod_rate = 1e3\n    time_constant = 0.01\n    frequency = 400e3\n    exp_setting = [['/%s/sigins/%d/ac' % (device, in_channel), 0],\n                   ['/%s/sigins/%d/range' % (device, in_channel), 2*amplitude],\n                   ['/%s/demods/%d/enable' % (device, demod_index), 1],\n                   ['/%s/demods/%d/rate' % (device, demod_index), demod_rate],\n                   ['/%s/demods/%d/adcselect' % (device, demod_index), in_channel],\n                   ['/%s/demods/%d/order' % (device, demod_index), 4],\n                   ['/%s/demods/%d/timeconstant' % (device, demod_index), time_constant],\n                   ['/%s/demods/%d/oscselect' % (device, demod_index), osc_index],\n                   ['/%s/demods/%d/harmonic' % (device, demod_index), 1],\n                   ['/%s/oscs/%d/freq' % (device, osc_index), frequency],\n                   ['/%s/sigouts/%d/on' % (device, out_channel), 1],\n                   ['/%s/sigouts/%d/enables/%d' % (device, out_channel, out_mixer_channel), 1],\n                   ['/%s/sigouts/%d/range' % (device, out_channel), 1],\n                   ['/%s/sigouts/%d/amplitudes/%d' % (device, out_channel, out_mixer_channel), amplitude]]\n    # Some other device-type dependent configuration may be required. 
For\n # example, disable the signal inputs `diff` and the signal outputs `add` for\n # HF2 instruments.\n if props['devicetype'].startswith('HF2'):\n exp_setting.append(['/%s/sigins/%d/diff' % (device, in_channel), 0])\n exp_setting.append(['/%s/sigouts/%d/add' % (device, out_channel), 0])\n daq.set(exp_setting)\n\n # Unsubscribe any streaming data.\n daq.unsubscribe('*')\n\n # Wait for the demodulator filter to settle.\n time.sleep(10*time_constant)\n\n # Perform a global synchronisation between the device and the data server:\n # Ensure that 1. the settings have taken effect on the device before issuing\n # the poll() command and 2. clear the API's data buffers. Note: the sync()\n # must be issued after waiting for the demodulator filter to settle above.\n daq.sync()\n\n # Subscribe to the demodulator's sample node path.\n path = '/%s/demods/%d/sample' % (device, demod_index)\n daq.subscribe(path)\n\n # Sleep for demonstration purposes: Allow data to accumulate in the data\n # server's buffers for one second: poll() will not only return the data\n # accumulated during the specified poll_length, but also for data\n # accumulated since the subscribe() or the previous poll.\n sleep_length = 1.0\n # For demonstration only: We could, for example, be processing the data\n # returned from a previous poll().\n time.sleep(sleep_length)\n\n # Poll the subscribed data from the data server. Poll will block and record\n # for poll_length seconds.\n poll_length = 0.1 # [s]\n poll_timeout = 500 # [ms]\n poll_flags = 0\n poll_return_flat_dict = True\n data = daq.poll(poll_length, poll_timeout, poll_flags, poll_return_flat_dict)\n\n # Unsubscribe from all paths.\n daq.unsubscribe('*')\n\n # Check the dictionary returned is non-empty\n assert data, \"poll() returned an empty data dictionary, did you subscribe to any paths?\"\n\n # The data returned is a dictionary of dictionaries that reflects the node's path.\n # Note, the data could be empty if no data had arrived, e.g., if the demods\n # were disabled or had demodulator rate 0.\n assert path in data, \"The data dictionary returned by poll has no key `%s`.\" % path\n\n # Access the demodulator sample using the node's path.\n sample = data[path]\n\n # Let's check how many seconds of demodulator data were returned by poll.\n # First, get the sampling rate of the device's ADCs, the device clockbase...\n clockbase = float(daq.getInt('/%s/clockbase' % device))\n # ... 
and use it to convert sample timestamp ticks to seconds:\n    dt_seconds = (sample['timestamp'][-1] - sample['timestamp'][0])/clockbase\n    print(\"poll() returned {:.3f} seconds of demodulator data.\".format(dt_seconds))\n    tol_percent = 10\n    dt_seconds_expected = sleep_length + poll_length\n    assert abs(dt_seconds - dt_seconds_expected)/dt_seconds_expected*100 < tol_percent, \\\n        \"Duration of demod data returned by poll() (%.3f s) differs \" % dt_seconds + \\\n        \"from the expected duration (%.3f s) by more than %0.2f %%.\" % \\\n        (dt_seconds_expected, tol_percent)\n\n    # Calculate the demodulator's magnitude and phase and add them to the dict.\n    sample['R'] = np.abs(sample['x'] + 1j*sample['y'])\n    sample['phi'] = np.angle(sample['x'] + 1j*sample['y'])\n    print(\"Average measured RMS amplitude is {:.3e} V.\".format(np.mean(sample['R'])))\n\n    if do_plot:\n        import matplotlib.pyplot as plt\n\n        # Convert timestamps from ticks to seconds via clockbase.\n        t = (sample['timestamp'] - sample['timestamp'][0])/clockbase\n\n        # Create plot\n        plt.figure()\n        plt.grid(True)\n        plt.plot(t, sample['R'])\n        plt.title('Demodulator data')\n        plt.xlabel('Time (s)')\n        plt.ylabel('R (V)')\n        mean_r = np.mean(sample['R'])\n        plt.axis([t[0], t[-1], 0.99*mean_r, 1.01*mean_r])\n        plt.draw()\n        plt.show()\n\n    return sample\n","sub_path":"LIA-master/zhinst/examples/common/example_poll.py","file_name":"example_poll.py","file_ext":"py","file_size_in_byte":8998,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"281163686","text":"from simple_linear_regression import *\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nret = linear_regression(\"test_data.txt\")\nfile_info = ret[0]\na = ret[1]\nb = ret[2]\n\nprint(\"a:\",a)\nprint(\"b:\",b)\n\nprint(\"y =\", a, \"+\", b, \"x\")\n\nx = np.array(range(0,20))\ny = a + b * x\n\nplt.figure(1)\n\nplt.plot(file_info[:,0], file_info[:,1], \"go\")\nplt.plot(x, y, \"b\")\n\n\nplt.xlim(0.0, 20.0)\nplt.ylim(0.0, 100.0)\n\nplt.show()","sub_path":"Math/Linear_regression/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"531412157","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport wxms.validate\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        ('wxms', '0007_auto_20151004_1709'),\n    ]\n\n    operations = [\n        migrations.RemoveField(\n            model_name='activity',\n            name='content',\n        ),\n        migrations.AddField(\n            model_name='activity',\n            name='acontent',\n            field=models.TextField(verbose_name=b'\\xe6\\xb4\\xbb\\xe5\\x8a\\xa8\\xe5\\x86\\x85\\xe5\\xae\\xb9', blank=True),\n        ),\n        migrations.AlterField(\n            model_name='activity',\n            name='atitle',\n            field=models.CharField(max_length=200, verbose_name='\\u6807\\u9898'),\n        ),\n        migrations.AlterField(\n            model_name='activity',\n            name='descs',\n            field=models.CharField(help_text='\\u56de\\u8f66\\u6362\\u884c\\uff0c\\u6bcf\\u884c\\u4e00\\u4e2a\\u63cf\\u8ff0', max_length=1000, verbose_name='\\u6d3b\\u52a8\\u63cf\\u8ff0'),\n        ),\n        migrations.AlterField(\n            model_name='activity',\n            name='hosts',\n            field=models.ManyToManyField(default=None, to='wxms.Host', verbose_name='\\u5206\\u4eab\\u94fe\\u63a5'),\n        ),\n        migrations.AlterField(\n            model_name='activity',\n            name='jump_url',\n            field=models.CharField(help_text='\\u56de\\u8f66\\u6362\\u884c\\uff0c\\u6bcf\\u884c\\u4e00\\u4e2a\\u94fe\\u63a5', max_length=1000, 
verbose_name='\\u8df3\\u8f6c\\u94fe\\u63a5', validators=[wxms.validate.validate_multiurl]),\n ),\n migrations.AlterField(\n model_name='activity',\n name='thumb_url',\n field=models.CharField(help_text='\\u56de\\u8f66\\u6362\\u884c\\uff0c\\u6bcf\\u884c\\u4e00\\u4e2a\\u94fe\\u63a5', max_length=1000, verbose_name='\\u7f29\\u7565\\u56fe\\u94fe\\u63a5', validators=[wxms.validate.validate_multiurl]),\n ),\n migrations.AlterField(\n model_name='activity',\n name='titles',\n field=models.CharField(help_text='\\u56de\\u8f66\\u6362\\u884c\\uff0c\\u6bcf\\u884c\\u4e00\\u4e2a\\u6807\\u9898', max_length=1000, verbose_name='\\u6d3b\\u52a8\\u6807\\u9898'),\n ),\n migrations.AlterField(\n model_name='host',\n name='name',\n field=models.CharField(max_length=200, verbose_name='\\u4e1a\\u52a1\\u57df\\u540d', validators=[wxms.validate.validate_host]),\n ),\n ]\n","sub_path":"wxms/migrations/0008_auto_20151014_0643.py","file_name":"0008_auto_20151014_0643.py","file_ext":"py","file_size_in_byte":2439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"185830888","text":"import os\nimport importlib\nimport inspect\n\nimport six\nimport sklearn\nfrom sklearn import base\nimport jinja2\n\n\nm_names = set()\nfor mod_name in sklearn.__all__:\n import_str = 'from sklearn import %s as _orig' % mod_name\n exec(import_str)\n for name in dir(_orig):\n c = getattr(_orig, name)\n try:\n if not issubclass(c, base.BaseEstimator):\n continue\n except TypeError:\n continue\n for m_name in c.__dict__:\n if m_name.startswith('_'):\n continue\n if m_name == 'score':\n continue\n m = getattr(c, m_name)\n if not six.callable(m):\n continue\n sig = inspect.signature(m)\n params = list(sig.parameters)\n if params[: 2] != ['self', 'X']:\n continue\n # print(name, c, m_name, params)\n m_names.add(m_name)\n\nloader = jinja2.FileSystemLoader(os.path.dirname(__file__))\nenv = jinja2.Environment(loader=loader)\ntmpl = env.get_template('_adapter.py.jinja2')\ncnt =tmpl.render(\n\tcomment='# Auto generted from _adapter.py.jinja2',\n\tm_names=m_names)\nopen(os.path.join(os.path.dirname(__file__), '../ibex/_adapter.py'), 'w').write(cnt)\n","sub_path":"scripts/generate.py","file_name":"generate.py","file_ext":"py","file_size_in_byte":1243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"153792547","text":"import sys\n\nif __name__ == '__main__':\n\n in_list =[]\n while True:\n try:\n num = input()\n if not num:\n break\n in_dict = {}\n for i in range(int(num)):\n A = input()\n if not in_dict.keys().__contains__(A):\n in_dict[A] = int(A)\n\n in_list.append(in_dict)\n\n except:\n break\n\n for i in range(len(in_list)):\n dict = in_list[i]\n AA = sorted(dict.values())\n for k in AA:\n print(k)\n\n\n\n\n\n\n\n\n\n\n","sub_path":"PythonProj/mingming no rand numbers.py","file_name":"mingming no rand numbers.py","file_ext":"py","file_size_in_byte":563,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"59306147","text":"import os\nimport sys\nimport subprocess as sp\nfrom shutil import rmtree, copytree\nfrom printing import *\n\n\ndef run_cmd(command):\n\t\"\"\"\n\tWrapper on subprocess.run to handle shell commands as either a list of args\n\tor a single string.\n\t\"\"\"\n\ttry:\n\t\tif not isinstance(command, list):\n\t\t\tprocess = sp.run(command.split(), stdout=sp.PIPE)\n\t\t\treturn process\n\t\telse:\n\t\t\tprocess = sp.run(command, stdout=sp.PIPE)\n\t\t\treturn process\n\texcept 
FileNotFoundError: # If package manager is missing\n\t\treturn None\n\n\ndef run_cmd_write_stdout(command, filepath):\n\t\"\"\"\n\tRuns a command and then writes its stdout to a file\n\t:param: command str representing command to run\n\t\"\"\"\n\tprocess = run_cmd(command)\n\tif process:\n\t\twith open(filepath, \"w+\") as f:\n\t\t\tf.write(process.stdout.decode('utf-8'))\n\n\ndef mkdir_overwrite(path):\n\t\"\"\"\n\tMakes a new directory, destroying the one at the path if it exists.\n\t\"\"\"\n\tif os.path.isdir(path):\n\t\trmtree(path)\n\tos.makedirs(path)\n\n\ndef mkdir_warn_overwrite(path):\n\t\"\"\"\n\tMake destination dir if path doesn't exist, confirm before overwriting if it does.\n\t\"\"\"\n\tsubdirs = [\"dotfiles\", \"packages\", \"fonts\", \"configs\"]\n\tif os.path.exists(path) and path.split(\"/\")[-1] in subdirs:\n\t\tprint_red_bold(\"Directory {} already exists\\n\".format(path))\n\t\tif prompt_yes_no(\"Erase directory and make new back up?\", Fore.RED):\n\t\t\tmkdir_overwrite(path)\n\t\telse:\n\t\t\tprint_red_bold(\"Exiting to prevent accidental deletion of data.\")\n\t\t\tsys.exit()\n\telif not os.path.exists(path):\n\t\tos.makedirs(path)\n\t\tprint(Fore.BLUE + Style.BRIGHT + \"CREATED DIR: \" + Style.NORMAL + path + Style.RESET_ALL)\n\n\ndef destroy_backup_dir(backup_path):\n\t\"\"\"\n\tDeletes the backup directory and its content\n\t\"\"\"\n\ttry:\n\t\t# TODO: PRINT PATH STYLING. PATH SHOULD NOT BE BOLDED.\n\t\tprint_red_bold(\"Deleting backup directory: {}\".format(backup_path))\n\t\trmtree(backup_path)\n\texcept OSError as e:\n\t\tprint(\"{} Error: {} - {}. {}\".format(Fore.RED, e.filename, e.strerror, Style.RESET_ALL))\n\n\ndef get_abs_path_subfiles(directory):\n\t\"\"\"\n\tReturns a list of absolute paths of all files under a directory, walking it recursively.\n\t\"\"\"\n\tfile_paths = []\n\tfor path, subdirs, files in os.walk(directory):\n\t\tfor name in files:\n\t\t\tfile_paths.append(os.path.join(path, name))\n\treturn file_paths\n\n\ndef copy_dir_if_valid(source_dir, backup_path):\n\t\"\"\"\n\tCopy dotfolder from $HOME, excluding invalid directories.\n\t\"\"\"\n\tinvalid = {\".Trash\", \".npm\", \".cache\", \".rvm\"}\n\tif len(invalid.intersection(set(source_dir.split(\"/\")))) != 0:\n\t\treturn\n\tdest = os.path.join(backup_path, os.path.split(source_dir)[-1])\n\tcopytree(source_dir, dest, symlinks=True)\n\n\ndef safe_mkdir(directory):\n\t\"\"\"\n\tMakes directory if it doesn't already exist.\n\t:param directory:\n\t:return:\n\t\"\"\"\n\tif not os.path.isdir(directory):\n\t\tos.makedirs(directory)\n\n\ndef home_prefix(path):\n\t\"\"\"\n\tAppends the path to the user's home path.\n\t:param path: Path to be appended.\n\t:return: (str) ~/path\n\t\"\"\"\n\thome_path = os.path.expanduser('~')\n\treturn os.path.join(home_path, path)\n\ndef expand_to_abs_path(path):\n\t\"\"\"\n\tExpands relative and user's home paths to the respective absolute path.\n\t:param path: Path to be expanded.\n\t:return: (str) The absolute path.\n\t\"\"\"\n\texpanded_path = os.path.expanduser(path)\n\treturn os.path.abspath(expanded_path)\n","sub_path":"shallow_backup/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"275614063","text":"import requests\nfrom elasticsearch import Elasticsearch\nimport psycopg2\nimport json\nimport operator\nclass phrase_suggester():\n    def Query_Retrieval(Modality):\n        conn1 = psycopg2.connect(port='5432', database='HCA', user='hca_reader', 
password='Ge6=fw!f5Km',\n host='dalphoi.natlab.research.philips.com')\n cur1 = conn1.cursor()\n cur1.execute(\n \"select session_id,case_number, sap_swo,query,modality,number_of_results, parts_consumed, parts_ordered,parts_recommended_in_metric_calc, completeness, success, ftr_actual from hca_logging.cha_eval_session where completeness = '0' and strategy = 'combined with replaceBy' and parts_consumed != '' and modality = '%s' and query !='' ;\"%Modality)\n # 0 1 2 3 4 5 6 7 8 9 10 11\n Query_List = cur1.fetchall()\n cur1.close()\n for session in Query_List:\n session_list = list(session)\n Query_List[Query_List.index(session)] = session_list\n print(len(Query_List))\n return Query_List\n\n\n def Request_Definition(Query_List, Index):\n ES_HOST = {\"host\": \"localhost\", \"port\": 9200}\n es = Elasticsearch([ES_HOST], verify_certs=True, timeout=300)\n for item in Query_List:\n query = item[3]\n\n standard_payload = {\n \"suggest\": {\n \"text\": query,\n \"simple_phrase\": {\n \"phrase\": {\n \"field\": \"Sub_Act.my_standard_analyzer\",\n \"size\": 1,\n \"gram_size\": 5,\n \"max_errors\":3,\n \"direct_generator\": [ {\n \"field\": \"Sub_Act.my_standard_analyzer\",\n \"suggest_mode\": \"always\"\n } ],\n \"highlight\": {\n \"pre_tag\": \"\",\n \"post_tag\": \"\"\n }\n }\n }\n }\n }\n\n #Standard_Request = {\\\"suggest\\\": {\\\"text\\\":\\\"PRss champs can be notified\\\",\\\"simple_phrase\\\": {\\\"phrase\\\": {\\\"field\\\": \\\"Sub_Act.trigram\\\",\\\"size\\\": 3,\\\"gram_size\\\": 5,\\\"max_errors\\\":2,\\\"direct_generator\\\": [{\\\"field\\\": \\\"Sub_Act.trigram\\\",\\\"suggest_mode\\\":\\\"always\\\"}]}}}}\n\n Standard_Result = requests.post(\"http://localhost:9200/\"+Index+\"/_search\", data =json.dumps(standard_payload), headers = {'Content-type': 'application/json'} )\n\n Standard_Content = json.loads(Standard_Result.content)\n\n if Standard_Content['suggest']['simple_phrase'][0]['options'] == []:\n Standard_Suggester = query\n item.append(Standard_Suggester) #12\n item.append(\"Standard_Phrase_Suggester\") #13\n else:\n Standard_Suggester = Standard_Content['suggest']['simple_phrase'][0]['options'][0]['text']\n item.append(Standard_Suggester) #12\n item.append(\"Standard_Phrase_Suggester\") #13\n Standard_Score = Standard_Content['suggest']['simple_phrase'][0]['options'][0]['score']\n\n whitespace_payload = {\n \"suggest\": {\n \"text\": query,\n \"simple_phrase\": {\n \"phrase\": {\n \"field\": \"Sub_Act.my_whitespace_analyzer\",\n \"size\": 1,\n \"gram_size\": 5,\n \"max_errors\": 3,\n \"direct_generator\": [{\n \"field\": \"Sub_Act.my_whitespace_analyzer\",\n \"suggest_mode\": \"always\"\n }],\n \"highlight\": {\n \"pre_tag\": \"\",\n \"post_tag\": \"\"\n }\n }\n }\n }\n }\n Whitespace_Result = requests.post(\"http://localhost:9200/\"+Index+\"/_search\", data=json.dumps(whitespace_payload),\n headers={'Content-type': 'application/json'})\n\n Whitespace_Result_Content = json.loads(Whitespace_Result.content)\n\n if Whitespace_Result_Content['suggest']['simple_phrase'][0]['options'] == []:\n Whitespace_Suggester = query\n item.append(Whitespace_Suggester) # 14\n item.append(\"Whitespace_Phrase_Suggester\") # 15\n else:\n Whitespace_Suggester = Whitespace_Result_Content['suggest']['simple_phrase'][0]['options'][0]['text']\n item.append(Whitespace_Suggester) # 14\n item.append(\"Whitespace_Phrase_Suggester\") # 15\n Whitespace_Score = Whitespace_Result_Content['suggest']['simple_phrase'][0]['options'][0]['score']\n\n Trigram_payload = {\n \"suggest\": {\n \"text\": query,\n 
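# third suggester variant: phrase suggestions scored against the Sub_Act.trigram sub-field\n                    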
\"simple_phrase\": {\n \"phrase\": {\n \"field\": \"Sub_Act.trigram\",\n \"size\": 1,\n \"gram_size\": 5,\n \"max_errors\": 3,\n \"direct_generator\": [{\n \"field\": \"Sub_Act.trigram\",\n \"suggest_mode\": \"always\"\n }],\n \"highlight\": {\n \"pre_tag\": \"\",\n \"post_tag\": \"\"\n }\n }\n }\n }\n }\n\n # Standard_Request = {\\\"suggest\\\": {\\\"text\\\":\\\"PRss champs can be notified\\\",\\\"simple_phrase\\\": {\\\"phrase\\\": {\\\"field\\\": \\\"Sub_Act.trigram\\\",\\\"size\\\": 3,\\\"gram_size\\\": 5,\\\"max_errors\\\":2,\\\"direct_generator\\\": [{\\\"field\\\": \\\"Sub_Act.trigram\\\",\\\"suggest_mode\\\":\\\"always\\\"}]}}}}\n\n Trigram_Result = requests.post(\"http://localhost:9200/\"+Index+\"/_search\", data=json.dumps(Trigram_payload),\n headers={'Content-type': 'application/json'})\n\n Trigram_Content = json.loads(Trigram_Result.content)\n\n if Trigram_Content['suggest']['simple_phrase'][0]['options'] == []:\n Trigram_Suggester = query\n item.append(Trigram_Suggester) # 16\n item.append(\"Trigram_Suggester\") # 17\n else:\n Trigram_Suggester = Trigram_Content['suggest']['simple_phrase'][0]['options'][0]['text']\n item.append(Trigram_Suggester) # 16\n item.append(\"Trigram_Suggester\") # 17\n Trigram_Score = Trigram_Content['suggest']['simple_phrase'][0]['options'][0]['score']\n\n List = []\n Score_List = []\n if Standard_Content['suggest']['simple_phrase'][0]['options'] != []:\n List.append({'Analyzer':'Standard','Socre':Standard_Score})\n Score_List.append(Standard_Score)\n if Whitespace_Result_Content['suggest']['simple_phrase'][0]['options'] != []:\n List.append({'Analyzer':'Whitespace','Socre':Whitespace_Score})\n Score_List.append(Whitespace_Score)\n if Trigram_Content['suggest']['simple_phrase'][0]['options'] != []:\n List.append({'Analyzer':'Trigram','Socre':Trigram_Score})\n Score_List.append(Trigram_Score)\n if Score_List != []:\n index, value = max(enumerate(Score_List), key=operator.itemgetter(1))\n Analyzer = List[index]['Analyzer']\n if Analyzer == \"Standard\":\n item.append(Standard_Content['suggest']['simple_phrase'][0]['options'][0]['text']) # 18\n item.append(\"Best_Suggester\") # 19\n elif Analyzer == \"Whitespace\":\n item.append( Whitespace_Result_Content['suggest']['simple_phrase'][0]['options'][0]['text']) # 18\n item.append(\"Best_Suggester\") # 19\n elif Analyzer == \"Trigram\":\n item.append(Trigram_Content['suggest']['simple_phrase'][0]['options'][0]['text']) # 18\n item.append(\"Best_Suggester\") # 19\n else:\n item.append(query) # 18\n item.append(\"Best_Suggester\") # 19\n\n return Query_List\n\n def Insert_Result(Query_List):\n conn = psycopg2.connect(port='5432', database='ddvp', user='ddvpadmin', password='vr=1QzX.5p',\n host='dalphoi.natlab.research.philips.com')\n cur = conn.cursor()\n\n for session in Query_List:\n cur.execute(\n \"INSERT INTO cha.Query_Suggester (session_id,case_number, sap_swo,origional_query, modality,num_results, parts_consumed, parts_ordered,parts_recommended, completeness, success, act_ftr, Suggester, Strategy)VALUES (%s, %s, %s, %s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s);\",\n (str(session[0]), str(session[1]), str(session[2]), str(session[3]), str(session[4]), str(session[5]),\n str(session[6]),\n str(session[7]), str(session[8]), str(session[9]), str(session[10]), str(session[11]),\n str(session[12]), str(session[13])))\n cur.execute(\n \"INSERT INTO cha.Query_Suggester (session_id,case_number, sap_swo,origional_query, modality,num_results, parts_consumed, parts_ordered,parts_recommended, completeness, success, act_ftr, 
Suggester, Strategy)VALUES (%s, %s, %s, %s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s);\",\n (str(session[0]), str(session[1]), str(session[2]), str(session[3]), str(session[4]), str(session[5]),\n str(session[6]),\n str(session[7]), str(session[8]), str(session[9]), str(session[10]), str(session[11]),\n str(session[14]), str(session[15])))\n cur.execute(\n \"INSERT INTO cha.Query_Suggester (session_id,case_number, sap_swo,origional_query, modality,num_results, parts_consumed, parts_ordered,parts_recommended, completeness, success, act_ftr, Suggester, Strategy)VALUES (%s, %s, %s, %s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s);\",\n (str(session[0]), str(session[1]), str(session[2]), str(session[3]), str(session[4]), str(session[5]),\n str(session[6]),\n str(session[7]), str(session[8]), str(session[9]), str(session[10]), str(session[11]),\n str(session[16]), str(session[17])))\n cur.execute(\n \"INSERT INTO cha.Query_Suggester (session_id,case_number, sap_swo,origional_query, modality,num_results, parts_consumed, parts_ordered,parts_recommended, completeness, success, act_ftr, Suggester, Strategy)VALUES (%s, %s, %s, %s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s);\",\n (str(session[0]), str(session[1]), str(session[2]), str(session[3]), str(session[4]), str(session[5]),\n str(session[6]),\n str(session[7]), str(session[8]), str(session[9]), str(session[10]), str(session[11]),\n str(session[18]), str(session[19])))\n conn.commit()\n cur.close()\n\n if __name__ == '__main__':\n for modality in [\"CT\",\"MR\",\"iXR\"]:\n if modality == 'CT':\n Query_List = Query_Retrieval(str(modality))\n Query = Request_Definition(Query_List, 'ct_0730')\n Insert_Result(Query)\n elif modality == 'MR':\n Query_List = Query_Retrieval(str(modality))\n Query = Request_Definition(Query_List, 'mr_0730')\n Insert_Result(Query)\n elif modality == 'iXR':\n Query_List = Query_Retrieval(str(modality))\n Query = Request_Definition(Query_List, 'ixr_0730')\n Insert_Result(Query)\n else:\n print('no modality')\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n#curl -XPOST http://localhost:9200/ct_0730/_search?pretty -H \"Content-Type:application/json\" -d \"{\\\"suggest\\\": {\\\"text\\\":\\\"abc\\\",\\\"simple_phrase\\\": {\\\"phrase\\\": {\\\"field\\\": \\\"Sub_Act.trigram\\\",\\\"size\\\": 3,\\\"gram_size\\\": 5,\\\"max_errors\\\":2,\\\"direct_generator\\\": [{\\\"field\\\": \\\"Sub_Act.trigram\\\",\\\"suggest_mode\\\":\\\"always\\\"}]}}}}\"","sub_path":"experiment_1/Phrase_Suggester.py","file_name":"Phrase_Suggester.py","file_ext":"py","file_size_in_byte":12684,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"588083939","text":"\"\"\" Tests of Ezyme\n\n:Author: Yosef Roth \n:Author: Jonathan Karr \n:Date: 2017-05-04\n:Copyright: 2017, Karr Lab\n:License: MIT\n\"\"\"\n\nfrom attrdict import AttrDict\nfrom datanator.data_source import ezyme\nfrom datanator.core import data_model\nfrom datanator.util import molecule_util\nfrom datanator.util import warning_util\nimport unittest\n\nwarning_util.disable_warnings()\n\n@unittest.skip('not working currently')\nclass TestEzyme(unittest.TestCase):\n molecules = {\n 'adp': 'NC1=C2N=CN(C3OC(COP([O-])(=O)OP([O-])([O-])=O)C(O)C3O)C2=NC=N1',\n 'amet': 'C[S+](CCC([NH3+])C([O-])=O)CC1OC(C(O)C1O)N1C=NC2=C(N)N=CN=C12',\n 'atp': 'NC1=C2N=CN(C3OC(COP([O-])(=O)OP([O-])(=O)OP([O-])([O-])=O)C(O)C3O)C2=NC=N1',\n 'dr1p': 'OCC1OC(CC1O)OP([O-])([O-])=O',\n 'dr5p': 'OC1CC(O)C(COP([O-])([O-])=O)O1',\n 'e1dG': 'CCN1C(N)=NC2=C(N=CN2C2CC(O)C(CO)O2)C1=O',\n 'e1dGMP': 
'CCN1C(N)=NC2=C(N=CN2C2CC(O)C(COP([O-])([O-])=O)O2)C1=O',\n 'f1p': 'OCC1OC(O)(COP([O-])([O-])=O)C(O)C1O',\n 'gly': '[NH3+]CC([O-])=O',\n 'glyceraldehyde': 'OCC(O)C=O',\n 'h': '[H+]',\n 'h2o': 'O',\n 'ileile': 'CCC(C)C([NH3+])C(=O)NC(C(C)CC)C([O-])=O',\n 'leu': 'CC(C)CC([NH3+])C([O-])=O',\n 'leuleu': 'CC(C)CC([NH3+])C(=O)NC(CC(C)C)C([O-])=O',\n 'met': 'CSCCC([NH3+])C([O-])=O',\n 'metthf': 'NC1=NC(=O)C2=C(NCC3CN(CN23)C2=CC=C(C=C2)C(=O)NC(CCC([O-])=O)C([O-])=O)N1',\n 'pep': '[O-]C(=O)C(=C)OP([O-])([O-])=O',\n 'pi': 'OP([O-])([O-])=O',\n 'ppi': '[O-]P([O-])(=O)OP([O-])([O-])=O',\n 'pyr': 'CC(=O)C([O-])=O',\n 'r5p': 'OC(COP([O-])([O-])=O)C(O)C(O)C=O',\n 's7p': 'OCC(=O)C(O)C(O)C(O)C(O)COP([O-])([O-])=O',\n 'ser': '[NH3+]C(CO)C([O-])=O',\n 't3p1': 'OC(COP([O-])([O-])=O)C=O',\n 't3p2': 'OCC(=O)COP([O-])([O-])=O',\n 'thf': 'NC1=NC(=O)C2=C(NCC(CNC3=CC=C(C=C3)C(=O)NC(CCC([O-])=O)C([O-])=O)N2)N1',\n 'udp': 'OC1C(O)C(OC1COP([O-])(=O)OP([O-])([O-])=O)N1C=CC(=O)NC1=O',\n 'utp': 'OC1C(O)C(OC1COP([O-])(=O)OP([O-])(=O)OP([O-])([O-])=O)N1C=CC(=O)NC1=O',\n 'x5p': 'OCC(=O)C(O)C(O)COP([O-])([O-])=O',\n 'no_structure': '',\n }\n\n def test__run(self):\n m = AttrDict()\n for name, structure in self.molecules.items():\n if structure:\n m[name] = molecule_util.Molecule(structure=structure).to_mol()\n else:\n m[name] = None\n\n # 1 --> 1: dr1p <==> d5rp\n print(m.dr1p)\n results = ezyme.Ezyme()._run([m.dr1p], [m.dr5p])\n self.assertEqual(len(results), 1)\n self.assertEqual(results[0].__dict__, {'ec_number': '5.4.2', 'score': 16.0})\n\n # 1 --> 2: f1p ==> glyceraldehyde + t3p2\n self.assertEqual(ezyme.Ezyme()._run([m.f1p], [m.glyceraldehyde, m.t3p2])[0].ec_number, \"2.2.1\")\n\n # 2 --> 1 and example with stoichiometric coefficient != 1: H2O + LeuLeu ==> (2) LEU\n self.assertEqual(ezyme.Ezyme()._run([m.leuleu, m.h2o], [m.leu])[0].ec_number, \"3.5.1\")\n\n # 2 --> 2: t3p1 + s7p <==> x5p + r5p\n self.assertEqual(ezyme.Ezyme()._run([m.t3p1, m.s7p], [m.x5p, m.r5p])[0].ec_number, \"4.1.2\")\n\n # 3 --> 2: UDP + H + PEP ==> UTP + PYR\n self.assertEqual(ezyme.Ezyme()._run([m.utp, m.pep, m.h], [m.utp, m.pyr])[0].ec_number, '2.7.3')\n\n # 3 --> 2: metthf + gly + h2o <==> thf + ser\n self.assertEqual(ezyme.Ezyme()._run([m.metthf, m.gly, m.h2o], [m.thf, m.ser])[0].ec_number, '2.1.2')\n\n # 3 --> 4: IleIle[e] + ATP[c] + H2O[c] ==> IleIle[c] + PI[c] + H[c] + ADP[c]\n self.assertEqual(ezyme.Ezyme()._run([m.ileile, m.atp, m.h2o], [m.ileile, m.pi, m.adp])[0].ec_number, \"3.6.1\")\n\n # 4 --> 3: ATP + MET + H2O ==> AMET + PPI + PI + H\n self.assertEqual(ezyme.Ezyme()._run([m.met, m.atp, m.h2o], [m.amet, m.ppi, m.pi, m.h]), [])\n\n \"\"\" test that Ezyme predicts the same EC number for a reversible reaction when the substrates and products and swapped \"\"\"\n self.assertEqual(ezyme.Ezyme()._run([m.dr5p], [m.dr1p])[0].ec_number, '5.4.2')\n\n \"\"\" test that Ezyme returns None for partiicpants with no defined structure \"\"\"\n self.assertEqual(ezyme.Ezyme()._run([m.dr5p], [m.no_structure]), None)\n\n @unittest.skip('skip')\n def test_run(self):\n rxn = data_model.Reaction(participants=[\n data_model.ReactionParticipant(\n specie=data_model.Specie(structure=self.molecules['atp'], id='atp'),\n compartment=data_model.Compartment(id='c'),\n coefficient=-1,\n order=0),\n data_model.ReactionParticipant(\n specie=data_model.Specie(structure=self.molecules['h2o'], id='h2o'),\n compartment=data_model.Compartment(id='c'),\n coefficient=-1,\n order=1),\n data_model.ReactionParticipant(\n specie=data_model.Specie(structure=self.molecules['adp'], 
id='adp'),\n compartment=data_model.Compartment(id='c'),\n coefficient=1,\n order=2),\n data_model.ReactionParticipant(\n specie=data_model.Specie(structure=self.molecules['ppi'], id='ppi'),\n compartment=data_model.Compartment(id='c'),\n coefficient=1,\n order=3),\n data_model.ReactionParticipant(\n specie=data_model.Specie(structure=self.molecules['h'], id='h'),\n compartment=data_model.Compartment(id='c'),\n coefficient=1,\n order=4),\n ])\n self.assertEqual(rxn.get_reactants()[0].specie.id, 'atp')\n self.assertEqual(rxn.get_products()[0].specie.id, 'adp')\n self.assertEqual(rxn.get_reactant_product_pairs()[0][0].specie.id, 'atp')\n self.assertEqual(rxn.get_reactant_product_pairs()[0][1].specie.id, 'adp')\n self.assertEqual(rxn.get_reactant_product_pairs()[1][0].specie.id, 'h2o')\n self.assertEqual(rxn.get_reactant_product_pairs()[1][1].specie.id, 'ppi')\n result = ezyme.Ezyme().run(rxn)\n print(result)\n self.assertEqual(result[0].ec_number, '3.6.1') # true EC is 3.6.1.3\n\n # example where Ezyme predicts no EC number when the order is swapped\n rxn = data_model.Reaction(participants=[\n data_model.ReactionParticipant(\n specie=data_model.Specie(structure=self.molecules['t3p1'], id='atp'),\n coefficient=-1,\n order=0),\n data_model.ReactionParticipant(\n specie=data_model.Specie(structure=self.molecules['s7p'], id='h2o'),\n coefficient=-1,\n order=1),\n data_model.ReactionParticipant(\n specie=data_model.Specie(structure=self.molecules['x5p'], id='adp'),\n coefficient=1,\n order=2),\n data_model.ReactionParticipant(\n specie=data_model.Specie(structure=self.molecules['r5p'], id='ppi'),\n coefficient=1,\n order=3),\n ])\n result = ezyme.Ezyme().run(rxn)\n\n self.assertEqual(result[0].ec_number, \"4.1.2\") # true EC is 2.2.1.1\n self.assertEqual(result[1].ec_number, \"2.2.1\")\n\n rxn = data_model.Reaction(participants=[\n data_model.ReactionParticipant(\n specie=data_model.Specie(structure=self.molecules['s7p'], id='atp'),\n coefficient=-1,\n order=0),\n data_model.ReactionParticipant(\n specie=data_model.Specie(structure=self.molecules['t3p1'], id='h2o'),\n coefficient=-1,\n order=1),\n data_model.ReactionParticipant(\n specie=data_model.Specie(structure=self.molecules['x5p'], id='adp'),\n coefficient=1,\n order=2),\n data_model.ReactionParticipant(\n specie=data_model.Specie(structure=self.molecules['r5p'], id='ppi'),\n coefficient=1,\n order=3),\n ])\n result = ezyme.Ezyme().run(rxn)\n self.assertEqual(result, [])\n\n # example where Ezyme predicts different EC numbers when the order is swapped\n rxn = data_model.Reaction(participants=[\n data_model.ReactionParticipant(\n specie=data_model.Specie(structure=self.molecules['e1dGMP'], id='atp'),\n coefficient=-1,\n order=0),\n data_model.ReactionParticipant(\n specie=data_model.Specie(structure=self.molecules['h2o'], id='h2o'),\n coefficient=-1,\n order=1),\n data_model.ReactionParticipant(\n specie=data_model.Specie(structure=self.molecules['e1dG'], id='adp'),\n coefficient=1,\n order=2),\n data_model.ReactionParticipant(\n specie=data_model.Specie(structure=self.molecules['pi'], id='ppi'),\n coefficient=1,\n order=3),\n ])\n result = ezyme.Ezyme().run(rxn)\n self.assertEqual(result[0].ec_number, \"3.1.3\") # true EC is 3.1.3.89\n\n rxn = data_model.Reaction(participants=[\n data_model.ReactionParticipant(\n specie=data_model.Specie(structure=self.molecules['h2o'], id='atp'),\n coefficient=-1,\n order=1),\n data_model.ReactionParticipant(\n specie=data_model.Specie(structure=self.molecules['e1dGMP'], id='h2o'),\n coefficient=-1,\n 
order=0),\n data_model.ReactionParticipant(\n specie=data_model.Specie(structure=self.molecules['e1dG'], id='adp'),\n coefficient=1,\n order=2),\n data_model.ReactionParticipant(\n specie=data_model.Specie(structure=self.molecules['pi'], id='ppi'),\n coefficient=1,\n order=3),\n ])\n result = ezyme.Ezyme().run(rxn)\n self.assertEqual(result[0].ec_number, \"3.1.3\") # true EC is 3.1.3.89\n\n # example where a structure is not defined\n rxn = data_model.Reaction(participants=[\n data_model.ReactionParticipant(\n specie=data_model.Specie(structure=self.molecules['h2o'], id='atp'),\n coefficient=-1,\n order=1),\n data_model.ReactionParticipant(\n specie=data_model.Specie(structure=self.molecules['e1dGMP'], id='h2o'),\n coefficient=-1,\n order=0),\n data_model.ReactionParticipant(\n specie=data_model.Specie(structure=self.molecules['e1dG'], id='adp'),\n coefficient=1,\n order=2),\n data_model.ReactionParticipant(\n specie=data_model.Specie(structure='', id='ppi'),\n coefficient=1,\n order=3),\n ])\n result = ezyme.Ezyme().run(rxn)\n self.assertEqual(result, None)\n","sub_path":"tests/data_source/test_ezyme.py","file_name":"test_ezyme.py","file_ext":"py","file_size_in_byte":11002,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"410448286","text":"from zplot import *\n\nt = table('fig1.data')\n\nbuff_size = [('256M', 'red')] \noptions = [('FLUSH', 'hline', 0.5), ('RFLUSH', 'dline12',0.5), ('NOFLUSH', 'solid', 0.5)]\n#c = postscript('fig1.eps')\nctype = 'eps' if len(sys.argv) < 2 else sys.argv[1]\n#c = canvas(ctype, 'fig1', dimensions=['3.5in', '2.95in'])\nc = canvas(ctype, 'fig1-pattern', dimensions=['2.5in', '2.3in'])\n#c = canvas(ctype, 'fig1-pattern', dimensions=['3.5in', '2in'])\nd = drawable(c, xrange=[-0.5, t.getmax('rownumber')+2.5], yrange=[0,500])\n\n#axis(d, xtitle='Cache size', ytitle='iops', yauto=[0,200,50])\naxis(d, style='box', xtitle='Cache size', xmanual=[['256M', 2],['512M', 6], ['1G', 10], ['2G', 14]], ytitle='iops', yauto=[0,500,100])\n#axis(d, xtitle='Cache size', xmanual=t.query(select='opt,rownumber'), ytitle='iops', yauto=[0,200,50])\n\np = plotter()\nL = legend()\n\noffset = 0\nprint(type(offset))\nfor opt, ftype, fsize, in options:\n#\tst = table(table=t, where='buff=\"bsize\"')\n\tw = 'opt=\"%s\"' % opt\n#\tprint(w)\n\tst = table(table=t, where=w)\n#\tst = table(table=t, where='buff=\"256M\"')\n\tbarargs = {'drawable':d, 'table':st, 'xfield':'rowline', \n\t\t\t'yfield':'iops', 'fill':True, 'barwidth':0.8, \n\t\t\t'fillsize':fsize, 'fillstyle':ftype, 'fillcolor':'darkgray',\n\t\t\t'legend':L, 'legendtext':opt}\n\n#\tbarargs = {'drawable':d, 'table':st, 'xfield':'rownumber', \n\tp.verticalbars(**barargs)\n\toffset += st.getmax('rownumber')\n\nL.draw(c, coord=[d.left()+20, \n#\tfontsize=7,\n\td.top()-20], skipnext=3, skipspace=40)\n\n\nc.render()\n","sub_path":"figure/fig1-pattern.py","file_name":"fig1-pattern.py","file_ext":"py","file_size_in_byte":1485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"607299977","text":"import game_framework\nfrom pico2d import *\nimport main_state\nimport title_state\nimport game_world\n\ngame_name = \"PauseState\"\nimage = None\npause_time = 0.0\npause_draw = True\n\n\ndef enter():\n global image\n image = load_image('./image/pause.png')\n\n\ndef exit():\n global image\n del image\n\n\n\ndef update():\n global pause_time, pause_draw\n\n if pause_time > 0.1:\n pause_time = 0\n if pause_draw:\n pause_draw = False\n else:\n 
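# toggle the flag so the pause image blinks while the game is paused\n            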
pause_draw = True\n\n delay(0.01)\n pause_time += 0.01\n\n\ndef draw():\n global image\n clear_canvas()\n main_state.draw()\n if pause_draw:\n image.draw(game_world.WIDTH * 0.5, game_world.HEIGHT * 0.5, 200, 200)\n update_canvas()\n\n\ndef handle_events():\n events = get_events()\n for event in events:\n if event.type == SDL_QUIT:\n game_framework.quit()\n elif event.type == SDL_KEYDOWN:\n if event.key == SDLK_ESCAPE:\n game_world.clear()\n game_framework.change_state(title_state)\n elif (event.type, event.key) == (SDL_KEYDOWN, SDLK_p):\n game_framework.pop_state()\n\n\ndef pause(): pass\n\n\ndef resume(): pass\n","sub_path":"pause_state.py","file_name":"pause_state.py","file_ext":"py","file_size_in_byte":1187,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"104860760","text":"pattle = [862, 814, 797, 899, 614, 921, 2, 4, 1]\nmarker = [20, 21, 22, 25, 24, 26]\n\nfrom ROOT import gStyle\nfrom ROOT import gROOT\nfrom ROOT import TStyle\n\ngStyle.SetPadTopMargin(0.05)\ngStyle.SetPadRightMargin(0.05)\n\ndef formatST(h):\n h.SetMarkerStyle(20)\n h.SetMarkerColor(923)\n h.SetLineColor(923)\n h.SetXTitle(\"S_{T} (GeV)\")\n h.SetYTitle(\"Events / %d GeV\" % h.GetBinWidth(1))\n h.GetYaxis().SetTitleOffset(1.1)\n h.GetYaxis().SetTitleSize(0.045)\n h.GetYaxis().SetLabelSize(0.045)\n h.GetXaxis().SetTitleSize(0.045)\n h.GetXaxis().SetLabelSize(0.045)\n \ndef formatTemplate(f, N, iformula):\n f.SetLineWidth(2)\n f.SetLineColor(pattle[iformula])\n if N == 2:\n f.SetLineStyle(1)\n elif N == 3:\n f.SetLineStyle(2)\n\ndef formatUncertainty(g):\n g.SetLineWidth(2)\n g.SetFillColor(862)\n #g.SetLineColor(33)\n g.SetLineColor(862)\n g.SetFillColor(33)\n #g.SetFillStyle()\n g.GetXaxis().SetTitle(\"S_{T} (GeV)\")\n g.GetYaxis().SetTitle(\"Events / 100 GeV\")\n g.GetYaxis().SetTitleOffset(1.1)\n g.GetYaxis().SetTitleSize(0.045)\n g.GetYaxis().SetLabelSize(0.045)\n g.GetXaxis().SetTitleSize(0.045)\n g.GetXaxis().SetLabelSize(0.045)\n \ndef formatCL(g, type, width=4):\n g.SetLineWidth(width)\n g.GetXaxis().SetTitle(\"S_{T}^{ min} (GeV)\")\n g.GetXaxis().SetNdivisions(5,5,0)\n g.GetYaxis().SetTitle(\"#sigma(S_{T} > S_{T}^{ min}) #times A (pb)\")\n g.GetYaxis().SetTitleOffset(1.045)\n g.GetYaxis().SetTitleSize(0.045)\n g.GetYaxis().SetLabelSize(0.045)\n g.GetXaxis().SetTitleSize(0.045)\n g.GetXaxis().SetLabelSize(0.045)\n \n if type == \"CL95\":\n g.SetLineColor(862)\n g.SetFillColor(862)\n elif type == \"CLA\":\n g.SetLineColor(899)\n g.SetFillColor(899)\n g.SetLineStyle(2)\n elif type == \"CLA1\":\n g.SetLineColor(899)\n g.SetFillColor(3)\n g.SetLineStyle(2)\n g.SetLineWidth(3)\n elif type == \"CLA2\":\n g.SetLineColor(899)\n g.SetFillColor(5)\n g.SetLineStyle(2)\n g.SetLineWidth(3)\n\ndef formatXsecCL(g, icolor, line_style=1):\n g.SetLineWidth(2)\n g.SetLineColor(pattle[icolor])\n g.SetLineStyle(line_style)\n g.SetMarkerColor(pattle[icolor])\n g.SetMarkerSize(1)\n g.GetXaxis().SetTitle(\"M_{BH}^{ min} (TeV)\")\n g.GetYaxis().SetTitle(\"#sigma (pb)\")\n g.GetYaxis().SetTitleOffset(1.2)\n\ndef formatExcludedMass(g, name = \"\"):\n g.GetXaxis().SetTitle(\"M_{D} (TeV)\")\n g.GetYaxis().SetTitle(\"Excluded M_{BH}^{ min} (TeV)\")\n g.GetYaxis().SetTitleOffset(1.1)\n g.GetYaxis().SetTitleSize(0.045)\n g.GetYaxis().SetLabelSize(0.045)\n \n if not name == \"\":\n g.SetLineWidth(3)\n g.SetMarkerSize(1)\n\n if \"BH1_BM\" in name or \"BH4_CH\" in name:\n color = 922\n marker_style = 20\n line_style = 1\n\n if \"BH2_BM\" in name or \"BH2_CH\" in name or \"BH0_QB\" in 
name:\n color = 862\n marker_style = 21\n line_style = 2\n\n if \"BH8_CH\" in name:\n color = 899\n marker_style = 22\n line_style = 3\n\n if \"BH6_CH\" in name or \"BH5_BM\" in name:\n color = 797\n marker_style = 20#34\n line_style = 1\n\n if \"BH10_CH\" in name:\n color = 2\n marker_style = 23\n line_style = 2\n \n if \"BH9_CH\" in name:\n color = 4\n marker_style = 24\n line_style = 3\n \n g.SetLineColor(color)\n g.SetLineStyle(line_style)\n g.SetMarkerStyle(marker_style)\n g.SetMarkerSize(1)\n g.SetMarkerColor(color)\n\ndef formatRatio(h, icolor):\n h.SetMarkerColor(pattle[icolor])\n #h.SetMarkerStyle(marker[icolor])\n h.SetLineColor(pattle[icolor])\n","sub_path":"BHScripts_8TeV_postICHEP_Final_WithRun2012C_NewFitRange/Styles.py","file_name":"Styles.py","file_ext":"py","file_size_in_byte":3513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"102028369","text":"import json\nfrom collections import defaultdict\nfrom bs4 import BeautifulSoup\nfrom ..common.utils import download_soup\n\n\ndef main():\n soup = BeautifulSoup(download_soup(\"https://champion.gg/statistics/\", use_cache=False), \"lxml\")\n scripts = [script.text.strip() for script in soup.find_all(\"script\")]\n data = [script for script in scripts if script.startswith(\"matchupData.stats =\")][0]\n data = data[len(\"matchupData.stats =\") :].strip()[:-1]\n data = json.loads(data)\n\n patch = [script for script in scripts if script.startswith(\"var currentPatch =\")][0]\n patch = patch[len(\"var currentPatch =\") :].split()[0].strip()\n if patch.endswith(\";\"):\n patch = patch[:-1]\n patch = json.loads(patch)\n\n ids = [script for script in scripts if script.startswith(\"var champData =\")][0]\n ids = ids[len(\"var champData =\") :].strip().split(\";\")[0].strip()\n ids = json.loads(ids)\n ids = {k: int(v) for k, v in ids.items()}\n\n role_name_map = {\"Top\": \"TOP\", \"Jungle\": \"JUNGLE\", \"Middle\": \"MIDDLE\", \"ADC\": \"BOTTOM\", \"Support\": \"UTILITY\"}\n\n final = defaultdict(dict)\n for datum in data:\n id = ids[datum[\"key\"]]\n role = role_name_map[datum[\"role\"]]\n final[id][role] = {\n \"playRate\": datum[\"general\"][\"playPercent\"],\n \"winRate\": datum[\"general\"][\"winPercent\"],\n \"banRate\": datum[\"general\"][\"banRate\"],\n }\n final = {\"data\": final, \"patch\": patch}\n for id in ids.values():\n if id not in final[\"data\"]:\n final[\"data\"][id] = {}\n\n filename = \"/home/meraki/code/meraki/Data/champion-rates/rates.json\"\n with open(filename, \"w\") as f:\n json.dump(final, f)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"lolstaticdata/championrates/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":1717,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"305748920","text":"# -*- coding:utf-8 -*-\nimport logging\nimport os\nimport time\n\n\nclass Logger(object):\n def __init__(self):\n self.init_logger()\n\n def init_logger(self, log_path=os.path.join(os.path.dirname(os.getcwd()), \"log\")):\n def get_timestamp():\n timestamp = time.strftime('%Y_%m_%d_%H_%M_%S', time.localtime(time.time()))\n return timestamp\n\n if not os.path.exists(log_path):\n os.makedirs(log_path)\n log_path = os.path.join(log_path, \"openapi_log_{}.txt\".format(get_timestamp()))\n self.logger = logging.getLogger(__name__)\n self.logger.setLevel(level=logging.DEBUG)\n handler = logging.FileHandler(log_path)\n # handler.setLevel(logging.INFO)\n formatter = 
logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n        # formatter = logging.Formatter('%(asctime)s - %(message)s')\n        handler.setFormatter(formatter)\n        self.logger.addHandler(handler)\n\n\nlogger = Logger()\n","sub_path":"utils/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"413174847","text":"import pandas as pd\nimport statsmodels.api as sm\nimport pylab as pl\nimport numpy as np\n\ndef dataClean(myName):\n    user = pd.read_csv(\"D://JData/JData_User.csv\")\n    del user['user_reg_tm']\n    dataCsv = pd.read_csv(\"D://JData/\" + myName + \".csv\")\n    dataCsv = pd.merge(dataCsv,user, on='user_id')\n    return dataCsv\n\ndef dataSel(myName):\n    data = pd.read_csv(\"D://JData/\" + myName + \".csv\")\n    data = data[data.cate==8]\n    #data = data[(data.type==2) | (data.type==4)]\n    del data['time']\n    del data['model_id']\n    return data\n\ndef dataPro(myName):\n    product = pd.read_csv(\"D://JData/JData_Product.csv\")\n    del product['cate']\n    del product['brand']\n\n    data = pd.read_csv(\"D://JData/\" + myName + \".csv\")\n    data.drop(data.columns[[0,1,5]],axis=1,inplace=True)\n    data = pd.merge(data,product,on='sku_id')\n    return data\n\ndef dataCom(myName):\n    comment = pd.read_csv(\"D://JData/NewComment.csv\")\n    del comment['dt']\n\n    data = pd.read_csv(\"D://JData/\" + myName + \".csv\")\n    data.drop(data.columns[[0]],axis=1,inplace=True)\n    data = pd.merge(data,comment,on='sku_id')\n    return data\n'''\ndataSel(\"JData_Action_201602\").to_csv(\"D://JData/JData02.csv\")\ndataSel(\"JData_Action_201603\").to_csv(\"D://JData/JData03.csv\")\ndataSel(\"JData_Action_201604\").to_csv(\"D://JData/JData04.csv\")\n'''\n'''\ndataClean(\"JData02\").to_csv(\"D://JData/JData02WithUser.csv\")\ndataClean(\"JData03\").to_csv(\"D://JData/JData03WithUser.csv\")\ndataClean(\"JData04\").to_csv(\"D://JData/JData04WithUser.csv\")\n'''\n#dataCombo(\"JData02WithUser\").to_csv(\"D://JData/JData02.csv\")\n#dataCombo(\"JData03WithUser\").to_csv(\"D://JData/JData03.csv\")\n#dataCombo(\"JData04WithUser\").to_csv(\"D://JData/JData04.csv\")\n'''\ndataPro(\"JData02WithUser\").to_csv(\"D://JData/ProData02.csv\")\ndataPro(\"JData03WithUser\").to_csv(\"D://JData/ProData03.csv\")\ndataPro(\"JData04WithUser\").to_csv(\"D://JData/ProData04.csv\")\n'''\ndataCom(\"ProData02\").to_csv(\"D://JData/ComData02.csv\")\ndataCom(\"ProData03\").to_csv(\"D://JData/ComData03.csv\")\ndataCom(\"ProData04\").to_csv(\"D://JData/ComData04.csv\")","sub_path":"OLD/jdata.py","file_name":"jdata.py","file_ext":"py","file_size_in_byte":2038,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"18783827","text":"#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\n\"\"\"\n    Topic: Menu bar, toolbar, and status bar\n    Desc : \n\"\"\"\n__author__ = 'Xiong Neng'\nimport wx\nimport wx.py.images as images\n\n\nclass MenuToobarFrame(wx.Frame):\n    \"\"\"Frame class demonstrating a menu bar, toolbar and status bar\"\"\"\n\n    def __init__(self, parent, id):\n        \"\"\"Create a frame instance and display the frame\"\"\"\n        wx.Frame.__init__(self, parent,id,'Toolbars', size=(300, 200))\n        panel = wx.Panel(self) # create the panel\n        panel.SetBackgroundColour('White')\n        statusBar = self.CreateStatusBar() # create the status bar\n        toolbar = self.CreateToolBar() # create the toolbar\n        # add a tool to the toolbar\n        toolbar.AddSimpleTool(wx.NewId(), images.getPyBitmap(), 'New', \"Long help for 'New'\")\n        toolbar.Realize() # prepare the toolbar for display\n        menuBar = wx.MenuBar() # create a menu bar\n        # create two menus\n        menu1 = wx.Menu()\n        
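# append the File menu to the menu bar, then populate its items\n        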
menuBar.Append(menu1, '&File')\n        menu1.Append(-1, \"&Open...\", 'Open new file')\n        menuItem = menu1.Append(-1, \"&Exit...\", 'Exit System')\n        # bind an event to the menu item\n        self.Bind(wx.EVT_MENU, self.OnCloseMe, menuItem)\n        menu2 = wx.Menu()\n        # create the menu items (MenuItem)\n        menu2.Append(wx.NewId(), '&Copy', 'Copy in status bar')\n        menu2.Append(wx.NewId(), '&Cut', '')\n        menu2.Append(wx.NewId(), '&Paste','')\n        menu2.AppendSeparator()\n        menu2.Append(wx.NewId(), '&Options', 'Display Options')\n        menuBar.Append(menu2, '&Edit') # attach the menu to the menu bar\n        self.SetMenuBar(menuBar) # attach the menu bar to the Frame\n\n    def OnCloseMe(self, event):\n        self.Close(True)\n\n\ndef main():\n    app = wx.App()\n    frame = MenuToobarFrame(parent=None, id=-1)\n    frame.Show()\n    app.MainLoop()\n\n","sub_path":"gui/examples/menu_toolbar.py","file_name":"menu_toolbar.py","file_ext":"py","file_size_in_byte":1795,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"508341707","text":"# Python3 Program to remove duplicates \n# from a sorted linked list \nimport math \n \n# Link list node \nclass Node: \n    def __init__(self,data): \n        self.data = data \n        self.next = None\n \n# The function removes duplicates \n# from a sorted list \ndef removeDuplicates(head): \n     \n    # Pointer to store the pointer of a node \n    # to be deleted to_free \n     \n    # do nothing if the list is empty \n    if (head == None): \n        return\n \n    # Traverse the list till last node \n    if (head.next != None): \n \n        # Compare head node with next node \n        if (head.data == head.next.data): \n \n            # The sequence of steps is important. \n            # to_free pointer stores the next of head \n            # pointer which is to be deleted. \n            to_free = head.next\n            head.next = head.next.next\n \n            # free(to_free) \n            removeDuplicates(head) \n \n        # This is tricky: only advance if no deletion \n        else: \n            removeDuplicates(head.next) \n     \n    return head \n \n# UTILITY FUNCTIONS \n# Function to insert a node at the \n# beginning of the linked list \ndef push(head_ref, new_data): \n     \n    # allocate node \n    new_node = Node(new_data) \n     \n    # put in the data \n    new_node.data = new_data \n     \n    # link the old list off the new node \n    new_node.next = head_ref \n     \n    # move the head to point to the new node \n    head_ref = new_node \n    return head_ref \n \n# Function to print nodes in a given linked list \ndef printList(node): \n    while (node != None): \n        print(node.data, end = \" \") \n        node = node.next\n \n# Driver code \nif __name__=='__main__': \n     \n    # Start with the empty list \n    head = None\n     \n    # Let us create a sorted linked list \n    # to test the functions \n    # Created linked list will be 11->11->11->13->13->20 \n    head = push(head, 20) \n    head = push(head, 13) \n    head = push(head, 13) \n    head = push(head, 11) \n    head = push(head, 11) \n    head = push(head, 11) \n     \n    print(\"Linked list before duplicate removal \", \n          end = \"\") \n    printList(head) \n     \n    # Remove duplicates from linked list \n    removeDuplicates(head) \n     \n    print(\"\\nLinked list after duplicate removal \", \n          end = \"\") \n    printList(head) \n     ","sub_path":"removeduplicatelinklistrecursive.py","file_name":"removeduplicatelinklistrecursive.py","file_ext":"py","file_size_in_byte":2528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"129720909","text":"\"\"\" Script to apply LAT correction and enforce mask from operational AMM15\n\n    Original LSM is developed in AMM15_EMODNET_LSM_v2.py \n    Coordinates come from amm15_coord.py\n\n    Variables needed in nemo bathy file are:\n        bathymetry (metres)\n        lat/lon - unrotated 
(gphi/glam)\n        zenv? (only pre-v3.6?)\nParameters\n----------\nOP_LSM : str\n    The location of the operational LSM e.g. EMODNET_LSM_v2.nc (created by J Graham from EMODNET to AMM15 plus filling in lakes etc)\nCS3X_CS20: str\n    The location of the merged Surge data for LAT correction (valid only on AMM15 inner domain)\nBATHY_DATA: str\n    The location of the GEBCO data on the extended AMM15 grid\nLAT_LON: str\n    The location of the file containing the extended AMM15 domain lat lon grid\nOUT_FILE: str\n    Where to write the final output file after processing.\n\nReturns\n-------\niris cube\n    The bathymetry in cube format\n\"\"\"\n\nfrom netCDF4 import Dataset\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport time\n\nfrom pathlib import Path\nimport argparse, sys\n\nimport sys, platform\n\nfrom datetime import datetime\nimport subprocess\n\n\n\n##=== Parse arguments\nparser = argparse.ArgumentParser(description='Process inputs and output file paths')\nparser.add_argument('-o','--OP_LSM', metavar='Operational LSM file', nargs=1,\n                    help='File location of Operational AMM15 LSM', required=True)\nparser.add_argument('-c','--CS3X_CS20',metavar='CS3X_CS20', nargs=1,\n                    help='File location of CS3X_CS20 LAT', required=True)\nparser.add_argument( '-b','--BATHY_DATA',metavar='BATHY_DATA', nargs=1,\n                    help='File location of GEBCO data on the extended AMM15 grid ', required=True)\nparser.add_argument( '-l','--LAT_LON',metavar='LAT_LON', nargs=1,\n                    help='Path to coordinates file with lat lon ', required=True)\nparser.add_argument( '-f','--OUT_FILE',metavar='OUT_FILE', nargs=1,\n                    help='Path to output file ', required=True)\n\nargs = parser.parse_args()\n\nprint(\"\\n----------------------------------------------------\\n\")\n\nprint( \"Thanks, you have chosen: \\n \")\n\nfor arg in vars(args):\n    print(\" {} as --{}\".format(getattr(args,arg)[0],arg) )\n    print( arg)\n    if arg != 'OUT_FILE':\n        if Path(getattr(args,arg)[0]).is_file():\n            print(\" and the file {} exists. \\n\".format (getattr(args,arg)[0]))\n        else:\n            sys.exit(\"However, {} does not exist so we exit here\".format(getattr(args,arg)[0]))\n#========================= end of arguments parsing\n\n\nop_lsm_file = Dataset((args.OP_LSM)[0],'r')\nop_lsm_bath = op_lsm_file.variables['Bathymetry'][:,:]\n\n# NB. nemo interprets any point <= 0 as land (sets mbathy = 0)\n# For now, convert any point <=0 to 1e-3, then set nan values = 0.\n# NEMO will convert these to the min depth variable for now.\n# Will need to consider changing this method when W&D comes in...\n\ncs3xcs20 = Dataset((args.CS3X_CS20)[0],'r')\ncs3xcs20_bathy = cs3xcs20.variables['sea_floor_depth_below_geoid'][:,:]\n\ninput_dataset = Dataset((args.BATHY_DATA)[0],'r')\ninput_bathy = -input_dataset.variables['sea_floor_depth_below_geoid'][:,:]\ninflate_lat = 100\ninflate_lon = 100\n\n# Thus we effectively have a core part of the domain and an outer part\n\n\ninput_bathy_amm15core = input_bathy[inflate_lat:-inflate_lat,inflate_lon:-inflate_lon]\n\n\ninput_bathy_amm15core_m_cs3xcs20 = input_bathy_amm15core - cs3xcs20_bathy # we could probably make an expanded CS3X at least to the south; NW and NE are probably not possible # NB. 
\ninput_bathy_amm15core_m_cs3xcs20[ np.isnan( op_lsm_bath ) ] = np.nan # Enforce the operational LSM on the core part of the domain\n\n\n# get coordinates of expanded domain\n\ncoord = Dataset((args.LAT_LON)[0],'r')\nlat = coord.variables['gphit'][:,:]\nlon = coord.variables['glamt'][:,:]\n\ny,x = np.shape(lat)\n\n\n# ====== write to file\n\nncfile = Dataset((args.OUT_FILE)[0],'w')\nx_dim = ncfile.createDimension('x', x)\ny_dim = ncfile.createDimension('y', y)\nlatout = ncfile.createVariable('lat', 'f4', ('y','x',))\nlonout = ncfile.createVariable('lon', 'f4', ('y','x',))\nbathy = ncfile.createVariable('Bathymetry', 'f4', ('y','x',))\n\nlatout[:]=lat\nlonout[:]=lon\n\n# Zero out a small patch of cells (the 'hook') before re-embedding the core\nBath_hook = np.copy(input_bathy_amm15core_m_cs3xcs20)\nBath_hook[514:516,1106:1109] = 0\n\n\n\ninput_bathy[100:-100,100:-100] = Bath_hook[:]\nbathy[:] = input_bathy\n\nncfile.description = 'Expanded AMM15 bathymetry: Original source GEBCO data converted from LAT to MSL [land mask=0]. Using CS3X and CS20'\n\nnow = datetime.now()\ncurrent_time = now.strftime(\"%Y/%m/%d %H:%M:%S\")\nrepos = subprocess.run(['git', 'config', '--get', 'remote.origin.url'],\n                       stdout=subprocess.PIPE,\n                       stderr=subprocess.STDOUT)\nrepos = repos.stdout.decode('utf-8').strip('\\n')\nbranch = subprocess.run(['git', 'rev-parse', '--abbrev-ref', 'HEAD'],\n                        stdout=subprocess.PIPE,\n                        stderr=subprocess.STDOUT)\nbranch = branch.stdout.decode('utf-8').strip('\\n')\n\n\nscript = parser.prog\n\nncfile.history = \"Created by {} from branch {} of {} on {} \".format(script, branch[:], repos[:], current_time)\nncfile.inputs = \"{}, {}, {}, {}\".format((args.OP_LSM)[0], (args.CS3X_CS20)[0], (args.BATHY_DATA)[0], (args.LAT_LON)[0])\nncfile.pyversion = platform.python_version()\nncfile.System = platform.system()\nncfile.Release = platform.release()\nncfile.CommandLine = \" \".join(sys.argv)\n\nlatout.units = 'degrees north'\nlonout.units = 'degrees east'\nbathy.units = 'meters'\n\nncfile.close()\n\n\n","sub_path":"scripts/PROCESS_GEBCO_EMOD_2020/GEBCO_PROCESS/Correct_LAT_apply_op_mask.py","file_name":"Correct_LAT_apply_op_mask.py","file_ext":"py","file_size_in_byte":5598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"350788333","text":"import ast\nfrom distutils.util import strtobool\n\nfrom rest_framework import viewsets\nfrom rest_framework.authtoken.models import Token\nfrom rest_framework.response import Response\nfrom rest_framework.throttling import AnonRateThrottle\nfrom rest_framework.views import APIView\nfrom yaml import load as load_yaml, Loader\n\nfrom django.contrib.auth import authenticate\nfrom django.contrib.auth.password_validation import validate_password\nfrom django.core.exceptions import ValidationError\nfrom django.core.validators import URLValidator\nfrom django.db import IntegrityError, transaction\nfrom django.db.models import Q, F, Sum, Prefetch\nfrom django.http import JsonResponse, HttpResponse\nfrom requests import get\n\nfrom .models import (Category, ConfirmEmailKey, Contact, Order, OrderItem,\n                     Parameter, Product, ProductInfo, ProductParameter,\n                     Shop, STATE_CHOICES, User)\nfrom .serializers import (CategoriesSerializer, ContactSerializer,\n                          OrderItemSerializer, OrdersSerializer,\n                          ProductSerializer, ProductInfoSerializer,\n                          ProductsSerializer,\n                          ShopSerializer, UserSerializer)\nfrom 
.tasks import send_auth_key_task, send_email_task\n\n\nclass CartException(Exception):\n \"\"\"Ошибка добавления товаров в коризну\n\n Атрибуты:\n product_info -- информация о продукте\n reason -- причина возникновения ошибки\n \"\"\"\n\n def __init__(self, product_info, reason):\n self.product_info = product_info\n self.reason = reason\n\n\nclass RegisterView(APIView):\n \"\"\"\n Регистрация аккаунта\n \"\"\"\n\n @staticmethod\n def post(request, *args, **kwargs):\n\n if not {'email', 'first_name', 'last_name',\n 'password', 'password_2'}.issubset(request.data):\n return JsonResponse(\n {'Status': False,\n 'Errors': 'Не указаны все необходимые аргументы'}\n )\n elif request.data['password'] != request.data['password_2']:\n return JsonResponse({'Status': False,\n 'Errors': 'Переданные пароли не совпадают'})\n else:\n try:\n validate_password(request.data['password'])\n except ValidationError as exp:\n error_list = []\n for error in exp:\n error_list.append(error)\n return JsonResponse({'Status': False,\n 'Errors': {'Password_errors': error_list}})\n else:\n user_serializer = UserSerializer(data=request.data)\n try:\n user_serializer.is_valid(raise_exception=True)\n user = user_serializer.save()\n user.set_password(request.data['password'])\n user.save()\n send_auth_key_task.delay(user.id)\n return JsonResponse({'Status': True})\n except ValidationError:\n return JsonResponse({'Status': False,\n 'Errors': user_serializer.errors})\n\n\nclass ConfirmAccountView(APIView):\n \"\"\"\n Подтверждение почтового адреса\n \"\"\"\n\n @staticmethod\n def post(request, *args, **kwargs):\n confirmation_key = request.data.get('confirmation_key')\n email = request.data.get('email')\n try:\n key = ConfirmEmailKey.objects.select_related('user').get(\n user__email=email, key=confirmation_key)\n except ConfirmEmailKey.DoesNotExist:\n return JsonResponse({'Status': False,\n 'Errors': 'Неправильно указан ключ или email'})\n key.user.is_active = True\n key.user.save()\n key.delete()\n return JsonResponse({'Status': True})\n\n\nclass LoginView(APIView):\n \"\"\"\n Авторизация аккаунта\n \"\"\"\n\n @staticmethod\n def post(request, *args, **kwargs):\n\n if not {'email', 'password'}.issubset(request.data):\n return JsonResponse(\n {'Status': False,\n 'Errors': 'Не указаны все необходимые аргументы'}\n )\n else:\n user = authenticate(request, username=request.data['email'],\n password=request.data['password'])\n if user is not None and user.is_active:\n token, _ = Token.objects.get_or_create(user=user)\n return JsonResponse({'Status': True, 'Token': token.key})\n else:\n return JsonResponse(\n {'Status': False,\n 'Errors': 'Указанного пользователя не существует '\n 'или аккаунт заблокирован'}\n )\n\n\nclass UserView(APIView):\n \"\"\"\n Аккаунт пользователя\n \"\"\"\n\n @staticmethod\n def get(request, *args, **kwargs):\n \"\"\"\n Получение данных о пользователе\n \"\"\"\n if not request.user.is_authenticated:\n return JsonResponse({'Status': False,\n 'Error': 'Для получения данных пользователя '\n 'необходима авторизация'},\n status=403)\n\n user_serializer = UserSerializer(request.user)\n\n return Response(user_serializer.data)\n\n @staticmethod\n def put(request, *args, **kwargs):\n \"\"\"\n Исправление данных о пользователе\n \"\"\"\n if not request.user.is_authenticated:\n return JsonResponse({'Status': False,\n 'Error': 'Для получения данных пользователя '\n 'необходима авторизация'},\n status=403)\n\n if 'password' in request.data:\n try:\n validate_password(request.data['password'])\n except ValidationError 
as exp:\n error_list = []\n for error in exp:\n error_list.append(error)\n return JsonResponse({'Status': False,\n 'Errors': {'Password_errors': error_list}})\n else:\n request.user.set_password(request.data['password'])\n\n user_serializer = UserSerializer(request.user, data=request.data,\n partial=True)\n if user_serializer.is_valid():\n user_serializer.save()\n return JsonResponse({'Status': True})\n else:\n return JsonResponse({'Status': False,\n 'Errors': user_serializer.errors})\n\n\nclass PasswordResetView(APIView):\n \"\"\"\n Сброс пароля пользователем\n \"\"\"\n\n @staticmethod\n def post(request, *args, **kwargs):\n e_mail = request.data.get('email')\n try:\n user = User.objects.get(email=e_mail)\n except User.DoesNotExist:\n return JsonResponse({'Status': False,\n 'Errors': 'Неправильно указан email'})\n send_auth_key_task.delay(user.id)\n return JsonResponse({'Status': True})\n\n\nclass PasswordConfirmView(APIView):\n \"\"\"\n Подтверждение пароля пользователя\n \"\"\"\n\n @staticmethod\n def post(request, *args, **kwargs):\n e_mail = request.data.get('email')\n password = request.data.get('password')\n password_2 = request.data.get('password_2')\n confirmation_key = request.data.get('confirmation_key')\n try:\n key = ConfirmEmailKey.objects.select_related('user').get(\n user__email=e_mail, key=confirmation_key)\n except ConfirmEmailKey.DoesNotExist:\n return JsonResponse({'Status': False,\n 'Errors': 'Неправильно указан ключ или email'})\n if password != password_2:\n return JsonResponse({'Status': False,\n 'Errors': 'Переданные пароли не совпадают'})\n else:\n try:\n validate_password(password)\n except ValidationError as exp:\n error_list = []\n for error in exp:\n error_list.append(error)\n return JsonResponse({'Status': False,\n 'Errors': {'Password_errors': error_list}})\n else:\n key.user.set_password(password)\n key.user.save()\n key.delete()\n return JsonResponse({'Status': True})\n\n\nclass ShopsView(viewsets.ReadOnlyModelViewSet):\n \"\"\"\n Список магазинов\n \"\"\"\n queryset = Shop.objects.all()\n serializer_class = ShopSerializer\n\n\nclass ProductsView(APIView):\n \"\"\"\n Список всех товаров без описания и привязки к магазину\n \"\"\"\n throttle_classes = [AnonRateThrottle]\n\n @staticmethod\n def get(request, *args, **kwargs):\n shop = request.data.get('shop')\n category = request.data.get('category')\n\n filter = Q(shop__state=True)\n\n if shop:\n filter = filter & Q(shop_id=shop)\n\n if category:\n filter = filter & Q(product__category_id=category)\n\n products = ProductInfo.objects.filter(\n filter).select_related(\n 'shop', 'product__category').prefetch_related(\n 'product_parameters__parameter').distinct()\n\n products_serializer = ProductInfoSerializer(products, many=True)\n\n return Response(products_serializer.data)\n\n\nclass CategoriesView(viewsets.ReadOnlyModelViewSet):\n \"\"\"\n Список магазинов\n \"\"\"\n queryset = Category.objects.all()\n serializer_class = CategoriesSerializer\n\n\nclass ProductInfoView(APIView):\n \"\"\"\n Карточка товара с описанием и привязкой к магазинам\n \"\"\"\n throttle_classes = [AnonRateThrottle]\n\n @staticmethod\n def get(request, product_id=False, *args, **kwargs):\n if not product_id:\n return JsonResponse(\n {'Status': False,\n 'Errors': 'Необходимо передать id товара в параметрах запроса'}\n )\n try:\n product = Product.objects.prefetch_related(Prefetch(\n 'product_infos',\n queryset=ProductInfo.objects.filter(shop__state=True)\n )).get(id=product_id)\n except Product.DoesNotExist:\n return JsonResponse({\n 
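ProductsView above composes its queryset filter incrementally, AND-ing one Q object per optional request parameter. The same pattern in isolation (build_product_filter is a hypothetical helper; the field names are taken from this app's models):

    from django.db.models import Q

    def build_product_filter(shop_id=None, category_id=None):
        # Always-on condition first; optional conditions are AND-ed in.
        condition = Q(shop__state=True)
        if shop_id:
            condition &= Q(shop_id=shop_id)
        if category_id:
            condition &= Q(product__category_id=category_id)
        return condition

    # e.g. ProductInfo.objects.filter(build_product_filter(shop_id=3)).distinct()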
'Status': False,\n 'Error': 'Товара с указанным id не существует'\n })\n products_serializer = ProductSerializer(product)\n\n return Response(products_serializer.data)\n\n\nclass CartView(APIView):\n \"\"\"\n Корзина\n \"\"\"\n\n @staticmethod\n def get(request, *args, **kwargs):\n \"\"\"\n Содержимое корзины\n \"\"\"\n if not request.user.is_authenticated:\n return JsonResponse({'Status': False,\n 'Error': 'Для получения данных корзины '\n 'в корзину необходима авторизация'},\n status=403)\n\n if request.user.type != 'buyer':\n return JsonResponse({\n 'Status': False,\n 'Error': 'Корзина доступна только покупателям'\n }, status=403)\n\n cart, _ = Order.objects.get_or_create(\n user=request.user,\n state=STATE_CHOICES[0][0],\n )\n order_items = OrderItem.objects.filter(order=cart).select_related(\n 'product_info__product', 'product_info__shop').annotate(\n total_sum=Sum(\n F('quantity') * F(\n 'product_info__price'\n )\n )\n )\n order_items_serializer = OrderItemSerializer(order_items, many=True)\n\n return Response(order_items_serializer.data)\n\n @staticmethod\n def delete(request, *args, **kwargs):\n \"\"\"\n Удаление товаров из корзины\n \"\"\"\n if not request.user.is_authenticated:\n return JsonResponse({'Status': False,\n 'Error': 'Для удаления товаров '\n 'из корзины необходима авторизация'},\n status=403)\n\n if request.user.type != 'buyer':\n return JsonResponse({\n 'Status': False,\n 'Error': 'Удалять товары из коризны'\n ' возможно только покупателям'\n }, status=403)\n\n order_items = request.data.get('order_items')\n if not order_items:\n return JsonResponse(\n {'Status': False,\n 'Errors': 'Не передан параметр с инофрмацией о товарах'\n 'в теле запроса'}\n )\n\n order_items = order_items.split(',')\n try:\n with transaction.atomic():\n for item in order_items:\n order_item = OrderItem.objects.get(id=item.strip())\n if order_item.order.user != request.user:\n return JsonResponse({\n 'Status': False,\n 'Error': 'Нельзя вносить изменения '\n 'не в свою корзину'\n })\n else:\n order_item.delete()\n except OrderItem.DoesNotExist:\n return JsonResponse({\n 'Status': False,\n 'Error': 'Информация о товаре отсутствует'\n })\n else:\n return JsonResponse({'Status': True})\n\n @staticmethod\n def put(request, *args, **kwargs):\n \"\"\"\n Изменение количества товаров в корзине\n \"\"\"\n if not request.user.is_authenticated:\n return JsonResponse({'Status': False,\n 'Error': 'Для изменения товаров '\n 'в корзине необходима авторизация'},\n status=403)\n\n if request.user.type != 'buyer':\n return JsonResponse({\n 'Status': False,\n 'Error': 'Изменять товары в коризне'\n ' возможно только покупателям'\n }, status=403)\n\n data = request.data.get('items')\n if not data:\n return JsonResponse(\n {'Status': False,\n 'Errors': 'Не передан параметр с информацией о товарах'\n 'в теле запроса'}\n )\n try:\n data = ast.literal_eval(data)\n except SyntaxError:\n JsonResponse({'Status': False, 'Errors': 'Неверный формат запроса'})\n\n try:\n with transaction.atomic():\n for item in data:\n order_item = OrderItem.objects.get(id=item['id'])\n if order_item.product_info.quantity < item['quantity']:\n raise CartException(\n product_info=order_item.product_info,\n reason='quantity')\n elif order_item.order.user != request.user:\n return JsonResponse({\n 'Status': False,\n 'Error': 'Нельзя вносить изменения не в свою корзину'\n })\n else:\n order_item.quantity = item['quantity']\n order_item.save()\n except CartException as exc:\n if exc.reason == 'quantity':\n return JsonResponse({\n 'Status': 
False,\n 'Error': 'У магазина {} недостаточное количество товара {}'\n ' для добавления в корзину.Всего в магазине {} '\n 'штук, доступных для добавления '\n 'в корзину'.format(exc.product_info.shop.name,\n exc.product_info.product.name,\n exc.product_info.quantity)\n })\n except OrderItem.DoesNotExist:\n return JsonResponse({\n 'Status': False,\n 'Error': 'Информация о товаре отсутствует'\n })\n\n return JsonResponse({'Status': True})\n\n @staticmethod\n def post(request, *args, **kwargs):\n \"\"\"\n Добавление товаров в коризну\n \"\"\"\n if not request.user.is_authenticated:\n return JsonResponse({'Status': False,\n 'Error': 'Для добавления товаров '\n 'в корзину необходима авторизация'},\n status=403)\n\n if request.user.type != 'buyer':\n return JsonResponse({\n 'Status': False,\n 'Error': 'Добавлять товары в коризну'\n ' возможно только покупателям'\n }, status=403)\n\n data = request.data.get('items')\n if not data:\n return JsonResponse(\n {'Status': False,\n 'Errors': 'Не передан параметр с инофрмацией о товарах'\n 'в теле запроса'}\n )\n cart, _ = Order.objects.get_or_create(\n user=request.user,\n state=STATE_CHOICES[0][0],\n )\n try:\n data = ast.literal_eval(data)\n except SyntaxError:\n JsonResponse({'Status': False, 'Errors': 'Неверный формат запроса'})\n\n try:\n with transaction.atomic():\n for item in data:\n product_info = ProductInfo.objects.get(\n id=item['product_info'])\n if product_info.quantity < item['quantity']:\n raise CartException(product_info=product_info,\n reason='quantity')\n elif not product_info.shop.state:\n raise CartException(product_info=product_info,\n reason='state')\n try:\n OrderItem.objects.create(\n order=cart,\n product_info=product_info,\n quantity=item['quantity']\n )\n except IntegrityError:\n return JsonResponse({\n 'Status': False,\n 'Error': 'В корзину уже добавлен товар с '\n 'информацией по id {}. 
Если хотите '\n 'изменить информацию по данному id '\n 'используйте метод PUT'.format(\n product_info.id\n )\n })\n except CartException as exc:\n if exc.reason == 'quantity':\n return JsonResponse({\n 'Status': False,\n 'Error': 'У магазина {} недостаточное количество товара {}'\n ' для добавления в корзину.Всего в магазине {} '\n 'штук, доступных для добавления '\n 'в корзину'.format(exc.product_info.shop.name,\n exc.product_info.product.name,\n exc.product_info.quantity)\n })\n elif exc.reason == 'state':\n return JsonResponse({\n 'Status': False,\n 'Error': 'Магазин {} не принимает заказы '\n 'в данный момент '.format(\n exc.product_info.shop.name\n )\n })\n return JsonResponse({'Status': True})\n\n\nclass OrdersView(APIView):\n \"\"\"\n Заказы\n \"\"\"\n\n @staticmethod\n def get(request, *args, **kwargs):\n \"\"\"\n Получить все имеющиеся заказы\n \"\"\"\n if not request.user.is_authenticated:\n return JsonResponse({'Status': False,\n 'Error': 'Для получения информации о заказах '\n 'необходима авторизация'},\n status=403)\n\n if request.user.type != 'buyer':\n return JsonResponse({\n 'Status': False,\n 'Error': 'Заказы доступны только покупателям'\n }, status=403)\n orders = Order.objects.select_related('contact').prefetch_related(\n 'ordered_items'\n ).filter(user=request.user).exclude(state=STATE_CHOICES[0][0]).annotate(\n total_sum=Sum(\n F('ordered_items__quantity') * F(\n 'ordered_items__product_info__price'\n )\n )\n ).distinct()\n orders_serializer = OrdersSerializer(orders, many=True)\n\n return Response(orders_serializer.data)\n\n\nclass OrderView(APIView):\n \"\"\"\n Заказ\n \"\"\"\n\n @staticmethod\n def get(request, *args, **kwargs):\n \"\"\"\n Получить расифровку по заказу\n \"\"\"\n if not request.user.is_authenticated:\n return JsonResponse({'Status': False,\n 'Error': 'Для получения информации о заказах '\n 'необходима авторизация'},\n status=403)\n\n if request.user.type != 'buyer':\n return JsonResponse({\n 'Status': False,\n 'Error': 'Заказы доступны только покупателям'\n }, status=403)\n try:\n orders = Order.objects.select_related('contact').prefetch_related(\n 'ordered_items').annotate(\n total_sum=Sum(\n F('ordered_items__quantity') * F(\n 'ordered_items__product_info__price'\n )\n )\n ).get(\n id=request.data.get('id')\n )\n except Order.DoesNotExist:\n return JsonResponse({\n 'Status': False,\n 'Error': 'Указанный id заказа не существует'\n })\n orders_serializer = OrdersSerializer(orders)\n\n return Response(orders_serializer.data)\n\n @staticmethod\n def post(request, *args, **kwargs):\n \"\"\"\n Создать заказ\n \"\"\"\n if not request.user.is_authenticated:\n return JsonResponse({'Status': False,\n 'Error': 'Для создания заказа '\n 'необходима авторизация'},\n status=403)\n\n if request.user.type != 'buyer':\n return JsonResponse({\n 'Status': False,\n 'Error': 'Создать заказ доступно только покупателям'\n }, status=403)\n\n if not {'id', 'contact_id'}.issubset(request.data):\n return JsonResponse(\n {'Status': False,\n 'Errors': 'Не указаны все необходимые аргументы'}\n )\n\n try:\n order = Order.objects.get(\n id=request.data.get('id'),\n state=STATE_CHOICES[0][0],\n )\n contact = Contact.objects.get(id=request.data.get('contact_id'))\n\n except Contact.DoesNotExist:\n return JsonResponse({\n 'Status': False,\n 'Error': 'Указанного id контакта не существует'\n })\n except Order.DoesNotExist:\n return JsonResponse({\n 'Status': False,\n 'Error': 'Указанный id корзины не существует'\n })\n else:\n order.contact = contact\n order.state = 
''.join(STATE_CHOICES[1][0])\n order.save()\n send_email_task.delay(instance_state=order.state,\n instance_id=order.id,\n instance_e_mail=order.user.email)\n return JsonResponse({'Status': True})\n\n\nclass ContactView(APIView):\n \"\"\"\n Контакты пользователя\n \"\"\"\n\n @staticmethod\n def get(request, *args, **kwargs):\n \"\"\"\n Получить имеющиеся контакты\n \"\"\"\n if not request.user.is_authenticated:\n return JsonResponse({'Status': False,\n 'Error': 'Для получения контактов '\n 'необходима авторизация'},\n status=403)\n\n contact = Contact.objects.filter(user=request.user)\n contact_serializer = ContactSerializer(contact, many=True)\n\n return Response(contact_serializer.data)\n\n @staticmethod\n def delete(request, *args, **kwargs):\n \"\"\"\n Удалить контакты\n \"\"\"\n if not request.user.is_authenticated:\n return JsonResponse({'Status': False,\n 'Error': 'Для удаления контактов '\n 'необходима авторизация'},\n status=403)\n\n contacts_id = request.data.get('contacts_id')\n if not contacts_id:\n return JsonResponse(\n {'Status': False,\n 'Errors': 'Не передан параметр с информацией о контактах'\n 'в теле запроса'}\n )\n\n contacts_id = contacts_id.split(',')\n try:\n with transaction.atomic():\n for item in contacts_id:\n contact = Contact.objects.get(id=item.strip())\n if contact.user != request.user:\n return JsonResponse({\n 'Status': False,\n 'Error': 'Нельзя удалять чужие контакты '\n })\n else:\n contact.delete()\n except OrderItem.DoesNotExist:\n return JsonResponse({\n 'Status': False,\n 'Error': 'id контакта не существует'\n })\n else:\n return JsonResponse({'Status': True})\n\n @staticmethod\n def post(request, *args, **kwargs):\n \"\"\"\n Создать контакт\n \"\"\"\n if not request.user.is_authenticated:\n return JsonResponse({'Status': False,\n 'Error': 'Для создания контакта '\n 'необходима авторизация'},\n status=403)\n\n if not {'city', 'street', 'phone'}.issubset(request.data):\n return JsonResponse(\n {'Status': False,\n 'Errors': 'Не указаны все необходимые аргументы'}\n )\n\n request.data._mutable = True\n request.data.update({'user': request.user.id})\n serializer = ContactSerializer(data=request.data)\n\n if serializer.is_valid():\n serializer.save()\n return JsonResponse({'Status': True})\n else:\n return JsonResponse({'Status': False, 'Errors': serializer.errors})\n\n @staticmethod\n def put(request, *args, **kwargs):\n \"\"\"\n Изменить контакт\n \"\"\"\n if not request.user.is_authenticated:\n return JsonResponse({'Status': False,\n 'Error': 'Для изменения контакта '\n 'необходима авторизация'},\n status=403)\n\n if not {'city', 'street', 'phone', 'id'}.issubset(request.data):\n return JsonResponse(\n {'Status': False,\n 'Errors': 'Не указаны все необходимые аргументы'}\n )\n try:\n contact = Contact.objects.get(id=request.data.get('id'))\n except Contact.DoesNotExist:\n return JsonResponse({\n 'Status': False,\n 'Error': 'Указанный id контакта не существует'\n })\n else:\n if contact.user != request.user:\n return JsonResponse({\n 'Status': False,\n 'Error': 'Нельзя вносить изменения '\n 'не в свои контакты'\n })\n serializer = ContactSerializer(contact, data=request.data,\n partial=True)\n if serializer.is_valid():\n serializer.save()\n return JsonResponse({'Status': True})\n else:\n return JsonResponse({'Status': False,\n 'Errors': serializer.errors})\n\n\nclass LoadInfo(APIView):\n \"\"\"\n Обновление информации о товарах от поставщика\n \"\"\"\n\n @staticmethod\n def post(request, *args, **kwargs):\n if not request.user.is_authenticated:\n return 
JsonResponse({'Status': False,\n 'Error': 'Для обновления товаров '\n 'необходима авторизация'},\n status=403)\n if request.user.type != 'shop':\n return JsonResponse({\n 'Status': False,\n 'Error': 'Обновлять информацию о товарах '\n 'возможно только магазинам'\n }, status=403)\n\n url = request.data.get('url')\n if url:\n validate_url = URLValidator()\n try:\n validate_url(url)\n except ValidationError as e:\n return JsonResponse({'Status': False, 'Error': str(e)})\n else:\n stream = get(url).content\n data = load_yaml(stream, Loader=Loader)\n shop, _ = Shop.objects.get_or_create(name=data['shop'],\n user_id=request.user.id)\n for category in data['categories']:\n category_object, _ = Category.objects.get_or_create(\n id=category['id'], name=category['name']\n )\n category_object.shops.add(shop.id)\n category_object.save()\n\n for item in data['goods']:\n product, _ = Product.objects.get_or_create(\n name=item['name'],\n category_id=item['category'])\n\n product_info, _ = ProductInfo.objects.get_or_create(\n product_id=product.id,\n external_id=item['id'],\n model=item['model'],\n price=item['price'],\n price_rrc=item['price_rrc'],\n quantity=item['quantity'],\n shop_id=shop.id\n )\n for name, value in item['parameters'].items():\n parameter, _ = Parameter.objects.get_or_create(\n name=name\n )\n ProductParameter.objects.create(\n product_info_id=product_info.id,\n parameter_id=parameter.id,\n value=value\n )\n\n return JsonResponse({'Status': True})\n\n return JsonResponse(\n {'Status': False, 'Errors': 'Не указаны все необходимые аргументы'}\n )\n\n\nclass StateChange(APIView):\n \"\"\"\n Статус получения заказов магазина\n \"\"\"\n\n @staticmethod\n def get(request, *args, **kwargs):\n \"\"\"\n Получить статус заказов магазина\n \"\"\"\n if not request.user.is_authenticated:\n return JsonResponse({'Status': False,\n 'Error': 'Для получения статуса '\n 'необходима авторизация'},\n status=403)\n if request.user.type != 'shop':\n return JsonResponse({\n 'Status': False,\n 'Error': 'Получать инфомрацию о статусе '\n 'возможно только магазинам'\n }, status=403)\n\n shop = Shop.objects.get(user=request.user)\n\n shops_serializer = ShopSerializer(shop)\n\n return Response(shops_serializer.data)\n\n @staticmethod\n def put(request, *args, **kwargs):\n \"\"\"\n Изменить статус получения заказов магазина\n \"\"\"\n if not request.user.is_authenticated:\n return JsonResponse({'Status': False,\n 'Error': 'Для обновления статуса '\n 'необходима авторизация'},\n status=403)\n if request.user.type != 'shop':\n return JsonResponse({\n 'Status': False,\n 'Error': 'Обновлять статус '\n 'возможно только магазинам'\n }, status=403)\n\n state = request.data.get('state')\n shop = Shop.objects.get(user=request.user)\n if not state:\n return JsonResponse(\n {'Status': False,\n 'Errors': 'Не указаны все необходимые аргументы'}\n )\n try:\n shop.state = strtobool(state)\n except ValueError:\n return JsonResponse(\n {'Status': False,\n 'Errors': 'Переданный параметр статуса некорректен'}\n )\n else:\n shop.save()\n return JsonResponse({'Status': True})\n\n\nclass ShopOrders(APIView):\n \"\"\"\n Заказы магазина\n \"\"\"\n\n @staticmethod\n def get(request, *args, **kwargs):\n \"\"\"\n Получить заказы магазина\n \"\"\"\n if not request.user.is_authenticated:\n return JsonResponse({'Status': False,\n 'Error': 'Для получения статуса '\n 'необходима авторизация'},\n status=403)\n if request.user.type != 'shop':\n return JsonResponse({\n 'Status': False,\n 'Error': 'Получать инфомрацию о статусе '\n 'возможно только 
магазинам'\n }, status=403)\n\n orders = Order.objects.select_related('contact').prefetch_related(\n 'ordered_items').exclude(\n state=STATE_CHOICES[0][0]\n ).filter(\n ordered_items__product_info__shop__user=request.user\n ).annotate(\n total_sum=Sum(\n F('ordered_items__quantity') * F(\n 'ordered_items__product_info__price'\n )\n )\n ).distinct()\n orders_serializer = OrdersSerializer(orders, many=True)\n\n return Response(orders_serializer.data)\n\n\ndef empty_view(request, *args, **kwargs):\n return HttpResponse()\n","sub_path":"netology_pd/orders/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":38317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"249782093","text":"import copy\nimport requests\n\nfrom cliquet.events import ResourceChanged\nfrom kinto import authorization\n\n\ndef includeme(config):\n def on_resource_changed(event):\n \"\"\"On any event on the collection, see if there are registered WebPush\n URLs and notify them if there is.\n \"\"\"\n resource_name = event.payload['resource_name']\n action = event.payload['action']\n\n collection_update = (resource_name == 'collection' and\n action != \"create\")\n if collection_update or resource_name == \"record\":\n bucket_id = event.payload['bucket_id']\n collection_id = event.payload['collection_id']\n parent_id = \"/buckets/%s/collections/%s\" % (bucket_id, collection_id)\n\n # XXX Need to find a way to expire the URLs after some point,\n # otherwise they will just pile up and being pinged even if\n # useless.\n recipients, _ = event.request.registry.storage.get_all(\n collection_id=\"webpush\", parent_id=parent_id)\n for recipient in recipients:\n requests.post(recipient['url'])\n\n config.add_subscriber(on_resource_changed, ResourceChanged)\n config.scan('kinto_webpush.views')\n\n","sub_path":"kinto_webpush/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"193762830","text":"# -*- coding: utf-8 -*-\n\"\"\"\nSpyder Editor\n\nThis is a temporary script file.\n\"\"\"\n\n\nimport numpy as np\nimport sklearn\nimport pickle\nfrom keras.models import Sequential\nfrom keras.layers import Flatten, Dense, Lambda, Activation, Dropout, Cropping2D\nfrom keras.layers.convolutional import Convolution2D\nfrom keras.layers.pooling import MaxPooling2D\nfrom sklearn.utils import shuffle\nimport matplotlib.pyplot as plt\n#import cv2\n\n# There is 1 output class\nnb_classes = 1 \n\n\npickle_train = 'train.pickle'\npickle_validation = 'validation.pickle'\n\nbatch_size = 32\n\n\n\n# load the training dataset from the train pickle \nwith open(pickle_train, 'rb') as f:\n #train, test1, _ = ds.getDigitStruct()\n save = pickle.load(f)\n train_dataset = save['train_dataset']\n train_labels = save['train_labels']\n #delete save to free up memory\n del save\n print('Training set: ', np.array(train_dataset).shape, np.array(train_labels).shape)\n \n \n \n# load the validation dataset from the test pickle \nwith open(pickle_validation, 'rb') as f:\n save = pickle.load(f)\n valid_dataset = save['valid_dataset']\n valid_labels = save['valid_labels']\n del save\n print('Validation set: ', np.array(valid_dataset).shape, np.array(valid_labels).shape)\n\n\ndef generator(images, angles, batch_size=32):#2):\n num_samples = len(images)\n while True: # Loop forever so the generator never terminates\n images, angles = shuffle(images, angles)\n for offset in 
range(0, num_samples, batch_size):\n            generated_images = images[offset:offset+batch_size]\n            generated_angles = angles[offset:offset+batch_size]\n            #X_train = np.array(images)\n            #y_train = np.array(angles)\n            X_train = np.array(generated_images)\n            y_train = np.array(generated_angles)\n            yield sklearn.utils.shuffle(X_train, y_train)\n\n# compile and train the model using the generator function\ntrain_generator = generator(train_dataset, train_labels, batch_size=32)\nvalidation_generator = generator(valid_dataset, valid_labels, batch_size=32)\n\nch, row, col = 3, 80, 320  # Trimmed image format\n\nshape = (ch, row, col)\n\n# number of convolutional filters to use\nnb_filters = [16, 8, 4, 2]\n\n# size of pooling area for max pooling\npool_size = 2\n\n# convolution kernel size\nkernel_size = 3\n\nmodel = Sequential()\n# Preprocess incoming data: crop to the road area and centre around zero \nmodel.add(Cropping2D(cropping=((60,20), (0,0)), input_shape=(160,320,3)))\nmodel.add(Lambda(lambda x: x/127.5 - 1.))\n\n# First convolutional layer: 3 channels in, 16 out\nmodel.add(Convolution2D(nb_filters[0], kernel_size, kernel_size))\n# ELU activation\nmodel.add(Activation('elu'))\n# The second conv layer converts 16 channels into 8 channels\nmodel.add(Convolution2D(nb_filters[1], kernel_size, kernel_size))\n# ELU activation\nmodel.add(Activation('elu'))\n# The third conv layer converts 8 channels into 4 channels\nmodel.add(Convolution2D(nb_filters[2], kernel_size, kernel_size))\n# ELU activation\nmodel.add(Activation('elu'))\n# The fourth conv layer converts 4 channels into 2 channels\nmodel.add(Convolution2D(nb_filters[3], kernel_size, kernel_size))\n# ELU activation\nmodel.add(Activation('elu'))\n# Apply max pooling over each 2 x 2 patch\nmodel.add(MaxPooling2D(pool_size=(pool_size, pool_size), strides=None, border_mode='valid', dim_ordering='default'))\n# Dropout with rate 0.5\nmodel.add(Dropout(0.5))\n\n# Flatten the feature maps before the fully connected layers\nmodel.add(Flatten())\n# Fully connected layer with 512 units\nmodel.add(Dense(512))\n# ELU activation\nmodel.add(Activation('elu'))\n# Dropout with rate 0.5\nmodel.add(Dropout(0.5))\n# Fully connected layer with 64 units\nmodel.add(Dense(64))\n# ELU activation\nmodel.add(Activation('elu'))\n# Fully connected layer with 16 units\nmodel.add(Dense(16))\n# ELU activation\nmodel.add(Activation('elu'))\n# Dropout with rate 0.5\nmodel.add(Dropout(0.5))\n# Output layer: a single steering-angle value\nmodel.add(Dense(nb_classes))\n\n## Print out summary of the model\nmodel.summary()\n\n\nmodel.compile(loss='mse', optimizer='adam')\nhistory_object = model.fit_generator(train_generator, samples_per_epoch= len(train_dataset), validation_data=validation_generator, nb_val_samples=len(valid_dataset), nb_epoch=6, verbose=1)\n\n### print the keys contained in the history object\nprint(history_object.history.keys())\n\n### plot the training and validation loss for each epoch\nplt.plot(history_object.history['loss'])\nplt.plot(history_object.history['val_loss'])\nplt.title('model mean squared error loss')\nplt.ylabel('mean squared error loss')\nplt.xlabel('epoch')\nplt.legend(['training set', 'validation set'], loc='upper right')\nplt.show()\n\n#from keras.utils.visualize_util import plot\n\n#plot(model, to_file='model.png', show_shapes=True)\n\n#img = cv2.imread('model.png')\n\n# original image\n#plt.subplots(figsize=(5,10))\n#plt.subplot(111)\n#plt.axis('off')\n#plt.imshow(img)\n\nmodel.save('./model.h5')\nprint(\"Model Saved\")\n \n \n \n","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":5033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"342612901","text":"\"\"\"\nImplementation of REST API for latch creation\n\"\"\"\nfrom flask import Blueprint, request\nfrom .utils import typename_to_type\nfrom .contexts import contexts\n\nlr = Blueprint('latches', __name__)\n\n@lr.route('', methods=['GET'])\ndef list_latches():\n    \"\"\"\n    Lists the available latches\n    \"\"\"\n    context = request.args.get('context')\n    if context is None:\n        return {'result': 'error'}, 400\n    ctx = contexts[context]['context']\n    return {'latches': [key for key, _ in ctx.latches.items()]}, 200\n\n@lr.route('/create', methods=['POST'])\ndef create_latch():\n    \"\"\"\n    Creates a new latch\n    \"\"\"\n    context = request.get_json()['context']\n    typ = request.get_json()['type']\n    if context is None or typ is None:\n        return {'result': 'error'}, 400\n    ctx = contexts[context]['context']\n    name = '__l{}'.format(len(ctx.latches.items()))\n    ctx.mk_latch(name, typename_to_type(ctx, typ))\n    return {'result': name}, 201\n\n@lr.route('/initnext', methods=['PUT'])\ndef set_latch_init_next():\n    \"\"\"\n    Sets the initial and next value of a latch\n    \"\"\"\n    context = request.get_json()['context']\n    latch = request.get_json()['latch']\n    init = request.get_json()['init']\n    nex = request.get_json()['next']\n    if context is None or latch is None or init is None or nex is None:\n        return {'result': 'error'}, 400\n    ctx = contexts[context]['context']\n    latch_net = ctx.nets[latch]\n    init_net = ctx.nets[init]\n    next_net = ctx.nets[nex]\n    ctx.set_latch_init_next(latch_net, init_net, next_net)\n    return {'result': 'ok'}, 200\n","sub_path":"app/latches.py","file_name":"latches.py","file_ext":"py","file_size_in_byte":1568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"311008662","text":"# -*- coding: utf-8 -*-\nfrom schematics.types import 
StringType\nfrom schematics.types.compound import ModelType\nfrom schematics.exceptions import ValidationError\nfrom schematics.transforms import whitelist\nfrom schematics.types.serializable import serializable\nfrom zope.interface import implementer\nfrom openprocurement.api.models import (\n Model, ListType\n)\nfrom openprocurement.api.models import TZ, get_now, SANDBOX_MODE, Value\nfrom openprocurement.auctions.core.models import IAuction\nfrom openprocurement.auctions.dgf.models import (\n DGFFinancialAssets as BaseAuction,\n get_auction, Bid as BaseBid,\n Organization\n)\n\nfrom openprocurement.auctions.insider.utils import generate_participation_url\n\n\nclass Bid(BaseBid):\n tenderers = ListType(ModelType(Organization), required=True, min_size=1, max_size=1)\n\n class Options:\n roles = {\n 'create': whitelist('tenderers', 'parameters', 'lotValues', 'status', 'qualified', 'eligible'),\n }\n\n def validate_value(self, data, value):\n if isinstance(data['__parent__'], Model):\n auction = data['__parent__']\n if not value:\n return\n if auction.value.amount > value.amount:\n raise ValidationError(u\"value of bid should be greater than value of auction\")\n if auction.get('value').currency != value.currency:\n raise ValidationError(u\"currency of bid should be identical to currency of value of auction\")\n if auction.get('value').valueAddedTaxIncluded != value.valueAddedTaxIncluded:\n raise ValidationError(u\"valueAddedTaxIncluded of bid should be identical to valueAddedTaxIncluded of value of auction\")\n\n @serializable(serialized_name=\"participationUrl\", serialize_when_none=False)\n def participation_url(self):\n if not self.participationUrl and self.status != \"draft\":\n request = get_auction(self).__parent__.request\n url = generate_participation_url(request, self.id)\n return url\n\n\n@implementer(IAuction)\nclass Auction(BaseAuction):\n \"\"\"Data regarding auction process - publicly inviting prospective contractors to submit bids for evaluation and selecting a winner or winners.\"\"\"\n procurementMethodType = StringType(default=\"dgfInsider\")\n bids = ListType(ModelType(Bid), default=list()) # A list of all the companies who entered submissions for the auction.\n minimalStep = ModelType(Value)\n\n @serializable(serialized_name=\"minimalStep\", type=ModelType(Value))\n def auction_minimalStep(self):\n return Value(dict(amount=0))\n\nDGFInsider = Auction\n\n","sub_path":"openprocurement/auctions/insider/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"395284506","text":"# https://www.hackerrank.com/challenges/merge-the-tools\r\n\r\n\r\ndef merge_the_tools(string, k):\r\n for part in zip(*[iter(string)] * k):\r\n d = dict()\r\n print(''.join([d.setdefault(c, c) for c in part if c not in d]))\r\n\r\n\r\nif __name__ == '__main__':\r\n string, k = input(), int(input())\r\n merge_the_tools(string, k)\r\n\r\n\r\n# If we want to add a single element to an existing set, we can use the .add() operation.\r\n# It adds the element to the set and returns 'None'.\r\n# Task\r\n#\r\n# Apply your knowledge of the .add() operation to help your friend Rupal.\r\n#\r\n# Rupal has a huge collection of country stamps.\r\n# She decided to count the total number of distinct country stamps in her collection.\r\n# She asked for your help. 
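merge_the_tools above relies on two compact idioms: zip(*[iter(string)] * k) chunks a string into k-character pieces because all k references share a single iterator, and dict.setdefault keeps only the first occurrence of each character. Spelled out on a small input:

    s, k = 'AABCAAADA', 3

    # All three iterator references are the same object, so zip consumes
    # the string three characters at a time.
    chunks = [''.join(part) for part in zip(*[iter(s)] * k)]
    assert chunks == ['AAB', 'CAA', 'ADA']

    # First occurrence wins; order is preserved.
    for chunk in chunks:
        seen = {}
        print(''.join(seen.setdefault(c, c) for c in chunk if c not in seen))
    # prints: AB, CA, AD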
You pick the stamps one by one from a stack of country stamps.\r\n#\r\n# Find the total number of distinct country stamps.\r\n#\r\n# Input Format\r\n#\r\n# The first line contains an integer , the total number of country stamps.\r\n# The next lines contains the name of the country where the stamp is from.\r\n\r\nnum = input('Number: ')\r\nmark = set()\r\nfor i in range(int(num)):\r\n mark.add(input('Country: '))\r\nprint('Total: {} countries'.format(len(mark)))","sub_path":"merge_the_tools.py","file_name":"merge_the_tools.py","file_ext":"py","file_size_in_byte":1180,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"119120399","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/benchexec/containerexecutor.py\n# Compiled at: 2020-05-07 05:52:35\nfrom __future__ import absolute_import, division, print_function, unicode_literals\nimport argparse, errno, logging, os, collections, shutil\ntry:\n import cPickle as pickle\nexcept ImportError:\n import pickle\n\nimport signal, subprocess, sys, tempfile\nfrom benchexec import __version__\nfrom benchexec import baseexecutor\nfrom benchexec import BenchExecException\nfrom benchexec.cgroups import Cgroup\nfrom benchexec import container\nfrom benchexec import libc\nfrom benchexec import util\nfrom benchexec.container import DIR_MODES, DIR_HIDDEN, DIR_READ_ONLY, DIR_OVERLAY, DIR_FULL_ACCESS, NATIVE_CLONE_CALLBACK_SUPPORTED\nsys.dont_write_bytecode = True\n_HAS_SIGWAIT = hasattr(signal, b'sigwait')\n\ndef add_basic_container_args(argument_parser):\n argument_parser.add_argument(b'--network-access', action=b'store_true', help=b'allow process to use network communication')\n argument_parser.add_argument(b'--no-tmpfs', dest=b'tmpfs', action=b'store_false', help=b'Store temporary files (e.t., tool output files) on the actual file system instead of a tmpfs (\"RAM disk\") that is included in the memory limit')\n argument_parser.add_argument(b'--keep-system-config', dest=b'container_system_config', action=b'store_false', help=b'do not use a special minimal configuration for local user and host lookups inside the container')\n argument_parser.add_argument(b'--keep-tmp', action=b'store_true', help=b\"do not use a private /tmp for process (same as '--full-access-dir /tmp')\")\n argument_parser.add_argument(b'--hidden-dir', metavar=b'DIR', action=b'append', default=[], help=b\"hide this directory by mounting an empty directory over it (default for '/tmp' and '/run')\")\n argument_parser.add_argument(b'--read-only-dir', metavar=b'DIR', action=b'append', default=[], help=b'make this directory visible read-only in the container')\n argument_parser.add_argument(b'--overlay-dir', metavar=b'DIR', action=b'append', default=[], help=b\"mount an overlay filesystem over this directory that redirects all write accesses to temporary files (default for '/')\")\n argument_parser.add_argument(b'--full-access-dir', metavar=b'DIR', action=b'append', default=[], help=b'give full access (read/write) to this host directory to processes inside container')\n\n\ndef handle_basic_container_args(options, parser=None):\n \"\"\"Handle the options specified by add_basic_container_args().\n @return: a dict that can be used as kwargs for the ContainerExecutor constructor\n \"\"\"\n dir_modes = {}\n error_fn = parser.error if parser else sys.exit\n\n def handle_dir_mode(path, mode):\n path = 
os.path.abspath(path)\n if not os.path.isdir(path):\n error_fn((b\"Cannot specify directory mode for '{}' because it does not exist or is no directory.\").format(path))\n if path in dir_modes:\n error_fn((b\"Cannot specify multiple directory modes for '{}'.\").format(path))\n dir_modes[path] = mode\n\n for path in options.hidden_dir:\n handle_dir_mode(path, DIR_HIDDEN)\n\n for path in options.read_only_dir:\n handle_dir_mode(path, DIR_READ_ONLY)\n\n for path in options.overlay_dir:\n handle_dir_mode(path, DIR_OVERLAY)\n\n for path in options.full_access_dir:\n handle_dir_mode(path, DIR_FULL_ACCESS)\n\n if options.keep_tmp:\n if b'/tmp' in dir_modes and not dir_modes[b'/tmp'] == DIR_FULL_ACCESS:\n error_fn(b'Cannot specify both --keep-tmp and --hidden-dir /tmp.')\n dir_modes[b'/tmp'] = DIR_FULL_ACCESS\n elif b'/tmp' not in dir_modes:\n dir_modes[b'/tmp'] = DIR_HIDDEN\n if b'/' not in dir_modes:\n dir_modes[b'/'] = DIR_OVERLAY\n if b'/run' not in dir_modes:\n dir_modes[b'/run'] = DIR_HIDDEN\n if options.container_system_config:\n if options.network_access:\n logging.warning(b'The container configuration disables DNS, host lookups will fail despite --network-access. Consider using --keep-system-config.')\n else:\n if b'/run/resolvconf' not in dir_modes and os.path.isdir(b'/run/resolvconf'):\n dir_modes[b'/run/resolvconf'] = DIR_READ_ONLY\n if b'/run/systemd/resolve' not in dir_modes and os.path.isdir(b'/run/systemd/resolve'):\n dir_modes[b'/run/systemd/resolve'] = DIR_READ_ONLY\n return {b'network_access': options.network_access, b'container_tmpfs': options.tmpfs, \n b'container_system_config': options.container_system_config, \n b'dir_modes': dir_modes}\n\n\ndef add_container_output_args(argument_parser):\n \"\"\"Define command-line arguments for output of a container (result files).\n @param argument_parser: an argparse parser instance\n \"\"\"\n argument_parser.add_argument(b'--output-directory', metavar=b'DIR', default=b'output.files', help=b\"target directory for result files (default: './output.files')\")\n argument_parser.add_argument(b'--result-files', metavar=b'PATTERN', action=b'append', default=[], help=b\"pattern for specifying which result files should be copied to the output directory (default: '.')\")\n\n\ndef handle_container_output_args(options, parser):\n \"\"\"Handle the options specified by add_container_output_args().\n @return: a dict that can be used as kwargs for the ContainerExecutor.execute_run()\n \"\"\"\n if options.result_files:\n result_files_patterns = [ os.path.normpath(p) for p in options.result_files if p ]\n for pattern in result_files_patterns:\n if pattern.startswith(b'..'):\n parser.error((b\"Invalid relative result-files pattern '{}'.\").format(pattern))\n\n else:\n result_files_patterns = [\n b'.']\n output_dir = options.output_directory\n if os.path.exists(output_dir) and not os.path.isdir(output_dir):\n parser.error((b\"Output directory '{}' must not refer to an existing file.\").format(output_dir))\n return {b'output_dir': output_dir, b'result_files_patterns': result_files_patterns}\n\n\ndef main(argv=None):\n \"\"\"\n A simple command-line interface for the containerexecutor module of BenchExec.\n \"\"\"\n if argv is None:\n argv = sys.argv\n parser = argparse.ArgumentParser(fromfile_prefix_chars=b'@', description=b\"Execute a command inside a simple container, i.e., partially\\n isolated from the host. 
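handle_basic_container_args above reduces the command-line flags to constructor kwargs, applying the defaults for /tmp, /run and /. A small sketch of driving it directly ('/home' is a placeholder and must be an existing directory, since the handler validates paths with os.path.isdir; on some systems extra DNS-related entries are added to dir_modes as well):

    import argparse
    from benchexec import containerexecutor

    parser = argparse.ArgumentParser()
    containerexecutor.add_basic_container_args(parser)
    opts = parser.parse_args(['--read-only-dir', '/home', '--keep-tmp'])

    kwargs = containerexecutor.handle_basic_container_args(opts, parser)
    # kwargs['dir_modes'] now maps '/home' to read-only and '/tmp' to full
    # access (because of --keep-tmp), with '/' overlaid and '/run' hidden.
    print(kwargs['dir_modes'])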
Command-line parameters can additionally be read\\n from a file if file name prefixed with '@' is given as argument.\\n Part of BenchExec: https://github.com/sosy-lab/benchexec/\")\n parser.add_argument(b'--dir', metavar=b'DIR', help=b'working directory for executing the command (default is current directory)')\n parser.add_argument(b'--root', action=b'store_true', help=b'Use UID 0 and GID 0 (i.e., fake root account) within container. This is mostly safe, but processes can use this to circumvent some file system restrictions of the container and access otherwise hidden directories.')\n parser.add_argument(b'--uid', metavar=b'UID', type=int, default=None, help=b'use given UID within container (default: current UID)')\n parser.add_argument(b'--gid', metavar=b'GID', type=int, default=None, help=b'use given GID within container (default: current UID)')\n add_basic_container_args(parser)\n add_container_output_args(parser)\n baseexecutor.add_basic_executor_options(parser)\n options = parser.parse_args(argv[1:])\n baseexecutor.handle_basic_executor_options(options, parser)\n logging.debug(b'This is containerexec %s.', __version__)\n container_options = handle_basic_container_args(options, parser)\n container_output_options = handle_container_output_args(options, parser)\n if options.root:\n if options.uid is not None or options.gid is not None:\n parser.error(b'Cannot combine option --root with --uid/--gid')\n options.uid = 0\n options.gid = 0\n formatted_args = (b' ').join(map(util.escape_string_shell, options.args))\n logging.info(b'Starting command %s', formatted_args)\n executor = ContainerExecutor(uid=options.uid, gid=options.gid, **container_options)\n\n def signal_handler_kill(signum, frame):\n executor.stop()\n\n signal.signal(signal.SIGTERM, signal_handler_kill)\n signal.signal(signal.SIGQUIT, signal_handler_kill)\n signal.signal(signal.SIGINT, signal_handler_kill)\n try:\n result = executor.execute_run(options.args, workingDir=options.dir, **container_output_options)\n except (BenchExecException, OSError) as e:\n if options.debug:\n logging.exception(e)\n sys.exit((b'Cannot execute {0}: {1}.').format(util.escape_string_shell(options.args[0]), e))\n\n return result.signal or result.value\n\n\nclass ContainerExecutor(baseexecutor.BaseExecutor):\n \"\"\"Extended executor that allows to start the processes inside containers\n using Linux namespaces.\"\"\"\n\n def __init__(self, use_namespaces=True, uid=None, gid=None, network_access=False, dir_modes={b'/': DIR_OVERLAY, b'/run': DIR_HIDDEN, b'/tmp': DIR_HIDDEN}, container_system_config=True, container_tmpfs=True, *args, **kwargs):\n \"\"\"Create instance.\n @param use_namespaces: If False, disable all container features of this class\n and ignore all other parameters.\n @param uid: Which UID to use inside container.\n @param gid: Which GID to use inside container.\n @param network_access:\n Whether to allow processes in the contain to access the network.\n @param dir_modes: Dict that specifies which directories should be accessible\n and how in the container.\n @param container_system_config: Whether to use a special system configuration in\n the container that disables all remote host and user lookups, sets a custom\n hostname, etc.\n \"\"\"\n super(ContainerExecutor, self).__init__(*args, **kwargs)\n self._use_namespaces = use_namespaces\n if not use_namespaces:\n return\n else:\n self._container_tmpfs = container_tmpfs\n self._container_system_config = container_system_config\n self._uid = uid if uid is not None else 
container.CONTAINER_UID if container_system_config else os.getuid()\n self._gid = gid if gid is not None else container.CONTAINER_GID if container_system_config else os.getgid()\n self._allow_network = network_access\n self._env_override = {}\n if container_system_config:\n self._env_override[b'HOME'] = container.CONTAINER_HOME\n if container.CONTAINER_HOME not in dir_modes:\n dir_modes[container.CONTAINER_HOME] = DIR_HIDDEN\n if b'/' not in dir_modes:\n raise ValueError(b\"Need directory mode for '/'.\")\n for path, kind in dir_modes.items():\n if kind not in DIR_MODES:\n raise ValueError((b\"Invalid value '{}' for directory '{}'.\").format(kind, path))\n if not os.path.isabs(path):\n raise ValueError((b\"Invalid non-absolute directory '{}'.\").format(path))\n if path == b'/proc':\n raise ValueError(b'Cannot specify directory mode for /proc.')\n\n sorted_special_dirs = sorted(((path.encode(), kind) for path, kind in dir_modes.items()), key=lambda tupl: len(tupl[0]))\n self._dir_modes = collections.OrderedDict(sorted_special_dirs)\n\n def is_accessible(path):\n mode = container.determine_directory_mode(self._dir_modes, path)\n return os.access(path, os.R_OK) and mode not in [None, container.DIR_HIDDEN]\n\n if not is_accessible(container.LXCFS_PROC_DIR):\n logging.info(b'LXCFS is not available, some host information like the uptime leaks into the container.')\n if not NATIVE_CLONE_CALLBACK_SUPPORTED:\n logging.debug(b'Using a non-robust fallback for clone callback. If you have many threads please read https://github.com/sosy-lab/benchexec/issues/435')\n return\n\n def _get_result_files_base(self, temp_dir):\n \"\"\"Given the temp directory that is created for each run, return the path to the\n directory where files created by the tool are stored.\"\"\"\n if not self._use_namespaces:\n return super(ContainerExecutor, self)._get_result_files_base(temp_dir)\n else:\n return os.path.join(temp_dir, b'temp')\n\n def execute_run(self, args, workingDir=None, output_dir=None, result_files_patterns=[], rootDir=None, environ=os.environ.copy()):\n \"\"\"\n This method executes the command line and waits for the termination of it,\n handling all setup and cleanup.\n\n Note that this method does not expect to be interrupted by KeyboardInterrupt\n and does not guarantee proper cleanup if KeyboardInterrupt is raised!\n If this method runs on the main thread of your program,\n make sure to set a signal handler for signal.SIGINT that calls stop() instead.\n\n @param args: the command line to run\n @param rootDir: None or a root directory that contains all relevant files\n for starting a new process\n @param workingDir:\n None or a directory which the execution should use as working directory\n @param output_dir: the directory where to write result files\n (required if result_files_pattern)\n @param result_files_patterns:\n a list of patterns of files to retrieve as result files\n \"\"\"\n temp_dir = None\n if rootDir is None:\n temp_dir = tempfile.mkdtemp(prefix=b'BenchExec_run_')\n pid = None\n returnvalue = 0\n logging.debug(b'Starting process.')\n try:\n pid, result_fn = self._start_execution(args=args, stdin=None, stdout=None, stderr=None, env=environ, root_dir=rootDir, cwd=workingDir, temp_dir=temp_dir, cgroups=Cgroup({}), output_dir=output_dir, result_files_patterns=result_files_patterns, child_setup_fn=util.dummy_fn, parent_setup_fn=util.dummy_fn, parent_cleanup_fn=util.dummy_fn)\n with self.SUB_PROCESS_PIDS_LOCK:\n self.SUB_PROCESS_PIDS.add(pid)\n returnvalue, unused_ru_child, unused = result_fn()\n 
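The OrderedDict built in __init__ above is sorted by path length so that, when container.determine_directory_mode walks it, the longest (most specific) matching prefix decides a path's mode. A simplified stand-in for that lookup (not the real implementation):

    from collections import OrderedDict

    modes = {b'/': b'overlay', b'/home': b'read-only', b'/home/user/tmp': b'hidden'}
    dir_modes = OrderedDict(sorted(modes.items(), key=lambda item: len(item[0])))

    def lookup_mode(path):
        result = None
        for prefix, mode in dir_modes.items():
            # Later entries are longer, so the most specific match wins.
            if path == prefix or path.startswith(prefix.rstrip(b'/') + b'/'):
                result = mode
        return result

    assert lookup_mode(b'/etc') == b'overlay'
    assert lookup_mode(b'/home/user') == b'read-only'
    assert lookup_mode(b'/home/user/tmp/x') == b'hidden'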
finally:\n logging.debug(b'Process terminated, exit code %s.', returnvalue)\n with self.SUB_PROCESS_PIDS_LOCK:\n self.SUB_PROCESS_PIDS.discard(pid)\n if temp_dir is not None:\n logging.debug(b'Cleaning up temporary directory.')\n util.rmtree(temp_dir, onerror=util.log_rmtree_error)\n\n return util.ProcessExitCode.from_raw(returnvalue)\n\n def _start_execution(self, root_dir=None, output_dir=None, result_files_patterns=[], memlimit=None, memory_nodes=None, *args, **kwargs):\n if not self._use_namespaces:\n return super(ContainerExecutor, self)._start_execution(*args, **kwargs)\n else:\n if result_files_patterns:\n if not output_dir:\n raise ValueError(b'Output directory needed for retaining result files.')\n for pattern in result_files_patterns:\n if not pattern:\n raise ValueError((b'Invalid empty result-files pattern in {}').format(result_files_patterns))\n pattern = os.path.normpath(pattern)\n if pattern.startswith(b'..'):\n raise ValueError((b\"Invalid relative result-files pattern '{}'.\").format(pattern))\n\n return self._start_execution_in_container(root_dir=root_dir, output_dir=output_dir, memlimit=memlimit, memory_nodes=memory_nodes, result_files_patterns=result_files_patterns, *args, **kwargs)\n\n def _start_execution_in_container(self, args, stdin, stdout, stderr, env, root_dir, cwd, temp_dir, memlimit, memory_nodes, cgroups, output_dir, result_files_patterns, parent_setup_fn, child_setup_fn, parent_cleanup_fn):\n \"\"\"Execute the given command and measure its resource usage similarly to\n super()._start_execution(), but inside a container implemented using Linux\n namespaces. The command has no network access (only loopback),\n a fresh directory as /tmp and no write access outside of this,\n and it does not see other processes except itself.\n \"\"\"\n assert self._use_namespaces\n if root_dir is None:\n env.update(self._env_override)\n CHILD_OSERROR = 128\n CHILD_UNKNOWN_ERROR = 129\n from_parent, to_grandchild = os.pipe()\n from_grandchild, to_parent = os.pipe()\n MARKER_USER_MAPPING_COMPLETED = b'A'\n MARKER_PARENT_COMPLETED = b'B'\n MARKER_PARENT_POST_RUN_COMPLETED = b'C'\n if root_dir is None:\n cwd = os.path.abspath(cwd or os.curdir)\n else:\n root_dir = os.path.abspath(root_dir)\n cwd = os.path.abspath(cwd)\n\n def grandchild():\n \"\"\"Setup everything inside the process that finally exec()s the tool.\"\"\"\n try:\n my_outer_pid = container.get_my_pid_from_procfs()\n container.mount_proc(self._container_system_config)\n container.drop_capabilities()\n container.reset_signal_handling()\n child_setup_fn()\n os.write(to_parent, str(my_outer_pid).encode())\n received = os.read(from_parent, 1)\n assert received == MARKER_PARENT_COMPLETED, received\n finally:\n os.close(from_parent)\n os.close(to_parent)\n\n def child():\n \"\"\"Setup everything inside the container,\n start the tool, and wait for result.\"\"\"\n try:\n logging.debug(b'Child: child process of RunExecutor with PID %d started', container.get_my_pid_from_procfs())\n container.block_all_signals()\n necessary_fds = {\n sys.stdin,\n sys.stdout,\n sys.stderr,\n to_parent,\n from_parent,\n stdin,\n stdout,\n stderr} - {\n None}\n container.close_open_fds(keep_files=necessary_fds)\n try:\n if self._container_system_config:\n libc.sethostname(container.CONTAINER_HOSTNAME)\n if not self._allow_network:\n container.activate_network_interface(b'lo')\n received = os.read(from_parent, len(MARKER_USER_MAPPING_COMPLETED))\n assert received == MARKER_USER_MAPPING_COMPLETED, received\n if root_dir is not None:\n 
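The child/grandchild setup above synchronizes three processes through two pipes and one-byte markers, each os.read blocking until the peer finishes its phase. The same handshake pattern reduced to two processes (a sketch of the idea, not benchexec code):

    import os

    MARKER_GO = b'A'
    r_parent, w_child = os.pipe()   # child -> parent
    r_child, w_parent = os.pipe()   # parent -> child

    pid = os.fork()
    if pid == 0:
        # Child: report our PID, then block until the parent finishes setup.
        os.write(w_child, str(os.getpid()).encode())
        assert os.read(r_child, 1) == MARKER_GO
        os._exit(0)
    else:
        # Parent: learn the child's PID, do setup, then release the child.
        child_pid = int(os.read(r_parent, 10))
        os.write(w_parent, MARKER_GO)
        os.waitpid(pid, 0)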
self._setup_root_filesystem(root_dir)\n else:\n self._setup_container_filesystem(temp_dir, output_dir if result_files_patterns else None, memlimit, memory_nodes)\n libc.prctl(libc.PR_SET_DUMPABLE, libc.SUID_DUMP_DISABLE, 0, 0, 0)\n except EnvironmentError as e:\n logging.critical(b'Failed to configure container: %s', e)\n return CHILD_OSERROR\n\n try:\n os.chdir(cwd)\n except EnvironmentError as e:\n logging.critical(b'Cannot change into working directory inside container: %s', e)\n return CHILD_OSERROR\n\n container.setup_seccomp_filter()\n try:\n grandchild_proc = subprocess.Popen(args, stdin=stdin, stdout=stdout, stderr=stderr, env=env, close_fds=False, preexec_fn=grandchild)\n except (EnvironmentError, RuntimeError) as e:\n logging.critical(b'Cannot start process: %s', e)\n return CHILD_OSERROR\n\n necessary_capabilities = [libc.CAP_SYS_ADMIN] if result_files_patterns else []\n container.drop_capabilities(keep=necessary_capabilities)\n container.close_open_fds(keep_files={\n sys.stdout, sys.stderr, to_parent, from_parent})\n if _HAS_SIGWAIT:\n grandchild_result = container.wait_for_child_and_forward_signals(grandchild_proc.pid, args[0])\n else:\n container.forward_all_signals_async(grandchild_proc.pid, args[0])\n grandchild_result = self._wait_for_process(grandchild_proc.pid, args[0])\n logging.debug(b'Child: process %s terminated with exit code %d.', args[0], grandchild_result[0])\n if result_files_patterns:\n libc.umount(temp_dir.encode())\n libc.prctl(libc.PR_SET_DUMPABLE, libc.SUID_DUMP_USER, 0, 0, 0)\n os.write(to_parent, pickle.dumps(grandchild_result))\n os.close(to_parent)\n assert os.read(from_parent, 1) == MARKER_PARENT_POST_RUN_COMPLETED\n os.close(from_parent)\n return 0\n except EnvironmentError:\n logging.exception(b'Error in child process of RunExecutor')\n return CHILD_OSERROR\n except:\n logging.exception(b'Error in child process of RunExecutor')\n return CHILD_UNKNOWN_ERROR\n\n return\n\n try:\n try:\n child_pid = container.execute_in_namespace(child, use_network_ns=not self._allow_network)\n except OSError as e:\n if e.errno == errno.EPERM and util.try_read_file(b'/proc/sys/kernel/unprivileged_userns_clone') == b'0':\n raise BenchExecException(b\"Unprivileged user namespaces forbidden on this system, please enable them with 'sysctl kernel.unprivileged_userns_clone=1' or disable container mode\")\n else:\n raise BenchExecException(b'Creating namespace for container mode failed: ' + os.strerror(e.errno))\n\n logging.debug(b'Parent: child process of RunExecutor with PID %d started.', child_pid)\n\n def check_child_exit_code():\n \"\"\"Check if the child process terminated cleanly\n and raise an error otherwise.\"\"\"\n child_exitcode, unused_child_rusage = self._wait_for_process(child_pid, args[0])\n child_exitcode = util.ProcessExitCode.from_raw(child_exitcode)\n logging.debug(b'Parent: child process of RunExecutor with PID %d terminated with %s.', child_pid, child_exitcode)\n if child_exitcode:\n if child_exitcode.value:\n if child_exitcode.value == CHILD_OSERROR:\n raise BenchExecException(b'execution in container failed, check log for details')\n elif child_exitcode.value == CHILD_UNKNOWN_ERROR:\n raise BenchExecException(b'unexpected error in container')\n raise OSError(child_exitcode.value, os.strerror(child_exitcode.value))\n raise OSError(0, b'Child process of RunExecutor terminated with ' + str(child_exitcode))\n\n os.close(from_parent)\n os.close(to_parent)\n container.setup_user_mapping(child_pid, uid=self._uid, gid=self._gid)\n os.write(to_grandchild, 
MARKER_USER_MAPPING_COMPLETED)\n            try:\n                grandchild_pid = int(os.read(from_grandchild, 10))\n            except ValueError:\n                check_child_exit_code()\n                assert False, b'Child process of RunExecutor terminated cleanly but did not send expected data.'\n\n            logging.debug(b'Parent: executing %s in grand child with PID %d via child with PID %d.', args[0], grandchild_pid, child_pid)\n            cgroups.add_task(grandchild_pid)\n            parent_setup = parent_setup_fn()\n            os.write(to_grandchild, MARKER_PARENT_COMPLETED)\n            from_grandchild_copy = os.dup(from_grandchild)\n            to_grandchild_copy = os.dup(to_grandchild)\n        finally:\n            os.close(from_grandchild)\n            os.close(to_grandchild)\n\n        def wait_for_grandchild():\n            try:\n                received = os.read(from_grandchild_copy, 1024)\n            except OSError as e:\n                if self.PROCESS_KILLED and e.errno == errno.EINTR:\n                    received = os.read(from_grandchild_copy, 1024)\n                else:\n                    raise e\n\n            if not received:\n                os.close(from_grandchild_copy)\n                os.close(to_grandchild_copy)\n                check_child_exit_code()\n                assert False, b'Child process terminated cleanly without sending result'\n            exitcode, ru_child = pickle.loads(received)\n            base_path = (b'/proc/{}/root').format(child_pid)\n            parent_cleanup = parent_cleanup_fn(parent_setup, util.ProcessExitCode.from_raw(exitcode), base_path)\n            if result_files_patterns:\n                self._transfer_output_files(base_path + temp_dir, cwd, output_dir, result_files_patterns)\n            os.close(from_grandchild_copy)\n            os.write(to_grandchild_copy, MARKER_PARENT_POST_RUN_COMPLETED)\n            os.close(to_grandchild_copy)\n            check_child_exit_code()\n            return exitcode, ru_child, parent_cleanup\n\n        return grandchild_pid, wait_for_grandchild\n\n    def _setup_container_filesystem(self, temp_dir, output_dir, memlimit, memory_nodes):\n        \"\"\"Setup the filesystem layout in the container.\n        As first step, we create a copy of all existing mountpoints in mount_base,\n        recursively, and as \"private\" mounts\n        (i.e., changes to existing mountpoints afterwards won't propagate to our copy).\n        Then we iterate over all mountpoints and change them according to the mode\n        the user has specified (hidden, read-only, overlay, or full-access).\n        This has to be done for each mountpoint because overlays are not recursive.\n        Then we chroot into the new mount hierarchy.\n\n        The new filesystem layout still has a view of the host's /proc. We do not mount\n        a fresh /proc here because the grandchild still needs the old /proc.\n\n        We do not simply iterate over all existing mount points and set them to\n        read-only/overlay them, because it is easier to create a new hierarchy and\n        chroot into it. 
First, we still have access to the original mountpoints while\n doing so, and second, we avoid race conditions if someone else changes the\n existing mountpoints.\n\n @param temp_dir:\n The base directory under which all our directories should be created.\n \"\"\"\n temp_base = self._get_result_files_base(temp_dir).encode()\n temp_dir = temp_dir.encode()\n tmpfs_opts = [\n b'size=' + str(memlimit or b'100%')]\n if memory_nodes:\n tmpfs_opts.append(b'mpol=bind:' + (b',').join(map(str, memory_nodes)))\n tmpfs_opts = (b',').join(tmpfs_opts).encode()\n if self._container_tmpfs:\n libc.mount(None, temp_dir, b'tmpfs', 0, tmpfs_opts)\n mount_base = os.path.join(temp_dir, b'mount')\n os.mkdir(mount_base)\n os.mkdir(temp_base)\n work_base = os.path.join(temp_dir, b'overlayfs')\n os.mkdir(work_base)\n container.duplicate_mount_hierarchy(mount_base, temp_base, work_base, self._dir_modes)\n\n def make_tmpfs_dir(path):\n \"\"\"Ensure that a tmpfs is mounted on path, if the path exists\"\"\"\n if path in self._dir_modes:\n return\n else:\n mount_tmpfs = mount_base + path\n temp_tmpfs = temp_base + path\n util.makedirs(temp_tmpfs, exist_ok=True)\n if os.path.isdir(mount_tmpfs):\n if self._container_tmpfs:\n container.make_bind_mount(temp_tmpfs, mount_tmpfs)\n else:\n libc.mount(None, mount_tmpfs, b'tmpfs', 0, tmpfs_opts)\n return\n\n make_tmpfs_dir(b'/dev/shm')\n make_tmpfs_dir(b'/run/shm')\n if self._container_system_config:\n container.setup_container_system_config(temp_base, mount_base, self._dir_modes)\n if output_dir:\n util.makedirs(mount_base + temp_dir, exist_ok=True)\n container.make_bind_mount(temp_base, mount_base + temp_dir, read_only=True)\n if os.path.exists(mount_base + temp_dir):\n util.makedirs(temp_base + temp_dir, exist_ok=True)\n container.make_bind_mount(temp_base + temp_dir, mount_base + temp_dir)\n container.chroot(mount_base)\n return\n\n def _setup_root_filesystem(self, root_dir):\n \"\"\"Setup the filesystem layout in the given root directory.\n Create a copy of the existing proc- and dev-mountpoints in the specified root\n directory. 
Afterwards we chroot into it.\n\n @param root_dir:\n The path of the root directory that is used to execute the process.\n \"\"\"\n root_dir = root_dir.encode()\n proc_base = os.path.join(root_dir, b'proc')\n util.makedirs(proc_base, exist_ok=True)\n dev_base = os.path.join(root_dir, b'dev')\n util.makedirs(dev_base, exist_ok=True)\n container.make_bind_mount(b'/dev/', dev_base, recursive=True, private=True)\n container.make_bind_mount(b'/proc/', proc_base, recursive=True, private=True)\n os.chroot(root_dir)\n\n def _transfer_output_files(self, tool_output_dir, working_dir, output_dir, patterns):\n \"\"\"Transfer files created by the tool in the container to the output directory.\n @param tool_output_dir:\n The directory under which all tool output files are created.\n @param working_dir: The absolute working directory of the tool in the container.\n @param output_dir: the directory where to write result files\n @param patterns: a list of patterns of files to retrieve as result files\n \"\"\"\n assert output_dir\n assert patterns\n if any(os.path.isabs(pattern) for pattern in patterns):\n base_dir = tool_output_dir\n else:\n base_dir = tool_output_dir + working_dir\n\n def transfer_file(abs_file):\n assert abs_file.startswith(base_dir)\n file = os.path.join(b'/', os.path.relpath(abs_file, base_dir))\n if os.path.isfile(abs_file) and not os.path.islink(abs_file) and not container.is_container_system_config_file(file):\n target = output_dir + file\n logging.debug(b'Transferring output file %s to %s', abs_file, target)\n try:\n os.makedirs(os.path.dirname(target))\n except EnvironmentError:\n pass\n\n try:\n shutil.move(abs_file, target)\n except EnvironmentError as e:\n logging.warning(b\"Could not retrieve output file '%s': %s\", file, e)\n\n for pattern in patterns:\n if os.path.isabs(pattern):\n pattern = tool_output_dir + pattern\n else:\n pattern = tool_output_dir + os.path.join(working_dir, pattern)\n for abs_file in util.maybe_recursive_iglob(os.path.normpath(pattern), recursive=True):\n if os.path.isdir(abs_file):\n for root, unused_dirs, files in os.walk(abs_file):\n for file in files:\n transfer_file(os.path.join(root, file))\n\n else:\n transfer_file(abs_file)\n\n\nif __name__ == b'__main__':\n main()","sub_path":"pycfiles/BenchExec-2.6-py2.7/containerexecutor.py","file_name":"containerexecutor.py","file_ext":"py","file_size_in_byte":31833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"392664306","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='departments',\n fields=[\n ('dept_no', models.CharField(max_length=4, serialize=False, primary_key=True)),\n ('dept_name', models.CharField(max_length=49)),\n ],\n ),\n migrations.CreateModel(\n name='dept_emp',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('from_date', models.DateField()),\n ('to_date', models.DateField()),\n ('dept_no', models.ForeignKey(to='django_models.departments')),\n ],\n ),\n migrations.CreateModel(\n name='dept_manager',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('from_date', models.DateField()),\n ('to_date', models.DateField()),\n ('dept_no', models.ForeignKey(to='django_models.departments')),\n ],\n ),\n migrations.CreateModel(\n 
name='employees',\n            fields=[\n                ('empl_no', models.IntegerField(serialize=False, primary_key=True)),\n                ('birthdate', models.DateField()),\n                ('first_name', models.CharField(max_length=14)),\n                ('last_name', models.CharField(max_length=16)),\n                ('gender', models.CharField(max_length=1, choices=[(b'M', b'Male'), (b'F', b'Female')])),\n                ('hire_date', models.DateField()),\n            ],\n        ),\n        migrations.CreateModel(\n            name='salaries',\n            fields=[\n                ('salary', models.IntegerField()),\n                ('from_date', models.DateField(serialize=False, primary_key=True)),\n                ('to_date', models.DateField()),\n                ('emp_no', models.ForeignKey(to='django_models.employees')),\n            ],\n        ),\n        migrations.CreateModel(\n            name='titles',\n            fields=[\n                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n                ('title', models.CharField(max_length=50)),\n                ('from_date', models.DateField()),\n                ('to_date', models.DateField()),\n                ('emp_no', models.ForeignKey(to='django_models.employees')),\n            ],\n        ),\n        migrations.AddField(\n            model_name='dept_manager',\n            name='emp_no',\n            field=models.ForeignKey(to='django_models.employees'),\n        ),\n        migrations.AddField(\n            model_name='dept_emp',\n            name='emp_no',\n            field=models.ForeignKey(to='django_models.employees'),\n        ),\n        migrations.AlterUniqueTogether(\n            name='titles',\n            unique_together=set([('title', 'from_date')]),\n        ),\n    ]\n","sub_path":"inicial/django_models/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":3119,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"513914661","text":"# encoding: utf-8\n\nimport unicodecsv\n\nfrom django.http import HttpResponse\nfrom django.db import models\n\n\nENCODING = 'ISO-8859-15'\n\nEXPORT_FORMATS = [\n    # (name, dialect, extension)\n    ('XLSX', 'xlsx', 'xlsx'),\n    ('CSV', 'excel', 'csv'),\n    ('TSV', 'excel-tab', 'tsv'),\n]\n\n\nclass CsvExportMixin(object):\n    @classmethod\n    def get_csv_fields(cls, event):\n        fields = []\n\n        for field in cls._meta.fields:\n            if not isinstance(field, models.ForeignKey):\n                fields.append((cls, field))\n\n        for field, unused in cls._meta.get_m2m_with_model():\n            fields.append((cls, field))\n\n        return fields\n\n    def get_csv_related(self):\n        return dict()\n\n\ndef write_header_row(event, writer, fields, m2m_mode='separate_columns'):\n    header_row = []\n\n    for (model, field) in fields:\n        if isinstance(field, (unicode, str)):\n            field_name = field\n            field_type = None\n        else:\n            field_name = field.name\n            field_type = type(field)\n\n        if field_type == models.ManyToManyField:\n            if m2m_mode == 'separate_columns':\n                choices = get_m2m_choices(event, field)\n                header_row.extend(\n                    u\"{field_name}: {choice}\"\n                    .format(field_name=field_name, choice=choice.__unicode__())\n                    for choice in choices\n                )\n            elif m2m_mode == 'comma_separated':\n                header_row.append(field_name)\n            else:\n                raise NotImplementedError(m2m_mode)\n        else:\n            header_row.append(field_name)\n\n    writer.writerow(header_row)\n\n\ndef get_m2m_choices(event, field):\n    target_model = field.rel.to\n\n    if any(f.name == 'event' for f in target_model._meta.fields):\n        choices = target_model.objects.filter(event=event)\n    else:\n        choices = target_model.objects.all()\n\n    return choices.order_by('pk')\n\n\ndef write_row(event, writer, fields, model_instance, m2m_mode):\n    result_row = []\n    related = model_instance.get_csv_related()\n\n    for model, field in fields:\n        if isinstance(field, (unicode, str)):\n            field_name = field\n            field_type = None\n        else:\n            field_name = field.name\n            field_type = 
type(field)\n\n        if model in related:\n            source_instance = related.get(model, None)\n        else:\n            source_instance = model_instance\n\n        field_value = getattr(source_instance, field_name) if source_instance is not None else None\n\n        if field_type is models.ManyToManyField and field_value is not None:\n            if m2m_mode == 'separate_columns':\n                choices = get_m2m_choices(event, field)\n\n                result_row.extend(\n                    field_value.filter(pk=choice.pk).exists()\n                    for choice in choices\n                )\n            elif m2m_mode == 'comma_separated':\n                result_row.append(u', '.join(item.__unicode__() for item in field_value.all()))\n            else:\n                raise NotImplementedError(m2m_mode)\n        elif field_type is models.DateTimeField and field_value is not None:\n            from django.utils.timezone import localtime\n            result_row.append(localtime(field_value).replace(tzinfo=None))\n        else:\n            result_row.append(field_value)\n\n    writer.writerow(result_row)\n\n\ndef make_writer(output_stream, dialect):\n    if dialect == 'xlsx':\n        from .excel_export import XlsxWriter\n        return XlsxWriter(output_stream)\n    else:\n        return unicodecsv.writer(output_stream, encoding=ENCODING, dialect=dialect)\n\n\ndef export_csv(event, model, model_instances, output_file, m2m_mode='separate_columns', dialect='excel-tab'):\n    fields = model.get_csv_fields(event)\n    writer = make_writer(output_file, dialect)\n\n    write_header_row(event, writer, fields, m2m_mode)\n\n    for model_instance in model_instances:\n        if isinstance(model_instance, (str, unicode, int)):\n            model_instance = model.objects.get(pk=int(model_instance))\n\n        write_row(event, writer, fields, model_instance, m2m_mode)\n\n    if getattr(writer, 'must_close', False):\n        writer.close()\n\n\nCONTENT_TYPES = dict(\n    xlsx='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet',\n)\n\n\ndef csv_response(*args, **kwargs):\n    filename = kwargs.pop('filename')\n    dialect = kwargs.get('dialect', 'excel')\n\n    response = HttpResponse(content_type=CONTENT_TYPES.get(dialect, 'text/csv'))\n    response['Content-Disposition'] = 'attachment; filename=\"{filename}\"'.format(\n        filename=filename\n    )\n\n    kwargs['output_file'] = response\n\n    export_csv(*args, **kwargs)\n\n    return response\n","sub_path":"core/csv_export.py","file_name":"csv_export.py","file_ext":"py","file_size_in_byte":4715,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"156917912","text":"# -*- coding: utf-8 -*-\nimport json\nimport os\n\n\nclass MetaData:\n    def __init__(self, name=\"Unbenannt\", stagingDir=\"\", file=\"\", date=\"Unbekannt\", author=\"Unbekannt\", comments=\"\"):\n        \"\"\"\n        Collects all metadata for a file\n        :param file: Filename without Extension\n        :param stagingDir: Directory the file is in\n        :param name: Full File Name\n        :param date: Last Edit Date\n        :param author: File Author\n        :param comments: Comments on File\n        :return:\n        \"\"\"\n        self.stagingDir = stagingDir\n        self.file = file\n        self.name = name\n        self.date = date\n        self.author = author\n        self.comments = comments\n\n    def createMetaFile(self):\n        \"\"\"\n        Creates the meta file from the given data\n        :return:\n        \"\"\"\n        os.chdir(self.stagingDir)\n        self.file = self.file[self.file.rfind(\"/\") + 1:]\n        with open(self.name + \".meta\", \"w\") as file:\n            meta = {'Meta': {'name': self.name,\n                             'date': self.date,\n                             'author': self.author,\n                             'comments': self.comments,\n                             'fullFile': self.file}}\n            
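            # Illustrative sketch (hypothetical output, assuming the defaults above):\n            #   {\"Meta\": {\"name\": \"Unbenannt\", \"date\": \"Unbekannt\", \"author\": \"Unbekannt\",\n            #             \"comments\": \"\", \"fullFile\": \"\"}}\n            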
file.write(json.dumps(meta))\n\n","sub_path":"IO/fileMeta.py","file_name":"fileMeta.py","file_ext":"py","file_size_in_byte":1233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"167072364","text":"from __future__ import unicode_literals\nfrom gratipay.models._mixin_team import StubParticipantAdded\n\nfrom gratipay.testing import Harness\nfrom gratipay.security.user import User\nfrom gratipay.models.team import Team\n\n\nclass TestNewTeams(Harness):\n\n valid_data = {\n 'name': 'Gratiteam',\n 'homepage': 'http://gratipay.com/',\n 'agree_terms': 'true',\n 'product_or_service': 'Sample Product',\n 'getting_paid': 'Getting Paid',\n 'getting_involved': 'Getting Involved'\n }\n\n def post_new(self, data, auth_as='alice', expected=200):\n r = self.client.POST('/teams/create.json', data=data, auth_as=auth_as, raise_immediately=False)\n assert r.code == expected\n return r\n\n def test_harness_can_make_a_team(self):\n team = self.make_team()\n assert team.name == 'The A Team'\n assert team.owner == 'hannibal'\n\n def test_can_construct_from_slug(self):\n self.make_team()\n team = Team.from_slug('TheATeam')\n assert team.name == 'The A Team'\n assert team.owner == 'hannibal'\n\n def test_can_construct_from_id(self):\n team = Team.from_id(self.make_team().id)\n assert team.name == 'The A Team'\n assert team.owner == 'hannibal'\n\n def test_can_create_new_team(self):\n self.make_participant('alice', claimed_time='now', email_address='', last_ach_result='')\n self.post_new(dict(self.valid_data))\n team = self.db.one(\"SELECT * FROM teams\")\n assert team\n assert team.owner == 'alice'\n\n def test_401_for_anon_creating_new_team(self):\n self.post_new(self.valid_data, auth_as=None, expected=401)\n assert self.db.one(\"SELECT COUNT(*) FROM teams\") == 0\n\n def test_error_message_for_no_valid_email(self):\n self.make_participant('alice', claimed_time='now')\n r = self.post_new(dict(self.valid_data), expected=400)\n assert self.db.one(\"SELECT COUNT(*) FROM teams\") == 0\n assert \"You must have a verified email address to apply for a new team.\" in r.body\n\n def test_error_message_for_no_payout_route(self):\n self.make_participant('alice', claimed_time='now', email_address='alice@example.com')\n r = self.post_new(dict(self.valid_data), expected=400)\n assert self.db.one(\"SELECT COUNT(*) FROM teams\") == 0\n assert \"You must attach a bank account or PayPal to apply for a new team.\" in r.body\n\n def test_error_message_for_terms(self):\n self.make_participant('alice', claimed_time='now', email_address='alice@example.com', last_ach_result='')\n data = dict(self.valid_data)\n del data['agree_terms']\n r = self.post_new(data, expected=400)\n assert self.db.one(\"SELECT COUNT(*) FROM teams\") == 0\n assert \"Please agree to the terms of service.\" in r.body\n\n def test_error_message_for_missing_fields(self):\n self.make_participant('alice', claimed_time='now', email_address='alice@example.com', last_ach_result='')\n data = dict(self.valid_data)\n del data['name']\n r = self.post_new(data, expected=400)\n assert self.db.one(\"SELECT COUNT(*) FROM teams\") == 0\n assert \"Please fill out the 'Team Name' field.\" in r.body\n\n def test_error_message_for_slug_collision(self):\n self.make_participant('alice', claimed_time='now', email_address='alice@example.com', last_ach_result='')\n self.post_new(dict(self.valid_data))\n r = self.post_new(dict(self.valid_data), expected=400)\n assert self.db.one(\"SELECT COUNT(*) FROM teams\") == 1\n assert \"Sorry, 
there is already a team using 'gratiteam'.\" in r.body\n\n\nclass TestOldTeams(Harness):\n\n    def setUp(self):\n        Harness.setUp(self)\n        self.team = self.make_participant('A-Team', number='plural')\n\n    def test_is_team(self):\n        expected = True\n        actual = self.team.IS_PLURAL\n        assert actual == expected\n\n    def test_show_as_team_to_admin(self):\n        self.make_participant('alice', is_admin=True)\n        user = User.from_username('alice')\n        assert self.team.show_as_team(user)\n\n    def test_show_as_team_to_team_member(self):\n        self.make_participant('alice')\n        self.team.add_member(self.make_participant('bob', claimed_time='now'))\n        user = User.from_username('bob')\n        assert self.team.show_as_team(user)\n\n    def test_show_as_team_to_non_team_member(self):\n        self.make_participant('alice')\n        self.team.add_member(self.make_participant('bob', claimed_time='now'))\n        user = User.from_username('alice')\n        assert self.team.show_as_team(user)\n\n    def test_show_as_team_to_anon(self):\n        self.make_participant('alice')\n        self.team.add_member(self.make_participant('bob', claimed_time='now'))\n        assert self.team.show_as_team(User())\n\n    def test_dont_show_individuals_as_team(self):\n        alice = self.make_participant('alice', number='singular')\n        assert not alice.show_as_team(User())\n\n    def test_dont_show_plural_no_members_as_team_to_anon(self):\n        group = self.make_participant('Group', number='plural')\n        assert not group.show_as_team(User())\n\n    def test_dont_show_plural_no_members_as_team_to_auth(self):\n        group = self.make_participant('Group', number='plural')\n        self.make_participant('alice')\n        assert not group.show_as_team(User.from_username('alice'))\n\n    def test_show_plural_no_members_as_team_to_self(self):\n        group = self.make_participant('Group', number='plural')\n        assert group.show_as_team(User.from_username('Group'))\n\n    def test_show_plural_no_members_as_team_to_admin(self):\n        group = self.make_participant('Group', number='plural')\n        self.make_participant('Admin', is_admin=True)\n        assert group.show_as_team(User.from_username('Admin'))\n\n    def test_can_add_members(self):\n        alice = self.make_participant('alice', claimed_time='now')\n        expected = True\n        self.team.add_member(alice)\n        actual = alice.member_of(self.team)\n        assert actual == expected\n\n    def test_get_teams_for_member(self):\n        alice = self.make_participant('alice', claimed_time='now')\n        bob = self.make_participant('bob', claimed_time='now')\n        team = self.make_participant('B-Team', number='plural')\n        self.team.add_member(alice)\n        team.add_member(bob)\n        expected = 1\n        actual = alice.get_teams().pop().nmembers\n        assert actual == expected\n\n    def test_preclude_adding_stub_participant(self):\n        stub_participant = self.make_participant('stub')\n        with self.assertRaises(StubParticipantAdded):\n            self.team.add_member(stub_participant)\n\n    def test_remove_all_members(self):\n        alice = self.make_participant('alice', claimed_time='now')\n        self.team.add_member(alice)\n        bob = self.make_participant('bob', claimed_time='now')\n        self.team.add_member(bob)\n\n        assert len(self.team.get_current_takes()) == 2 # sanity check\n        self.team.remove_all_members()\n        assert len(self.team.get_current_takes()) == 0\n","sub_path":"tests/py/test_teams.py","file_name":"test_teams.py","file_ext":"py","file_size_in_byte":7086,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"71753036","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\n    Function 2: return the majority/minority nodes\n\n    The higher a node's visit rate (majority), the bluer it is drawn; the lower\n    its visit rate (minority), the redder it is drawn.\n    The threshold that separates the maximum and minimum visit rates is 0.5.\n\n    Note: when this Function 2 receives a project name
and a node ID, it fetches that node's visit rate from\n    MongoDB and returns 'Blue' if the rate is at or above the threshold and 'Red'\n    if it is below.\n\"\"\"\n\nfrom pymongo import MongoClient\n\nclass Function2:\n\n    def is_blue_or_red_node(self, project_name, object_id):\n        conn = MongoClient('mongodb+srv://dots_user:TzE66c5O0KB0bnjG@dots-test-x41en.mongodb.net/test?retryWrites=true&w=majority')\n        db = conn['JMH']\n        collection = db[project_name + '_after_data']\n\n        dataList = collection.find()\n        for data in dataList:\n            if str(data['_id']) == object_id:\n                visit_rate = float(data['visit_rate'])\n\n                if visit_rate >= 0.5:\n                    return 'Blue'\n                else:\n                    return 'Red'\n\nf2 = Function2()\nprint(f2.is_blue_or_red_node('first_project', '5d88f9eebf68e1e79161bac9'))","sub_path":"JMH/Function2.py","file_name":"Function2.py","file_ext":"py","file_size_in_byte":1238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"303616486","text":"#!/usr/bin/env python3\n# Python 3.6\n\nimport random\nimport logging\nimport heapq\nimport time\nfrom math import sqrt\n\n\nimport hlt\nfrom hlt import constants\nfrom hlt.positionals import Direction, Position\nfrom hlt.entity import Dropoff\n\n\n# constants #########################################################\n\nrandom.seed(0)\n\nFULL = 0.975\nEMPTY = 0.05\n\nINDEX_TO_DIRECTION = {\n    0: Direction.North,\n    1: Direction.South,\n    2: Direction.East,\n    3: Direction.West,\n    4: Direction.Still,\n}\n\nDIRECTION_TO_INDEX = {v: k for k, v in INDEX_TO_DIRECTION.items()}\n\nCONSTRAIN_WEIGHT = -1.0\nMIN_WEIGHT = 0.0\nMAX_WEIGHT = 1.0\n\nTIME_TO_HALITE_RATIO = 0.5\n\nAUTO_RETURN_RATIO = 1.4\nMOVE_COST_TIME = 2.7178 ** 2\n\nSTOP_BUYING_RATIO = 1.27\nSTOP_BUYING_RESOURCES = 0.36\nSAFE_RETURN_RATIO = 1.7\nHALITE_SAVED_BONUS = 1.5\n\nDANGER_WEIGHT = 10.0\n\nMIN_SHIPS_TO_BUY_DROPOFF = 12\nRADIUS_TO_STOP = 6\n\nCONSTANT_BUFFER = 4\n\nOPTIMAL_DISTANCE = 14\nMIN_DISTANCE = 12\n\n\n# visualization #####################################################\n\n# import numpy as np\n# import matplotlib.pyplot as plt\n\ndef v(d, f):\n    a = np.zeros((constants.HEIGHT, constants.WIDTH))\n    for y in range(constants.HEIGHT):\n        for x in range(constants.WIDTH):\n            p = Position(x, y)\n            if p in d:\n                a[y][x] = d[p]\n    plt.imsave(f, a)\n\n# helpers ###########################################################\n\ndef look(ship, game, direction, length, opponents_in_range, danger_cost):\n\n    length = constants.WIDTH // 3\n\n    if get_unload_distance(ship, game) > 1:\n        cell = game.game_map[ship.position.directional_offset(direction)]\n        if cell.ship and cell.ship.owner != game.my_id:\n            return 0\n\n    pos = ship.position\n    val = 0\n\n    for i in range(length):\n        t = 0\n        for j in range(-i - 1, i + 2):\n            if direction == Direction.North or direction == Direction.South:\n                p = Position(pos.x + j, pos.y + (i + 1) * direction[1])\n            else:\n                p = Position(pos.x + (i + 1) * direction[0], pos.y + j)\n            cell = game.game_map[p]\n            if not (cell.ship and cell.ship.owner != game.my_id):\n                if opponents_in_range[p] >= constants.INSPIRATION_SHIP_COUNT:\n                    t += cell.halite_amount * constants.INSPIRED_BONUS_MULTIPLIER * HALITE_SAVED_BONUS\n                else:\n                    t += cell.halite_amount\n            else:\n                t += 0.0\n        v = t / (1 + (i + 1) * 2)\n        val += (v / (i + 1)) ** 2\n\n    cost_of_losing_ship = ship.halite_amount / constants.MAX_HALITE\n\n    if len(game.players) > 2:\n        cost_of_losing_ship += 1.0\n\n    pos = ship.position.directional_offset(direction)\n\n    danger = danger_cost[pos] * cost_of_losing_ship if pos in danger_cost else 0\n\n    val /= (1.0 + danger * DANGER_WEIGHT) ** 
2\n\n    return val\n\ndef weights_to_direction(weights):\n    return INDEX_TO_DIRECTION[\n        weights.index(max(weights))\n    ]\n\nclass Map:\n\n    def __init__(self):\n        self._data = {}\n\n    def add(self, key, value):\n        if key in self._data:\n            self._data[key].append(value)\n        else:\n            self._data[key] = [value]\n\n    def swap(self, from_key, to_key, value):\n        element = self._data[from_key]\n        element.pop(element.index(value))\n        self.add(to_key, value)\n\n    def items(self):\n        return list(self._data.items())\n\n\ndef get_unload_positions(game):\n    return [\n        game.me.shipyard.position\n    ] + [\n        dropoff.position for dropoff in game.me.get_dropoffs()\n    ]\n\ndef get_unload_distance(ship, game):\n    return min(game.game_map.calculate_distance(ship.position, p) for p in get_unload_positions(game))\n\ndef get_move_cost(game, position):\n    return (game.game_map[position].halite_amount / constants.MAX_HALITE) * TIME_TO_HALITE_RATIO\n\ndef has_enemy(game, position):\n    cell = game.game_map[position]\n    return cell.ship and cell.ship.owner != game.my_id\n\ndef get_danger_cost(game, position):\n    cost = 0.0\n    if has_enemy(game, position):\n        cost += 1.0\n    for cardinal in position.get_surrounding_cardinals():\n        if has_enemy(game, cardinal):\n            cost += 0.25\n    return cost\n\ndef get_halite_amount(game):\n    total = 0\n    for row in game.game_map._cells:\n        for cell in row:\n            total += cell.halite_amount\n    return total\n\n\ndef is_shipyard_or_dropoff(game, position):\n    cell = game.game_map[position]\n    return cell.structure and cell.structure.owner == game.my_id\n\n\ndef get_halite_around(game, position, radius):\n    t = 0\n    for x in range(-radius, radius + 1):\n        for y in range(-radius, radius + 1):\n            if abs(x) + abs(y) <= radius:\n                t += game.game_map[Position(position.x + x, position.y + y)].halite_amount / (4 * (abs(x) + abs(y) + 1))\n    return t\n\ndef get_enemy_ships(game):\n    s = []\n    for i, p in game.players.items():\n        if i != game.my_id:\n            for ship in p.get_ships():\n                s.append(ship)\n    return s\n\ndef get_too_close_to_enemy(game, position):\n    for i, p in game.players.items():\n        if i != game.my_id:\n            if game.game_map.calculate_distance(position, p.shipyard.position) < MIN_DISTANCE:\n                return True\n            for do in p.get_dropoffs():\n                if game.game_map.calculate_distance(position, do.position) < 2:\n                    return True\n    return False\n\n# calculations ######################################################\n\n\nclass Calculations:\n    '''\n    to allow for caching of calculations\n    '''\n    def __init__(self):\n        self.return_cost = {}\n\n    def update(self):\n        pass\n\n    def calculate(self, game, curr, can_buy):\n        t0 = time.time()\n        self._compute_danger(game)\n        logging.info(f'danger: {time.time() - t0}')\n        t1 = time.time()\n        self._compute_return_cost(game)\n        logging.info(f'return: {time.time() - t1}')\n        t2 = time.time()\n        self._compute_opponents_in_radius(game)\n        logging.info(f'opponents: {time.time() - t2}')\n        t3 = time.time()\n        if not curr and can_buy:\n            self._compute_dropoff_value(game)\n            logging.info(f'dropoff: {time.time() - t3}')\n\n    def _compute_danger(self, game):\n        self.danger_cost = {}\n\n        for i, player in game.players.items():\n            if i == game.my_id:\n                continue\n            self.danger_cost[player.shipyard.position] = 2.0\n            for dropoff in player.get_dropoffs():\n                self.danger_cost[dropoff.position] = 2.0\n\n        for ship in get_enemy_ships(game):\n            if ship.position in self.danger_cost:\n                self.danger_cost[ship.position] += 1.0\n            else:\n                self.danger_cost[ship.position] = 1.0\n            for p in ship.position.get_surrounding_cardinals():\n                if p in self.danger_cost:\n                    self.danger_cost[p] += 
0.25\n else:\n self.danger_cost[p] = 0.25\n\n def _compute_return_cost(self, game):\n buy_fix = len(game.me.get_ships()) - MIN_SHIPS_TO_BUY_DROPOFF\n\n q = [(0.0, p) if game.game_map[p].structure.id != 1337 else (10.0 - (min(4, buy_fix) * 2.5), p) for p in get_unload_positions(game)]\n\n seen = {}\n while q:\n cost, position = heapq.heappop(q)\n\n if position in seen:\n continue\n else:\n seen[position] = cost\n\n for destination in position.get_surrounding_cardinals():\n if destination in seen:\n continue\n danger = self.danger_cost[destination] if destination in self.danger_cost else 0\n heapq.heappush(\n q, \n (\n cost + 1.0 + get_move_cost(game, destination) + danger * DANGER_WEIGHT, \n destination\n )\n )\n\n min_return_cost = min(seen.values())\n max_return_cost = max(seen.values())\n\n self.return_cost = {k: 1.0 - (v / (max_return_cost - min_return_cost)) for k, v in seen.items()}\n\n def _compute_opponents_in_radius(self, game):\n self.opponents_in_range = {}\n for x in range(constants.WIDTH):\n for y in range(constants.HEIGHT):\n self.opponents_in_range[Position(x, y)] = 0\n\n pos = []\n for player_id, player in game.players.items():\n if player_id != game.my_id:\n for ship in player.get_ships():\n pos.append(ship.position)\n\n for p in pos:\n for i in range(-4, 5):\n for j in range(-4, 5):\n dist = abs(i) + abs(j)\n if dist <= constants.INSPIRATION_RADIUS:\n self.opponents_in_range[Position(p.x + i, p.y + j)] += 1\n\n def _compute_dropoff_value(self, game):\n dropoff_value = {}\n\n looking_at = []\n\n for x in range(constants.WIDTH):\n for y in range(constants.HEIGHT):\n p = Position(x, y)\n looking_at.append(game.game_map[p])\n\n looking_at = sorted(looking_at, key=lambda x: -x.halite_amount)[:int(sqrt(constants.WIDTH * constants.HEIGHT))]\n \n m = 100000\n\n for cell in looking_at:\n dist = get_unload_distance(cell, game)\n ene = get_too_close_to_enemy(game, cell.position)\n if dist > MIN_DISTANCE and not ene:\n t = get_halite_around(game, cell.position, 12) \n if t // 12 > constants.MAX_HALITE // 7:\n dropoff_value[cell.position] = t // (1 + abs(dist - OPTIMAL_DISTANCE))\n if dist < m:\n m = dropoff_value[cell.position]\n\n if m == 100000:\n self.dropoff_value = {}\n return\n\n min_dropoff_value = min(dropoff_value.values())\n max_dropoff_value = max(dropoff_value.values()) + 1\n\n self.dropoff_value = {\n k: (v / (max_dropoff_value - min_dropoff_value))for k, v in dropoff_value.items() if v == m}\n\n\n# logic #############################################################\n\nclass Agent:\n\n def __init__(self):\n self.ship = None\n self.weights = None\n self.current = None\n\n self.returning = False\n\n def update(self, ship):\n self.ship = ship\n self.current = None \n\n def calculate(self, game, calculations):\n if self.ship.halite_amount < EMPTY * constants.MAX_HALITE:\n self.returning = False\n if game.turn_number + CONSTANT_BUFFER + get_unload_distance(self.ship, game) * SAFE_RETURN_RATIO > constants.MAX_TURNS:\n self.returning = True\n if self.ship.halite_amount < game.game_map[self.ship.position].halite_amount // constants.MOVE_COST_RATIO:\n self.weights = [CONSTRAIN_WEIGHT, CONSTRAIN_WEIGHT,\n CONSTRAIN_WEIGHT, CONSTRAIN_WEIGHT, MAX_WEIGHT]\n elif not self.returning and (self.ship.halite_amount < FULL * constants.MAX_HALITE or self.ship.halite_amount + game.game_map[self.ship.position].halite_amount // constants.EXTRACT_RATIO < constants.MAX_HALITE):\n # collect\n self.weights = self.get_weights_for_collecting(game, calculations)\n else:\n # go home\n self.returning = 
True\n            self.weights = self.get_weights_for_returning(game, calculations)\n\n    def get_weights_for_collecting(self, game, calculations):\n        weights = [CONSTRAIN_WEIGHT] * 5\n        for index, direction in enumerate(Direction.get_all_cardinals()):\n            halite_to_collect = 1.0 - (self.ship.halite_amount / constants.MAX_HALITE)\n            length = max(10 + int(halite_to_collect * ((min(constants.HEIGHT, constants.WIDTH) - 20) // 2)), 24)\n            weights[index] = look(self.ship, game, direction, length, calculations.opponents_in_range, calculations.danger_cost)\n        if not is_shipyard_or_dropoff(game, self.ship.position):\n            weights[4] = (game.game_map[self.ship.position].halite_amount ** 2) * MOVE_COST_TIME\n            if calculations.opponents_in_range[self.ship.position] >= constants.INSPIRATION_SHIP_COUNT:\n                weights[4] *= constants.INSPIRED_BONUS_MULTIPLIER * HALITE_SAVED_BONUS\n        return weights\n\n    def get_weights_for_returning(self, game, calculations):\n        weights = [MIN_WEIGHT] * 5\n        for index, direction in enumerate(Direction.get_all_cardinals()):\n            p = self.ship.position.directional_offset(direction)\n            weights[index] = calculations.return_cost[p]\n        weights[4] = calculations.return_cost[self.ship.position]\n        return weights\n    \n    def constrain(self):\n        self.weights[DIRECTION_TO_INDEX[self.current]] = CONSTRAIN_WEIGHT\n\n    def policy(self, game):\n        d = INDEX_TO_DIRECTION[\n            self.weights.index(max(self.weights))\n        ]\n        p = self.ship.position.directional_offset(d)\n        self.current = d\n\n        return d, p\n\n    def is_constrainable(self, game=None):\n        t = sum(1 for w in self.weights if w > CONSTRAIN_WEIGHT) > 1\n        return t\n\n    def __str__(self):\n        return f'Agent({self.current})'\n\n\nclass Orchestrator:\n\n    def __init__(self, game):\n        self.agents = {}\n        self.calculations = Calculations()\n        self.total_halite = get_halite_amount(game)\n\n    def update(self, game, curr):\n        if curr:\n            fake = Dropoff(game.my_id, 1337, curr)\n            game.me._dropoffs[curr] = fake\n            cell = game.game_map[curr]\n            cell.structure = fake\n        \n        alive = set()\n        for ship in game.me.get_ships():\n            alive.add(ship.id)\n            if ship.id in self.agents:\n                self.agents[ship.id].update(ship)\n            else:\n                self.agents[ship.id] = Agent()\n                self.agents[ship.id].update(ship)\n\n        for ship_id in list(self.agents.keys()):\n            if ship_id not in alive:\n                del self.agents[ship_id]\n\n\n    def calculate(self, game, curr, can_buy):\n        t0 = time.time()\n        self.calculations.calculate(game, curr, can_buy)\n        logging.info(f'calc: {time.time() - t0}')\n        t1 = time.time()\n        for agent in self.agents.values():\n            agent.calculate(game, self.calculations)\n        logging.info(f'agents: {time.time() - t1}')\n        self.halite_collected = 1 - (get_halite_amount(game) / self.total_halite)\n\n    def policy(self, game, curr):\n        # Should return orders for all the ships\n        returning = game.turn_number + constants.WIDTH > constants.MAX_TURNS\n\n        if curr:\n            dist_to_dropoff = [game.game_map.calculate_distance(s.position, curr) for s in game.me.get_ships() if self.agents[s.id].returning or s.halite_amount > constants.MAX_HALITE * 0.7]\n            save_more = all((\n                dist_to_dropoff and RADIUS_TO_STOP > min(dist_to_dropoff),\n                not game.me.halite_amount >= constants.DROPOFF_COST - game.game_map[curr].halite_amount + constants.SHIP_COST\n            ))\n        else:\n            save_more = False\n\n        requests = Map()\n\n        for agent in self.agents.values():\n            d, p = agent.policy(game)\n            # direction\n            requests.add(p, agent)\n\n\n        finished = False\n        counter = 1\n\n        while not finished:\n            finished = True\n\n            for position, agents in requests.items():\n                # if collide\n                if len(agents) > 1 and not 
(returning and is_shipyard_or_dropoff(game, position)):\n                    # don't\n                    # should be the one with the currently worst choice\n                    # try to change one of the collecting\n                    constrainable = [a for a in agents if a.is_constrainable(game=game)]\n\n\n                    collecting = [a for a in constrainable if not a.returning]\n                    r = [a for a in constrainable if a.returning]\n\n                    if collecting:\n                        agent = min(collecting, key=lambda x: x.ship.halite_amount)\n                    elif r:\n                        agent = min(r, key=lambda x: x.weights[DIRECTION_TO_INDEX[x.current]])\n                    else:\n                        swapable = [\n                            a for a in agents if a.current != Direction.Still]\n                        if swapable:\n                            agent = random.choice(swapable)\n                            agent.weights = [CONSTRAIN_WEIGHT, CONSTRAIN_WEIGHT,\n                                             CONSTRAIN_WEIGHT, CONSTRAIN_WEIGHT, MAX_WEIGHT]\n                            d, p = agent.policy(game)\n                            requests.swap(position, p, agent)\n                            finished = False\n                        continue\n\n\n                    # stop the agent from making the decision the next call\n                    agent.constrain()\n                    # make a new move\n                    d, p = agent.policy(game)\n                    requests.swap(position, p, agent)\n                    # flag that there was a conflict\n                    \n                    finished = False\n            \n            counter += 1\n\n            if counter > 200:\n                logging.error('FAILED FIXING :(')\n                break\n\n        commands = []\n\n        for agent in self.agents.values():\n            if curr and agent.ship.position == curr and game.me.halite_amount >= constants.DROPOFF_COST - game.game_map[curr].halite_amount - agent.ship.halite_amount:\n                commands.append(agent.ship.make_dropoff())\n                continue\n            if returning and is_shipyard_or_dropoff(game, agent.ship.position):\n                continue\n            commands.append(agent.ship.move(agent.current))\n            p = agent.ship.position.directional_offset(agent.current)\n            if game.game_map[agent.ship.position].ship == agent.ship:\n                game.game_map[agent.ship.position].ship = None\n\n            game.game_map[p].mark_unsafe(agent.ship)\n\n        \n        if not save_more and (self.halite_collected < (1.0 - STOP_BUYING_RESOURCES) and game.turn_number < constants.MAX_TURNS / STOP_BUYING_RATIO) and game.me.halite_amount >= constants.SHIP_COST and not game.game_map[game.me.shipyard].is_occupied and len(game.me.get_ships()) <= 120:\n            commands.append(game.me.shipyard.spawn())\n\n        return commands\n\n    \n# usage #############################################################\n\n\ngame = hlt.Game()\ngame.ready('3 sigma')\n\n# planned = [Position(0, 32), Position(16, 14), Position(0, 52), Position(24, 50)]\nplanned = []\n\ncurr = None\n\ncan_buy = False\nallowed_dropoffs = ((constants.WIDTH - 32) // 8) + (1 if len(game.players.keys()) == 2 else 0)\ndropoff_cooldown = 0\n\norchestrator = Orchestrator(game)\n\nwhile True:\n    game.update_frame()\n\n    logging.info('state:')\n    logging.info(f'ships -> {len(game.me.get_ships())}')\n\n    logging.info(f'dropoffs -> {game.me.get_dropoffs()}')\n\n    if dropoff_cooldown > 0:\n        dropoff_cooldown -= 1\n\n    if len(game.me.get_ships()) >= MIN_SHIPS_TO_BUY_DROPOFF and len(game.me.get_dropoffs()) < allowed_dropoffs and game.turn_number < constants.MAX_TURNS - 100:\n        can_buy = True\n    else:\n        if len(game.me.get_dropoffs()) >= allowed_dropoffs:\n            can_buy = False\n\n\n    if (not curr) or game.game_map[curr].structure:\n        if curr and game.game_map[curr].structure.id == game.my_id:\n            dropoff_cooldown = 10\n        curr = None\n    \n    logging.info(f'curr -> {curr}')\n\n    t0 = time.time()\n\n    orchestrator.update(game, curr if dropoff_cooldown == 0 else None)\n\n    t1 = time.time()\n\n    logging.info(f'[UPDATE] t={t1 - t0}')\n\n    orchestrator.calculate(game, curr, can_buy)\n\n    if not curr and can_buy:\n        if orchestrator.calculations.dropoff_value:\n            curr = max(\n                orchestrator.calculations.dropoff_value.items(), key=lambda x: 
x[1])[0]\n        else:\n            curr = None\n    \n    t2 = time.time()\n\n    logging.info(f'[CALCULATE] t={t2 - t1}')\n\n    commands = orchestrator.policy(\n        game, curr if dropoff_cooldown == 0 else None)\n\n    t3 = time.time()\n\n    logging.info(f'[POLICY] t={t3 - t2}')\n\n    logging.info(f'[DONE] t={t3 - t0}')\n\n    for row in game.game_map._cells:\n        for cell in row:\n            if cell.structure and cell.structure.id == 1337:\n                cell.structure = None\n    \n    for pos, d in list(game.me._dropoffs.items()):\n        if d.id == 1337:\n            del game.me._dropoffs[pos]\n\n    game.end_turn(commands)\n\n    # print('yolo')\n","sub_path":"MyBot.py","file_name":"MyBot.py","file_ext":"py","file_size_in_byte":20681,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"522949951","text":"import pygame\r\nimport random\r\nimport sys\r\n\r\n\r\n\r\n# Initializing pygame\r\npygame.init()\r\n\r\n\r\n\r\n# Window resolution\r\nWIDTH = 1024\r\nHEIGHT = 768\r\n\r\n\r\n\r\n# Color Grid\r\nRED = (178,34,34)\r\nBLUE = (65,105,225)\r\nYELLOW = (255,255,0)\r\nBACKGROUND_COLOR = (139,137,137)\r\n\r\n\r\n\r\n# Player block Size and Position\r\nplayer_size = 50\r\nplayer_pos = [WIDTH/2, HEIGHT-2*player_size]\r\n\r\n\r\n# Block size position and list \r\nblock_size = 50\r\nblock_position = [random.randint(0,WIDTH-block_size), 0]\r\nblock_list = [block_position]\r\n\r\n\r\n# Game speed\r\nSPEED = 10\r\n\r\nscreen = pygame.display.set_mode((WIDTH, HEIGHT))\r\n\r\ngame_over = False\r\n\r\nscore = 0\r\n\r\nclock = pygame.time.Clock()\r\n\r\n\r\n# Font style for the game\r\nfont_style = pygame.font.SysFont(\"monospace\", 35)\r\n\r\n\r\n# Level and speed increases as player advances\r\ndef set_level(score, SPEED):\r\n\t''' This function sets the speed and score '''\r\n\tif score < 20:\r\n\t\tSPEED = 5\r\n\telif score < 40:\r\n\t\tSPEED = 8\r\n\telif score < 60:\r\n\t\tSPEED = 12\r\n\telse:\r\n\t\tSPEED = 15\r\n\treturn SPEED\r\n\t# SPEED = score/5 + 1\r\n\r\n# Function for falling blocks, which is random\r\ndef block_fall(block_list):\r\n\t''' This function regulates how the blocks fall '''\r\n\tdelay = random.random()\r\n\tif len(block_list) < 10 and delay < 0.1:\r\n\t\tx_pos = random.randint(0,WIDTH-block_size)\r\n\t\ty_pos = 0\r\n\t\tblock_list.append([x_pos, y_pos])\r\n# Function to draw\r\ndef draw_block(block_list):\r\n\t''' This function draws the blocks '''\r\n\tfor block_position in block_list:\r\n\t\tpygame.draw.rect(screen, BLUE, (block_position[0], block_position[1], block_size, block_size))\r\n\r\ndef update_block_positions(block_list, score):\r\n\t''' This function updates the positions, increases the speed and keeps track of the score counter '''\r\n\tfor idx, block_position in enumerate(block_list):\r\n\t\tif block_position[1] >= 0 and block_position[1] < HEIGHT:\r\n\t\t\tblock_position[1] += SPEED\r\n\t\telse:\r\n\t\t\tblock_list.pop(idx)\r\n\t\t\tscore += 1\r\n\treturn score\r\n\r\ndef check_impact(block_list, player_pos):\r\n\t''' This function checks for impact with the blocks '''\r\n\tfor block_position in block_list:\r\n\t\tif find_impact(player_pos, block_position):\r\n\t\t\treturn True\r\n\treturn False\r\n\r\ndef find_impact(player_pos, block_position):\r\n\t''' This function finds the impact that terminates the game '''\r\n\tp_x = player_pos[0]\r\n\tp_y = player_pos[1]\r\n\r\n\te_x = block_position[0]\r\n\te_y = block_position[1]\r\n\r\n\tif (e_x >= p_x and e_x < (p_x + player_size)) or (p_x >= e_x and p_x < (e_x+block_size)):\r\n\t\tif (e_y >= p_y and e_y < (p_y + player_size)) or (p_y >= e_y and p_y < 
(e_y+block_size)):\r\n\t\t\treturn True\r\n\treturn False\r\n\r\nwhile not game_over:\r\n\r\n\tfor event in pygame.event.get():\r\n\t\tif event.type == pygame.QUIT:\r\n\t\t\tsys.exit()\r\n\r\n\t\tif event.type == pygame.KEYDOWN:\r\n\r\n\t\t\tx = player_pos[0]\r\n\t\t\ty = player_pos[1]\r\n\r\n\t\t\tif event.key == pygame.K_LEFT:\r\n\t\t\t\tx -= player_size\r\n\t\t\telif event.key == pygame.K_RIGHT:\r\n\t\t\t\tx += player_size\r\n\r\n\t\t\tplayer_pos = [x,y]\r\n\r\n\tscreen.fill(BACKGROUND_COLOR)\r\n\t\r\n\tblock_fall(block_list)\r\n\tscore = update_block_positions(block_list, score)\r\n\tSPEED = set_level(score, SPEED)\r\n\r\n\ttext = \"Score:\" + str(score) \r\n\tlabel = font_style.render(text, 1, YELLOW)\r\n\tscreen.blit(label, (WIDTH-200, HEIGHT-40))\r\n\r\n\r\n\tif check_impact(block_list, player_pos):\r\n\t\tgame_over = True\r\n\t\tbreak\r\n\r\n\tdraw_block(block_list)\r\n\r\n\tpygame.draw.rect(screen, RED, (player_pos[0], player_pos[1], player_size, player_size))\r\n\r\n\tclock.tick(30)\r\n\r\n\tpygame.display.update()\r\n","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":3406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"206936811","text":"# coding: utf-8\n\n# Tensorflow\nimport tensorflow as tf\n# Rnn common functions\nfrom tensorflow.contrib.learn.python.learn.estimators import rnn_common\n# Model builder\nfrom tensorflow.python.estimator import model_fn as model_fn_lib\nprint('Tested with TensorFlow 1.2.0')\nprint('Your TensorFlow version:', tf.__version__) \nfrom tensorflow.python.ops import math_ops\n\n# Plot images with pyplot\nfrom matplotlib import pyplot as plt\n\n# Helpers for data processing\nimport pandas as pd\nimport numpy as np\nimport argparse\n\n# parser definition\nparser = argparse.ArgumentParser(prog='Play with Colorbot!')\n\nparser.add_argument('--model_path', type=str, default='pretrained',\n\t\t    help='Local path to the folder where the colorbot '\n\t\t\t\t 'model is.')\n\n# ## Helper functions\n\n# In[170]:\n\n# This function creates a sparse tensor in the following way, given:\n# indices = [[0, 0], [1, 1], [2, 2]]\n# values = [1, 2, 3]\n# dense_shape = [3, 4]\n#\n# The output will be a sparse tensor that represents this dense tensor:\n# [ \n#   [1, 0, 0, 0]\n#   [0, 2, 0, 0]\n#   [0, 0, 3, 0]\n# ]\n#\n# We're using this to generate a Sparse tensor that can be easily\n# formatted in a one hot representation.\n# More at: https://www.tensorflow.org/api_docs/python/tf/SparseTensor\ndef _sparse_string_to_index(sp, mapping):\n    # This operation constructs a lookup table to convert tensor of strings\n    # into int64 IDs\n    table = tf.contrib.lookup.index_table_from_tensor(mapping, dtype=tf.string)\n    \n    return tf.SparseTensor(indices=sp.indices,\n                           values=table.lookup(sp.values),\n                           dense_shape=sp.dense_shape)\n\n# Returns the column values from a CSV file as a list\ndef _get_csv_column(csv_file, column_name):\n    with open(csv_file, 'r') as f:\n        df = pd.read_csv(f)\n        return df[column_name].tolist()\n\n# Plot a color image\ndef _plot_rgb(rgb, color_name):\n    data = [[rgb]]\n    plt.imshow(data, interpolation='nearest')\n    plt.title(color_name)\n    plt.show()\n\n# Helper variables for the input function\nCHARACTERS = [chr(i) for i in range(256)]\nSEQUENCE_LENGTH_KEY = 'sequence_length'\nCOLOR_NAME_KEY = 'color_name'\nRGB_KEY = 'rgb'\n\n# Creating my own input function for a given color\ndef get_input_fn(color):\n\tdef input_fn():\n\t\tseq_len = list([len(color)])\n\t\t#x = 
math_ops.to_int32(seq_len)\n\t\t#print(x.get_shape().ndims)\n\t\tcolor_name = [color] # the input for string_split needs to be\n\t\t\t\t\t\t     # a tensor\n\n\t\tsplit_color_name = tf.string_split(color_name, delimiter='')\n\t\tmapping = tf.constant(CHARACTERS, name=\"mapping\")\n\t\tintegerized_color_name = _sparse_string_to_index(split_color_name, mapping)\n\n\t\t# generating anything (0, 0, 0) for the y\n\t\t# since for most cases there's no right answer \n\t\ty = np.asarray([[0, 0, 0]], dtype=np.float32)\n\n\t\t# creates inputs\n\t\tx = {COLOR_NAME_KEY: integerized_color_name,\n             SEQUENCE_LENGTH_KEY: seq_len}\n        \n\t\treturn x, y\n\treturn input_fn\n\n# Builds the model function for the Estimator\ndef get_model_fn(rnn_cell_sizes,\n                 label_dimension,\n                 dnn_layer_sizes=[],\n                 optimizer='SGD',\n                 learning_rate=0.01):\n    \n    def model_fn(features, labels, mode):\n        \n        color_name = features[COLOR_NAME_KEY]\n        sequence_length = features[SEQUENCE_LENGTH_KEY]\n        print(sequence_length)\n        x = math_ops.to_int32(sequence_length)\n        print(x.get_shape().ndims)\n\n        # Creating dense representation for the names\n        # and then converting it to one hot representation\n        dense_color_name = tf.sparse_tensor_to_dense(color_name, default_value=len(CHARACTERS))\n        color_name_onehot = tf.one_hot(dense_color_name, depth=len(CHARACTERS) + 1)\n        \n        # Each RNN layer will consist of a LSTM cell\n        rnn_layers = [tf.contrib.rnn.LSTMCell(size) for size in rnn_cell_sizes]\n        \n        # Construct the layers\n        multi_rnn_cell = tf.contrib.rnn.MultiRNNCell(rnn_layers)\n        \n        # Runs the RNN model dynamically\n        # more about it at: \n        # https://www.tensorflow.org/api_docs/python/tf/nn/dynamic_rnn\n        outputs, final_state = tf.nn.dynamic_rnn(cell=multi_rnn_cell,\n                                                 inputs=color_name_onehot,\n                                                 sequence_length=sequence_length,\n                                                 dtype=tf.float32)\n\n        # Slice to keep only the last cell of the RNN\n        last_activations = rnn_common.select_last_activations(outputs,\n                                                              sequence_length)\n\n        # Construct dense layers on top of the last cell of the RNN\n        for units in dnn_layer_sizes:\n            last_activations = tf.layers.dense(\n                last_activations, units, activation=tf.nn.relu)\n        \n        # Final dense layer for prediction\n        predictions = tf.layers.dense(last_activations, label_dimension)\n\n        loss = None\n        train_op = None\n\n        if mode != tf.contrib.learn.ModeKeys.INFER: \n            loss = tf.losses.mean_squared_error(labels, predictions)\n        \n        if mode == tf.contrib.learn.ModeKeys.TRAIN: \n            train_op = tf.contrib.layers.optimize_loss(\n                loss,\n                tf.contrib.framework.get_global_step(),\n                optimizer=optimizer,\n                learning_rate=learning_rate)\n        \n        return model_fn_lib.EstimatorSpec(mode,\n                                          predictions=predictions,\n                                          loss=loss,\n                                          train_op=train_op)\n    return model_fn\n\nmodel_fn = get_model_fn(rnn_cell_sizes=[256, 128], # size of the hidden layers\n                        label_dimension=3, # since it is RGB\n                        dnn_layer_sizes=[128], # size of units in the dense layers on top of the RNN\n                        optimizer='Adam', #changing optimizer to Adam\n                        learning_rate=0.01)\n\nargs = parser.parse_args()\nestimator = tf.estimator.Estimator(model_fn=model_fn,\n\t\t\t\t\t\t\t\t   model_dir=args.model_path)\n\n\n# Making Predictions\nprint('Colorbot is ready to generate colors!')\n\nEXIT_COMMAND = ''\nwhile True:\n\tcolor_name = input('give me a color name (or %s): ' % (EXIT_COMMAND))\n\tif color_name == EXIT_COMMAND:\n\t\tbreak\n\t\n\tprint('Generating color...')\n\tpreds = estimator.predict(input_fn=get_input_fn(color_name))\n\tfor p, name in zip(preds, [color_name]):\n\t\tcolor = tuple(map(int, p * 255))\n\t\t_plot_rgb(p, 
name)\n","sub_path":"code_samples/RNN/colorbot/play_colorbot.py","file_name":"play_colorbot.py","file_ext":"py","file_size_in_byte":6561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"202702181","text":"import cv2\nimport numpy as np\nimport time\nimport os\nimport glob\nimport sys\nimport reductNoise\nimport pytesseract\nfrom PIL import Image\nfrom waterDropleCut import WaterCutting\nfrom waterFill import WaterFilled\n\nbaseDir = \"C:/Users/Administrator/PlainGame/easy_img/\"\nprint(os.path.basename)\n\nfile = \"getCheckImg_0.jpg\"\nfileName= file.split(\".\")[0]\nfilepath = os.path.join(\"easy_img\",file)\nim = cv2.imread(filepath)\n#将图片转成灰度图\nim_gray = cv2.cvtColor(im,cv2.COLOR_BGR2GRAY)\n# cv2.imwrite(os.path.join(\"image\",fileName+\"_gray.jpg\"),im_gray)\n#将像素做二值化处理\nret,im_res = cv2.threshold(im_gray,240,255,cv2.THRESH_BINARY)\n# cv2.imwrite(os.path.join(\"image\",fileName+\"_threshold.jpg\"),im_res)\n#用高斯模糊对图片进行降噪\n# kernel = 1/9*np.array([[1,2,1],[2,4,2],[1,2,1]])\n# im_blur = cv2.filter2D(im_inv,-1,kernel)\n# cv2.imwrite(os.path.join(\"image\",fileName+\"_filter2d.jpg\"),im_inv)\n# #对图片进行进一轮的二值化处理\n# ret,im_res = cv2.threshold(im_blur,150,255,cv2.THRESH_BINARY_INV)\n# cv2.imwrite(os.path.join(\"image\",fileName+\"_threshold2.jpg\"),im_res)\n\nprint(\"第二轮二值化处理完毕。。。。\")\n\n#根据扫描法对图片降噪处理\nresults = reductNoise.reductNoise(im_res)\n# cv2.imwrite(os.path.join(\"image\",fileName+\"_reductNoise.jpg\"),im_res)\nprint(\"扫描法处理完毕。。。。\")\n\n#去除两边的空白\nim_res = reductNoise.arrayTrim(im_res)\n# cv2.imwrite(os.path.join(\"image\",fileName+\"_arrayTrim.jpg\"),im_res)\n\n#使用泛水填充法对图片进行区域划分\nwaterFill = WaterFilled()\nwaterFill.filled(im_res)\nwaterFill.splitRegion(8,im_res)\nret,im_res = cv2.threshold(im_res,100,255,cv2.THRESH_BINARY)\n# cv2.imwrite(os.path.join(\"image\",fileName+\"_waterFill.jpg\"),im_res)\nprint(\"泛水填充法处理完毕。。。。。\")\n\n#对图片进行切割\nx_len = len(im_res[0])\ny_len = len(im_res)\ncut_step = int(x_len/4)\ncut_points = [cut_step,cut_step*2,cut_step*3,cut_step*4]\nfor index,point in enumerate(cut_points) :\n try :\n results = [[0] for i in range(y_len)]\n waterCutt = WaterCutting(point,0,im_res)\n results[0] = [im_res[0][i] for i in range(index*cut_step,point)]\n temp_y = 0\n while 1==1 :\n x,y = next(waterCutt)\n print(index,point,x,y)\n #如果y大于temp_y 则results增加一行\n if y > temp_y :\n results[y] = [im_res[y][i] for i in range(index*cut_step,point)]\n else :\n results[y].append(im_res[y][x])\n except StopIteration :\n im_result = np.asarray(results)\n im_result = cv2.resize(im_result, (60, 60))\n kernel = 1/9*np.array([[1,2,1],[2,4,2],[1,2,1]])\n im_result = cv2.filter2D(im_result,-1,kernel)\n char = pytesseract.image_to_string(im_result,lang=\"normal\",config=\"--psm 10\")\n print(\"char====\"+char)\n # cv2.imwrite(os.path.join(\"char\",fileName+\"_waterCutting\"+\"_\"+str(index)+\".jpg\"),im_result)\n # del results\n print(\"处理完毕了\")\nprint(\"水滴切割法处理完毕。。。。。\")","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":3061,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"138186617","text":"#!/usr/bin/env python\nimport roslib\nroslib.load_manifest('bakebot')\nimport rospy\nfrom bakebot.srv import *\nimport tf\nimport actionlib\nimport math\nimport sys\nimport time\nfrom utilities.bakebot_controller_manager import *\nfrom pr2_controller_manager import pr2_controller_manager_interface as pr2cm_interface\n\nclass PR2CMClient:\n '''\n This class switches 
between the ee_cart_imped controller and the standard move_arm\n suite of controllers on the PR2 arms.\n\n When invoked, the methods stop one controller and start the other. They require that the\n controllers are already loaded (albeit stopped).\n '''\n \n pr2cm_client_instance = None\n\n @staticmethod\n def get_pr2cm_client():\n ''' \n use this static factory method instead of the constructor to enforce singleton \n '''\n if PR2CMClient.pr2cm_client_instance == None:\n rospy.loginfo('instantiating a new pr2 controller manager client')\n PR2CMClient.pr2cm_client_instance = PR2CMClient()\n else:\n rospy.loginfo('returning existing instance of client ' + str(PR2CMClient.pr2cm_client_instance))\n return PR2CMClient.pr2cm_client_instance\n\n def __init__(self):\n '''\n do not call this constructor, call the static factory method instead\n '''\n rospy.loginfo('creating PR2CMClient')\n #pr2cm_interface.list_controllers()\n rospy.loginfo('loading (but not starting) impedance controllers')\n status = pr2cm_interface.load_controller('r_arm_cart_imped_controller')\n status = pr2cm_interface.load_controller('l_arm_cart_imped_controller')\n pr2cm_interface.list_controllers()\n rospy.loginfo('done')\n\n\n def load_ee_cart_imped(self, isRightArm):\n '''\n loads the ee_cart_imped controller on the arm specified.\n stops the cartesian controller first.\n @param isRightArm: if true then the controller will be loaded on the right arm, else the left arm\n '''\n rospy.logwarn('starting ee_cart_imped controller on the '+ ('right' if isRightArm else 'left') + ' arm')\n if isRightArm:\n status = pr2cm_interface.stop_controller('r_arm_controller')\n #print 'stopping arm controller status:', status\n status = pr2cm_interface.stop_controller('r_arm_cartesian_trajectory_controller')\n #print 'stopping trajectory controller status:', status\n status = pr2cm_interface.stop_controller('r_arm_cartesian_pose_controller')\n #print 'stopping pose controller status:', status\n status = pr2cm_interface.start_controller('r_arm_cart_imped_controller')\n #print 'starting controller status:', status\n else:\n status = pr2cm_interface.stop_controller('l_arm_controller')\n #print 'stopping controller status:', status\n status = pr2cm_interface.stop_controller('l_arm_cartesian_trajectory_controller')\n #print 'stopping trajectory controller status:', status\n status = pr2cm_interface.stop_controller('l_arm_cartesian_pose_controller')\n #print 'stopping pose controller status:', status\n status = pr2cm_interface.start_controller('l_arm_cart_imped_controller')\n #print 'starting controller status:', status\n rospy.loginfo('done')\n return status\n \n\n\n def load_cartesian(self, isRightArm):\n '''\n loads the cartesian controller on the arm specified.\n stops the impedance controller first.\n @param isRightArm: if true then the controller will be loaded on the right arm, else the left arm\n '''\n rospy.logwarn('starting cartesian controller on the '+ ('right' if isRightArm else 'left') + ' arm')\n if isRightArm:\n status = pr2cm_interface.stop_controller('r_arm_cart_imped_controller')\n #print 'stopping controller status:', status\n status = pr2cm_interface.start_controller('r_arm_controller')\n #print 'starting controller status:', status\n status = pr2cm_interface.start_controller('r_arm_cartesian_trajectory_controller')\n #print 'starting trajectory controller status:', status\n status = pr2cm_interface.start_controller('r_arm_cartesian_pose_controller')\n #print 'starting pose controller status:', status\n else:\n status = 
pr2cm_interface.stop_controller('l_arm_cart_imped_controller')\n #print 'stopping controller status:', status\n status = pr2cm_interface.start_controller('l_arm_controller')\n #print 'starting controller status:', status\n status = pr2cm_interface.start_controller('l_arm_cartesian_trajectory_controller')\n #print 'starting trajectory controller status:', status\n status = pr2cm_interface.start_controller('l_arm_cartesian_pose_controller')\n #print 'starting pose controller status:', status\n rospy.loginfo('done')\n return status\n\nif __name__ == '__main__':\n rospy.init_node('pr2cm_client_tester', anonymous=True)\n c = PR2CMClient.get_pr2cm_client()\n c.load_ee_cart_imped(False)\n time.sleep(3)\n c.load_cartesian(False)\n","sub_path":"branches/sandbox/bakebot/src/clients/pr2cm_client.py","file_name":"pr2cm_client.py","file_ext":"py","file_size_in_byte":5234,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"272639997","text":"# Author: Roberto Emmanuel González Muñoz\r\n# Program that reads the dimensions (base and height) of two rectangles\r\n# and computes and prints the perimeter and area of each one.\r\n\r\n\r\ndef calcularArea(b, a):\r\n area = b * a\r\n return area\r\n\r\n\r\ndef calcularPerimetro(b, a):\r\n perimetro = 2*b + 2*a\r\n return perimetro\r\n\r\n\r\ndef main():\r\n # Read the dimensions of rectangle 1.\r\n base1 = int(input(\"Enter the measurement of base 1: \"))\r\n altura1 = int(input(\"Enter the measurement of height 1: \"))\r\n\r\n # Read the dimensions of rectangle 2.\r\n base2 = int(input(\"Enter the measurement of base 2: \"))\r\n altura2 = int(input(\"Enter the measurement of height 2: \"))\r\n\r\n # Compute area 1.\r\n area1 = calcularArea(base1, altura1)\r\n # Compute perimeter 1.\r\n perimetro1 = calcularPerimetro(base1, altura1)\r\n # Compute area 2.\r\n area2 = calcularArea(base2, altura2)\r\n # Compute perimeter 2.\r\n perimetro2 = calcularPerimetro(base2, altura2)\r\n\r\n print(\"---------------------------------------------------\")\r\n print(\"The area of rectangle 1 is: %.d\" % area1)\r\n print(\"The perimeter of rectangle 1 is: %.d\" % perimetro1)\r\n print(\"---------------------------------------------------\")\r\n print(\"The area of rectangle 2 is: %.d\" % area2)\r\n print(\"The perimeter of rectangle 2 is: %.d\" % perimetro2)\r\n print(\"---------------------------------------------------\")\r\n\r\n if area1 > area2:\r\n print(\"The area of rectangle 1 is larger.\")\r\n\r\n elif area1 == area2:\r\n print(\"The areas of rectangles 1 and 2 are equal.\")\r\n\r\n else:\r\n print(\"The area of rectangle 2 is larger.\")\r\n\r\n\r\nmain()","sub_path":"Rectanguos.py","file_name":"Rectanguos.py","file_ext":"py","file_size_in_byte":1667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"362486297","text":"import random\nimport tempfile\n\nimport json\nfrom boto.s3.connection import S3Connection\nfrom boto.s3.key import Key\n\nfrom django.core.files import File\nfrom django.conf import settings\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.contrib import auth\nfrom django.db.models import Q\nfrom django.db import transaction\nfrom django.views.decorators.csrf import csrf_exempt\n\n#from django.http import HttpResponseNotModified\nfrom django.http import Http404\nfrom django.shortcuts import get_object_or_404\nfrom django.utils import timezone\nfrom phone_auth.signals import user_avatar_changed\nfrom photos_api.permissions import 
IsUserInAlbum, UserDetailsPagePermission, \\\n IsSameUserOrStaff\nfrom photos_api.parsers import PhotoUploadParser\nfrom photos_api import device_push, is_phone_number_mobile\nfrom phone_auth.models import AnonymousPhoneNumber, random_default_avatar_file_data, User, PhoneContact, PhoneNumber, \\\n UserGlanceScoreSnapshot\nfrom photos_api.private_serializers import PhotoObjectSerializer\nfrom photos_api.signals import photos_added_to_album, member_leave_album\nfrom photos_api import optimized_views\n\nfrom rest_framework import generics, serializers, mixins\nfrom rest_framework.decorators import api_view, permission_classes\nfrom rest_framework.generics import GenericAPIView\nfrom rest_framework.reverse import reverse\nfrom rest_framework.response import Response\nfrom rest_framework.exceptions import PermissionDenied\nfrom rest_framework.permissions import IsAdminUser, IsAuthenticated\nfrom rest_framework import status\nfrom rest_framework import views\n\nimport datetime\nimport requests\n\nimport phonenumbers\n\nfrom photos.image_uploads import process_file_upload\nfrom photos.models import Album, PendingPhoto, AlbumMember, Photo, PhotoComment, \\\n PhotoUserTag, PhotoGlance\nfrom photos_api.serializers import AlbumNameSerializer, AlbumSerializer, \\\n UserSerializer, AlbumUpdateSerializer, AlbumAddSerializer, \\\n QueryPhonesRequestSerializer, DeletePhotosSerializer, \\\n AlbumMemberNameSerializer, AlbumMemberSerializer, AlbumViewSerializer, \\\n AlbumNameChangeSerializer, AlbumMembersSerializer, \\\n PhotoCommentSerializer, PhotoUserTagSerializer, PhotoGlanceScoreSerializer, \\\n PhotoGlanceSerializer, UserGlanceScoreSerializer, \\\n AlbumMemberPhoneNumberSerializer, YouTubeUploadSerializer\nfrom photos_api.check_modified import supports_last_modified, supports_etag\n\nimport invites_manager\nfrom photos import photo_operations\n\nfrom photos.models import UserHiddenPhoto\n\n@api_view(['PUT'])\n# @permission_classes((IsAllowedPrivateAPI, ))\ndef youtube_upload(request, storage_id):\n permission_classes = (IsAuthenticated)\n serializer = YouTubeUploadSerializer(data=request.DATA)\n\n if not serializer.is_valid():\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n author_id = serializer.object['author_id']\n client_upload_id = serializer.object['client_upload_id']\n album_id = serializer.object['album_id']\n status_ = serializer.object['status']\n\n youtube = False\n\n if client_upload_id == 'youtube':\n youtube_id = serializer.object['youtube_id']\n youtube = True\n\n try:\n author = User.objects.get(pk=author_id)\n except User.DoesNotExist:\n return Response('Invalid user id: ' + str(author_id), status=status.HTTP_400_BAD_REQUEST)\n\n try:\n album = Album.objects.get(pk=album_id)\n except Album.DoesNotExist:\n return Response('Invalid album id: ' + str(album_id), status=status.HTTP_400_BAD_REQUEST)\n\n # TODO Verify that the author is allowed to add a video into album\n\n now = timezone.now()\n\n if status_ == 'processing':\n raise RuntimeError('\"processing\" status not yet implemented')\n elif status_ == 'ready':\n if youtube == True:\n photo_operations.add_youtube_photo(client_upload_id, storage_id, author, album, now, youtube_id)\n else:\n photo_operations.add_photo(client_upload_id, storage_id, author, album, now)\n\n elif status_ == 'invalid':\n raise RuntimeError('\"invalid\" status not yet implemented')\n else:\n raise RuntimeError('Unknown status: ' + status_)\n\n return Response(status=status.HTTP_204_NO_CONTENT)\n\n@api_view(['GET'])\ndef 
api_root(request, format=None):\n \"\"\"\n The entry endpoint of our API.\n \"\"\"\n response_data = {\n 'all_albums': reverse('album-list', request=request),\n 'all_users': reverse('user-list', request=request),\n 'upload_photos_request': reverse('photos-upload-request', request=request),\n }\n return Response(response_data)\n\n\n\n\nclass AlbumList(generics.ListAPIView):\n \"\"\"\n The list of all albums in the database, of all users.\n \"\"\"\n permission_classes = (IsAdminUser,)\n model = Album\n serializer_class = AlbumNameSerializer\n\n\ndef parse_phone_number(phone_number, default_country):\n try:\n number = phonenumbers.parse(phone_number, default_country)\n except phonenumbers.phonenumberutil.NumberParseException:\n return None\n\n if not phonenumbers.is_possible_number(number):\n return None\n\n return number\n\ndef album_add_members(album, inviter, member_identifiers, date_added):\n result = []\n\n with album.modify(date_added) as m:\n for member_identifier in member_identifiers:\n if member_identifier.user_id is None:\n number = parse_phone_number(member_identifier.phone_number, member_identifier.default_country)\n if not number:\n result.append({\"success\": False, \"error\": \"invalid_phone_number\"})\n else:\n m.add_phone_number(inviter, number, member_identifier.contact_nickname, invites_manager.send_invite)\n\n # Later check for the result of the Twilio SMS send, which\n # will tell us if sending the SMS failed due to being an invalid\n # number. In this save success=False, error=invalid_phone_number\n result.append({\"success\": True})\n\n else:\n if m.add_user_id(inviter, member_identifier.user_id):\n result.append({\"success\": True})\n else:\n result.append({\"success\": False, \"error\": \"invalid_user_id\"})\n\n return result\n\n@supports_etag\nclass AlbumDetail(GenericAPIView):\n permission_classes = (IsUserInAlbum,)\n\n def initial(self, request, pk, *args, **kwargs):\n self.album = get_object_or_404(Album, pk=pk)\n\n return super(AlbumDetail, self).initial(request, pk, *args, **kwargs)\n\n def get_etag(self, request, pk):\n return self.album.get_etag()\n\n def get(self, request, pk):\n payload = optimized_views.get_album_detail_payload(request.user, self.album)\n return Response(payload, content_type='application/json')\n\n def post(self, request, pk):\n serializer = AlbumUpdateSerializer(data=request.DATA)\n if not serializer.is_valid():\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n now = timezone.now()\n\n if serializer.object.add_photos:\n photo_ids = serializer.object.add_photos\n\n try:\n photo_operations.add_pending_photos_to_album(photo_ids, self.album.id, now)\n except photo_operations.PhotoNotUploadedAddPhotoException:\n return Response(u\"Trying to add a Photo that has not yet been uploaded\", status=status.HTTP_400_BAD_REQUEST)\n except photo_operations.InvalidPhotoIdAddPhotoException:\n return Response(u\"Trying to add a Photo with an invalid photo_id\", status=status.HTTP_400_BAD_REQUEST)\n\n photos_added_to_album.send(sender=self,\n photos=photo_ids,\n by_user=request.user,\n to_album=self.album)\n\n if serializer.object.copy_photos:\n photo_ids = serializer.object.copy_photos\n photo_operations.copy_photos_to_album(request.user, photo_ids, self.album.id, now)\n\n photos_added_to_album.send(sender=self,\n photos=photo_ids,\n by_user=request.user,\n to_album=self.album)\n\n album_add_members(self.album, request.user, serializer.object.add_members, now)\n\n payload = optimized_views.get_album_detail_payload(request.user, 
self.album)\n return Response(payload, content_type='application/json')\n\n\nclass AlbumNameView(GenericAPIView):\n permission_classes = (IsAuthenticated, IsUserInAlbum)\n serializer_class = AlbumNameChangeSerializer\n\n def initial(self, request, pk, *args, **kwargs):\n self.album = get_object_or_404(Album, pk=pk)\n\n return super(AlbumNameView, self).initial(request, pk, *args, **kwargs)\n\n def get(self, request, *args, **kwargs):\n responseSerializer = (self.get_serializer_class())()\n responseSerializer.data['name'] = self.album.name\n return Response(responseSerializer.data)\n\n def put(self, request, *args, **kwargs):\n # if request.user != self.album.creator:\n # return Response(status=403)\n\n serializer = self.get_serializer(data=request.DATA, files=request.FILES)\n if serializer.is_valid():\n with self.album.modify(timezone.now()):\n self.album.name = serializer.object['name']\n self.album.save(update_fields=['name'])\n\n return Response(status=status.HTTP_204_NO_CONTENT)\n else:\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n\nclass AlbumMembersView(generics.CreateAPIView):\n model = Album\n permission_classes = (IsUserInAlbum,)\n serializer_class = AlbumMembersSerializer\n\n def create(self, request, *args, **kwargs):\n serializer = self.get_serializer(data=request.DATA, files=request.FILES)\n\n now = timezone.now()\n\n if serializer.is_valid():\n album = Album.objects.get(pk=kwargs.get('pk'))\n result = album_add_members(album, request.user, serializer.object['members'], now)\n\n return Response(result, status=status.HTTP_200_OK)\n else:\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n\n# TODO Must add proper permission to only allow user that added the member to\n# access this view\nclass AlbumMemberPhoneNumberView(GenericAPIView):\n serializer_class = AlbumMemberPhoneNumberSerializer\n\n def initial(self, request, pk, user_id):\n self.album = get_object_or_404(Album, pk=pk)\n self.user = get_object_or_404(User, pk=user_id)\n\n def get(self, request, pk, user_id):\n phone_number = self.user.get_primary_phone_number()\n if phone_number:\n phone_number_str = phone_number.phone_number\n else:\n phone_number_str = ''\n responseSerializer = (self.get_serializer_class())(\n {\n 'user': self.user,\n 'phone_number': phone_number_str\n },\n context={'request': request})\n return Response(responseSerializer.data)\n\n\nclass LeaveAlbum(generics.DestroyAPIView):\n model = AlbumMember\n permission_classes = (IsAuthenticated, IsUserInAlbum)\n\n def post(self, request, *args, **kwargs):\n album = self.get_object().album\n response = self.delete(request, *args, **kwargs)\n\n member_leave_album.send(sender=self, user=request.user, album=album)\n\n return response\n\n def get_object(self, queryset=None):\n\n if queryset is None:\n queryset = self.get_queryset()\n\n # pk from URL points to the album, not AlbumMember\n album_pk = self.kwargs.get(self.pk_url_kwarg, None)\n if album_pk is None:\n raise AttributeError(\"Missing Album pk\")\n\n try:\n obj = queryset.get(user=self.request.user, album__pk=album_pk)\n except self.model.DoesNotExist:\n # This should never happen since we already checked if this records exists by checking permissions\n raise Http404(_(u\"Album not found\"))\n return obj\n\n\nclass AlbumMemberUser(generics.DestroyAPIView):\n model = AlbumMember\n permission_classes = (IsAuthenticated, IsUserInAlbum)\n\n def get_object(self, queryset=None):\n if queryset is None:\n queryset = self.get_queryset()\n\n # pk from URL 
points to the album, not AlbumMember\n album_pk = self.kwargs.get(self.pk_url_kwarg, None)\n if album_pk is None:\n raise AttributeError(\"Missing Album pk\")\n\n user_id = self.kwargs['user_id']\n\n try:\n obj = queryset.get(user__id=user_id, album__pk=album_pk)\n except self.model.DoesNotExist:\n # This should never happen since we already checked if this records exists by checking permissions\n raise Http404(_(u\"Album not found\"))\n return obj\n\n\nclass ViewAlbum(GenericAPIView):\n permission_classes = (IsAuthenticated, IsUserInAlbum)\n serializer_class = AlbumViewSerializer\n\n def post(self, request, *args, **kwargs):\n serializer = AlbumViewSerializer(data=request.DATA)\n if not serializer.is_valid():\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n timestamp = serializer.object\n if timestamp.tzinfo is None:\n return Response(u\"Missing timezone\", status=status.HTTP_400_BAD_REQUEST)\n\n album_pk = self.kwargs.get(self.pk_url_kwarg, None)\n if album_pk is None:\n raise AttributeError(\"Missing Album pk\")\n\n obj = AlbumMember.objects.get(user=self.request.user, album__pk=album_pk)\n obj.update_last_access(timestamp)\n\n return Response(status=status.HTTP_204_NO_CONTENT)\n\n\nclass UserList(generics.ListCreateAPIView):\n \"\"\"\n API endpoint that represents a list of users.\n \"\"\"\n model = auth.get_user_model()\n serializer_class = UserSerializer\n\n\nclass CompetitionUserList(generics.ListAPIView):\n def get(self, request):\n \"\"\"\n Use the GET parameter `num_users` to specify how many users\n \"\"\"\n num_users = int(request.GET.get('num_users', 100))\n\n now = timezone.now()\n week_start_day = (now - datetime.timedelta(days=now.weekday())).date()\n week_start = week_start_day\n start_date = UserGlanceScoreSnapshot.objects.get_closest_snapshot(week_start)\n score_deltas = UserGlanceScoreSnapshot.objects.get_score_delta(start_date)\n\n result = []\n count = 0\n for s in score_deltas:\n user = s['user']\n s['user'] = UserSerializer(user).data\n if s['user']['invite_status'] == 'joined':\n result.append(s)\n count += 1\n if count == num_users:\n break\n\n return Response(result)\n\n\nclass UserDetail(generics.RetrieveUpdateAPIView):\n \"\"\"\n API endpoint that represents a single user.\n \"\"\"\n model = auth.get_user_model()\n serializer_class = UserSerializer\n permission_classes = (UserDetailsPagePermission,)\n\n # These attributes can be changed with PUT or PATCH request\n __allowed_attributes_to_change = ['nickname']\n\n def __check_update_attr_permissions(self, request):\n \"\"\"Ensure that only allowed attributes can be changed\"\"\"\n for key, value in request.DATA.iteritems():\n if key not in self.__allowed_attributes_to_change:\n raise PermissionDenied(\"You are not allowed to \"\n \"change '{0}'\".format(key))\n\n def get_queryset(self):\n \"\"\"For PUT and PATCH requests limit queryset to only user\n who makes request\"\"\"\n\n if self.request.method in ['PUT', 'PATCH']:\n return self.model.objects.filter(pk=self.request.user.pk)\n\n return super(UserDetail, self).get_queryset()\n\n def put(self, request, *args, **kwargs):\n \"\"\"PUT handler\"\"\"\n self.__check_update_attr_permissions(request)\n return super(UserDetail, self).put(request, *args, **kwargs)\n\n def patch(self, request, *args, **kwargs):\n \"\"\"PATCH handler\"\"\"\n self.check_permissions(request)\n self.__check_update_attr_permissions(request)\n return super(UserDetail, self).patch(request, *args, **kwargs)\n\n\nclass UserGlanceScore(generics.RetrieveAPIView):\n 
model = auth.get_user_model()\n serializer_class = UserGlanceScoreSerializer\n permission_classes = (UserDetailsPagePermission,)\n\n\nclass UserAvatarDetail(views.APIView):\n \"\"\"View that handles avatar uploads\"\"\"\n permission_classes = (IsAuthenticated, IsSameUserOrStaff)\n\n parser_classes = (PhotoUploadParser,)\n\n def process_upload_request(self, request, uploaded_chunks):\n \"\"\"Uploads user avatar to the randomly picked location\n where we host user avatars and returns Response\"\"\"\n\n avatar_image_filename = settings.AVATAR_FILENAME_FORMAT_STRING.format(\n user_id=request.user.id,\n timestamp=timezone.now().strftime(\"%s\")\n )\n avatar_location_format_str = random.choice(settings.AVATAR_BUCKETS)\n storage, bucket_name, filename = avatar_location_format_str.split(\":\")\n\n # Write uploaded data to temporary file\n # File will be delete once handler is closed\n temp_file = tempfile.TemporaryFile()\n for chunk in uploaded_chunks:\n temp_file.write(chunk)\n temp_file.seek(0)\n\n if storage == \"s3\":\n # Upload to S3\n try:\n conn = S3Connection(settings.AWS_ACCESS_KEY,\n settings.AWS_SECRET_ACCESS_KEY)\n bucket = conn.get_bucket(bucket_name)\n key = Key(bucket, avatar_image_filename)\n key.metadata = {'Content-Type': 'image/jpeg'}\n key.set_contents_from_file(temp_file)\n # Otherwise it's not accessible\n key.make_public()\n key.close(fast=True)\n temp_file.close()\n except:\n temp_file.close()\n raise\n else:\n temp_file.close()\n raise ValueError(\"Failed to upload avatar. \"\n \"Unknown storage '{0}'.\".format(storage))\n\n request.user.avatar_file = avatar_location_format_str.format(\n filename=avatar_image_filename)\n request.user.save()\n\n user_avatar_changed.send(sender=self, user=request.user)\n\n return Response()\n\n def post(self, request, *args, **kwargs):\n return self.process_upload_request(request,\n request.FILES['photo'].chunks())\n\n def put(self, request, *args, **kwargs):\n return self.process_upload_request(request,\n request.DATA.chunks())\n\n\n@supports_last_modified\nclass Albums(GenericAPIView):\n \"\"\"\n This Resource supports the \"If-Modified-Since\" HTTP header.\n\n Clients should remember the value returned in the \"Date\" HTTP header, and\n use this as the value of \"If-Modified-Since\" for future requests.\n\n If there have been no updates to the resource, the server will return an\n empty response body, and a status code of: 304 Not Modified\n \"\"\"\n permission_classes = (IsAuthenticated,)\n\n def initial(self, request, *args, **kwargs):\n if request.user.is_staff:\n self.is_staff = True\n self.albums = Album.objects.all()\n elif request.user.is_authenticated():\n self.is_staff = False\n self.albums = AlbumMember.objects.get_user_memberships(self.request.user.id)\n\n return super(Albums, self).initial(request, *args, **kwargs)\n\n # TODO This could use an optimized query\n def last_modified(self, request):\n if not self.albums:\n return None\n\n if self.is_staff:\n return max([a.last_updated for a in self.albums])\n else:\n return max([m.album.last_updated for m in self.albums])\n\n def get(self, request):\n payload = optimized_views.get_album_list_payload(request.user.id)\n return Response(payload, content_type='application/json')\n\n def post(self, request):\n serializer = AlbumAddSerializer(data=request.DATA)\n if not serializer.is_valid():\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n now = timezone.now()\n\n album = Album.objects.create_album(self.request.user, serializer.object.album_name)\n 
album_add_members(album, request.user, serializer.object.members, now)\n\n payload = optimized_views.get_album_detail_payload(request.user, album)\n return Response(payload, content_type='application/json')\n\n\nclass HidePhotoView(mixins.DestroyModelMixin, generics.MultipleObjectAPIView):\n model = Photo\n serializer_class = DeletePhotosSerializer\n\n def post(self, request, *args, **kwargs):\n self.object_list = self.get_queryset()\n for photo in self.object_list:\n UserHiddenPhoto.objects.create(user=request.user, photo=photo)\n\n\n # Save album revision, because we deleted photo from it.\n # photo.album.save_revision(timezone.now())\n\n return Response(status=status.HTTP_204_NO_CONTENT)\n\n def get_queryset(self):\n serializer = self.get_serializer(data=json.loads(self.request.body))\n if serializer.is_valid():\n queryset = self.model._default_manager.filter(\n photo_id__in=serializer.data.get('photos', []))\n return queryset\n else:\n return self.model._default_manager.none()\n\nclass DeletePhotosView(mixins.DestroyModelMixin, generics.MultipleObjectAPIView):\n model = Photo\n serializer_class = DeletePhotosSerializer\n\n def post(self, request, *args, **kwargs):\n self.object_list = self.get_queryset()\n for photo in self.object_list:\n\n if photo.author == self.request.user:\n # This matches the number of points that the user received when\n # he added the photo\n photo.author.increment_user_glance_score(-3)\n\n album = photo.album\n photo.delete()\n\n # Save album revision, because we deleted a photo from it.\n # Guard against photos that are not attached to any album.\n if album is not None:\n album.save_revision(timezone.now())\n\n return Response(status=status.HTTP_204_NO_CONTENT)\n\n def get_queryset(self):\n serializer = self.get_serializer(data=json.loads(self.request.body))\n if serializer.is_valid():\n queryset = self.model._default_manager.filter(\n photo_id__in=serializer.data.get('photos', []))\n return queryset\n else:\n return self.model._default_manager.none()\n\n\n@api_view(['POST'])\n@permission_classes((IsAuthenticated, ))\ndef photos_upload_request(request, format=None):\n \"\"\"\n Use the GET parameter `num_photos` to specify how many photos you would\n like to upload\n \"\"\"\n num_photos = int(request.GET.get('num_photos', 1))\n\n response_data = []\n for i in xrange(num_photos):\n pending_photo = Photo.objects.upload_request(author=request.user)\n\n if settings.USING_LOCAL_PHOTOS:\n upload_url = reverse('photo-upload', [pending_photo.photo_id], request=request)\n fullres_upload_url = reverse('photo-upload', [pending_photo.photo_id], request=request)\n else:\n upload_url = settings.PHOTO_UPLOAD_SERVER_URL + '/photos/upload/' + pending_photo.photo_id + '/'\n fullres_upload_url = settings.PHOTO_UPLOAD_SERVER_URL + '/photos/upload/' + pending_photo.photo_id + '/original/'\n\n response_data.append({\n 'photo_id': pending_photo.photo_id,\n 'upload_url': upload_url,\n 'fullres_upload_url': fullres_upload_url\n })\n\n return Response(response_data)\n\n\nclass PhotoUpload(views.APIView):\n permission_classes = (IsAuthenticated,)\n\n parser_classes = (PhotoUploadParser,)\n\n @transaction.non_atomic_requests\n # @csrf_exempt\n def dispatch(self, *args, **kwargs):\n return super(PhotoUpload, self).dispatch(*args, **kwargs)\n\n def process_upload_request(self, request, photo_id, uploaded_chunks):\n pending_photo = get_object_or_404(PendingPhoto, pk=photo_id)\n if pending_photo.author != request.user:\n return Response(status=403)\n\n if settings.USING_LOCAL_PHOTOS:\n process_file_upload(pending_photo, uploaded_chunks)\n else:\n # Forward request to 
photo upload server\n r = requests.put(settings.PHOTO_UPLOAD_SERVER_URL + '/photos/upload/' + pending_photo.photo_id + '/original/',\n headers = { 'Authorization': 'Token ' + request.auth.key },\n data = uploaded_chunks)\n r.raise_for_status()\n\n return Response()\n\n def post(self, request, photo_id, format=None):\n return self.process_upload_request(request, photo_id,\n request.FILES['photo'].chunks())\n\n def put(self, request, photo_id, format=None):\n return self.process_upload_request(request, photo_id,\n request.DATA.chunks())\n\n\nclass PhotoCommentView(GenericAPIView):\n permission_classes = (IsAuthenticated,)\n serializer_class = PhotoCommentSerializer\n\n def initial(self, request, photo_id, author_id, client_msg_id, *args, **kwargs):\n self.photo = get_object_or_404(Photo, pk=photo_id)\n self.author = get_object_or_404(User, pk=author_id)\n self.client_msg_id = client_msg_id\n\n return super(PhotoCommentView, self).initial(request, photo_id, author_id, client_msg_id, *args, **kwargs)\n\n def put(self, request, *args, **kwargs):\n if request.user != self.author:\n return Response(status=403)\n\n serializer = self.get_serializer(data=request.DATA, files=request.FILES)\n if serializer.is_valid():\n with self.photo.album.modify(timezone.now(), True) as m:\n m.comment_on_photo(self.photo, request.user, self.client_msg_id, serializer.object['comment'])\n\n return Response(status=status.HTTP_204_NO_CONTENT)\n else:\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n def delete(self, request, *args, **kwargs):\n # TODO Also allow any admins to delete comments\n if request.user != self.author:\n return Response(status=403)\n\n photo_comment = get_object_or_404(PhotoComment, photo=self.photo, author=self.author, client_msg_id=self.client_msg_id)\n with self.photo.album.modify(timezone.now()) as m:\n m.delete_photo_comment(photo_comment)\n\n return Response(status=status.HTTP_204_NO_CONTENT)\n\n\nclass PhotoUserTagView(GenericAPIView):\n permission_classes = (IsAuthenticated,)\n serializer_class = PhotoUserTagSerializer\n\n def initial(self, request, photo_id, tagged_user_id, *args, **kwargs):\n self.photo = get_object_or_404(Photo, pk=photo_id)\n self.tagged_user = get_object_or_404(User, pk=tagged_user_id)\n\n return super(PhotoUserTagView, self).initial(request, photo_id, tagged_user_id, *args, **kwargs)\n\n def put(self, request, *args, **kwargs):\n serializer = self.get_serializer(data=request.DATA, files=request.FILES)\n if serializer.is_valid():\n with self.photo.album.modify(timezone.now()) as m:\n m.photo_tag_user(self.photo, request.user, self.tagged_user, serializer.object['tag_coord_x'], serializer.object['tag_coord_y'])\n\n return Response(status=status.HTTP_204_NO_CONTENT)\n else:\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n def delete(self, request, *args, **kwargs):\n photo_user_tag = get_object_or_404(PhotoUserTag, photo=self.photo, tagged_user=self.tagged_user)\n\n # TODO Also allow any admins to delete user tags\n if not (request.user == photo_user_tag.author or request.user == photo_user_tag.tagged_user):\n return Response(status=403)\n\n with self.photo.album.modify(timezone.now()) as m:\n m.delete_photo_user_tag(photo_user_tag)\n\n return Response(status=status.HTTP_204_NO_CONTENT)\n\n\nclass PhotoGlanceScoreView(GenericAPIView):\n permission_classes = (IsAuthenticated,)\n serializer_class = PhotoGlanceScoreSerializer\n\n def initial(self, request, photo_id, author_id, *args, **kwargs):\n self.photo = 
get_object_or_404(Photo, pk=photo_id)\n self.author = get_object_or_404(User, pk=author_id)\n\n return super(PhotoGlanceScoreView, self).initial(request, photo_id, author_id, *args, **kwargs)\n\n def put(self, request, *args, **kwargs):\n if request.user != self.author:\n return Response(status=403)\n\n serializer = self.get_serializer(data=request.DATA, files=request.FILES)\n if serializer.is_valid():\n with self.photo.album.modify(timezone.now()) as m:\n m.set_photo_user_glance_score_delta(request.user, self.photo, serializer.object['score_delta'])\n\n return Response(status=status.HTTP_204_NO_CONTENT)\n else:\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n\nclass PhotoGlanceView(GenericAPIView):\n permission_classes = (IsAuthenticated,)\n serializer_class = PhotoGlanceSerializer\n\n def initial(self, request, photo_id, *args, **kwargs):\n self.photo = get_object_or_404(Photo, pk=photo_id)\n\n return super(PhotoGlanceView, self).initial(request, photo_id, *args, **kwargs)\n\n def put(self, request, *args, **kwargs):\n serializer = self.get_serializer(data=request.DATA, files=request.FILES)\n if serializer.is_valid():\n with self.photo.album.modify(timezone.now()) as m:\n m.glance_photo(self.photo, request.user, serializer.object['emoticon_name'])\n\n return Response(status=status.HTTP_204_NO_CONTENT)\n else:\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n\nclass PublicAlbum(GenericAPIView):\n permission_classes = (IsAuthenticated,)\n\n def get(self, request):\n public_album_id = settings.PUBLIC_ALBUM_ID\n payload = {\n 'album_id': public_album_id\n }\n return Response(payload, content_type='application/json')\n\n\nclass QueryPhoneNumbers(GenericAPIView):\n permission_classes = (IsAuthenticated,)\n serializer_class = QueryPhonesRequestSerializer\n\n def __process_requested_item(self, user, country_code, existing_contacts,\n phone_number, nickname):\n\n try:\n number = phonenumbers.parse(phone_number, country_code)\n except phonenumbers.phonenumberutil.NumberParseException as e:\n return {'phone_type': 'invalid'}\n\n if not phonenumbers.is_possible_number(number):\n return {'phone_type': 'invalid'}\n\n phone_number_str = phonenumbers.format_number(\n number,\n phonenumbers.PhoneNumberFormat.E164\n )\n\n existing_phone_contact = existing_contacts.get(phone_number_str)\n if existing_phone_contact:\n phone_contact = existing_phone_contact\n apn = phone_contact.anonymous_phone_number\n else:\n try:\n apn = AnonymousPhoneNumber.objects.get(phone_number=phone_number_str)\n except AnonymousPhoneNumber.DoesNotExist:\n is_mobile = is_phone_number_mobile(number)\n apn = AnonymousPhoneNumber.objects.create(\n phone_number=phone_number_str,\n date_created=timezone.now(),\n avatar_file=random_default_avatar_file_data(),\n is_mobile=is_mobile,\n is_mobile_queried=timezone.now()\n )\n\n try:\n phone_contact = apn.phonecontact_set.get(created_by_user=user)\n except PhoneContact.DoesNotExist:\n try:\n owner_user = PhoneNumber.objects.only('user').\\\n get(phone_number=phone_number_str).user\n except PhoneNumber.DoesNotExist:\n owner_user = None\n phone_contact = PhoneContact.objects.create(\n anonymous_phone_number=apn,\n user=owner_user,\n created_by_user=user,\n date_created=timezone.now(),\n contact_nickname=nickname\n )\n\n user = phone_contact.user\n if user:\n if user.get_invite_status() == User.STATUS_JOINED:\n user_id = user.id\n else:\n user_id = None\n else:\n user_id = None\n\n if 
phone_contact.user:\n avatar_url = phone_contact.user.get_avatar_url()\n else:\n avatar_url = apn.get_avatar_url()\n\n data = {}\n data['phone_type'] = 'mobile' if apn.is_mobile else 'landline'\n data['avatar_url'] = avatar_url\n data['user_id'] = user_id\n data['phone_number'] = apn.phone_number\n return data\n\n def post(self, request, *args, **kwargs):\n data = json.loads(str(request.body))\n serializer = self.get_serializer(data=data, files=request.FILES)\n if not serializer.is_valid():\n return Response(serializer.errors,\n status=status.HTTP_400_BAD_REQUEST)\n\n default_country = serializer.data.get('default_country')\n phone_numbers = serializer.data.get('phone_numbers')\n\n q = Q()\n for pn in phone_numbers:\n try:\n number = phonenumbers.parse(pn['phone_number'], default_country)\n except phonenumbers.phonenumberutil.NumberParseException as e:\n continue\n\n phone_number_str = phonenumbers.format_number(\n number,\n phonenumbers.PhoneNumberFormat.E164)\n\n q = q | Q(anonymous_phone_number__phone_number=phone_number_str)\n\n existing_contacts = {}\n for existing_contact in PhoneContact.objects.filter(q, created_by_user=request.user):\n existing_contacts[existing_contact.anonymous_phone_number.phone_number] = existing_contact\n\n response_items = []\n for pn in phone_numbers:\n item = self.__process_requested_item(request.user,\n default_country,\n existing_contacts,\n pn['phone_number'],\n pn['contact_nickname'])\n response_items.append(item)\n\n # return Response(json.dumps(response_items))\n return Response({'phone_number_details': response_items})\n","sub_path":"photos_api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":35171,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"220430039","text":"class Student:\n def get_age(self):\n return self._age\n\n def set_age(self, value):\n if value >= 0 and value <= 88:\n self._age = value\n else:\n raise ValueError('年龄必须在0到88之间')\n\n\nclass Student1:\n @property\n def age(self):\n return self._age\n\n @age.setter # 当前属性值可以修改\n def age(self, value):\n if value >= 0 and value <= 88:\n self._age = value\n else:\n raise ValueError('年龄必须在0到88之间')\n\n @property # 对外暴露的是name,实际上的属性值应该是self._name\n def name(self):\n self._name = '张三'\n return self._name\n\n\nif __name__ == '__main__':\n s = Student1()\n s.age = 23\n # s.name = 'i'\n print(s.age) # age可读可写\n print(s.name) # name只可读\n","sub_path":"Python高级编程/面向对象进阶/property装饰器的使用.py","file_name":"property装饰器的使用.py","file_ext":"py","file_size_in_byte":843,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"388593575","text":"# coding:utf-8\n\nimport csv\nimport requests\nimport threading\nfrom queue import Queue\nfrom lxml import etree\nimport time\n\n'''通过多线程来下载百思不得姐的文本'''\n'''\n生产者消费者模式\n 生产者将通过每个页面的连接来获得每个段子,并放到队列中\n 消费者通过每个段子将内容保存到文件中\n'''\n\nHEADERS = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.94 Safari/537.36'\n}\nNUM = 1\nclass Product(threading.Thread):\n \"\"\"生产者\"\"\"\n\n def __init__(self,page_queue,joke_queue,*args,**kwargs):\n super(Product, self).__init__(*args,**kwargs)\n self.page_queue = page_queue\n self.joke_queue = joke_queue\n\n def run(self):\n while True:\n if self.page_queue.empty():\n break\n else:\n url = self.page_queue.get()\n response = requests.get(url,headers = HEADERS)\n text = response.text\n html = etree.HTML(text)\n jokes = 
html.xpath(\"//div[@class='j-r-list-c-desc']\")\n users = html.xpath(\"//div[@class='u-txt']\") # 里面包含作者还有发布时间\n\n myjoke = []\n for joke in jokes: # 处理每一个笑话返回笑话列表\n joke = joke.xpath(\".//text()\")\n joke = \"\\n\".join(joke).strip()\n myjoke.append(joke)\n\n myuser = []\n mytime = []\n for user in users: # 处理每一个用户和发布时间的列表\n user = user.xpath(\".//text()\")\n u = user[1].strip() # 用户名\n t = user[3].strip() # 发表时间\n myuser.append(u)\n mytime.append(t)\n\n\n li = zip(myuser,mytime,myjoke) # 返回一个以元素为元素的列表\n\n for i in li:\n self.joke_queue.put(i) # 将每一个元组放到列表中 元组(user,time,joke)\n\n print(self.joke_queue.empty()) #------------------------\n print('=' * 50 + \"第%s页下载完成!\" % url.split('/')[-1] + \"=\" * 50)\n\n\nclass Consumer(threading.Thread):\n \"\"\"消费者\"\"\"\n def __init__(self,joke_queue,writer,gLock,*args,**kwargs):\n super(Consumer, self).__init__(*args,**kwargs)\n self.joke_queue = joke_queue\n self.writer = writer\n self.lock = gLock\n\n def run(self):\n global NUM\n while True:\n try:\n if not self.joke_queue.empty(): # 因为前面有了一段积累了一段时间的数据,所以可以在这判断,如果没有了可以直接结束程序\n joke_info = self.joke_queue.get(timeout=40)\n self.lock.acquire()\n self.writer.writerow(joke_info)\n self.lock.release()\n print('保存成功%d 条' %NUM)\n NUM += 1\n else:\n break\n except:\n break\n\ndef main():\n page_queue = Queue(10)\n joke_queue = Queue(200)\n gLock = threading.Lock() #########重点 有括号\n\n fp = open('99_test//bsbdj.csv', 'a', newline='', encoding='utf-8')\n writer = csv.writer(fp)\n writer.writerow(('user', 'time','joke')) # 先写表头\n\n for x in range(1,11):\n url = \"http://www.budejie.com/text/%d\" %x\n # print(url)\n page_queue.put(url)\n\n for i in range(5):\n p = Product(page_queue,joke_queue)\n p.start()\n\n time.sleep(1) # 沉睡一秒是为了使得joke_queue队列中存有数据然后消费者那去消费\n\n for i in range(5):\n c = Consumer(joke_queue,writer,gLock)\n c.start()\n\nif __name__ == '__main__':\n main()","sub_path":"05_爬虫进阶/01_多线程/09_网站保存到cvs文件中.py","file_name":"09_网站保存到cvs文件中.py","file_ext":"py","file_size_in_byte":3908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"458904486","text":"# -*- coding: utf-8 -*-\n\"\"\"\n@brief test log(time=1061s)\n\"\"\"\nimport sys\nimport unittest\nimport warnings\nfrom pyquickhelper.loghelper import fLOG, CustomLog\nfrom pyquickhelper.pycode import get_temp_folder, add_missing_development_version\nfrom pyquickhelper.pycode import fix_tkinter_issues_virtualenv\nfrom pyquickhelper.ipythonhelper import execute_notebook_list_finalize_ut\nimport actuariat_python\n\n\nclass TestLONGNotebookPopulationS6Enonce(unittest.TestCase):\n\n def setUp(self):\n add_missing_development_version(\n [\"pyensae\", \"pymyinstall\", \"pymmails\", \"pyrsslocal\", \"mlstatpy\",\n \"jyquickhelper\"], __file__, hide=True)\n\n def test_long_long_notebook_population_s6_enonce(self):\n fLOG(\n __file__,\n self._testMethodName,\n OutputPrint=__name__ == \"__main__\")\n fix_tkinter_issues_virtualenv()\n from actuariat_python.automation.notebook_test_helper import ls_notebooks, execute_notebooks, clean_function_notebook\n\n if \"travis\" in sys.executable:\n # matplotlib is still failing\n warnings.warn(\n \"travis, unable to test TestNotebookPopulation.test_notebook_sessions\")\n return\n\n temp = get_temp_folder(__file__, \"temp_sessions_long_long\")\n keepnote = [_ for _ in ls_notebooks(\"sessions\")\n if \"seance6_graphes_enonce\" in _]\n self.assertTrue(len(keepnote) > 0)\n clog = CustomLog(temp)\n res = execute_notebooks(temp, keepnote,\n lambda i, n: \"deviner\" not in n,\n 
fLOG=fLOG, clean_function=clean_function_notebook,\n detailed_log=clog)\n execute_notebook_list_finalize_ut(\n res, fLOG=fLOG, dump=actuariat_python)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"_unittests/ut_documentation/test_LONG_LONG_seance6_enonce.py","file_name":"test_LONG_LONG_seance6_enonce.py","file_ext":"py","file_size_in_byte":1862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"143582900","text":"import requests, json, static_vars, pytz, openpyxl\nfrom datetime import datetime, date, timedelta, time\nfrom pytz import timezone, datetime, tzinfo\nfrom openpyxl import workbook\nfrom openpyxl.compat import range\nfrom openpyxl.utils import get_column_letter\n\nheaders = static_vars.BASE_HEADERS\n\ndef inventory():\n inventory_url = static_vars.BASE_URL+\"/procurement/products?conditions=&pageSize=300&page=1\"\n response = requests.request('GET', inventory_url, headers=headers)\n jsondata = json.loads(response.text)\n i = 0\n j = 2\n \n #OpenPYXL stuff\n filename = 'inventory.xlsx'\n wb = openpyxl.load_workbook(filename)\n ws1 = wb.active\n ws1.title = 'Inventory'\n ws1['A1'] = 'Catalogue Item' #jsondata[i]['catalogItem']['identifier']\n ws1['B1'] = 'Dropship Item' #jsondata[i]['dropshipFlag']\n ws1['C1'] = 'Product Class' #jsondata[i]['productClass']\n ws1['D1'] = 'Taxable' #jsondata[i]['taxableFlag']\n ws1['E1'] = 'Last Updated' #jsondata[i]['_info']['lastUpdated']\n ws1['F1'] = 'Last Opportunity Name' #jsondata[i]['opportunity']['name']\n ws1['G1'] = 'Last Opportunity ID' #jsondata[i]['opportunity']['_info']['opportunity_href']\n\n while inventory_url:\n try:\n response = requests.request(\"GET\", inventory_url, headers=headers)\n jsondata = json.loads(response.text)\n while i < len(jsondata):\n ws1['A' + str(j)] = jsondata[i]['catalogItem']['identifier']\n ws1['B' + str(j)] = jsondata[i]['dropshipFlag']\n ws1['C' + str(j)] = jsondata[i]['productClass']\n ws1['D' + str(j)] = jsondata[i]['taxableFlag']\n ws1['E' + str(j)] = jsondata[i]['_info']['lastUpdated']\n if 'opportunity' in jsondata[i]:\n ws1['F' + str(j)] = jsondata[i]['opportunity']['name']\n ws1['G' + str(j)] = jsondata[i]['opportunity']['_info']['opportunity_href']\n i += 1\n j += 1\n else:\n i += 1\n j += 1\n print(\"Row \"+ str(j) + \" updated.\")\n\n inventory_url = response.links['next']['url']\n print(inventory_url)\n print(\"Next Page\")\n i = 0\n\n except KeyError:\n print(\"No next URL.\")\n print(\"End of file reached.\")\n wb.save(filename = filename)\n print(\"\")\n print(\"File saved as \"+ filename +\".\")\n break\n except IndexError:\n print(\"End of file reached\")\n wb.save(filename = filename)\n print(\"File saved as \"+ filename +\".\")\n break\n\ninventory()\n","sub_path":"inventory.py","file_name":"inventory.py","file_ext":"py","file_size_in_byte":2689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"142845464","text":"\nimport tensorflow as tf\nimport random\nimport numpy as np\nimport pandas as pd\nfrom tensorflow.keras import layers\nimport gensim\nfrom sklearn.metrics import *\nfrom tensorflow.keras.models import Model\nimport matplotlib.pyplot as plt\nfrom tensorflow.keras.preprocessing import *\nfrom collections import defaultdict\nfrom tensorflow.keras.preprocessing.text import Tokenizer\nfrom tensorflow.keras.preprocessing.sequence import pad_sequences\n\n\n# In[5]:\n\n\ndef getMetrics(trueLabels, predictedLabels, predictedLabelsProbabilities):\n \"\"\"Takes 
as input true labels, predictions, and prediction confidence scores and computes all metrics\"\"\"\n accuracy = accuracy_score(trueLabels,predictedLabels)\n aucScore = round(roc_auc_score(trueLabels, predictedLabelsProbabilities) * 100, 1)\n precisionLow = round(precision_score(trueLabels, predictedLabels, average=None)[0] * 100, 1)\n precisionHigh = round(precision_score(trueLabels, predictedLabels, average=None)[1] * 100, 1)\n recallLow = round(recall_score(trueLabels, predictedLabels, average=None)[0] * 100, 1)\n recallHigh = round(recall_score(trueLabels, predictedLabels, average=None)[1] * 100, 1)\n fpr, tpr, threshold = roc_curve(trueLabels, predictedLabelsProbabilities)\n f1_scoreLow = round(f1_score(trueLabels,predictedLabels, average=None)[0]*100,1)\n f1_scoreHigh = round(f1_score(trueLabels,predictedLabels, average=None)[1]*100,1)\n \n return accuracy, aucScore, precisionLow, precisionHigh, recallLow, recallHigh, fpr, tpr, f1_scoreLow, f1_scoreHigh\n\n\n# In[18]:\n\n\ndef splitRowIntoWords(row, length):\n \"\"\"Takes a variable length text input and convert it into a list of words with length equal to 'length' in the function parameter\"\"\"\n words = tf.keras.preprocessing.text.text_to_word_sequence(row, filters=' !#$%&()*+,-./:;<=>?@[\\\\]^_{|}~\\t\\n\"\\'', lower=True, split=\" \")\n \n # If length is less than required length, add zeros\n while len(words) < length:\n words.append(0)\n \n # If greater, remove stuff at the end\n if len(words) >= length:\n words = words[:length]\n \n return words\n\n\n# In[63]:\n\n\ndef buildAndTrainModel(model, learningRate, batchSize, epochs, trainingData, validationData, testingData, trainingLabels, testingLabels, validationLabels, MODEL_NAME, isPrintModel=True):\n \"\"\"Take the model and model parameters, build and train the model\"\"\"\n \n # Build and compile model\n # To use other optimizers, refer to: https://keras.io/optimizers/\n # Please do not change the loss function\n \n optimizer = tf.keras.optimizers.Adam(lr=learningRate)\n model.compile(optimizer=optimizer,\n loss='binary_crossentropy', \n metrics=['accuracy', tf.keras.metrics.AUC()])\n \n if isPrintModel:\n print(model.summary())\n \n\n \n for epoch in range(0, epochs):\n model.fit(trainingData, trainingLabels,\n epochs=1,\n verbose=0,\n batch_size=batchSize,\n shuffle=False)\n\n \n # Evaluate model\n trainLoss, trainAccuracy, trainAUC = model.evaluate(trainingData, trainingLabels, verbose=False)\n valLoss, valAccuracy, valAUC = model.evaluate(validationData, validationLabels, verbose=False)\n #model.save('Results/StructuredBinary/{}/epoch_{}'.format(filename,epoch))\n \n ## get metrics\n predictionsProbs = model.predict(testingData)\n predictions = [1 if value >= 0.5 else 0 for value in predictionsProbs]\n predictionsProbs = [value for value in predictionsProbs]\n accuracy, aucScore, precision1, precision2, recall1, recall2, fpr, tpr, f1_score1, f1_score2 = getMetrics(testingLabels, predictions, predictionsProbs)\n Accuracy.append(accuracy)\n AUCScore.append(aucScore)\n precisionLow.append(precision1)\n precisionHigh.append(precision2)\n recallLow.append(recall1)\n recallHigh.append(recall2)\n f1_scoreLow.append(f1_score1)\n f1_scoreHigh.append(f1_score2)\n Epoch.append(epoch)\n batch_size.append(batchSize)\n VALAccuracy.append(valAccuracy)\n ValAuc.append(valAUC)\n \n print('Epoch: {}, AUC: {}, F1_High: {}'.format(epoch, aucScore, f1_score2))\n \n if valAUC >= max(ValAuc):\n max_predictionsProbs = predictionsProbs\n max_predictions = predictions\n \n return Accuracy, 
AUCScore, precisionLow, precisionHigh, recallLow, recallHigh, f1_scoreLow, f1_scoreHigh, Epoch, batch_size, VALAccuracy, ValAuc, max_predictionsProbs, max_predictions\n \n\n \n\n\n# In[35]:\n\n\ndef attachOutputLayerToModel(lastDenseLayer, modelInputs):\n \"\"\"Take as input a dense layer and attach an output layer\"\"\"\n output = layers.Dense(1, activation='sigmoid')(lastDenseLayer)\n model = Model(inputs=modelInputs, outputs=output)\n return model\n\n\n# In[30]:\n\n\ndef createWordLSTM(trainFeatures, validationFeatures, testFeatures, numLstmLayers, vocabularyWords, embeddingsDimensionality, lstmNodes, bidirectional, isPreTrainedEmbeddings): \n \"\"\"Create a word lstm\"\"\"\n \n ## create basic cnn model\n #wordInput = layers.Input(shape=trainFeatures.shape[1:], dtype='float32')\n wordLSTM = wordInput = layers.Input(shape=trainFeatures.shape[1:], dtype='float32', name = 'wordInput')\n \n ## word convolutional neural network\n if isPreTrainedEmbeddings == False:\n # Create embeddings using keras built in function.\n wordLSTM = layers.Embedding(input_dim=vocabularyWords + 1, \n output_dim=embeddingsDimensionality, \n input_length=len(trainFeatures[0]))(wordInput)\n \n # Add CNN layers\n for i in range(numLstmLayers):\n\n #name = 'layer_bidirectional_lstm_{0}'.format(i+1)\n\n if( (i==0) and (numLstmLayers==1) ):\n wordLSTM = layers.Bidirectional(layers.LSTM(units=lstmNodes, dropout=0.2, recurrent_dropout=0.2, \n return_sequences=False))(wordLSTM)\n elif(i != (numLstmLayers-1) ):\n wordLSTM = layers.Bidirectional(layers.LSTM(units=lstmNodes, dropout=0.2, recurrent_dropout=0.2, \n return_sequences=True))(wordLSTM)\n else:\n wordLSTM = layers.Bidirectional(layers.LSTM(units=lstmNodes, dropout=0.2, recurrent_dropout=0.2, \n return_sequences=False))(wordLSTM)\n else:\n \n for i in range(numLstmLayers):\n\n #name = 'layer_bidirectional_lstm_{0}'.format(i+1)\n\n if( (i==0) and (numLstmLayers==1) ):\n wordLSTM = layers.Bidirectional(layers.LSTM(units=lstmNodes, dropout=0.2, recurrent_dropout=0.2,\n return_sequences=False))(wordLSTM)\n elif(i != (numLstmLayers-1) ):\n wordLSTM = layers.Bidirectional(layers.LSTM(units=lstmNodes, dropout=0.2, recurrent_dropout=0.2, \n return_sequences=True))(wordLSTM)\n else:\n wordLSTM = layers.Bidirectional(layers.LSTM(units=lstmNodes, dropout=0.2, recurrent_dropout=0.2, \n return_sequences=False))(wordLSTM)\n \n #wordLSTM = layers.Dropout(0.5)(wordLSTM)\n denseLayer = layers.Dense(128)(wordLSTM)\n \n \n return denseLayer, wordInput\n\n\n# In[31]:\n\n\ndef loadDataForWordsWithPreTrainedEmbeddings(trainText, validationText, testText, EMBEDDING_PATH):\n \"\"\"This function takes as input three text files and and a pre-trained word embedding file and returns arrays containing word embeddings for each word in the text. 
These arrays can be used \n directly in a keras model without the use of keras.layers.Embedding layer.\"\"\"\n \n # Load embeddings\n #embeddingsData = pd.DataFrame(pd.read_csv(EMBEDDING_PATH, \" \",engine='python')).values.tolist()\n #embeddingsDataDictionary = dict([(item[0], item[1:len(item)-1]) for item in embeddingsData]) # create dictionary of key=word, value=word embedding from the embedding file\n #EMBEDDING_SIZE = int(len(embeddingsDataDictionary[random.choice(list(embeddingsDataDictionary.keys()))]))\n \n EMBEDDING_SIZE = 300\n embeddingsDataDictionary = {}\n \n with open(EMBEDDING_PATH, 'r') as f:\n for line in f:\n values = line.split()\n word = values[0]\n vector = np.asarray(values[1:], 'float32')\n embeddingsDataDictionary[word] = vector\n \n ## convert words into word ids\n meanLength = np.mean([len(item.split(\" \")) for item in trainText])\n MAX_SENTENCE_LENGTH = int(meanLength + 10) # we let a sentence go 100 words longer than the mean sentence length.\n \n ## convert train, validation, and test text into lists with word ids\n trainTextWords = [splitRowIntoWords(row, MAX_SENTENCE_LENGTH) for row in trainText]\n trainWordFeatures = []\n for row in trainTextWords:\n rowEmbeddings = [embeddingsDataDictionary[word] if word in embeddingsDataDictionary else [0]*EMBEDDING_SIZE for word in row]\n trainWordFeatures.append(rowEmbeddings)\n\n validationTextWords = [splitRowIntoWords(row, MAX_SENTENCE_LENGTH) for row in validationText]\n validationWordFeatures = []\n for row in validationTextWords:\n rowEmbeddings = [embeddingsDataDictionary[word] if word in embeddingsDataDictionary else [0]*EMBEDDING_SIZE for word in row]\n validationWordFeatures.append(rowEmbeddings)\n\n testTextWords = [splitRowIntoWords(row, MAX_SENTENCE_LENGTH) for row in testText]\n testWordFeatures = []\n for row in testTextWords:\n rowEmbeddings = [embeddingsDataDictionary[word] if word in embeddingsDataDictionary else [0]*EMBEDDING_SIZE for word in row]\n testWordFeatures.append(rowEmbeddings)\n \n return np.array(trainWordFeatures), np.array(validationWordFeatures), np.array(testWordFeatures), None\n\n\n\n# In[32]:\n\n \nfiles = ['TrustPhys_','SubjectiveLit_','Anxiety_','Numeracy_']\n\ncv = ['1','2','3','4','5']\n# In[69]:\n\n\nfor filename in files:\n \n for i in cv:\n \n Accuracy = []\n AUCScore = []\n precisionHigh = []\n precisionLow = []\n recallHigh = []\n recallLow = []\n f1_scoreLow = []\n f1_scoreHigh = []\n Epoch = []\n batch_size = []\n VALAccuracy = []\n ValAuc = []\n \n string_train = 'MedianCV/{}/{}train.txt'.format(i, filename)\n string_test = 'MedianCV/{}/{}test.txt'.format(i, filename)\n string_val = 'MedianCV/{}/{}val.txt'.format(i, filename)\n data_train = pd.read_csv(string_train, header = None, sep = '\\t',encoding='ISO-8859-1').dropna()\n data_test = pd.read_csv(string_test, header = None, sep = '\\t',encoding='ISO-8859-1').dropna()\n data_val = pd.read_csv(string_val, header = None, sep = '\\t',encoding='ISO-8859-1').dropna()\n \n binary_mapper = {-1: 0}\n \n xtrain = data_train[1]\n ytrain = data_train[0]\n ytrain = ytrain.replace(binary_mapper)\n \n xtest = data_test[1]\n ytest = data_test[0]\n ytest = ytest.replace(binary_mapper)\n \n xval = data_val[1]\n yval = data_val[0]\n yval = yval.replace(binary_mapper)\n \n \n # Create input feature arrays\n ##################################################### You can set the embedding path to REPRESENTATION EMBEDDINGS too which you can find in \"RepresentationEmbeddings\" folder ################################\n EMBEDDING_PATH = 
\"glove.840B.300d.txt\"\n \n VocabSize = None\n trainFeatures, validationFeatures, testFeatures, WORDS_TO_KEEP = loadDataForWordsWithPreTrainedEmbeddings(xtrain,xval,xtest, EMBEDDING_PATH)\n \n # Build WordCNN model\n EMBEDDINGS_DIMENSIONALITY = 300 # don't need this now\n LSTM_NODES_IN_LAYERS = 64\n NUM_LSTM_LAYERS = 3\n wordLSTMDenseLayer, wordLSTMInput = createWordLSTM(trainFeatures, validationFeatures, testFeatures, \n NUM_LSTM_LAYERS, WORDS_TO_KEEP, EMBEDDINGS_DIMENSIONALITY, LSTM_NODES_IN_LAYERS, bidirectional=True, isPreTrainedEmbeddings=True)\n \n # Attach the output layer with the model\n wordLSTMModel = attachOutputLayerToModel(wordLSTMDenseLayer, wordLSTMInput)\n \n # Train model\n LEARNING_RATE = 0.0001\n BATCH_SIZE = 32\n EPOCHS = 50\n Accuracy, AUCScore, precisionLow, precisionHigh, recallLow, recallHigh, f1_scoreLow, f1_scoreHigh, Epoch, batch_size, VALAccuracy, VALAUC, max_predictionsProbs, max_predictions = buildAndTrainModel(wordLSTMModel, LEARNING_RATE, BATCH_SIZE, EPOCHS, trainFeatures, validationFeatures, testFeatures, ytrain, ytest, yval, \"WordCNNWithoutPretrainedEmbeddings\")\n \n \n results = {\n 'AUCScore': AUCScore,\n 'Accuracy': Accuracy,\n 'PrecisionHigh': precisionHigh,\n 'PrecisionLow': precisionLow,\n 'RecallHigh': recallHigh,\n 'RecallLow': recallLow,\n 'F1_ScoreLow': f1_scoreLow,\n 'F1_ScoreHigh': f1_scoreHigh,\n 'Epoch': Epoch,\n 'BatchSize': batch_size,\n 'ValAccuracy':VALAccuracy,\n 'ValAuc': VALAUC\n }\n \n predictions_dictionary = {\n 'label': max_predictions,\n 'sentence': xtest,\n 'probs': max_predictionsProbs\n }\n \n # results_df = pd.DataFrame.from_dict(results)\n # results_string = 'Results/WordLSTM/results/{}_{}results.csv'.format(i, filename)\n # results_df.to_csv(results_string, index = False)\n \n # predictions_df = pd.DataFrame.from_dict(predictions_dictionary)\n # predictions_df.to_csv('Results/WordLSTM/predictions/{}_{}_Binarypredictions.csv'.format(i, filename), index=False)\n ","sub_path":"Code/BinaryLSTM.py","file_name":"BinaryLSTM.py","file_ext":"py","file_size_in_byte":13967,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"338807872","text":"\nfrom openpyxl.utils.cell import column_index_from_string\nimport table_python as tp\nimport openpyxl\n\nlog_errs = []\ndef genLogErrors():\n file = open(\"erros.txt\", \"w\")\n file.write(log_errs.__repr__())\n file.close()\n \ndef write_tests(file_name=\"FP.xlsx\", interval=\"E13:E54\"):\n # Escreve os numeros de ensaios na lista\n # values_to_insert['0-> primeira lista da minha lista']['0-> primeiro elemento da minha lista']['corresponde aos elementos da lista querida ->0]\n book_xlsx = tp.loadBook(file_name)\n ws = book_xlsx.active\n listsheet_tup = ws[interval]\n values_to_insert = tp.getArcValues(\"elet\",30,15,5)\n arc_size = len(values_to_insert)\n arc_index = 0\n error = None\n while(arc_index < arc_size+1):\n try:\n for tup_value in listsheet_tup:\n tup_value[0].value = values_to_insert[arc_index][0].value\n book_xlsx.save(file_name)\n arc_index += 1\n except Exception as error:\n log_errs.append(error)\n book_xlsx.close()\n break\n\n\ndef write_FP(interval, type_row, time_values, first_term, ratio, file_name=\"FP.xlsx\", index_list=1):\n # Escreve no fp.xlsx\n # values_to_insert['0-> primeira lista da minha lista']['0-> primeiro elemento da minha lista']['corresponde aos elementos da lista querida ->0]\n book_xlsx = tp.loadBook(file_name)\n ws = book_xlsx.active\n listsheet_tup = ws[interval]\n arc_index = 0\n error = None\n 
if(type_row == \"elet\"):\n values_to_insert = tp.getArcValues(\"elet\",time_values,first_term,ratio)\n arc_size = len(values_to_insert)\n while(arc_index < arc_size+1):\n try:\n for tup_value in listsheet_tup:\n tup_value[0].value = values_to_insert[arc_index][1][index_list]\n book_xlsx.save(file_name)\n arc_index += 1\n except Exception as error:\n log_errs.append(error)\n book_xlsx.close()\n break\n else:\n values_to_insert = tp.getArcValues(\"amp\", time_values,first_term,ratio)\n arc_size = len(values_to_insert)\n while(arc_index < arc_size+1):\n try:\n for tup_value in listsheet_tup:\n if not values_to_insert[arc_index][1]:\n arc_index += 1\n break\n else:\n tup_value[0].value = values_to_insert[arc_index][1][index_list]\n book_xlsx.save(file_name)\n arc_index += 1\n except Exception as error:\n log_errs.append(error)\n book_xlsx.close()\n break\n\n\ndef init():\n try:\n insert_opt = input(\"Deseja inicializar com as config. padrões (S/N)?\\t\")\n if(insert_opt.lower() == 'n'):\n insert_test_interval = input(\"Insira o intervalo para preencher o num's de ensaios: \\t\")\n insert_fname = input(\"Insira o nome do arquivo. (Ex.:FP.xlsx):\\t \")\n insert_interval = input(\"Escreva o intervalo que deseja preencher na tabela (Ex.:E13:E54):\\t\")\n insert_type = input(\"Insira o tipo que quer preencher na tabela (ELET ou AMP):\\t\")\n insert_time = int(input(\"Insira o valor de tempo que deseja (Em segundos):\\t\"))\n insert_fst_term = int(input(\"Insira o primeiro valor de tempo da tabela:\\t\"))\n insert_rat = int(input(\"Insira a razao dos tempos:\\t\"))\n insert_indl = int(input(\"Insira o indice de lista que deseja retornar(Para amp->(0,1); para elet->(0,2)):\\t\"))\n print(\"Registrando num's de ensaio no arquivo... Aguarde\")\n write_tests(insert_fname, insert_test_interval)\n print(\"Registrando dados no arquivo... Aguarde\")\n write_FP(insert_interval,insert_type,insert_time,insert_fst_term,insert_rat,insert_fname,insert_indl)\n print(\"Sucesso!\")\n else:\n print(\"Registrando num's de ensaio no arquivo... Aguarde\")\n write_tests(\"FP.xlsx\")\n print(\"Registrando dados no arquivo... Aguarde\")\n write_FP(\"I13:I54\",\"elet\",30,15,5,\"FP.xlsx\",2)\n write_FP(\"M13:M54\",\"amp\",30,15,5,\"FP.xlsx\")\n print(\"Sucesso!\")\n except Exception as error:\n log_errs.append(error)\n genLogErrors()\n print(\"Falha! Consultar log de erros!\")\n \n\n return 0\n\n\ninit()\n\n","sub_path":"wp v1.4.py","file_name":"wp v1.4.py","file_ext":"py","file_size_in_byte":4451,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"572902857","text":"from ghetto_manim import *\nimport time\n\n# Suppress scientific notation\nnp.set_printoptions(suppress=True)\n\n# Special Shape Classes\n\nclass CircleGroup(ParamShapeGroup):\n def __init__(self, n_circles, radius, color, global_center_x=0, global_center_y=0, start=0, stop=0):\n circles = []\n sep_distance = 20\n group_width = (2*radius*n_circles) + (sep_distance*(n_circles - 1))\n\n a = -group_width / 2\n add_factor = (2*radius) + sep_distance\n for i in range(n_circles):\n x = a + (i * add_factor)\n\n # NOTE: Here, stop>0 does not necessarily mean that circle will show up! It also depends on where the global\n # stop also is. 
If the global stop is greater than this stop, then this stop actually matters.\n\n circles.append(Circle(x + global_center_x, global_center_y, radius, color, start=start, stop=1))\n\n super().__init__(circles, start=start, stop=stop)\n\n\nclass Pendulum(ParamShapeGroup):\n def __init__(self, pos_x, pos_y, length, g, init_theta, rod_color, bob_color):\n # Remember again: `stop' has a different meaning for subobjects in a group. See CircleGroup class.\n rod = Rectangle(-5, length/2, 5, -length/2, rod_color, fill_p=0.2, start=0, stop=1, rot_theta=0,\n anchor_x=0, anchor_y=length/2, drawing_point_delta=0.02)\n bob = Circle(0, -length/2, 30, bob_color, fill_p=0.1, start=0, stop=1, rot_theta=0, anchor_x=0, anchor_y=0,\n drawing_point_delta=0.02)\n super().__init__([rod, bob], start=0, stop=0, global_x=pos_x, global_y=pos_y, global_theta=init_theta,\n global_anchor_x=0, global_anchor_y=length/2)\n\n self.length = length\n self.g = g\n self.omega = 0\n self.alpha = 0\n\n def swing_step(self, t):\n self.alpha = -(self.g / self.length) * np.sin(self.offset_theta)\n self.omega += self.alpha\n self.offset_theta += self.omega\n\n self.rot_matrix = self.__rotation_matrix__(self.offset_theta)\n\n\n# Time step\ndt = 0.01\n\ndef scene1():\n # Object Creation\n objects = []\n n = 3\n for i in range(n):\n p = Pendulum((-window_w / 2) + (((i + 1) / (n + 1)) * window_w), 0, length=200, g=(i+1), init_theta=np.pi / 4,\n rod_color=apple_colors['lightorange'], bob_color=apple_colors['lightteal'])\n objects.append(p)\n\n # Animation Tree Construction\n animator = Animator()\n empty_anim = animator.get_root()\n\n for p in objects:\n draw = animator.add_animation(p.draw_step, [1, smooth], duration=30, parent_animation=empty_anim, delay=0)\n coincide = animator.add_animation(p.translate_step, [-70, -100, smooth], duration=25, parent_animation=draw,\n delay=10)\n swing = animator.add_animation(p.swing_step, [], duration=500, parent_animation=coincide, delay=10)\n\n # ... add more animations\n\n # Scene parameters\n da_Vinci = Painter(w, objects)\n\n # Play em'! 
––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––––\n while True:\n animator.update_step()\n da_Vinci.paint_step()\n\n time.sleep(dt)\n\ndef scene2():\n # Object Creation\n r = 100\n fake_rect = Rectangle(-r, r, r, -r, color=apple_colors['darkpurple'], stop=0)\n real_rect = Rectangle(-r, r, r, -r, color=(0, 0, 0), stop=0)\n circle = Circle(-350, 0, r, color=apple_colors['lightindigo'], stop=0)\n triangle = Triangle(0, 300, apple_colors['lightyellow'], 0.1, stop=0, scale_x=100, scale_y=100, rot_theta=-np.pi/6)\n\n shubadha = Pendulum(300, 0, 200, 3, np.pi/6, apple_colors['lightgreen'], apple_colors['lightred'])\n objects = [real_rect, fake_rect, circle, shubadha, triangle]\n\n # Animation Tree Construction\n animator = Animator()\n empty_anim = animator.get_root()\n\n draw_circ = animator.add_animation(circle.draw_step, [1, smooth], duration=20, parent_animation=empty_anim, delay=0)\n\n fade_in = animator.add_animation(real_rect.fade_color_step, [apple_colors['lightred'], smooth], duration=10, parent_animation=empty_anim, delay=0)\n draw_real = animator.add_animation(real_rect.draw_step, [1, smooth], duration=25, parent_animation=empty_anim, delay=0)\n draw = animator.add_animation(fake_rect.draw_step, [1, smooth], duration=35, parent_animation=draw_real, delay=10)\n undraw = animator.add_animation(fake_rect.undraw, [1, smooth], duration=30, parent_animation=draw_real, delay=25)\n\n morph = animator.add_animation(real_rect.morph_step, [triangle, smooth], duration=40, parent_animation=undraw, delay=10)\n flip = animator.add_animation(real_rect.scale_step, [-1, 1, smooth], duration=20, parent_animation=undraw, delay=10)\n fade_indigo = animator.add_animation(real_rect.fade_color_step, [apple_colors['lightindigo'], smooth], duration=15, parent_animation=undraw, delay=5)\n\n draw_pend = animator.add_animation(shubadha.draw_step, [1, smooth], duration=25, parent_animation=fade_indigo, delay=5)\n swing = animator.add_animation(shubadha.swing_step, [], duration=100, parent_animation=draw_pend, delay=5)\n\n drop_dead = animator.add_animation(shubadha.drop_exit, ['down'], duration=20, parent_animation=fade_indigo, delay=100)\n\n # ... 
add more animations\n\n picasso = Painter(w, objects)\n\n # Run Scene!\n while True:\n animator.update_step()\n picasso.paint_step()\n time.sleep(dt)\n\n\n# Main function\nif __name__ == '__main__':\n scene1()\n # scene2()\n\n# Necessary line for Tkinter\nmainloop()","sub_path":"2D Animation/Pendulums.py","file_name":"Pendulums.py","file_ext":"py","file_size_in_byte":5773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"593568671","text":"import os\nimport sys \n\nimport metaparticle.option as option\nimport metaparticle.builder as builder\nimport metaparticle.runner as runner\n\ndef is_in_docker_container():\n mp_in_container = os.getenv('METAPARTICLE_IN_CONTAINER', None)\n if mp_in_container in ['true', '1']:\n return True\n elif mp_in_container in ['false', '0']:\n return False\n\n try:\n with open('/proc/1/sched', 'rt') as f:\n if '(1,' in f.readline():\n return False\n else:\n return True\n\n except FileNotFoundError:\n return False\n\n\ndef write_dockerfile(package):\n with open('Dockerfile', 'w+t') as f:\n f.write(\"\"\"FROM python:{version}\n\nCOPY ./ /{name}/\nRUN pip install -r /{name}/requirements.txt\n\nCMD python /{name}/example.py\n\"\"\".format(name=package.name, version=package.py_version))\n\n\nclass Containerize(object):\n\n def __init__(self, runtime={}, package={}):\n self.runtime = option.load(option.RuntimeOptions, runtime)\n self.package = option.load(option.PackageOptions, package)\n self.image = \"{repo}/{name}:latest\".format(repo=self.package.repository, name=self.package.name)\n\n self.builder = builder.select(self.package.builder)\n self.runner = runner.select(self.runtime.executor)\n\n def __call__(self, func):\n def wrapped(*args, **kwargs):\n if is_in_docker_container():\n return func(*args, **kwargs)\n\n write_dockerfile(self.package)\n self.builder.build(self.image)\n\n try:\n if self.package.publish:\n self.builder.publish(self.image)\n except KeyError:\n pass\n \n return self.runner.run(self.image, self.package.name, self.runtime)\n return wrapped","sub_path":"python/metaparticle/containerize.py","file_name":"containerize.py","file_ext":"py","file_size_in_byte":1807,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"561553569","text":"import tkinter\nimport os\nimport threading\nfrom tkinter import *\n\n\nclass NewThread (threading.Thread):\n def __init__(self, path):\n threading.Thread.__init__(self)\n self.path = path\n\n def run(self):\n os.system(self.path)\n\n\ndef visualise(path):\n nt = NewThread(path)\n nt.start()\n\n\ndef closeApplication(root):\n root.destroy()\n\n\ndef gui():\n # Hauptframe\n mainFrame = tkinter.Tk()\n mainFrame.wm_title(\"Visualisierungen\")\n\n # Groesse bestimmen\n fr0 = tkinter.Frame(mainFrame)\n fr0.pack()\n\n # Label \"Visualisierungen\"\n bt0 = tkinter.Button(fr0, text=\"Importierung\", width=20, command=lambda: visualise('ImportierungV2.py'))\n bt0.pack(pady=10)\n\n separator2 = Frame(fr0, height=2, bd=1, relief=SUNKEN)\n separator2.pack(fill=X, padx=5, pady=5)\n\n bt1 = tkinter.Button(fr0, text=\"Wochentagsvisualisierung\", width=20, command=lambda: visualise('Wochentagsvisualisierung.py'))\n bt1.pack(pady=10)\n\n bt2 = tkinter.Button(fr0, text=\"Jahresverlauf\", width=20, command=lambda: visualise('Jahresvisualisierung.py'))\n bt2.pack(pady=10)\n\n bt3 = tkinter.Button(fr0, text=\"Lastverteilung (Prozent)\", width=20, command=lambda: visualise('LastverteilungProzent.py'))\n bt3.pack(pady=10)\n\n bt4 = 
tkinter.Button(fr0, text=\"Einzeltagsvisualisierung\", width=20, command=lambda: visualise('Einzeltagesvisualisierung.py'))\n bt4.pack(pady=10)\n\n bt5 = tkinter.Button(fr0, text=\"Tagesvisualisierung\", width=20, command=lambda: visualise('Tagesvisualisierung.py'))\n bt5.pack(pady=10)\n\n bt6 = tkinter.Button(fr0, text=\"Leistungsspitze\", width=20, command=lambda: visualise('Leistungsspitze.py'))\n bt6.pack(pady=10)\n\n bt7 = tkinter.Button(fr0, text=\"Lastverteilung (Anzahl)\", width=20, command=lambda: visualise('LastverteilungAnzahl.py'))\n bt7.pack(pady=10)\n\n bt8 = tkinter.Button(fr0, text=\"Verteilung der Spitzenlasten\", width=20, command=lambda: visualise('VerteilungSpitzenlasten.py'))\n bt8.pack(pady=10)\n\n bt9 = tkinter.Button(fr0, text=\"Summierte Visualisierung\", width=20, command=lambda: visualise('SummierteVisualisierung.py'))\n bt9.pack(pady=10)\n\n bt13 = tkinter.Button(fr0, text=\"XY-Plot\", width=20, command=lambda: visualise('xy.py'))\n bt13.pack(pady=10)\n\n bt10 = tkinter.Button(fr0, text=\"Importierung\", width=20, command=lambda: visualise('Importierung.py'))\n bt10.pack(pady=10)\n\n bt11 = tkinter.Button(fr0, text=\"Sonnenenergierechner\", width=20, command=lambda: visualise('sonnenstand.py'))\n bt11.pack(pady=10)\n\n bt12 = tkinter.Button(fr0, text=\"PV-Dimensionierung\", width=20, command=lambda: visualise('vergleich.py'))\n bt12.pack(pady=10)\n\n separator3 = Frame(fr0, height=2, bd=1, relief=SUNKEN)\n separator3.pack(fill=X, padx=5, pady=5)\n\n fr4 = tkinter.Frame(fr0)\n fr4.pack(expand=1, fill=\"x\")\n bt23 = tkinter.Button(fr4, text=\"Beenden\", width=20, command=lambda: closeApplication(mainFrame))\n bt23.pack(padx=20, pady=10)\n\n mainFrame.mainloop()\n\n\nif __name__ == '__main__':\n gui()\n","sub_path":"src/Uebersicht.py","file_name":"Uebersicht.py","file_ext":"py","file_size_in_byte":3029,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"228289322","text":"# Same code as dijkstras_algorithm2.py without using visited set\nimport sys\nfrom heapq import heappush, heappop\n\n\nclass WeightedGraph:\n def __init__(self, v):\n self.vertices = v\n self.graph = [[0]*self.vertices for _ in range(self.vertices)]\n\n def printSolution(self, dist):\n print(\"Vertex \\tDistance from Source\")\n for node in range(self.vertices):\n print(node, \"\\t\", dist[node])\n\n def dijkstra(self, src):\n dist = [sys.maxsize]*self.vertices\n dist[src] = 0\n pq = [(dist[src], src)]\n while(pq):\n node = heappop(pq)\n v = node[1]\n print(\"popped node =\", v)\n dist_v = node[0]\n if dist_v > dist[v]:\n continue\n for neighbor in range(self.vertices):\n if (self.graph[v][neighbor] > 0):\n if dist[neighbor] > (dist_v + self.graph[v][neighbor]):\n dist[neighbor] = (dist_v + self.graph[v][neighbor])\n heappush(pq, (dist[neighbor], neighbor))\n print(pq)\n self.printSolution(dist)\n\n\n# Output\ng = WeightedGraph(9)\ng.graph = [[0, 4, 0, 0, 0, 0, 0, 8, 0],\n [4, 0, 8, 0, 0, 0, 0, 11, 0],\n [0, 8, 0, 7, 0, 4, 0, 0, 2],\n [0, 0, 7, 0, 9, 14, 0, 0, 0],\n [0, 0, 0, 9, 0, 10, 0, 0, 0],\n [0, 0, 4, 14, 10, 0, 2, 0, 0],\n [0, 0, 0, 0, 0, 2, 0, 1, 6],\n [8, 11, 0, 0, 0, 0, 1, 0, 7],\n [0, 0, 2, 0, 0, 0, 6, 7, 0]]\ng.dijkstra(0)\n","sub_path":"Graphs/dijkstras_algorithm3.py","file_name":"dijkstras_algorithm3.py","file_ext":"py","file_size_in_byte":1536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"363162855","text":"\"\"\"\r\nprint(\"primeiro programa\")\r\n\r\nt1 = 3\r\nt2 = 
3.0\r\nt3 = \"3.0\"\r\nt4 = True\r\n\r\nprint(type(t1))\r\nprint(type(t2))\r\nprint(type(t3))\r\nprint(type(t4))\r\n\"\"\"\r\n\r\nmsg1 = \"meu nome é \"\r\nmsg2 = \"Vinícius\"\r\nRG = 40227419880\r\nprint(\"\\nOlá \" + msg1 + msg2 + \", meu RG é \" + str(RG))\r\n\r\n# OU\r\n\r\nnome = \"Vinicius\"\r\nMyRG = 40227419880\r\nprint(\"\\nMeu nome é %s e meu RG é %s\" %(nome, MyRG))\r\nprint(\"Meu nome é %s e meu RG é %d\" %(nome, MyRG))\r\n\r\nn1 = 5\r\nn2 = 2\r\nprint(\"\\n\\t\\tTotal \" + str(n1 + n2)) # Primeiro acontece oq esta no parentes (a soma) depois vira String.","sub_path":"Second Semester/Python Class/Classes/class01.py","file_name":"class01.py","file_ext":"py","file_size_in_byte":555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"45652042","text":"from jinja2 import Template\nfrom toolz import groupby\n\nimport glob\nimport janus\nimport os\nimport re\n\n\ndef make_doc_index(root_path: str, doc_index_path: str):\n d = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'docs')\n template = os.path.join(d, 'docsIndexTemplate.jinja')\n with open(template) as f:\n template = Template(f.read())\n\n project_name = root_path.split('/')[-1]\n src_folder = os.path.join(root_path, project_name)\n if not root_path.endswith('/'):\n root_path = root_path + '/'\n fs = [x.replace(root_path, '') \n for x in glob.glob('{}/**'.format(src_folder), recursive=True)\n if ('__pycache__' not in x) and ('__init__' not in x)]\n\n fs = [x for x in fs if not os.path.isdir(x)]\n fg = groupby(key=lambda s: os.path.dirname(s), seq=fs)\n \n S = ''\n for k in sorted(fg.keys()):\n s = 'Module {}'.format(k)\n S += s + '\\n'\n S += '='*len(s) + '\\n\\n'\n\n for x in sorted(fg[k]):\n q = x[:-3].replace('/', '.') # -3 is for \".py\"\n t = ' '.join(q.split('.')[-1].split('_')).capitalize()\n\n S += t + '\\n'\n S += '*'*len(t) + '\\n'\n S += '.. 
automodule:: {}'.format(q) + '\\n'\n S += ' :members:\\n'\n S += ' :undoc-members:\\n'\n S += ' :show-inheritance:\\n'\n S += ' :noindex:\\n\\n'\n\n rendered_project_name = '{0}\\n{1}'.format(project_name, '='*len(project_name))\n S = template.render(project_name=rendered_project_name, refs=S)\n with open(doc_index_path, 'w') as f:\n f.write(S)","sub_path":"ocean/docs_index_generator.py","file_name":"docs_index_generator.py","file_ext":"py","file_size_in_byte":1591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"117659084","text":"\"\"\"Colin's notes on recursion\"\"\"\n\ndef stairs(n):\n \"\"\"Give the number of ways to take n steps, given that at each step, you can choose to take 1, 2\n >>> stairs(2)\n 2\n >>> stairs(4)\n 5\n >>> stairs(1)\n 1\n >>> stairs(3)\n 3\n \"\"\"\n ### Your code here ###\n if n <= 0:\n return 0\n elif n == 1:\n return 1\n elif n == 2:\n return 2\n else:\n return stairs(n-1) + stairs(n-2)\n\ndef kstairs(n, k):\n \"\"\"Give the number of ways to take n steps, given that at each step, you can choose to take 1,2,3,k-2,k-1 or k steps,\n >>> kstairs(5, 2)\n 8\n >>> kstairs(5, 5)\n 16\n >>> kstairs(10, 5)\n 464\n \"\"\"\n if n == 0:\n return 0\n if n <= k:\n return 2**(n-1)\n return sum([kstairs(n - i, k) for i in range(1, k + 1)])\n\ndef permutations(lst):\n \"\"\"List all permutations of the given list\n enumerate() function might be helpful\n >>> permutations([\"angie\", \"cat\"])\n [[\"angie\", \"cat\"], [\"cat\", \"angie\"]]\n >>> permutations([1, 2, 3])\n [[1, 2, 3], [1, 3, 2], [2, 1, 3], [2, 3, 1], [3, 1, 2], [3, 2, 1]]\n \"\"\"\n ### Your code here ###\n if len(lst) <= 1:\n return [lst]\n total = []\n for i, k in enumerate(lst):\n total.extend([[k] + p for p in permutations(lst[:i] + lst[i+1:])])\n return total\n\n\nclass Tree(object):\n \"\"\" A tree with internal values. 
\"\"\"\n def __init__(self, entry, left=None, right=None):\n self.entry = entry\n self.left = left\n self.right = right\n\n def __repr__(self):\n args = repr(self.entry)\n if self.left or self.right:\n args += \", {0}, {1}\".format(repr(self.left), repr(self.right))\n return \"Tree({0})\".format(args)\n\n def print(self):\n def print_helper(tree, depth):\n if tree.right:\n print_helper(tree.right, depth + 1)\n print(\"{0}{1}\".format(\"\\t\" * depth, tree.entry))\n if tree.left:\n print_helper(tree.left, depth + 1)\n print_helper(self, 0)\n\n\ndef tree_to_reversed_list(tree):\n \"\"\"\n >>> t = Tree(5, Tree(1, None, Tree(4)), Tree(7, Tree(6), Tree(8)))\n >>> tree_to_reversed_list(t)\n [8, 7, 6, 5, 4, 1]\n \"\"\"\n ### Your code here ###\n lst = []\n if tree is not None:\n if tree.right:\n lst.extend(tree_to_reversed_list(tree.right))\n lst.append(tree.entry)\n if tree.left:\n lst.extend(tree_to_reversed_list(tree.left))\n return lst\n","sub_path":"ProgrammingCourses/CS61A/week03/SR_colin_tree_recursion.py","file_name":"SR_colin_tree_recursion.py","file_ext":"py","file_size_in_byte":2468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"134114944","text":"\"\"\"\nThis is the core logic for the Free-text Response XBlock\n\"\"\"\n\nimport os\n\nimport pkg_resources\nfrom django.utils.translation import ugettext as _\nfrom django.utils.translation import ungettext\nfrom enum import Enum\nfrom xblock.core import XBlock\nfrom xblock.fields import Boolean\nfrom xblock.fields import Float\nfrom xblock.fields import Integer\nfrom xblock.fields import List\nfrom xblock.fields import Scope\nfrom xblock.fields import String\nfrom xblock.fragment import Fragment\nfrom xblock.validation import ValidationMessage\nfrom xblockutils.studio_editable import StudioEditableXBlockMixin\n\n\nclass FreeTextResponse(StudioEditableXBlockMixin, XBlock):\n # pylint: disable=too-many-ancestors, too-many-instance-attributes\n \"\"\"\n Enables instructors to create questions with free-text responses.\n \"\"\"\n @staticmethod\n def workbench_scenarios():\n \"\"\"\n Gather scenarios to be displayed in the workbench\n \"\"\"\n scenarios = [\n ('Free-text Response XBlock',\n '''\n \n \n \n '''),\n ]\n return scenarios\n\n display_correctness = Boolean(\n display_name=_('Display Correctness?'),\n help=_(\n 'This is a flag that indicates if the indicator '\n 'icon should be displayed after a student enters '\n 'their response'\n ),\n default=True,\n scope=Scope.settings,\n )\n display_name = String(\n display_name=_('Display Name'),\n help=_(\n 'This is the title for this question type'\n ),\n default=_('Free-text Response'),\n scope=Scope.settings,\n )\n fullcredit_keyphrases = List(\n display_name=_('Full-Credit Key Phrases'),\n help=_(\n 'This is a list of words or phrases, one of '\n 'which must be present in order for the student\\'s answer '\n 'to receive full credit'\n ),\n default=[],\n scope=Scope.settings,\n )\n halfcredit_keyphrases = List(\n display_name=_('Half-Credit Key Phrases'),\n help=_(\n 'This is a list of words or phrases, one of '\n 'which must be present in order for the student\\'s answer '\n 'to receive half credit'\n ),\n default=[],\n scope=Scope.settings,\n )\n max_attempts = Integer(\n display_name=_('Maximum Number of Attempts'),\n help=_(\n 'This is the maximum number of times a '\n 'student is allowed to attempt the problem'\n ),\n default=0,\n values={'min': 1},\n scope=Scope.settings,\n )\n max_word_count = Integer(\n 
display_name=_('Maximum Word Count'),\n help=_(\n 'This is the maximum number of words allowed for this '\n 'question'\n ),\n default=10000,\n values={'min': 1},\n scope=Scope.settings,\n )\n min_word_count = Integer(\n display_name=_('Minimum Word Count'),\n help=_(\n 'This is the minimum number of words required '\n 'for this question'\n ),\n default=1,\n values={'min': 1},\n scope=Scope.settings,\n )\n prompt = String(\n display_name=_('Prompt'),\n help=_(\n 'This is the prompt students will see when '\n 'asked to enter their response'\n ),\n default=_('Please enter your response within this text area'),\n scope=Scope.settings,\n )\n submitted_message = String(\n display_name=_('Submission Received Message'),\n help=_(\n 'This is the message students will see upon '\n 'submitting their response'\n ),\n default=_('Your submission has been received'),\n scope=Scope.settings,\n )\n weight = Integer(\n display_name=_('Weight'),\n help=_(\n 'This assigns an integer value representing '\n 'the weight of this problem'\n ),\n default=0,\n values={'min': 1},\n scope=Scope.settings,\n )\n\n count_attempts = Integer(\n default=0,\n scope=Scope.user_state,\n )\n score = Float(\n default=0.0,\n scope=Scope.user_state,\n )\n student_answer = String(\n default='',\n scope=Scope.user_state,\n )\n\n has_score = True\n\n editable_fields = (\n 'display_name',\n 'prompt',\n 'weight',\n 'max_attempts',\n 'display_correctness',\n 'min_word_count',\n 'max_word_count',\n 'fullcredit_keyphrases',\n 'halfcredit_keyphrases',\n 'submitted_message',\n )\n\n def student_view(self, context=None):\n # pylint: disable=unused-argument\n \"\"\"\n Build the fragment for the default student view\n \"\"\"\n view_html = FreeTextResponse.get_resource_string('view.html')\n view_html = view_html.format(\n self=self,\n indicator_class=self._get_indicator_class(),\n problem_progress=self._get_problem_progress(),\n used_attempts_feedback=self._get_used_attempts_feedback(),\n submit_class=self._get_submit_class(),\n indicator_visibility_class=self._get_indicator_visiblity_class(),\n word_count_message=self._get_word_count_message(\n self.count_attempts\n ),\n submitted_message=self._get_submitted_message(),\n )\n fragment = self.build_fragment(\n html_source=view_html,\n paths_css=[\n 'view.less.min.css',\n ],\n paths_js=[\n 'view.js.min.js',\n ],\n fragment_js='FreeTextResponseView',\n )\n return fragment\n\n @classmethod\n def _generate_validation_message(cls, msg):\n \"\"\"\n Helper method to generate a ValidationMessage from\n the supplied string\n \"\"\"\n result = ValidationMessage(\n ValidationMessage.ERROR,\n _(msg)\n )\n return result\n\n def validate_field_data(self, validation, data):\n \"\"\"\n Validates settings entered by the instructor.\n \"\"\"\n if data.weight < 0:\n msg = FreeTextResponse._generate_validation_message(\n 'Weight Attempts cannot be negative'\n )\n validation.add(msg)\n if data.max_attempts < 0:\n msg = FreeTextResponse._generate_validation_message(\n 'Maximum Attempts cannot be negative'\n )\n validation.add(msg)\n if data.max_word_count < 0:\n msg = FreeTextResponse._generate_validation_message(\n 'Maximum Word Count cannot be negative'\n )\n validation.add(msg)\n if data.min_word_count < 1:\n msg = FreeTextResponse._generate_validation_message(\n 'Minimum Word Count cannot be less than 1'\n )\n validation.add(msg)\n if data.min_word_count > data.max_word_count:\n msg = FreeTextResponse._generate_validation_message(\n 'Minimum Word Count cannot be greater than Max Word Count'\n )\n 
validation.add(msg)\n if not data.submitted_message:\n msg = FreeTextResponse._generate_validation_message(\n 'Submission Received Message cannot be blank'\n )\n validation.add(msg)\n\n @classmethod\n def get_resource_string(cls, path):\n \"\"\"\n Retrieve string contents for the file path\n \"\"\"\n path = os.path.join('public', path)\n resource_string = pkg_resources.resource_string(__name__, path)\n return resource_string.decode('utf8')\n\n def get_resource_url(self, path):\n \"\"\"\n Retrieve a public URL for the file path\n \"\"\"\n path = os.path.join('public', path)\n resource_url = self.runtime.local_resource_url(self, path)\n return resource_url\n\n def build_fragment(\n self,\n html_source=None,\n paths_css=[],\n paths_js=[],\n urls_css=[],\n urls_js=[],\n fragment_js=None,\n ):\n # pylint: disable=dangerous-default-value, too-many-arguments\n \"\"\"\n Assemble the HTML, JS, and CSS for an XBlock fragment\n \"\"\"\n fragment = Fragment(html_source)\n for url in urls_css:\n fragment.add_css_url(url)\n for path in paths_css:\n url = self.get_resource_url(path)\n fragment.add_css_url(url)\n for url in urls_js:\n fragment.add_javascript_url(url)\n for path in paths_js:\n url = self.get_resource_url(path)\n fragment.add_javascript_url(url)\n if fragment_js:\n fragment.initialize_js(fragment_js)\n return fragment\n\n def _get_indicator_visiblity_class(self):\n \"\"\"\n Returns the visibility class for the correctness indicator html element\n \"\"\"\n if self.display_correctness:\n result = ''\n else:\n result = 'hidden'\n return result\n\n def _get_word_count_message(self, ignore_attempts=False):\n \"\"\"\n Returns the word count message based on the student's answer\n \"\"\"\n result = ''\n if (\n (ignore_attempts or self.count_attempts > 0) and\n (not self._word_count_valid())\n ):\n result = ungettext(\n \"Invalid Word Count. Your response must be \"\n \"between {min} and {max} word.\",\n \"Invalid Word Count. 
Your response must be \"\n \"between {min} and {max} words.\",\n self.max_word_count,\n ).format(\n min=self.min_word_count,\n max=self.max_word_count,\n )\n return result\n\n def _get_indicator_class(self):\n \"\"\"\n Returns the class of the correctness indicator element\n \"\"\"\n result = ''\n if self.count_attempts == 0:\n result = 'unanswered'\n elif self._determine_credit() == Credit.zero:\n result = 'incorrect'\n else:\n result = 'correct'\n return result\n\n def _word_count_valid(self):\n \"\"\"\n Returns a boolean value indicating whether the current\n word count of the user's answer is valid\n \"\"\"\n word_count = len(self.student_answer.split())\n result = (\n word_count <= self.max_word_count and\n word_count >= self.min_word_count\n )\n return result\n\n @classmethod\n def _is_at_least_one_phrase_present(cls, phrases, answer):\n \"\"\"\n Determines if at least one of the supplied phrases is\n present in the given answer\n \"\"\"\n answer = answer.lower()\n matches = [\n phrase.lower() in answer\n for phrase in phrases\n ]\n return any(matches)\n\n def _get_problem_progress(self):\n \"\"\"\n Returns a statement of progress for the XBlock, which depends\n on the user's current score\n \"\"\"\n if self.weight == 0:\n result = ''\n elif self.score == 0.0:\n result = \"({})\".format(\n ungettext(\n \"{weight} point possible\",\n \"{weight} points possible\",\n self.weight,\n ).format(\n weight=self.weight,\n )\n )\n else:\n score_string = '{0:g}'.format(self.score)\n result = \"({})\".format(\n ungettext(\n \"{score_string}/{weight} point\",\n \"{score_string}/{weight} points\",\n self.weight,\n ).format(\n score_string=score_string,\n weight=self.weight,\n )\n )\n return result\n\n def _compute_score(self):\n \"\"\"\n Computes and publishes the user's core for the XBlock\n based on their answer\n \"\"\"\n credit = self._determine_credit()\n if credit == Credit.full:\n self.score = self.weight\n elif credit == Credit.half:\n self.score = float(self.weight)/2\n else:\n self.score = 0.0\n self.runtime.publish(\n self,\n 'grade',\n {\n 'value': self.score,\n 'max_value': self.weight\n }\n )\n\n def _determine_credit(self):\n \"\"\"\n Helper Method that determines the level of credit that\n the user should earn based on their answer\n \"\"\"\n result = None\n if self.student_answer == '' or not self._word_count_valid():\n result = Credit.zero\n elif not self.fullcredit_keyphrases \\\n and not self.halfcredit_keyphrases:\n result = Credit.full\n elif FreeTextResponse._is_at_least_one_phrase_present(\n self.fullcredit_keyphrases,\n self.student_answer\n ):\n result = Credit.full\n elif FreeTextResponse._is_at_least_one_phrase_present(\n self.halfcredit_keyphrases,\n self.student_answer\n ):\n result = Credit.half\n else:\n result = Credit.zero\n return result\n\n def _get_used_attempts_feedback(self):\n \"\"\"\n Returns the text with feedback to the user about the number of attempts\n they have used if applicable\n \"\"\"\n result = ''\n if self.max_attempts > 0:\n result = ungettext(\n 'You have used {count_attempts} of {max_attempts} submission',\n 'You have used {count_attempts} of {max_attempts} submissions',\n self.max_attempts,\n ).format(\n count_attempts=self.count_attempts,\n max_attempts=self.max_attempts,\n )\n return result\n\n def _get_submit_class(self):\n \"\"\"\n Returns the css class for the submit button\n \"\"\"\n result = ''\n if self.max_attempts > 0 and self.count_attempts >= self.max_attempts:\n result = 'nodisplay'\n return result\n\n def 
_get_submitted_message(self):\n \"\"\"\n Returns the message to display in the submission-received div\n \"\"\"\n result = ''\n if self.count_attempts > 0 and self._word_count_valid():\n result = self.submitted_message\n return result\n\n @XBlock.json_handler\n def submit(self, data, suffix=''):\n # pylint: disable=unused-argument\n \"\"\"\n Processes the user's submission\n \"\"\"\n if self.max_attempts > 0 and self.count_attempts >= self.max_attempts:\n raise StandardError(\n _(\n 'User has already exceeded the '\n 'maximum number of allowed attempts'\n )\n )\n self.student_answer = data['student_answer']\n if self._word_count_valid():\n if self.max_attempts == 0:\n self.count_attempts = 1\n else:\n self.count_attempts += 1\n self._compute_score()\n result = {\n 'status': 'success',\n 'problem_progress': self._get_problem_progress(),\n 'indicator_class': self._get_indicator_class(),\n 'used_attempts_feedback': self._get_used_attempts_feedback(),\n 'submit_class': self._get_submit_class(),\n 'word_count_message': self._get_word_count_message(\n ignore_attempts=True\n ),\n 'submitted_message': self._get_submitted_message(),\n }\n return result\n\n\nclass Credit(Enum):\n # pylint: disable=too-few-public-methods\n \"\"\"\n An enumeration of the different types of credit a submission can be\n awareded: Zero Credit, Half Credit, and Full Credit\n \"\"\"\n zero = 0\n half = 1\n full = 2\n","sub_path":"freetextresponse/freetextresponse.py","file_name":"freetextresponse.py","file_ext":"py","file_size_in_byte":16132,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"362247815","text":"# This module implements a collection of functions designed to estimate the\n# relevance of search results, thus helping naming functions to determine the\n# best match when naming a picture.\n\nimport re\nimport json\nimport math\nfrom urllib.parse import urlparse\n\nfrom .utils import get_image_size\nfrom .search import SearchResult\n\n# In order to improve your results, please update 'patterns.json' file\n# JSON fields:\n# specific -> List of specific patterns, usually relevant with the source of\n# the images. GREATLY BOOSTS RESULTS\n# generic -> List of generic patterns, usually applicable with different\n# sources. 
BOOSTS RESULTS\n# avoid -> List of words that negatively impacts results\n# YOU can use '%s' in patterns to refer to a website's name\n\npatterns_file = 'patterns.json'\n\n# Implementation of a 2D Vector\nclass Vector():\n \"\"\"Class used to describe 2D vectors.\"\"\"\n def __init__(self, x, y):\n self.x = x\n self.y = y\n def norm(self):\n return math.sqrt(self.x**2 + self.y**2)\n def __add__(self, v):\n return(Vector(self.x + v.x, self.y + v.y))\n def __sub__(self, v):\n return(Vector(self.x - v.x, self.y - v.y))\n def __mul__(self, v):\n return ((self.x * v.x) + (self.y * v.y))\n\ndef dimensions_similarity(dim1, dim2):\n \"\"\"\n Returns a value representing the similarity between two images' sizes.\n The closer to 1, the more similar the images in terms of dimensions.\n \"\"\"\n u = Vector(int(dim1[0]), int(dim1[1]))\n v = Vector(int(dim2[0]), int(dim2[1]))\n\n diff = (u-v).norm()\n normal_diff = diff / max(u.norm(), v.norm())\n return(1 - normal_diff)\n\ndef build_pattern(expr, location):\n \"\"\"\n Returns a pattern obtained by replacing the placeholder of an expression\n 'expr' with the host name of the website at 'location'.\n\n Arguments:\n - expr: a regular expression containing a placeholder\n - location: a valid URL\n \"\"\"\n if (expr.find('%s') == -1):\n return expr\n else:\n if location[:4] == 'http':\n site = urlparse(location).hostname\n else:\n site = urlparse('http://' + location).hostname\n site = site.split('.')\n if (site[0] == 'www'):\n site = site[1]\n else:\n site = site[0]\n return expr % site\n\ndef apply_bonus(value, title, pattern_list):\n \"\"\"\n Applies a given value if 'title' matches a pattern of 'pattern_list'\n \"\"\"\n for pattern in pattern_list:\n if re.findall(pattern, title, re.IGNORECASE):\n return value\n # Default value on loop end if no value has been returned beforehand\n return 1\n\ndef pattern_bonus(title, location):\n \"\"\"\n Returns a multiplicative bonus factor when a result's 'title' matches one\n of the patterns contained in 'patterns_file' file.\n \"\"\"\n val = 1\n with open(patterns_file, 'r') as f:\n data = json.loads(f.read())\n val = val * apply_bonus(2, title,\n [build_pattern(e, location) for e in data['specific']])\n val = val * apply_bonus(1.5, title,\n [build_pattern(e, location) for e in data['generic']])\n val = val * apply_bonus(0.5, title,\n [build_pattern(e, location) for e in data['avoid']])\n return val\n\ndef hint_bonus(title, hint, min_size=1):\n \"\"\"\n Returns a multiplicative bonus factor when the words in a title match those\n of a given hint (partially or completely).\n The optional 'min_size' allows the user to filter out words shorter than it.\n \"\"\"\n # Convert the hint in a list of words (> min_size)\n hint_words = hint.split(' ')\n for word in hint_words:\n if len(word) < min_size:\n hint_words.remove(word)\n\n # Count how many of the words appear in the title\n count = 0\n for word in hint_words:\n # Match only words in the title\n # (we do not use 'find' function that may match sub-words too)\n if word.lower() in title.lower():\n count = count + 1\n return (1 + count/len(hint_words))\n\ndef score(result, original_file=None, hint=''):\n score = pattern_bonus(result.title, result.location)\n score = score * hint_bonus(result.title, hint, min_size=2)\n if original_file:\n score = score * dimensions_similarity(result.dimensions,\n get_image_size(original_file))\n return 
score\n","sub_path":"relevance.py","file_name":"relevance.py","file_ext":"py","file_size_in_byte":4408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"618353219","text":"import os\nimport stat\nimport sys\n\nfrom kubernetes import client, config\nfrom kubernetes.client import ApiException\nfrom kubernetes.stream import stream\nimport subprocess\n\nfrom colorama import Fore, Back, Style\nfrom termcolor import colored, cprint\nfrom tabulate import tabulate\n\n\nclass PythonKubePodExecTool(object):\n\n def __init__(self):\n self.config = config.load_kube_config()\n self.v1 = client.CoreV1Api()\n self.pod_list = []\n\n def read_namespaced_pod(self,pod_name, name_space=\"default\"):\n try:\n resp = self.v1.read_namespaced_pod(pod_name,name_space)\n print(resp)\n return True\n except ApiException as e:\n if e.status != 404:\n print(\"Unknown error: %s\" % e)\n exit(1)\n\n def list_all_pods_all_namespaces(self):\n print(\"\\nListing all the pods\\n\")\n data = self.v1.list_pod_for_all_namespaces(watch=False)\n for i in data.items:\n self.pod_list.append([i.status.pod_ip, i.metadata.namespace, i.metadata.name])\n print(tabulate(self.pod_list, headers=['IP', 'NAMESPACE', 'POD_NAME']))\n return True\n\n def list_available_pods_namespaces(self):\n print(\"\\nSELECT FROM THE FOLLOWING PODS, NAMESPACES\")\n if self.pod_list:\n print(tabulate(self.pod_list, headers=['IP', 'NAMESPACE', 'POD_NAME']))\n return True\n self.list_all_pods_all_namespaces()\n return True\n\n def exec_in_pod(self, pod=None, namespace=\"default\", usr_command=\"echo this is an echo \"\n \"from the pod\"):\n \"\"\"\n\n :param pod: name of pod (str)\n :param namespace: string value\n :param usr_command: unix command\n :return: True\n \"\"\"\n try:\n exec_command = ['/bin/sh']\n resp = stream(self.v1.connect_get_namespaced_pod_exec,\n pod,\n namespace,\n command=exec_command,\n stderr=True, stdin=True,\n stdout=True, tty=False,\n _preload_content=False)\n\n commands = [\n usr_command\n ]\n\n while resp.is_open():\n resp.update(timeout=1)\n if resp.peek_stdout():\n print(\"STDOUT:\\n\")\n print(\"%s\" % resp.read_stdout())\n if resp.peek_stderr():\n print(\"STDERR: %s\" % resp.read_stderr())\n if commands:\n c = commands.pop(0)\n print(\"Running command: %s\\n\" % c)\n resp.write_stdin(c + \"\\n\")\n else:\n break\n\n resp.write_stdin(\"date\\n\")\n print(\"date: %s\" % resp.read_stdout())\n\n resp.write_stdin(\"whoami\\n\")\n print(\"user: %s\" % resp.read_stdout())\n\n resp.close()\n return resp\n\n except ApiException as e:\n if e.status != 404:\n print(\"Unknown error: %s\" % e)\n\n exit(1)\n\n\ndef main():\n os.chmod(\"run.sh\", stat.S_IEXEC)\n\n with open('run.sh', 'rb') as file:\n script = file.read()\n subprocess.call(script, shell=True)\n obj = PythonKubePodExecTool()\n\n print_red_on_white = lambda x: cprint(x, 'red', 'on_white')\n\n while True:\n print(\"\\n\")\n print_red_on_white('########################################################')\n print_red_on_white('####### SELECT ANY OF THE FOLLOWING OPTION #########')\n print_red_on_white('########################################################')\n print(\"\\n\")\n\n cprint(\"1. List all pods of all namespaces\", 'blue', attrs=['bold'], file=sys.stderr)\n cprint(\"2. Show pod data\", 'blue', attrs=['bold'], file=sys.stderr)\n cprint(\"3. Exec into a particular pod\", 'blue', attrs=['bold'], file=sys.stderr)\n cprint(\"0. 
TO EXIT\", 'blue', attrs=['bold'], file=sys.stderr)\n\n num = input(\"\\nEnter number: \")\n\n if num == '1':\n obj.list_all_pods_all_namespaces()\n\n if num == '2':\n obj.list_available_pods_namespaces()\n pod_name = input(\"\\nEnter the pod to exec: \")\n namespace = input(\"\\nEnter the Namespace of the pod: \")\n if not namespace:\n namespace=\"default\"\n obj.read_namespaced_pod(pod_name, namespace)\n\n if num == '3':\n obj.list_available_pods_namespaces()\n pod_name = input(\"\\nEnter the pod to exec: \")\n cmd = input(\"\\nEnter the command to exec: \")\n\n if not cmd:\n cmd = \"python --version\"\n obj.exec_in_pod(pod_name, \"default\", cmd)\n\n if num == '0':\n break\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"Python_kubernetes_tool/PythonKubePodExecTool.py","file_name":"PythonKubePodExecTool.py","file_ext":"py","file_size_in_byte":4892,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"555011029","text":"from urllib import request\nfrom bs4 import BeautifulSoup\nimport re\nimport requests\n# sample:\n# http://bejsrtl605.bj.intel.com:8080/harts/logs/test_sets//54198825/test_engine/MTBF_RELEASE_TEST_7660.xml_20181102071957/PASSED_FLASH_232763981805431/FLASH.txt\n# (abort)\n# str(tr_table[i]) sample:\n# Control 75 Name : DesiredRATSIM0\n# Control 75 Enabled : Yes\n# Control 75 Value : TM_4G3G2G\n\n\nclass InformationCheck:\n\n #display wabpage content\n @staticmethod\n def webpage(url):\n page = request.urlopen(url)\n content = page.read()\n content1 = content.decode('utf-8')\n return BeautifulSoup(content1, 'html.parser')\n\n @staticmethod\n def sheet_name():\n pass\n\n\n\n\n @staticmethod\n def section_name():\n print('1: SSIM-ALL 2: SSIM-SEB 3: SSIM-UTC 4: MSIM-ALL 5: MSIM-SEB 6: MSIM-UTC 7: MSIM-B0')\n input_str = input(\"please input number which part u want to check:\")\n csv_name = {'1': 'SSIM-ALL', '2': 'SSIM-SEB', '3': 'SSIM-UTC', '4': 'MSIM-ALL', '5': 'MSIM-SEB', '6': 'MSIM-UTC', '7': 'MSIM-B0'}\n try:\n csv_name[input_str]\n except KeyError:\n print(\"please input 1-8!\")\n else:\n return csv_name[input_str]\n\n def check_mtbf_release_test_folder(self, url):\n if requests.get(url).status_code == 200:\n page_content = self.webpage(url)\n mtbf_release_test_folder_exist = page_content.find_all(text=re.compile('MTBF_RELEASE_TEST*'))\n return mtbf_release_test_folder_exist[0]\n else:\n return 'false'\n\n\n def check_baseline_from_campaign_start(self, url):\n if self.check_mtbf_release_test_folder(url) != 'false':\n result = requests.get(url + 'campaign_start.txt').text\n pattern_baseline = re.compile(r'Java\\shome\\sfolder:\\s/harts/baselines/.+')\n baseline = re.search(pattern_baseline, str(result)).group(0).strip().strip('//')\n return baseline\n else:\n return 'false'\n\n def find_prepare_folder(self, url):\n if requests.get(url).status_code == 200:\n page_content = self.webpage(url)\n prepare_folder = page_content.find_all(text=re.compile('.?COMBI_TC_MTBF_Prepare_Device_.+'))\n if len(prepare_folder) > 0:\n return prepare_folder[0]\n else:\n return 'still no prepare folder!!!'\n\n def check_rat_combo_from_prepare(self, url):\n if requests.get(url).status_code == 200:\n if self.find_prepare_folder(url) != 'still no prepare folder!!!':\n if requests.get(url + self.find_prepare_folder(url)).status_code == 200:\n result = requests.get(url + self.find_prepare_folder(url) + 'COMBI_TC_MTBF_Prepare_Device.txt').text\n #Operator (CUC, CMCC)\n #Operator (CUC)\n #SIM0 rat: TM_4G3G2G\n pattern_operator_sim0 = 
re.compile(r'Operator \\(.+\\)')\n pattern_operator_sim1 = re.compile(r'Operator \\(.+\\)')\n pattern_sim0 = re.compile(r'SIM0 rat:\\s.+')\n pattern_sim1 = re.compile(r'SIM1 rat:\\s.+')\n search_sim0 = re.search(pattern_sim0, str(result)).group(0)\n search_sim1 = re.search(pattern_sim1, str(result))\n search_sim0_operator = re.search(pattern_operator_sim0, str(result)).group(0).strip()\n if search_sim1 is None:\n rat_combo = search_sim0_operator.split(' ')[1].lstrip('(').rstrip(')') + ' ' + search_sim0.split(' ')[2] + ' ' + 'NA' + ' ' + 'NA'\n else:\n rat_combo = search_sim0_operator.split(' ')[1].lstrip('(').rstrip(',') + ' ' + search_sim0.split(' ')[2] + ' ' + \\\n search_sim0_operator.split(' ')[2].rstrip(')') + ' ' + search_sim1.group(0).split(' ')[2]\n return rat_combo\n else:\n return 'false'\n else:\n return self.find_prepare_folder(url)\n else:\n return 'false'\n\n\n def check_build(self, url):\n result = requests.get(url=url).text\n pattern = re.compile(r\"__...7660_.+\")\n pattern1 = re.compile(r\"/harts/baselines/.+mtbf\")\n re_result = re.search(pattern, str(result))\n re_result1 = re.search(pattern1, str(result))\n if requests.get(url).status_code == 200:\n if re_result:\n #print(re_result1)\n return re_result.group(0)[2:-3]\n else:\n return 'check flash step!!!'\n else:\n return 'false'\n\n\n def output_flash_txt_link(self, url):\n if requests.get(url).status_code == 200:\n page_content = self.webpage(url)\n flash_folder = page_content.find_all(text=re.compile('.?FLASH_.+'))\n flash_folder1 = page_content.find_all(text=re.compile('.+BootAndATCheck_.+'))\n if len(flash_folder) > 0:\n return self.check_build(url + flash_folder[0] + 'FLASH.txt')\n elif len(flash_folder1) > 0:\n return self.check_build(url + flash_folder1[0] + 'TC_100_2_0_BootAndATCheck.txt')\n else:\n return 'still not flashing...'\n return 'false'\n\n\n def check_tc_mtbf_7660_txt(self, url):\n if requests.get(url).status_code == 200:\n page_content = self.webpage(url)\n tc_mtbf_folder = page_content.find_all(text=re.compile('^TC_MTBF_7660.+'))\n if len(tc_mtbf_folder) > 0:\n if '7660' in tc_mtbf_folder[0]:\n txt = 'TC_MTBF_7660.txt'\n else:\n txt = 'TC_MTBF.txt'\n tr_table = self.webpage(url + tc_mtbf_folder[0]).find_all('td')\n if len(tr_table) > 0:\n for i in range(len(tr_table)):\n if txt in str(tr_table[i]):\n return str(tr_table[i + 1])[-23:-7]\n else:\n return 'still in prepare stage!!!'\n else:\n return 'still in prepare stage!!!'\n else:\n return 'false'\n","sub_path":"Check_TC_MTBF_Recent_Update_Time/founction.py","file_name":"founction.py","file_ext":"py","file_size_in_byte":6432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"459709573","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport datetime\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Item',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('value', models.CharField(max_length=250, verbose_name=b'value')),\n ('pos', models.SmallIntegerField(default=b'0', verbose_name=b'position')),\n ],\n options={\n 'ordering': ['pos'],\n 'verbose_name': 'answer',\n 'verbose_name_plural': 'answers',\n },\n bases=(models.Model,),\n ),\n migrations.CreateModel(\n name='Poll',\n fields=[\n ('id', 
models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n                ('title', models.CharField(max_length=250, verbose_name=b'question')),\n                ('date', models.DateField(default=datetime.date.today, verbose_name=b'date')),\n                ('is_published', models.BooleanField(default=True, verbose_name=b'is published')),\n            ],\n            options={\n                'ordering': ['-date'],\n                'verbose_name': 'poll',\n                'verbose_name_plural': 'polls',\n            },\n            bases=(models.Model,),\n        ),\n        migrations.CreateModel(\n            name='Vote',\n            fields=[\n                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n                ('ip', models.IPAddressField(verbose_name=b\"user's IP\")),\n                ('datetime', models.DateTimeField(auto_now_add=True)),\n                ('item', models.ForeignKey(verbose_name=b'voted item', to='poll.Item')),\n                ('poll', models.ForeignKey(verbose_name=b'poll', to='poll.Poll')),\n                ('user', models.ForeignKey(verbose_name=b'user', blank=True, to=settings.AUTH_USER_MODEL, null=True)),\n            ],\n            options={\n                'verbose_name': 'vote',\n                'verbose_name_plural': 'votes',\n            },\n            bases=(models.Model,),\n        ),\n        migrations.AddField(\n            model_name='item',\n            name='poll',\n            field=models.ForeignKey(to='poll.Poll'),\n            preserve_default=True,\n        ),\n    ]\n","sub_path":"poll/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":2610,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"424870372","text":"import random\r\nimport math\r\nimport csv\r\n\r\n\r\n\r\nSpecies = input('Pokemon Species: ')\r\ntry:\r\n\tGenderRatioMales = int(input('Males in gender ratio (1-10, default 1): '))\r\nexcept ValueError:\r\n\tGenderRatioMales = 1\r\nif GenderRatioMales <= 0 or GenderRatioMales > 10:\r\n\tGenderRatioMales = 1\r\ntry:\r\n\tGenderRatioFemales = int(input('Females in gender ratio (1-10, default 1): '))\r\nexcept ValueError:\r\n\tGenderRatioFemales = 1\r\nif GenderRatioFemales <= 0 or GenderRatioFemales > 10:\r\n\tGenderRatioFemales = 1\r\ntry:\r\n\tMinPop = int(input('Minimum Population (1-1000, default 10): '))\r\nexcept ValueError:\r\n\tMinPop = 10\r\nif MinPop <= 0 or MinPop > 1000:\r\n\tMinPop = 10\r\n\r\nMalePop = 0\r\nFemalePop = 0\r\n\r\nwhile (MalePop + FemalePop) < MinPop:\r\n\tMalePop += GenderRatioMales\r\n\tFemalePop += GenderRatioFemales\r\n\r\ntry:\r\n\tPenaltyTime = int(input('Fertility penalty after x ticks (default 100): '))\r\nexcept ValueError:\r\n\tPenaltyTime = 100\r\ntry:\r\n\tFertilityPenalty = int(input('Fertility penalty (in percentage, subtracted from 50, default 40): '))\r\nexcept ValueError:\r\n\tFertilityPenalty = 40\r\n\r\ntick = 0\r\nEggs = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]\r\n\r\nwith open(Species + 'Population.csv', 'a', newline='') as csv_file:\r\n\t\tfieldnames = ['tick', 'females', 'males']\r\n\t\twriter = csv.DictWriter(csv_file, fieldnames=fieldnames)\r\n\t\twriter.writeheader()\r\n\t\tcsv_file.close()\r\n\r\nwhile True:\r\n\twith open(Species + 'Population.csv', 'a', newline='') as csv_file:\r\n\t\tfieldnames = ['tick', 'females', 'males']\r\n\t\twriter = csv.DictWriter(csv_file, fieldnames=fieldnames)\r\n\t\twriter.writerow({'tick': tick, 'females': FemalePop, 'males': MalePop})\r\n\t\tcsv_file.close()\r\n\ttick += 1\r\n\tprint('Tick: ', tick)\r\n\tfor x in range(FemalePop):\r\n\t\tdeath = random.randint(1, 10000)\r\n\t\tif death <= 243:\r\n\t\t\tFemalePop -= 1\r\n\r\n\tfor x in range(MalePop):\r\n\t\tdeath = random.randint(1, 10000)\r\n\t\tif death <= 243:\r\n\t\t\tMalePop += -1\r\n\r\n\tif Eggs[0] > 
0:\r\n\t\tprint (Eggs[0], Species + ' Eggs are hatching!')\r\n\t\tfor x in range(Eggs[0]):\r\n\t\t\tsexroll = random.randint(1, GenderRatioFemales + GenderRatioMales)\r\n\t\t\tif sexroll <= GenderRatioMales:\r\n\t\t\t\tMalePop += 1\r\n\t\t\telse:\r\n\t\t\t\tFemalePop += 1\r\n\r\n\tfor x in range(0, 19):\r\n\t\tEggs[x] = Eggs[x + 1]\r\n\tEggs[19] = 0\r\n\r\n\tif MalePop >=1 and tick <= PenaltyTime:\r\n\t\tfor x in range(FemalePop):\r\n\t\t\tegg = random.randint(1,100)\r\n\t\t\tif egg <= 50:\r\n\t\t\t\tEggs[19] += 1\r\n\telif FemalePop >=1:\r\n\t\tfor x in range(FemalePop):\r\n\t\t\tegg = random.randint(1,100)\r\n\t\t\tif egg <= 50 - FertilityPenalty:\r\n\t\t\t\tEggs[19] += 1\r\n\r\n\tprint('Total Population: ', FemalePop+MalePop)\r\n\tprint('Females: ', FemalePop)\r\n\tprint('Males: ', MalePop)\r\n\tprint('')\r\n\r\n\tif FemalePop+MalePop >= 20000:\r\n\t\tprint('Population max reached')\r\n\t\tbreak\r\n\telif FemalePop == 0 and (len(list (filter (lambda x : x == 0, Eggs))) == 20):\r\n\t\tprint('Extinction')\r\n\t\tbreak","sub_path":"Pokemon Pop Model.py","file_name":"Pokemon Pop Model.py","file_ext":"py","file_size_in_byte":2779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"91394053","text":"import sys\nimport time\nfrom collections import defaultdict\n\nclass CONST:\n\tdef __init__(self, MAX_M, MAX_C, CAP_BOAT, MAX_TIME_S):\n\t\tself.MAX_M = MAX_M\n\t\tself.MAX_C = MAX_C\n\t\tself.CAP_BOAT = CAP_BOAT\n\t\tself.MAX_TIME = MAX_TIME_S\n\nclass Graph:\n\n\tdef __init__(self):\n\n\t\tself.bfs_parent = {}\n\t\tself.dfs_parent = {}\n\t\n\tdef BFS(self, s):\n\t\tqueue = []\n\t\tqueue.append(s)\n\t\tvisited = {}\n\t\tself.bfs_parent[s] = None\n\t\tstart_time = time.time()\n\t\twhile len(queue):\n\t\t\tu = queue.pop(0)\n\t\t\tvisited[(u.missionaries, u.cannibals, u.dir)] = 1\n\t\t\t# return if goal state i.e. cannibals = 0, missionaries = 0, boat direction = 0 \n\t\t\tif u.isGoalState():\n\t\t\t\tself.bfs_parent[TERMINAL_STATE] = u\n\t\t\t\tqueue.clear()\n\t\t\t\treturn self.bfs_parent\n\t\t\t\n\t\t\tif time.time()-start_time > u.CONSTANTS.MAX_TIME:\n\t\t\t\tqueue.clear()\n\t\t\t\treturn {}\n\t\t\t# Stops searching after a certain time limit \n\t\t\tfor v in u.successors():\n\t\t\t\tif visited.get((v.missionaries, v.cannibals, v.dir), 0) == 0:\n\t\t\t\t\tqueue.append(v)\n\t\t\t\t\tself.bfs_parent[v] = u\n\t\treturn {} \n\t\t\t\t \n\tdef DFS(self, s):\n\t\tstack = []\n\t\tstack.append(s)\n\t\tvisited = {}\n\t\tself.dfs_parent[s] = None\n\t\tstart_time = time.time()\n\t\twhile len(stack):\n\t\t\tu = stack.pop()\n\t\t\tvisited[(u.missionaries, u.cannibals, u.dir)] = 1\n\t\t\t# return if goal state i.e. 
cannibals = 0, missionaries = 0, boat direction = 0 \n\t\t\tif u.isGoalState():\n\t\t\t\tself.dfs_parent[TERMINAL_STATE] = u\n\t\t\t\tstack.clear()\n\t\t\t\treturn self.dfs_parent\n\t\t\t# Stops searching after a certain time limit \n\t\t\tif time.time()-start_time > u.CONSTANTS.MAX_TIME:\n\t\t\t\tstack.clear()\n\t\t\t\treturn {}\n\t\t\t\n\t\t\tfor v in u.successors():\n\t\t\t\tif visited.get((v.missionaries, v.cannibals, v.dir), 0) == 0:\n\t\t\t\t\tstack.append(v)\n\t\t\t\t\tself.dfs_parent[v] = u\n\t\treturn {} \n\t\t\t\t \n\n\n\t# Prints the path returned by BFS/DFS\n\tdef printPath(self, parentList, tail):\n\t\tcount = 0\n\t\tif parentList == {} or parentList is None or tail is None:\n\t\t\treturn\n\t\tif tail == TERMINAL_STATE: \n\t\t\ttail = parentList[tail]\n\n\t\tstate_list = []\n\t\twhile tail is not None:\n\t\t\tcount+=1\n\t\t\tstate_list.append(tail)\n\t\t\ttail = parentList[tail]\n\n\t\twhile state_list:\n\t\t\tprint(state_list.pop(-1))\n\t\tprint(\"Count = \",count-1)\n\n\nCON_IN = sys.stdin\nCON_OUT = sys.stdout\n\n# Generate All possible next moves for each state to reduce number of iterations on each node\ndef genPossibleMoves(CAP_BOAT):\n\tmoves = []\n\tfor m in range(CAP_BOAT + 1):\n\t\tfor c in range(CAP_BOAT + 1):\n\t\t\tif 0 < m < c:\n\t\t\t\tcontinue\n\t\t\tif 1 <= m + c <= CAP_BOAT:\n\t\t\t\tmoves.append((m, c))\n\treturn moves\n\n\ndef runBFS(g, INITIAL_STATE):\n\tprint(\"\\nBFS : \\n\")\n\tp = g.BFS(INITIAL_STATE)\n\tif len(p):\n\t\tg.printPath(p, TERMINAL_STATE)\n\telse:\n\t\tprint(\"No Solution\")\n\n\ndef runDFS(g, INITIAL_STATE):\n\tprint(\"\\nDFS : \\n\")\n\tp = g.DFS(INITIAL_STATE)\n\tif len(p):\n\t\tg.printPath(p, TERMINAL_STATE)\n\telse:\n\t\tprint(\"No Solution\")\n\n\nMAX_M = 30\nMAX_C = 30\nCAP_BOAT = 20\nCNST = None\n\n\nclass State(object):\n\tdef __init__(self, missionaries, cannibals, dir, missionariesPassed, cannibalsPassed, level, CONSTS,moves):\n\t\tself.missionaries = missionaries\n\t\tself.cannibals = cannibals\n\t\tself.dir = dir\n\t\tself.action = \"\"\n\t\tself.level = level\n\t\tself.missionariesPassed = missionariesPassed\n\t\tself.cannibalsPassed = cannibalsPassed\n\t\tself.CONSTANTS = CONSTS\n\n\t\tself.moves = moves\n\n\t\tglobal MAX_M\n\t\tglobal MAX_C\n\t\tglobal CAP_BOAT\n\t\tglobal CNST\n\n\t\tif not CONSTS is None:\n\t\t\tCNST = CONSTS\n\t\t\tMAX_M = CONSTS.MAX_M\n\t\t\tMAX_C = CONSTS.MAX_C\n\t\t\tCAP_BOAT = CONSTS.CAP_BOAT\n\n\t# pass True to count forward\n\tdef successors(self):\n\t\tlistChild = []\n\t\tif not self.isValid() or self.isGoalState():\n\t\t\treturn listChild\n\t\tif self.dir == 1:\n\t\t\tsgn = -1\n\t\t\tdirection = \"from the original shore to the new shore\"\n\t\telse:\n\t\t\tsgn = 1\n\t\t\tdirection = \"back from the new shore to the original shore\"\n\t\tfor i in self.moves:\n\t\t\t(m, c) = i\n\t\t\tself.addValidSuccessors(listChild, m, c, sgn, direction)\n\t\treturn listChild\n\n\tdef addValidSuccessors(self, listChild, m, c, sgn, direction):\n\t\tnewState = State(self.missionaries + sgn * m, self.cannibals + sgn * c, self.dir + sgn * 1,\n\t\t\t\t\t\t\tself.missionariesPassed - sgn * m, self.cannibalsPassed - sgn * c, self.level + 1,\n\t\t\t\t\t\t\tself.CONSTANTS,self.moves)\n\t\tif newState.isValid():\n\t\t\tnewState.action = \"Move %d M and %d C %s.\" % (m, c, direction)\n\t\t\tlistChild.append(newState)\n\n\tdef isValid(self):\n\t\t# obvious\n\t\tif self.missionaries < 0 or self.cannibals < 0 or self.missionaries > MAX_M or self.cannibals > MAX_C or (self.dir != 0 and self.dir != 1):\n\t\t\treturn 
False\n\n\t\t# then check whether missionaries outnumbered by cannibals in any shore\n\t\tif (self.cannibals > self.missionaries > 0) or (self.cannibalsPassed > self.missionariesPassed > 0): # more cannibals than missionaries on either shore\n\t\t\treturn False\n\n\t\treturn True\n\n\tdef isGoalState(self):\n\t\treturn self.cannibals == 0 and self.missionaries == 0 and self.dir == 0\n\n\tdef __repr__(self):\n\t\tif self.dir == 1:\n\t\t\ttext = \"left\"\n\t\telse:\n\t\t\ttext = \"right\"\n\t\treturn \"\\n%s\\nState: Left (M : %d, C : %d), Boat Position: (%s), Right (M: %d, C: %d)\" % (\n\t\t\tself.action, self.missionaries, self.cannibals, text, self.missionariesPassed,self.cannibalsPassed)\n\n\tdef __eq__(self, other):\n\t\treturn self.missionaries == other.missionaries and self.cannibals == other.cannibals and self.dir == other.dir\n\n\tdef __hash__(self):\n\t\treturn hash((self.missionaries, self.cannibals, self.dir))\n\n\tdef __ne__(self, other):\n\t\treturn not (self == other)\n\n\nTERMINAL_STATE = State(-1, -1, 0, -1, -1, 0, CNST,None)\n\ndef main():\n\tm = int(input(\"Number of Missionaries: \"))\n\tc = int(input(\"Number of Cannibals: \"))\n\tk = int(input(\"Boat Capacity: \"))\n\n\tCNST = CONST(m, c, k, 100000)\n\n\tmoves = genPossibleMoves(CNST.CAP_BOAT)\n\n\tINITIAL_STATE = State(CNST.MAX_M, CNST.MAX_C, 1, 0, 0, 0, CNST, moves)\n\tg = Graph()\n\tsys.stdout = CON_OUT\n\trunBFS(g, INITIAL_STATE)\n\trunDFS(g, INITIAL_STATE)\n\tsys.stdout = CON_OUT\n\n\nif __name__ == '__main__':\n\tmain()","sub_path":"final_code.py","file_name":"final_code.py","file_ext":"py","file_size_in_byte":5891,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"23586409","text":"'''\nI am trying to test if somehow I can apply the backtracking and recursion knowledge to\n get the answer and why is this only a graph problem\n\n we are trying to figure out connected components instead of exploring single cell values\n dfs suits better.\n\nmodified this to figure out how many cells the biggest island contains.\n\n'''\n\n\ndef countislands(grid):\n n = len(grid)\n m = len(grid[0])\n\n for i in range(len(grid)):\n grid[i] = list(grid[i])\n\n def get_neighbours(row,col):\n xmoves = [-1,1,0,0]\n ymoves = [0,0,1,-1]\n\n retlist = []\n for i in range(len(xmoves)):\n if 0 <= row+xmoves[i] < n and 0 <= col+ymoves[i] < m: # stay inside the grid\n retlist.append((row+xmoves[i],col+ymoves[i]))\n print(retlist)\n return retlist\n\n def explore(row,col,count):\n grid[row][col] = 'W' # we are marking this as visited. 
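this mutates the caller's grid, so 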
we might need to use alternate solution\n for nextrow,nextcol in get_neighbours(row,col):\n if grid[nextrow][nextcol] == 'G':\n count = explore(nextrow,nextcol,count+1)\n return count\n\n result = 0\n maxislandcount = 0\n\n for row in range(n):\n for col in range(m):\n if grid[row][col] == 'G':\n for cur in grid:\n print(''.join(cur))\n print()\n maxislandcount = max (maxislandcount,explore(row,col,1))\n print(maxislandcount)\n result+=1\n\n return result\n\n\n\nif __name__ == \"__main__\":\n\n grid = ['WWWGGGG',\n 'WGGWGGG',\n 'WWWWWWW',\n 'WGGWWGG'\n ]\n\n\n print(countislands(grid))\n","sub_path":"Python/Practice/Graphs/countislands.py","file_name":"countislands.py","file_ext":"py","file_size_in_byte":1688,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"638309528","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\n\n\"\"\"\nSpider for multi-url crawl task\n\nCrawler for one-page-crawl task\n\nbased on the url re-expression matching result, we will have this mapping rule:\n\nurl re-expression → the Crawler type\n\nif the url match the re-expression , then we will create such Crawler type to handle it.\n\nCrawler is a subclass of Task object ,\n\n\"\"\"\n\nimport logging\nimport asyncio\nimport re\nimport json\nimport os\nimport time\nfrom urllib.parse import urlparse\nfrom asyncio import Queue\nimport aiohttp\n\nfrom expython.web.coroutine import aiowget_links\nfrom expython.web.gfun import is_url_belong\n\nfrom expython.web.gfun import get_random_agent\n\nclass Spider(object):\n \"\"\"A web walk spider.\nstart_url\n start url\ntodo_urls\n a set of urls to do the crawl\ndone_urls\n a set of urls have been crawl\n\nmapping_rules\n it is a dict , we will implement this logic:\n If the url pantern is match then we will create this type of Crawler to handle it .\n\n \"\"\"\n start_url = ''\n\n mapping_rules = {}\n lazy_mode = True #only dig into it\n forcejs = False\n interval = 3 #\n timeout = 30##\n retry_times = 5\n\n suicide_timing = 0\n headers = {'USER-AGENT':get_random_agent()}\n ###\n encoding = 'utf-8'\n\n def __init__(self,loop=None,maxsize=0,max_tasks=10):\n self.done_urls = set()\n self.max_tasks = max_tasks\n self.workers = []\n self.retry_count = {}\n self.current_url = []\n\n if loop is None:\n self.loop = asyncio.get_event_loop()\n else:\n self.loop = loop\n self.q = asyncio.Queue(maxsize=maxsize)\n\n if os.path.exists('spider.json'):\n self.load_status()\n else:\n self.add_url(self.start_url)\n\n def cancel(self):\n for w in self.workers:\n w.cancel()\n\n @asyncio.coroutine\n def introspection(self):\n while (not self.q.empty()):\n url = yield from self.q.get()\n\n if url in self.done_urls:## we just pass it.\n self.q.task_done()\n continue\n\n logging.debug('working on url: {}'.format(url))\n self.current_url.append(url)\n\n for p,func in self.mapping_rules.items():\n if re.match(p,url):\n logging.debug('func')\n try:\n self.retry_count[url] = 0##\n links = yield from asyncio.wait_for(func(url,encoding=self.encoding,forcejs=self.forcejs,headers=self.headers),timeout=self.timeout)\n\n if not self.forcejs:##js is very slow\n yield from asyncio.sleep(self.interval)\n except asyncio.TimeoutError:\n self.retry_count[url] += 1\n if self.retry_count[url] >= self.retry_times:\n logging.warning('{} {} failed'.format(url,func.__name__))\n links = set()#empty\n else:\n logging.debug('url {} put back'.format(url))\n self.add_url(url)\n except 
Exception as e:\n logging.error('func maybe the html parse have some problem {}'.format(url))\n logging.error(e)\n links = set()\n else:### get all url\n logging.debug('get all links')\n try:\n self.retry_count[url] = 0##\n links = yield from asyncio.wait_for(aiowget_links(url,encoding=self.encoding,forcejs=self.forcejs,headers=self.headers),timeout=self.timeout)\n\n if not self.forcejs:\n yield from asyncio.sleep(self.interval)\n\n except asyncio.TimeoutError:\n self.retry_count[url] += 1\n if self.retry_count[url] >= self.retry_times:\n logging.warning('{} {} failed'.format(url,'get all links'))\n links = set()#empty\n else:\n logging.debug('url {} put back'.format(url))\n self.add_url(url)\n except Exception as e:\n logging.error('get all links maybe the html parse have some problem {}'.format(url))\n logging.error(e)\n links = set()\n\n ###update done url\n self.done_urls.add(url)\n logging.debug('done_urls add {}'.format(url))\n\n if self.lazy_mode:\n links = set([link for link in links if is_url_belong(link,self.start_url)])\n\n #remove done urls,we will not do it again.\n if links:\n links = links - self.done_urls\n # add for next operation\n if links:\n logging.debug('links is {}'.format(links))\n for link in links:\n logging.debug('add link {}'.format(link))\n self.add_url(link)\n\n self.q.task_done()\n self.current_url.remove(url)\n\n def add_url(self,url):\n if url not in self.done_urls:\n self.q.put_nowait(url)\n\n def get_rest_item(self):\n lst = []\n while True:\n try:\n item = self.q.get_nowait()\n lst.append(item)\n except asyncio.QueueEmpty:\n return lst\n\n @asyncio.coroutine\n def crawl(self):\n self.workers = [asyncio.Task(self.introspection(), loop=self.loop)\n for _ in range(self.max_tasks)]\n\n if self.suicide_timing:\n yield from asyncio.sleep(self.suicide_timing)\n logging.debug('suicide time up')\n self.cancel()\n else:\n yield from self.q.join()\n ### autocancel\n self.cancel()\n\n def save_status(self):\n '''save:\n done_urls is a list object\n retry_count\n queue is a list object\n '''\n done_urls = list(self.done_urls)\n queue = self.get_rest_item()\n ###if we in the process progress then we will do it again.\n if self.current_url:\n for current_url in self.current_url:\n print('current url {}'.format(current_url))\n queue.insert(0,current_url)\n try:\n done_urls.remove(current_url)\n except ValueError:\n pass\n retry_count = self.retry_count\n\n res = {\n 'done_urls' : done_urls,\n 'queue' : queue,\n 'retry_count' : retry_count,\n }\n\n with open('spider.json','w') as f:\n json.dump(res,f,indent=4)\n f.write('\\n')\n\n def load_status(self):\n '''\n if spider.json exits then we load it\n '''\n with open('spider.json','r') as f:\n res = json.load(f)\n done_urls = set(res['done_urls'])\n self.done_urls.update(done_urls)\n self.retry_count.update(res['retry_count'])\n for i in res['queue']:\n self.add_url(i)\n\n\n######\n\nfrom .polisher import Polisher\n\n\n","sub_path":"lib/python3.4/site-packages/expython/web/spider/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":7367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"462003491","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Dec 31 12:14:45 2019\n\n@author: john\n\nThis file contains classes defining a bot that plays brandubh using a monte\ncarlo tree search\n\"\"\"\n\nimport math\nimport random\nimport copy\nfrom brandubh import Act\nfrom random_bot import RandomBot\n\n\nclass MCTSBot:\n \"\"\"\n 
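Usage sketch (the parameter values here are illustrative, not tuned):\n\n bot = MCTSBot(num_rounds=1000, temp=1.5)\n action = bot.select_move(game_state)\n\n 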
This bot plays brandubh using a Monte Carlo tree search to select its\n move. An instance of this class is initialised with two parameters.\n 1) num_rounds - the number of nodes to be added to the tree\n 2) temperature - affects the balance between exploration and exploitation\n when picking child nodes with uct score\n When an instance is created, a RandomBot object is also created as\n an instance variable. This is used for simulating random games.\n \n Monte Carlo tree search:\n This algorithm builds a tree data structure. Each node of the tree\n represents a possible move following the move represented by the\n node's parent node. The root node of the tree is the current board\n position the bot is trying to decide a move for. \n \n The algorithm starts with a root node and then adds child nodes to\n the root until no more can be added to the root node (i.e. all\n legal moves from the current board position have been added). Once \n all possible child nodes have been added to a node, the algorithm\n picks a child node at random (using uct score) to add a child node to.\n \n Each time a child node is added to the tree, a random game is played \n until a winner is decided, beginning from the board position \n represented by the new child node. The winner is saved in the node\n and passed to all parent nodes, so that each node in the tree has a\n record of how many random games the black or white player won that\n began from a board position that stemmed from that node. The ratio\n of black wins vs white wins gives a way of ranking how good a move\n is for a particular player.\n \n This continues until a given number of nodes ('num_rounds') are\n added to the tree.\n \n The child node of the root with the best ranking is selected as the\n next move.\n \"\"\"\n \n def __init__(self, num_rounds, temp):\n self.num_rounds = num_rounds\n self.temperature = temp\n self.bot = RandomBot()\n \n \n def select_move(self, game_state):\n \"\"\"\n This method uses the Monte Carlo tree search to select what move\n to make next given the board position in game_state.\n \"\"\"\n \n # Create the root of the tree.\n root = MCTSNode(game_state)\n \n # add num_rounds nodes to the tree.\n for i in range(self.num_rounds):\n # print('\\rrunning rollout {0}'.format(i), end='')\n \n # To add a child node, begin at the root of the tree.\n node = root\n \n # While child nodes can't be added to the current node and\n # the current node doesn't represent a game state where the\n # game is over, select a child as the current node using uct\n while (not node.can_add_child()) and (not node.is_terminal()):\n if node.children == []:\n break\n node = self.select_child(node)\n \n # Add a random child node if possible\n if node.can_add_child():\n node = node.add_random_child()\n \n # Simulate a random game from the current board position, record\n # the winner and pass it back to all parent nodes\n winner = self.simulate_random_game(node.game_state)\n while node is not None:\n node.record_win(winner)\n node = node.parent\n \n # Once 'num_rounds' nodes have been added to the tree, select the \n # child node of the root with the best ranking as the next move\n # print('finished rollouts')\n best_move = None\n best_frac = -1\n for child in root.children:\n child_frac = child.winning_frac(game_state.player)\n if child_frac > best_frac:\n best_frac = child_frac\n best_move = child.move\n \n # return the best move\n if best_move is None:\n return Act.pass_turn()\n return Act.play(best_move)\n \n \n def select_child(self, 
node):\n \"\"\"\n This method selects a child with the best uct score\n \"\"\"\n total_rollouts = sum(child.num_rollouts for child in node.children)\n \n best_score = -1\n best_child = None\n \n for child in node.children:\n score = uct_score(total_rollouts, child.num_rollouts,\n child.winning_frac(node.game_state.player),\n self.temperature)\n if score > best_score:\n best_score = score\n best_child = child\n \n return best_child\n \n \n def simulate_random_game(self, game_state):\n \"\"\"\n This method plays a game of brandubh beginning from the board position\n in game_state and plays until a winner is decided. At each turn,\n moves are selected at random. The method returns the winner of the\n game when it is over.\n \"\"\"\n game = copy.deepcopy(game_state)\n \n while game.is_not_over():\n random_move = self.bot.select_move(game)\n game.take_turn(random_move)\n \n return game.winner\n \n \n \nclass MCTSNode:\n \"\"\"\n This class is a node of a tree used in Monte Carlo tree search. Instance\n variables include:\n * game_state containing the board position the node represents\n * parent - a link to the parent node\n * move - the move that created the board position from the parent\n * win_counts - a dictionary containing the number of wins for black/white\n * num_rollouts - number of random games that stemmed from this node\n * children - a list of child nodes\n * unvisited_moves - a list of possible child nodes to add\n \"\"\"\n \n def __init__(self, game_state, parent=None, move=None):\n self.game_state = game_state\n self.parent = parent\n self.move = move\n self.win_counts = {-1: 0,\n 1: 0}\n self.num_rollouts = 0\n self.children = []\n self.unvisited_moves = game_state.legal_moves()\n \n \n def add_random_child(self):\n \"\"\"\n This method adds a random child to self and returns the added child\n \"\"\"\n index = random.randint(0, len(self.unvisited_moves)-1)\n new_move = self.unvisited_moves.pop(index)\n new_game_state = copy.deepcopy(self.game_state)\n new_game_state.take_turn(Act.play(new_move))\n new_node = MCTSNode(new_game_state, self, new_move)\n self.children.append(new_node)\n return new_node\n \n \n def record_win(self, winner):\n self.win_counts[winner] += 1\n self.num_rollouts += 1\n \n \n def can_add_child(self):\n return len(self.unvisited_moves) > 0\n \n \n def is_terminal(self):\n return not self.game_state.is_not_over()\n \n \n def winning_frac(self, player):\n return float(self.win_counts[player]) / float(self.num_rollouts)\n \n\n\ndef uct_score(parent_rollouts, child_rollouts, win_frac, temp):\n \"\"\"\n The uct score (Upper Confidence bounds applied to Trees) is given by\n the formula:\n s = w/n + c*sqrt( ln(N)/n ),\n where \n w = number of wins for the current player in games\n stemming from the current node, \n n = number of games stemming from the current node\n N = number of games stemming from the parent node\n c = temperature\n \n The first term is called exploitation (large for child nodes where the\n player wins a lot)\n The second term is called exploration (large for child nodes with few\n random games stemming from them)\n \"\"\"\n exploration = math.sqrt( math.log(parent_rollouts) / child_rollouts )\n return win_frac + temp*exploration","sub_path":"bots/mcbot.py","file_name":"mcbot.py","file_ext":"py","file_size_in_byte":8311,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"563236569","text":"from connect4 import *\nfrom montecarloai import *\nimport random\n\ndef main():\n result = 0\n offwins = 0\n 
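# running tallies: offwins appears to count human wins, defwins AI wins (names inferred from the printout at the end of main)\n 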
defwins = 0\n ties = 0\n mygame = ConnectFour()\n play = 'y'\n\n turn = input('if you want to go first press 1 if you want to go second press 2 ')\n while turn != '1' and turn != '2':\n turn = input('press 1 or 2 ')\n piece = ''\n if turn == '1':\n print('you are X')\n piece = 'O'\n else:\n print('you are O')\n piece = 'X'\n \n \n myai = MonteCarloAiConnectFour(piece)\n \n turn = -1\n\n\n\n mygame.drawBoard()\n while play == 'y':\n print('computers turn')\n print('thinking....')\n sboard = mygame.getBoard()[:]\n dboard = mygame.getDisBoard()[:]\n aimov = myai.makeMove(sboard,dboard)\n mygame.placeKey(aimov,1)\n mygame.drawBoard()\n\n if mygame.checkWin() == True:\n print('Computer Win!')\n result = 2\n play = 'n'\n\n elif mygame.checkTie() == True:\n print('Tie Game!')\n result = 3\n play = 'n' \n \n else:\n pos = input('where do you want to go: ')\n mygame.placeKey(int(pos),-1)\n mygame.drawBoard()\n if mygame.checkWin() == True:\n print('You Win!')\n play = 'n'\n result = 1\n \n elif mygame.checkTie() == True:\n print('Tie Game!')\n result = 3\n play = 'n'\n \n #play = input('cont: ')\n\n if result == 1:\n offwins += 1\n elif result == 2:\n defwins += 1\n else:\n ties += 1\n \n print(\"off\",offwins,\" def\",defwins,\" Ties\",ties)\n \n\n\nmain()\n","sub_path":"improved again/con4driver.py","file_name":"con4driver.py","file_ext":"py","file_size_in_byte":1758,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"306544168","text":"from tensorflow import keras\nimport tensorflow as tf\n\nfrom libspn_keras.backprop_mode import BackpropMode, infer_logspace_accumulators\nfrom libspn_keras.constraints.greater_equal_epsilon import GreaterEqualEpsilon\nfrom libspn_keras.logspace import logspace_wrapper_initializer\nfrom libspn_keras.math.logmatmul import logmatmul\nfrom libspn_keras.math.hard_em_grads import \\\n logmatmul_hard_em_through_grads_from_accumulators, logmultiply_hard_em\nfrom tensorflow.keras import initializers\nfrom tensorflow.keras import regularizers\nfrom tensorflow.keras import constraints\n\nfrom libspn_keras.math.soft_em_grads import log_softmax_from_accumulators_with_em_grad\nimport numpy as np\n\n\nclass RootSum(keras.layers.Layer):\n \"\"\"\n Final sum of an SPN. Expects input to be in log-space and produces log-space output.\n\n Args:\n return_weighted_child_logits: If True, returns a weighted child log probability, which\n can be used for e.g. (Sparse)CategoricalCrossEntropy losses. If False, computes\n the weighted sum of the input, which effectively is the log probability of the\n distribution defined by the SPN.\n logspace_accumulators: If ``True``, accumulators will be represented in log-space which\n is typically used with ``BackpropMode.GRADIENT``. If ``False``, accumulators will be\n represented in linear space. Weights are computed by normalizing the accumulators\n per sum, so that we always end up with a normalized SPN. If ``None`` (default) it\n will be set to ``True`` for ``BackpropMode.GRADIENT`` and ``False`` otherwise.\n accumulator_initializer: Initializer for accumulator. If None, defaults to\n initializers.Constant(1.0)\n backprop_mode: Backpropagation mode. Can be either BackpropMode.GRADIENT,\n BackpropMode.HARD_EM, BackpropMode.SOFT_EM or BackpropMode.HARD_EM_UNWEIGHTED\n accumulator_regularizer: Regularizer for accumulator.\n linear_accumulator_constraint: Constraint for linear accumulators. Defaults to a\n constraint that ensures a minimum of a small positive constant. 
If\n logspace_accumulators is set to True, this constraint wil be ignored\n **kwargs: kwargs to pass on to the keras.Layer super class\n \"\"\"\n def __init__(\n self, return_weighted_child_logits=True, logspace_accumulators=None,\n accumulator_initializer=None, backprop_mode=BackpropMode.GRADIENT,\n accumulator_regularizer=None, linear_accumulator_constraint=None, **kwargs\n ):\n super(RootSum, self).__init__(**kwargs)\n self.return_weighted_child_logits = return_weighted_child_logits\n self.accumulator_initializer = accumulator_initializer or initializers.Constant(1.0)\n self.logspace_accumulators = infer_logspace_accumulators(backprop_mode) \\\n if logspace_accumulators is None else logspace_accumulators\n self.backprop_mode = backprop_mode\n self.accumulator_regularizer = accumulator_regularizer\n self.linear_accumulator_constraint = \\\n linear_accumulator_constraint or GreaterEqualEpsilon(1e-10)\n self.accumulators = self._num_nodes_in = None\n\n if backprop_mode != BackpropMode.GRADIENT and logspace_accumulators:\n raise NotImplementedError(\n \"Logspace accumulators can only be used with BackpropMode.GRADIENT\")\n\n def build(self, input_shape):\n _, num_scopes_in, num_decomps_in, self._num_nodes_in = input_shape\n\n if num_scopes_in != 1 or num_decomps_in != 1:\n raise ValueError(\"Number of scopes and decomps must both be 1\")\n\n initializer = self.accumulator_initializer\n accumulator_constraint = self.linear_accumulator_constraint\n if self.logspace_accumulators:\n initializer = logspace_wrapper_initializer(initializer)\n accumulator_constraint = None\n\n self.accumulators = self.add_weight(\n name='weights', shape=(self._num_nodes_in,), initializer=initializer,\n regularizer=self.accumulator_regularizer, constraint=accumulator_constraint\n )\n\n def call(self, x):\n log_weights_unnormalized = self.accumulators\n x_squeezed = tf.reshape(x, (-1, self._num_nodes_in))\n if not self.logspace_accumulators:\n\n if self.backprop_mode in [BackpropMode.HARD_EM, BackpropMode.HARD_EM_UNWEIGHTED]:\n if self.return_weighted_child_logits:\n return logmultiply_hard_em(x_squeezed, self.accumulators)\n\n logmatmul_out = logmatmul_hard_em_through_grads_from_accumulators(\n tf.reshape(x, (1, 1, -1, self._num_nodes_in)),\n tf.reshape(self.accumulators, (1, 1, self._num_nodes_in, 1)),\n unweighted=self.backprop_mode == BackpropMode.HARD_EM_UNWEIGHTED\n )\n return tf.reshape(logmatmul_out, (-1, 1))\n\n log_weights_unnormalized = tf.math.log(log_weights_unnormalized)\n\n if self.backprop_mode == BackpropMode.EM:\n log_weights_normalized = log_softmax_from_accumulators_with_em_grad(\n self.accumulators, axis=0)\n else:\n log_weights_normalized = tf.nn.log_softmax(log_weights_unnormalized, axis=0)\n\n if self.return_weighted_child_logits:\n return tf.expand_dims(log_weights_normalized, axis=0) + x_squeezed\n else:\n return logmatmul(\n x_squeezed, tf.expand_dims(log_weights_normalized, axis=1))\n\n def compute_output_shape(self, input_shape):\n num_batch, _, _, num_nodes_in = input_shape\n if self.return_weighted_child_logits:\n return [num_batch, num_nodes_in]\n else:\n return [num_batch, 1]\n\n def get_config(self):\n config = dict(\n accumulator_initializer=initializers.serialize(self.accumulator_initializer),\n logspace_accumulators=self.logspace_accumulators,\n return_weighted_child_logits=self.return_weighted_child_logits,\n backprop_mode=self.backprop_mode,\n accumulator_regularizer=regularizers.serialize(self.accumulator_regularizer),\n 
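# the keras serialize() helpers keep this config JSON-friendly, so the layer can be rebuilt from get_config() output\n 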
linear_accumulator_constraint=constraints.serialize(self.linear_accumulator_constraint)\n )\n base_config = super(RootSum, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n","sub_path":"libspn_keras/layers/root_sum.py","file_name":"root_sum.py","file_ext":"py","file_size_in_byte":6440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"120794126","text":"\nimport pygame\n\nimport random\n\nMISSILES_TO_ADD = 1\nHIT_SIZE = 376\n\nclass Level:\n loadImg = pygame.image.load # To reduce lookup time\n clouds = [\n loadImg(\"art/cloud1.png\"),\n loadImg(\"art/cloud2.png\"),\n loadImg(\"art/cloud3.png\"),\n loadImg(\"art/cloud4.png\"),\n ]\n cloudsSpecial = [\n loadImg(\"art/cloud_car.png\"),\n loadImg(\"art/cloud_sail.png\"),\n loadImg(\"art/cloud_duck.png\")\n ]\n explosions = [\n loadImg(\"art/explosion1.png\"),\n loadImg(\"art/explosion2.png\"),\n loadImg(\"art/explosion3.png\"),\n loadImg(\"art/explosion4.png\"),\n loadImg(\"art/explosion5.png\"),\n loadImg(\"art/explosion6.png\"),\n loadImg(\"art/explosion7.png\"),\n loadImg(\"art/explosion8.png\")\n ]\n centerMissiles = [\n loadImg(\"art/missile1.png\"),\n loadImg(\"art/missile2.png\"),\n loadImg(\"art/missile3.png\"),\n loadImg(\"art/missile4.png\")\n ]\n skull = loadImg(\"art/skull.png\")\n\n MAX_IMG_SIZE = 328\n\n def __init__(self, game, window, lvl, screenWidth, screenHeight, difficulty, controls):\n self.game = game\n self.window = window\n self.lvl = lvl\n self.screenWidth = screenWidth\n self.screenHeight = screenHeight\n self.difficulty = difficulty\n self.controls = controls\n self.cloudList = self.genClouds(lvl)\n self.storm = self.genStorm(lvl) # 0: no storm | 1: cloud cover | 2: storm\n self.clouds = Level.clouds\n self.numMissiles = lvl + MISSILES_TO_ADD # Missiles per wave\n if (self.numMissiles) >= 4 and (self.numMissiles) % 2 == 0:\n self.waves = 2\n elif (self.numMissiles) > 3 and (self.numMissiles) % 3 == 0:\n self.waves = 3\n else:\n self.waves = 1\n self.missileList = self.genMissiles(lvl, self.waves)\n self.numDestroyed = 0\n self.numFinished = 0\n self.animationFinished = False\n\n def genStorm(self, lvl):\n return\n\n\n def genClouds(self, lvl):\n list_o_clouds = []\n if False:\n return []\n else:\n seed = 0 # Seed can take 4 values, each corresponding to a quadrant\n rand = random.random # To reduce lookup time\n halfX = int(self.screenWidth / 2)\n halfY = int(self.screenHeight / 2)\n quarterX = int(self.screenWidth / 4)\n quarterY = int(self.screenHeight / 4)\n\n # randX and randY will be the initial distance from origin of\n # the window. halfX and halfY are added appropriately to push the\n # clouds into necessary quadrants. 
The idea behind these quadrants\n # is to ensure the clouds are spread out more.\n for i in range(lvl+4):\n if seed == 0: # Quardant 1\n randX = int(rand() * halfX) + halfX\n randY = int(rand() * halfY)\n elif seed == 1: # Quadrant 2\n randX = int(rand() * halfX)\n randY = int(rand() * halfY)\n elif seed == 2: # Quadrant 3\n randX = int(rand() * halfX)\n randY = int(rand() * halfY) + halfY\n elif seed == 3: # Quadrant 4\n randX = int(rand() * halfX) + halfX\n randY = int(rand() * halfY) + halfY\n\n randSize = rand() # Cloud's size\n\n randImage = int(rand() * len(self.clouds) - 0.01)\n image = self.clouds[randImage]\n\n list_o_clouds.append(Level.Cloud(image, randSize, randX, randY,\n seed, self.screenWidth, self.screenHeight))\n seed = (seed + 1) % 4\n\n return list_o_clouds\n\n def genMissiles(self, lvl, waves):\n # Center missiles\n list_o_missiles = []\n rand = random.random # To reduce lookup time\n\n halfX = int(self.screenWidth / 2)\n halfY = int(self.screenHeight / 2)\n quarterX = int(self.screenWidth / 4)\n quarterY = int(self.screenHeight / 4)\n halfXMinEdge = halfX - int(Level.MAX_IMG_SIZE / 2)\n halfYMinEdge = halfY - int(Level.MAX_IMG_SIZE / 2)\n\n for i in range(self.numMissiles * waves):\n if i % 3 == 0: # Quadrant 1\n imageNum = 0\n\n randX = int(rand() * halfX) + halfXMinEdge\n randY = int(rand() * halfY)\n elif i % 3 == 1:\n seed = rand()\n if seed > 0.5: # Quadrant 2\n imageNum = 1\n\n randX = int(rand() * halfX)\n randY = int(rand() * halfY)\n else: # Quadrant 3\n imageNum = 2\n\n randX = int(rand() * halfX)\n randY = int(rand() * halfY) + halfYMinEdge\n elif i % 3 == 2: # Quadrant 4\n imageNum = 3\n\n randX = int(rand() * halfX) + halfXMinEdge\n randY = int(rand() * halfY) + halfYMinEdge\n\n list_o_missiles.append(Level.Missile(imageNum, Level.centerMissiles[imageNum],\n randX, randY, self.screenWidth, self.screenHeight))\n\n return list_o_missiles\n\n def nextRender(self, wave):\n if wave == self.waves:\n return False\n else: # Reset fields for next wave\n self.numDestroyed = 0\n self.numFinished = 0\n self.animationFinished = False\n return True\n\n def renderClouds(self, mouseX, mouseY):\n # Simulates moving the cursor, but really the cloud is moved\n # the mouse's (+/-) distance from its own position.\n # This distance is stored in xDiff and yDiff.\n for cloud in self.cloudList:\n # Directional controls\n if self.controls == 0:\n # How much the mouse has moved\n xDiff = mouseX - cloud.centerX\n yDiff = mouseY - cloud.centerY\n # Render image based on mouse position\n self.window.blit(cloud.image, (cloud.posX - xDiff, cloud.posY - yDiff))\n # Inverted controls\n elif self.controls == 1:\n # How much the mouse has moved\n xDiff = mouseX - cloud.centerX\n yDiff = mouseY - cloud.centerY\n # Render image based on mouse position\n self.window.blit(cloud.image, (cloud.posX + xDiff, cloud.posY + yDiff))\n\n def renderMissiles(self, mouseX, mouseY, wave):\n liveSpeed = 10\n deadSpeed = 5\n imgs = len(Level.explosions)\n for i in range(self.numMissiles*(wave-1),self.numMissiles*wave):\n missile = self.missileList[i]\n\n if missile.image == None:\n continue\n\n missile.growth += 1 # Used to control speed of missile\n\n if not missile.explode: # Missile is live\n if self.lvl > 10:\n liveSpeed = 7\n # Missile gets closer without hardcoding framerate for it\n if missile.growth % liveSpeed == 0: # Controls speed of animation\n if missile.imageSize > 132:\n missile.imageSize += 16\n elif missile.imageSize > 92:\n missile.imageSize += 8\n else:\n missile.imageSize += 4\n\n 
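# rescale the base sprite to the new size so the missile appears to grow as it approaches\n 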
missile.image = pygame.transform.scale(self.centerMissiles[missile.imageNum],\n (missile.imageSize, missile.imageSize))\n\n # Gives smoke animation\n missile.imageNum = (missile.imageNum + 1) % 4\n else: # Missile was shot\n # First part of animation - Ball of fire\n if missile.imageNum < imgs and missile.growth <= 30:\n if missile.growth % deadSpeed == 0: # Controls speed of animation\n missile.image = pygame.transform.scale(self.explosions[missile.imageNum],\n (missile.imageSize, missile.imageSize))\n\n # Animation\n if missile.growth < 30:\n missile.imageNum = (missile.imageNum + 1) % 3\n else:\n # Begin second part of animation\n missile.imageNum = 3\n # second part of animation - Burning out\n elif missile.imageNum < imgs:\n if missile.growth % deadSpeed == 0: # Controls speed of animation\n missile.image = pygame.transform.scale(self.explosions[missile.imageNum],\n (missile.imageSize, missile.imageSize))\n\n # Animation\n missile.imageNum += 1\n\n # On last animation the imageNum will go to 8 which is out of\n # bounds but never used\n if missile.imageNum == imgs:\n self.numFinished += 1 # Increment number done animating\n if self.numFinished != self.numMissiles:\n # Note to self: Don't do missile = None because you still\n # need\n missile.image = None\n\n # Render image based on mouse position\n if missile.imageNum < imgs:\n\n # Directional controls\n if self.controls == 0:\n xDiff = mouseX - missile.centerX\n yDiff = mouseY - missile.centerY\n missile.currPos = (missile.posX - xDiff, missile.posY - yDiff)\n self.window.blit(missile.image, missile.currPos)\n # Inverted controls\n elif self.controls == 1:\n xDiff = mouseX - missile.centerX\n yDiff = mouseY - missile.centerY\n missile.currPos = (missile.posX + xDiff, missile.posY + yDiff)\n self.window.blit(missile.image, missile.currPos)\n\n elif missile.imageNum == imgs:\n missile.imageNum += 1 # For conditional elif directly below\n\n elif missile.imageNum > imgs:\n # If last missile destroyed and its animation is done\n if self.numFinished == self.numMissiles:\n self.animationFinished = True\n\n if missile.imageSize >= Level.MAX_IMG_SIZE and not missile.explode: # Prevent multiple damage\n self.game.decLives()\n self.remMissile(missile)\n\n\n def drawCrossHairs(self, window, width, height):\n pygame.draw.rect(window, (0, 0, 0), (int(width/2), int(height/2) - 15, 1, 30))\n pygame.draw.rect(window, (0, 0, 0), (int(width/2) - 15, int(height/2), 30, 1))\n\n\n def remMissile(self, missile):\n self.numDestroyed += 1\n missile.explode = True\n missile.imageNum = 0\n missile.growth = 0\n\n\n class Cloud():\n\n def __init__(self, image, size, posX, posY, seed, screenW, screenH):\n self.image = pygame.transform.scale(image,\n (image.get_width() + int(image.get_width() * size),\n image.get_height() + int(image.get_height() * size)))\n self.size = size\n self.centerX = int(screenW / 2) # To compare to mouse x\n self.centerY = int(screenH / 2) # To compare to mouse y\n self.posX = posX\n self.posY = posY\n\n\n class Missile():\n\n def __init__(self, imageNum, image, posX, posY, screenW, screenH):\n self.imageSize = 16\n self.imageNum = imageNum\n self.growth = 1\n self.image = pygame.transform.scale(image, (16, 16))\n self.centerX = int(screenW / 2)\n self.centerY = int(screenH / 2)\n self.posX = posX\n self.posY = posY\n self.currPos = (posX, posY) # Will hold screen position\n self.explode = 
False\n","sub_path":"missileenv/level2.py","file_name":"level2.py","file_ext":"py","file_size_in_byte":12013,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"607842700","text":"import sys\nimport numpy as np\nimport math\nimport scipy\nimport scipy.optimize\nimport matplotlib.pyplot as plt\nimport pylab as p, time\nfrom grids import VoxelGrid\nfrom box_counting import BoxCounter\nfrom numpy import mean,cov,double,cumsum,dot,linalg,array,rank\nfrom pylab import plot,subplot,axis,stem,show,figure\n\nclass BTStats(object) :\n '''\n Compute morphometric features and statistics of a single morphology\n\n Assume the \"3 point\" soma of the curated NeuroMorpho format. (`website `_)\n \n B. Torben-Nielsen (legacy code)\n '''\n \n def __init__(self,tree) :\n \"\"\"\n Constructor.\n\n Parameters\n -----------\n tree : :class:`STree2`\n Neuronal tree for which to compute morphometrics\n \"\"\"\n self._tree = tree\n self._all_nodes = self._tree.get_nodes()\n \n # compute some of the most used stats + \n self._soma_points, self._bif_points, self._end_points = \\\n self.get_points_of_interest()\n \n def get_points_of_interest(self) :\n \"\"\"\n Get lists containting the \"points of interest\", i.e., soma points, \\\n bifurcation points and end/terminal points.\n\n Returns\n -------\n soma_points : list\n bif_points : list\n end_points : list\n \n \"\"\"\n soma_points = []\n bif_points = []\n end_points = []\n \n # upated 2014-01-21 for compatibility with new btstructs2\n for node in self._all_nodes :\n if len(node.children) > 1 :\n if not node.parent is None :\n bif_points.append(node) # the root is not a bifurcation\n if len(node.children) == 0 :\n if node.parent.index != 1: # \"3 point soma\", avoid the two side branches\n end_points.append(node)\n if node.parent is None :\n soma_points = node\n \n return soma_points, bif_points, end_points\n \n \"\"\"\n Global measures (1 for each tree)\n \"\"\"\n def approx_soma(self):\n \"\"\"\n *Scalar, global morphometric*\n \n By NeuroMorpho.org convention: soma surface ~ 4*pi*r^2, \\\n where r is the abs(y_value) of point 2 and 3 in the SWC file\n\n\n Returns\n -------\n surface : float\n soma surface in micron squared\n \n \"\"\"\n \n r = self._tree.get_node_with_index(1).content['p3d'].radius\n return 4.0*np.pi*r*r\n\n def no_bifurcations(self) :\n \"\"\"\n *Scalar, global morphometric*\n \n Count the number of bifurcations points in a complete moprhology\n \n Returns\n -------\n no_bifurcations : int\n number of bifurcation\n \"\"\"\n return len(self._bif_points)\n \n def no_terminals(self) :\n \"\"\"\n *Scalar, global morphometric*\n \n Count the number of temrinal points in a complete moprhology\n \n Returns\n -------\n no_terminals : int\n number of terminals\n \"\"\" \n return len(self._end_points)\n \n def no_stems(self) :\n \"\"\"\n *Scalar, global morphometric*\n \n Count the number of stems in a complete moprhology (except the three \\\n point soma from the Neuromoprho.org standard)\n\n\n Returns\n -------\n no_stems : int\n number of stems\n \n \"\"\"\n return len(self._tree.root.children)-2 \n \n def total_length(self) :\n \"\"\"\n *Scalar, global morphometric*\n \n Calculate the total length of a complete morphology\n\n\n Returns\n -------\n total_length : float\n total length in micron\n \n \"\"\" \n L = 0\n # upated 2014-01-21 for compatibility with new btstructs2\n for node in self._all_nodes :\n n = node.content['p3d']\n if not node.index in (1,2,3) :\n p = node.parent.content['p3d']\n d 
= np.sqrt(np.sum((n.xyz-p.xyz)**2))\n L += d\n \n return L\n\n def total_surface(self) :\n \"\"\"\n *Scalar, global morphometric*\n \n Total neurite surface (at least, surface of all neurites excluding\n the soma. In accordance to the NeuroMorpho / L-Measure standard)\n\n Returns\n -------\n total_surface : float \n total surface in micron squared\n \n \"\"\"\n total_surf = 0\n all_surfs = []\n # upated 2014-01-21 for compatibility with new btstructs2\n for node in self._all_nodes :\n n = node.content['p3d']\n if not node.index in (1,2,3) :\n p = node.parent.content['p3d']\n H = np.sqrt(np.sum((n.xyz-p.xyz)**2))\n surf = 2*np.pi*n.radius*H\n all_surfs.append(surf)\n total_surf = total_surf + surf\n return total_surf, all_surfs\n\n def total_volume(self) :\n \"\"\"\n *Scalar, global morphometric*\n \n Total neurite volume (at least, surface of all neurites excluding\n the soma. In accordance to the NeuroMorpho / L-Measure standard)\n\n Returns\n -------\n total_volume : float \n total volume in micron cubed\n \n \"\"\"\n total_vol = 0\n all_vols = []\n # upated 2014-01-21 for compatibility with new btstructs2\n for node in self._all_nodes :\n n = node.content['p3d']\n if not node.index in (1,2,3) :\n p = node.parent.content['p3d']\n H = np.sqrt(np.sum((n.xyz-p.xyz)**2))\n vol = np.pi*n.radius*n.radius*H\n all_vols.append(vol)\n total_vol = total_vol + vol\n return total_vol, all_vols\n\n def total_dimension(self) :\n \"\"\"\n *Scalar, global morphometric* Overall dimension of the morphology\n\n Returns\n -------\n dx : float\n x-dimension\n dy : float\n y-dimension\n dz : float\n z-dimension\n \n \"\"\"\n dx,dy,dz,values = self.total_dimensions_verbose()\n return dx,dy,dz \n\n\n def total_dimensions_verbose(self) :\n \"\"\"\n *Scalar, global morphometric*\n \n Overall dimension of the whole moprhology. (No translation of the \\\n moprhology according to arbitrary axes.)\n\n\n Returns\n -------\n dx : float\n x-dimension\n dy : float\n y-dimension\n dz : float\n z-dimension\n data : list\n minX,maxX,minY,maxY,minZ,maxZ\n \n \"\"\"\n minX = sys.maxint\n maxX = -1 * sys.maxint\n minY = sys.maxint\n maxY = -1 * sys.maxint\n minZ = sys.maxint\n maxZ = -1 * sys.maxint\n for node in self._all_nodes :\n n = node.content['p3d']\n nx = n.xyz[0]\n ny = n.xyz[1]\n nz = n.xyz[2] \n minX = nx if nx < minX else minX\n maxX = nx if nx > maxX else maxX\n\n minY = ny if ny < minY else minY\n maxY = ny if ny > maxY else maxY\n\n minZ = nz if nz < minZ else minZ\n maxZ = nz if nz > maxZ else maxZ\n dx = np.sqrt((maxX-minX)*(maxX-minX))\n dy = np.sqrt((maxY-minY)*(maxY-minY))\n dz = np.sqrt((maxZ-minZ)*(maxZ-minZ))\n return dx,dy,dz, [minX,maxX,minY,maxY,minZ,maxZ]\n \n def global_horton_strahler(self):\n \"\"\"\n Calculate Horton-Strahler number at the root\n See :func:`local_horton_strahler`\n \n Parameters\n ---------\n \n Returns\n ---------\n Horton-Strahler number at the root\n \"\"\"\n return self.local_horton_strahler(self._tree.root)\n\n \"\"\"\n Local measures\n \"\"\"\n def get_diameters(self):\n \"\"\"\n *Vector, local morphometric*\n\n Get the diameters of all points in the morphology\n \"\"\"\n diams = []\n for node in self._all_nodes:\n if not node.index in (1,2,3):\n diams.append(node.content['p3d'].radius*2.0)\n return diams\n \n def get_segment_pathlength(self,to_node) :\n \"\"\"\n *Vector, local morphometric*. \n\n Length of the incoming segment. Between this node and the soma or \\\n another branching point. 
A path is defined as a stretch between \\\n the soma and a bifurcation point, between bifurcation points, \\\n or in between of a bifurcation point and a terminal point\n \n Parameters\n ----------\n to_node : :class:`btmorph.btstructs2.SNode2`\n Node *to* which the measurement is taken\n\n Returns\n -------\n length : float\n length of the incoming path in micron\n \n \"\"\"\n # upated 2014-01-21 for compatibility with new btstructs2\n L = 0\n if self._tree.is_leaf(to_node) :\n path = self._tree.path_to_root(to_node)\n L = 0\n else :\n path = self._tree.path_to_root(to_node)[1:]\n p = to_node.parent.content['p3d']\n n = to_node.content['p3d']\n d = np.sqrt(np.sum((n.xyz-p.xyz)**2))\n L = L + d\n \n for node in path :\n # print 'going along the path'\n n = node.content['p3d']\n if len(node.children) >= 2 : # I arrive at either the soma or a branchpoint close to the soma\n return L\n else :\n p = node.parent.content['p3d']\n d = np.sqrt(np.sum((n.xyz-p.xyz)**2))\n L = L + d\n\n def get_pathlength_to_root(self,from_node) :\n \"\"\"\n Length of the path between from_node to the root. \n another branching point\n\n Parameters\n ----------\n from_node : :class:`btmorph.btstructs2.SNode2`\n \n Returns\n -------\n length : float\n length of the path between the soma and the provided node\n \n \"\"\"\n L = 0\n if self._tree.is_leaf(from_node) :\n path = self._tree.path_to_root(from_node)\n L = 0\n else :\n path = self._tree.path_to_root(from_node)[1:]\n p = from_node.parent.content['p3d']\n n = from_node.content['p3d']\n d = np.sqrt(np.sum((n.xyz-p.xyz)**2))\n L = L + d\n \n for node in path[:-1]:\n # print 'going along the path'\n n = node.content['p3d']\n p = node.parent.content['p3d']\n d = np.sqrt(np.sum((n.xyz-p.xyz)**2))#np.sqrt(np.sum((n.xyz-p.xyz)**2))\n L = L + d\n return L\n \n def get_segment_Euclidean_length(self,to_node) :\n \"\"\"\n Euclidean length to the incoming segment. Between this node and the soma or \\\n another branching point\n\n Parameters\n ----------\n from_node : :class:`btmorph.btstructs2.SNode2`\n \n Returns\n -------\n length : float\n Euclidean distance *to* provided node (from soma or first branch point with lower order)\n \n \"\"\"\n L = 0\n if self._tree.is_leaf(to_node) :\n path = self._tree.path_to_root(to_node)\n else :\n path = self._tree.path_to_root(to_node)[1:]\n\n n = to_node.content['p3d']\n for node in path :\n if len(node.children) >= 2 :\n return L\n else :\n p = node.parent.content['p3d']\n d = np.sqrt(np.sum((n.xyz-p.xyz)**2))\n L = d \n\n def get_Euclidean_length_to_root(self,from_node) :\n \"\"\"\n euclidean length between the from_node and the root\n\n Parameters\n ----------\n from_node : :class:`btmorph.btstructs2.SNode2`\n \n Returns\n -------\n length : float\n length of the path between the soma and the provided node\n\n \"\"\"\n n = from_node.content['p3d']\n p = self._tree.root.content['p3d']\n d = np.sqrt(np.sum((n.xyz-p.xyz)**2))\n return d\n\n def degree_of_node(self,node) :\n \"\"\"\n Degree of a node. (The number of leaf node in the subtree mounted at \\\n the provided node)\n\n Parameters\n ----------\n node : :class:`btmorph.btstructs2.SNode2`\n\n Returns\n -------\n degree : float\n degree of the subtree rooted at node\n \n \"\"\"\n return self._tree.degree_of_node(node) \n \n def order_of_node(self,node):\n \"\"\"\n Order of a node. 
(Going centrifugally away from the soma, the order \\\n increases with 1 each time a bifurcation point is passed)\n\n Parameters\n ----------\n node : :class:`btmorph.btstructs2.SNode2`\n\n Returns\n -------\n order : float\n order of the subtree rooted at node \n \n \"\"\" \n return self._tree.order_of_node(node)\n\n def partition_asymmetry(self,node) :\n \"\"\"\n *Vector, local morphometric*\n\n Compute the partition asymmetry for a given node.\n\n Parameters\n ----------\n node : :class:`btmorph.btstructs2.SNode2`\n\n Returns\n -------\n partition_asymmetry : float\n partition asymmetry of the subtree rooted at node (according to vanpelt and schierwagen 199x)\n \n \"\"\"\n if node.children is None or len(node.children) == 1 :\n return None \n d1 = self._tree.degree_of_node(node.children[0])\n d2 = self._tree.degree_of_node(node.children[1])\n if(d1 == 1 and d2 == 1) :\n return 0 # by definition\n else :\n return np.abs(d1-d2)/(d1+d2-2.0)\n\n\n def bifurcation_angle_vec(self,node,where='local'):\n \"\"\"\n *Vector, local morphometric*\n\n Only to be computed at branch points (_bif_points). Computes the angle\n between the two daughter branches in the plane defined by the \\\n parent and the two daughters.\n \n cos alpha = :math:`(a \\dot b) / (|a||b|)`\n\n Parameters\n -----------\n node : :class:`btmorph.btstructs2.SNode2`\n where : string\n either \"local\" or \"remote\". \"Local\" uses the immediate daughter \\\n points while \"remote\" uses the point just before the next bifurcation or terminal point.\n\n Returns\n -------\n angle : float\n Angle in degrees\n \"\"\"\n child_node1,child_node2 = self._get_child_nodes(node,where=where)\n scaled_1 = child_node1.content['p3d'].xyz - node.content['p3d'].xyz\n scaled_2 = child_node2.content['p3d'].xyz - node.content['p3d'].xyz\n amp = lambda a: np.sqrt(np.sum((a)**2))\n return np.arccos(np.dot(scaled_1,scaled_2)/(amp(scaled_1)*amp(scaled_2))) / (2*np.pi/360)\n\n def bifurcation_sibling_ratio(self,node,where='local') :\n \"\"\"\n *Vector, local morphometric*\n\n Ratio between the diameters of two siblings. \n\n Parameters\n ----------\n node : :class:`btmorph.btstructs2.SNode2`\n where : string\n Toggle 'local' or 'remote'\n\n Returns\n -------\n result : float\n Ratio between the diameter of two siblings\n \n \"\"\"\n child1,child2 = self._get_child_nodes(node,where=where)\n radius1 = child1.content['p3d'].radius\n radius2 = child2.content['p3d'].radius\n if radius1 > radius2 :\n return radius1 / radius2\n else :\n return radius2 / radius1\n \n def _get_child_nodes(self,node,where) :\n if where == 'local' : \n return node.children[0],node.children[1]\n else :\n grandchildren = []\n for child in node.children :\n t_child = self._find_remote_child(child)\n grandchildren.append(t_child)\n return grandchildren[0],grandchildren[1]\n\n def _find_remote_child(self,node) :\n t_node = node\n while len(t_node.children) < 2 :\n if len(t_node.children) == 0 :\n # print t_node, '-> found a leaf'\n return t_node\n t_node = t_node.children[0]\n # print t_node,' -> found a bif'\n return t_node\n\n def bifurcation_ralls_power_fmin(self,node,where='local') :\n \"\"\"\n *Vector, local morphometric*\n\n Approximation of Rall's ratio using scipy.optimize.fmin.\n The error function is :math:`F={D_{d1}}^n+{D_{d2}}^n-{D_p}^n`\n\n Parameters\n ----------\n node : :class:`btmorph.btstructs2.SNode2`\n where : string\n either \"local\" or \"remote\". 
\"Local\" uses the immediate daughter \\\n points while \"remote\" uses the point just before the next bifurcation or terminal point.\n\n Returns\n -------\n rr : float\n Appriximation of Rall's ratio\n \"\"\"\n p_diam = node.content['p3d'].radius*2\n child1,child2 = self._get_child_nodes(node,where=where)\n d1_diam = child1.content['p3d'].radius*2\n d2_diam = child2.content['p3d'].radius*2\n #print 'pd=%f,d1=%f,d2=%f' % (p_diam,d1_diam,d2_diam)\n\n if d1_diam >= p_diam or d2_diam >= p_diam :\n return np.nan\n\n import scipy.optimize\n mismatch = lambda n : np.abs(np.power(d1_diam,n) + np.power(d2_diam,n) - np.power(p_diam,n))\n\n p_lower = 0.0\n p_upper = 5.0 # THE associated mismatch MUST BE NEGATIVE\n\n best_n = scipy.optimize.fmin(mismatch,(p_upper-p_lower)/2.0,disp=False)\n if 0.0 < best_n < 5.0:\n return best_n\n else:\n return np.nan\n\n def bifurcation_rall_ratio_classic(self,node,where='local'):\n \"\"\"\n *Vector, local morphometric*\n\n The ratio :math:`\\\\frac{ {d_1}^p + {d_2}^p }{D^p}` computed with :math:`p=1.5`\n\n Parameters\n -----------\n node : :class:`btmorph.btstructs2.SNode2`\n where : string\n either 'local or 'remote'. 'Local' uses the immediate daughter \\\n points while \"remote\" uses the point just before the next bifurcation or terminal point.\n\n Returns\n -------\n rr : float\n Approximation of Rall's ratio\n \n \"\"\"\n p_diam = node.content['p3d'].radius*2\n child1,child2 = self._get_child_nodes(node,where=where)\n d1_diam = child1.content['p3d'].radius*2\n d2_diam = child2.content['p3d'].radius*2\n\n return ( np.power(d1_diam,1.5) + np.power(d2_diam,1.5)) / np.power(p_diam,1.5)\n \n def bifurcation_ralls_power_brute(self,node,where='local',min_v=0,max_v=5,steps=1000) :\n \"\"\"\n *Vector, local morphometric*\n\n Approximation of Rall's ratio.\n :math:`D^p = {d_1}^p + {d_2}^p`, p is approximated by brute-force checking the \\\n interval [0,5] in 1000 steps (by default, but the exact search \\\n dimensions can be specified by keyworded arguments.\n\n Parameters\n -----------\n node : :class:`btmorph.btstructs2.SNode2`\n where : string\n either 'local or 'remote'. 
'Local' uses the immediate daughter \\\n points while \"remote\" uses the point just before the next bifurcation or terminal point.\n\n Returns\n -------\n rr : float\n Approximation of Rall's power, p\n \n \"\"\"\n p_diam = node.content['p3d'].radius*2\n child1,child2 = self._get_child_nodes(node,where=where)\n d1_diam = child1.content['p3d'].radius*2\n d2_diam = child2.content['p3d'].radius*2\n #print 'pd=%f,d1=%f,d2=%f' % (p_diam,d1_diam,d2_diam)\n\n if d1_diam >= p_diam or d2_diam >= p_diam :\n return None\n\n test_v = np.linspace(min_v,max_v,steps)\n min_mismatch=100000000000.0\n best_n = -1\n for n in test_v:\n mismatch = (np.power(d1_diam,n) + np.power(d2_diam,n))-np.power(p_diam,n)\n #print \"n=%f -> mismatch: %f\" % (n,mismatch)\n if np.abs(mismatch) < min_mismatch:\n best_n = n\n min_mismatch = np.abs(mismatch)\n return best_n\n \n def _get_ampl_angle(self,node) :\n \"\"\"\n Compute the angle of this node on the XY plane and against the origin\n \"\"\"\n pos_angle = lambda x: x if x > 0 else 180 + (180+x)\n a = np.rad2deg(np.arctan2(node.content['p3d'].y,node.content['p3d'].x))\n return pos_angle(a)\n \n def local_horton_strahler(self, node) :\n \"\"\"\n We assign Horton-Strahler number to all nodes of a tree, in bottom-up order, as follows:\n\n If the node is a leaf (has no children), its Strahler number is one.\n If the node has one child with Strahler number i, and all other children have Strahler numbers less than i, then the Strahler number of the node is i again.\n If the node has two or more children with Strahler number i, and no children with greater number, then the Strahler number of the node is i + 1.\n *If the node has only one child, the Strahler number of the node equals to the Strahler number of the child\n The Strahler number of a tree is the number of its root node.\n \n See wikipedia for more information: http://en.wikipedia.org/wiki/Strahler_number\n \n Parameters\n ---------\n node : :class:`btmorph.btstructs2.SNode2`\n Node of interest\n Returns\n ---------\n hs : int\n The Horton-Strahler number (Strahler number) of the node\n \"\"\"\n # Empy tree\n if node is None:\n return -1\n # Leaf => HS=1\n if len(node.children) == 0:\n return 1\n # Not leaf\n childrenHS = map(self.local_horton_strahler, node.children)\n return max(childrenHS + [(min(childrenHS)+1)])\n \n def fractal_dimension_box_counting_core(self, vg):\n \"\"\"\n Calculates fractal dimension of the given voxel grid by this formula:\n D = lim e -> 0 of (log(Ne)/log(e))\n http://rsbweb.nih.gov/ij/plugins/fraclac/FLHelp/Glossary.htm#db\n \"\"\"\n # Box counting\n bc = BoxCounter(vg) \n if vg.res[2] == 0 or vg.res[2] == 1:\n startDim = min(vg.res[:-1])/2\n else:\n startDim = min(vg.res)/2\n bc.grid_coverage(startDim)\n szs = map(lambda x: 2**x/float(max(vg.res)), range(1, int(math.log(startDim, 2))+1))\n cover = bc.coverageVals[1:-1]\n slope,intercept=np.polyfit(np.log(szs), np.log(cover),1)\n return -slope\n \n def lacunarity_box_counting_core(self, vg):\n \"\"\"\n Calculate lacunarity based on standard fixed grid box counting method with coef. 
of variation\n See wikipedia for more information: http://en.wikipedia.org/wiki/Lacunarity#equation_1\n Note: here we ignore orientations (all boxes start from (0,0,0)) and box sizes are always power of two\n \n Parameters\n ----------\n vg : :class:`btmorph.btstructs2.VoxelGrid`\n Ready to use voxel grid\n\n Returns\n -------\n lacunarity : float\n \n \n \"\"\"\n bc = BoxCounter(vg)\n if vg.res[2] == 0 or vg.res[2] == 1:\n startDim = min(vg.res[:-1])/2\n else:\n startDim = min(vg.res)/2\n bc.grid_count(startDim)\n lambdas = []\n for el in bc.countVals[1:-1]:\n lambdas.append((np.std(el)/np.mean(el))**2)\n lc = np.mean(lambdas)\n return lc\n \n def fractal_dimension_lacunarity(self, voxelSize):\n \"\"\"\n Calculate both lacunarity and fractal dimension of a tree.\n Faster than calling fractal_dim_box_counting and lacunarity_standard separately\n \n Parameters\n ----------\n voxelSize : number\n Desired voxel size, affects resolution. Both measures use voxelization of the 3D tree for calculations\n \n Returns\n ----------\n (lacunarity, fractal_dimension)\n \"\"\"\n # Best resolution\n dx,dy,dz = self.total_dimension()\n res = [int(2**round(math.log(dx/voxelSize, 2))), int(2**round(math.log(dy/voxelSize, 2))), 1]\n if dz > 0.0:\n res[2] = int(2**round(math.log(dz/voxelSize, 2)))\n dim = [dx, dy, dz]\n self.vg = VoxelGrid(dim, res, self._tree)\n return self.frac_dim_lac(self.vg)\n \n def frac_dim_lac(self, vg=None):\n \"\"\"\n Compute both lacunarity and fractal dimension\n Calculates lacunarity based on standard fixed grid box counting method with coef. of variation\n See wikipedia for more information: http://en.wikipedia.org/wiki/Lacunarity#equation_1\n Note: here we ignore orientations (all boxes start from (0,0,0)) and box sizes are always power of two\n Calculates fractal dimension of the given voxel grid by this formula:\n D = lim e -> 0 of (log(Ne)/log(e))\n http://rsbweb.nih.gov/ij/plugins/fraclac/FLHelp/Glossary.htm#db\n \n Parameters\n ----------\n vg : :class:`btmorph.btstructs2.VoxelGrid`\n Ready to use voxel grid\n \n Returns\n --------\n lacunarity, fractal_dimension : tuple\n \"\"\"\n bc = BoxCounter(vg)\n if vg.res[2] == 0 or vg.res[2] == 1:\n startDim = min(vg.res[:-1])/2\n else:\n startDim = min(vg.res)/2\n bc.grid_count(startDim)\n lambdas = []\n for el in bc.countVals[1:-1]:\n lambdas.append((np.std(el)/np.mean(el))**2)\n lc = np.mean(lambdas)\n for i in range(1,len(bc.countVals)):\n s = sum(1 for e in bc.countVals[i] if e)\n bc.coverageVals[i] = s\n szs = map(lambda x: 2**(3*x), range(1, int(math.log(startDim, 2))+1))\n szs2 = map(lambda x: 2**x/float(max(vg.res)), range(1, int(math.log(startDim, 2))+1))\n cover = bc.coverageVals[1:-1]\n slope,intercept=np.polyfit(np.log(szs2), np.log(cover),1)\n #----\n lc_slope,interc_lac = np.polyfit(np.log(szs), np.log(lambdas), 1)\n return (lc, -slope)\n \n def pca(self, A):\n \"\"\" performs principal components analysis \n (PCA) on the n-by-p data matrix A\n Rows of A correspond to observations, columns to variables. \n \n Returns : \n coeff :\n is a p-by-p matrix, each column containing coefficients \n for one principal component.\n score : \n the principal component scores; that is, the representation \n of A in the principal component space. 
Rows of SCORE \n correspond to observations, columns to components.\n latent : \n a vector containing the eigenvalues \n of the covariance matrix of A.\n source: http://glowingpython.blogspot.jp/2011/07/principal-component-analysis-with-numpy.html\n \"\"\"\n # computing eigenvalues and eigenvectors of covariance matrix\n M = (A-mean(A.T,axis=1)).T # subtract the mean (along columns)\n [latent,coeff] = linalg.eig(cov(M)) # attention: not always sorted\n score = dot(coeff.T,M) # projection of the data in the new space\n return coeff,score,latent\n","sub_path":"btmorph/btstats.py","file_name":"btstats.py","file_ext":"py","file_size_in_byte":27481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"269695678","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\ndef impot():\n \n ligne = 0\n i = 0\n somme = 0.0\n\n fichier = open (\"donnees.txt\",\"r\")\n val = fichier.readlines()\n\n val = list(map(float,val))\n \n\n while i < len(val):\n \n somme = somme + val[i]\n i = i + 1\n \n i = 0\n\n n, bins, patches = plt.hist(val, 30, density=True, facecolor='b', alpha=1) # density=True: normed was removed from recent matplotlib\n \n moy = (somme/len(val))//1 # mean grade (computed but not used)\n\n plt.xlabel('Notes')\n plt.ylabel('Nombre de notes en pourcentage')\n plt.axis([3, 30, 0, 0.15])\n plt.grid(True)\n plt.show()\n \n fichier.close()\n \n\nimpot()\n","sub_path":"Licence 2/S4/Outils_statistiques/TP1/exo4.py","file_name":"exo4.py","file_ext":"py","file_size_in_byte":608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"593453994","text":"# Concord\n#\n# Copyright (c) 2019 VMware, Inc. All Rights Reserved.\n#\n# This product is licensed to you under the Apache 2.0 license (the \"License\").\n# You may not use this product except in compliance with the Apache 2.0 License.\n#\n# This product may include a number of subcomponents with separate copyright\n# notices and license terms. 
Your use of these subcomponents is subject to the\n# terms and conditions of the subcomponent's license, as noted in the LICENSE\n# file.\n\nimport os.path\nimport random\nimport unittest\nimport trio\n\nfrom util.bft import with_trio, with_bft_network, KEY_FILE_PREFIX\nfrom util.skvbc_history_tracker import verify_linearizability\nfrom util import skvbc as kvbc\nfrom util import eliot_logging as log\n\nNUM_OPS = 20\n\ndef start_replica_cmd(builddir, replica_id):\n \"\"\"\n Return a command that starts an skvbc replica when passed to\n subprocess.Popen.\n\n Note each arguments is an element in a list.\n \"\"\"\n statusTimerMilli = \"500\"\n view_change_timeout_milli = \"10000\"\n path = os.path.join(builddir, \"tests\", \"simpleKVBC\", \"TesterReplica\", \"skvbc_replica\")\n return [path,\n \"-k\", KEY_FILE_PREFIX,\n \"-i\", str(replica_id),\n \"-s\", statusTimerMilli,\n \"-v\", view_change_timeout_milli,\n \"-e\", str(True)\n ]\n\nclass SkvbcCommitPathTest(unittest.TestCase):\n\n __test__ = False # so that PyTest ignores this test scenario\n\n @unittest.skip(\"This is the initial state covered in test_commit_path_transitions and is kept as a manual testing option.\")\n @with_trio\n @with_bft_network(start_replica_cmd, selected_configs=lambda n, f, c: n >= 6, rotate_keys=True)\n @verify_linearizability()\n async def test_fast_path_is_default(self, bft_network, tracker, exchange_keys=True):\n \"\"\"\n This test aims to check that the fast commit path is prevalent\n in the normal, synchronous case (no failed replicas, no network partitioning).\n\n First we write a series of K/V entries and check that in the process\n we have stayed on the fast path.\n\n Finally the decorator verifies the KV execution.\n \"\"\"\n\n bft_network.start_all_replicas()\n skvbc = kvbc.SimpleKVBCProtocol(bft_network,tracker)\n\n # Initially all replicas are running on the fast path\n await bft_network.wait_for_fast_path_to_be_prevalent(\n run_ops=lambda: skvbc.run_concurrent_ops(num_ops=NUM_OPS, write_weight=1), threshold=NUM_OPS)\n\n\n @unittest.skip(\"This is a transition covered in test_commit_path_transitions and is kept as a manual testing option.\")\n @with_trio\n @with_bft_network(start_replica_cmd, selected_configs=lambda n, f, c: n >= 6, rotate_keys=True)\n @verify_linearizability()\n async def test_fast_to_slow_path(self, bft_network, tracker):\n \"\"\"\n This test aims to check the correct transitions from fast to slow commit path.\n\n First we write a series of K/V entries making sure we stay on the fast path.\n\n Once the first series of K/V writes have been processed we bring down C + 1\n replicas (more than what the fast path can tolerate), which should trigger a transition to the slow path.\n\n We send a new series of K/V writes and make sure they\n have been processed using the slow commit path.\n\n Finally the decorator verifies the KV execution.\n \"\"\"\n\n bft_network.start_all_replicas()\n skvbc = kvbc.SimpleKVBCProtocol(bft_network,tracker)\n\n # Initially all replicas are running on the fast path\n await bft_network.wait_for_fast_path_to_be_prevalent(\n run_ops=lambda: skvbc.run_concurrent_ops(num_ops=NUM_OPS, write_weight=1), threshold=NUM_OPS)\n\n # Crash C+1 replicas excluding the primary - this ensures that the slow path will be used\n # without forcing a view change\n crash_targets = random.sample(bft_network.all_replicas(without={0}), bft_network.config.c + 1)\n bft_network.stop_replicas(crash_targets)\n\n await bft_network.wait_for_slow_path_to_be_prevalent(\n run_ops=lambda: 
skvbc.run_concurrent_ops(num_ops=NUM_OPS, write_weight=1), threshold=NUM_OPS)\n\n @with_trio\n @with_bft_network(start_replica_cmd, selected_configs=lambda n, f, c: n >= 6, rotate_keys=True)\n @verify_linearizability()\n async def test_commit_path_transitions(self, bft_network, tracker):\n \"\"\"\n This test aims to check the correct transitions from fast to slow and back to fast commit path.\n\n First we write a series of K/V entries making sure we stay on the fast path.\n\n Once the first series of K/V writes have been processed we bring down C + 1\n replicas (more than what the fast path can tolerate), which should trigger a transition to the slow path.\n\n We send a new series of K/V writes and make sure they\n have been processed using the slow commit path.\n\n Then we recover the crashed nodes and expect the fast path to be eventually restored.\n\n Finally the decorator verifies the KV execution.\n \"\"\"\n\n bft_network.start_all_replicas()\n skvbc = kvbc.SimpleKVBCProtocol(bft_network,tracker)\n\n # Initially all replicas are running on the fast path\n await bft_network.wait_for_fast_path_to_be_prevalent(\n run_ops=lambda: skvbc.run_concurrent_ops(num_ops=NUM_OPS, write_weight=1), threshold=NUM_OPS)\n\n # Crash C+1 replicas excluding the primary - this ensures that the slow path will be used\n # without forcing a view change\n crash_targets = random.sample(bft_network.all_replicas(without={0}), bft_network.config.c + 1)\n bft_network.stop_replicas(crash_targets)\n\n await bft_network.wait_for_slow_path_to_be_prevalent(\n run_ops=lambda: skvbc.run_concurrent_ops(num_ops=NUM_OPS, write_weight=1), threshold=NUM_OPS)\n\n # Recover crashed replicas and check that the fast path is recovered\n bft_network.start_replicas(crash_targets)\n\n await bft_network.wait_for_fast_path_to_be_prevalent(\n run_ops=lambda: skvbc.run_concurrent_ops(num_ops=NUM_OPS, write_weight=1), threshold=NUM_OPS)\n\n @with_trio\n @with_bft_network(start_replica_cmd,\n num_clients=4,\n selected_configs=lambda n, f, c: c >= 1 and n >= 6, rotate_keys=True)\n @verify_linearizability()\n async def test_fast_path_resilience_to_crashes(self, bft_network, tracker):\n \"\"\"\n In this test we check the fast path's resilience when \"c\" nodes fail.\n\n We write a series of K/V entries making sure the fast path is prevalent despite the crashes.\n\n Finally the decorator verifies the KV execution.\n \"\"\"\n\n bft_network.start_all_replicas()\n\n crash_targets = random.sample(bft_network.all_replicas(without={0}), bft_network.config.c)\n bft_network.stop_replicas(crash_targets)\n\n skvbc = kvbc.SimpleKVBCProtocol(bft_network, tracker)\n\n await bft_network.wait_for_fast_path_to_be_prevalent(\n run_ops=lambda: skvbc.run_concurrent_ops(num_ops=NUM_OPS, write_weight=1), threshold=NUM_OPS)\n\n\n @with_trio\n @with_bft_network(start_replica_cmd, selected_configs=lambda n, f, c: n >= 6, rotate_keys=True)\n @verify_linearizability()\n async def test_fast_path_after_view_change(self, bft_network, tracker):\n \"\"\"\n This test validates the BFT engine's ability to restore the fast path\n after a view change due to crashed primary.\n\n First we write a batch of K/V entries and check those entries have been processed via the fast commit path.\n\n We stop the primary and send a single write requests to trigger a view change.\n\n We bring the primary back up.\n\n We make sure the fast path is eventually maintained.\n\n Finally the decorator verifies the KV execution.\n \"\"\"\n\n bft_network.start_all_replicas()\n skvbc = 
kvbc.SimpleKVBCProtocol(bft_network, tracker)\n\n        # Initially all replicas are running on the fast path\n        await bft_network.wait_for_fast_path_to_be_prevalent(\n            run_ops=lambda: skvbc.run_concurrent_ops(num_ops=NUM_OPS, write_weight=1), threshold=NUM_OPS)\n\n        # Stop the primary\n        bft_network.stop_replica(0)\n\n        # Send a write request to trigger a view change\n        with trio.move_on_after(seconds=3):\n            await skvbc.send_write_kv_set()\n\n        randRep = random.choice(\n            bft_network.all_replicas(without={0}))\n\n        log.log_message(f'wait_for_view - Random replica {randRep}')\n\n        await bft_network.wait_for_view(\n            replica_id=randRep,\n            expected=lambda v: v > 0,\n            err_msg=\"Make sure view change has occurred.\"\n        )\n\n        # Restore the crashed primary\n        bft_network.start_replica(0)\n\n        # Make sure that the fast path is maintained eventually\n        await bft_network.wait_for_fast_path_to_be_prevalent(\n            run_ops=lambda: skvbc.run_concurrent_ops(num_ops=NUM_OPS, write_weight=1), threshold=NUM_OPS)\n\n","sub_path":"tests/apollo/test_skvbc_commit_path.py","file_name":"test_skvbc_commit_path.py","file_ext":"py","file_size_in_byte":9096,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"444926542","text":"def convert_string_to_int(list_a):\n    new_list = []\n    for item in list_a:\n        num = int(item)\n        new_list.append(num)\n    return new_list\n    \ndef get_intersection_of_n_sets(num_set_list):\n    result = num_set_list[0]\n    for num_set in num_set_list:\n        result = result.intersection(num_set)\n    return result\n    \nn = int(input()) \nnum_set_list = []\nfor i in range(n):\n    value_list = input().split()\n    value_list = convert_string_to_int(value_list)\n    value_set = set(value_list)\n    num_set_list.append(value_set)\n\nresult_set = get_intersection_of_n_sets(num_set_list)\nresult_list = list(result_set)\nresult_list.sort()\nprint(result_list)\n","sub_path":"dictionaries/commonelementsinNsets.py","file_name":"commonelementsinNsets.py","file_ext":"py","file_size_in_byte":664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"501531395","text":"# coding: utf-8\nimport os\n\ndef sum_position(n, p):\n    s = 0\n    for i in range(p):\n        s = s + n % 10\n        n = n // 10\n    return s\n\ndef decimal_transform(n):\n    st = bin(n)\n    st = st.replace('0b', '')\n    n = int(st)\n    return n\n\ndef main():\n    fp = os.path.dirname(os.path.abspath(__file__))+'\\\\'\n    f = open(fp+'A-large-practice.in', 'r')\n    T = int(f.readline()[:-1])\n    for i in range(T):\n        N, K = [int(x) for x in f.readline()[:-1].split()]\n        K = decimal_transform(K)\n        s = sum_position(K, N)\n        if N == s:\n            switch = 'ON'\n        else:\n            switch = 'OFF' \n        print(\"Case #\" + str(i + 1) + \": \" + str(switch))\n    print(\"Done\")\n\nif __name__ == '__main__':\n    main()","sub_path":"Practice/codejam/juzu/juzu_large.py","file_name":"juzu_large.py","file_ext":"py","file_size_in_byte":736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"398784600","text":"from enum import Enum\nimport codecs\nimport json\nfrom pprint import pprint\n\n# Change-log entry types (values of log_type):\n#     DELETE = 1\n#     ADD_LAT_LONG = 2\n\n\nclass ChangeLogEntry(object):\n    def __init__(self, hash_id, log_type, lat=None, long=None):\n        self.hash_id = hash_id\n        self.log_type = log_type\n        self.lat = lat\n        self.long = long\n\n\ndef implement_change_log(json_input, changelog_input, json_alternated_path):\n    json_log = ''\n    json_rests = []\n    with codecs.open(changelog_input, 'r', 'utf-8') as f:\n        json_log = json.load(f)\n    with 
codecs.open(json_input, 'r', 'utf-8') as f:\n json_rests = json.load(f)\n\n print('about to implement data change')\n print('the original json file includes {} restaurants'.format(len(json_rests)))\n\n # checking missing information.\n missing_hash_id = [entry for entry in json_rests if entry.get('hash_id') is None]\n missing_name = [entry for entry in json_rests if entry.get('name') is None]\n missing_sub_type = [entry for entry in json_rests if entry.get('sub_type') is None]\n missing_kosher = [entry for entry in json_rests if entry.get('kosher') is None]\n missing_address = [entry for entry in json_rests if entry.get('address') is None]\n missing_pic = [entry for entry in json_rests if entry.get('pic') is None]\n missing_lat = [entry for entry in json_rests if entry.get('lat') is None]\n missing_long = [entry for entry in json_rests if entry.get('long') is None]\n\n print('##################################')\n print('missing status')\n print('##################################')\n\n print('number of hash_id missing is {}'.format(len(missing_hash_id)))\n print('number of name missing is {}'.format(len(missing_name)))\n print('number of sub_type missing is {}'.format(len(missing_sub_type)))\n print('number of kosher missing is {}'.format(len(missing_kosher)))\n print('number of address missing is {}'.format(len(missing_address)))\n print('number of pic missing is {}'.format(len(missing_pic)))\n print('number of lat missing is {}'.format(len(missing_lat)))\n print('number of long missing is {}'.format(len(missing_long)))\n\n # taking care of deletion.\n deletion_hashes = [entry.get('hash_id') for entry in json_log if entry.get('log_type') == 1]\n deletion_group = [rest for rest in json_rests if rest.get('hash_id') in deletion_hashes]\n json_rests = [rest for rest in json_rests if rest.get('hash_id') not in deletion_hashes]\n print('##################################')\n print('deleting {} restaurants, out of {} size deletion list'.format(len(deletion_group), len(deletion_hashes)))\n print('after deletion {} restaurants'.format(len(json_rests)))\n print('##################################')\n # deleted.\n\n json_rests_dict = {rest.get('hash_id'): rest for rest in json_rests}\n add_lat_long_group = [entry for entry in json_log if entry.get('log_type') == 2]\n changed_counter = 0\n for entry in add_lat_long_group:\n hash_id = entry.get('hash_id')\n lat = entry.get('lat')\n long = entry.get('long')\n if hash_id in json_rests_dict.keys():\n json_rests_dict[hash_id]['lat'] = str(lat)\n json_rests_dict[hash_id]['long'] = str(long)\n changed_counter += 1\n\n print('##################################')\n print('alternating lat-long. alternated {} restaurants'.format(changed_counter))\n print('##################################')\n\n json_alternated_rest = [rest for rest in json_rests_dict.values()]\n print('writing alternated json file. 
{} restaurants.'.format(len(json_alternated_rest)))\n with codecs.open(json_alternated_path, 'w+', 'utf-8') as f_output:\n json.dump(json_alternated_rest , f_output)\n\n","sub_path":"parser_main/alternator.py","file_name":"alternator.py","file_ext":"py","file_size_in_byte":3731,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"360263591","text":"\"\"\"\nSecond-Hand-Shop Project\n\n@author: Malte Gerth\n@copyright: Copyright (C) 2015 Malte Gerth\n@license: MIT\n@maintainer: Malte Gerth\n@email: mail@malte-gerth.de\n\"\"\"\n\nfrom django.contrib.auth import get_user_model\nfrom django.core.urlresolvers import reverse\nfrom django.test import TestCase\nfrom django.utils.datetime_safe import datetime\nfrom django.utils.timezone import make_aware, now\nfrom django_dynamic_fixture import G\nfrom django_webtest import WebTest\nfrom freezegun import freeze_time\n\nfrom pos.models import Cart\nfrom sale_lists.models import Article, SaleList\n\nfrom .models import Event, get_active_event_id_not_cached\nfrom .templatetags.events import (\n count_free_list_assign,\n count_free_lists,\n count_free_lists_percentage,\n count_max_lists,\n is_booking_open,\n)\n\n\n__author__ = \"Malte Gerth \"\n__copyright__ = \"Copyright (C) 2015 Malte Gerth\"\n__license__ = \"MIT\"\n\n\nclass EventAdminTest(WebTest):\n def setUp(self):\n self.user = G(get_user_model(), is_superuser=True, is_staff=True)\n\n def test_list(self):\n url = reverse(\"admin:events_event_changelist\")\n G(Event, is_active=True, name=\"Herbst 2015\")\n event = G(Event, is_active=False, name=\"Frühjahr 2015\")\n G(\n Article,\n sale_list=G(SaleList, event=event),\n cart=G(Cart, event=event),\n event=event,\n )\n response = self.app.get(url, user=self.user)\n self.assertContains(response, \"Herbst 2015\")\n self.assertContains(response, \"Frühjahr 2015\")\n\n\nclass EventModelTest(TestCase):\n def test_str(self):\n event = G(Event, name=\"Spring 2018\")\n self.assertEqual(\"Spring 2018\", str(event))\n\n def test_get_active_event_id_not_cached(self):\n event = G(Event, id=55, is_active=True)\n self.assertEqual(55, get_active_event_id_not_cached())\n event.is_active = False\n event.save()\n G(Event, id=75, is_active=True)\n self.assertEqual(75, get_active_event_id_not_cached())\n\n\nclass EventTemplateTagsTest(TestCase):\n def test_count_free_lists(self):\n event = G(Event, is_active=True, max_sale_lists=10)\n self.assertEqual(10, count_free_lists())\n G(SaleList, event=event)\n self.assertEqual(9, count_free_lists())\n for __ in range(0, 20):\n G(SaleList, event=event)\n self.assertEqual(0, count_free_lists())\n\n def test_count_free_list_assign(self):\n event = G(Event, is_active=True, max_sale_lists=10)\n self.assertEqual(10, count_free_list_assign())\n G(SaleList, event=event)\n self.assertEqual(9, count_free_list_assign())\n for __ in range(0, 20):\n G(SaleList, event=event)\n self.assertEqual(0, count_free_list_assign())\n\n def test_count_free_lists_percentage(self):\n event = G(Event, is_active=True, max_sale_lists=10)\n self.assertEqual(100, count_free_lists_percentage())\n G(SaleList, event=event)\n self.assertEqual(90, count_free_lists_percentage())\n for __ in range(0, 20):\n G(SaleList, event=event)\n self.assertEqual(0, count_free_lists_percentage())\n\n def test_count_max_lists(self):\n event = G(Event, is_active=True, max_sale_lists=10)\n self.assertEqual(10, count_max_lists())\n\n def test_is_booking_open(self):\n G(\n Event,\n is_active=True,\n booking_start_date=datetime(2018, 
10, 1, 12),\n            booking_end_date=datetime(2018, 10, 14, 18),\n        )\n        with freeze_time(\"2018-10-01 11:59\"):\n            self.assertFalse(is_booking_open())\n        with freeze_time(\"2018-10-01 12:00\"):\n            self.assertTrue(is_booking_open())\n        with freeze_time(\"2018-10-14 18:00\"):\n            self.assertTrue(is_booking_open())\n        with freeze_time(\"2018-10-14 18:01\"):\n            self.assertFalse(is_booking_open())\n","sub_path":"src/events/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":3927,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"544684625","text":"import asyncio\nimport time\nfrom asyncio import Queue\n\nfrom aiohttp import ClientSession, ClientError\nfrom datas import create_logging, write_header_csv\nfrom exchange import message\nfrom faker import Factory\nfrom lxml import etree\n\nfake = Factory.create()\n\n\nclass CrawlURL(object):\n    def __init__(self, max_tries=2, max_tasks=10, loop=None):\n        self.loop = loop or asyncio.get_event_loop()\n        self.max_tries = max_tries\n        self.max_tasks = max_tasks\n        self.q = Queue(loop=self.loop)\n        self.seen_urls = set()\n        self.t0 = time.time()\n        self.t1 = None\n        self._session = None\n\n    @property\n    def session(self):\n        if self._session is None:\n            self._session = ClientSession(headers={'User-Agent': fake.user_agent()}, loop=self.loop)\n        return self._session\n\n    @session.setter\n    def session(self, values):\n        self._session = values\n\n    def close(self):\n        self.session.close()\n\n    @staticmethod\n    async def parse_link(response):\n        rs = await response.read()\n        selector = etree.HTML(rs)\n        urls = selector.xpath('//div[@class=\"info-panel\"]/h2/a/@href')\n        if urls:\n            for uri in urls:\n                if 'sh.lianjia.com' in str(response.url):\n                    pass\n                    # Shanghai's page structure differs from the other cities, so it is skipped here\n                    # await message(urljoin(str(response.url), uri))\n                else:\n                    await message(uri)\n\n    async def fetch(self, url):\n        tries = 0\n        while tries < self.max_tries:\n            try:\n                response = await self.session.get(url, allow_redirects=False)\n                break\n            except ClientError as client_error:\n                logger.info(client_error)\n                tries += 1\n        else:\n            return\n        try:\n            if response.status == 200:\n                print(response.url, 'fetch')\n                await self.parse_link(response)\n            else:\n                logger.info(response.url)\n                logger.info(response.status)\n\n        finally:\n            await response.release()\n\n    async def work(self):\n        try:\n            while 1:\n                url = await self.q.get()\n                assert url in self.seen_urls\n                await self.fetch(url)\n                self.q.task_done()\n                # asyncio.sleep(5)\n        except asyncio.CancelledError:\n            pass\n        except Exception as e:\n            logger.info(e)\n            logger.info(url)\n            self.q.task_done()\n\n    def add_url(self, url):\n        if url not in self.seen_urls:\n            self.seen_urls.add(url)\n            self.q.put_nowait(url)\n\n    async def crawl(self):\n        workers = [asyncio.Task(self.work(), loop=self.loop)\n                   for _ in range(self.max_tasks)]\n        self.t0 = time.time()\n        await self.q.join()\n        self.t1 = time.time()\n        for w in workers:\n            w.cancel()\n\n\nif __name__ == '__main__':\n    logger = create_logging('Lianjia URL', 'logurl.log')\n    write_header_csv()\n    # Qionghai has no listings\n    # Suzhou has no listings\n    # Shijiazhuang has no listings\n    # Shenyang has no listings\n    # Sanya has no listings\n    # Wenchang has no listings\n    # Wanning has no listings\n    # Haikou has no listings\n    # Xi'an has no listings\n    # Lingshui has no listings\n    # Langfang Yanjiao has no listings\n    URLs = ['http://bj.lianjia.com/zufang/pg{}/', 'http://nj.lianjia.com/zufang/pg{}/',\n            'http://cd.lianjia.com/zufang/pg{}/', 'http://cq.lianjia.com/zufang/pg{}/',\n            'http://cs.lianjia.com/zufang/pg{}/', 'http://qd.lianjia.com/zufang/pg{}/',\n            'http://dl.lianjia.com/zufang/pg{}/', 'http://dg.lianjia.com/zufang/pg{}/',\n            'http://sh.lianjia.com/zufang/d{}/', 'http://sz.lianjia.com/zufang/pg{}/',\n            
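# Note: Shanghai (sh) uses the d{} pagination path above; every other city endpoint uses pg{}.\n            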
'http://fs.lianjia.com/zufang/pg{}/', 'http://tj.lianjia.com/zufang/pg{}/',\n            'http://gz.lianjia.com/zufang/pg{}/', 'http://wh.lianjia.com/zufang/pg{}/',\n            'http://hz.lianjia.com/zufang/pg{}/', 'http://hf.lianjia.com/zufang/pg{}/',\n            'http://xm.lianjia.com/zufang/pg{}/', 'http://jn.lianjia.com/zufang/pg{}/',\n            'http://yt.lianjia.com/zufang/pg{}/', 'http://zs.lianjia.com/zufang/pg{}/',\n            'http://zh.lianjia.com/zufang/pg{}/'\n            ]\n    loop = asyncio.get_event_loop()\n    crawler = CrawlURL(max_tasks=10)\n    for URL in URLs:\n        for num in range(1, 101):\n            crawler.add_url(URL.format(num))\n    # crawler.add_url('http://bj.lianjia.com/zufang/pg16/') # test\n    loop.run_until_complete(crawler.crawl())\n    print('Finished in {:.3f} seconds'.format(crawler.t1 - crawler.t0))\n    print('Total pages crawled --->', len(crawler.seen_urls))\n    crawler.close()\n    loop.close()\n","sub_path":"Pyspider/异步爬虫/lianjia/租房/2/lianjia_zufang.py","file_name":"lianjia_zufang.py","file_ext":"py","file_size_in_byte":4783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"58732421","text":"import os\n\nclass InteractiveLogger:\n    def __init__(self, sudoku, cell, interactive=False):\n        self.sudoku = sudoku\n        self.cell = cell\n        self.interactive = interactive\n        self.messages = [\"Running in interactive mode, press enter to continue\"]\n\n    def __enter__(self):\n        return self\n\n    def __exit__(self, type, value, traceback):\n\n        if not self.interactive:\n            return\n\n        os.system(\"clear\")\n        with self.cell.highlighted():\n            lines_in_sudoku = str(self.sudoku).split(\"\\n\")\n\n            line_count = max(len(lines_in_sudoku), len(self.messages))\n\n            print()\n            print(\" Sudoku | Messages\")\n            print(\" ---------------------------|---------------------------\")\n            for index in range(line_count):\n\n                try:\n                    sudoku_line = lines_in_sudoku[index]\n                except IndexError:\n                    sudoku_line = \" \" * 25\n\n                try:\n                    message = self.messages[index]\n                except IndexError:\n                    message = \"\"\n\n                print(f\" {sudoku_line} | {message}\")\n\n            input()\n\n    def log(self, message):\n\n        self.messages.append(message)\n","sub_path":"pysudoku/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":1202,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"57304109","text":"import datetime\nimport importlib\nimport os\nimport logging\n\nimport sys\n\nimport aiohttp\nimport aioredis\nimport asyncpg\nfrom discord.ext import commands\n\nfrom dog.core.context import DogbotContext\nfrom dog.core.helpformatter import DogbotHelpFormatter\n\nlogger = logging.getLogger(__name__)\n\n__base = commands.Bot if '--selfbot' in ' '.join(sys.argv) else commands.AutoShardedBot\n\n\nclass ReloadableBot(__base):\n    \"\"\" A bot subclass that contains utility methods that aid in reloading cogs and extensions, and recursively\n    loading extensions. \"\"\"\n    def load_exts_recursively(self, directory: str, prefix: str = 'Recursive load'):\n        \"\"\" Loads extensions from a directory recursively. \"\"\"\n        def ext_filter(f):\n            return f not in ('__init__.py', '__pycache__') and not f.endswith('.pyc')\n\n        exts = []\n\n        # walk the ext directory to find extensions\n        for path, _, files in os.walk(directory):\n            # replace the base path/like/this to path.like.this\n            # add the filename at the end, but without the .py\n            # filter out stuff we don't need\n            exts += [path.replace('/', '.').replace('\\\\', '.') + '.' 
+ file.replace('.py', '')\n for file in filter(ext_filter, files)]\n\n for ext in exts:\n\n module = importlib.import_module(ext)\n if hasattr(module, 'setup'):\n logger.info('%s: %s', prefix, ext)\n self.load_extension(ext)\n else:\n logger.debug('Skipping %s, doesn\\'t seem to be an extension.', ext)\n\n # update exts to load\n self._exts_to_load = list(self.extensions.keys()).copy()\n\n def reload_extension(self, name: str):\n \"\"\" Reloads an extension. \"\"\"\n self.unload_extension(name)\n self.load_extension(name)\n\n def perform_full_reload(self):\n \"\"\" Fully reloads Dogbot.\n\n This reloads all Dogbot related modules, and all\n extensions.\n \"\"\"\n logger.info('*** Performing full reload! ***')\n self.reload_all_extensions()\n self.reload_modules()\n\n def reload_all_extensions(self):\n \"\"\" Reloads all extensions. \"\"\"\n logger.info('Reloading all %d extensions', len(self._exts_to_load))\n for name in self._exts_to_load:\n try:\n logger.info('Reloading extension: %s', name)\n self.reload_extension(name)\n except:\n logger.exception('While reloading all: Failed extension reload for %s', name)\n raise\n\n def reload_modules(self):\n \"\"\" Reloads all Dogbot related modules. \"\"\"\n # get applicable modules to reload\n modules = {k: m for k, m in sys.modules.items() if 'dog' in k and 'ext' not in k and\n k != 'dog'}\n for name, module in modules.items():\n logger.info('Reloading bot module: %s', name)\n importlib.reload(module)\n logger.info('Finished reloading bot modules!')\n\n\nclass BaseBot(ReloadableBot):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs, formatter=DogbotHelpFormatter())\n\n # aiohttp session used for fetching data\n self.session = aiohttp.ClientSession(loop=self.loop)\n\n # boot time (for uptime)\n self.boot_time = datetime.datetime.utcnow()\n\n # hack because __init__ cannot be async\n redis_coroutine = aioredis.create_redis(\n (kwargs.pop('redis_url'), 6379), loop=self.loop)\n\n # aioredis connection\n self.redis = self.loop.run_until_complete(redis_coroutine)\n\n # asyncpg\n pg = kwargs.pop('postgresql_auth')\n self.database = pg['database']\n self.pgpool = self.loop.run_until_complete(asyncpg.create_pool(**pg))\n\n # load core extensions\n self.load_exts_recursively('dog/core/ext', 'Core recursive load')\n\n async def on_message(self, msg):\n # do not process messages from other bots\n if msg.author.bot:\n return\n\n # wait until ready before processing any messages\n await self.wait_until_ready()\n\n ctx = await self.get_context(msg, cls=DogbotContext)\n await self.invoke(ctx)\n\n async def on_ready(self):\n print('Bot is ready!')\n print('[User]', self.user)\n print('[ID] ', self.user.id)\n\n\nclass Selfbot(commands.Bot):\n def __init__(self, *args, **kwargs):\n super().__init__(self_bot=True, *args, **kwargs)\n\n async def is_owner(self, user):\n return True\n","sub_path":"dog/core/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":4528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"8004600","text":"import statistics as stat\n\n\ncount = 0\n\n\ndef quicksort(A, n):\n\n if n == 1:\n return A\n\n else:\n pivot = get_pivot(A, n)\n A = partition(A, pivot, n)\n return A\n\n\ndef get_pivot(A, n):\n\n if n%2 == 0:\n m = int((n/2)-1)\n else:\n m = int((n-1)/2)\n\n pivot = stat.median([A[0], A[m], A[n-1]])\n return pivot\n\n\n\ndef partition(A, pivot, n):\n l = A.index(pivot)\n temp2 = A[l] # swap pivot to first element of array\n A[l] = A[0]\n A[0] = 
temp2\n l = A.index(pivot) # update pivot index to reflect new position\n\n i = 1\n\n global count\n count += n - 1\n\n for j in range(l, n):\n if A[j] < pivot:\n temp = A[i]\n A[i] = A[j]\n A[j] = temp\n i += 1\n\n temp2 = A[l]\n A[l] = A[i - 1]\n A[i - 1] = temp2\n l = A.index(pivot)\n\n if len(A) == 2:\n return A\n\n else:\n len_B = len(A[0:l])\n len_C = len(A[l+1:n])\n\n if len_B > 0:\n A[0:l] = quicksort(A[0:l], len_B)\n\n if len_C > 0:\n A[l+1:n] = quicksort(A[l+1:n], len_C)\n\n return A\n\n\ndef run_quicksort(A):\n print(\"original array is\", A)\n global count\n count = 0\n\n n = len(A)\n\n A = quicksort(A, n)\n print(\"sorted array is\", A, \"& there were\", count, \"comparisons\")\n\n return A, count\n\n\n\nB = [3, 20, 12, 14]\nrun_quicksort(B)\n","sub_path":"QuickSort/quicksort_pivot_median.py","file_name":"quicksort_pivot_median.py","file_ext":"py","file_size_in_byte":1368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"575505189","text":"import time\nimport random\n\n\nprint(\"Catch the truth about your future.\")\nprint(\"Think of a question. We'll send the answer your way.\")\n\ntime.sleep(2)\n\nprint(\"Wait, wait, your number is almost here: \")\n\ntime.sleep(2)\n\nanswerList = [\"Maybe not the best idea ever.\", \"Be happy now.\", \"Be yourself.\", \"Make some noise.\", \"Rain clearing.\", \"Find your own path.\", \"Season of change ahead!\"]\n\nanswer = random.choice(answerList)\n\nprint(answer)\n","sub_path":"magic8.py","file_name":"magic8.py","file_ext":"py","file_size_in_byte":435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"426437672","text":"import unittest\nfrom collections import defaultdict\n\n\nclass Node:\n def __init__(self, value):\n self.value = value\n self.left = None\n self.right = None\n\n\n\ndef num_paths_with_running_sum(node, k):\n # assume we have some path from the root that looks like\n # root --> past_node --> current_node\n\n # let running_sum = sum(root....current_node)\n # if sum(past_node ... current_node) == k\n # then we can increase num_paths by the number of occurrences of sum(root ... past_node) == running_sum - k\n\n # maintain a running_sum and dictionary where sums[v] = frequency of values of current path\n sums = defaultdict(int)\n return running_sum_helper(node, k, sums, 0)\n\n\ndef running_sum_helper(node, k, sums, running_sum):\n if node:\n running_sum += node.value\n\n num_paths = 0\n\n if sums[running_sum - k]:\n # add in number of ways we can complete the current target sum\n num_paths += sums[running_sum - k]\n if running_sum == k:\n # special case where the entire path can complete the target sum by itself\n num_paths += 1\n\n print(\"{0} paths at node {1}, running sum {2}, sums {3}\".format(num_paths, node.value, running_sum, sums))\n # add running_sum to dict before recursing\n # this means we are telling future iterations that it is possible to create a path of running_sum\n sums[running_sum] += 1\n\n num_paths += running_sum_helper(node.left, k, sums, running_sum)\n num_paths += running_sum_helper(node.right, k, sums, running_sum)\n\n # undo the history since we are backing out from this branch\n sums[running_sum] -= 1\n\n return num_paths\n else:\n # need a node to be a path\n return 0\n\n\n\ndef num_paths_with_sum(node, k):\n # assume we have some path that looks like\n # start --> past_node --> current_node\n # if sum(start.value ... 
past_node.value) + current_node.value == k, then we have found one new solution\n\n # maintain a dictionary sums where sums[v] = count of number of times we can make v from sums of ancestors of the current node\n\n sums = defaultdict(int)\n return num_paths_helper(node, k, sums)\n\n\ndef num_paths_helper(node, k, sums):\n if node:\n num_paths = 0\n\n # special case where this node itself is enough to complete the target sum\n if k == node.value:\n num_paths += 1\n\n # add the number of previous paths of value (k - node.value) to match target sum\n num_paths += sums[k - node.value]\n\n # add current node into sums dict before we recurse\n for s in sums:\n sums[s] += node.value\n sums[node.value] += 1\n\n num_paths += num_paths_helper(node.left, k, sums)\n num_paths += num_paths_helper(node.right, k, sums)\n\n # undo the dict change as we are backing out of this branch\n for s in sums:\n sums[s] -= node.value\n sums[node.value] -= 1\n\n return num_paths\n else:\n # no paths can be made without a node\n return 0\n\n\ndef paths_with_sum(node, k):\n paths = []\n paths_with_sum_easy(node, paths, k)\n return paths\n\n\ndef paths_with_sum_easy(node, paths, k):\n \"\"\"For each node, calculate all paths starting at this node and going downward.\n If sum of path == k, add to answers\n Time Complexity:\n in a balanced tree of height H, there are 2^H leaf nodes\n from the root, there are 2^H paths to the leaf nodes\n + 2^(H-1) paths to the nodes above the leaf nodes\n + 2^(H-2)...\n = 2^(H+1) - 1\n = O(2^(H+1))\n\n then we recurse on root.left and root.right\n O(root.left) = O(2^H)\n\n and so on...\n\n so O(2^(H+1)) + O(2^H) + ...\n = O(2^(H+1))\n = O(2^H)\n\n \"\"\"\n if node:\n paths_left = paths_with_sum_easy(node.left, paths, k)\n paths_right = paths_with_sum_easy(node.right, paths, k)\n\n if node.value == k:\n paths.append([node.value])\n ret = [[node.value]]\n\n # calculate all paths that start from this node\n for p in paths_left:\n if sum(p) + node.value == k:\n paths.append([node.value] + p)\n ret.append([node.value] + p)\n for p in paths_right:\n if sum(p) + node.value == k:\n paths.append([node.value] + p)\n ret.append([node.value] + p)\n\n return ret\n else:\n return []\n\n\nclass TestPathsWithSUm(unittest.TestCase):\n \"\"\"PathsWithSUm docs\"\"\"\n #@unittest.skip(\"\")\n def testPathsWithSUm(self):\n root = Node(0)\n p1 = Node(1)\n root.right = p1\n n1 = Node(-1)\n p1.left = n1\n p2 = Node(2)\n root.left = p2\n z0 = Node(0)\n p2.left = z0\n n2 = Node(-2)\n z0.right = n2\n p3 = Node(3)\n n1.left = p3\n z1 = Node(0)\n n2.left = z1\n\n\n # 0\n # 2 1\n # 0 -1\n # -2 3\n # 0\n paths = paths_with_sum(root, 0)\n print(\"paths are {0}\".format(paths))\n self.assertIn([0], paths)\n self.assertIn([1, -1], paths)\n self.assertIn([2, 0, -2], paths)\n self.assertIn([2, 0, -2, 0], paths)\n\n # self.assertEqual(9, num_paths_with_sum(root, 0))\n self.assertEqual(9, num_paths_with_running_sum(root, 0))\n\n def test2(self):\n root = Node(1)\n p2 = Node(3)\n root.left = p2\n z0 = Node(1)\n p2.left = z0\n\n\n # 0\n # 2\n # 0\n paths = paths_with_sum(root, 1)\n print(\"paths are {0}\".format(paths))\n self.assertEqual(2, num_paths_with_running_sum(root, 1))\n\nif __name__ == '__main__':\n unittest.main()","sub_path":"TreesAndGraphs/4_12_PathsWithSum.py","file_name":"4_12_PathsWithSum.py","file_ext":"py","file_size_in_byte":5796,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"81802823","text":"# -*- coding: utf-8 -*-\nfrom app.lib.common.hash import 
hash as hash_proc\n\n\nclass nickname:\n\n    def __init__(self, nick_name, area_id, database):\n        self.nick_name = nick_name\n        self.area_id = area_id\n        self.database = database\n\n    def process(self):\n        # start with an empty result\n        user_account = ''\n        # initialise the database connection\n        db = self.database\n        db_conn = db.db_select(1, self.area_id)\n        cur = db_conn.cursor()\n        # hash the nickname to pick the table shard\n        hash_res = hash_proc(self.nick_name)\n        table_num = hash_res.res\n        # NOTE: interpolating values into SQL is injection-prone; a parameterized query would be safer\n        sql = \"select a.useraccount from nickname_%s a where a.nickname = '%s';\" % (\n            table_num, self.nick_name)\n        cur.execute(sql)\n        result = cur.fetchone()\n        if result:\n            user_account = result[0]\n        db_conn.close()\n        cur.close()\n        return user_account\n","sub_path":"app/lib/common/nickname.py","file_name":"nickname.py","file_ext":"py","file_size_in_byte":861,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"542434973","text":"\"\"\"\nAll classes in this module manipulate generators, which means they *never* load the entire dataset into memory.\nOne exception is SentenceSplit(segmenter, read_n=-1), see documentation below.\n\nThey all have the same API, generator comes in, generator comes out.\nGenerators (input or output) are assumed to yield a string (e.g. a line in a file) whenever next(...) is called.\n\n\"\"\"\n\nfrom text.textprocessing import Pipeline, SentenceSegmenter\n\n\nclass Preprocess:\n\n    def __init__(self, pipeline: Pipeline):\n        self.pipeline = pipeline\n\n    def __call__(self, generator):\n        for line in generator:\n            yield self.pipeline.pre(line.strip())\n\n\nclass Postprocess:\n\n    def __init__(self, pipeline: Pipeline):\n        self.pipeline = pipeline\n\n    def __call__(self, generator):\n        for line in generator:\n            yield self.pipeline.post(line.strip())\n\n\nclass SentenceSplit:\n    \"\"\"\n    Generator of sentence-segmented data.\n    \"\"\"\n\n    def __init__(self, segmenter: SentenceSegmenter, read_n=1):\n        \"\"\"\n\n        :param segmenter: a TextProcessingModule that segments strings into sentences.\n        :param read_n: reads a number of lines and sends them all at once to the sentence segmenter.\n            use -1 for all lines (WARNING: this means the whole data will be in memory!)\n        \"\"\"\n        self.segmenter = segmenter\n        self.read_n = read_n\n\n    def __call__(self, generator):\n        if self.read_n > 0:\n            batch = []\n            for line in generator:\n                batch.append(line)\n                if len(batch) >= self.read_n:\n                    for sentence in self.segmenter.split(batch):\n                        yield sentence\n                    batch = []\n            if len(batch) > 0:\n                for sentence in self.segmenter.split(batch):\n                    yield sentence\n        else:\n            for sentence in self.segmenter.split([line for line in generator]):\n                yield sentence\n\n\nclass EnsureMaxLength:\n    \"\"\"\n    This wrapper chops sentences enforcing a maximum length, but this is different from SentenceSplit\n    * SentenceSplit uses punctuation to determine where to segment strings into sentences\n        while EnsureMaxLength uses number of tokens alone.\n    * SentenceSplit may produce sentences of arbitrary length\n        while EnsureMaxLength will impose a constraint.\n\n    EnsureMaxLength may discard sentences that are longer than the maximum allowed, if split=False, or\n    it will chop them at the maximum sentence length and chain the parts in sequence.\n    The class also provides a mechanism to join the parts back together.\n    \"\"\"\n\n    def __init__(self, max_length=-1, split=False):\n        \"\"\"\n\n        :param max_length: longest sentence allowed (in number of tokens as produced by string.split()),\n            use -1 for no maximum\n        :param split: by default (False) we discard long sentences,\n            if you switch this to True, then long 
sentences will be chopped, the parts will be yielded\n            in sequence and you can count on self.join to regroup them.\n        \"\"\"\n        self.max_length = max_length\n        self.split = split\n        self.nb_parts = []\n\n    @staticmethod\n    def split_list(x, size):\n        output = []\n        if size < 0:\n            return [x]\n        elif size == 0:\n            raise ValueError(\"Use size -1 for no splitting or size more than 0.\")\n        while True:\n            if len(x) > size:\n                output.append(x[:size])\n                x = x[size:]\n            else:\n                output.append(x)\n                break\n        return output\n\n    def __call__(self, generator):\n        if self.max_length < 0:\n            # no maximum: pass every line through untouched\n            yield from generator\n            return\n        for line in generator:\n            line = line.strip() # strip \\n\n            tokens = line.split()\n            if len(tokens) <= self.max_length:\n                # it's important to update nb_parts before yielding (not to break 'join')\n                self.nb_parts.append(1)\n                yield line\n            elif self.split: # sentence splitting (saves metadata to restore line-alignment with input)\n                parts = EnsureMaxLength.split_list(tokens, self.max_length)\n                # it's important to update nb_parts before yielding (not to break 'join')\n                self.nb_parts.append(len(parts))\n                for part in parts:\n                    yield ' '.join(part)\n\n    def join(self, generator):\n        if self.max_length < 0 or not self.split:\n            yield from generator\n        else:\n            # This is important: note that perhaps the generator hasn't yet generated nb_parts\n            # thus we cannot iterate over nb_parts\n            # we should rather get a line from the generator and maintain an index\n            i = 0\n            parts = []\n            for line in generator:\n                parts.append(line)\n                if len(parts) == self.nb_parts[i]:\n                    i += 1\n                    yield ' '.join(parts)\n                    parts = []","sub_path":"text/lazy.py","file_name":"lazy.py","file_ext":"py","file_size_in_byte":5067,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"421016972","text":"#!/usr/bin/env python\n\nimport logging\nimport optparse\nimport os\nimport sys\n\nfrom appdirs import AppDirs\nfrom pkg_resources import resource_filename\n\nfrom ansiblelater import classify\nfrom ansiblelater.utils import info, warn, read_config, get_property\n\n\ndef main():\n    config_dir = AppDirs(\"ansible-later\").user_config_dir\n    default_config_file = os.path.join(config_dir, \"config.ini\")\n\n    parser = optparse.OptionParser(\"%prog playbook_file|role_file|inventory_file\",\n                                   version=\"%prog \" + get_property(\"__version__\"))\n    parser.add_option('-c', dest='configfile', default=default_config_file,\n                      help=\"Location of configuration file: [%s]\" % default_config_file)\n    parser.add_option('-d', dest='rulesdir',\n                      help=\"Location of standards rules\")\n    parser.add_option('-q', dest='log_level', action=\"store_const\", default=logging.WARN,\n                      const=logging.ERROR, help=\"Only output errors\")\n    parser.add_option('-s', dest='standards_filter', action='append',\n                      help=\"limit standards to specific names\")\n    parser.add_option('-v', dest='log_level', action=\"store_const\", default=logging.WARN,\n                      const=logging.INFO, help=\"Show more verbose output\")\n\n    options, args = parser.parse_args(sys.argv[1:])\n    settings = read_config(options.configfile)\n\n    # Merge CLI options with config options. 
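Only truthy values are copied over, so flags left unset on the command line keep their config-file defaults. 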
CLI options override config options.\n    for key, value in options.__dict__.items():\n        if value:\n            setattr(settings, key, value)\n\n    if os.path.exists(settings.configfile):\n        info(\"Using configuration file: %s\" % settings.configfile, settings)\n    else:\n        warn(\"No configuration file found at %s\" % settings.configfile, settings, file=sys.stderr)\n    if not settings.rulesdir:\n        rules_dir = os.path.join(resource_filename('ansiblelater', 'examples'))\n        warn(\"Using example standards found at %s\" % rules_dir, settings, file=sys.stderr)\n        settings.rulesdir = rules_dir\n\n    if len(args) == 0:\n        candidates = []\n        for root, dirs, files in os.walk(\".\"):\n            for filename in files:\n                candidates.append(os.path.join(root, filename))\n    else:\n        candidates = args\n\n    errors = 0\n    for filename in candidates:\n        if ':' in filename:\n            (filename, lines) = filename.split(\":\")\n        else:\n            lines = None\n        candidate = classify(filename)\n        if candidate:\n            if candidate.binary:\n                info(\"Not reviewing binary file %s\" % filename, settings)\n                continue\n            if candidate.vault:\n                info(\"Not reviewing vault file %s\" % filename, settings)\n                continue\n            if lines:\n                info(\"Reviewing %s lines %s\" % (candidate, lines), settings)\n            else:\n                info(\"Reviewing all of %s\" % candidate, settings)\n            errors = errors + candidate.review(settings, lines)\n        else:\n            info(\"Couldn't classify file %s\" % filename, settings)\n    return errors\n\n\nmain()\n","sub_path":"ansiblelater/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":3135,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"370947721","text":"#coding=utf8\nimport copy\ndef disposition(seq,split_e=\"\"):\n    # if the sequence is not a list, convert it to one\n    if not isinstance(seq,list):\n        if isinstance(seq,str) and split_e !=\"\":\n            seq=seq.split(split_e)\n        else: \n            seq=[i for i in seq]\n    # the list of permutations to return\n    ret_seq=[]\n    # recursion base case: a sequence with a single element is its own only permutation\n    copy_seq=copy.deepcopy(seq)\n    if len(seq)==1:\n        ret_seq.append(copy_seq)\n        return ret_seq\n    '''\n    Main recursive logic:\n    1. Loop over every element once, taking it as the first element.\n    2. Prepend that first element to each permutation of the remaining elements.\n    3. Append every resulting permutation to ret_seq.\n    4. Finally, return ret_seq.'''\n    for i in range(len(seq)):\n        copy2_seq=copy.deepcopy(seq)\n        first_arg=copy2_seq.pop(i)\n        for j in disposition(copy2_seq):\n            j.insert(0,first_arg)\n            ret_seq.append(j)\n    return ret_seq\n\nif __name__ ==\"__main__\":\n    g=disposition([1,2,3,4])\n    print(g)\n    k=disposition(\"abef\")\n    print(k)\n    j=disposition(\"a,b,c\",split_e=\",\")\n    print(j)\n    l=disposition(\"a b c\",split_e=\" \")\n    print(l)\n","sub_path":"disposition.py","file_name":"disposition.py","file_ext":"py","file_size_in_byte":1187,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"481744801","text":"#!/usr/bin/env python3\n# Copyright (c) Facebook, Inc. and its affiliates. 
(http://www.facebook.com)\nimport contextlib\nimport sys\nimport unittest\nfrom _io import StringIO, TextIOWrapper\n\nfrom test_support import pyro_only, cpython_only\n\n\n# Tests for traceback printing in sys.excepthook\ndef panic():\n raise RuntimeError(\"PANIC!!!\")\n\n\ndef call_panic():\n # empty line\n panic()\n\n\ndef raise_after_n_frames(n):\n if not n:\n raise RuntimeError(\"PANIC!!!\")\n raise_after_n_frames(n - 1)\n\n\nclass DisplayhookTest(unittest.TestCase):\n def test_displayhook_with_none_does_not_set_underscore(self):\n import builtins\n\n if hasattr(builtins, \"_\"):\n del builtins._\n\n orig_out = sys.stdout\n out = StringIO()\n sys.stdout = out\n sys.__displayhook__(None)\n self.assertEqual(out.getvalue(), \"\")\n self.assertTrue(not hasattr(builtins, \"_\"))\n sys.stdout = orig_out\n\n def test_displayhook_with_int_sets_underscore(self):\n import builtins\n\n orig_out = sys.stdout\n out = StringIO()\n sys.stdout = out\n sys.__displayhook__(42)\n self.assertEqual(out.getvalue(), \"42\\n\")\n self.assertEqual(builtins._, 42)\n sys.stdout = orig_out\n\n def test_has_displayhook(self):\n self.assertTrue(hasattr(sys, \"displayhook\"))\n\n def test_displayhook_initial_value(self):\n self.assertIs(sys.displayhook, sys.__displayhook__)\n\n def test_dunder_std_streams_are_text_io_wrappers(self):\n self.assertIsInstance(sys.__stderr__, TextIOWrapper)\n self.assertIsInstance(sys.__stdin__, TextIOWrapper)\n self.assertIsInstance(sys.__stdout__, TextIOWrapper)\n\n\nclass ExceptHookTests(unittest.TestCase):\n def test_traceback_with_sys_tracebacklimit_truncates_stack_trace(self):\n has_tracebacklimit = hasattr(sys, \"tracebacklimit\")\n if has_tracebacklimit:\n tmp = sys.tracebacklimit\n\n with StringIO() as stderr, contextlib.redirect_stderr(stderr):\n try:\n sys.tracebacklimit = 1\n call_panic()\n except Exception:\n sys.excepthook(*sys.exc_info())\n finally:\n if has_tracebacklimit:\n sys.tracebacklimit = tmp\n else:\n del sys.tracebacklimit\n self.assertRegex(\n stderr.getvalue(),\n r\"\"\"Traceback \\(most recent call last\\):\n File \".*/sys_test.py\", line \\d+, in panic\n raise RuntimeError\\(\"PANIC!!!\"\\)\nRuntimeError: PANIC!!!\n\"\"\",\n )\n\n def test_traceback_without_sys_tracebacklimit_prints_entire_small_traceback(self):\n has_tracebacklimit = hasattr(sys, \"tracebacklimit\")\n if has_tracebacklimit:\n tmp = sys.tracebacklimit\n del sys.tracebacklimit\n\n with StringIO() as stderr, contextlib.redirect_stderr(stderr):\n try:\n call_panic()\n except Exception:\n sys.excepthook(*sys.exc_info())\n finally:\n if has_tracebacklimit:\n sys.tracebacklimit = tmp\n self.assertRegex(\n stderr.getvalue(),\n r\"\"\"Traceback \\(most recent call last\\):\n File \".*/sys_test.py\", line \\d+, in test_traceback_without_sys_tracebacklimit_prints_entire_small_traceback\n call_panic\\(\\)\n File \".*/sys_test.py\", line \\d+, in call_panic\n panic\\(\\)\n File \".*/sys_test.py\", line \\d+, in panic\n raise RuntimeError\\(\"PANIC!!!\"\\)\nRuntimeError: PANIC!!!\n\"\"\",\n )\n\n def test_traceback_cuts_recursion_at_3_repeated_lines(self):\n has_tracebacklimit = hasattr(sys, \"tracebacklimit\")\n if has_tracebacklimit:\n tmp = sys.tracebacklimit\n del sys.tracebacklimit\n\n with StringIO() as stderr, contextlib.redirect_stderr(stderr):\n try:\n raise_after_n_frames(4)\n except Exception:\n sys.excepthook(*sys.exc_info())\n finally:\n if has_tracebacklimit:\n sys.tracebacklimit = tmp\n self.assertRegex(\n stderr.getvalue(),\n r\"\"\"Traceback \\(most recent call last\\):\n File 
\".*/sys_test.py\", line \\d+, in test_traceback_cuts_recursion_at_3_repeated_lines\n raise_after_n_frames\\(4\\)\n File \".*/sys_test.py\", line \\d+, in raise_after_n_frames\n raise_after_n_frames\\(n - 1\\)\n File \".*/sys_test.py\", line \\d+, in raise_after_n_frames\n raise_after_n_frames\\(n - 1\\)\n File \".*/sys_test.py\", line \\d+, in raise_after_n_frames\n raise_after_n_frames\\(n - 1\\)\n \\[Previous line repeated 1 more time\\]\n File \".*/sys_test.py\", line \\d+, in raise_after_n_frames\n raise RuntimeError\\(\"PANIC!!!\"\\)\nRuntimeError: PANIC!!!\n\"\"\",\n )\n\n def test_traceback_limits_recursion_and_depth(self):\n has_tracebacklimit = hasattr(sys, \"tracebacklimit\")\n if has_tracebacklimit:\n tmp = sys.tracebacklimit\n\n with StringIO() as stderr, contextlib.redirect_stderr(stderr):\n try:\n sys.tracebacklimit = 10\n raise_after_n_frames(20)\n except Exception:\n sys.excepthook(*sys.exc_info())\n finally:\n if has_tracebacklimit:\n sys.tracebacklimit = tmp\n else:\n del sys.tracebacklimit\n self.assertRegex(\n stderr.getvalue(),\n r\"\"\"Traceback \\(most recent call last\\):\n File \".*/sys_test.py\", line \\d+, in raise_after_n_frames\n raise_after_n_frames\\(n - 1\\)\n File \".*/sys_test.py\", line \\d+, in raise_after_n_frames\n raise_after_n_frames\\(n - 1\\)\n File \".*/sys_test.py\", line \\d+, in raise_after_n_frames\n raise_after_n_frames\\(n - 1\\)\n \\[Previous line repeated 6 more times\\]\n File \".*/sys_test.py\", line \\d+, in raise_after_n_frames\n raise RuntimeError\\(\"PANIC!!!\"\\)\nRuntimeError: PANIC!!!\n\"\"\",\n )\n\n\nclass SysTests(unittest.TestCase):\n class Mgr:\n def __enter__(self):\n pass\n\n def __exit__(self, type, value, tb):\n return True\n\n def test_excepthook_initial_value(self):\n self.assertIs(sys.excepthook, sys.__excepthook__)\n\n def test_exit_raises_system_exit(self):\n with self.assertRaises(SystemExit) as ctx:\n sys.exit()\n\n self.assertEqual(ctx.exception.args, ())\n\n def test_exit_with_code_raises_system_exit_with_code(self):\n with self.assertRaises(SystemExit) as ctx:\n sys.exit(\"foo\")\n\n self.assertEqual(ctx.exception.args, (\"foo\",))\n\n def test_exc_info_with_context_manager(self):\n try:\n raise RuntimeError()\n except RuntimeError:\n info1 = sys.exc_info()\n with self.Mgr():\n raise ValueError()\n info2 = sys.exc_info()\n self.assertEqual(info1, info2)\n\n def test_getdefaultencoding_returns_utf8(self):\n self.assertEqual(sys.getdefaultencoding(), \"utf-8\")\n\n def test_getsizeof_without_dunder_sizeof_raises_type_error(self):\n class M(type):\n def mro(cls):\n return (cls,)\n\n class C(metaclass=M):\n __new__ = type.__new__\n __call__ = type.__call__\n\n with self.assertRaises(TypeError):\n sys.getsizeof(C())\n\n def test_getsizeof_with_non_int_without_default_raises_type_error(self):\n class C:\n def __sizeof__(self):\n return \"not an integer\"\n\n with self.assertRaises(TypeError):\n sys.getsizeof(C())\n\n def test_getsizeof_with_non_int_returns_default(self):\n class C:\n def __sizeof__(self):\n return \"not an integer\"\n\n self.assertEqual(sys.getsizeof(C(), 42), 42)\n\n def test_getsizeof_with_negative_raises_value_error(self):\n class C:\n def __sizeof__(self):\n return -1\n\n with self.assertRaises(ValueError):\n sys.getsizeof(C())\n\n @pyro_only\n def test_getsizeof_without_default_returns_size_int(self):\n class C:\n def __sizeof__(self):\n return 42\n\n self.assertEqual(sys.getsizeof(C()), 42)\n\n @pyro_only\n def test_getsizeof_with_default_returns_size_int(self):\n class C:\n def 
__sizeof__(self):\n return 42\n\n self.assertEqual(sys.getsizeof(C(), 3), 42)\n\n @pyro_only\n def test_getsizeof_with_int_subclass_returns_int(self):\n class N(int):\n pass\n\n class C:\n def __sizeof__(self):\n return N(42)\n\n result = sys.getsizeof(C())\n self.assertIs(type(result), int)\n self.assertEqual(result, 42)\n\n def test_getsetrecursionlimit(self):\n limit = sys.getrecursionlimit()\n self.assertGreater(limit, 0)\n sys.setrecursionlimit(limit + 1)\n self.assertEqual(sys.getrecursionlimit(), limit + 1)\n sys.setrecursionlimit(limit)\n self.assertEqual(sys.getrecursionlimit(), limit)\n\n def test_gettrace_returns_none(self):\n self.assertIs(sys.gettrace(), None)\n\n def test_implementation_cache_tag_matches_version_major_minor(self):\n name = sys.implementation.name\n major, minor = sys.version_info.major, sys.version_info.minor\n cache_tag = f\"{name}-{major}{minor}\"\n self.assertEqual(sys.implementation.cache_tag, cache_tag)\n\n def test_implementation_version_matches_module_version_info(self):\n self.assertEqual(sys.implementation.version, sys.version_info)\n\n def test_settrace_with_none_does_nothing(self):\n sys.settrace(None)\n self.assertIs(sys.gettrace(), None)\n\n def test_setrecursionlimit_with_large_limit_raises_overflowerror(self):\n with self.assertRaises(OverflowError) as context:\n sys.setrecursionlimit(230992039023490234904329023904239023)\n self.assertEqual(\n str(context.exception), \"Python int too large to convert to C int\"\n )\n\n def test_hash_info_is_plausible(self):\n def is_power_of_two(x):\n return x & (x - 1) == 0\n\n hash_info = sys.hash_info\n max_value = (1 << (hash_info.width - 1)) - 1\n self.assertTrue(hash_info.modulus <= max_value)\n self.assertTrue(is_power_of_two(hash_info.modulus + 1))\n self.assertTrue(hash_info.inf <= max_value)\n self.assertTrue(hash_info.nan <= max_value)\n self.assertTrue(hash_info.imag <= max_value)\n self.assertIsInstance(hash_info.algorithm, str)\n self.assertTrue(hash_info.hash_bits >= hash_info.width)\n self.assertTrue(hash_info.seed_bits >= hash_info.hash_bits)\n self.assertIs(hash_info.width, hash_info[0])\n self.assertIs(hash_info.modulus, hash_info[1])\n self.assertIs(hash_info.inf, hash_info[2])\n self.assertIs(hash_info.nan, hash_info[3])\n self.assertIs(hash_info.imag, hash_info[4])\n self.assertIs(hash_info.algorithm, hash_info[5])\n self.assertIs(hash_info.hash_bits, hash_info[6])\n self.assertIs(hash_info.seed_bits, hash_info[7])\n self.assertIs(hash_info.cutoff, hash_info[8])\n\n def test_hash_info_matches_cpython(self):\n # We should not deviate from cpython without a good reason.\n hash_info = sys.hash_info\n self.assertEqual(hash_info.modulus, (1 << 61) - 1)\n self.assertEqual(hash_info.inf, 314159)\n self.assertEqual(hash_info.nan, 0)\n self.assertEqual(hash_info.imag, 1000003)\n self.assertEqual(hash_info.algorithm, \"siphash24\")\n self.assertEqual(hash_info.hash_bits, 64)\n self.assertEqual(hash_info.seed_bits, 128)\n\n def test_float_info_matches_cpython(self):\n float_info = sys.float_info\n self.assertEqual(float_info.max, 1.7976931348623157e308)\n self.assertEqual(float_info.max_exp, 1024)\n self.assertEqual(float_info.max_10_exp, 308)\n self.assertEqual(float_info.min, 2.2250738585072014e-308)\n self.assertEqual(float_info.min_exp, -1021)\n self.assertEqual(float_info.min_10_exp, -307)\n self.assertEqual(float_info.dig, 15)\n self.assertEqual(float_info.mant_dig, 53)\n self.assertEqual(float_info.epsilon, 2.220446049250313e-16)\n self.assertEqual(float_info.radix, 2)\n 
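# rounds == 1 means round-to-nearest, the IEEE-754 binary64 default\n        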
self.assertEqual(float_info.rounds, 1)\n\n def test_intern_returns_str(self):\n self.assertEqual(sys.intern(\"id\"), \"id\")\n self.assertEqual(sys.intern(\"long identifier\"), \"long identifier\")\n\n def test_intern_with_nonstr_raises_typeerror(self):\n with self.assertRaises(TypeError):\n sys.intern(12345)\n\n def test_intern_with_str_subclass_raises_typeerror(self):\n class NewString(str):\n pass\n\n with self.assertRaises(TypeError) as context:\n sys.intern(NewString(\"identifier\"))\n\n self.assertEqual(str(context.exception), \"can't intern NewString\")\n\n def test_is_finalizing_before_shutdown_returns_false(self):\n self.assertEqual(sys.is_finalizing(), False)\n\n def test_stdio_initial_values(self):\n self.assertIs(sys.stderr, sys.__stderr__)\n self.assertIs(sys.stdin, sys.__stdin__)\n self.assertIs(sys.stdout, sys.__stdout__)\n\n def test_std_streams_are_utf_8_encoded(self):\n self.assertEqual(sys.stderr.encoding, \"utf-8\")\n self.assertEqual(sys.stdin.encoding, \"utf-8\")\n self.assertEqual(sys.stdout.encoding, \"utf-8\")\n\n def test_std_streams_have_correct_modes(self):\n self.assertEqual(sys.stderr.mode, \"w\")\n self.assertEqual(sys.stdin.mode, \"r\")\n self.assertEqual(sys.stdout.mode, \"w\")\n\n @pyro_only\n def test_std_streams_point_to_correct_fileno(self):\n self.assertEqual(sys.stderr.buffer.fileno(), sys._stderr_fd)\n self.assertEqual(sys.stdin.buffer.fileno(), sys._stdin_fd)\n self.assertEqual(sys.stdout.buffer.fileno(), sys._stdout_fd)\n\n # TODO(T89882231) enable test for Pyro\n @cpython_only\n def test_under_getframe_returns_frame(self):\n from types import ModuleType\n\n frame = sys._getframe(0)\n self.assertTrue(frame.f_globals is not None)\n self.assertEqual(frame.f_globals[\"__name__\"], \"__main__\")\n self.assertTrue(frame.f_locals is not None)\n self.assertEqual(frame.f_locals[\"self\"], self)\n builtins = __builtins__\n if isinstance(builtins, ModuleType):\n builtins = builtins.__dict__\n self.assertIs(frame.f_builtins, builtins)\n self.assertTrue(frame.f_code is not None)\n\n def test_under_getframe_with_noninteger_raises_typeerror(self):\n with self.assertRaises(TypeError):\n sys._getframe(None)\n\n # TODO(T89882231) enable test for Pyro\n @cpython_only\n def test_under_getframe_returns_frame_with_locals(self):\n def baz():\n return sys._getframe(1).f_locals\n\n def bar():\n foo = 1 # noqa: F841\n return baz()\n\n bar_locals = bar()\n self.assertEqual(len(bar_locals), 2)\n self.assertEqual(bar_locals[\"foo\"], 1)\n self.assertEqual(bar_locals[\"baz\"], baz)\n\n def test_under_getframe_from_function_cannot_modify_locals(self):\n def baz():\n sys._getframe(1).f_locals[\"foo\"] = \"wrong\"\n\n def bar():\n foo = \"correct\"\n baz()\n return foo\n\n self.assertEqual(bar(), \"correct\")\n\n def test_under_getframe_from_global_scope_gets_locals(self):\n from types import ModuleType\n\n module = ModuleType(\"\")\n module.sys = sys\n module.temp = \"wrong\"\n exec(\"sys._getframe(0).f_locals['temp'] = 'correct'\", module.__dict__)\n self.assertEqual(module.temp, \"correct\")\n\n def test_under_getframe_with_high_depth_raises_valueerror(self):\n with self.assertRaises(ValueError) as context:\n sys._getframe(1000)\n self.assertEqual(str(context.exception), \"call stack is not deep enough\")\n\n def test_under_getframe_with_negative_integer_returns_top_frame(self):\n self.assertEqual(sys._getframe(-1).f_code, sys._getframe(0).f_code)\n\n def test_under_getframe_with_no_argument_returns_top_frame(self):\n self.assertEqual(sys._getframe().f_code, 
sys._getframe(0).f_code)\n\n def test_under_getframe_f_back_points_to_previous_frame(self):\n def baz():\n return sys._getframe(0)\n\n def bar():\n return baz()\n\n def foo():\n return bar()\n\n frame = foo()\n self.assertIs(frame.f_code, baz.__code__)\n self.assertIs(frame.f_back.f_code, bar.__code__)\n self.assertIs(frame.f_back.f_back.f_code, foo.__code__)\n\n def test_under_getframe_f_back_leads_to_module_frame(self):\n frame = sys._getframe()\n while True:\n if frame.f_back is None:\n break\n frame = frame.f_back\n self.assertIsNone(frame.f_back)\n self.assertIs(frame.f_globals, sys.modules[self.__module__].__dict__)\n\n @pyro_only\n def test_under_getframe_f_back_excludes_builtins_function(self):\n recorded_frame = None\n\n class C:\n def __hash__(self):\n nonlocal recorded_frame\n recorded_frame = sys._getframe()\n return 1234\n\n def foo():\n c = C()\n d = {}\n # Calling C.__hash__ via native code, dict.__delitem__.\n try:\n del d[c]\n except KeyError:\n pass\n\n foo()\n\n self.assertIs(recorded_frame.f_code, C.__hash__.__code__)\n # The result excludes dict.__delitem__.\n self.assertIs(recorded_frame.f_back.f_code, foo.__code__)\n\n def test_version(self):\n self.assertTrue(sys.version)\n self.assertEqual(len(sys.version_info), 5)\n\n def test_hexversion(self):\n self.assertIsInstance(sys.hexversion, int)\n self.assertEqual((sys.hexversion >> 24) & 0xFF, sys.version_info.major)\n self.assertEqual((sys.hexversion >> 16) & 0xFF, sys.version_info.minor)\n self.assertEqual((sys.hexversion >> 8) & 0xFF, sys.version_info.micro)\n release_level = (sys.hexversion >> 4) & 0xF\n release_level_str = {0xA: \"alpha\", 0xB: \"beta\", 0xC: \"candidate\", 0xF: \"final\"}\n self.assertEqual(\n release_level_str[release_level], sys.version_info.releaselevel\n )\n self.assertEqual(sys.hexversion & 0xF, sys.version_info.serial)\n\n def test_under_getframe_f_lineno(self):\n d = {}\n exec(\"import sys\\n\\nresult = sys._getframe().f_lineno\", d)\n self.assertIs(d[\"result\"], 3)\n\n def test_set_asyncgen_hooks_raises_type_error_on_non_none_non_callable_finalizer(\n self,\n ):\n with self.assertRaises(TypeError):\n sys.set_asyncgen_hooks(finalizer=1)\n\n def test_set_asyncgen_hooks_raises_type_error_on_non_none_non_callable_firstiter(\n self,\n ):\n with self.assertRaises(TypeError):\n sys.set_asyncgen_hooks(firstiter=1)\n\n def test_set_asyncgen_hooks_with_none_values(self):\n sys.set_asyncgen_hooks(None, None)\n hooks = sys.get_asyncgen_hooks()\n self.assertIsNone(hooks[0], None)\n self.assertIsNone(hooks[1], None)\n\n def test_set_asyncgen_hooks_with_callables(self):\n def f1():\n pass\n\n def f2():\n pass\n\n sys.set_asyncgen_hooks(f1, f2)\n hooks = sys.get_asyncgen_hooks()\n self.assertEqual(hooks[0], f1)\n self.assertEqual(hooks[1], f2)\n\n def test_set_asyncgen_hooks_with_only_named_firstiter(self):\n def f():\n pass\n\n # Clear any existing values\n sys.set_asyncgen_hooks(None, None)\n\n sys.set_asyncgen_hooks(firstiter=f)\n hooks = sys.get_asyncgen_hooks()\n self.assertEqual(hooks[0], f)\n self.assertEqual(hooks[1], None)\n\n def test_set_asyncgen_hooks_with_only_positional_firstiter(self):\n def f():\n pass\n\n # Clear any existing values\n sys.set_asyncgen_hooks(None, None)\n\n sys.set_asyncgen_hooks(f)\n hooks = sys.get_asyncgen_hooks()\n self.assertEqual(hooks[0], f)\n self.assertEqual(hooks[1], None)\n\n def test_set_asyncgen_hooks_with_only_named_finalizer(self):\n def f():\n pass\n\n # Clear any existing values\n sys.set_asyncgen_hooks(None, None)\n\n 
sys.set_asyncgen_hooks(finalizer=f)\n        hooks = sys.get_asyncgen_hooks()\n        self.assertEqual(hooks[0], None)\n        self.assertEqual(hooks[1], f)\n\n    def test_set_asyncgen_hooks_with_no_args(self):\n        def f1():\n            pass\n\n        def f2():\n            pass\n\n        # Set initial values which shouldn't be affected\n        sys.set_asyncgen_hooks(f1, f2)\n\n        sys.set_asyncgen_hooks()\n        hooks = sys.get_asyncgen_hooks()\n        self.assertEqual(hooks[0], f1)\n        self.assertEqual(hooks[1], f2)\n\n    def test_asyncgen_hooks_attributes(self):\n        def f1():\n            pass\n\n        def f2():\n            pass\n\n        sys.set_asyncgen_hooks(f1, f2)\n        hooks = sys.get_asyncgen_hooks()\n        self.assertEqual(hooks.firstiter, f1)\n        self.assertEqual(hooks.finalizer, f2)\n\n\nif __name__ == \"__main__\":\n    unittest.main()\n","sub_path":"library/sys_test.py","file_name":"sys_test.py","file_ext":"py","file_size_in_byte":21329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"}
+{"seq_id":"74329115","text":"#!/usr/bin/env python3\nimport traceback\ndef f2(x):\n    return 2/(x-1)\n\ndef f1(x):\n    ergebnis = f2(x) + 7\n    return ergebnis\n    \ntry:\n    n=f1(1)\n    print(n)\nexcept ZeroDivisionError as e:\n    # traceback.print_exc() already writes to stderr; wrapping it in print() only prints None\n    traceback.print_exc()\n","sub_path":"examples/kap10-error/nested.py","file_name":"nested.py","file_ext":"py","file_size_in_byte":224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"328443519","text":"\"\"\" Compiled: 2020-09-18 10:38:51 \"\"\"\n\n#__src_file__ = \"extensions/BuySideOMS/./etc/FComplianceCheckReport.py\"\n\"\"\"--------------------------------------------------------------------------\nMODULE\n    FComplianceCheckReport\n\n    (c) Copyright 2018 FIS FRONT ARENA. All rights reserved.\n\nDESCRIPTION\n    Functionality for producing reports of compliance checks\n\n-----------------------------------------------------------------------------\"\"\"\nimport os\nimport logging\nimport acm\nimport FSheetUtils\nimport FFileUtils\nfrom FParameterSettings import ParameterSettingsCreator\n\nSETTINGS = ParameterSettingsCreator.FromRootParameter('ComplianceCheckReportSettings')\nlogger = logging.getLogger(__name__)\n\n\nclass ComplianceCheckReport(object):\n\n    def __init__(self, output):\n        self._alerts = None\n        self._reportName = ' '.join((SETTINGS.ReportName(), acm.Time.TimeNow()))\n        self._report = acm.Report.CreateReport(self._reportName, output)\n        self._reportGrid = self.CreateDefaultReportGrid()\n    \n    def GridBuilder(self):\n        return self._reportGrid.GridBuilder()\n    \n    def Report(self):\n        return self._report\n    \n    def Generate(self, alerts):\n        self._SetColumnCreators()\n        self.GridBuilder().InsertItem(alerts)\n        self._SaveReport()\n    \n    def _SaveReport(self): \n        self._reportGrid.Generate()\n    \n    def CreateDefaultReportGrid(self, includeInsAndLeg=True, includeRows=True):\n        gridConfig = acm.Report.CreateGridConfiguration(includeInsAndLeg, includeRows)\n        return self.Report().OpenSheet(acm.FAlertSheet(), gridConfig, None)\n    \n    def _SetColumnCreators(self):\n        columnIds = FSheetUtils.ColumnIds('_defaultColumnsAlertSheet', \"FAlertSheet\")\n        columnCreators = FSheetUtils.ColumnCreators(columnIds)\n        i = 0\n        while i < columnCreators.Size():\n            creator = columnCreators.At(i)\n            self.GridBuilder().ColumnCreators().Add(creator)\n            i = i + 1\n\n\nclass XmlReportOutputBase(object):\n    \n    SEPARATOR = ' '\n    \n    def __init__(self, name, writer = None):\n        self.name = name\n        self.writer = writer\n    \n    def Writer(self):\n        if self.writer is None:\n            self.writer = self.CreateXmlReportOutput()\n            self.writer.IncludeColorInformation(True)\n            self.writer.IncludeFormattedData(True)\n            
self.writer.IncludeRawData(False)\n            self.writer.IncludeFullData(False)\n        return self.writer\n\n    def CreateXmlReportOutput(self):\n        raise NotImplementedError\n    \n    def StoreAndGenerateKey(self):\n        return self.name\n\nclass XmlReportOutputFile(XmlReportOutputBase):\n\n    FILE_TYPE = '.xml'\n    \n    def CreateDirectory(self):\n        return FFileUtils.createDirectory(\n            r'{0}'.format(SETTINGS.ReportDir()), \n            SETTINGS.SubDir(), \n            dirNameSeparator=self.SEPARATOR)\n    \n    def FilePath(self):\n        return FFileUtils.getFilePath(\n            self.CreateDirectory(), \n            self.name, \n            self.FILE_TYPE, \n            dateBeginning=True,\n            overwriteIfFileExists=SETTINGS.Overwrite(),\n            fileNameSeparator=self.SEPARATOR)\n    \n    def CreateXmlReportOutput(self):\n        output = acm.FXmlReportOutputFile(self.FilePath())\n        output.EnableWhitespace(True)\n        return output\n    \n    def StoreAndGenerateKey(self):\n        return os.path.relpath(str(self.Writer().Name()), r'{0}'.format(SETTINGS.ReportDir()))\n\nclass XmlReportOutput(XmlReportOutputBase):\n\n    PREFIX = 'ComplianceCheckReport'\n    \n    def CreateXmlReportOutput(self):\n        output = acm.FXmlReportOutput(\"\") \n        output.EnableWhitespace(False)\n        return output\n    \n    @staticmethod\n    def CreateFile(archiveName):\n        archive = acm.FLimitCheckReport()\n        archive.Name(archiveName)\n        archive.AutoUser(False)\n        return archive\n\n    def GetNextArchiveName(self, suggested_name):\n        i=1\n        newFileName = suggested_name\n        while self.ArchiveOrNone(newFileName) is not None:\n            if i == 1:\n                numbering = ''\n            else:\n                numbering = '#' + str(i)\n            newFileName = suggested_name + numbering\n            i = i + 1\n        return newFileName\n    \n    def LimitCheckReportTextObject(self):\n        suggested_name = '_'.join((self.PREFIX, self.name))\n        if SETTINGS.Overwrite():\n            existingFile = self.ArchiveOrNone(suggested_name)\n            if not existingFile:\n                file = self.CreateFile(suggested_name)\n            else:    \n                file = existingFile.StorageImage()\n        else:\n            fileName = self.GetNextArchiveName(suggested_name)\n            file = self.CreateFile(fileName)\n        return file\n    \n    def StoreAndGenerateKey(self):\n        archive = self.LimitCheckReportTextObject()\n        archive.XmlData(self.Writer().AsString())\n        archive.Commit()\n        return str(archive.Oid())\n\n    @staticmethod\n    def ArchiveOrNone(filename):\n        try:\n            return acm.FLimitCheckReport.Select('name=\"{0}\"'.format(filename))[0]\n        except IndexError:\n            return \n\ndef CreateComplianceReportFile(alerts, name):\n    if SETTINGS.Storage() == 'ADS':\n        output = XmlReportOutput(name)\n    elif SETTINGS.Storage() == 'File':\n        output = XmlReportOutputFile(name)\n    else:\n        logger.error('Unknown storage place %s' % SETTINGS.Storage())\n        return\n    ComplianceCheckReport(output.Writer()).Generate(alerts)\n    return output.StoreAndGenerateKey()\n","sub_path":"Extensions/Buy Side OMS/FPythonCode/FComplianceCheckReport.py","file_name":"FComplianceCheckReport.py","file_ext":"py","file_size_in_byte":5649,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"491508032","text":"import numpy as np\nfrom gurobipy import *\nfrom Data import Data\n\n\nclass Simulator:\n    def __init__(self, model_type='fct', file=None, data=None):\n        \"\"\"\n        :param test_num: (<1:48>,<1:10>)\n        :param model_type: 'fct', 'sfct', or 'isfct'\n        \"\"\"\n        # self.data = Data(test_num)\n        # self.data.gen_scn(sample_size, scn_count)\n        self.data = data\n        self.grb = Model('model: Simulator')\n        if model_type not in {'fct', 'sfct', 'isfct'}:\n            raise Exception(\"model_name should be in {'fct', 'sfct', 'isfct'}\")\n        self.type = model_type\n        self.x = np.loadtxt(file).astype(int)\n        self.z = None\n        self.f = 
None\n        self.d = None\n        self.obj = None\n        self.det_x = np.zeros(1024).reshape(32, 32)\n        self.time = None\n        self.gap = None\n        self.status = False\n\n        self.solve()\n\n    def solve(self):\n        self.add_vars()\n        self.add_const_2a()\n        self.add_const_2b()\n        self.add_const_3()\n        self.add_const_4()\n        self.add_const_5()\n        self.add_const_6()\n        self.add_const_7()\n        self.add_obj()\n        self.grb.setParam('TimeLimit', 60)\n        self.grb.setParam('OutputFlag', 0)\n        self.grb.optimize()\n        if self.grb.status == 2:\n            self.status = True\n            self.obj = self.grb.ObjVal\n            self.time = self.grb.Runtime\n        else:\n            print(\"******** problem {} is not solved optimally.\".format(self.type))\n\n    def add_vars(self):\n        # amount of resource r that will pass to activity j after the completion of activity i\n        self.f = self.grb.addVars(self.data.activities, self.data.activities, self.data.resources, lb=0.0,\n                                  ub=self.data.big_r, vtype='C', name=\"F\")\n        if self.type in {'fct'}:\n            # start time of activity i\n            self.z = self.grb.addVars(self.data.activities, lb=0.0, ub=self.data.big_t, vtype='C', name=\"Z\")\n        elif self.type in {'sfct', 'isfct'}:\n            # start time of activity i in scenario s\n            self.z = self.grb.addVars(self.data.activities, self.data.scenarios, lb=0.0, ub=self.data.big_t, vtype='C',\n                                      name=\"Z\")\n        else:\n            raise Exception(\"model_name should be in {'fct', 'sfct', 'isfct'}\")\n        if self.type in {'isfct'}:\n            # delivery date of procured materials/equipment for activity i (only in the integrated model)\n            self.d = self.grb.addVars(self.data.activities, lb=0.0, ub=self.data.big_t, vtype='C', name=\"O\")\n\n\n    def add_const_2a(self):\n        if self.type in {'sfct', 'isfct'}:\n            for i in self.data.activities[:-1]:\n                for j in self.data.activities[1:]:\n                    if self.x[i, j] == 1:\n                        self.grb.addConstrs(\n                            (self.z[j, s] - self.z[i, s] >= self.data.p_scn[i][s]\n                             for s in self.data.scenarios),\n                            name=\"NetworkStartTimeRelations\")\n        elif self.type in {'fct'}:\n            self.grb.addConstrs(\n                (self.z[j] - self.z[i] >= self.data.duration[i] - self.data.big_t * (1 - self.x[i, j])\n                 for i in self.data.activities[:-1]\n                 for j in self.data.activities[1:]),\n                name=\"NetworkStartTimeRelations\")\n        else:\n            raise Exception(\"model_name should be in {'fct', 'sfct', 'isfct'}\")\n\n\n    def add_const_2b(self):\n        if self.type in {'isfct'}:\n            self.grb.addConstrs(\n                (self.z[i, s] >= self.d[i]\n                 for s in self.data.scenarios\n                 for i in self.data.activities),\n                name=\"NetworkStartTimeAndOrderRelations\")\n\n    def add_const_3(self):\n        self.grb.addConstrs(\n            (self.f[i, j, r] - self.data.big_r * self.x[i, j] <= 0\n             for i in self.data.activities[:-1]\n             for j in self.data.activities[1:]\n             for r in self.data.resources),\n            name=\"NetworkFlowRelations\")\n\n    def add_const_4(self):\n        self.grb.addConstrs(\n            (quicksum(self.f[i, j, r] for j in self.data.activities[1:]) == self.data.res_use[i][r]\n             for i in self.data.activities[1:]\n             for r in self.data.resources),\n            name=\"OutgoingFlows\")\n\n    def add_const_5(self):\n        self.grb.addConstrs(\n            (quicksum(self.f[i, j, r] for i in self.data.activities[:-1]) == self.data.res_use[j][r]\n             for j in self.data.activities[:-1]\n             for r in self.data.resources),\n            name=\"IngoingFlows\")\n\n    def add_const_6(self):\n        self.grb.addConstrs(\n            (quicksum(self.f[self.data.activities[0], j, r] for j in self.data.activities[1:]) ==\n             self.data.available_resources[r]\n             for r in self.data.resources),\n            name=\"FirstFlow\")\n\n    def add_const_7(self):\n        self.grb.addConstrs(\n            (quicksum(self.f[i, self.data.activities[-1], r] for i in self.data.activities[:-1]) ==\n             self.data.available_resources[r]\n             for r in 
self.data.resources),\n            name=\"LastFlow\")\n\n    def add_obj(self):\n        if self.type in {'fct'}:\n            obj = (self.z[self.data.activities[-1]])\n        elif self.type in {'sfct'}:\n            obj = (quicksum(self.z[self.data.activities[-1], s] for s in self.data.scenarios) / self.data.scn_count)\n        elif self.type in {'isfct'}:\n            tmp_1 = quicksum(self.z[self.data.activities[-1], s] for s in self.data.scenarios) / self.data.scn_count\n            tmp_2 = quicksum(self.data.w[i] * (self.d[i] - self.z[i, s]) for i in self.data.activities for s in\n                             self.data.scenarios) / self.data.scn_count\n            obj = tmp_1 + self.data.gamma * tmp_2\n        else:\n            raise Exception(\"model_name should be in {'fct', 'sfct', 'isfct'}\")\n        self.grb.setObjective(obj, GRB.MINIMIZE)\n\n\n","sub_path":"CPLEX/Simulator.py","file_name":"Simulator.py","file_ext":"py","file_size_in_byte":5960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"125610433","text":"# -*- coding: utf-8\n\nwidth = 600 # Window width\nheight = 700 # Window height\nname = \"PatientName Updater\" # Program name (shown in the window title)\nversion = \"1.1\" # Program version\n\n# Description\nabout_text = \\\n\"\"\"\n    The demo program \"%s\" makes it possible to edit\nthe PatientName section of a binary DICOM file.\nIt takes a *.dcm file as input, \"scans\" its signature,\nlocates the section responsible for PatientName and\nchanges that value according to the user's input.\n\"\"\" % name\n\n# EOF\n","sub_path":"dicom_ptn_updater/src/define.py","file_name":"define.py","file_ext":"py","file_size_in_byte":776,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"78811810","text":"# Example 2.7 from\n#\n# Near-Optimal Robust Bilevel Optimization\n#   M. Besancon, M. F. Anjos and L. 
Brotcorne\n# arXiv:1908.04040v5 (2019)\n#\n# Optimal solution: (x,v) = (0,1)\n#\nimport pyomo.environ as pe\nfrom pao.bilevel import *\n\n\ndef create():\n M = pe.ConcreteModel()\n\n M.x = pe.Var(bounds=(0,None))\n M.v = pe.Var()\n\n M.o = pe.Objective(expr=M.x, sense=pe.minimize)\n M.c = pe.Constraint(expr= M.v >= 1 - M.x/10)\n\n M.L = SubModel(fixed=M.x)\n M.L.o = pe.Objective(expr=M.v, sense=pe.maximize)\n M.L.c = pe.Constraint(expr= 1 + M.x/10 >= M.v)\n\n return M\n\n\nif __name__ == \"__main__\": #pragma: no cover\n M = create()\n M.pprint()\n","sub_path":"pao/bilevel/examples/besancon27.py","file_name":"besancon27.py","file_ext":"py","file_size_in_byte":668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"35124741","text":"import tensorflow as tf\nfrom layers import conv2d, conv2d_transpose\nfrom parameter import args\nimport numpy as np\n\n\nclass CRUnet(object):\n def __init__(self, height, width, channel, n_class=1):\n self.x = tf.placeholder(tf.float32, shape=[None, height, width, channel], name=\"input_x\")\n self.y = tf.placeholder(tf.float32, shape=[None, height, width, 1], name=\"input_y\")\n x = tf.image.pad_to_bounding_box(self.x, args.height - height, args.width - width, args.height, args.width)\n self.l1, self.l2, self.l3, self.l4 = self.downsampling_step(x)\n self.l7 = self.upsampling_step(self.l4, self.l1, self.l2, self.l3)\n out = tf.layers.conv2d(self.l7, 1, 1, padding='SAME')\n self.out = tf.image.crop_to_bounding_box(out, args.height - height, args.width - width, height, width)\n self.pred = tf.nn.sigmoid(self.out, name=\"output\")\n self.miou = self.get_miou(self.y, self.pred)\n self.loss = self.get_loss(self.y, self.out)\n\n def downsampling_step(self, x):\n with tf.variable_scope('layer1'):\n conv=x\n for i in range(3):\n conv = tf.layers.conv2d(conv, 16, 3, padding='SAME', name='conv'+str(i))\n conv = tf.nn.relu(conv)\n l1 = conv + tf.layers.conv2d(conv, 16, 1, padding='SAME')\n l2 = self.downstep_block(l1, 32, 'layer2')\n l3 = self.downstep_block(l2, 64, 'layer3')\n l4 = self.downstep_block(l3, 128, 'layer4')\n return l1, l2, l3, l4\n\n def downstep_block(self, x, filters, name):\n with tf.variable_scope(name):\n b1 = tf.nn.max_pool(x, (1, 2, 2, 1), (1, 2, 2, 1), 'SAME')\n b2 = tf.layers.conv2d(b1, filters, 3, padding='SAME')\n b2 = tf.nn.relu(b2)\n b3 = tf.layers.conv2d(b2, filters, 3, padding='SAME')\n b3 = tf.nn.relu(b3)\n out = b3 + tf.layers.conv2d(x, filters, 1, (2, 2), padding='SAME')\n return out\n\n def upsampling_step(self, x, l1, l2, l3):\n l5 = self.upstep_block(x, l3, 64, \"layer5\")\n l6 = self.upstep_block(l5, l2, 32, \"layer6\")\n l7 = self.upstep_block(l6, l1, 16, \"layer7\")\n return l7\n\n\n def upstep_block(self, x, d, filters, name):\n with tf.variable_scope(name):\n b1 = tf.layers.conv2d_transpose(x, filters, 2, (2, 2), 'SAME')\n b1 = tf.nn.relu(b1)\n b2 = tf.concat([b1, d], axis=3)\n b3 = tf.layers.conv2d(b2, filters, 3, padding='SAME')\n b3 = tf.nn.relu(b3)\n b4 = tf.layers.conv2d(b3, filters, 3, padding='SAME')\n b4 = tf.nn.relu(b4)\n out = b4 + tf.layers.conv2d_transpose(x, filters, 2, (2, 2), 'SAME')\n return out\n\n def get_loss(self, y_true, y_pred):\n loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(labels=y_true, logits=y_pred))\n return loss\n\n def get_miou(self, y_true, y_pred):\n y_true = tf.squeeze(y_true, [3])\n y_pred = tf.squeeze(y_pred, [3])\n # shape = y_true.get_shape().as_list() + [2]\n # y = tf.zeros(tf.shape(y_true)+[2])\n # y_ = 
tf.zeros(tf.shape(y_true)+[2])\n y_true_mask = tf.greater_equal(y_true, 0.5)\n y_pred_mask = tf.greater_equal(y_pred, 0.5)\n ones = tf.ones_like(y_true)\n zeros = tf.zeros_like(y_pred)\n\n def get_label(mask):\n u = tf.where(tf.logical_not(mask), ones, zeros)\n m = tf.where(mask, ones, zeros)\n return tf.concat([tf.expand_dims(u, axis=3), tf.expand_dims(m, axis=3)], axis=3)\n\n y = get_label(y_true_mask)\n y_ = get_label(y_pred_mask)\n\n scores = []\n for threshold in np.arange(0.5, 1.0, 0.05):\n precisions = []\n for j in range(2):\n tp = tf.reduce_sum(y[:, :, :, j] * y_[:, :, :, j], [1, 2])\n fp = tf.reduce_sum((1 - y[:, :, :, j]) * y_[:, :, :, j], [1, 2])\n fn = tf.reduce_sum(y[:, :, :, j] * (1 - y_[:, :, :, j]), [1, 2])\n prec = tf.divide(tf.to_float(tp), tf.to_float(tp + fp + fn) + 1e-20)\n\n precisions.append(tf.reshape(prec, [-1, 1]))\n score = tf.concat(precisions, axis=1)\n score = tf.greater_equal(score, threshold)\n\n scores.append(tf.to_float(score))\n\n miou = tf.reduce_mean(tf.stack(scores), axis=1)\n miou = tf.reduce_mean(miou)\n return miou","sub_path":"CRUnet.py","file_name":"CRUnet.py","file_ext":"py","file_size_in_byte":4395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"97753948","text":"#!/usr/bin/env python\n\n\"\"\" ur5_calib.py\n Script used to moving the robot to calibrate wrt to realsense camera.\n This assumes that we have already launched the ur5 bringup node\n author: Michael Andres Lin (michaelv03@gmail.com)\n date: 10/31/2019\n\"\"\"\n\nimport os\nimport sys\nimport time\nimport rospy\nimport numpy as np\nimport copy\nfrom std_msgs.msg import String, Int16\nfrom futek_data_logger.msg import z_pos\nfrom tf.transformations import euler_from_quaternion, quaternion_from_euler, quaternion_multiply\nfrom ur5_interface import UR5Interface\n\n### Global definitions\nINTER_COMMAND_DELAY = 4\n\nHOME = 0\nSTART = 1\nMOVE_1 = 2\nMOVE_2 = 3\nMOVE_3 = 4\nMOVE_4 = 5\nMOVE_5 = 6\nMOVE_6 = 7\nMOVE_7 = 8\nMOVE_8 = 9\nMOVE_9 = 10\nMOVE_10 = 11\nMOVE_11 = 12\nMOVE_12 = 13\n\nstate = HOME\nmove_completed = 0\n\npose_x, pose_y, pose_z = 0.0, 0.0, 0.0\neul_1, eul_2, eul_3 = 0.0, 0.0, 0.0\n\n### end global definitions\n\n# Callback functions \ndef state_callback(data):\n incomingString = str(data.data)\n global state, move_completed\n# print(\"Incoming command: \" + incomingString)\n\n if (incomingString == \"home_arm\"):\n state = HOME\n move_completed = 0\n \n elif (incomingString == \"start\"):\n state = START\n move_completed = 0\n\n\n \n elif (incomingString == \"move_1\"):\n state = MOVE_1\n move_completed = 0\n \n elif (incomingString == \"move_2\"):\n state = MOVE_2\n move_completed = 0\n\n elif (incomingString == \"move_3\"):\n state = MOVE_3\n move_completed = 0\n\n elif (incomingString == \"move_4\"):\n state = MOVE_4\n move_completed = 0\n\n elif (incomingString == \"move_5\"):\n state = MOVE_5\n move_completed = 0\n\n elif (incomingString == \"move_6\"):\n state = MOVE_6\n move_completed = 0\n\n elif (incomingString == \"move_7\"):\n state = MOVE_7\n move_completed = 0\n\n elif (incomingString == \"move_8\"):\n state = MOVE_8\n move_completed = 0\n\ndef set_quaternion(pose, quaternion):\n pose.orientation.x = quaternion[0]\n pose.orientation.y = quaternion[1]\n pose.orientation.z = quaternion[2]\n pose.orientation.w = quaternion[3]\n return pose\n\ndef get_quaternion(pose):\n quat = np.zeros(4)\n quat[0] = pose.orientation.x\n quat[1] = pose.orientation.y\n quat[2] = pose.orientation.z\n quat[3] = 
pose.orientation.w\n return quat\n\ndef relative_pose(old_pose, x_dot, y_dot, z_dot, eul_1, eul_2, eul_3):\n new_pose = copy.deepcopy(old_pose)\n delta_quat = quaternion_from_euler(eul_1, eul_2, eul_3)\n og_quat = get_quaternion(old_pose)\n new_quat = quaternion_multiply(delta_quat, og_quat)\n new_pose = set_quaternion(new_pose, new_quat)\n new_pose.position.x += x_dot\n new_pose.position.y += y_dot\n new_pose.position.z += z_dot\n return new_pose\n\n\ndef pinch_test_arm_control():\n \"\"\"\n Function to demonstrate moving the ur5 to home pose\n \"\"\"\n # Initialize the ros node\n rospy.init_node(\"pinch_test_arm_control\", anonymous=True, disable_signals=True)\n \n # Setup subscription to cmd_motor_controller topic\n rospy.Subscriber(\"master_state\", String, state_callback)\n \n # Publish just z position for plotting later\n z_pos_pub = rospy.Publisher('ur5_position', z_pos, queue_size=10)\n\n # Instantiate the UR5 interface.\n ur5 = UR5Interface()\n \n ur5.goto_home_down()\n home_pose = ur5.get_pose()\n move_completed = 1\n \n while not rospy.is_shutdown(): \n \n global move_completed\n \n if (move_completed == 0):\n \n if (state == HOME): \n print(\"Moving to home position\")\n ur5.set_speed(.1)\n ur5.goto_home_down()\n home_pose = ur5.get_pose()\n move_completed = 1\n \n elif (state == START):\n print(\"Moving to start position\")\n start_pose = copy.deepcopy(home_pose)\n ur5.set_speed(.1)\n start_pose = relative_pose(home_pose, 0, 0, -0.13, 0, 0, 0)\n ur5.goto_pose_target(start_pose, wait = False)\n move_completed = 1\n \n elif (state == MOVE_1):\n print(\"Performing first movement\")\n ur5.set_speed(.05)\n pose_1 = relative_pose(start_pose, 0, 0, 0.06, 0, 0, 0)\n ur5.goto_pose_target(pose_1, wait = False)\n move_completed = 1\n \n elif (state == MOVE_2):\n print(\"Performing second movement\")\n pose_2 = relative_pose(pose_1, 0, -.047, 0, 0,0,0)\n ur5.goto_pose_target(pose_2, wait = False)\n move_completed = 1\n\n elif (state == MOVE_3):\n print(\"Performing third movement\")\n ur5.set_speed(.15)\n pose_3 = relative_pose(pose_2, 0,0,-0.06,0,0,0)\n ur5.goto_pose_target(pose_3, wait = False)\n move_completed = 1\n\n elif (state == MOVE_4):\n print(\"Performing fourth movement\")\n ur5.set_speed(.15)\n pose_4 = relative_pose(pose_3, 0, 0, 0.1, 0, 0, 0)\n ur5.goto_pose_target(pose_4, wait=False)\n move_completed = 1\n\n elif (state == MOVE_5):\n print(\"Performing fifth movement\")\n ur5.set_speed(.05)\n pose_4 = relative_pose(pose_4, 0, 0, 0, 0, 0, -3.14159/2)\n ur5.goto_pose_target(pose_4, wait=False)\n move_completed = 1\n\n elif (state == MOVE_6):\n print(\"Performing sixth movement\")\n move_completed = 1\n\n elif (state == MOVE_7):\n print(\"Performing seventh movement\")\n move_completed = 1\n\n elif (state == MOVE_8):\n print(\"Performing eighth movement\")\n move_completed = 1\n\n\nif __name__ == '__main__': \n pinch_test_arm_control()\n","sub_path":"src/ur5_manip/src/qual_testing_2.py","file_name":"qual_testing_2.py","file_ext":"py","file_size_in_byte":6022,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"150232352","text":"import numpy as np\n\nfrom finmag import Simulation as Sim\nfrom finmag.energies import Exchange, Demag, DMI, Zeeman\nfrom finmag.energies import UniaxialAnisotropy\nfrom finmag.util.consts import mu0\n\nimport os, shutil\nimport dolfin as df\n\n# Geometries\nimport mshr\nfrom finmag.util.mesh_templates import Nanodisk\n\n\n# MESH 
------------------------------------------------------------------------\n\nmesh_file = 'mesh/nanodisk.xml.gz'\nmesh = mshr.Circle(df.Point(0, 0), 50)\nmesh = mshr.generate_mesh(mesh, 40)\nmesh = df.Mesh(mesh)\n\n\n# Simulation and energies -----------------------------------------------------\n\n# Bulk\nA = 13e-12\nD = 3e-3\nMs = 0.86e6\nKu = 0.4e6\n\nsim = Sim(mesh, Ms=Ms, unit_length=1e-9, name=\"nanodisk_T\")\n\n\ndef m_init(pos):\n x, y = np.array(pos)[:2]\n if (x ** 2 + y ** 2) ** 0.5 < 25:\n return (0, 0.1, 1)\n else:\n return (0, 0.1, -1)\n\nsim.set_m(m_init)\n\n# -----------------------------------------------------------------------------\n\n# Exchange Energy\nsim.add(Exchange(A))\n# DMI\nsim.add(DMI(D, dmi_type='bulk'))\n# Uniaxial Anisotropy\nsim.add(UniaxialAnisotropy(Ku, (0, 0, 1), name='Ku'))\n\n# -----------------------------------------------------------------------------\n\n# sim.set_tol(1e-8, 1e-8)\nsim.llg.presession = False\nsim.alpha = 0.9\n\nif not os.path.exists('vtks'):\n # shutil.rmtree('vtks')\n os.mkdir('vtks')\n\nsim.relax(stopping_dmdt=0.01)\nsim.save_vtk(filename='vtks/nanodisk_T.pvd', overwrite=True)\nsim.save_field('m', 'nanodisk_T.npy', overwrite=True)\n\n# Extract data\nr_diam = np.linspace(-49.9, 49.9, 100)\ndata = np.zeros((len(r_diam), 4))\n\nfor i, r in enumerate(r_diam):\n data[i][0] = r\n data[i][1:] = sim.m_field.f((r, 0))\n\nnp.savetxt('nanodisk_T_skyrmion.dat', data)\n\n# Skyrmion radius\nimport scipy.optimize\nr_sk = scipy.optimize.brentq(lambda r: sim.m_field.f((r, 0))[2], 0, 49)\nprint(r_sk)\n\n# Extract the radial component at r=r_sk\nphi_ring = np.linspace(0, 2 * np.pi, 100)\ndata_mr = np.zeros((len(phi_ring), 2))\nfor i, phi in enumerate(phi_ring):\n x = r_sk * np.cos(phi)\n y = r_sk * np.sin(phi)\n\n mx, my, mz = sim.m_field.f((x, y))\n\n mr = mx * np.cos(phi) + my * np.sin(phi)\n\n data_mr[i][0] = phi\n data_mr[i][1] = mr\n\nnp.savetxt('nanodisk_T_phi_mr_rsk.dat', data_mr)\n","sub_path":"sims/finmag/2D/nanodisk_T.py","file_name":"nanodisk_T.py","file_ext":"py","file_size_in_byte":2247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"513336448","text":"# -*- coding: utf-8 -*-\n\nfrom django.conf.urls import url\nfrom django.contrib import admin\nfrom django.contrib.admin.utils import unquote\nfrom django.core.urlresolvers import reverse\nfrom django.http import JsonResponse\n\nfrom import_export.admin import ExportMixin\n\nfrom blanc_pages import block_admin\n\nfrom addresses.admin import AddressInline\nfrom notes.admin import NoteInline\n\nfrom . 
import choices as people_choices\nfrom .blocks.forms import NominateFormBlock\nfrom .models import Person, Nominator, Nominee\n\n\n@admin.register(Person)\nclass PersonAdmin(ExportMixin, admin.ModelAdmin):\n inlines = [\n AddressInline, NoteInline\n ]\n date_hierarchy = 'created_at'\n search_fields = (\n 'first_name', 'last_name', 'life__first_name', 'life__last_name', 'email',\n 'home_phone', 'mobile_phone',\n )\n list_display = (\n 'first_name', 'last_name', 'reason', 'life', 'email', 'home_phone',\n 'mobile_phone', 'hear_about_us', 'updated_at', 'created_at',\n )\n list_filter = ('reason',)\n fieldsets = (\n (\n 'Person', {\n 'fields': (\n 'first_name', 'last_name', 'life', 'reason', 'message',\n 'hear_about_us',\n )\n }\n ),\n (\n 'Contacts', {\n 'classes': ('collapse',),\n 'fields': (\n 'email', 'home_phone', 'mobile_phone',\n )\n }\n ),\n (\n 'Meta data', {\n 'classes': ('collapse',),\n 'fields': (\n 'id', 'created_at', 'updated_at',\n )\n }\n ),\n )\n raw_id_fields = ('life',)\n\n readonly_fields = ('id', 'created_at', 'updated_at', 'is_agreed',)\n\n class Media:\n js = ('js/admin/addresses/addresses.js',)\n\n def get_queryset(self, request):\n qs = super(PersonAdmin, self).get_queryset(request)\n return qs.exclude(reason=people_choices.REASON_TYPE_WOULD_LIKE_TO_NOMINATE)\n\n def get_urls(self):\n urls = super(PersonAdmin, self).get_urls()\n extra_urls = [\n url(\n r'^(\\d+)/add-note/$',\n self.add_note,\n name='persons-add-note'\n ),\n url(\n r'^(\\d+)/add-address/$',\n self.add_address,\n name='persons-add-address'\n )\n ]\n return extra_urls + urls\n\n def get_inline_instances(self, request, obj=None):\n \"\"\" Update post url for AddressInline and NotesInline. \"\"\"\n inline_instances = super(PersonAdmin, self).get_inline_instances(request, obj)\n if obj is not None:\n for inline in inline_instances:\n if isinstance(inline, AddressInline):\n inline.dialog_data['post_url'] = reverse(\n 'admin:persons-add-address', args=(obj.id,)\n )\n elif isinstance(inline, NoteInline):\n inline.dialog_data['post_url'] = reverse(\n 'admin:persons-add-note', args=(obj.id,)\n )\n return inline_instances\n\n def get_formsets_with_inlines(self, request, obj=None):\n if obj is None:\n return []\n return super(PersonAdmin, self).get_formsets_with_inlines(request, obj)\n\n def add_note(self, request, object_id):\n \"\"\" Add note for life. \"\"\"\n context = {}\n obj = self.get_object(request, unquote(object_id))\n context = NoteInline.create_note(\n request, obj\n )\n return JsonResponse(context)\n\n def add_address(self, request, object_id):\n \"\"\" Add address for person. 
\"\"\"\n        obj = self.get_object(request, unquote(object_id))\n        context = AddressInline.create_address(\n            request, obj\n        )\n\n        return JsonResponse(context)\n\n\nclass NomineeInline(admin.StackedInline):\n    model = Nominee\n    max_num = 1\n    readonly_fields = (\n        'first_name', 'country', 'relation', 'why_help', 'what_need'\n    )\n\n\n@admin.register(Nominator)\nclass NominatorAdmin(PersonAdmin):\n    list_filter = ()\n    inlines = [\n        NomineeInline, NoteInline\n    ]\n    fieldsets = (\n        (\n            'Person', {\n                'fields': (\n                    'first_name', 'last_name', 'life', 'message',\n                    'hear_about_us', 'is_agreed',\n                )\n            }\n        ),\n        (\n            'Contacts', {\n                'classes': ('collapse',),\n                'fields': (\n                    'email', 'home_phone', 'mobile_phone',\n                )\n            }\n        ),\n        (\n            'Meta data', {\n                'classes': ('collapse',),\n                'fields': (\n                    'id', 'created_at', 'updated_at',\n                )\n            }\n        ),\n    )\n\n    def get_queryset(self, request):\n        qs = super(PersonAdmin, self).get_queryset(request)\n        return qs.filter(reason=people_choices.REASON_TYPE_WOULD_LIKE_TO_NOMINATE)\n\n\nblock_admin.site.register(NominateFormBlock)\nblock_admin.site.register_block(NominateFormBlock, 'Forms')\n","sub_path":"apps/people/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":5185,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"347016127","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport numpy as np\nimport torch.utils.data as data\nfrom torchvision.models import resnet18\nfrom utils.dataload import NLVRDataset\nimport os\nimport sys\nfrom cnn_lstm import train,validate,CELoss\nsys.path.insert(0, os.getcwd())\n\nclass BasicCNN(nn.Module):\n    \n    def __init__(self, pretrained_path=None):\n        super(BasicCNN, self).__init__()\n        self.conv1 = nn.Conv2d(9,12,kernel_size=3,stride=2)\n        self.bn1 = nn.BatchNorm2d(12)\n        self.conv2 = nn.Conv2d(12,12,kernel_size=3,stride=2)\n        self.bn2 = nn.BatchNorm2d(12)\n        self.conv3 = nn.Conv2d(12,12,kernel_size=3,stride=2)\n        self.bn3 = nn.BatchNorm2d(12)\n        self.avgpool = nn.AdaptiveAvgPool2d((4,4))\n        # conv3 outputs 12 feature maps over the 4x4 pooled grid\n        self.filters = 12\n        self.entities = 4*4\n#         self.fc = nn.Linear(2048,1)\n    \n    def forward(self, x):\n        x = self.conv1(x)\n        x = self.bn1(x)\n        x = F.relu(x)\n        x = self.conv2(x)\n        x = self.bn2(x)\n        x = F.relu(x)\n        x = self.conv3(x)\n        x = self.avgpool(x)\n        return x\n\ndef build_mlp(input_dim,layers):\n    mlp = nn.ModuleList()\n    # chain the dimensions input_dim -> layers[0] -> ... -> layers[-1]\n    for li,lj in zip([input_dim]+layers[:-1],layers):\n        mlp.append(nn.Linear(li,lj))\n    return mlp\n\nclass MLP(nn.Module):\n\n    def __init__(self,input_dim,layers):\n        super(MLP, self).__init__()\n        self.mlp = build_mlp(input_dim,layers)\n        self.input_dim = input_dim\n        self.output_dim = layers[-1]\n\n    def forward(self,x):\n        for l in self.mlp[:-1]:\n            x = l(x)\n            x = F.relu(x)\n        return self.mlp[-1](x)\n    \nclass ImageLSTM(nn.Module):\n    \n    def __init__(self, wv_dim, wv_outdim, wv_pretrained_path, f_layers=[256,1], g_layers=[128], cnn_pretrained_path=None):\n        super(ImageLSTM, self).__init__()\n        self.cnn = BasicCNN(cnn_pretrained_path)\n        self.improj = nn.Linear(self.cnn.entities*self.cnn.filters,wv_dim)\n        # inputs to the LSTM are (batch, seq, feature)\n        self.lstm = nn.LSTM(2*wv_dim,wv_outdim,batch_first=True)\n        self.wv_dim = wv_dim\n        self.wv_outdim = wv_outdim\n        self.embedding = nn.Embedding(230,wv_dim)\n        self.embedding = self.embedding.from_pretrained(torch.FloatTensor(np.load(wv_pretrained_path)))\n        self.embedding.weight.requires_grad = False\n        self.linear = nn.Linear(self.wv_outdim,1)\n    \n    def forward(self,img,sentence,idx):\n        batch_size = img.shape[0]\n        x = self.cnn(img)\n        x = 
x.reshape([-1,self.cnn.entities*self.cnn.filters])\n        x = self.improj(x)\n        x = x.reshape([-1,1,self.wv_dim])\n        # initial LSTM states: (num_layers, batch, hidden), on the same device as the inputs\n        h0 = torch.zeros(1,batch_size,self.wv_outdim,device=img.device)\n        s0 = torch.zeros(1,batch_size,self.wv_outdim,device=img.device)\n        s = self.embedding(sentence)\n        x = x.repeat(1,s.size(1),1)\n        s = torch.cat((s,x),dim=-1)\n        s,(h,c) = self.lstm(s,(h0,s0))\n        s_stack = []\n        for i in range(s.size(0)):\n            s_stack.append(s[i][idx[i]-1])\n#             s_stack.append(torch.mean(s[i][:idx[i]],dim=0))\n        x = torch.stack(s_stack)\n        \n        return F.sigmoid(self.linear(x))\n\ndataset = NLVRDataset('train','data.txt','wordidx.txt')\n# train/val\ntrain_idx = []\nval_idx = []\nfor i in range(len(dataset)):\n    k = np.random.binomial(1,0.9)\n    if k == 1:\n        train_idx.append(i)\n    else:\n        val_idx.append(i)\ntrainloader = data.DataLoader(dataset,batch_size=32,num_workers=8,sampler=data.sampler.SubsetRandomSampler(train_idx))\nvalloader = data.DataLoader(dataset,batch_size=32,num_workers=8,sampler=data.sampler.SubsetRandomSampler(val_idx))\nmodel = ImageLSTM(wv_dim=100,wv_outdim=1024,wv_pretrained_path='train/vocab.npy')\nmodel = nn.DataParallel(model).cuda()\ncriterion = CELoss()\ncriterion.cuda()\noptimizer = optim.Adam(model.parameters(),lr=2e-3,weight_decay=100.0)\nn_epochs = 300\nbest_val = 0.0\nbest_val_c = 0\nfor epoch_n in range(n_epochs):\n    print(\"############ Epoch [\"+str(epoch_n)+\"] ############\")\n    train(trainloader,model,criterion,optimizer,epoch_n)\n    val_score = validate(valloader,model,criterion,epoch_n)\n    if val_score > best_val:\n        best_val = val_score\n        best_val_c = epoch_n\n        print(\"[VALIDATE BEST] Epoch [\"+str(epoch_n)+\"] : Best Epoch = \"+str(best_val_c)+\", Best Accuracy = \"+str(best_val))\n","sub_path":"nlvr/lstm.py","file_name":"lstm.py","file_ext":"py","file_size_in_byte":4302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"545015348","text":"from __future__ import division\nimport cv2\nimport numpy as np\nimport logging\nimport math\nimport time\nfrom collections import deque\n\nfrom constants import *\n\n\nH = np.array([[ -1.07693537e+01,  -1.03211106e+00,   4.41554723e+03],\n       [ -5.61158586e-02,   7.60254152e-01,  -7.36972948e+03],\n       [ -3.59519827e-04,  -5.10817084e-02,   1.00000000e+00]])\n    \n\n\nclass droidVision():\n\n    def __init__(self):\n        \n        self.dataAvailable = 0\n        self.failedFrame = 0\n        self.histVPHeading = deque([0],3)\n        self.histLeftOffset = deque([0],3)\n        self.histRightOffset = deque([0],3)\n        self.histObDist = deque([0],3)\n        self.obMissing = 0\n        self.obstacle = False\n\n        self.frameNo = 0\n        self.kernel = np.ones((5,5),np.uint8)\n\n        self.vpX = 0\n        self.vpY = 0\n        self.leftOffset = 0\n        self.rightOffset = 0\n\n        self.clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8,8))\n        self.threshYellow = 160\n        self.threshBlue = 110\n        self.threshPurple = 160\n        \n        self.cannyLow = 30\n        self.cannyHigh = 80\n        \n        self.thetaThresh = np.pi/180 *1.5\n        self.rhoThresh = 80\n        self.minLineLength = 100 \n        self.maxLineGap = 30 \n        self.centreY = DEFAULT_CAM_H # Result of calibration\n        self.canny = np.empty((DEFAULT_CAM_H,DEFAULT_CAM_W,3))\n        self.frame_edited = np.empty((DEFAULT_CAM_H,DEFAULT_CAM_W,3))\n\n    # Main method\n    def processFrame(self, frame):\n        \n        centreX = np.round(frame.shape[1]/2)\n        clA,clB = self.raw2Frame(frame)\n        \n        purple, yellow, blue = self.thresholdFrame(clA, clB)\n        self.yellow = yellow *255\n        self.blue = blue *255\n        self.purple = purple *255\n        \n        yM,yEdgeCrossing,yMeanPoint = self.detectLine(yellow)\n        bM,bEdgeCrossing,bMeanPoint = self.detectLine(blue)\n        
self.vanishingPoint(yM,yEdgeCrossing,bM,bEdgeCrossing)\n self.robotHeading(yM,yMeanPoint,bM,bMeanPoint)\n \n goalHeading = np.nanmean(self.histVPHeading)\n trackLeftOffset = np.nanmean(self.histLeftOffset)\n trackRightOffset = np.nanmean(self.histRightOffset)\n obstacleDist = np.nanmean(self.histObDist)\n \n \n try:\n cv2.line(self.frame_edited,(int(bEdgeCrossing),int(self.centreY)),(int(self.vpX),int(self.vpY)),(0,255,0),2)\n cv2.line(self.frame_edited,(int(yEdgeCrossing),int(self.centreY)),(int(self.vpX),int(self.vpY)),(0,255,0),2)\n # Show direction to vanishing point\n cv2.line(self.frame_edited,(int(centreX),int(self.centreY)),(int(self.vpX),int(self.vpY)),(0,0,255),2)\n except:\n pass #print('cant show lines') \n # Draw outputs\n try:\n cv2.line(self.frame_edited,(0,bottomEdge[1]),(width,bottomEdge[1]),(0,255,0),2)\n cv2.circle(self.frame_edited, center, 5, (0,0,255), -1)\n except:\n logging.debug('no object rect')\n \n \n return goalHeading, trackLeftOffset, trackRightOffset, self.obstacle, obstacleDist\n \n \n def raw2Frame(self,frame):\n \n if frame is not None:\n # Create frames for processing\n# frameLAB = cv2.cvtColor(frame,cv2.COLOR_BGR2LAB) \n frameHSV = cv2.cvtColor(frame,cv2.COLOR_BGR2LAB) \n\n self.frame_edited = np.copy(frame)\n# l,a,b = cv2.split(frameLAB)\n l,a,b = cv2.split(frameHSV)\n\n# clA = self.clahe.apply(a) # histogram adjustment\n# clB = self.clahe.apply(b) # histogram adjustment\n \n return a,b\n else:\n self.failedFrame +=1\n \n def BGR2invariant(self,frame,alpha):\n #G - B - R BGR\n invariant = 0.5 + np.log(frame[:,:,1]) - alpha * np.log(frame[:,:,2]) - (1-alpha) * np.log(frame[:,:,0])\n return invariant\n \n \n \n def thresholdFrame(self, clA, clB):\n thresh,purple = cv2.threshold(clA,self.threshPurple,1,cv2.THRESH_BINARY)\n retY,yellow = cv2.threshold(clB,self.threshYellow,1,cv2.THRESH_BINARY)\n retB,blue = cv2.threshold(clB,self.threshBlue,1,cv2.THRESH_BINARY)\n blue = cv2.bitwise_not(blue)-254 # invert blue line\n \n# ret2,th2 = cv2.threshold(img,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)\n\n return purple, yellow, blue\n \n def detectLine(self,channel):\n channel = cv2.Canny(channel, self.cannyLow, self.cannyHigh,5)\n# channel = cv2.morphologyEx(channel, cv2.MORPH_CLOSE, self.kernel)\n\n lines = np.squeeze(cv2.HoughLinesP(channel,cv2.HOUGH_PROBABILISTIC,self.thetaThresh,self.rhoThresh,self.minLineLength,self.maxLineGap))#detect lines\n self.canny = channel\n if lines.ndim >= 2:\n grad = (lines[:,0]-lines[:,2])/(lines[:,1]-lines[:,3]+0.0001)# find gradient of lines\n filt = rejectOutliers(grad, m=5)\n M = np.median(filt)\n #find intersection point with baseline centreY, using gradient and mean point\n meanPoint = np.sum((lines[:,0] + lines[:,2])/(2*lines.shape[0])),np.sum((lines[:,1] + lines[:,3])/(2*lines.shape[0]))\n EdgeCrossing = meanPoint[0] + M * (self.centreY - meanPoint[1])\n #crossingPoint = [pointX,pointY]\n \n for x1,y1,x2,y2 in lines:\n cv2.line(self.frame_edited,(x1,y1),(x2,y2),(0,255,0),1)\n else:\n# print('VS209: HoughLines not found')\n M = None\n EdgeCrossing = None\n meanPoint = None\n \n \n return M,EdgeCrossing,meanPoint\n \n \n def vanishingPoint(self,yM,yEdgeCrossing,bM,bEdgeCrossing):\n # Conditional to create vP or virtual vP\n # Both lines visible\n if bM != None and yM != None:\n \n self.vpY = (yEdgeCrossing - bEdgeCrossing)/(bM - yM) + self.centreY\n self.vpX = bM * (self.vpY - self.centreY) + bEdgeCrossing\n self.dataAvailable = 1\n \n # Only Blue line visible, set VP to be far away and extend visible line\n elif bM != None and 
yM == None:\n self.vpY = -10000 - self.centreY\n self.vpX = bM * self.vpY + bEdgeCrossing\n self.dataAvailable = 1\n \n # Only Yellow line visible\n elif yM != None and bM == None:\n self.vpY = -10000 - self.centreY\n self.vpX = yM * self.vpY + yEdgeCrossing\n self.dataAvailable = 1\n \n # No lines visible \n else:\n self.dataAvailable = 0\n # Temporary only, use potential field system \n def robotHeading(self,yM,yMeanPoint,bM,bMeanPoint):\n if self.dataAvailable: \n # Using Homography to compute heading angle\n realCoords = robotFrame([self.vpX,self.vpY],H)\n Heading = math.atan2(-realCoords[1], -realCoords[0])\n # logging.debug(\"Heading: %.2f\", heading_deg)\n self.histVPHeading.append(Heading)\n \n if bM != None:\n leftOffset = findTrackOffset(bMeanPoint, realCoords)\n self.histLeftOffset.append(leftOffset)\n\n if yM != None:\n rightOffset = findTrackOffset(yMeanPoint, realCoords)\n self.histRightOffset.append(rightOffset)\n \n else:\n self.failedFrame += 1\n # If lines are not detected for N frames, remove history\n if self.failedFrame >= 3:\n self.histVPHeading = deque([0],3)\n self.histLeftOffset = deque([0],3)\n self.histRightOffset = deque([0],3)\n # logging.debug('10 failed frames')\n \n if self.obstacle:\n self.obMissing = 0\n self.histObDist.append(obDistance)\n \n else:\n self.obMissing +=1\n \n if self.obMissing >= 3:\n self.histObDist = deque([0],3)\n # logging.debug('Lost the obstacle')\n \n\n \n def detectObjects(self,purple): \n # process purple objects\n purple = cv2.morphologyEx(purple, cv2.MORPH_OPEN, self.kernel)\n __, contours, __ = cv2.findContours(purple,cv2.RETR_LIST,cv2.CHAIN_APPROX_SIMPLE)\n \n if contours:\n \n # find centroid of largest blob\n blob = max(contours, key=lambda el: cv2.contourArea(el))\n M = cv2.moments(blob)\n center = (int(M[\"m10\"] / M[\"m00\"]), int(M[\"m01\"] / M[\"m00\"]))\n \n # Find edges of obstacle\n# leftEdge = tuple(blob[blob[:,:,0].argmin()][0])\n# rightEdge = tuple(blob[blob[:,:,0].argmax()][0])\n# topEdge = tuple(blob[blob[:,:,1].argmin()][0])\n bottomEdge = tuple(blob[blob[:,:,1].argmax()][0])\n # Calculate distance to object\n \n obDistance = objectDistance(DEFAULT_CAM_H, DEFAULT_CAM_TILT, DEFAULT_CAM_HEIGHT, bottomEdge)\n \n self.obstacle = True\n else:\n self.obstacle = False\n \n\n \ndef rejectOutliers(data, m = 10.):\n d = np.abs(data - np.median(data))\n mdev = np.median(d)\n s = d/(mdev if mdev else 1.)\n return data[s My name
is Harshit ')\n\ndef test3(request):\n\n\ta = 20\n\tb = 7\n\tc= a+b \n\td= a-b \n\te= a*b \n\tf= a/b \n\tg= a%b \n\tcal = [c,' ',d,' ',e,' ',f,' ',g]\n\treturn HttpResponse(cal)\n\ndef test4(request):\n\n\tnum = 25\n\n\tif num%2==0:\n\t\treturn HttpResponse('even no')\n\n\telse:\n\t\treturn HttpResponse('odd no')\n\ndef test5(request):\n\n\treturn render(request,'test.html')\n\ndef test6(request):\n\n\tif request.method == \"GET\":\n\n\t\tnum = int(request.GET[\"num\"])\n\n\t\tif num%2==0:\n\t\t\treturn HttpResponse('even no')\n\n\t\telse:\n\t\t\treturn HttpResponse('odd no')\n\ndef test7(request):\n\n\treturn render(request,'cal.html')\n\ndef cal(request):\n\n\tif request.method == \"GET\":\n\n\t\tnum1 = request.GET['num1']\n\t\tnum2 = request.GET['num2']\n\t\ta = int(num1)\n\t\tb = int(num2)\n\t\tadd = request.GET.get('add')\n\t\tsub = request.GET.get('sub')\n\t\tmul = request.GET.get('mul')\n\t\tdiv = request.GET.get('div')\n\t\trem = request.GET.get('rem')\n\n\t\tif add:\t\n\t\t\tv1 = a+b\n\t\t\treturn render(request,'cal.html',{'data':v1})\n\t\t\t#return HttpResponse(a+b)\n\t\telif sub:\n\t\t\tv2 = a-b\n\t\t\treturn render(request,'cal.html',{'data':v2})\n\t\t\t#return HttpResponse(a-b)\n\t\telif mul:\n\t\t\tv3 = a*b\n\t\t\treturn render(request,'cal.html',{'data':v3})\n\t\t\t#return HttpResponse(a*b)\n\t\telif div:\n\t\t\tv4 = a/b\n\t\t\treturn render(request,'cal.html',{'data':v4})\n\t\t\t#return HttpResponse(a/b)\n\t\telif rem:\n\t\t\tv5 = a%b\n\t\t\treturn render(request,'cal.html',{'data':v5})\n\t\t\t#return HttpResponse(a%b)\n\n\n\n","sub_path":"demo/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4848,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"455204678","text":"# -*- coding: utf-8 -*-\nfrom __future__ import division\nimport math\n\nn = input(\"Enter the value of n: \")\n\ni = 1\nj = i**2\n\ncont = 1\n\nwhile cont<=n:\n    r = -(i / j)\n    \n    i = i + 1\n    cont = cont + 1\n    \n    print (i)\n    \n    ","sub_path":"moodledata/vpl_data/28/usersdata/64/7895/submittedfiles/serie1.py","file_name":"serie1.py","file_ext":"py","file_size_in_byte":210,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"563087342","text":"class Account:\n\n    def __init__(self,owner,balance):\n        self.balance = balance\n        self.owner = owner\n    \n    def __str__(self):\n        return \"{} balance is ${}\".format(self.owner, self.balance)\n    \n    def deposit(self,amount):\n        self.balance += amount \n        tot = self.balance\n        return 'your total balance = {}'.format(tot)\n    \n    def withdraw(self,amount):\n        self.balance -= amount\n        withdraw = amount\n        tot = self.balance\n        if self.balance >= 0:\n            return 'withdraw of {} accepted, your current balance is {}'.format(amount,tot)\n        return 'Poor fucker, your current balance is {}'.format(tot)\n\n# 1. Instantiate the class\nacct1 = Account('Dennis',500)\n\n# 2. Print the object\nprint(acct1)\n\n# 3. 
Show the account owner attribute\nprint(acct1.owner)\n\nprint(acct1.balance)\n\nprint(acct1.deposit(150))\n\nprint(acct1.withdraw(60))\n\nprint(acct1.withdraw(500))","sub_path":"bankaccount.py","file_name":"bankaccount.py","file_ext":"py","file_size_in_byte":925,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"630096878","text":"# -*- coding: utf-8 -*-\nimport sys\nimport time\nimport traceback\nimport functools\nimport threading\nimport platform\nimport multiprocessing\nimport contextlib\n\nif sys.version_info > (3, 2):\n    from contextlib import ContextDecorator\nelse:\n    try:\n        from contextlib2 import ContextDecorator\n    except ImportError:\n        class ContextDecorator(object):\n            def __call__(self, fn):\n                @functools.wraps(fn)\n                def decorator(*args, **kw):\n                    with self:\n                        return fn(*args, **kw)\n\n                return decorator\n\n            def __enter__(self):\n                return self\n\n            def __exit__(self, type, value, tb):\n                # Do whatever cleanup; returning False lets any exception propagate.\n                return False\n\nfrom yurlungur.tool.meta import meta\nfrom yurlungur.core import env, logger\n\n# assign UndoGroup\nif env.UE4():\n    UndoGroup = meta.ScopedEditorTransaction\n\nelif env.Houdini():\n    UndoGroup = meta.undos.group\n\nelif env.Substance():\n    UndoGroup = meta.sd.UndoGroup\n\nelif env.Max():\n    UndoGroup = functools.partial(meta.undo, True)\n\nelif env.Nuke():\n    UndoGroup = meta.Undo\n\nelse:\n    class UndoGroup(ContextDecorator):\n        \"\"\"\n        undoGroup for with statements.\n        .\n        >>> import yurlungur\n        >>> with yurlungur.UndoGroup(\"undo group\"):\n        >>>     for node in \"hoge\", \"fuga\", \"piyo\":\n        >>>         yurlungur.YNode(node).delete()\n        \"\"\"\n\n        def __init__(self, label):\n            self.label = label\n\n        def __enter__(self):\n            if env.Maya():\n                meta.undoInfo(ock=1)\n\n            elif env.Photoshop():\n                self.label = (\n                    meta.doc.activeHistoryState if Windows()\n                    else meta.doc.currentHistoryState().get()\n                )\n\n            elif env.C4D():\n                meta.doc.StartUndo()\n\n            elif env.Davinci():\n                meta.fusion.StartUndo()\n\n            elif env.Blender():\n                self.label = 0\n\n            return self\n\n        def __exit__(self, exc_type, exc_value, traceback):\n            if env.Maya():\n                meta.undoInfo(cck=1)\n\n            elif env.Photoshop():\n                from yurlungur.adapters import photoshop\n\n                if Windows():\n                    meta.doc.activeHistoryState = self.label\n                else:\n                    meta.doc.currentHistoryState().setTo_(self.label)\n\n                photoshop.do(\"undo\")\n\n            elif env.C4D():\n                meta.doc.EndUndo()\n\n            elif env.Davinci():\n                meta.fusion.EndUndo()\n\n            elif env.Blender():\n                meta.ops.ed.undo_history(item=self.label)\n                meta.ops.ed.redo()\n\n\ndef cache(func, *args, **kwargs):\n    \"\"\"\n    Substance, Blender and Davinci use lru_cache at Python3.\n    :param func:\n    :param args:\n    :param kwargs:\n    :return:\n    \"\"\"\n    saved = {}\n\n    @functools.wraps(func)\n    def wrapper(*args):\n        if args in saved:\n            return saved[args]\n        result = func(*args)\n        saved[args] = result\n        return result\n\n    return wrapper if sys.version_info < (3, 2) else functools.lru_cache(*args, **kwargs)(func)\n\n\ndef trace(func):\n    \"\"\"\n\n    :param func:\n    :return:\n    \"\"\"\n    try:\n        @functools.wraps(func)\n        def wrapper(*args, **kwargs):\n            try:\n                return func(*args, **kwargs)\n            except:\n                if hasattr(logger.logger, \"warning\"):\n                    logger.logger.warning(traceback.format_exc())\n                else:\n                    logger.logger.log(traceback.format_exc(), logger.Warning)\n    except (NameError, ImportError):\n        wrapper = func\n\n    return wrapper\n\n\ndef timer(func):\n    \"\"\"\n\n    :param func:\n    :return:\n    \"\"\"\n    import yurlungur\n\n    @functools.wraps(func)\n    def wrapper(*args, **kwargs):\n        
yurlungur.logger.pprint(\n            '{0} start'.format(func.__name__)\n        )\n        start_time = time.time()\n        ret = func(*args, **kwargs)\n        end_time = time.time()\n        yurlungur.logger.pprint(\n            '\\n{0}: {1:,f}s'.format(\"total: \", (end_time - start_time))\n        )\n        return ret\n\n    return wrapper\n\n\n@contextlib.contextmanager\ndef threads(func):\n    \"\"\"\n    with statements for threads.\n    available for Maya, Houdini, Nuke, 3dsMax, Substance, Blender and Cinema 4D\n    >>>\n    :param func:\n    :return:\n    \"\"\"\n\n    # https://developers.maxon.net/docs/Cinema4DPythonSDK/html/modules/c4d.threading/index.html\n    if env.C4D():\n        from c4d.threading import C4DThread\n        class _Thread(C4DThread):\n            def Main(self):\n                func()\n\n        t = _Thread()\n        t.Start()\n        # Do some other operations here\n        t.Wait(True)\n\n    elif env.Substance():\n        class _Thread(threading.Thread):\n            def run(self):\n                func()\n                return\n\n        t = _Thread()\n        t.start()\n\n    else:\n        t = threading.Thread(target=__worker, args=(func,))\n        t.daemon = True\n        t.start()\n        t.join()\n\n    # a @contextlib.contextmanager function must be a generator; yield the thread\n    # so the with-block body can run\n    yield t\n\n\ndef __worker(func):\n    \"\"\"\n    thread runner\n    :param func:\n    :return:\n    \"\"\"\n    if env.Maya():\n        import maya.utils as utils\n        utils.executeDeferred(func)\n\n    # https://forums.odforce.net/topic/22570-execute-in-main-thread-with-results/\n    elif env.Houdini():\n        import hdefereval\n\n        n = 0\n        while n < multiprocessing.cpu_count() + 1:\n            hdefereval.executeInMainThreadWithResult(func)\n            n += 1\n\n    elif env.Nuke():\n        meta.executeInMainThreadWithResult(func)\n\n    elif env.Max():\n        try:\n            func.acquire()\n            with meta.mxstoken():\n                func()\n        except:\n            raise\n        finally:\n            if func.locked():\n                func.release()\n\n    return func\n\n\ndef Windows(func=None):\n    if func is None:\n        return platform.system() == \"Windows\"\n\n    @functools.wraps(func)\n    def wrapper(*args, **kwargs):\n        if platform.system() == \"Windows\":\n            return func(*args, **kwargs)\n\n    return wrapper\n\n\ndef Linux(func=None):\n    if func is None:\n        return platform.system() == \"Linux\"\n\n    @functools.wraps(func)\n    def wrapper(*args, **kwargs):\n        if platform.system() == \"Linux\":\n            return func(*args, **kwargs)\n\n    return wrapper\n\n\ndef Mac(func=None):\n    if func is None:\n        return platform.system() == \"Darwin\"\n\n    @functools.wraps(func)\n    def wrapper(*args, **kwargs):\n        if platform.system() == \"Darwin\":\n            return func(*args, **kwargs)\n\n    return wrapper\n","sub_path":"yurlungur/core/deco.py","file_name":"deco.py","file_ext":"py","file_size_in_byte":6667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"85008266","text":"from selenium import webdriver\r\nfrom selenium.webdriver.common.keys import Keys\r\nfrom bs4 import BeautifulSoup\r\nfrom selenium.webdriver.common.by import By\r\nfrom selenium.webdriver.support.ui import WebDriverWait\r\nfrom selenium.webdriver.support import expected_conditions as EC\r\nfrom selenium.common.exceptions import TimeoutException\r\nfrom selenium.webdriver.support.ui import Select\r\nfrom selenium.webdriver.common.keys import Keys\r\nfrom selenium.webdriver.common.action_chains import ActionChains\r\n\r\nimport time\r\nimport csv\r\nimport datetime\r\n\r\n'''\r\nPurpose:\r\nThis trial2 will implement if elif logic: \r\nif NA, do not move anything from input file. Else do so. 
THen, Rank up\r\n'''\r\n#click on customer tab\r\n\r\n# customer_tab = browser.find_element_by_xpath('//*[@id=\"ui-id-134\"]')\r\n# customer_tab.click()\r\n\r\n# all_customers_link s= browser.find_element_by_xpath('//*[@id=\"s_5_2_19_0_mb\"]')\r\n# all_customers_link = browser.find_element_by_link_text(\"All Customers\")\r\n# all_customers_link.click()\r\ndef add_sales_team(browser, gsfa_id, sale_id):\r\n time.sleep(15)\r\n #search in customers tab, all customers, by GSFA Customer Id\r\n browser.get(\"http://cometpreprod.dhl.com/sales_apc/start.swe?SWECmd=GotoView&SWEView=All+Account+List+View&SWERF=1&SWEHo=cometpreprod.dhl.com&SWEBU=1\")\r\n time.sleep(15)\r\n print(\"If no loading of page go and seek help\")\r\n textBox = browser.find_element_by_name('s_1_1_1_0')\r\n print(\"Found textBox for GSFA Customer ID\")\r\n textBox.send_keys(\"GSFA Customer Id\")\r\n searchBox = browser.find_element_by_name('s_1_1_2_0')\r\n searchBox.send_keys(gsfa_id)\r\n browser.find_element_by_xpath('//*[@id=\"s_1_1_0_0_Ctrl\"]').click()\r\n time.sleep(3)\r\n print(\"Slept for 3 seconds\")\r\n browser.find_element_by_id('1Name').click()\r\n print(\"Link should bring up\")\r\n time.sleep(3)\r\n browser.find_element_by_id('s_1_1_139_0_icon').click()\r\n time.sleep(3)\r\n print(\"Bring up the popup\")\r\n #Popup has now been opened, proceed to input data\r\n time.sleep(3)\r\n print(\"Fnd the search box\")\r\n searchBox2 = browser.find_element_by_name('s_4_1_169_0')\r\n searchBox2.clear()\r\n searchBox2.send_keys('Position')\r\n #Search Sales Staff by user ID\r\n searchBox3 = browser.find_element_by_name('s_4_1_170_0')\r\n searchBox3.send_keys(sale_id)\r\n arrow_enter = browser.find_element_by_name('s_4_1_171_0')\r\n arrow_enter.click()\r\n time.sleep(1)\r\n print(\"Step 11, staff moves to the right\")\r\n browser.find_element_by_name('s_3_1_169_0').click()\r\n time.sleep(1)\r\n browser.find_element_by_name('s_3_1_173_0').click()\r\n return\r\n\r\n# time.sleep(30)\r\n# print(\"First block of time is done\")\r\n# browser.get(\"http://cometpreprod.dhl.com/sales_apc/start.swe?SWECmd=GotoView&SWEView=Account+Screen+Homepage+View&SWERF=1&SWEHo=cometpreprod.dhl.com&SWEBU=1\")\r\n# print(\"Search for element CUstomers Done\")\r\ndef add_comments_to_activities(browser, comments):\r\n time.sleep(5)\r\n activities_tab = browser.find_element_by_xpath('(//*[contains(text(),\"Activities\")])[2]').click()\r\n # action = ActionChains(driver)\r\n # action.move_to_element(activities_tab).perform()\r\n print('if click is successful')\r\n # Comment selection. Not to be done until given dummy account. 
\r\n    # browser.get('http://cometpreprod.dhl.com/sales_apc/start.swe?SWECmd=GotoView&SWEView=Account+Detail+-+Activities+View&SWERF=1&SWEHo=cometpreprod.dhl.com&SWEBU=1')\r\n    time.sleep(3)\r\n    browser.find_element_by_id('s_1_1_10_0_Ctrl').click()\r\n    print(\"Check that new record has been added\")\r\n    # browser.find_element_by_css_selector('div.siebui-list-textareactrl-nofocus').click()\r\n    time.sleep(3)\r\n    browser.find_element_by_id('1Comment').click()\r\n    time.sleep(3)\r\n    text_comment = browser.find_element_by_id('Comment_icon')\r\n    text_comment.click()\r\n    # text_comment.send_keys(comments)\r\n    time.sleep(3)\r\n    browser.find_element_by_id('Custom_TextArea').click()\r\n    browser.find_element_by_id('Custom_TextArea').send_keys(comments)\r\n    browser.find_element_by_id('btnOK').click()\r\n    time.sleep(5)\r\n\r\n    status_dropdown = browser.find_element_by_id('1Status')\r\n    status_dropdown.click()\r\n    time.sleep(1)\r\n    status_newdrop = browser.find_element_by_id('1_Status')\r\n    status_newdrop.send_keys(u'\\ue015')  # \\ue015 is Keys.ARROW_DOWN\r\n\r\n    status_newdrop.send_keys(u'\\ue015')\r\n    status_newdrop.send_keys(u'\\ue007')  # \\ue007 is Keys.ENTER\r\n\r\n    browser.find_element_by_id('s_1_1_10_0_Ctrl').click()\r\n    # select_status = Select(status_dropdown)\r\n    print(\"Check that status has been activated\")\r\n    time.sleep(3)\r\n    # select.select_by_visible_text('Done')\r\n    # To save the record (and prevent it from being undone)\r\n    # time.sleep(1)\r\n    # activities_tab = browser.find_element_by_xpath('//*[contains(text(),\"Opportunities\")[1]]').click()\r\n    # time.sleep(3)\r\n    print(\"All should be saved\")\r\n    return\r\n\r\ndef reason_for_lead(gsfa_id):\r\n    # Runs because the MC Rank up flow needs this program; uses the\r\n    # module-level browser.\r\n    time.sleep(15)\r\n    browser.get('http://cometpreprod.dhl.com/sales_apc/start.swe?SWECmd=GotoView&SWEView=DHL+Create+Development+Lead+List+View&SWERF=1&SWEHo=cometpreprod.dhl.com&SWEBU=1')\r\n    time.sleep(20)\r\n    textBox = browser.find_element_by_name('s_1_1_0_0')\r\n    print(\"Found textBox for GSFA Customer ID\")\r\n    textBox.send_keys(\"GSFA Customer Id\")\r\n    searchBox = browser.find_element_by_name('s_1_1_1_0')\r\n    searchBox.send_keys(gsfa_id)\r\n    browser.find_element_by_name('s_1_1_2_0').click()\r\n    time.sleep(10)\r\n    print(\"Slept for 10 seconds\")\r\n    browser.find_element_by_name('s_2_1_11_0').click()\r\n    time.sleep(15)\r\n    browser.find_element_by_id('1Reason_for_Lead').click()\r\n    reason_for_lead = browser.find_element_by_id('1_Reason_for_Lead')\r\n    # reason_for_lead.send_keys('MC Rank up')\r\n    reason_for_lead.click()\r\n    actions = ActionChains(browser)\r\n    actions.send_keys(Keys.DOWN * 4)\r\n    actions.perform()\r\n    reason_for_lead.send_keys(Keys.TAB)\r\n    name_textbox = browser.find_element_by_id('1_Name')\r\n    try:\r\n        name_textbox.send_keys('AUTOBOT2V8')\r\n    except:\r\n        print(\"There is already a bot named the same. 
Please look at another update\")\r\n        actions = ActionChains(browser)\r\n        actions.send_keys(Keys.ENTER * 3)\r\n        actions.perform()\r\n        time.sleep(1)\r\n\r\n    name_textbox.send_keys(Keys.TAB)\r\n    opportunity_type_textbox = browser.find_element_by_id('1_Opportunity_Type')\r\n    # opportunity_type_textbox.click()\r\n\r\n    opportunity_type_textbox.send_keys(Keys.DOWN)\r\n    print(\"Step Down to Cross?\")\r\n    opportunity_type_textbox.send_keys(Keys.DOWN)\r\n    opportunity_type_textbox.send_keys(Keys.DOWN)  # keys DOWN\r\n    opportunity_type_textbox.send_keys(Keys.ENTER)  # keys ENTER\r\n    opportunity_type_textbox.send_keys(Keys.TAB)\r\n    # reason_for_lead.click()\r\n    # actions = ActionChains(browser)\r\n    # actions.send_keys(Keys.TAB * 4)\r\n    # actions.send_keys(Keys.DOWN)\r\n    # actions.perform()\r\n    time.sleep(3)\r\n    browser.find_element_by_name('s_2_1_11_0').click()\r\n    return\r\n\r\nif __name__ == '__main__':\r\n    a = datetime.datetime.now()\r\n    chromedriver = \"C:\\\\chromedriver.exe\"\r\n    browser = webdriver.Chrome(chromedriver)\r\n    browser.get(\"https://cometpreprod.dhl.com/sales_apc/start.swe?SWECmd=Start&SWEHo=cometpreprod.dhl.com\")\r\n    # login details\r\n    username = browser.find_element_by_name(\"SWEUserName\")\r\n    password = browser.find_element_by_name(\"SWEPassword\")\r\n\r\n    username.send_keys(\"vsayyed\")\r\n    password.send_keys(\"changeme\")\r\n\r\n    browser.find_element_by_id(\"s_swepi_22\").click()\r\n    with open('eggs.csv', 'r') as f:\r\n        rows = csv.reader(f)\r\n        next(rows)\r\n        # go through each row in the csv file that Pooja's SALES BOT outputs.\r\n        for row in rows:\r\n            if row[15] == 'MCA':\r\n                continue\r\n            print(row[16], row[17])\r\n            if row[16] != 'NA':\r\n                print(\"Add Sales Team\")\r\n                add_sales_team(browser, row[3], row[15])\r\n            if row[17] != 'NA':\r\n                print(\"Add Comments\")\r\n                add_comments_to_activities(browser, row[17])\r\n            print(\"reason_for_lead\")\r\n            # reason_for_lead(row[3])\r\n\r\n    browser.close()\r\n    b = datetime.datetime.now()\r\n    c = b - a\r\n    print(a, b)\r\n    print(str(c) + ' seconds')\r\n","sub_path":"trial3.py","file_name":"trial3.py","file_ext":"py","file_size_in_byte":8294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"604281472","text":"# -*- coding: utf-8 -*-\n\"\"\"API connections\"\"\"\n\nimport requests\n\nkey = 'd3df9f41-dc53-4aaf-ba74-e23c649c875b'\n\n\ndef get_summoner_id(region, summ_name):\n    \"\"\"Gets the id from a summoner name\n    :param region Send the region of the summoner\n    :param summ_name Send the summoner name\"\"\"\n    res = requests.get(\"https://%s.api.pvp.net/api/lol/%s/v1.4/summoner/by-name/%s?api_key=%s\" %\n                       (region, region, summ_name, key))\n    if res.status_code == 200:\n        summ = summ_name.replace(' ', '').lower()\n        return res.json()[summ]['id']\n    elif res.status_code == 404:\n        return 'Summoner not found, check the name and region'\n    elif res.status_code == 500:\n        return 'Riot API server error'\n    else:\n        return res.status_code\n\n\ndef get_champion_name(region, champion_id):\n    \"\"\"Gets the champion name\n    :param region Send the region of the summoner\n    :param champion_id Send the id of the champion\"\"\"\n    res = requests.get('https://global.api.pvp.net/api/lol/static-data/%s/v1.2/champion/%s?api_key=%s' %\n                       (region, champion_id, key))\n    return res.json()['name'], res.json()['title']\n
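\n# Illustrative call (the champion id below is made up):\n#     name, title = get_champion_name('euw', 103)\n# returns the (name, title) pair parsed from the static-data endpoint.\n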
\ndef get_highest_mastery(region, summ_id):\n    \"\"\"Gets the highest mastery champions from a summoner\n    :param region Send the region of the summoner\n    :param summ_id Send the id of the summoner\"\"\"\n    regions = {'br': 'BR1', 'eune': 'EUN1', 'euw': 'EUW1', 'jp': 'JP1', 'kr': 'KR',\n               'lan': 'LA1', 'las': 'LA2', 'na': 'NA1', 'oce': 'OC1', 'ru': 'RU', 'tr': 'TR1'}\n    res = requests.get('https://%s.api.pvp.net/championmastery/location/%s/player/%s/topchampions?api_key=%s' %\n                       (region, regions[region], summ_id, key))\n    champs = ['None', 'None', 'None']\n    titles = ['None', 'None', 'None']\n    points = ['None', 'None', 'None']\n    for x in range(0, len(res.json())):\n        champs[x], titles[x] = get_champion_name(region, res.json()[x]['championId'])\n        points[x] = res.json()[x]['championPoints']\n    return champs, titles, points\n\n\ndef get_active_runes_and_masteries(region, summ_id):\n    \"\"\"Get the active runes and masteries from a subject player\n    :param region Send the region where the user is\n    :param summ_id Send the id from the summoner\n    Returns [mastery name, masteries] [rune name, runes]\"\"\"\n    res = requests.get('https://%s.api.pvp.net/api/lol/%s/v1.4/summoner/%s/masteries?api_key=%s' %\n                       (region, region, summ_id, key))\n    summ_id = str(summ_id)\n    # defaults avoid a NameError when no page is marked as current\n    mastery_name = None\n    masteries = \"This summoner has no masteries yet\"\n    if len(res.json()[summ_id][\"pages\"]) != 0:\n        for x in range(0, len(res.json()[summ_id][\"pages\"])):\n            if res.json()[summ_id]['pages'][x]['current'] is True:\n                mastery_name = res.json()[summ_id]['pages'][x]['name']\n                masteries = res.json()[summ_id]['pages'][x][\"masteries\"]\n            else:\n                continue\n    res = requests.get('https://%s.api.pvp.net/api/lol/%s/v1.4/summoner/%s/runes?api_key=%s' %\n                       (region, region, summ_id, key))\n    rune_name = None\n    runes = \"This summoner has no rune pages yet\"\n    if len(res.json()[summ_id][\"pages\"]) != 0:\n        for x in range(0, len(res.json()[summ_id][\"pages\"])):\n            if res.json()[summ_id]['pages'][x]['current'] is True:\n                rune_name = res.json()[summ_id]['pages'][x]['name']\n                runes = res.json()[summ_id]['pages'][x][\"slots\"]\n            else:\n                continue\n    return [mastery_name, masteries], [rune_name, runes]\n\n","sub_path":"summ_stats/connections.py","file_name":"connections.py","file_ext":"py","file_size_in_byte":3556,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"264539248","text":"from replacer import Replacer\n\nimport enchant\nfrom nltk.metrics import edit_distance\n\n\nclass SpellingReplacer(Replacer):\n\n    def __init__(self, dict_name=\"en\", max_dist=2):\n\n        self.spell_dict = enchant.Dict(dict_name)\n        self.max_dist = max_dist  # the max number of character edits allowed\n                                  # to turn a word into a suggestion\n\n    def replace(self, word):\n\n        '''If the word exists in the dict, just return the word.'''\n\n        if self.spell_dict.check(word):\n            return word\n\n        '''\n        If the word does not exist in the dict, look at all the\n        suggestions available.
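\n        Illustrative example: with max_dist=2, 'cookbok' is one edit away\n        from the suggestion 'cookbook' and gets replaced, while a token\n        three or more edits away is returned unchanged.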
\n        If the first suggestion's edit_distance is within max_dist then\n        we take the suggestion, otherwise we just return the word.\n        '''\n\n        suggestions = self.spell_dict.suggest(word)\n\n        if suggestions and edit_distance(word, suggestions[0]) <= self.max_dist:\n            word = suggestions[0]\n\n        return word\n\n\ndef main():\n\n    replacer = SpellingReplacer()\n    print(replacer.replace(\"Hello\"))\n\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"spellingReplacer.py","file_name":"spellingReplacer.py","file_ext":"py","file_size_in_byte":1089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"107702459","text":"from django.conf.urls import url\nfrom django.views.generic.base import RedirectView\nfrom .views import index, books, new_book, detail_book, update_book, delete_book\nfrom .views import new_publisher, new_category, categories, update_category\nfrom .views import delete_category, publishers, update_publisher, delete_publisher\nfrom .views import clients, new_client, detail_client, update_client, delete_client\nfrom .views import basket, detail_basket, new_lend, lends, clean_basket, delete_basket, back_lend, search_book\n\nurlpatterns = [\n    url(r'^$', RedirectView.as_view(url='/panel')),\n    url(r'^panel/search/$', search_book, name='search'),\n    url(r'^panel/$', index, name='index'),\n    url(r'^panel/books/$', books, name='books'),\n    url(r'^panel/book/new/$', new_book, name='newbook'),\n    url(r'^panel/book/(\\d+)/$', detail_book, name='detailbook'),\n    url(r'^panel/book/edit/(\\d+)/$', update_book, name='updatebook'),\n    url(r'^panel/book/delete/(\\d+)/$', delete_book, name='deletebook'),\n    # publisher urls\n    url(r'^panel/publisher/new/$', new_publisher, name='newpublisher'),\n    url(r'^panel/publishers/$', publishers, name='publishers'),\n    url(r'^panel/publisher/edit/(\\d+)/$', update_publisher, name='updatepublisher'),\n    url(r'^panel/publisher/delete/(\\d+)/$', delete_publisher, name='deletepublisher'),\n    # category urls\n    url(r'^panel/category/new/$', new_category, name='newcategory'),\n    url(r'^panel/categories/$', categories, name='categories'),\n    url(r'^panel/category/edit/(\\d+)/$', update_category, name='updatecategory'),\n    url(r'^panel/category/delete/(\\d+)/$', delete_category, name='deletecategory'),\n    # client urls\n    url(r'^panel/clients/$', clients, name='clients'),\n    url(r'^panel/client/new/$', new_client, name='newclient'),\n    url(r'^panel/client/(\\d+)/$', detail_client, name='detailclient'),\n    url(r'^panel/client/edit/(\\d+)/$', update_client, name='updateclient'),\n    url(r'^panel/client/delete/(\\d+)/$', delete_client, name='deleteclient'),\n    # basket urls\n    url(r'^panel/basket/(\\d+)/$', basket, name='basket'),\n    url(r'^panel/basket/$', detail_basket, name='detailbasket'),\n    url(r'^panel/cleanbasket/$', clean_basket, name='cleanbasket'),\n    url(r'^panel/deletebasket/(\\d+)', delete_basket, name='deletebasket'),\n    # lend urls\n    url(r'^panel/lend/$', new_lend, name='newlend'),\n    url(r'^panel/lends/$', lends, name='lends'),\n    url(r'^panel/backlend/(\\d+)/$', back_lend, name='backlend'),\n]\n","sub_path":"mylibrary/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"147603585","text":"from django.shortcuts import render, redirect\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.http import JsonResponse, HttpResponse\nfrom .models import User\nimport hashlib\nimport json\nfrom django.core import 
exceptions\nfrom django.core.paginator import Paginator\n# Create your views here.\n\n# home\ndef index(request):\n    try:\n        user = request.session.get(\"user\", \"未登录\")\n        currenUser = User.objects.get(username=user)\n        useravatar = currenUser.useravatar\n    except:\n        user = \"未登录\"\n        useravatar = \"/static/uploadfiles/default.png\"\n    return render(\n        request, \"webapp/index.html\", {\"username\": user, \"useravatar\": useravatar}\n    )\n\n\n# reading\ndef reading(request):\n    try:\n        user = request.session.get(\"user\", \"未登录\")\n        currenUser = User.objects.get(username=user)\n        useravatar = currenUser.useravatar\n    except:\n        user = \"未登录\"\n        useravatar = \"/static/uploadfiles/default.png\"\n    return render(\n        request, \"webapp/reading.html\", {\"username\": user, \"useravatar\": useravatar}\n    )\n\n# getdoubanbooks\n@csrf_exempt\ndef getdoubanbooks(request, pageindex):\n    fname = \"static/spiderInfos/books.json\"\n    # json.load()'s encoding argument is gone in Python 3; open as utf-8 text\n    with open(fname, 'r', encoding='utf-8') as books:\n        booklist = json.load(books)\n    obj = {\"booklist\": booklist[pageindex], \"pagenums\": len(booklist)}\n    return JsonResponse(obj, safe=False)\n\n\n# games - LOL\ndef games(request):\n    try:\n        user = request.session.get(\"user\", \"未登录\")\n        currenUser = User.objects.get(username=user)\n        useravatar = currenUser.useravatar\n    except:\n        user = \"未登录\"\n        useravatar = \"/static/uploadfiles/default.png\"\n    return render(\n        request, \"webapp/games.html\", {\"username\": user, \"useravatar\": useravatar}\n    )\n\n# games - Hearthstone\ndef gameslushi(request):\n    try:\n        user = request.session.get(\"user\", \"未登录\")\n        currenUser = User.objects.get(username=user)\n        useravatar = currenUser.useravatar\n    except:\n        user = \"未登录\"\n        useravatar = \"/static/uploadfiles/default.png\"\n    return render(\n        request, \"webapp/lushichuanshuo.html\", {\"username\": user, \"useravatar\": useravatar}\n    )\n\n# games -- fetch the Hearthstone card data\n@csrf_exempt\ndef getlushiinfos(request, pageindex):\n    fname = \"static/spiderInfos/炉石传说.json\"\n    with open(fname, \"r\", encoding=\"utf-8\") as cards:\n        card = json.load(cards)\n    Paginators = Paginator(card, 15)\n    cardobj = []\n    for item in Paginators.page(pageindex):\n        cardobj.append(item)\n\n    targetcards = {'cards': cardobj, 'pagenums': Paginators.num_pages}\n    return JsonResponse(targetcards, safe=False)\n
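\n# Illustrative behaviour of the paginator used above: pages are 1-indexed,\n# e.g. Paginator(range(45), 15).page(1) holds the first 15 items and\n# num_pages == 3; page numbers below 1 raise InvalidPage.\n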
\"webapp/login.html\", {\"msg2\": \"用户名或密码不可用\"})\n else:\n return render(request, \"webapp/login.html\", {\"msg2\": \"用户名或密码不可用\"})\n\n\n# logout\ndef logout(request):\n del request.session[\"user\"]\n return redirect(\"/index\")\n\n\n# register-page\ndef register(request):\n return render(request, \"webapp/register.html\", {\"msg\": \"\"})\n\n\n# check-register\ndef checkregister(request):\n verifycode = request.POST.get(\"verifycode\")\n if request.session[\"verify\"].upper() == verifycode.upper():\n # 获取用户提交的信息\n username = request.POST.get(\"user\")\n userpwd = request.POST.get(\"password\")\n usertel = request.POST.get(\"tel\")\n userage = request.POST.get(\"age\")\n # 这里还需要判断一下用户名是否可用--用户名重复判断\n try:\n user = User.objects.get(username=username)\n print(type(user))\n except exceptions.ObjectDoesNotExist:\n user = 200\n print(user)\n if user == 200:\n # 经查验后可以保存新用户到数据库中\n user = User()\n user.username = username\n user.userpwd = password_encrypt(userpwd)\n user.usertel = usertel\n user.userage = userage\n user.useravatar = \"/static/uploadfiles/default.png\"\n user.save()\n request.session[\"user\"] = username\n return redirect(\"/index\")\n else:\n return render(request, \"webapp/register.html\", {\"msg2\": \"用户名已存在或不可用\"})\n else:\n return render(request, \"webapp/register.html\", {\"msg\": \"验证码不正确,请重新输入\"})\n\n\n# 验证密码函数\ndef password_encrypt(pwd):\n md5 = hashlib.md5() # 2,实例化md5() 方法\n md5.update(pwd.encode()) # 3,对字符串的字节类型加密\n result = md5.hexdigest() # 4,加密\n return result\n\n\n# 生成验证码图片\n@csrf_exempt\ndef verify_code(request):\n from PIL import Image, ImageDraw, ImageFont\n\n # 引入随机函数模块\n import random\n\n # 定义变量,用于画面的背景色、宽、高\n bgcolor = (random.randrange(40, 200), random.randrange(40, 200), 255)\n width = 200\n height = 40\n # 创建画面对象\n im = Image.new(\"RGB\", (width, height), bgcolor)\n # 创建画笔对象\n draw = ImageDraw.Draw(im)\n # 调用画笔的point()函数绘制噪点\n for i in range(0, 100):\n xy = (random.randrange(0, width), random.randrange(0, height))\n fill = (random.randrange(0, 255), 255, random.randrange(0, 255))\n draw.point(xy, fill=fill)\n # 定义验证码的备选值\n str1 = \"ABCD123EFGHIJK456LMNOPQRS789TUVWXYZ0\"\n # 随机选取4个值作为验证码\n rand_str = \"\"\n for i in range(0, 4):\n rand_str += str1[random.randrange(0, len(str1))]\n # 构造字体对象,一个默认的(字体太小),一个LINUX下的,一个WINDOWS下的。23号可以了。\n # font = ImageFont.load_default().font\n # font = ImageFont.truetype('/usr/share/fonts/truetype/dejavu/DejaVuSans-Bold.ttf', 23)\n font = ImageFont.truetype(\"C:\\Windows\\Fonts\\Arial.ttf\", 23)\n # 构造字体颜色\n fontcolor = (255, random.randrange(0, 255), random.randrange(0, 255))\n # 绘制4个字,要注意间距\n draw.text((20, 10), rand_str[0], font=font, fill=fontcolor)\n draw.text((70, 10), rand_str[1], font=font, fill=fontcolor)\n draw.text((120, 10), rand_str[2], font=font, fill=fontcolor)\n draw.text((170, 10), rand_str[3], font=font, fill=fontcolor)\n # 释放画笔\n del draw\n # 存入session,用于做进一步验证\n request.session[\"verify\"] = rand_str\n # 内存文件操作\n import io\n\n buf = io.BytesIO()\n # 将图片保存在内存中,文件类型为png\n im.save(buf, \"png\")\n # 将内存中的图片数据返回给客户端,MIME类型为图片png\n return HttpResponse(buf.getvalue(), \"image/png\")\n\n\n# PresonalCenter-Page\ndef personalcenter(request):\n try:\n user = request.session.get(\"user\")\n currenUser = User.objects.get(username=user)\n useravatar = currenUser.useravatar\n except:\n user = \"\"\n useravatar = \"\"\n return render(\n request,\n \"webapp/personalcenter.html\",\n {\"username\": user, \"useravatar\": useravatar},\n )\n\n\n# upload--avatar\n@csrf_exempt\ndef uploadavatar(request):\n if request.method == 
\"POST\":\n try:\n currenUser = request.session.get(\"user\")\n userObJ = User.objects.get(username=currenUser)\n uploadFile = request.FILES.get(\"file\", None)\n fpath = \"static/uploadfiles/\" + uploadFile.name\n with open(fpath, \"wb\") as f:\n for line in uploadFile.chunks():\n f.write(line)\n userObJ.useravatar = fpath\n userObJ.save()\n obj = {\"status\": 200, \"msg\": \"上传成功!\", \"path\": fpath}\n except:\n obj = {\"status\": 202, \"msg\": \"上传失败!\", \"path\": None}\n\n return JsonResponse(obj)\n\n else:\n return JsonResponse({\"status\": 305, \"msg\": \"you cant upload with methos 'GET'\"})\n\n\n# 需要定义一个公共方法--每个页面获取用户头像的方法\ndef getAvatar(request):\n if request.session[\"user\"] is not None:\n currenUserName = request.session[\"user\"]\n currenUser = User.objects.get(username=currenUserName)\n obj = {\"avatar\": currenUser.useravatar}\n return JsonResponse(obj, safe=False)\n else:\n obj = {\"avatar\": \"/static/uploadfiles/default.png\"}\n return JsonResponse(obj, safe=False)\n\n\n# games页面--获取英雄信息\n@csrf_exempt\ndef getHeroInfos(request,pageindex):\n \n fname = \"static/spiderInfos/Allheroinfos.json\"\n with open(fname, \"r\",encoding=\"utf-8\") as heroinfos:\n heros = json.load(heroinfos)\n Paginators = Paginator(heros,18)\n herosobj = []\n for item in Paginators.page(pageindex):\n herosobj.append(item)\n targetHeros = {'heros':herosobj,'pagenums':Paginators.num_pages}\n return JsonResponse(targetHeros,safe=False)\n\n\n# games页面--英雄详细页\ndef heroDetails(request,heroid):\n return render(request,\"webapp/herodetails.html\",{\"title\":\"HeroDetails\"})\n\n# games-details 页面 接收英雄id获取详细信息\n@csrf_exempt\ndef getHeroDetials(request,heroid):\n fname = \"static/spiderInfos/Allheroinfos.json\"\n with open(fname, \"r\",encoding=\"utf-8\") as heroinfos:\n heros = json.load(heroinfos)\n \n for item in heros:\n #print(int(item[\"detailAndStory\"][\"heroId\"]))\n #print(int(heroid))\n try:\n if int(item[\"detailAndStory\"][\"heroId\"]) == int(heroid):\n obj = {\"status\":200,\"herocover\":item[\"heroDetails\"],\"details\":item[\"detailAndStory\"]}\n return JsonResponse(obj,safe=False)\n except:\n obj = {\"status\":202}\n return JsonResponse(obj,safe=False)\n\n \n \n \n","sub_path":"ToTheDed/totheend/webapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":11120,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"91312300","text":"import os, sys\nos.environ['QT_API'] = 'pyqt'\nimport sip\nsip.setapi(\"QString\", 2)\nsip.setapi(\"QVariant\", 2)\nfrom ui.ui_utility import *\n\nfrom PyQt4 import QtCore, QtGui\nfrom ui.frmMain import frmMain\nfrom ui.EPANET.frmEnergyOptions import frmEnergyOptions\nfrom ui.EPANET.frmHydraulicsOptions import frmHydraulicsOptions\nfrom ui.EPANET.frmMapBackdropOptions import frmMapBackdropOptions\nfrom ui.EPANET.frmQualityOptions import frmQualityOptions\nfrom ui.EPANET.frmReactionsOptions import frmReactionsOptions\nfrom ui.EPANET.frmReportOptions import frmReportOptions\nfrom ui.EPANET.frmTimesOptions import frmTimesOptions\nfrom ui.EPANET.frmTitle import frmTitle\nfrom ui.model_utility import *\nfrom core.epanet.project import Project\n\n\nclass frmMainEPANET(frmMain):\n def __init__(self, parent=None, *args):\n frmMain.__init__(self, parent)\n\n QtCore.QObject.connect(self.actionStdNewProjectMenu, QtCore.SIGNAL('triggered()'), self.std_newproj)\n QtCore.QObject.connect(self.actionStdNewProject, QtCore.SIGNAL('triggered()'), self.std_newproj)\n\n QtCore.QObject.connect(self.actionStdOpenProjMenu, 
QtCore.SIGNAL('triggered()'), self.std_openproj)\n QtCore.QObject.connect(self.actionStdOpenProj, QtCore.SIGNAL('triggered()'), self.std_openproj)\n\n QtCore.QObject.connect(self.actionStdExit, QtCore.SIGNAL('triggered()'), self.action_exit)\n\n self.model = 'EPANET'\n self.modelenv1 = 'EXE_EPANET'\n assembly_path = os.path.dirname(os.path.abspath(__file__))\n exe_name = \"epanet2d.exe\"\n pexe = os.path.join(assembly_path, exe_name)\n if not os.path.exists(pexe):\n pp = os.path.dirname(os.path.dirname(assembly_path))\n pexe = os.path.join(pp, \"Externals\", exe_name)\n if not os.path.exists(pexe):\n pexe = QtGui.QFileDialog.getOpenFileName(self, 'Locate EPANET Executable', '/',\n 'exe files (*.exe)')\n if os.path.exists(pexe):\n os.environ[self.modelenv1] = pexe\n else:\n os.environ[self.modelenv1] = ''\n\n self.on_load(model=self.model)\n\n self._frmEnergyOptions = None\n self._frmHydraulicsOptions = None\n self._frmMapBackdropOptions = None\n self._frmQualityOptions = None\n self._frmReactionsOptions = None\n self._frmReportOptions = None\n self._frmTimesOptions = None\n self._frmTitle = None\n\n def std_newproj(self):\n self.project = Project()\n self.setWindowTitle(self.model + \" - New\")\n self.project.file_name = \"New.inp\"\n pass\n\n def std_openproj(self):\n file_name = QtGui.QFileDialog.getOpenFileName(self, \"Open Project...\", \"\", \"Inp files (*.inp);;All files (*.*)\")\n if file_name:\n self.project = Project()\n try:\n self.project.read_file(file_name)\n self.setWindowTitle(self.model + \" - \" + os.path.split(file_name)[1])\n except:\n self.project = None\n self.setWindowTitle(self.model)\n pass\n\n def proj_save(self):\n self.project.write_file(self.project.file_name)\n\n def proj_save_as(self):\n file_name = QtGui.QFileDialog.getSaveFileName(self, \"Save As...\", \"\", \"Inp files (*.inp)\")\n if file_name:\n self.project.write_file(file_name)\n self.setWindowTitle(self.model + \" - \" + os.path.split(file_name)[1])\n\n def edit_options(self, itm, column):\n if self.project == None:\n return\n\n if itm.data(0, 0) == 'Energy':\n self._frmEnergyOptions = frmEnergyOptions(self)\n self._frmEnergyOptions.show()\n if itm.data(0, 0) == 'Hydraulics':\n self._frmHydraulicsOptions = frmHydraulicsOptions(self)\n self._frmHydraulicsOptions.show()\n if itm.data(0, 0) == 'Map/Backdrop':\n self._frmMapBackdropOptions = frmMapBackdropOptions(self)\n self._frmMapBackdropOptions.show()\n if itm.data(0, 0) == 'Quality':\n self._frmQualityOptions = frmQualityOptions(self)\n self._frmQualityOptions.show()\n if itm.data(0, 0) == 'Reactions':\n self._frmReactionsOptions = frmReactionsOptions(self)\n self._frmReactionsOptions.show()\n if itm.data(0, 0) == 'Report':\n self._frmReportOptions = frmReportOptions(self)\n self._frmReportOptions.show()\n if itm.data(0, 0) == 'Times':\n self._frmTimesOptions = frmTimesOptions(self)\n self._frmTimesOptions.show()\n if itm.data(0, 0) == 'Title/Notes':\n self._frmTitle = frmTitle(self)\n self._frmTitle.show()\n\n # mitm = itm\n # if self.project == None or mitm.data(0, 0) != 'Options':\n # return\n # from ui.frmOptions import frmOptions\n # dlg = frmOptions(self, self.project.options)\n # dlg.show()\n # result = dlg.exec_()\n # if result == 1:\n # pass\n def proj_run_simulation(self):\n\n margs=[]\n prog = os.environ[self.modelenv1]\n if not os.path.exists(prog):\n QMessageBox.information(None, \"EPANET\", \"EPANET Executable not found\", QMessageBox.Ok)\n return -1\n\n filename = ''\n if self.project == None:\n #file_name = 
QtGui.QFileDialog.getOpenFileName(parent=self, caption='Input file')\n filename = QtGui.QFileDialog.getOpenFileName(self, 'Open Existing Project', '/', 'Inp files (*.inp)')\n else:\n filename = self.project.file_name\n pass\n if os.path.exists(filename):\n fpre, fext = os.path.splitext(filename)\n margs.append(filename)\n margs.append(fpre + '.txt')\n margs.append(fpre + '.out')\n else:\n QMessageBox.information(None, \"EPANET\", \"EPANET input file not found\", QMessageBox.Ok)\n\n status = StatusMonitor0(prog, margs, self, model='EPANET')\n status.show()\n\n def on_load(self, **kwargs):\n #self.verticalLayout_2.setContentsMargins(0, 0, 0, 0)\n #cleaner = QtCore.QObjectCleanupHandler()\n #cleaner.add(self.tabProjMap.layout())\n self.obj_tree = ObjectTreeView(model=kwargs['model'])\n self.obj_tree.itemDoubleClicked.connect(self.edit_options)\n #self.tabProjMap.addTab(self.obj_tree, 'Project')\n layout = QVBoxLayout(self.tabProject)\n layout.setContentsMargins(0, 0, 0, 0)\n layout.addWidget(self.obj_tree)\n self.tabProject.setLayout(layout)\n self.setWindowTitle(self.model)\n\n self.obj_list = ObjectListView(model=kwargs['model'],ObjRoot='',ObjType='',ObjList=None)\n mlayout = self.dockw_more.layout()\n #mlayout.setContentsMargins(0, 0, 0, 0)\n mlayout.addWidget(self.obj_list)\n #layout1 = QVBoxLayout(self.dockw_more)\n self.dockw_more.setLayout(mlayout)\n #self.actionPan.setEnabled(False)\n\n def action_exit(self):\n # TODO: check project status and prompt if there are unsaved changed\n app.quit()\n\nif __name__ == '__main__':\n app = QtGui.QApplication(sys.argv)\n MainApp = frmMainEPANET()\n MainApp.show()\n sys.exit(app.exec_())\n","sub_path":"src/ui/EPANET/frmMainEPANET.py","file_name":"frmMainEPANET.py","file_ext":"py","file_size_in_byte":7239,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"168572144","text":"\"\"\"Core functions.\"\"\"\n\nimport os\nimport nibabel as nb\nimport numpy as np\nfrom matplotlib.cm import get_cmap\nfrom nibabel.processing import resample_to_output\nfrom imageio import mimwrite\nfrom PIL import Image, ImageDraw, ImageFont\nfrom skimage.transform import resize\n\n\ndef parse_filename(filepath):\n \"\"\"Parse input file path into directory, basename and extension.\n\n Parameters\n ----------\n filepath: string\n Input name that will be parsed into directory, basename and extension.\n\n Returns\n -------\n dirname: str\n File directory.\n basename: str\n File name without directory and extension.\n ext: str\n File extension.\n\n \"\"\"\n path = os.path.normpath(filepath)\n dirname = os.path.dirname(path)\n filename = path.split(os.sep)[-1]\n basename, ext = filename.split(os.extsep, 1)\n return dirname, basename, ext\n\n\ndef load_and_prepare_image(filename, size=1, iso=False, time=None):\n \"\"\"Load and prepare image data.\n\n Parameters\n ----------\n filename1: str\n Input file (eg. 
/john/home/image.nii.gz)\n    size: float\n        Image resizing factor.\n    iso: bool\n        Resample image to appear isotropic based on pixel dimension size.\n    time: str\n        Plane to pick for 4D-mode.\n\n    Returns\n    -------\n    out_img: numpy array\n\n    \"\"\"\n    # Load NIfTI file\n    image = nb.load(filename)\n    # no need to make volumes isometric\n    if time is not None:\n        out_img = image.get_data()\n        x, y, z, t = out_img.shape\n        out_img = out_img/out_img.max()  # implicit conversion\n        if size != 1:\n            out_img = resize(out_img, list(np.array([x, y, z]) * size) + [t])\n        return out_img, z\n\n    data = image.get_data()\n    if iso:\n        pixdims = list(image.header['pixdim'][1:4])\n        if pixdims == [1, 1, 1]:\n            print('Pixel dims are identical, skipping isotropic resampling.')\n        else:\n            image.set_sform(np.diag(pixdims + [1]))\n            image = resample_to_output(image, mode='constant', order=0)\n            data = image.get_data()\n            if data.min() < 0:\n                data -= data.min()  # shift to positive integers after resampling\n\n    # Pad data array with zeros to make the shape isometric\n    maximum = np.max(data.shape)\n\n    out_img = np.zeros([maximum] * 3)\n\n    a, b, c = data.shape\n    # offsets that centre the volume inside the padded cube\n    x, y, z = (maximum - np.array(data.shape)) / 2\n\n    out_img[int(x):a + int(x),\n            int(y):b + int(y),\n            int(z):c + int(z)] = data\n\n    out_img /= out_img.max()  # scale image values between 0-1\n\n    # Resize image by the following factor\n    if size != 1:\n        out_img = resize(out_img, [int(size * maximum)] * 3)\n\n    maximum = int(maximum * size)\n\n    return out_img, maximum\n\n\ndef create_mosaic_normal(out_img, maximum, time=None, cols=10):\n    \"\"\"Create grayscale image.\n\n    Parameters\n    ----------\n    out_img: numpy array\n    maximum: int\n    time: str\n    cols: int\n\n    Returns\n    -------\n    new_img: numpy array\n\n    \"\"\"\n    # one gif per volume\n    if time is not None:\n        # calculate grid size\n        x, y, z, t = out_img.shape\n        rows = int(np.ceil(t/cols))\n        # pad missing volumes with zeros\n        out_img = np.append(out_img, np.zeros((x, y, z, rows * cols - t)), axis=3)\n\n        if time == 'sagittal':\n            new_img = np.array([\n                np.vstack([\n                    np.hstack([\n                        np.flip(out_img[x - 1 - d, :, :, col + row * cols], 1).T\n                        for col in range(cols)\n                    ]) for row in range(rows)\n                ]) for d in range(x)\n            ])\n        elif time == 'coronal':\n            new_img = np.array([\n                np.vstack([\n                    np.hstack([\n                        np.flip(out_img[:, y - 1 - d, :, col + row * cols], 1).T\n                        for col in range(cols)\n                    ]) for row in range(rows)\n                ]) for d in range(y)\n            ])\n        else:\n            new_img = np.array([\n                np.vstack([\n                    np.hstack([\n                        np.flip(out_img[:, :, d, col + row * cols], 1).T\n                        for col in range(cols)\n                    ]) for row in range(rows)\n                ]) for d in range(z)\n            ])\n        # draw volume index into grid\n        fontsizes = np.array([8, 12, 16, 24, 48])\n        # find font size closest to 10% of slice width\n        fontsize = fontsizes[np.argmin(np.abs(fontsizes - x/10))]\n        font = ImageFont.truetype('Vera.ttf', fontsize)\n        for depth in range(z):\n            mosaic = Image.fromarray(np.uint8(new_img[depth, :, :] * 255))\n            draw = ImageDraw.Draw(mosaic)\n            i = 0\n            for row in range(rows):\n                for col in range(cols):\n                    draw.text((col*x, row*y), str(i), font=font, fill=255)\n                    i = i + 1\n            new_img[depth, :, :] = np.array(mosaic)/np.max(mosaic)\n    # 3x1 gif for each plane\n    else:\n        new_img = np.array(\n            [np.hstack((\n                np.hstack((\n                    np.flip(out_img[i, :, :], 1).T,\n                    np.flip(out_img[:, maximum - i - 1, :], 1).T)),\n                np.flip(out_img[:, :, maximum - i - 1], 1).T))\n             for i in range(maximum)])\n\n    return new_img\n
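\n# How the depth mosaic below works: create_mosaic_depth() packs slices\n# i, i+1 and i+2 into the R, G and B channels (frame 0 = slices 0-2,\n# frame 1 = slices 1-3, ...), which produces the red/blue tinting its\n# docstring describes.\n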
\ndef create_mosaic_depth(out_img, maximum):\n    \"\"\"Create an image with concurrent slices represented with colors.\n\n    The image shows you in color what the value of the next slice will be. If\n    the color is slightly red or blue it means that the value on the next slice\n    is brighter or darker, respectively. It therefore encodes a certain kind of\n    depth into the gif.\n\n    Parameters\n    ----------\n    out_img: numpy array\n    maximum: int\n\n    Returns\n    -------\n    new_img: numpy array\n\n    \"\"\"\n    # Load normal mosaic image\n    new_img = create_mosaic_normal(out_img, maximum)\n\n    # Create RGB image (where red and blue mean a positive or negative shift in\n    # the direction of the depicted axis)\n    rgb_img = [new_img[i:i + 3, ...] for i in range(maximum - 3)]\n\n    # Make sure to have correct data shape\n    out_img = np.rollaxis(np.array(rgb_img), 1, 4)\n\n    # Add the 3 lost images at the end\n    out_img = np.vstack(\n        (out_img, np.zeros([3] + [o for o in out_img[-1].shape])))\n\n    return out_img\n\n\ndef create_mosaic_RGB(out_img1, out_img2, out_img3, maximum):\n    \"\"\"Create RGB image.\n\n    Parameters\n    ----------\n    out_img1, out_img2, out_img3: numpy arrays\n    maximum: int\n\n    Returns\n    -------\n    new_img: numpy array\n\n    \"\"\"\n    # Load normal mosaic image\n    new_img1 = create_mosaic_normal(out_img1, maximum)\n    new_img2 = create_mosaic_normal(out_img2, maximum)\n    new_img3 = create_mosaic_normal(out_img3, maximum)\n\n    # Create RGB image (where red and blue mean a positive or negative shift\n    # in the direction of the depicted axis)\n    rgb_img = [[new_img1[i, ...], new_img2[i, ...], new_img3[i, ...]]\n               for i in range(maximum)]\n\n    # Make sure to have correct data shape\n    out_img = np.rollaxis(np.array(rgb_img), 1, 4)\n\n    # Add the 3 lost images at the end\n    out_img = np.vstack(\n        (out_img, np.zeros([3] + [o for o in out_img[-1].shape])))\n\n    return out_img\n\n\ndef write_gif_normal(filename, size=1, fps=18, iso=False, time=None, cols=10):\n    \"\"\"Procedure for writing grayscale image.\n\n    Parameters\n    ----------\n    filename: str\n        Input file (eg. /john/home/image.nii.gz)\n    size: float\n        Between 0 and 1.\n    fps: int\n        Frames per second\n    iso: bool\n        Isotropic\n    time: str\n        Slice mode for 4D image\n\n    \"\"\"\n    # Load NIfTI and put it in right shape\n    out_img, maximum = load_and_prepare_image(filename, size, iso, time)\n\n    # Create output mosaic\n    new_img = create_mosaic_normal(out_img, maximum, time, cols)\n\n    # Figure out extension\n    ext = '.{}'.format(parse_filename(filename)[2])\n\n    # Write gif file\n    mimwrite(filename.replace(ext, '.gif'), new_img,\n             format='gif', fps=int(fps * size))\n
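\n# Illustrative call (the path is made up):\n#     write_gif_normal('brain.nii.gz', size=0.5, fps=18)\n# writes 'brain.gif' next to the input, one sagittal/coronal/axial row per\n# frame, or a volume grid when time is given.\n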
\ndef write_gif_depth(filename, size=1, fps=18):\n    \"\"\"Procedure for writing depth image.\n\n    The image shows you in color what the value of the next slice will be. If\n    the color is slightly red or blue it means that the value on the next slice\n    is brighter or darker, respectively. It therefore encodes a certain kind of\n    depth into the gif.\n\n    Parameters\n    ----------\n    filename: str\n        Input file (eg. /john/home/image.nii.gz)\n    size: float\n        Between 0 and 1.\n    fps: int\n        Frames per second\n\n    \"\"\"\n    # Load NIfTI and put it in right shape\n    out_img, maximum = load_and_prepare_image(filename, size)\n\n    # Create output mosaic\n    new_img = create_mosaic_depth(out_img, maximum)\n\n    # Figure out extension\n    ext = '.{}'.format(parse_filename(filename)[2])\n\n    # Write gif file\n    mimwrite(filename.replace(ext, '_depth.gif'), new_img,\n             format='gif', fps=int(fps * size))\n\n\ndef write_gif_rgb(filename1, filename2, filename3, size=1, fps=18):\n    \"\"\"Procedure for writing RGB image.\n\n    Parameters\n    ----------\n    filename1: str\n        Input file for red channel.\n    filename2: str\n        Input file for green channel.\n    filename3: str\n        Input file for blue channel.\n    size: float\n        Between 0 and 1.\n    fps: int\n        Frames per second\n\n    \"\"\"\n    # Load NIfTI and put it in right shape\n    out_img1, maximum1 = load_and_prepare_image(filename1, size)\n    out_img2, maximum2 = load_and_prepare_image(filename2, size)\n    out_img3, maximum3 = load_and_prepare_image(filename3, size)\n\n    if maximum1 == maximum2 and maximum1 == maximum3:\n        maximum = maximum1\n    else:\n        # without this guard `maximum` would be unbound below\n        raise ValueError('All three input images must resolve to the same size.')\n\n    # Create output mosaic\n    new_img = create_mosaic_RGB(out_img1, out_img2, out_img3, maximum)\n\n    # Generate output path\n    out_filename = '{}_{}_{}_rgb.gif'.format(parse_filename(filename1)[1],\n                                             parse_filename(filename2)[1],\n                                             parse_filename(filename3)[1])\n    out_path = os.path.join(parse_filename(filename1)[0], out_filename)\n\n    # Write gif file\n    mimwrite(out_path, new_img, format='gif', fps=int(fps * size))\n\n\ndef write_gif_pseudocolor(filename, size=1, fps=18, colormap='hot'):\n    \"\"\"Procedure for writing pseudo color image.\n\n    The colormap can be any colormap from matplotlib.\n\n    Parameters\n    ----------\n    filename: str\n        Input file (eg. /john/home/image.nii.gz)\n    size: float\n        Between 0 and 1.\n    fps: int\n        Frames per second\n    colormap: str\n        Name of the colormap that will be used.\n\n    \"\"\"\n    # Load NIfTI and put it in right shape\n    out_img, maximum = load_and_prepare_image(filename, size)\n\n    # Create output mosaic\n    new_img = create_mosaic_normal(out_img, maximum)\n\n    # Transform values according to the color map\n    cmap = get_cmap(colormap)\n    color_transformed = [cmap(new_img[i, ...]) for i in range(maximum)]\n    cmap_img = np.delete(color_transformed, 3, 3)\n\n    # Figure out extension\n    ext = '.{}'.format(parse_filename(filename)[2])\n    # Write gif file\n    mimwrite(filename.replace(ext, '_{}.gif'.format(colormap)),\n             cmap_img, format='gif', fps=int(fps * size))\n","sub_path":"gif_your_nifti/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":11246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"244859249","text":"import netCDF4\nimport os\nfrom nco import Nco\n\n###\n# UNDER CONSTRUCTION\n###\n\n\ndef xgeo_multifile_load(nc_wildcard, nc_dir=None):\n    \"\"\"\n    Read data from multiple (e.g. hourly xgeo data) netcdf files\n    :param nc_wildcard: common section of the filenames, e.g. mf_files*.nc
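\n        (illustrative; the test at the bottom of this file passes\n        'rr1h_2018_01_31_?.nc')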
\n    :return:\n    \"\"\"\n    if nc_dir:  # `if dir:` tested the builtin, which is always truthy\n        nc_path = os.path.join(nc_dir, nc_wildcard)\n    else:\n        nc_path = nc_wildcard\n\n    nco = Nco()\n    nc_temp = nco.ncrcat(input=nc_path)\n    nc = netCDF4.Dataset(nc_temp)\n\n    # TODO: add a function to restrict dates and times\n    return nc\n\n\ndef test_31jan2018_precip():\n    nc_dir = r'Y:\\metdata\\met_obs_v2.0\\rr1h\\2018\\01'\n    nc_wildcard = r'rr1h_2018_01_31_?.nc'\n    nc = xgeo_multifile_load(nc_wildcard, nc_dir)\n\n    print(nc)\n\n\nif __name__ == \"__main__\":\n    test_31jan2018_precip()\n","sub_path":"aps/aps_io/aps_nc.py","file_name":"aps_nc.py","file_ext":"py","file_size_in_byte":799,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"121104144","text":"\"\"\"\nSYNOPSIS\n---------------------------------------------------------------------\n Script to update firmware using a catalog\n\nDESCRIPTION\n---------------------------------------------------------------------\n This script exercises the OME REST API to update firmware using a catalog.\n\n Note that the credentials entered are not stored to disk.\n\nEXAMPLE\n---------------------------------------------------------------------\npython update_firmware_using_catalog_3.0.py --ip <ip> --user admin\n    --password <password> --groupid 25315\n\"\"\"\nimport json\nimport sys\nimport time\nimport argparse\nimport requests\nimport urllib3\nfrom argparse import RawTextHelpFormatter\nfrom requests.packages.urllib3.exceptions import InsecureRequestWarning\nrequests.packages.urllib3.disable_warnings(InsecureRequestWarning)\n\nCATALOGDETAILS = []\nCATALOG_INFO = {}\nBASELINE_INFO = {}\n\ndef authenticate_with_ome(ip_address, user_name, password):\n    \"\"\" X-auth session creation \"\"\"\n    auth_success = False\n    session_url = \"https://%s/api/SessionService/Sessions\" % ip_address\n    user_details = {'UserName': user_name,\n                    'Password': password,\n                    'SessionType': 'API'}\n    headers = {'content-type': 'application/json'}\n    session_info = requests.post(session_url, verify=False,\n                                 data=json.dumps(user_details),\n                                 headers=headers)\n    if session_info.status_code == 201:\n        headers['X-Auth-Token'] = session_info.headers['X-Auth-Token']\n        auth_success = True\n    else:\n        error_msg = \"Failed create of session with {0} - Status code = {1}\"\n        print(error_msg.format(ip_address, session_info.status_code))\n    return auth_success, headers\n\n\ndef check_for_existing_catalog(ip_address, headers):\n    \"\"\" Check whether a catalog already exists \"\"\"\n    url = 'https://%s/api/UpdateService/Catalogs' % ip_address\n    cat_response = requests.get(url, headers=headers, verify=False)\n    if cat_response.status_code == 200:\n        cat_json_resp = cat_response.json()\n        if cat_json_resp['@odata.count'] > 0:\n            process_value_node(cat_json_resp)\n            return CATALOGDETAILS\n    raise Exception(\"Unable to retrieve catalog information\")\n\ndef process_value_node(cat_json_resp):\n    \"\"\" Process each value to extract the catalog id, baseline id\n        and repository id.\n    \"\"\"\n    associated_baseline_list = []\n    i = 0\n    while i < len(cat_json_resp[\"value\"]):\n        if cat_json_resp[\"value\"][i].get(\"Repository\")[\"Source\"] == \"downloads.dell.com\":\n            if cat_json_resp[\"value\"][i].get(\"AssociatedBaselines\"):\n                j = 0\n                while j < len(cat_json_resp[\"value\"][i].get(\"AssociatedBaselines\")):\n                    associated_baseline_list.append(\n                        cat_json_resp[\"value\"][i].get(\n                            \"AssociatedBaselines\")[j][\"BaselineId\"])\n                    j += 1\n                CATALOGDETAILS.append(\n                    {'REPO_ID': cat_json_resp[\"value\"][i].get(\"Repository\")[\"Id\"],\n                     'CATALOG_ID': 
cat_json_resp[\"value\"][i][\"Id\"],\n 'associated_baseline_id': associated_baseline_list})\n else:\n CATALOGDETAILS.append(\n {'REPO_ID': cat_json_resp[\"value\"][i].get(\"Repository\")[\"Id\"],\n 'CATALOG_ID': cat_json_resp[\"value\"][i][\"Id\"],\n 'associated_baseline_id': []})\n i += 1\n return CATALOGDETAILS\n\ndef delete_catalog(ip_address, headers):\n \"\"\" Delete existing catalog from dell repo \"\"\"\n url = 'https://%s/api/UpdateService/Actions/UpdateService.RemoveCatalogs' % ip_address\n catalog_list = [d['CATALOG_ID'] for d in CATALOGDETAILS]\n payload = catalog_deletion_payload(catalog_list)\n status, data = request(ip_address=ip_address, url=url,\n header=headers, payload=payload, method='POST')\n return status, data\n\n\ndef delete_baseline(ip_address, headers, baseline_list):\n \"\"\" Delete existing baseline from dell repo \"\"\"\n url = 'https://%s/api/UpdateService/Actions/UpdateService.RemoveBaselines' % ip_address\n payload = baseline_deletion_payload(baseline_list)\n status, data = request(ip_address=ip_address, url=url,\n header=headers, payload=payload, method='POST')\n return status, data\n\n\ndef catalog_creation(ip_address, headers):\n \"\"\" Create new catalog \"\"\"\n url = 'https://%s/api/UpdateService/Catalogs' % ip_address\n print(\"Creating new catalog.!\")\n payload = catalog_creation_payload()\n status, data = request(ip_address=ip_address, url=url,\n header=headers, payload=payload, method='POST')\n if status != 201:\n raise Exception(\"Unable to create catalog\", data)\n time.sleep(180)\n get_catalog_status, get_catalog_data = request(ip_address=ip_address, url=url, header=headers)\n if get_catalog_status == 200 and get_catalog_data[\"@odata.count\"] != 0:\n if get_catalog_data[\"value\"][0].get(\"Repository\")[\"Source\"] == \"downloads.dell.com\":\n return get_catalog_data[\"value\"][0][\"Id\"]\n raise Exception(\"Exiting the code, Unable to create catalog\")\n else:\n raise Exception(\"Exiting the code, Unable to create catalog : System Info \", sys.exc_info())\n\n\ndef baseline_creation(ip_address, headers, param_map):\n \"\"\" Create new baseline \"\"\"\n global CATALOG_INFO\n url = 'https://%s/api/UpdateService/Baselines' % ip_address\n print(\"Creating new Baseline.!\")\n CATALOG_INFO = get_catalog_details(ip_address, headers)\n if param_map['group_id']:\n group_type, group_name = get_group_details(ip_address, headers, param_map['group_id'])\n payload = baseline_creation_payload(CATALOG_INFO[\"CATALOG_ID\"],\n CATALOG_INFO[\"REPO_ID\"], param_map['group_id'],\n group_type, \"GROUP\")\n else:\n device_type, device_name = get_device_details(ip_address, headers, param_map['device_id'])\n payload = baseline_creation_payload(CATALOG_INFO[\"CATALOG_ID\"],\n CATALOG_INFO[\"REPO_ID\"], param_map['device_id'],\n device_type, device_name)\n baseline_status, baseline_data = request(ip_address=ip_address, url=url,\n header=headers, payload=payload, method='POST')\n if baseline_status == 201:\n baseline_task_id = baseline_data[\"TaskId\"]\n track_job_to_completion(ip_address, headers, baseline_task_id, 'Baseline job')\n id_repo = CATALOG_INFO.get(\"REPO_ID\")\n id_cat = CATALOG_INFO.get(\"CATALOG_ID\")\n return get_baseline_id(ip_address, headers, id_repo, id_cat)\n raise Exception(\"Unable to create baseline, Job status : \", baseline_status)\n\n\ndef check_device_compliance_report(ip_address, headers, id_baseline):\n \"\"\" Checks device compliances \"\"\"\n compliance_report_list = []\n source_names = None\n compl_url = 
\"https://%s/api/UpdateService/Baselines(%s)/DeviceComplianceReports\"%(ip_address,\n id_baseline)\n component_status, component_data = request(ip_address=ip_address, url=compl_url, header=headers)\n if (component_status == 200 and component_data[\"value\"]):\n comp_val_list = component_data[\"value\"]\n response_flag = check_response_type(comp_val_list)\n if response_flag:\n for compliance_dict in comp_val_list:\n compliance_list = compliance_dict.get('ComponentComplianceReports')\n if compliance_list:\n for component in compliance_list:\n if component[\"UpdateAction\"] == \"UPGRADE\":\n if source_names:\n source_names = source_names + ';' + component[\"SourceName\"]\n else:\n source_names = component[\"SourceName\"]\n if source_names:\n compliance_report_list.append({\"Id\": compliance_dict.get(\"DeviceId\"), \"Data\":source_names })\n else:\n for compliance_dict in comp_val_list:\n source_names = None\n navigation_url_link = compliance_dict.get('ComponentComplianceReports@odata.navigationLink')\n navigation_url = \"https://%s%s\"%(ip_address, navigation_url_link)\n component_status, component_data = request(ip_address=ip_address, url=navigation_url, header=headers)\n\n if (component_status == 200 and component_data[\"value\"]):\n comp_val_list = component_data[\"value\"]\n for compliance_dicts in comp_val_list:\n if compliance_dicts:\n if compliance_dicts[\"UpdateAction\"] == \"UPGRADE\":\n if source_names:\n source_names = source_names + ';' + compliance_dicts[\"SourceName\"]\n else:\n source_names = compliance_dicts[\"SourceName\"]\n \n if source_names:\n compliance_report_list.append({\"Id\": compliance_dict.get(\"DeviceId\"), \"Data\":source_names })\n \n else:\n sys.exit(\"component data is empty\")\n else:\n raise Exception(\"Unable to get compliance data\")\n return compliance_report_list\n\ndef create_target_payload(compliance_data_list):\n \"\"\" Create target for firmware payload \"\"\"\n my_dist = {}\n target_list = []\n for data in compliance_data_list:\n for key, value in data.items():\n if key == \"Id\":\n my_dist[\"Id\"] = value\n my_dist[\"TargetType\"] = {\n \"Id\": 1000,\n \"Name\": \"DEVICE\"\n }\n if key == \"Data\":\n my_dist[\"Data\"] = value\n\n if my_dist[\"Data\"] != \"\":\n target_list.append(my_dist.copy())\n if target_list:\n return target_list\n return 0\n\ndef check_response_type(comp_val_list):\n \"\"\" Checks whether response contains ComponentComplianceReports or not \"\"\"\n flag = False\n for val in comp_val_list:\n if 'ComponentComplianceReports' in val:\n flag = True\n return flag\n\n\ndef firmware_update(ip_address, headers, repository_id, id_cat, id_baseline, target_data):\n \"\"\" Updates Firmware \"\"\"\n job_type_id = 0\n status, job_type_response_data = get_job_types(ip_address, headers)\n if status == 200:\n values = len(job_type_response_data[\"value\"])\n job_type_id = get_job_type_id(values, job_type_response_data)\n payload = create_payload_for_firmware_update(job_type_id, str(id_baseline),\n str(id_cat), str(repository_id), target_data)\n url = 'https://{0}/api/JobService/Jobs'.format(ip_address)\n update_status, update_data = request(ip_address=ip_address, url=url,\n header=headers, payload=payload, method='POST')\n if update_status == 201 and update_data != 0:\n job_id = update_data[\"Id\"]\n if job_id != -1 or job_id != 0 or job_id is not None:\n track_job_to_completion(ip_address, headers, job_id, 'Firmware Update')\n else:\n print(\"unsuccessful or Unable to get job id\")\n else:\n print(\"unable to get job types\")\n\n\ndef 
get_job_type_id(values, job_type_response_data):\n \"\"\" Return the id of Job Type which has name Update Task \"\"\"\n i = 0\n while i < values:\n if job_type_response_data[\"value\"][i][\"Name\"] == \"Update_Task\":\n job_type_id = job_type_response_data[\"value\"][i][\"Id\"]\n return job_type_id\n i += 1\n return 0\ndef track_job_to_completion(ip_address, headers, job_id, job_name):\n \"\"\" Tracks the update job to completion / error \"\"\"\n job_status_map = {\n \"2020\": \"Scheduled\",\n \"2030\": \"Queued\",\n \"2040\": \"Starting\",\n \"2050\": \"Running\",\n \"2060\": \"Completed\",\n \"2070\": \"Failed\",\n \"2090\": \"Warning\",\n \"2080\": \"New\",\n \"2100\": \"Aborted\",\n \"2101\": \"Paused\",\n \"2102\": \"Stopped\",\n \"2103\": \"Canceled\"\n }\n max_retries = 20\n sleep_interval = 60\n failed_job_status = [2070, 2090, 2100, 2101, 2102, 2103]\n job_url = 'https://%s/api/JobService/Jobs(%s)' % (ip_address, job_id)\n loop_ctr = 0\n job_incomplete = True\n print(\"Polling %s to completion ...\" % job_id)\n while loop_ctr < max_retries:\n loop_ctr += 1\n time.sleep(sleep_interval)\n job_resp = requests.get(job_url, headers=headers, verify=False)\n if job_resp.status_code == 200:\n job_status = str((job_resp.json())['LastRunStatus']['Id'])\n print(\"Iteration %s: Status of %s is %s\" % (loop_ctr, job_id,\n job_status_map[job_status]))\n if int(job_status) == 2060:\n job_incomplete = False\n print(\"%s completed successfully ... Exiting\"%job_name)\n break\n elif int(job_status) in failed_job_status:\n job_incomplete = False\n print(\"%s job failed ... \"%job_name)\n job_hist_url = str(job_url) + \"/ExecutionHistories\"\n job_hist_resp = requests.get(job_hist_url, headers=headers, verify=False)\n if job_hist_resp.status_code == 200:\n get_execution_detail(job_hist_resp, headers, job_hist_url)\n break\n else:\n print(\"Unable to poll status of %s - Iteration %s \" % (job_id, loop_ctr))\n if job_incomplete:\n print(\"Job %s incomplete after polling %s times...Check status\" % (job_id, max_retries))\n\ndef get_execution_detail(job_hist_resp, headers, job_hist_url):\n \"\"\" Get execution details \"\"\"\n job_history_id = str((job_hist_resp.json())['value'][0]['Id'])\n execution_hist_detail = \"(\" + job_history_id + \")/ExecutionHistoryDetails\"\n job_hist_det_url = str(job_hist_url) + execution_hist_detail\n job_hist_det_resp = requests.get(job_hist_det_url,\n headers=headers,\n verify=False)\n if job_hist_det_resp.status_code == 200:\n print(job_hist_det_resp.text)\n else:\n print(\"Unable to parse job execution history .. 
Exiting\")\n\ndef get_job(ip_address, header, job_id):\n \"\"\" Get Job details \"\"\"\n url = 'https://{0}/api/JobService/Jobs({1})'.format(ip_address, job_id)\n pool = urllib3.HTTPSConnectionPool(ip_address, port=443, cert_reqs='CERT_NONE',\n assert_hostname=False)\n return pool.urlopen('GET', url, headers=header)\n\n\ndef get_baseline_id(ip_address, headers, id_repo, id_cat):\n \"\"\" Get Baseline id \"\"\"\n url = 'https://%s/api/UpdateService/Baselines' % ip_address\n status, data = request(ip_address=ip_address, url=url, header=headers)\n if status == 200:\n if data[\"@odata.count\"]:\n i = 0\n while i < len(data[\"value\"]):\n repo_data = data[\"value\"][i][\"RepositoryId\"]\n catalog_data = data[\"value\"][i][\"CatalogId\"]\n if id_repo == repo_data and id_cat == catalog_data:\n return id_repo, id_cat, data[\"value\"][i][\"Id\"], data[\"value\"][i][\"TaskId\"]\n if i == len(data[\"value\"]):\n print(\"unable to find the corresponding baseline\")\n return 0\n i += 1\n else:\n return 0\n print(\"unable to get baseline id\")\n return 0\n\n\t\ndef get_job_types(ip_address, header):\n \"\"\" Get job type \"\"\"\n url = \"https://{0}/api/JobService/JobTypes\".format(ip_address)\n return request(ip_address=ip_address, url=url, header=header)\n\n\ndef request(ip_address, url, header, payload=None, method='GET'):\n \"\"\" Returns status and data \"\"\"\n pool = urllib3.HTTPSConnectionPool(ip_address, port=443, cert_reqs='CERT_NONE',\n assert_hostname=False)\n request_obj = pool.urlopen(method, url, headers=header, body=json.dumps(payload))\n data = None\n if request_obj.data and request_obj.status != 400:\n data = json.loads(request_obj.data)\n else:\n data = request_obj.data\n return request_obj.status, data\n\n\ndef get_catalog_details(ip_address, headers):\n \"\"\" Get Catalog details \"\"\"\n url = 'https://%s/api/UpdateService/Catalogs' % ip_address\n catalog_response = requests.get(url, headers=headers, verify=False)\n if catalog_response.status_code == 200:\n catalog_json_response = catalog_response.json()\n if catalog_json_response['@odata.count'] > 0:\n i = 0\n while i < len(catalog_json_response[\"value\"]):\n if catalog_json_response[\"value\"][i].get(\"Repository\")[\"Source\"] == \"downloads.dell.com\":\n CATALOG_INFO[\"REPO_ID\"] = catalog_json_response[\"value\"][i].get(\"Repository\")[\"Id\"]\n CATALOG_INFO[\"CATALOG_ID\"] = catalog_json_response[\"value\"][i][\"Id\"]\n return CATALOG_INFO\n i += 1\n else:\n raise Exception(\"Not able to get Catalog details for baseline creation\")\n else:\n print(\"unable to get catalog details\")\n return 0\n\n\ndef get_group_details(ip_address, headers, group_id):\n \"\"\" Get group details from OME \"\"\"\n group_service_url = 'https://%s/api/GroupService/Groups(%s)' % (ip_address, group_id)\n group_response = requests.get(group_service_url, headers=headers, verify=False)\n if group_response.status_code == 200:\n group_json_response = group_response.json()\n if group_json_response['Id'] == group_id:\n group_type = group_json_response[\"TypeId\"]\n group_name = group_json_response[\"Name\"]\n return group_type, group_name\n raise Exception(\"Unable to find group id\")\n else:\n raise Exception(\"Unable to fetch group details\")\n\n\ndef get_device_details(ip_address, headers, device_id):\n \"\"\" Get device details from OME \"\"\"\n device_url = 'https://%s/api/DeviceService/Devices(%s)' % (ip_address, device_id)\n device_details_response = requests.get(device_url, headers=headers, verify=False)\n if device_details_response.status_code == 
200:\n device_details_json_response = device_details_response.json()\n if device_details_json_response['Id'] == device_id:\n device_type = device_details_json_response[\"Type\"]\n device_name = device_details_json_response[\"DeviceName\"]\n return device_type, device_name\n print(\"unable to find device id\")\n else:\n print(\"Unable to fetch device details\")\n return 0\n\n\ndef get_device_list(ip_address, headers):\n \"\"\" Get list of devices from OME \"\"\"\n ome_device_list = None\n device_url = 'https://%s/api/DeviceService/Devices' % ip_address\n device_response = requests.get(device_url, headers=headers, verify=False)\n if device_response.status_code == 200:\n dev_json_response = device_response.json()\n if dev_json_response['@odata.count'] > 0:\n ome_device_list = [x['Id'] for x in dev_json_response['value']]\n else:\n print(\"No devices found at \", ip_address)\n else:\n print(\"No devices found at \", ip_address)\n return ome_device_list\n\ndef refresh_compliance_data(ip_address, headers, baseline_job_id, id_baseline):\n\t\"\"\" Reruns baseline job to refresh inventory data \"\"\"\n\turl = 'https://%s/api/JobService/Actions/JobService.RunJobs' % ip_address\n\tpayload = {\n\t\t\"JobIds\": [10203]\n\t}\n\tpayload[\"JobIds\"][:] = []\n\tpayload[\"JobIds\"].append(baseline_job_id)\n\tprint(\"payload\",payload)\n\tstatus, data = request(ip_address=ip_address, url=url,\n\t\t\t\t\t\t\t\t\t\t\t header=headers, payload=payload, method='POST')\n\tif status != 204:\n\t\tjob_url = 'https://%s/api/JobService/Jobs(%s)' % (ip_address, baseline_job_id)\n\t\tjob_response = requests.get(job_url, headers=headers, verify=False)\n\t\tjob_status = job_response[\"LastRunStatus\"][\"Name\"]\n\t\tif job_status == \"Running\":\n\t\t\tprint(\"Baseline job is rerunning\")\n\t\t\ttrack_job_to_completion(ip_address, headers, baseline_job_id, 'Baseline job')\n\telif status == 204:\n\t\tprint(\"Baseline rerun job created\")\n\t\ttrack_job_to_completion(ip_address, headers, baseline_job_id, 'Baseline job')\n\t'''\n\ttime.sleep(10)\n\tfresh_compliance_list = check_device_compliance_report(ip_address, headers, id_baseline)\n\tif (len(fresh_compliance_list) == 0):\n\t\tprint(\"All components are compliant\")\n\telse:\n\t\tprint(\"Compliance refresh failed\")\n\t'''\n\t\n\t\t\ndef get_group_list(ip_address, headers):\n \"\"\" Get list of groups from OME \"\"\"\n group_list = None\n group_list_url = 'https://%s/api/GroupService/Groups' % ip_address\n group_response = requests.get(group_list_url, headers=headers, verify=False)\n if group_response.status_code == 200:\n group_response = group_response.json()\n if group_response['@odata.count'] > 0:\n group_list = [x['Id'] for x in group_response['value']]\n else:\n print(\"No groups found at \", ip_address)\n else:\n print(\"No groups found at \", ip_address)\n return group_list\n\n\ndef catalog_creation_payload():\n\t\"\"\"\n\t:return: dict representing the payload\n\t\"\"\"\n\treturn {\n \"Filename\": \"\",\n \"SourcePath\": \"\",\n \"Repository\": {\n \"Name\": 'Test' + time.strftime(\":%Y:%m:%d-%H:%M:%S\"),\n \"Description\": \"Factory test\",\n \"RepositoryType\": \"DELL_ONLINE\",\n \"Source\": \"downloads.dell.com\",\n \"DomainName\": \"\",\n \"Username\": \"\",\n \"Password\": \"\",\n \"CheckCertificate\": False\n }\n }\n\ndef baseline_creation_payload(id_cat, repository_id, target_id, target_type, target_name):\n \"\"\" Return payload for Baseline creation \"\"\"\n return {\n \"Name\": \"Factory Baseline\" + time.strftime(\":%Y:%m:%d-%H:%M:%S\"),\n 
\"Description\": \"Factory test1\",\n \"CatalogId\": id_cat,\n \"RepositoryId\": repository_id,\n \"DowngradeEnabled\": True,\n \"Is64Bit\": True,\n \"Targets\": [\n {\n \"Id\": target_id,\n \"Type\": {\n \"Id\": target_type,\n \"Name\": target_name\n }\n }\n ]\n }\n\n\ndef create_payload_for_firmware_update(job_type_id, id_baseline,\n id_cat, repository_id, target_data):\n \"\"\" Formulate the payload to initiate a firmware update job \"\"\"\n return {\n \"JobName\": \"Update Firmware-Test:\" + id_baseline,\n \"JobDescription\": \"Firmware Update Job\",\n \"Schedule\": \"startNow\",\n \"State\": \"Enabled\",\n \"JobType\": {\n \"Id\": job_type_id,\n \"Name\": \"Update_Task\"\n },\n \"Params\": [{\n \"Key\": \"complianceReportId\",\n \"Value\": id_baseline\n }, {\n \"Key\": \"repositoryId\",\n \"Value\": repository_id\n }, {\n \"Key\": \"catalogId\",\n \"Value\": id_cat\n }, {\n \"Key\": \"operationName\",\n \"Value\": \"INSTALL_FIRMWARE\"\n }, {\n \"Key\": \"complianceUpdate\",\n \"Value\": \"true\"\n }, {\n \"Key\": \"signVerify\",\n \"Value\": \"true\"\n }, {\n \"Key\": \"stagingValue\",\n \"Value\": \"false\"\n }],\n \"Targets\": target_data\n }\n\n\ndef catalog_deletion_payload(catalog_list):\n \"\"\" Returns payload to delete catalog \"\"\"\n return {\n \"CatalogIds\": catalog_list\n }\n\n\ndef baseline_deletion_payload(baseline_list):\n \"\"\" Returns payload to delete baseline \"\"\"\n return {\n \"BaselineIds\": baseline_list\n }\n\ndef rerun_baseline(ip_address, headers, baseline_job_id):\n\t\"\"\" Reruns baseline job to refresh inventory data \"\"\"\n\turl = 'https://%s/api/JobService/Actions/JobService.RunJobs'%ip_address\n\tpayload = {\n\t\t\"JobIds\": [10203]\n\t}\n\tpayload[\"JobIds\"][:] = []\n\tpayload[\"JobIds\"].append(baseline_job_id)\n\tstatus, data = request(ip_address=ip_address, url=url,\n\t\t\t\t\t\t\t\t\t\t\t header=headers, payload=payload, method='POST')\n\tif status != 204:\n\t\tjob_url = 'https://%s/api/JobService/Jobs(%s)' % (ip_address, baseline_job_id)\n\t\tjob_response = requests.get(job_url, headers=headers, verify=False)\n\t\t#track_job_to_completion(ip_address, headers, baseline_job_id, 'Baseline job')\n\t\tif job_response.status_code == 200:\n\t\t\tresponse = job_response.json()\n\t\t\tjob_status = response[\"LastRunStatus\"][\"Name\"]\n\t\t\tif job_status == \"Running\":\n\t\t\t\tprint(\"Baseline job is running\")\n\t\t\t\ttrack_job_to_completion(ip_address, headers, baseline_job_id, 'Baseline job')\n\telif status == 204:\n\t\tprint(\"Baseline rerun job created\")\n\t\ttrack_job_to_completion(ip_address, headers, baseline_job_id, 'Baseline job')\n\nif __name__ == '__main__':\n urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)\n PARSER = argparse.ArgumentParser(description=__doc__,\n formatter_class=RawTextHelpFormatter)\n PARSER.add_argument(\"--ip\", required=True, help=\"OME Appliance IP\")\n PARSER.add_argument(\"--user\", required=True,\n help=\"Username for OME Appliance\",\n default=\"admin\")\n PARSER.add_argument(\"--password\", required=True,\n help=\"Password for OME Appliance\")\n MUTEX_GROUP = PARSER.add_mutually_exclusive_group(required=True)\n MUTEX_GROUP.add_argument(\"--groupid\", type=int,\n help=\"Id of the group to update\")\n MUTEX_GROUP.add_argument(\"--deviceid\", type=int,\n help=\"Id of the device to update\")\n ARGS = PARSER.parse_args()\n IP_ADDRESS = ARGS.ip\n USER_NAME = ARGS.user\n PASSWORD = ARGS.password\n PARAM_MAP = {}\n TARGET_DATA = []\n try:\n AUTH_SUCCESS, HEADERS = 
authenticate_with_ome(IP_ADDRESS, USER_NAME,\n PASSWORD)\n if AUTH_SUCCESS:\n if ARGS.groupid:\n GROUP_ID = ARGS.groupid\n PARAM_MAP['group_id'] = GROUP_ID\n PARAM_MAP['device_id'] = None\n GROUP_LIST = get_group_list(IP_ADDRESS, HEADERS)\n if GROUP_LIST:\n if GROUP_ID in GROUP_LIST:\n GROUP_URL = \"https://%s/api/GroupService/Groups(%s)/Devices\"%(IP_ADDRESS, GROUP_ID)\n RESPONSE = requests.get(GROUP_URL, headers=HEADERS, verify=False)\n if RESPONSE.status_code == 200:\n DEV_RESPONSE = RESPONSE.json()\n if DEV_RESPONSE['@odata.count'] == 0:\n raise Exception(\"No devices associated with this group id\")\n else:\n raise Exception(\"Unable to fetch group device details\")\n else:\n raise ValueError(\"Group %s not found on %s ... Exiting\" % (\n GROUP_ID, IP_ADDRESS))\n else:\n DEVICE_ID = ARGS.deviceid\n PARAM_MAP['device_id'] = DEVICE_ID\n PARAM_MAP['group_id'] = None\n DEVICE_LIST = get_device_list(IP_ADDRESS, HEADERS)\n if DEVICE_LIST:\n if DEVICE_ID in DEVICE_LIST:\n pass\n else:\n raise ValueError(\"Device %s not found on %s ... Exiting\" % (\n DEVICE_ID, IP_ADDRESS))\n CATALOG_ID = catalog_creation(ip_address=IP_ADDRESS, headers=HEADERS)\n if CATALOG_ID:\n print(\"Successfully created the catalog\")\n else:\n raise Exception(\"Unable to create Catalog\")\n REPO_ID, ID_CATALOG, BASELINE_ID, BASELINE_JOB_ID = baseline_creation(ip_address=IP_ADDRESS,\n headers=HEADERS,\n param_map=PARAM_MAP)\n if BASELINE_ID == 0:\n raise Exception(\"Unable to create baseline\")\n elif BASELINE_ID != 0:\n print(\"Successfully created baseline\")\n COMPLIANCE_LIST = check_device_compliance_report(ip_address=IP_ADDRESS, headers=HEADERS,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t id_baseline=BASELINE_ID)\n print(\"Compliance List: %s\"%COMPLIANCE_LIST)\n if COMPLIANCE_LIST:\n TARGET_PAYLOAD = create_target_payload(compliance_data_list=COMPLIANCE_LIST)\n #sys.exit(0)\n if TARGET_PAYLOAD != 0:\n firmware_update(ip_address=IP_ADDRESS, headers=HEADERS, repository_id=REPO_ID,\n id_cat=ID_CATALOG,\n id_baseline=BASELINE_ID, target_data=TARGET_PAYLOAD)\n #Initiate compliance refresh\n refresh_compliance_data(ip_address=IP_ADDRESS, headers=HEADERS,\n baseline_job_id=BASELINE_JOB_ID, id_baseline=BASELINE_ID)\n else:\n print(\"No components found for upgrade\")\n else:\n print(\"No components found for upgrade...skipping firmware upgrade\")\n else:\n print(\"Unable to authenticate with OME .. 
Check IP/Username/Pwd\")\n except OSError:\n print(\"Unexpected error:\", sys.exc_info())\n","sub_path":"Scripts/Python/update_firmware_using_catalog.py","file_name":"update_firmware_using_catalog.py","file_ext":"py","file_size_in_byte":29716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"88091322","text":"from os.path import dirname, join\nimport re\nfrom languageflow.util.file_io import read\nimport pandas as pd\nimport re\n\n\ndef transform(s):\n sentence = {}\n sentence[\"text\"] = s.split(\"\\n\")[1]\n sentiments = s.split(\"\\n\")[2]\n sentiments_ = re.split(\"}, +{\", sentiments)\n sentiments__ = [re.sub(r\"[{}]\", \"\", item) for item in sentiments_]\n labels = [item.upper().replace(\", \", \"#\") for item in sentiments__]\n sentence[\"labels\"] = labels\n return sentence\n\n\ndef convert_to_corpus(sentences, corpus_file):\n data = []\n labels = list(set(sum([s[\"labels\"] for s in sentences], [])))\n for s in sentences:\n item = {}\n item[\"text\"] = s[\"text\"]\n for label in labels:\n if label in s[\"labels\"]:\n item[label] = 1\n else:\n item[label] = 0\n data.append(item)\n df = pd.DataFrame(data)\n columns = [\"text\"] + labels\n df.to_excel(corpus_file, index=False, columns=columns)\n\n\nif __name__ == '__main__':\n data = read(join(dirname(__file__), \"raw\", \"hotel\", \"1-VLSP2018-SA-hotel-train (7-3-2018).txt\")).split(\"\\n\\n\")\n corpus_file = join(dirname(__file__), \"corpus\", \"hotel\", \"train.xlsx\")\n sentences = [transform(item) for item in data]\n convert_to_corpus(sentences, corpus_file)\n\n sentences = read(join(dirname(__file__), \"raw\", \"hotel\", \"2-VLSP2018-SA-hotel-dev (7-3-2018).txt\")).split(\"\\n\\n\")\n corpus_file = join(dirname(__file__), \"corpus\", \"hotel\", \"dev.xlsx\")\n sentences = [transform(s) for s in sentences]\n convert_to_corpus(sentences, corpus_file)\n\n data = read(join(dirname(__file__), \"raw\", \"restaurant\", \"1-VLSP2018-SA-Restaurant-train (7-3-2018).txt\")).split(\n \"\\n\\n\")\n corpus_file = join(dirname(__file__), \"corpus\", \"restaurant\", \"train.xlsx\")\n sentences = [transform(item) for item in data]\n convert_to_corpus(sentences, corpus_file)\n\n sentences = read(join(dirname(__file__), \"raw\", \"restaurant\", \"2-VLSP2018-SA-Restaurant-dev (7-3-2018).txt\")).split(\n \"\\n\\n\")\n corpus_file = join(dirname(__file__), \"corpus\", \"restaurant\", \"dev.xlsx\")\n sentences = [transform(s) for s in sentences]\n convert_to_corpus(sentences, corpus_file)\n\n test_gold_data = read(join(dirname(__file__), \"raw\", \"hotel\", \"3-VLSP2018-SA-Hotel-test-eval-gold-data (8-3-2018).txt\")).split(\n \"\\n\\n\")\n corpus_file_gold_data = join(dirname(__file__), \"corpus\", \"hotel\", \"test-gold-data.xlsx\")\n test_gold_data = [transform(item) for item in test_gold_data]\n convert_to_corpus(test_gold_data, corpus_file_gold_data)\n\n test_gold_data = read(\n join(dirname(__file__), \"raw\", \"restaurant\", \"3-VLSP2018-SA-Restaurant-test-eval-gold-data (8-3-2018).txt\")).split(\n \"\\n\\n\")\n corpus_file_gold_data = join(dirname(__file__), \"corpus\", \"restaurant\", \"test-gold-data.xlsx\")\n test_gold_data = [transform(item) for item in test_gold_data]\n convert_to_corpus(test_gold_data, corpus_file_gold_data)\n","sub_path":"data/vlsp2018/preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":2961,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"237584377","text":"\n#%%\nimport 
torch.utils.data as data\nimport os, os.path\nimport re\nfrom PIL import Image\nimport numpy as np\nimport torch\nimport json\n\nfrom config import *\nfrom alphabet import Alphabet\n\n\nclass LipsDataset(data.Dataset):\n    \"\"\"Lips custom Dataset\"\"\"\n\n    def __init__(self, frame_dir):\n        self.frame_dir = frame_dir\n        self.alphabet = Alphabet()\n        # self.words = [name for name in os.listdir(FRAME_DIR)]\n\n        # walk through the video folders end to end\n        self.words = []\n        for root, dirs, files in os.walk(self.frame_dir):\n            if not dirs:\n                self.words.append(root)\n\n        # print('root: ', root)\n        # print('dirs: ', dirs)\n        # print('files: ', files)\n        # print(self.words)\n        self.count = 0\n\n    def __len__(self):\n        return len(self.words)\n\n    def __getitem__(self, index):\n\n        # load all the frames for the word\n        curr_dir = self.words[index]\n        frames_list = [name for name in os.listdir(curr_dir) if not re.match(r'__', name)]\n        if len(frames_list) < COUNT_FRAMES:\n            #print(frames_list)\n\n            is_valid = False\n        else:\n            is_valid = True\n\n        frames = np.zeros((len(frames_list), 120, 120))\n        count = 0\n        for frame in frames_list:\n            frame = np.array(Image.open(os.path.join(curr_dir, frame)).convert(mode='L').getdata()).reshape((120, 120))\n            frames[count] = frame\n            count += 1\n        frames = torch.from_numpy(frames)\n\n        # split into batches\n        if is_valid:\n            frames = make_batches(frames)\n\n        # load the subtitles\n        subs_path = [name for name in os.listdir(curr_dir) if re.match(r'__', name)][0]\n        with open(os.path.join(curr_dir, subs_path), 'r') as subs_file:\n            subs = str(json.loads(subs_file.read())['word']).lower()\n        characters = list()\n        characters.append(self.alphabet.ch2index(''))\n        for ch in subs:\n            if self.alphabet.ch2index(ch) is None:\n                is_valid = False\n                break\n            characters.append(self.alphabet.ch2index(ch))\n        characters.append(self.alphabet.ch2index(''))\n\n        targets = torch.LongTensor(characters)\n        #print('get_item - targets: ', targets)\n        return frames, targets, is_valid\n\n\ndef collate_fn(data):\n    frames, targets, is_valid = zip(*data)\n\n    # print('collate_fn - raw targets: ', targets)\n    #print('collate_fn - raw frames shape: ', frames[0].shape)\n\n    targets_lengths = [len(target) for target in targets]\n    # print('collate_fn - targets_lengths: ', targets_lengths)\n    batch_targets = torch.zeros(len(targets), max(targets_lengths)).long()\n    for i, target in enumerate(targets):\n        end = targets_lengths[i]\n        batch_targets[i, :end] = target[:end]\n    # print('collate_fn - batch_targets: ', batch_targets)\n\n    frames_lengths = [frame.shape[0] for frame in frames]\n    # print('collate_fn - frames_lengths: ', frames_lengths)\n    batch_frames = torch.zeros(len(frames), max(frames_lengths), COUNT_FRAMES, 120, 120).long()\n    for i, frame in enumerate(frames):\n        end = frames_lengths[i]\n        batch_frames[i, :end] = frame[:end]\n    # print('collate_fn - batch_frames: ', batch_frames.shape)\n\n    return batch_frames, batch_targets # batch_targets.shape = BATCH_SIZE*max_targets_length\n    # batch_frames.shape = BATCH_SIZE*max_frames_length*COUNT_FRAMES*120*120\n\n\ndef get_loader(frame_dir):\n\n    lips_dataset = LipsDataset(frame_dir)\n    data_loader = torch.utils.data.DataLoader(dataset=lips_dataset, num_workers=12,\n                                              collate_fn=collate_fn, batch_size=BATCH_SIZE, drop_last=True)\n    # print(data_loader)\n    return data_loader\n\ndef get_loader_evaluate(frame_dir):\n\n    lips_dataset = LipsDataset(frame_dir)\n    data_loader = torch.utils.data.DataLoader(dataset=lips_dataset, num_workers=4)\n    # print(data_loader)\n    return data_loader\n\n\n\n\ndef make_batches(data_tensor, 
COUNT_FRAMES=COUNT_FRAMES):\n    new_size = data_tensor.shape[0] - COUNT_FRAMES + 1\n    # print('new size: ', new_size)\n    new_data_tensor = torch.FloatTensor(new_size, COUNT_FRAMES, 120, 120).zero_()\n    # print(new_data_tensor)\n    for i in range(new_size):\n        # sliding window of COUNT_FRAMES consecutive frames\n        new_data_tensor[i] = data_tensor[i:i+COUNT_FRAMES]\n    # print(new_data_tensor)\n    return new_data_tensor\n\n\n","sub_path":"data_loader.py","file_name":"data_loader.py","file_ext":"py","file_size_in_byte":4484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"}
+{"seq_id":"421705492","text":"import config\nfrom common.gAPI import GoogleAPI\nfrom common.DBOperations import DatabaseOp\nfrom Edelweiss.scrapEd import ScrapData\nimport time\nimport warnings\nwarnings.filterwarnings(\"ignore\")\nimport os\nimport numpy as np\nimport pandas as pd\n\nclass HelpEdDB:\n\n    def __init__(self):\n        self.objDB = DatabaseOp()\n        self.objGAPI = GoogleAPI()\n        self.objDBOP = DatabaseOp()\n\n    def get_sd_from_prev_day(self, scripName, table_name):\n        sd = 0\n        try:\n            conn = self.objDB.create_connection()\n            #query = 'SELECT ScrapedDate FROM {} WHERE ScripName=? ORDER BY StrTradeDateTime DESC LIMIT 1'.format(table_name)\n            #'SELECT MinuteOI FROM {} WHERE ScripName=? AND ScrapedDate=? AND MinuteOI > ?'.format(table_name)\n            query = 'SELECT ChangeCOI FROM {} WHERE ScripName=? AND ChangeCOI > ?'.format(table_name)\n            cur = conn.cursor()\n            cur.execute(query, [scripName, '0.0'])\n            data = cur.fetchall()\n            data = [float(x[0]) for x in data if float(x[0]) != 0.0]\n            sd = np.std(data)\n            return sd, True\n        except Exception as e:\n            print('Exception in SD calculation:', e)\n            return sd, False\n\n    def DB2CSV(self, scripName, table_name):\n        try:\n            conn = self.objDB.create_connection()\n            cur = conn.cursor()\n            #query = 'SELECT StrTradeDateTime FROM {} WHERE ScripName=? ORDER BY StrTradeDateTime DESC LIMIT 1'.format(table_name)\n            query = 'SELECT * FROM {} WHERE ScripName=?'.format(table_name)\n            cur.execute(query, [scripName])\n            data = cur.fetchall()\n\n            columns = ['ID', 'ScrapedDate', 'ScripName', 'IndexORStocks', 'StrikePrice', 'OptionType', 'StrTradeDateTime', 'TradeDateTime', 'ExpiryDate', 'OI',\n                       'COI', 'IV', 'VOL', 'MinuteOI', 'Flag']\n            df = pd.DataFrame(data, columns=columns)\n            # df.to_csv(os.getcwd() + '/Edelweiss/csv/' + file_name, index=False)\n            df = df.sort_values([\"ScrapedDate\", \"StrTradeDateTime\"], ascending=(False, False))\n            return df, True\n            # print(df.head())\n        except Exception as e:\n            print('Exception in converting db to csv:', e)\n            return 0, False\n\n    def createTable(self, conn, stocksORindicesExpiryDates):\n        try:\n            for dt in stocksORindicesExpiryDates:\n                dt = dt.replace(' ', '_')\n                self.objDBOP.create_table(conn, config.TableName + dt)\n        except Exception as e:\n            print('Exception in creating Table:', e)\n\n    def InsertThreshold(self, conn, ScripName, ExpiryDate, Threshold):\n\n        #que = 'SELECT Threshold FROM Threshold WHERE ExpiryDate=? AND ScripName=?' 
#sqlite\n que = \"SELECT Threshold FROM Threshold WHERE ExpiryDate='\"+str(ExpiryDate)+\"' AND ScripName='\"+str(ScripName)+\"'\" #mysql\n cur = conn.cursor()\n #cur.execute(que, [ExpiryDate, ScripName]) #SQLITE\n cur.execute(que)\n rows = cur.fetchone()\n if rows == None:\n self.objDBOP.insertThreshold(conn, ScripName, ExpiryDate, Threshold)\n else:\n self.objDBOP.updateThreshold(conn, ScripName, ExpiryDate, Threshold)\n\n\n def create_tables(self,expiry_date_stocks, expiry_date_indices_monthly, expiry_date_indices_weekly):\n try:\n name_of_file = config.DB_Name\n print(\"name_of_file============\",name_of_file)\n #file_id = self.objGAPI.search_file(name_of_file, \"text/csv\", '1llZZacQjhf2iNPjjpCBSSD4AdKFc5Con',#True)\n #if file_id == 0:\n # Create new DB\n ## Jitendra Changes\n #self.objDBOP.create_mysql_database(config.DB_Name)\n conn = self.objDBOP.connect2Mysql()\n print(\"conn====\",conn)\n #exit()\n\n ####\n #conn = self.objDBOP.create_connection()\n # Create Tables as per Expiry dates\n self.createTable(conn, expiry_date_stocks)\n self.createTable(conn, expiry_date_indices_monthly)\n self.createTable(conn, expiry_date_indices_weekly)\n # else:\n # file_to_save = os.getcwd() + '/DB/' + config.DB_Name\n # self.objGAPI.download_files(service, file_to_save, file_id, False)\n # conn = self.objDBOP.create_connection()\n # # Create Tables as per Expiry dates\n # self.createTable(conn, expiry_date_stocks)\n # self.createTable(conn, expiry_date_indices_monthly)\n # self.createTable(conn, expiry_date_indices_weekly)\n return True\n except Exception as e:\n print('Exception in downloading DB:', e)\n return False\n\n def downLoadAllCSV(self, service, Ndict, expiry_date_stocks, expiry_date_indices_monthly, expiry_date_indices_weekly, sessionRestart):\n try:\n for key, value in Ndict.items():\n if value == 'FALSE':\n for f in expiry_date_indices_monthly:\n f = f.replace(' ', '_')\n name_of_file = str(key) + \"_\" + str(f) + \".csv\"\n file_saved_as = os.getcwd() + \"/Edelweiss/d_csv/\" + str(key) + \"_\" + str(f) + \".csv\"\n file_id = self.objGAPI.search_file(service, name_of_file, \"text/csv\", '1GLA0S461C1yAc47jMXdwxBdoAWX9onbA')\n if file_id != 0:\n self.objGAPI.download_files(service, file_saved_as, file_id, False)\n if sessionRestart == 'yes':\n table_name = config.TableName + '_' + f\n self.CSV2SQL(file_saved_as, table_name)\n\n for f in expiry_date_indices_weekly:\n f = f.replace(' ', '_')\n name_of_file = str(key) + \"_\" + str(f) + \".csv\"\n file_saved_as = os.getcwd() + \"/Edelweiss/d_csv/\" + str(key) + \"_\" + str(f) + \".csv\"\n\n file_id = self.objGAPI.search_file(service, name_of_file, \"text/csv\", '1GLA0S461C1yAc47jMXdwxBdoAWX9onbA')\n if file_id != 0:\n self.objGAPI.download_files(service, file_saved_as, file_id, False)\n if sessionRestart == 'yes':\n table_name = config.TableName + '_' + f\n self.CSV2SQL(file_saved_as, table_name)\n else:\n for f in expiry_date_stocks:\n f = f.replace(' ', '_')\n name_of_file = str(key) + \"_\" + str(f) + \".csv\"\n file_saved_as = os.getcwd() + \"/Edelweiss/d_csv/\" + str(key) + \"_\" + str(f) + \".csv\"\n file_id = self.objGAPI.search_file(service, name_of_file, \"text/csv\", '1GLA0S461C1yAc47jMXdwxBdoAWX9onbA')\n if file_id != 0:\n self.objGAPI.download_files(service, file_saved_as, file_id, False)\n if sessionRestart == 'yes':\n table_name = config.TableName + '_' + f\n self.CSV2SQL(file_saved_as, table_name)\n\n except Exception as e:\n print('Exception in Downloading all CSVs:', e)\n\n\n def CSV2SQL(self, file_saved_as, table_name):\n 
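# Read the downloaded CSV (first column as index) and append its rows to table_name.\n        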
try:\n            df = pd.read_csv(file_saved_as, index_col=0)\n            conn = self.objDB.create_connection()\n            df.to_sql(table_name, conn, if_exists='append', index=False)\n            conn.close()\n        except Exception as e:\n            print('Exception in converting CSV to SQL:', e)\n\n\n\n\n\n","sub_path":"Edelweiss_MYSQL_DB/Edelweiss/helpEdDB.py","file_name":"helpEdDB.py","file_ext":"py","file_size_in_byte":7721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"}
+{"seq_id":"266749918","text":"'''\nGiven a string containing only the digits 2-9, return all the letter combinations that it could represent.\n'''\n\n\nclass Solution:\n    def letterCombinations(self, digits):\n        mapDict = {'2': list('abc'),\n                   '3': list('def'),\n                   '4': list('ghi'),\n                   '5': ['j', 'k', 'l'],\n                   '6': ['m', 'n', 'o'],\n                   '7': ['p', 'q', 'r', 's'],\n                   '8': ['t', 'u', 'v'],\n                   '9': ['w', 'x', 'y', 'z']}\n        res = []\n\n        def helper(coordinate, digits_temp):\n            if not digits_temp:\n                res.append(coordinate)\n            else:\n                for digit in mapDict[digits_temp[0]]:\n                    helper(coordinate + digit, digits_temp[1:])\n\n        if digits:\n            helper(\"\", list(digits))\n        return res\n","sub_path":"LeetCode17.py","file_name":"LeetCode17.py","file_ext":"py","file_size_in_byte":830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"}
+{"seq_id":"54689007","text":"from pages.login_page import LoginPage\nfrom pages.navigator_page import NavigatePage\nfrom pages.find_existing_value_page import FindExistingValuePage\nfrom pages.add_new_value_page import SupplierInformationANV\nfrom pages.summary_page import SummaryPage\nfrom pages.identifying_information_page import IdentifyingInformationPage\nfrom pages.address_page import AddressPage\nfrom pages.clean_address_page import CleanAddressPage\nfrom pages.contacts_page import ContactsPage\nfrom pages.location_page import LocationPage\nfrom popup_windows.supplier_xref_window import SupplierXrefWindow\nfrom popup_windows.procurement_options_window import ProcurementOptionsWindow\nfrom utilities.tests_status import TestStatus\n\nimport pytest\nimport unittest\nfrom ddt import ddt, data, unpack\n\n\n@pytest.mark.usefixtures(\"one_time_setup\", \"setup\")\n@ddt\nclass TestForeignBVMultiLocationsMultiLogons(unittest.TestCase):\n\n    @pytest.fixture(autouse=True)\n    def class_setup(self):\n        self.ts = TestStatus(self.driver)\n        self.lp = LoginPage(self.driver)\n        self.nav = NavigatePage(self.driver)\n        self.sup_info_fev = FindExistingValuePage(self.driver)\n        self.sup_info_anv = SupplierInformationANV(self.driver)\n        self.summary = SummaryPage(self.driver)\n        self.id_info = IdentifyingInformationPage(self.driver)\n        self.addr = AddressPage(self.driver)\n        self.clean_addr = CleanAddressPage(self.driver)\n        self.contacts = ContactsPage(self.driver)\n        self.loc = LocationPage(self.driver)\n        self.procurement = ProcurementOptionsWindow(self.driver)\n        self.sup_xref = SupplierXrefWindow(self.driver)\n\n    @pytest.mark.run(order=1)\n    # @data((os.environ.get('PSFT_USER_ID'), \"wrongpassword\"))\n    @data((\"AUTOTEST3\", \"wrongpassword\"))\n    @unpack\n    def test_invalid_password(self, username, password):\n        self.lp.login(username, password)\n        result = self.lp.verify_login_failed()\n        self.ts.mark(result, \"Login Failed!\\n\")\n\n    @pytest.mark.run(order=2)\n    # @data((os.environ.get('PSFT_USER_ID'), os.environ.get('PSFT_USER_PWD')))\n    @data((\"AUTOTEST3\", \"Psoft1234!\"))\n    @unpack\n    def test_foreign_master_and_branch_vendor_creation_multi_loc_multi_logon(self, username, password):\n        # Login into PeopleSoft with CREATOR credentials\n        self.lp.login(username, password)\n        result_1 = 
self.lp.verify_title()\n self.ts.mark(result_1, \"Title is CORRECT\")\n self.nav.navigate_to_supplier_info()\n self.sup_info_fev.add_a_new_value()\n self.sup_info_anv.click_add_button()\n\n \"\"\" IDENTIFYING INFORMATION \"\"\"\n self.id_info.enter_identifying_info()\n\n \"\"\" ADDRESS(ES)\"\"\"\n \"\"\" FOREIGN CORPORATE INFO ADDRESS \"\"\"\n self.id_info.click_address_tab()\n self.clean_addr.clean_canadian_address(\"CANADA\", \"Corporate Info\")\n self.addr.enter_email_id()\n # self.addr.enter_payment_withholding_alt_names()\n self.addr.enter_business_phone()\n self.addr.enter_fax()\n self.addr.enter_trilogie_dm_fax()\n\n \"\"\" FOREIGN REMIT INFORMATION \"\"\"\n self.addr.click_add_new_address_btn()\n self.clean_addr.clean_brazilian_address(\"BRAZIL\", \"Remit\")\n self.addr.enter_email_id()\n # self.addr.enter_payment_withholding_alt_names()\n self.addr.enter_business_phone()\n self.addr.enter_fax()\n self.addr.enter_trilogie_dm_fax()\n\n \"\"\" FOREIGN TRILOGIE PO ADDRESS ONE \"\"\"\n self.addr.click_add_new_address_btn()\n self.clean_addr.clean_singapore_address(\"SINGAPORE\", \"Trilogie PO Address\")\n self.addr.enter_email_id()\n # self.addr.enter_payment_withholding_alt_names()\n self.addr.enter_business_phone()\n self.addr.enter_fax()\n self.addr.enter_trilogie_dm_fax()\n\n \"\"\" CONTACTS PAGE\"\"\"\n self.addr.click_contacts_tab()\n self.contacts.enter_contacts_details(\"Testing Contacts\", \"Accountant\")\n\n \"\"\" ADD LOCATIONS AND BRANCH VENDORS \"\"\"\n \"\"\" Add a LOC_1 \"\"\"\n self.addr.click_location_tab()\n self.loc.add_location(\"LOC_1\", \"Remit to LOC_1\")\n\n # Add Procurement Options\n self.loc.click_procurement_link()\n self.procurement.change_ordering_address(\"3\")\n self.procurement.change_returning_address(\"3\")\n self.procurement.change_ship_from_address(\"3\")\n self.procurement.select_payment_terms_id(\"NET30\")\n\n # Add Branch Vendor(s)\n self.loc.click_fei_trilogie_xref_link()\n self.sup_xref.select_two_accounts(\"HOUSTONWW\", \"LAHVAC\")\n\n \"\"\" Add LOC_2 \"\"\"\n self.loc.click_add_location_btn()\n self.loc.add_location(\"LOC_2\", \"Remit to LOC_2\")\n\n # Add Procurement Options\n self.loc.click_procurement_link()\n self.procurement.select_payment_terms_id(\"NET60\")\n\n # Add Branch Vendor(s)\n self.loc.click_fei_trilogie_xref_link()\n self.sup_xref.select_two_accounts(\"OHIOHVAC\", \"PLYMOUTH\")\n\n \"\"\" Add LOC_3 \"\"\"\n self.loc.click_add_location_btn()\n self.loc.add_location(\"LOC_3\", \"Remit to LOC_3\")\n\n # Add Procurement Options\n self.loc.click_procurement_link()\n self.procurement.change_ordering_address(\"2\")\n self.procurement.change_returning_address(\"2\")\n self.procurement.change_ship_from_address(\"2\")\n self.procurement.select_payment_terms_id(\"NET90\")\n\n # Add Branch Vendor(s)\n self.loc.click_fei_trilogie_xref_link()\n self.sup_xref.select_two_accounts(\"SACRAMENTO\", \"SANTAROSAWW\")\n\n \"\"\" Save record \"\"\"\n self.loc.click_save_btn()\n self.loc.click_summary_tab()\n\n self.summary.get_supplier_id()\n self.summary.search_for_created_supplier()\n\n result2 = self.summary.verify_supplier_id_created()\n self.ts.mark(result2, \"Successfully Created Foreign Master Vendor.\\n\")\n\n @pytest.mark.run(order=3)\n def test_sign_out(self):\n self.summary.sign_out_summary_page()\n\n result = self.lp.verify_title_of_log_out_page()\n self.ts.mark_final(\"Test Create Master and Branch Vendor\", result, \"Successfully Signed Out of Application.\\n\")\n\n # @pytest.mark.run(order=4)\n # @data((\"AUTOTEST4\", 
\"Psoft1234!\"))\n # @unpack\n # def test_adding_bank_account_data(self, username, password):\n # self.lp.login(username, password)\n # result_1 = self.lp.verify_title()\n # self.ts.mark(result_1, \"Title is CORRECT\\n\")\n #\n # self.nav.navigate_to_supplier_info()\n #\n # self.driver.switch_to.frame(\"ptifrmtgtframe\")\n #\n # self.sup_info_fev.search_for_supplier(\"0003015044\")\n #\n # self.summary.click_correct_history_btn()\n # self.summary.click_location_tab()\n #\n # self.loc.click_payables_link()\n # self.payable_options.enter_supplier_bank_account_details()\n #\n # \"\"\" Preview Audit, Enter Reason Codes/Comments and Finalize \"\"\"\n # self.preview.close_preview_supplier_audit_window_ok()\n #\n # result_1 = self.lp.verify_title_of_log_out_page()\n # # self.ts.mark(result_1, \"Successfully Signed Out of Application.\\n\")\n # self.ts.mark_final(\"Test Create Master and Branch Vendor\", result_1,\n # \"Successfully added Banking Information to Master Vendor.\\n\")\n","sub_path":"tests/create_branch_vendor/bv_for_multi_loc_multi_logons_test.py","file_name":"bv_for_multi_loc_multi_logons_test.py","file_ext":"py","file_size_in_byte":7322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"542474714","text":"import os\nimport copy\nimport torch\nimport pickle\nimport random\nimport numpy as np\nfrom rl_algorithms.algorithms import DetectAlgorithm\nfrom rl_algorithms.replay_buffer import ReplayBuffer\n\n\nclass Agent(object):\n def __init__(self, args):\n self.args = args\n torch.manual_seed(self.args.seed)\n np.random.seed(self.args.seed)\n random.seed(self.args.seed)\n torch.cuda.manual_seed_all(self.args.seed)\n torch.backends.cudnn.deterministic = True\n torch.backends.cudnn.benchmark = True\n self.solver = DetectAlgorithm(args)\n self.replay_buffer = ReplayBuffer(args)\n self.env_train = args.env\n self.env_eval = copy.deepcopy(args.env)\n self.attention_buffer = []\n self.t = 0\n\n def do_train(self):\n self.solver.train(self.replay_buffer)\n\n def get_action_train(self, pre_gru_ah, pre_gru_ch, pre_act, cur_obs):\n \"\"\"\n :param pre_gru_ah:\n :param pre_gru_ch:\n :param pre_act:\n :param cur_obs:\n :return:\n \"\"\"\n self.t += 1\n pre_gru_ah_tensor = torch.tensor(pre_gru_ah, dtype=torch.float, device=self.args.device).reshape(1, -1)\n pre_gru_ch_tensor = torch.tensor(pre_gru_ch, dtype=torch.float, device=self.args.device).reshape(1, -1)\n pre_act_tensor = torch.tensor(pre_act, dtype=torch.float, device=self.args.device).reshape(1, -1)\n cur_obs_tensor = torch.tensor(cur_obs, dtype=torch.float, device=self.args.device).reshape(1, -1)\n cur_gru_ah, dist = self.solver.actor(pre_gru_ah_tensor, pre_act_tensor, cur_obs_tensor)\n cur_act = dist.sample()\n\n cur_gru_ch, _ = self.solver.critic.Q1(pre_gru_ch_tensor, pre_act_tensor, cur_obs_tensor, cur_act)\n cur_act_np = cur_act.detach().cpu().numpy().squeeze(0)\n cur_gru_ah_np = cur_gru_ah.detach().cpu().numpy().squeeze(0)\n cur_gru_ch_np = cur_gru_ch.detach().cpu().numpy().squeeze(0)\n return cur_gru_ah_np, cur_gru_ch_np, cur_act_np\n\n def get_action_eval(self, pre_gru_ah, pre_act, cur_obs):\n \"\"\"\n :param pre_gru_ah:\n :param pre_act:\n :param cur_obs:\n :return:\n \"\"\"\n pre_gru_ah_tensor = torch.tensor(pre_gru_ah, dtype=torch.float, device=self.args.device).reshape(1, -1)\n pre_act_tensor = torch.tensor(pre_act, dtype=torch.float, device=self.args.device).reshape(1, -1)\n cur_obs_tensor = torch.tensor(cur_obs, dtype=torch.float, device=self.args.device).reshape(1, -1)\n 
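# Evaluation path: query the recurrent actor, then take the tanh-squashed mean of the distribution (deterministic) instead of sampling.\n        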
cur_gru_ah, dist = self.solver.actor(pre_gru_ah_tensor, pre_act_tensor, cur_obs_tensor)\n cur_act = torch.tanh(dist.normal_mean)\n cur_act_np = cur_act.detach().cpu().numpy().squeeze(0)\n cur_gru_ah_np = cur_gru_ah.detach().cpu().numpy().squeeze(0)\n self.attention_buffer.append(self.solver.actor.actor_attention_weight)\n return cur_gru_ah_np, cur_act_np\n\n def eval_process(self, eval_episodes=10, cur_step=None):\n avg_reward = 0\n t = 0\n for itr in range(eval_episodes):\n self.attention_buffer = []\n cur_obs, done = self.env_eval.reset(), False\n pre_gru_ah, _, pre_act = self.solver.get_init_hidden()\n while not done:\n cur_gru_ah, cur_act = self.get_action_eval(pre_gru_ah, pre_act, cur_obs)\n next_obs, reward, done, _ = self.env_eval.step(cur_act)\n pre_gru_ah, pre_act = cur_gru_ah, cur_act\n cur_obs = next_obs\n avg_reward += reward\n t += 1\n if cur_step is not None:\n save_file = open(\n f'./results/{self.args.exp_name}/policy_{cur_step}_attention_{itr}.pkl', 'wb')\n pickle.dump(np.asarray(self.attention_buffer), save_file)\n avg_reward /= eval_episodes\n t /= eval_episodes\n print(\"---------------------------------------\"*4)\n print(f\"Evaluation over {eval_episodes}, time step: {t: .3f}, episodes: {avg_reward:.3f}\")\n print(\"---------------------------------------\"*4)\n return avg_reward\n\n def load(self, file_name, num_itr):\n self.solver.load(file_name, num_itr)\n\n def training_process(self):\n self.env_train.seed(self.args.seed)\n self.env_eval.seed(self.args.seed + 100)\n evaluations = [self.eval_process(self.args.eval_episodes)]\n\n cur_obs, done = self.env_train.reset(), False\n pre_gru_ah, pre_gru_ch, pre_act = self.solver.get_init_hidden()\n episode_reward, episode_timesteps, episode_num = 0, 0, 0\n for t in range(int(self.args.max_timesteps)):\n episode_timesteps += 1\n cur_gru_ah, cur_gru_ch, cur_act = self.get_action_train(pre_gru_ah, pre_gru_ch, pre_act, cur_obs)\n next_obs, reward, done, _ = self.env_train.step(cur_act)\n\n # todo: store data in replay buffer\n self.replay_buffer.add_sample(pre_gru_ah, pre_gru_ch, pre_act, cur_obs, cur_act, cur_gru_ah,\n cur_gru_ch, next_obs, reward, done, episode_timesteps)\n pre_gru_ah, pre_gru_ch, pre_act = cur_gru_ah, cur_gru_ch, cur_act\n cur_obs = next_obs\n episode_reward += reward\n\n if t >= self.args.start_timesteps:\n self.do_train()\n\n if done:\n print(f\"Total T: {t + 1} Episode Num: {episode_num + 1} Episode T: {episode_timesteps} Reward: {episode_reward:.3f}\")\n cur_obs, done = self.env_train.reset(), False\n pre_gru_ah, pre_gru_ch, pre_act = self.solver.get_init_hidden()\n episode_reward, episode_timesteps, episode_num = 0, 0, 0\n\n if (t+1) % self.args.eval_freq == 0:\n evaluations.append(self.eval_process(self.args.eval_episodes))\n np.save(f\"./results/{self.args.exp_name}/learning_process\", evaluations)\n if self.args.save_model:\n self.solver.save(f\"./models/{self.args.exp_name}\")\n\n\n\n\n\n\n\n\n","sub_path":"rl_algorithms/agent.py","file_name":"agent.py","file_ext":"py","file_size_in_byte":6014,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"606906473","text":"# -*- coding: utf-8 -*-\n\"\"\"\nbuild_environment.py\n\nDescription: Build the environment as per the quick start using the OpenStack SDK\nWritten by: maharg101 on 24th February 2018\n\nRelated links:\n - https://docs.openstack.org/python-openstacksdk/latest/user/\n\"\"\"\n\nimport sys\nsys.path.insert(1, '..') # adjust path to enable 'learning' utilities to remain isolated from core 
deliverables.\n\nfrom openstack_infrastructure import facade as osf # noqa: E402\n\n# starter environment naming\nSERVER_NAME = 'blog_app_1'\nNETWORK_NAME = 'net1'\nSUBNET_NAME = 'net1'\nROUTER_NAME = 'r1'\n\n\ndef main():\n \"\"\"\n Main function. Call the various find or create methods in the correct order.\n\n Note that the methods display the created items on stdout, which is useful in a learning context.\n\n :return:\n \"\"\"\n os_facade = osf.OpenStackFacade(silent=False)\n router = os_facade.find_or_create_router(ROUTER_NAME)\n network = os_facade.find_or_create_network(NETWORK_NAME)\n subnet = os_facade.find_or_create_subnet(SUBNET_NAME, network=network)\n port = os_facade.find_or_create_port(network, subnet)\n os_facade.add_interface_to_router(router, subnet, port)\n\nif __name__ == '__main__':\n main()\n","sub_path":"utilities/build_environment_no_instances.py","file_name":"build_environment_no_instances.py","file_ext":"py","file_size_in_byte":1200,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"43987836","text":"from . import Kernel\nimport numpy as np\n\nclass GaussianKernel(Kernel.Kernel):\n \n def __init__(self, bandwidth):\n Kernel.Kernel.__init__(self,lambda x,y : np.exp(-(np.linalg.norm(x-y)/bandwidth)**2))\n self.bandwidth = bandwidth\n \n def compute_similarity_matrix(self,x,y):\n n = x.shape[0]\n p = y.shape[0]\n \n X_dup = np.einsum('pnm -> npm', np.tile(x[np.newaxis,:,:], reps=(p,1,1)))\n Y_dup = np.tile(y[np.newaxis,:,:], reps=(n,1,1))\n \n res = np.exp(-(np.linalg.norm(X_dup-Y_dup, axis=2)/self.bandwidth)**2)\n \n del X_dup\n del Y_dup\n \n return res\n ","sub_path":"Kernels/GaussianKernel.py","file_name":"GaussianKernel.py","file_ext":"py","file_size_in_byte":664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"575933496","text":"#!/usr/bin/env python\n\nfrom drmaa import *\nfrom drmaa import const as c\nfrom os import environ\n\nSession.initialize();\njt = Session.createJobTemplate();\n\ncommon_dir='/mnt/lustre1/HTCondor';\njt.remoteCommand = common_dir+'/programs/sum.py';\n\njt.args = [ common_dir+'/outputs/products', common_dir+'/outputs/sum' ];\n\n#stdout and stderr\njt.outputPath = ':'+common_dir+'/logs/stdout.sum';\njt.errorPath = ':'+common_dir+'/logs/stderr.sum';\n\n#request resources and set log file\njt.nativeSpecification = 'request_cpus=8\\n';\n\njid = Session.runJob(jt);\nprint('Job id: '+jid);\n\n#wait for job completion\nSession.synchronize([jid], Session.TIMEOUT_WAIT_FOREVER);\n\n\nSession.deleteJobTemplate(jt);\nSession.exit()\n\n\n","sub_path":"tools/sum/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":700,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"603459227","text":"from create_annotations import *\nfrom PIL import Image\nimport argparse\nimport json\nimport numpy as np\n\n# Get 'images' and 'annotations' info\ndef images_annotations_info(opt):\n\n path = opt.path\n # path : train.txt or test.txt\n annotations = []\n images = []\n\n file = open(path, \"r\")\n read_lines = file.readlines()\n file.close()\n\n image_id = 0\n annotation_id = 1 # In COCO dataset format, you must start annotation id with '1'\n\n for line in read_lines:\n # Check how many items have progressed\n if image_id % 1000 == 0:\n print(\"Processing \" + str(image_id) + \" ...\")\n\n line = line.replace('\\n', '')\n w, h = Image.open(line).size\n\n # read a label file\n label_path = 
line.replace(\"/images/\", \"/labels/\")\n label_path = label_path.split('.')\n label_path[-1] = 'txt'\n label_path = '.'.join(label_path)\n\n label_file = open(label_path,\"r\")\n label_read_line = label_file.readlines()\n label_file.close()\n\n # Create image annotation\n image = create_image_annotation(line, w, h, image_id)\n images.append(image)\n\n # yolo format - (class_id, x_center, y_center, width, height)\n # coco format - (annotation_id, x_upper_left, y_upper_left, width, height)\n for line1 in label_read_line:\n label_line = line1\n category_id = int(label_line.split()[0]) + 1 # you start with annotation id with '1'\n x_center = float(label_line.split()[1])\n y_center = float(label_line.split()[2])\n width = float(label_line.split()[3])\n height = float(label_line.split()[4])\n\n int_x_center = int(w*x_center)\n int_y_center = int(h*y_center)\n int_width = int(w*width)\n int_height = int(h*height)\n\n min_x = int_x_center-int_width/2\n min_y = int_y_center-int_height/2\n width = int_width\n height = int_height\n\n annotation = create_annotation_yolo_format(min_x, min_y, width, height, image_id, category_id, annotation_id)\n annotations.append(annotation)\n annotation_id += 1\n\n image_id += 1 # if you finished annotation work, updates the image id.\n\n return images, annotations\n\ndef get_objects(path):\n with open(path) as f:\n object_list = f.read().split()\n \n return object_list\n\ndef get_args():\n parser = argparse.ArgumentParser('Yolo format annotations to COCO dataset format')\n parser.add_argument('-p', '--path', type=str, help='Absolute path for \\'train.txt\\' or \\'test.txt\\'')\n parser.add_argument('--objects', type=str, default='obj.names', help='path for object list')\n parser.add_argument('--output', type=str, help='Name the output json file')\n\n args = parser.parse_args()\n return args\n\nif __name__ == '__main__':\n opt = get_args()\n output_path = opt.output\n classes = get_objects(opt.objects)\n print(classes)\n\n print(\"Start!\")\n\n # start converting format\n coco_format['images'], coco_format['annotations'] = images_annotations_info(opt)\n \n for index, label in enumerate(classes):\n ann = {\n \"supercategory\": \"supercategory\",\n \"id\": index + 1, # Index starts with '1' .\n \"name\": label\n }\n coco_format['categories'].append(ann)\n \n with open(output_path, 'w') as outfile:\n json.dump(coco_format, outfile)\n\n print(\"Finished!\")\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"56708551","text":"from django.contrib.auth import get_user_model\nfrom django.test import TestCase\n\nfrom instant_coverage import InstantCoverageMixin, optional\n\n\nclass EverythingTest(\n optional.ExternalLinks, optional.ValidHTML5, optional.ValidJSON,\n InstantCoverageMixin, TestCase\n):\n fixtures = ['vote.json']\n\n covered_urls = [\n '/vote-admin/abuse/46162630/',\n '/vote-admin/block/0007C3F2760E0541/',\n '/vote-admin/block/0007C3F2760E0541/reason?reason=announced',\n '/vote-admin/unblock/0007C3F2760E0541/',\n '/vote-admin/hide/0007C3F2760E0541/',\n '/vote-admin/unhide/0007C3F2760E0541/',\n '/vote-admin/lm/0007C3F2760E0541/',\n '/vote-admin/ulm/0007C3F2760E0541/',\n '/vote-admin/shortlist/0007C3F2760E0541/',\n '/vote-admin/discard/0007C3F2760E0541/',\n '/vote-admin/reset/0007C3F2760E0541/',\n '/vote-admin/make-note/0007C3F2760E0541/',\n '/vote-admin/remove-note/2/',\n '/vote-admin/hidden/',\n '/vote-admin/inudesu/',\n 
'/vote-admin/artless/',\n '/vote-admin/add-manual-vote/0007C3F2760E0541/',\n '/vote-admin/upload/',\n '/vote-admin/requests/',\n '/vote-admin/trivia/',\n '/vote-admin/all-the-anime/',\n '/vote-admin/all-the-artists/',\n '/vote-admin/all-the-roles/',\n '/vote-admin/play/0007C3F2760E0541/',\n\n '/js/deselect/',\n '/js/select/',\n '/js/selection/',\n '/js/clear_selection/',\n\n '/api/',\n '/api/week/',\n '/api/week/2014-02-05/',\n '/api/track/0007C3F2760E0541/',\n '/api/search/?q=Canpeki',\n '/api/user/EuricaeriS/',\n\n '/',\n '/info/',\n '/info/api/',\n '/request/',\n '/roulette/',\n '/roulette/hipster/',\n '/roulette/indiscriminate/',\n '/roulette/pro/',\n '/roulette/staple/',\n '/archive/',\n '/archive/2014/',\n '/stats/',\n '/0007C3F2760E0541/',\n '/canpeki-shinakya/0007C3F2760E0541/',\n '/0007C3F2760E0541/report/',\n '/artist/Hikasa Youko/',\n '/anime/RO-KYU-BU%21/',\n '/show/2014-02-05/listen/',\n '/show/2014-02-05/',\n '/show/',\n '/added/2014-02-05/',\n '/added/',\n '/search/?q=Canpeki',\n '/user/EuricaeriS/',\n\n '/login/',\n '/cpw/',\n '/cpw-done/',\n\n # it's important that logout be last since we have a sublcass of this\n # test that logs in at the start, and we want it to stay logged in\n '/logout/',\n ]\n\n uncovered_urls = [\n # some urls that require stuff to be in the session\n '/vote-admin/upload/confirm/',\n '/vote-admin/shortlist-selection/',\n '/vote-admin/discard-selection/',\n '/vote-admin/hide-selection/',\n '/vote-admin/unhide-selection/',\n '/vote-admin/reset-shortlist-discard-selection/',\n\n # only accepts POST\n '/vote-admin/shortlist-order/',\n '/vote-admin/requests/fill/1/',\n '/vote-admin/requests/claim/1/',\n '/set-dark-mode/',\n\n # would require me to put twitter credentials in the public settings\n # file\n '/pic/46162630/',\n '/pic/46162630/?size=original',\n ]\n\n uncovered_includes = [\n (r'^admin/',)\n ]\n\n instant_tracebacks = True\n\n\nclass LoggedInEverythingTest(EverythingTest):\n def setUp(self):\n super(LoggedInEverythingTest, self).setUp()\n user = get_user_model()(\n username='what',\n is_staff=True,\n is_superuser=True,\n )\n user.set_password('what')\n user.save()\n self.assertTrue(self.client.login(username='what', password='what'))\n","sub_path":"nkdsu/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":3695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"205012641","text":"import baostock as bs\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\n# 登陆系统\nlg = bs.login()\n# 获取历史K线数据, frequency=\"d\" 取日K线\n# adjustflag=\"3\" 默认不复权,1 后复权,2 前复权\nfields = \"date,code,open,high,low,close\"\nrs = bs.query_history_k_data_plus(\"sh.600001\", fields,\n start_date='1990-06-01', end_date='2021-12-31',\n frequency=\"d\", adjustflag=\"2\")\n# 打印结果\ndata_list = []\nwhile (rs.error_code == '0') & rs.next() :\n # 获取一条记录,将记录合并在一起\n data_list.append(rs.get_row_data())\n#result.index=pd.to_datetime(result.date)\nresult = pd.DataFrame(data_list, columns=rs.fields)\n\n# 输出至CSV文件\nresult.to_csv(\"/home/jackchen/history_k_data.csv\", encoding=\"utf_8_sig\",index=False)\n\n# 登出系统\nbs.logout()\n\n","sub_path":"python/finance/get_day.py","file_name":"get_day.py","file_ext":"py","file_size_in_byte":846,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"250734341","text":"from urllib.request import urlopen\nfrom urllib.parse import urlsplit, urljoin\nfrom urllib.error import HTTPError\nimport requests\nimport os\nfrom time import 
sleep\nfrom json import loads\n\ndef url_download(domain, file_sequence, dirpath, EMAIL, AUTHKEY):\n\t'''\n\t:para domain: string, URL that links to a directory\n\t:para file_sequence: list of full filenames, in sequence of file numbering\n\t:para dirpath: relative path to folder to save files to\n\t:para EMAIL: email as username for HTTPBasicAuth\n\t:para AUTHKEY: authkey as password for HTTPBasicAuth\n\n\tdownloads files in file_sequence from domain, stores files in dirpath\n\tafter downloading, prints list of downloaded files and other errors\n\t'''\n\tdownloaded = []\n\terrors = []\n\n\t# add '/' to string if needed\n\tdomain = append_slash(domain)\n\tdirpath = append_slash(dirpath)\n\n\t# need to wait 60 seconds every 25 files\n\tfilecounter = 0\n\tfilecounterlimit = 25\n\tsleeptime = 60\n\n\t# create directory if not exist\n\tif not os.path.exists(dirpath):\n\t\tos.makedirs(dirpath)\n\n\tfor file in file_sequence:\n\t\tif filecounter == filecounterlimit:\n\t\t\tprint('{} URL requests made, sleeping for {} seconds'.format(filecounter, sleeptime))\n\t\t\tfilecounter = 0\n\t\t\tsleep(sleeptime)\n\n\t\t# if file exists, don't download\n\t\tif os.path.exists(dirpath + file):\n\t\t\tprint('{} already exists, continuing...'.format(dirpath + file))\n\t\t\tcontinue\n\t\tprint('{} does not exist, downloading...'.format(dirpath + file))\n\n\t\turl = urljoin(domain, file)\n\t\tcontents = ''\n\t\ttry:\n\t\t\tcontents = urlopen(url).read()\n\t\texcept HTTPError:\n\t\t\t# try authorization\n\t\t\twith requests.Session() as s:\n\t\t\t\ts.auth = (EMAIL, AUTHKEY)\n\t\t\t\tauth = s.post(url)\n\t\t\t\tr = s.get(url)\n\t\t\t\tcontents = r.content\n\t\t# log unknown errors\n\t\texcept Exception as e:\n\t\t\terrors.append(url + '\\t' + str(e))\n\n\t\t# check if contents show success or failure\n\t\tif contents:# and loads(contents.decode('utf-8'))['success'] != False:\n\t\t\tdownloaded.append(file)\n\t\t\twith open(dirpath + file, 'wb') as f:\n\t\t\t\tf.write(contents)\n\t\t# log content errors\n\t\telse:\n\t\t\terrors.append(url + '\\tunable to get contents')\n\n\t\tfilecounter += 1\n\n\t# log results in stdout\n\tif downloaded:\n\t\tprint('these files downloaded:')\n\t\tprint_url_message(downloaded)\n\telse:\n\t\tprint('No files downloaded\\n')\n\tprint_url_message(errors)\n\ndef print_url_message(lst):\n\t'''\n\t:para lst: list of URLs with concatenated error message\n\tprints entire list\n\t'''\n\tif lst:\n\t\tfor url in lst:\n\t\t\tprint(url)\n\t\tprint()\n\ndef append_slash(string):\n\t'''\n\t:para string: string\n\n\tadd forward slash to string if slash or backslash does not exist at end of string\n\t'''\n\tif string[-1] != '/' and string[-1] != '\\\\':\n\t\tstring += '/'\n\treturn string\n\ndef get_file_sequence(filename_filetype, start, end):\n\t'''\n\t:para filename_filetype: dictionary, key is string of filename, value is string of filetype\n\t:para start: start of file numbering\n\t:para end: end of file numbering\n\n\tconcatenates filename, file numbering, and filetype together into a full filename,\n\twhere file numbering ranges from start to end.\n\tassumes file numbering is zero-padded to three digits (001 to 099).\n\n\treturns list of full filenames, in sequence of file numbering\n\t'''\n\tif not 1 <= start <= end <= 99:\n\t\traise Exception('File numbering wrong. 
Please ensure 1 <= start <= end <= 99')\n\tfile_sequence = []\n\tfor filename, filetype in filename_filetype.items():\n\t\tfor i in range(start, end + 1):\n\t\t\ti = str(i)\n\t\t\t# pad to three digits\n\t\t\tif len(i) < 3:\n\t\t\t\ti = '0'*(3-len(i)) + i\n\t\t\tfile_sequence.append(filename + i + filetype)\n\treturn file_sequence\n\nif __name__ == '__main__':\n\timport sys\n\tif len(sys.argv) == 3:\n\t\tdomain = 'http://challenges.tmlc1.unpossib.ly/api/datasets/'\n\t\tfilename_filetype = {'tmlc1-scoring-':'.json'}\n\t\tfile_sequence = get_file_sequence(filename_filetype, int(sys.argv[1]), int(sys.argv[2]))\n\t\tdirpath = './'\n\t\tEMAIL = 'zhengqun.koo@gmail.com'\n\t\tAUTHKEY = '69072a84e36a942c33a3ff678b6f23a4'\n\t\turl_download(domain, file_sequence, dirpath, EMAIL, AUTHKEY)\n\telse:\n\t\tprint(\"Usage: {} start end\\n\".format(sys.argv[0]))\n","sub_path":"gcloud/url_download.py","file_name":"url_download.py","file_ext":"py","file_size_in_byte":4031,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"186027194","text":"from .pages.product_page import ProductPage\nfrom .pages.base_page import BasePage\nfrom .pages.login_page import LoginPage\nfrom .pages.basket_page import BasketPage\nimport pytest\nimport time\n\n\nlink_main_page = \"http://selenium1py.pythonanywhere.com/\"\nlink_product_page = \"http://selenium1py.pythonanywhere.com/catalogue/coders-at-work_207/\"\nlink_login_page = \"http://selenium1py.pythonanywhere.com/accounts/login/\"\n\n@pytest.mark.test_basket_guest\nclass TestBasketGuest():\n    @pytest.mark.need_review\n    def test_guest_can_add_product_to_basket(self, browser):\n        link = link_product_page\n        page = ProductPage(browser, link)\n        page.open()\n        page.add_to_basket()\n\n    @pytest.mark.need_review\n    def test_guest_can_go_to_basket_from_product_page(self, browser):\n        link = link_product_page\n        page = BasePage(browser, link)\n        page.open()\n        page.go_to_basket_page()\n        basket_page = BasketPage(browser, browser.current_url)\n        basket_page.should_be_basket_page()\n\n    def test_guest_can_go_to_basket_from_main_page(self, browser):\n        link = link_main_page\n        page = BasePage(browser, link)\n        page.open()\n        page.go_to_basket_page()\n\n    @pytest.mark.xfail(reason=\"by design\")\n    def test_guest_cant_see_success_message_after_adding_product_to_basket(self, browser):\n        link = link_product_page\n        page = ProductPage(browser, link)\n        page.open()\n        page.add_to_basket()\n        page.should_not_be_success_message()\n\n    @pytest.mark.xfail(reason=\"by design\")\n    def test_guest_message_disappeared_after_adding_product_to_basket(self, browser):\n        link = link_product_page\n        page = ProductPage(browser, link)\n        page.open()\n        page.add_to_basket()\n        page.should_disappeare_element()\n\n\n@pytest.mark.login_guest\nclass TestLoginFromMainPage():\n    def test_guest_should_see_login_link_on_product_page(self, browser):\n        link = link_product_page\n        page = ProductPage(browser, link)\n        page.open()\n        page.should_be_login_link()\n\n    @pytest.mark.need_review\n    def test_guest_can_go_to_login_page_from_product_page(self, browser):\n        link = link_product_page\n        page = ProductPage(browser, link)\n        page.open()\n        page.go_to_login_page()\n        login_page = LoginPage(browser, browser.current_url)\n        login_page.should_be_login_page()\n\n@pytest.mark.test_basket_user\nclass TestUserAddToBasketFromProductPage():\n    @pytest.fixture(scope=\"function\", autouse=True)\n    def setup(self, browser):\n        link = link_login_page\n        email = str(time.time()) + \"@fakemail.org\"\n        psw = \"1qazwsxedc\"\n        page = LoginPage(browser, 
link)\n page.open()\n page.register_new_user(email, psw)\n page.should_be_authorized_user()\n\n def test_user_cant_see_success_message(self, browser):\n link = link_product_page\n page = ProductPage(browser, link)\n page.open()\n page.should_not_be_success_message()\n\n @pytest.mark.need_review\n def test_user_can_add_product_to_basket(self, browser):\n link = link_product_page\n page = ProductPage(browser, link)\n page.open()\n page.add_to_basket()\n","sub_path":"test_product_page.py","file_name":"test_product_page.py","file_ext":"py","file_size_in_byte":3238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"90497427","text":"# -*- coding: utf-8 -*-\nimport sys\nimport time\n\nfrom blindspin import spinner\n\nimport click\n\nimport emoji\n\nfrom . import test\nfrom .. import cli, options\nfrom ..api import Apps, Config, Releases\n\n\n@cli.cli.command()\n@click.option('--message', help='Deployment message')\n@click.option('--hard', is_flag=True,\n help='Pull the latest service images on deploy')\n@options.app(allow_option=False)\ndef deploy(app, message, hard):\n \"\"\"Deploy your app to Storyscript Cloud.\"\"\"\n cli.user()\n\n payload = test.compile_app(app, False) # Also adds a spinner.\n\n if payload is None:\n sys.exit(1) # Error already printed by compile_app.\n\n click.echo(f'Deploying app {app}... ', nl=False)\n\n with spinner():\n config = Config.get(app)\n release = Releases.create(config, payload, app, message, hard)\n\n url = f'https://{app}.storyscriptapp.com/'\n click.echo()\n click.echo(\n click.style('\\b' + emoji.emojize(':heavy_check_mark:'), fg='green')\n + f' Version {release[\"id\"]} of your app has '\n f'been queued for deployment.\\n'\n )\n\n click.echo('Waiting for deployment to complete… ', nl=False)\n with spinner():\n if Apps.maintenance(app, maintenance=None):\n click.echo()\n click.echo()\n click.echo(\n 'Your app is in maintenance mode.\\n'\n 'Run the following to turn off it off:'\n )\n cli.print_command('story maintenance off')\n click.echo()\n click.echo(\n 'Once maintenance mode is turned off, '\n 'your app will be deployed immediately.'\n )\n return\n\n state = 'QUEUED'\n while state in ['DEPLOYING', 'QUEUED']:\n state = Releases.get(app)[0]['state']\n time.sleep(0.5)\n\n click.echo()\n if state == 'DEPLOYED':\n click.echo(\n click.style('\\b' + emoji.emojize(':heavy_check_mark:'), fg='green')\n + ' Deployment successful!'\n )\n click.echo(\n f'If your Story responds to HTTP requests, please visit:\\n {url}'\n )\n elif state == 'FAILED':\n click.echo(\n click.style('X', fg='red') + ' Deployment failed!', err=True\n )\n click.echo(\n 'Please use the following command to view your app\\'s logs:',\n err=True,\n )\n cli.print_command('story logs')\n elif state == 'TEMP_DEPLOYMENT_FAILURE':\n click.echo(\n click.style('X', fg='red') + ' Deployment failed!', err=True\n )\n click.echo(\n 'An internal error occurred.\\n'\n 'The Storyscript team has been notified.\\n'\n 'Please visit https://status.storyscript.io/ '\n 'for incident reports and updates.',\n err=True,\n )\n else:\n click.echo(\n f'An unhandled state of your app has been encountered - {state}',\n err=True,\n )\n click.echo(f'Please shoot an email to support@storyscript.io')\n","sub_path":"story/commands/deploy.py","file_name":"deploy.py","file_ext":"py","file_size_in_byte":3052,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} 
+{"seq_id":"586467700","text":"#########################################################################\n#\n# ZeOmega LLC license\n# Copyright (c) 2003-2007 ZeOmega LLC\n# http://www.zeomega.com\n# All rights reserved.\n#\n# ZeOmega software [both binary and source (if released)] (hereafter,\n# Software) is intellectual property owned by ZeOmega LLC is copyright\n# of ZeOmega LLC in all countries in the world, and ownership remains\n# with ZeOmega LLC The Software is protected by the copyright laws of\n# the United States and international copyright treaties. Licensee is\n# not allowed to distribute the binary and source code (if released) to\n# third parties. Licensee is not allowed to reverse engineer,\n# disassemble or decompile code, or make any modifications of the binary\n# or source code, remove or alter any trademark, logo, copyright or\n# other proprietary notices, legends, symbols, or labels in the\n# Software. Licensee is not allowed to sub-license the Software or any\n# derivative work based on or derived from the Software. Neither the\n# names of ZeOmega LLC , nor the names of its contributors may be used\n# to endorse or promote products derived from this Software without\n# specific prior written permission.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF\n# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.\n# IN NO EVENT SHALL THE CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR\n# ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF\n# CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION\n# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH THE SOFTWARE.\n#\n#########################################################################\n## standard library imports\nimport os\nimport sys\nimport datetime\nfrom DateTime import DateTime\n\n## related third party imports\nimport simplejson as json\nfrom OFS.Folder import Folder\nfrom App.special_dtml import HTMLFile\nfrom App.class_init import InitializeClass\nfrom AccessControl import ClassSecurityInfo\n\n## local application/library specific imports\nfrom Products.ZeUtil import fstozodb\nfrom base64 import encodestring,decodestring\nfrom Products.zelogger.zelogger import ZEWARNING,ZEERROR,ZEINFO\n\nfrom Products.ZeWidget.ZeWidget import ZeWidget\n\nmanage_addMDWISEWidgetForm = HTMLFile('dtml/ze_widget',globals())\n\ndef manage_addMDWISEWidget(self, id, customercode='', REQUEST=None):\n \"\"\"\n Call back function for adding MDWISEWidget instance\n\n @param id: id of the BCIWidget instance\n @param REQUEST: form request of this instance\n\n @return: If REQUEST is not None returns manage main screen for adding this instance\n \n \"\"\"\n ob = MDWISEWidget(id)\n self._setObject(id,ob)\n if REQUEST is not None:\n return self.manage_main(self, REQUEST)\n\nclass MDWISEWidget(ZeWidget):\n \"\"\"\n MDWISEWidget class\n Meta type of this class is MDWISEWidget Controller\n\n All MDWISEWidget methods are defined here\n\n Mainly includes\n - \n - \n \"\"\"\n meta_type = \"MDWISEWidget\"\n security = ClassSecurityInfo()\n\n security.declareObjectProtected('View')\n\n def __init__(self,id):\n \"\"\"\n To initialize and create Object of MDWISEWidget class\n\n @param id: Identity\n \"\"\"\n self.id = id\n self.title = \"MDWISEWidget\"\n\n def manage_afterAdd(self, item, container):\n \"\"\"\n Creates view directory.\n\n @param item: item to be added\n @param container: context object\n\n @return: 
Confirmation for the action\n \"\"\"\n from Products import MDWISEWidget\n path = os.path.dirname(MDWISEWidget.__file__)\n products = path + '/'\n view_paths = [('views', 'ZeWidget/views'), ('mdwise_views', products + 'mdwise_views')]\n for item in view_paths:\n fstozodb.addDirectory(self, dir_name=item[0], dir_path=item[1])\n\n return \"done\"\n\n def episodeIdWidget(self,\n claimant_idn,\n encounter_idn,\n enc_type_cd,\n pat_first_name='',\n pat_last_name='',\n status='',\n pat_access='',\n case_group='',\n case_id='',\n link_text=None,\n user_roles='',\n allowed_enc_types='',\n file_path=''):\n \"\"\"\n This method gives Episode ID as link and points to Episode view\n\n @param claimant_idn: claimant idn\n @param encounter_idn: encounter idn\n @param enc_type_cd: encounter type cd\n @param pat_first_name: patient first name\n @param pat_last_name: patient last name\n @param status: status of episode\n @param pat_access: pat access (Y/N value)\n @param case_group: Group case\n @param case_id: Case identity\n @param link_text: Link text\n\n @return: returns Episode view of the Episode ID\n \"\"\"\n if not encounter_idn:\n return ''\n\n request = self.REQUEST\n dtml_page = self.mdwise_views.episodeIDWidget\n canSee = self.ZeUser.Controller.checkIfUserHasPermissionForEpisodeType(permission='Ze Episode ReadAccessToEpisode', enc_type=enc_type_cd)\n uroles = user_roles\n if(not user_roles):\n uroles = self.ZeUser.Model.getLoggedInUserRoles()\n showLink = 0\n if(canSee or (set(['PROVIDER','PROVIDER-STAFF','ADJUSTER','MD']).intersection(uroles))):\n showLink = 1\n ## if link text is not passed, encounter idn will be the link text\n if not link_text:\n link_text = encounter_idn\n\n if not showLink:\n return link_text\n\n if status != 'Historical':\n cached_data = self.ZeCache.getEpisodeIdHTMLData(encounter_idn)\n if cached_data:\n return cached_data\n else:\n content = dtml_page(self.views, REQUEST=request, encounter_idn=encounter_idn, showLink=showLink,\n claimant_idn=claimant_idn, link_text=link_text, I_ENC_TYPE_CD=enc_type_cd,\n status=status,file_path=file_path)\n self.ZeCache.setEpisodeIdHTMLData(encounter_idn, content)\n return content\n \n return dtml_page(self.views, REQUEST=request, encounter_idn=encounter_idn, showLink=showLink,\n claimant_idn=claimant_idn, link_text=link_text, I_ENC_TYPE_CD=enc_type_cd,\n status=status,file_path=file_path)\n\n def patientWidget(self, claimant_idn, clm_first_name='', clm_last_name='', clm_tpa_idn=0,group_term_date='',\n pat_term_dt='', is_vip='N', pat_expired='N',special_needs='N',show_mcv_onclink=1,pat_access='N',\n provider_dashboard='', user_idn='', user_payors='', user_roles=''):\n \"\"\"\n Generates and returns Patient Name with color coding along with link to check claimant details\n The widget will validate if the user accessing has permssion to view details or not\n\n @param claimant_idn: Claimant identity\n @param clm_first_name: Claimant first name\n @param clm_last_name: Claimant last name\n @param clm_tpa_idn: Claimant TPA identity\n @param group_term_date: Group term date\n @param pat_term_dt: Patient term date\n @param is_vip: Is VIP or not\n @param pat_expired: Patient expired or not\n @param special_needs: Special needs required or not\n @param show_mcv_onclink: Whether to allow MCV for pat_name on click event or not\n @param pat_access: Whether to give access to patient or not\n @param provider_dashboard:Provider dashboard details\n\n @return: Calimant details and icon details\n \"\"\"\n\n REQUEST = self.REQUEST\n clist = []\n # base_url 
= self.ZeUtil.getBaseURL() # not in use\n if self.ZeUI.getConfigurationStatus('PatientNameFirstLast'):\n patName = (clm_first_name or '') + ', ' + (clm_last_name or '')\n else:\n patName = (clm_last_name or '') + ', ' + (clm_first_name or '')\n if(not user_idn):\n user_idn = self.ZeUser.Model.getLoggedinUserIdn()\n\n if(not user_payors):\n loggedin_user_payors = self.ZeUser.Model.getLoggedInUserPayorDetails(user_idn)\n else:\n loggedin_user_payors = user_payors\n\n canSee = 0\n# clist = []\n if (str(clm_tpa_idn) in loggedin_user_payors) or (clm_tpa_idn in loggedin_user_payors) or (0 in loggedin_user_payors):\n canSee = 1\n\n patient_widget_status = self.ZeUI.getWidgetStatus(item='patientWidget')\n if not patient_widget_status:\n wpage = self.views.disabled_patient_widget\n return wpage(self.views, REQUEST=REQUEST, patient_name=patName, canSee=canSee,\n claimant_idn=claimant_idn)\n elif self.ZeUI.getConfigurationStatus('show_worklist_tooltip'):\n return self.patientToolTipWidget(claimant_idn, clm_first_name, clm_last_name, clm_tpa_idn,\n group_term_date, pat_term_dt, is_vip, pat_expired)\n\n # mcv_conf = self.ZeUI.getConfigurationStatus('showMCVIcon')\n # if(not user_roles):\n # user_roles = self.ZeUser.Model.getLoggedInUserRoles()\n # view_mcv_icon=0\n # mcv_conf_list = []\n # if mcv_conf:\n # mcv_conf_list = mcv_conf.split(',')\n # for each in user_roles:\n # if each in mcv_conf_list:\n # view_mcv_icon=1\n\n pdet = self.ZeCache.getPatientObjectData(claimant_idn)\n #Zc_Comment_Performance : checking whether data regarding patient updated if updated widget_cache will be false\n widget_cache = True\n if not pdet:\n widget_cache = False\n elif(pdet.get('COB', 0) == 0 or pdet.get('episodes', 0) == 0 or pdet.get('CACHED_KEYWORDS', 0) == 0):\n widget_cache = False\n\n linkurl = 'Patient/Controller/getPatientCentricView'\n has_mcv_permission = self.ZeUI.checkHasPermissionOnMethod(linkurl)\n\n if(widget_cache):\n html_cached = self.ZeCache.getHTMLStringForPatientWidget(claimant_idn)\n if html_cached and html_cached[0]:\n if show_mcv_onclink and has_mcv_permission:\n return html_cached[0] + html_cached[1]\n else:\n return html_cached[0] + patName\n\n ## Keyword Code start Here\n keyword_conf_list, keyword_conf_dict, keyword_conf = self.ZeUI.getKeywordsForPatWidget()\n if keyword_conf:\n #query = 1\n #pdet = self.ZeCache.getPatientObjectData(claimant_idn)\n #if pdet:\n #if pdet.get('CACHED_KEYWORDS'):\n #for i in keyword_conf_list:\n #clist.append((keyword_conf_dict[i],i))\n #query = 0\n #if query:\n keyword_result = self.Keyword.Model.getkeywordOnPatientWidget(claimant_idn,tuple(keyword_conf_list))\n if keyword_result:\n for item in keyword_result:\n value = item['KEYWORD_DESC']\n clist.append((keyword_conf_dict[value], value))\n self.ZeCache.setPatientObjectData(claimant_idn, {value: 'Y'})\n #all_keyword_list = {}\n #for item in keyword_result:\n #value = keyword_icon_name = item['KEYWORD_DESC']\n #for each_key,each_value in all_keyword_list.iteritems():\n #if each_key == keyword_conf_dict[value]:\n #keyword_icon_name = each_value + '@@' + keyword_icon_name\n #all_keyword_list[keyword_conf_dict[value]] = keyword_icon_name\n #for each_key,each_value in all_keyword_list.iteritems():\n #each_keyword = str(each_value.split('@@'))[1:-1].replace('\\'','')\n #clist.append((each_key,each_keyword))\n #if isinstance(each_keyword,list):\n #for i in each_value.split('@@'):\n #self.ZeCache.setPatientObjectData(claimant_idn, {i: 'Y'})\n #else:\n #self.ZeCache.setPatientObjectData(claimant_idn, {each_keyword: 
'Y'})\n self.ZeCache.setPatientObjectData(claimant_idn, {'CACHED_KEYWORDS':1})\n ## Keyword Code End Here\n\n ## Member Policy code start here\n show_member_policy_conf = self.ZeUI.getConfigurationStatus('show_member_policy_widget')\n if show_member_policy_conf:\n class_name = str(show_member_policy_conf.split(':')[1]).strip()\n ## cache code start here\n db_check = 1\n #det = self.ZeCache.getPatWidgetCacheContent(claimant_idn)\n\n #det = self.ZeCache.getPatientObjectData(claimant_idn) Can use the pdet already assigned few lines above \n if pdet:\n if pdet.get('COB', '') == 'Y':\n clist.append((class_name, 'COB'))\n db_check = 0\n elif pdet.get('COB', '') == 'N':\n db_check = 0\n\n if db_check:\n policy_count = self.Patient.Model.checkClaimantPolicy(claimant_idn=claimant_idn)[0][0]\n if policy_count > 1:\n clist.append((class_name, 'COB'))\n #self.ZeCache.setPatWidgetCacheContent(claimant_idn, 'COB', 'Y')\n self.ZeCache.setPatientObjectData(claimant_idn, {'COB': 'Y'})\n else:\n #self.ZeCache.setPatWidgetCacheContent(claimant_idn, 'COB', 'N')\n self.ZeCache.setPatientObjectData(claimant_idn, {'COB': 'N'})\n ## Member Policy code End here\n\n grp_term = 'N'\n pat_term = 'N'\n\n if group_term_date:\n grp_term = self.Payor.Group.checkGroupTermed(group_term_date)\n elif pdet and ('grouptermdate' in pdet):\n group_term_date = pdet['grouptermdate']\n grp_term = self.Payor.Group.checkGroupTermed(group_term_date)\n\n # if (group_term_date or (pdet and ('grouptermdate' in pdet))):\n # if group_term_date == '':\n # grp_term = pdet['grouptermdate']\n # grp_term = self.Payor.Group.checkGroupTermed(group_term_date)\n\n if pat_term_dt:\n pat_term = self.Patient.Model.checkPatientTermed(pat_term_dt)\n elif pdet and ('patienttermdate' in pdet):\n pat_term_dt = pdet['patienttermdate']\n pat_term = self.Patient.Model.checkPatientTermed(pat_term_dt)\n\n # if (pat_term_dt or (pdet and ('patienttermdate' in pdet))):\n # if pat_term_dt=='':\n # pat_term_dt = pdet['patienttermdate']\n # pat_term = self.Patient.Model.checkPatientTermed(pat_term_dt)\n\n if (pdet and ('special_needs' in pdet)):\n special_needs = pdet['special_needs']\n if pat_expired == 'Y':\n clist.append(('ze-icon-memexpired', 'Expired'))\n if pat_term == 'Y' or grp_term == 'Y':\n clist.append(('ze-icon-memterminated', 'Terminated'))\n if is_vip == 'Y':\n clist.append(('ze-icon-memvip', 'VIP'))\n if special_needs == 'Y':\n clist.append(('ze-icon-specialneed', 'Special Needs'))\n\n ##Get color coding from configuration\n coldict = self.ZeUI.getColorCodingConfiguredForEpisode()\n tracklist = []\n query = 0\n if coldict:\n ##Checks in cache to see if it has info\n #d = self.ZeCache.getPatientObjectData(claimant_idn) # we can use the pdet \n if pdet:\n if 'episodes' not in pdet.keys() or pdet.get('episodes', 0) == 0:\n query = 1\n else:\n for k, v in coldict.items():\n if pdet.get(k, 0) == 1 and k not in tracklist:\n tracklist.append(k)\n clist.append((v, k))\n else:\n query = 1\n if query:\n ##Following part is updating Cache with patient level attributes\n configuredencs = self.ZeUI.getConfigurationStatus('encounters')\n configuredencs = str(configuredencs).split(',')\n existing_encs = []\n encs = self.Episode.Model.getActiveEncountersForPatient(claimant_idn=claimant_idn)\n for r in encs:\n insert = 1\n etype = r.ENC_TYPE_CD\n if etype not in existing_encs:\n existing_encs.append(etype)\n if insert:\n self.ZeCache.setPatientObjectData(r.MBR_IDN, {etype: 1})\n if coldict.has_key(etype):\n c = coldict[etype]\n if c not in tracklist:\n 
tracklist.append(c)\n clist.append((c, etype))\n self.ZeCache.setPatientObjectData(claimant_idn, {'episodes': 1}) ##Marking as Episode related data is loaded\n for e in configuredencs:\n if e not in existing_encs:\n self.ZeCache.setPatientObjectData(claimant_idn,{e:0})\n\n ##Storing similar icon Keywords as one key\n all_icon_dict = {}\n for item in clist:\n class_name = item[0]\n icon_name = item[1]\n\n if all_icon_dict.has_key(class_name):\n all_icon_dict[class_name] = all_icon_dict[class_name] + ', ' + icon_name\n else:\n all_icon_dict[class_name] = icon_name\n\n # if all_icon_dict:\n # for each_key,each_value in all_icon_dict.iteritems():\n # if each_key == class_name:\n # icon_name = each_value + '@@' + icon_name\n # all_icon_dict[class_name] = icon_name\n # else:\n # all_icon_dict[class_name] = icon_name\n\n all_icon_list = all_icon_dict.items()\n all_icon_list.sort()\n # all_icon_list = []\n # for icon_class,icon_value in all_icon_dict.iteritems():\n # all_icon_list.append((icon_class,str(icon_value.split('@@'))[1:-1].replace('\\'','')))\n # all_icon_list.sort()\n\n dtml_page1 = self.views.pat_icon_widget\n icon_cont = dtml_page1(self.views, REQUEST=REQUEST, claimant_idn=claimant_idn, colorList=all_icon_list)\n dtml_page2=self.views.patient_id_widget\n pat_cont =dtml_page2(self.views,\n REQUEST=REQUEST,\n claimant_idn=claimant_idn,\n pat_access=pat_access,\n patient_name=patName,\n show_mcv_onclink=show_mcv_onclink,\n provider_dashboard=provider_dashboard)\n self.ZeCache.setHTMLStringForPatientWidget(claimant_idn, {'icon_string':icon_cont, 'pat_string':pat_cont})\n if show_mcv_onclink and has_mcv_permission:\n return icon_cont + pat_cont\n else:\n return icon_cont + patName\n\n def episodeWidget(self, encounter_idn, claimant_idn,clm_tpa_idn=0, onEncID=1, issue_exists='N',\n episode_type='',defpage='',ask_confirm=1,hidden_param='',linkTxt=None,\n enc_status='',pat_acess='',user_idn='',user_payors='',user_roles='',allowed_enc_types=''):\n \"\"\"\n Generates and returns Encounter ID with color coding along with link to check encounter abstract\n The widget will validate if the user accessing has permission to view details or not\n This calls member status widget also\n\n @param encounter_idn: encounter id\n @param claimant_idn : member id\n @param clm_tpa_idn: Member TPA identity\n @param onEncID: whether to display the encounter code or not\n @param issue_exists: issue exists or not\n @param episode_type: episode type\n @param defpage: default page\n @param ask_confirm: whether to popup a confirm message or not\n @param hidden_param: comma separated param list\n @param linkTxt: Link text to the widget\n\n @return: '', if no encounter idn\n html cached, if there is cache\n Disabled episode widget, if there is no episode widget status\n else, episode tool tip widget\n \"\"\"\n if not encounter_idn:\n return ''\n REQUEST = self.REQUEST\n episode_widget_status=self.ZeUI.getWidgetStatus(item='episodeWidget')\n config_status = self.ZeUI.getConfigurationStatus('show_worklist_tooltip')\n \n if(not user_idn):\n user_idn = self.ZeUser.Model.getLoggedinUserIdn()\n uroles = self.ZeUser.Model.getLoggedInUserRoles()\n loggedin_user_payors = user_payors\n if(not user_payors):\n loggedin_user_payors = self.ZeUser.Model.getLoggedInUserPayorDetails(user_idn)\n\n from_prv = 0\n if('PROVIDER' in uroles or 'PROVIDER-STAFF' in uroles):\n from_prv = 1\n\n etype=episode_type\n canSee = 0\n if 'MEMBER' in uroles:\n canSee=0\n else:\n if clm_tpa_idn in [0, '0'] :\n canSee = 
self.ZeUser.Controller.checkIfUserHasPermissionForEpisodeType(permission='Ze Episode ReadAccessToEpisode', enc_type=etype)\n elif (str(clm_tpa_idn) in loggedin_user_payors) or (clm_tpa_idn in loggedin_user_payors) or (0 in loggedin_user_payors):\n canSee = self.ZeUser.Controller.checkIfUserHasPermissionForEpisodeType(permission='Ze Episode ReadAccessToEpisode', enc_type=etype)\n\n #setCache = 0\n #if not linkTxt:\n # setCache = 1\n \n if not (onEncID or linkTxt):\n linkTxt = self.ZeUtil.testCondition(self.ZeUtil.checkInList(etype, ['Appeal', '2nd Appeal']), True,self.ZeUtil.testCondition(self.ZeUI.getJivaLabel(element_name=(etype or '').replace('2nd Appeal', 'seappeal').lower(),default_label=etype),'', etype, self.ZeUI.getJivaLabel(element_name=(etype or '').replace('2nd Appeal', 'seappeal').lower(),\ndefault_label=etype)), etype.replace('CALL','MemSvc'))\n \n if not linkTxt:\n linkTxt = encounter_idn\n setCache = 0\n \n if not canSee and (not from_prv):\n return linkTxt\n \n html_cached = self.ZeCache.getHTMLStringForEpisodeWidget(claimant_idn, encounter_idn)\n if html_cached and html_cached[0] and (episode_widget_status and not config_status) and (not from_prv):\n epi_cont = html_cached[1]\n epi_cont = epi_cont.format(linkTxt=linkTxt)\n return html_cached[0] + epi_cont\n else:\n insert = 0\n query = 1\n case_enc_type_cd = episode_type\n if(episode_type.find('Appeal')):\n case_enc_type_cd = self.ZeUtil.testCondition(self.ZeUtil.checkInList(episode_type, ['Appeal', '2nd Appeal']), True, self.ZeUtil.testCondition(self.ZeUI.getJivaLabel(element_name=(episode_type or '').replace('2nd Appeal', 'seappeal').lower(), default_label=''), '', episode_type, self.ZeUI.getJivaLabel(element_name=(episode_type or '').replace('2nd Appeal', 'seappeal').lower(), default_label='')), episode_type)\n \n clist = []\n d = self.ZeCache.getEncWidgetCacheContent(int(encounter_idn), claimant_idn)\n if d:\n etype = d[1]\n query = 0\n pr_exists = d[3]\n if pr_exists=='':\n query = 1\n if query:\n ##Following part is updating Cache with patient level attributes\n for r in self.Episode.Model.getEncDetailsForWidget(encounter_idn):\n claimant_idn = r.MBR_IDN\n insert = 1\n etype = r.ENC_TYPE_CD\n pr_exists = ''\n pstatus = r.PREV_STATUS\n pstatus = pstatus.replace('', '').replace('', '')\n pstatus_list = pstatus.split(',')\n closed_status_list = self.ZeUI.getClosedStatusAsConfigured(sendAsItIs=1)\n entity_active = r.ENTITY_ACTIVE\n pc = ''\n for p in pstatus_list:\n if p and p not in closed_status_list:\n pc = p\n pr_exists = p\n if not pstatus:\n pc = None\n if insert:\n self.ZeCache.setEncWidgetCacheContent(r.ENC_IDN, etype, claimant_idn, pc, entity_active)\n if pr_exists:\n msg = 'PR - %s' % (pr_exists, )\n clist.append(('ze-icon-epi-pr-small', msg))\n #'not from_prv' check is added for not to get the alert window if it is from provider\n if (ask_confirm and not from_prv):\n link_function_call=\"$COMMON.askAndOpenEpisodeWindow('\"+str(claimant_idn) +\"','\"+str(encounter_idn)+\"','\"+hidden_param+\"','\"+defpage + \"');\"\n #Here changing the function call, because if from PP with the episode status as new\n #should redirect to Edit request screen\n elif (from_prv and enc_status.lower() == 'new'):\n pat_access = pat_acess\n link_function_call = \"$PP.getRequest('\"+str(claimant_idn) +\"','\"+str(encounter_idn)+\"','\"+str(episode_type)+\"','\"+str(pat_access)+\"','\"+str(enc_status.lower())+\"');\"\n else:\n\n str_url=self.ZeUtil.getBaseURL() + '/Episode/Controller/getEpisodeHome'\n dct_query_string= 
self.Episode.Controller.generate_episode_hidden_fields(encounter_idn,claimant_idn,hidden_param)\n str_query_string=''\n str_title = dct_query_string['WIN_ID']\n for key in dct_query_string:\n str_query_string=str_query_string+str(key)+\"=\"+str(dct_query_string[key])+\"&\"\n\n if defpage=='':\n defpage=self.ZeUI.getEncounterPage(episode_type)\n str_query_string=str_query_string+\"defpage=\"+defpage\n\n link_function_call=\"$COMMON.openEpisodeWindow('\" + str_title+ \"','epwindow','\" + str_url + \"','\"+ str_query_string+ \"',\"+ str(claimant_idn) + \",\" + str(encounter_idn) + \",'\" + etype + \"');\"\n\n if not episode_widget_status:\n wpage = self.views.disabled_episode_widget\n return wpage(self.views, REQUEST=REQUEST, claimant_idn=claimant_idn, encounter_idn=encounter_idn,linkTxt=linkTxt, canSee=canSee,link_function_call=link_function_call)\n elif config_status:\n return self.episodeTooltipWidget(encounter_idn, claimant_idn, clm_tpa_idn, onEncID, issue_exists, etype,link_function_call)\n\n if not self.ZeUI.getConfigurationStatus('enableIssuesIcon'):\n issue_exists = ''\n elif issue_exists == 'N':\n issue_exists = self.Issues.Model.checkIssueExists(encounter_idn)\n \n if from_prv:\n allowed_enc_types = self.ZeUser.Controller.checkIfUserHasPermissionForEpisode(permission='Ze Episode ReadAccessToEpisode')\n canSee = etype in allowed_enc_types\n dtml_page1 = self.mdwise_views.mdwise_epi_icon_widget\n icon_cont = dtml_page1(self.views, REQUEST=REQUEST, claimant_idn=claimant_idn,\n colorList=clist,\n encounter_idn=encounter_idn,\n I_ENC_TYPE_CD=etype,\n issue_exists=issue_exists, canSee=canSee)\n dtml_page2 = self.mdwise_views.mdwise_epi_widget\n epi_cont = dtml_page2(self.views,\n REQUEST=REQUEST,\n encounter_idn=encounter_idn,\n canSee=canSee,\n claimant_idn=claimant_idn,\n issue_exists=issue_exists,\n I_ENC_TYPE_CD=etype,\n link_function_call=link_function_call,\n case_enc_type_cd=case_enc_type_cd)\n\n if not from_prv:\n self.ZeCache.setHTMLStringForEpisodeWidget(claimant_idn, encounter_idn, {'icon_string':icon_cont, 'epi_string':epi_cont})\n epi_cont = epi_cont.format(linkTxt=linkTxt)\n return icon_cont + epi_cont\n\n def patientNameLinkWidget(self, claimant_idn, clm_first_name='', clm_last_name='', show_mcv_onclink=1,\n pat_access='N'):\n \"\"\"\n Generates and returns Patient Name with color coding along with link to check claimant details\n The widget will validate if the user accessing has permission to view details or not\n\n @param claimant_idn: Claimant identity\n @param clm_first_name: Claimant first name\n @param clm_last_name: Claimant last name\n @param show_mcv_onclink: Whether to allow MCV for pat_name on click event or not\n @param pat_access: Whether to give access to patient or not\n\n @return: Claimant details and icon details\n \"\"\"\n REQUEST = self.REQUEST\n if self.ZeUI.getConfigurationStatus('PatientNameFirstLast'):\n patName = (clm_first_name or '') + ', ' + (clm_last_name or '')\n else:\n patName = (clm_last_name or '') + ', ' + (clm_first_name or '')\n linkurl = 'Patient/Controller/getPatientCentricView'\n has_mcv_permission = self.ZeUI.checkHasPermissionOnMethod(linkurl)\n html_cached = self.ZeCache.getHTMLStringForPatientWidget(claimant_idn)\n if html_cached:\n if show_mcv_onclink and has_mcv_permission:\n return html_cached[1]\n else:\n return patName\n dtml_page2 = self.views.patient_id_widget\n pat_cont =dtml_page2(self.views,\n REQUEST=REQUEST,\n claimant_idn=claimant_idn,\n pat_access=pat_access,\n patient_name=patName)\n 
self.ZeCache.setHTMLStringForPatientWidget(claimant_idn, {'icon_string': None, 'pat_string':pat_cont})\n if show_mcv_onclink and has_mcv_permission:\n return pat_cont\n else:\n return patName\n\n def episodeTypeLinkWidget(self, encounter_idn, claimant_idn,clm_tpa_idn=0, onEncID=1, issue_exists='N',\n episode_type='',defpage='',ask_confirm=1,hidden_param='',linkTxt=None,\n enc_status='',pat_acess='',user_idn='',user_payors='',user_roles='',allowed_enc_types=''):\n \"\"\"\n Generates and returns Encounter ID with color coding along with link to check encounter abstract\n The widget will validate if the user accessing has permission to view details or not\n This calls member status widget also\n\n @param encounter_idn: encounter id\n @param claimant_idn : member id\n @param clm_tpa_idn: Member TPA identity\n @param onEncID: whether to display the encounter code or not\n @param issue_exists: issue exists or not\n @param episode_type: episode type\n @param defpage: default page\n @param ask_confirm: whether to popup a confirm message or not\n @param hidden_param: comma separated param list\n @param linkTxt: Link text to the widget\n\n @return: '', if no encounter idn\n html cached, if there is cache\n Disabled episode widget, if there is no episode widget status\n else, episode tool tip widget\n \"\"\"\n if not encounter_idn:\n return ''\n REQUEST = self.REQUEST\n if not user_roles:\n user_roles = self.ZeUser.Model.getLoggedInUserRoles()\n from_prv = 0\n if('PROVIDER' in user_roles or 'PROVIDER-STAFF' in user_roles):\n from_prv = 1\n\n etype = episode_type\n canSee = 0\n if 'MEMBER' in user_roles:\n canSee=0\n else:\n canSee = self.ZeUser.Controller.checkIfUserHasPermissionForEpisodeType(permission='Ze Episode ReadAccessToEpisode', enc_type=etype)\n\n if not (onEncID or linkTxt):\n linkTxt = self.ZeUtil.testCondition(self.ZeUtil.checkInList(etype, ['Appeal', '2nd Appeal']), True,self.ZeUtil.testCondition(self.ZeUI.getJivaLabel(element_name=(etype or '').replace('2nd Appeal', 'seappeal').lower(),default_label=etype),'', etype, self.ZeUI.getJivaLabel(element_name=(etype or '').replace('2nd Appeal', 'seappeal').lower(),\ndefault_label=etype)), etype.replace('CALL','MemSvc'))\n\n if not linkTxt:\n linkTxt = encounter_idn\n setCache = 0\n\n if not canSee and (not from_prv):\n return linkTxt\n\n html_cached = self.ZeCache.getHTMLStringForEpisodeWidget(claimant_idn, encounter_idn)\n if html_cached and (not from_prv):\n epi_cont = html_cached[1]\n epi_cont = epi_cont.format(linkTxt=linkTxt)\n return epi_cont\n else:\n insert = 0\n query = 1\n case_enc_type_cd = episode_type\n if(episode_type.find('Appeal')):\n case_enc_type_cd = self.ZeUtil.testCondition(self.ZeUtil.checkInList(episode_type, ['Appeal', '2nd Appeal']), True, self.ZeUtil.testCondition(self.ZeUI.getJivaLabel(element_name=(episode_type or '').replace('2nd Appeal', 'seappeal').lower(), default_label=''), '', episode_type, self.ZeUI.getJivaLabel(element_name=(episode_type or '').replace('2nd Appeal', 'seappeal').lower(), default_label='')), episode_type)\n\n #'not from_prv' check is added for not to get the alert window if it is from provider\n if (ask_confirm and not from_prv):\n link_function_call=\"$COMMON.askAndOpenEpisodeWindow('\" + str(claimant_idn) + \"','\" \\\n + str(encounter_idn) + \"','\" + hidden_param + \"','\" + defpage + \"');\"\n #Here changing the function call, because if from PP with the episode status as new\n #should redirect to Edit request screen\n elif (from_prv and enc_status.lower() == 'new'):\n pat_access = pat_acess\n 
link_function_call = \"$PP.getRequest('\" + str(claimant_idn) + \"','\" + str(encounter_idn) \\\n + \"','\" + str(episode_type) + \"','\" + str(pat_access) + \"','\" + \\\n str(enc_status.lower()) + \"');\"\n else:\n str_url=self.ZeUtil.getBaseURL() + '/Episode/Controller/getEpisodeHome'\n dct_query_string = self.Episode.Controller.generate_episode_hidden_fields(encounter_idn,\n claimant_idn,\n hidden_param)\n str_query_string = ''\n str_title = dct_query_string['WIN_ID']\n for key in dct_query_string:\n str_query_string = str_query_string + str(key) + \"=\" + str(dct_query_string[key]) + \"&\"\n if defpage == '':\n defpage = self.ZeUI.getEncounterPage(episode_type)\n str_query_string = str_query_string + \"defpage=\" + defpage\n link_function_call = \"$COMMON.openEpisodeWindow('\" + str_title+ \"','epwindow','\" + \\\n str_url + \"','\"+ str_query_string+ \"',\"+ str(claimant_idn) + \",\" \\\n + str(encounter_idn) + \",'\" + etype + \"');\"\n if from_prv:\n allowed_enc_types = self.ZeUser.Controller.checkIfUserHasPermissionForEpisode(permission='Ze Episode ReadAccessToEpisode')\n canSee = etype in allowed_enc_types\n\n dtml_page2 = self.mdwise_views.mdwise_epi_widget\n epi_cont = dtml_page2(self.views,\n REQUEST=REQUEST,\n encounter_idn=encounter_idn,\n canSee=canSee,\n claimant_idn=claimant_idn,\n I_ENC_TYPE_CD=etype,\n link_function_call=link_function_call,\n case_enc_type_cd=case_enc_type_cd)\n if not from_prv:\n self.ZeCache.setHTMLStringForEpisodeWidget(claimant_idn, encounter_idn, {'icon_string': None, 'epi_string':epi_cont})\n epi_cont = epi_cont.format(linkTxt=linkTxt)\n return epi_cont\n\nInitializeClass(MDWISEWidget)\n","sub_path":"MDWISEWidget/MDWISEWidget.py","file_name":"MDWISEWidget.py","file_ext":"py","file_size_in_byte":36912,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"344250319","text":"import bike\n\nif __name__ == '__main__':\n \n store = bike.Shop(\"Intrepid Moustache Rides\", 0.20)\n \n B1 = bike.Bicycle(\"Rickety Racket\", 10, 5)\n B2 = bike.Bicycle(\"Silver Ferret\", 100, 1)\n B3 = bike.Bicycle(\"Star Rider\", 250, 10)\n B4 = bike.Bicycle(\"Slobber Slick\", 400, 10)\n B5 = bike.Bicycle(\"Wicked Trickster\", 750, 15)\n B6 = bike.Bicycle(\"Sausage Maker\", 800, 2)\n bikes = {B1, B2, B3, B4, B5, B6} \n \n C1 = bike.Customer(\"MC Hammer\", 200)\n C2 = bike.Customer(\"Bill Maher\", 500)\n C3 = bike.Customer(\"Skrillex\", 1000)\n customers = {C1, C2, C3}\n \n store.reportStock(bikes)\n \n for c in customers:\n c.greet()\n store.makePitch(c, bikes)\n \n store.reportStock(bikes)\n store.reportProfit()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"385928635","text":"import sys\nimport time\nimport datetime\nimport requests\nimport json\nimport urllib3\n\nprint(\"Libraries are imported\")\n\nurllib3.disable_warnings()\n\nclass Interface():\n def __init__(self, name):\n '''\n This class is representetive of physical interfaces of dc switches\n '''\n self.name = name\n self.dn = ''\n self.adminState = ''\n self.operationalState = ''\n self.speed = ''\n self.sfpModel = 'unset'\n self.sfpSerial = 'unset'\n self.lastLinkStateChange = datetime.datetime(1970, 1, 1)\n\n\nclass Device():\n def __init__(self, name):\n '''\n This class is represantative of pyhsical switches in Pod\n '''\n self.name = name\n self.serial = ''\n self.model = ''\n self.dn = ''\n self.interfaces = 
[]\n\n\nclass Pod():\n def __init__(self, name):\n '''\n This class is represantative of a group of switches combined as a data centre pod\n '''\n self.name = name\n self.devices = []\n\n\nclass Apic:\n def __init__(self, managementIP, username, password):\n '''\n This class is represantative of Cisco Aci sdn controller,\n Mainly; it holds authentication process, get and post data operations, and\n organizing obtained data with spesific functions\n '''\n self.IP = managementIP\n self.username = username\n self.password = password\n self.cookies = {}\n self.headers = {\n 'content-type': \"application/json\",\n 'cache-control': \"no-cache\"\n }\n self.authentication = False\n self.pods = []\n\n def login(self):\n try:\n AUTHENTICATION_URL = \"https://%s/api/aaaLogin.json\" % self.IP\n AUTHENTICATION_DATA = \"{\\\"aaaUser\\\":{\\\"attributes\\\":{\\\"name\\\":\\\"%s\\\" , \\\"pwd\\\":\\\"%s\\\"}}}\" % (\n self.username, self.password)\n auth = json.loads(requests.post(AUTHENTICATION_URL, AUTHENTICATION_DATA, self.headers, verify=False).text)\n auth_token = auth['imdata'][0]['aaaLogin']['attributes']['token']\n self.cookies['APIC-Cookie'] = auth_token\n print(auth_token)\n self.authentication = True\n print(\"You are authenticated to Apic on \", self.IP)\n except:\n e = sys.exc_info()[0]\n print(\"Token failed with exception: %s\" % e)\n finally:\n print(\"Login process to Apic on %s is finished\" % self.IP)\n\n def getData(self, URL):\n if self.authentication:\n Data = json.loads(requests.get(url=URL, cookies=self.cookies, verify=False).text)[\"imdata\"]\n return Data\n else:\n return False\n\n def getPods(self):\n podsJson = self.getData(\"https://%s/api/node/class/fabricPod.json\" % self.IP)\n if podsJson:\n #print(podsJson)\n for pod in podsJson:\n self.pods.append(Pod(pod['fabricPod']['attributes']['dn'].split('/')[1]))\n\n def getDevices(self):\n for pod in self.pods:\n devicesOfPodJson = self.getData(\"https://%s/api/node/mo/topology/%s.json?query-target=children&target-subtree-class=fabricNode\" % (self.IP, pod.name))\n for fabricNode in devicesOfPodJson:\n tempDevice = Device(fabricNode['fabricNode']['attributes']['name'])\n tempDevice.model = fabricNode['fabricNode']['attributes']['model']\n tempDevice.serial = fabricNode['fabricNode']['attributes']['serial']\n tempDevice.dn = fabricNode['fabricNode']['attributes']['dn']\n #Obtain interfaces of devices\n interfacesOfDeviceJson = self.getData(\"https://%s/api/node/class/%s/l1PhysIf.json?rsp-subtree=children&rsp-subtree-class=ethpmPhysIf\" % (self.IP, tempDevice.dn))\n #print(interfacesOfDeviceJson)\n if interfacesOfDeviceJson:\n print(\"Digging interfaces for \" + tempDevice.name)\n for interface in interfacesOfDeviceJson:\n tempInterface = Interface(interface['l1PhysIf']['attributes']['id'])\n tempInterface.dn = interface['l1PhysIf']['attributes']['dn']\n tempInterface.adminState = interface['l1PhysIf']['attributes']['adminSt']\n tempInterface.operationalState = interface['l1PhysIf']['children'][0]['ethpmPhysIf']['attributes']['operSt']\n timeSentence = interface['l1PhysIf']['children'][0]['ethpmPhysIf']['attributes']['lastLinkStChg']\n tempInterface.lastLinkStateChange = datetime.date(int(timeSentence.split('T')[0].split('-')[0]), int(timeSentence.split('T')[0].split('-')[1]), int(timeSentence.split('T')[0].split('-')[2]))\n tempInterface.speed = interface['l1PhysIf']['attributes']['speed']\n #Getting sfp serial through api, we have to gel a call from api\n sfpInfoOfInterface = 
self.getData(\"https://%s/api/node/mo/%s/phys.json?query-target=children&target-subtree-class=ethpmFcot\" % (self.IP, tempInterface.dn))\n if sfpInfoOfInterface:\n try:\n tempInterface.sfpModel = sfpInfoOfInterface[0]['ethpmFcot']['attributes']['guiPN']\n tempInterface.sfpSerial = sfpInfoOfInterface[0]['ethpmFcot']['attributes']['guiSN']\n except:\n print(tempInterface.dn)\n pass\n finally:\n pass\n tempDevice.interfaces.append(tempInterface)\n del tempInterface\n pod.devices.append(tempDevice)\n del tempDevice\n\n def getFabric(self):\n self.getPods()\n self.getDevices()\n\n\n\n\n\n\n\nif __name__ == \"__main__\":\n #First, we get the starting time, in fact, there is no effect to take this,\n #but, I like to show what time process takes,\n startingTime = time.time()\n #Creating an instance of Aic class,\n #Simply, use ip address of sdn controller web interface, and your login credentials,\n exampleApic = Apic(\"ManagementIpAddressOfYourACIWebScreen\", \"Username\", \"Password\")\n #After, creating instance, try to get authenticate with sdn controller\n exampleApic.login()\n #And, let's get what we have in pysical network\n exampleApic.getFabric()\n #For showing all of what you have, you can run this part\n for pod in exampleApic.pods:\n print(\"Pod name is \" + pod.name)\n for device in pod.devices:\n print(\"\\t Device name: %s \\tmodel: %s \\t serial: %s \\tdn: %s\" % (device.name, device.model, device.serial, device.dn))\n for interface in device.interfaces:\n print(\"\\t\\t Interface id : {0} \\t speed: {1} \\t admin state: {2} \\t operational state: {3} \\t sfp serial : {4} \\t dn: {5} \\t lastStateChange: {6}\".format(interface.name, interface.speed, \\\n interface.adminState, interface.operationalState, str(interface.sfpSerial), interface.dn, interface.lastLinkStateChange))\n\n\n #Or you can run this part, to find sfp's on ports, staying down state more than 10 days\n acceptableDaysToBeSurePortIsUnused = 10\n #For showing sfp on down ports, (Be carefull on here, maybe your port is experiencing)\n for pod in exampleApic.pods:\n print(\"Pod name is \" + pod.name)\n for device in pod.devices:\n #print(\"\\t Device name: %s \\tmodel: %s \\t serial: %s \\tdn: %s\" % (device.name, device.model, device.serial, device.dn))\n for interface in device.interfaces:\n if interface.adminState != 'up' and interface.operationalState != 'up' and interface.sfpSerial and (datetime.date.today() - interface.lastLinkStateChange).days > acceptableDaysToBeSurePortIsUnused:\n print(interface.dn + \"\\t\" + interface.adminState + \"\\t\" + interface.operationalState + \"\\t\" + interface.sfpModel + \"\\t\" + interface.sfpSerial)\n print(\"Last Up time: \" + str(interface.lastLinkStateChange) + \" (1970-01-01 means that it has never been up)\")\n\n\n print(\"Process take %s seconds to complete\" % str(time.time() - startingTime))\n\n\n","sub_path":"sfpDigger.py","file_name":"sfpDigger.py","file_ext":"py","file_size_in_byte":8307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"154113567","text":"from flask import Flask, request, render_template\n\napp = Flask(__name__)\n\nALPHABET = \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\"\nSHIFT_CHAR_NUMBER = 3\n\n\ndef rot13_encrypt(input_text=\"\"):\n\n encrypt_string = input_text\n encrypted_string = \"\"\n\n for character in encrypt_string:\n\n tmp_char_poz = ALPHABET.find(character.upper())\n new_char = None\n\n if tmp_char_poz + SHIFT_CHAR_NUMBER + 1 > len(ALPHABET):\n new_char_poz = tmp_char_poz + SHIFT_CHAR_NUMBER - 
len(ALPHABET)\n new_char = ALPHABET[new_char_poz]\n else:\n if tmp_char_poz > -1:\n new_char_poz = tmp_char_poz + SHIFT_CHAR_NUMBER\n new_char = ALPHABET[new_char_poz]\n\n if new_char:\n if character == character.lower():\n new_char = new_char.lower()\n\n if character == character.upper():\n new_char = new_char.upper()\n\n encrypted_string = f\"{encrypted_string}{new_char}\"\n else:\n encrypted_string = f\"{encrypted_string}{character}\"\n\n return encrypted_string\n\n\n@app.route('/', methods=[\"GET\", \"POST\"])\ndef index():\n\n if request.method == \"POST\":\n\n text_to_encrpyt = request.form.get('input_text')\n encrpyted_text = rot13_encrypt(text_to_encrpyt)\n\n return render_template('index.html', encrpyted_text=encrpyted_text)\n\n if request.method == \"GET\":\n\n return render_template('index.html')\n\n\nif __name__ == '__main__':\n app.run()\n","sub_path":"SmartNinja Chalenge 2/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"163272436","text":"import os\n\nfrom label_formats.LabelFormatter import LabelFormatter\n\n\nclass YOLO(LabelFormatter):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.species = []\n self.train_im = [pos_json for pos_json in os.listdir(self.train_dir) if pos_json.endswith('.JPG')]\n self.test_im = [pos_json for pos_json in os.listdir(self.test_dir) if pos_json.endswith('.JPG')]\n\n def generate_metadata_files(self):\n names_file = os.path.join(self.dataset_dir, \"yolo.names\")\n data_file = os.path.join(self.dataset_dir, \"yolo.data\")\n train_list_file = os.path.join(self.dataset_dir, \"train\", \"yolo.labels\")\n test_list_file = os.path.join(self.dataset_dir, \"test\", \"yolo.labels\")\n backup_dir = os.path.join(self.dataset_dir, \"weights\")\n\n # create weights directory\n if not os.path.exists(backup_dir):\n os.makedirs(backup_dir)\n\n # write .data file\n data_content = \\\n \"classes = %d\\ntrain = %s\\nvalid = %s\\ntest = %s\\nnames = %s\\nbackup = %s\\n\" \\\n % (len(self.species), train_list_file, test_list_file, test_list_file, names_file, backup_dir)\n with open(data_file, 'w') as f:\n f.write(data_content)\n\n # write .names file\n with open(names_file, 'w') as f:\n for s in self.species:\n f.write('%s\\n' % s)\n\n # write test and train list files\n with open(train_list_file, 'w') as f:\n for s in self.train_im:\n fp = os.path.join(self.train_dir, s)\n f.write('%s\\n' % fp)\n\n with open(test_list_file, 'w') as f:\n for s in self.test_im:\n fp = os.path.join(self.test_dir, s)\n f.write('%s\\n' % fp)\n\n def generate_labels(self, dir, json_files):\n for f in json_files:\n fp = os.path.join(dir, f)\n labels = self.load_json(fp)\n lines = []\n for label in labels:\n global_label=label['global_label']\n relative_label=label['relative_label']\n species_name=label['species_name']\n if not species_name in self.species:\n self.species.append(species_name)\n\n im_w=label['im_w']\n im_h=label['im_h']\n x1 = relative_label['relative_x1']\n x2 = relative_label['relative_x2']\n y1 = relative_label['relative_y1']\n y2 = relative_label['relative_y2']\n\n class_id = self.species.index(species_name)\n x_center = (x1 + (x2-x1)/2.0)/im_w\n y_center = (y1 + (y2-y1)/2.0)/im_h\n w = (x2-x1)/im_w\n h = (y2-y1)/im_h\n lines.append(\"%d %f %f %f %f\" % (class_id, x_center, y_center, w, h))\n\n label_fp = fp.replace('.json', '.txt')\n with open(label_fp, 'w') as text_file:\n for line in lines:\n 
text_file.write(line + '\\n')\n\nlf = YOLO(\"/fast/generated_data/dataset_1/\")\nlf.format()","sub_path":"dataset/label_formats/YOLO.py","file_name":"YOLO.py","file_ext":"py","file_size_in_byte":3091,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"394288782","text":"# -*- coding: UTF-8 -*- \n'''\nUses the BFS breadth-first search algorithm to find all paths; with a small change it can also output the shortest path. The case where the network contains a loop is not handled yet.\n\n'''\n# First, define the structure graph of the network\ngraph = {\"1\": [\"2\",\"3\",\"4\"], \"2\": [\"1\",\"3\",\"5\"], \"3\": [\"1\",\"2\",\"4\",\"5\",\"6\"], \"4\": [\"1\",\"3\",\"6\",\"7\"], \"5\": [\"2\",\"3\",\"6\"], \"6\":[\"3\",\"4\",\"5\",\"7\"],\"7\":[\"4\",\"6\"]}\n\n# The following prints the structure graph of the network, one node at a time\n'''\nprint(\"The connection graph of the network is:\")\nfor i in graph.keys():\n\tprint(\"graph[%s]: %s\" %(i, graph[i]))\n'''\n\n# bsf implementation\t\ndef bsf(graph, start , end):\n\t\n\tchecked = []\t\t#stores the nodes that have already been checked\n\t\n\tqueue = [[start]] \t# put the start node into the queue\n\tallPath = []\n\t\n\t#path =[start]\t\t# stores the route list from the start node to the end node\n\t\n\tif start == end:\n\t\treturn \"The input start node [%s] and the end node [%s] are identical, please check that the node input is correct.\"%(start, end)\n\t\n\twhile queue: \t\t# while there are nodes in the queue\n\t\tpath = queue.pop(0) #take out the first path that was added to the queue\n\t\t#print(path)\n\t\tnode = path[-1]\n\t\t#initialization complete\n\t\tif node not in checked:\n\t\t\tneighbours = graph[node]\n\t\t\tfor neighbour in neighbours:\n\t\t\t\tnew_path = list(path)\n\t\t\t\t\n\t\t\t\tnew_path.append(neighbour)\n\t\t\t\tqueue.append(new_path) \t\t\n\t\t\t\tif neighbour == end:\n\t\t\t\t\tallPath.append(new_path)\t\t\t\t\n\t\t\tchecked.append(node)\n\t\n\tif allPath:\n\t\treturn allPath\t\n\treturn (\"No path found from start [%s] to end [%s], please check that the nodes are correct\" %(start, end))\n\t\t\n\npath = bsf(graph, \"1\", \"2\")\nprint(path)\n","sub_path":"Program_lunjingfanyan/dfs_demo.py","file_name":"dfs_demo.py","file_ext":"py","file_size_in_byte":1534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"397726488","text":"import re\nimport random\n\nimport numpy as np\nimport pandas as pd\nimport tensorflow as tf\nfrom keras.layers import Dense, LSTM, Bidirectional\nfrom keras.layers.embeddings import Embedding\nfrom keras.models import Sequential\nfrom keras.preprocessing.sequence import pad_sequences\nfrom keras.preprocessing.text import Tokenizer\n\n\ndef clean_punctuation(joke):\n # removes from the string everything that does not match the regular expression\n tokens = re.findall(r\"[\\w']+|[.,!?;]+\", joke)\n cleaned = []\n\n # if a token holds several punctuation marks in a row then, for example, if there is a question mark,\n # only the question mark is added to the cleaned list. Example: [\"...?\"] -> [\"?\"]\n for token in tokens:\n if '?' in token:\n cleaned.append('?')\n elif '!' in token:\n cleaned.append('!')\n elif '..' in token:\n cleaned.append('...')\n else:\n cleaned.append(token)\n\n # if the sentence does not end with '.', '?' or '!', a period is appended.\n if '.' not in cleaned[-1] and '?' not in cleaned[-1] and '!' 
not in cleaned[-1]:\n cleaned.append('.')\n return \" \".join(cleaned)\n\n\ndef sample(preds, temperature=1.0):\n preds = np.asarray(preds.astype('float64'))\n preds = np.log(preds) / temperature\n exp_preds = np.exp(preds)\n preds = exp_preds / np.sum(exp_preds)\n probas = np.random.multinomial(1, preds, 1)\n return np.argmax(probas) + 1\n\n\n\ndef generate_overlapping_encoded_sequences(jokes, maxlen, step):\n sentences = []\n next_words = [] # holds the targets\n for joke in jokes:\n for j in range(0, len(joke) - maxlen, step):\n sentences.append(joke[j: j + maxlen])\n next_words.append(joke[j + maxlen])\n return sentences, next_words\n\n\n# load the data\nshort_jokes = pd.read_csv('./shortjokes.csv')[:20000]\n\n# since the data has 2 columns, ID and Joke, we take only Joke\njokes = []\nfor value in short_jokes['Joke']:\n jokes.append(value.lower())\n\njokes = list(map(clean_punctuation, jokes))\ntext = ' '.join(jokes) # convert the list into a single text\n\ntokenizer = Tokenizer(filters='\"#$%&()*+,-/:;<=>@[\\\\]^_`{|}~\\t\\n') # filters - ignores the selected characters\n# creates a dictionary of indices and words by popularity (the key with the most popular word has value 1)\ntokenizer.fit_on_texts(jokes)\nvocab_size = len(tokenizer.word_index) + 1 # number of unique words/characters\nprint('Vocab Size', vocab_size)\n\n# split the jokes into sequences of length 11\nseq_length = 11\nstep = 3\ninteger_encoded_docs = tokenizer.texts_to_sequences(jokes) # replaces words and characters in the text with values from the dictionary\ninteger_encoded_docs = pad_sequences(integer_encoded_docs,\n padding='post') # pads lists shorter than 11 with zeros\nsplit_encoded_docs, next_words = generate_overlapping_encoded_sequences(integer_encoded_docs, seq_length, step)\n# shape of padded_docs = (len(split_encoded_docs), 11)\npadded_docs = pad_sequences(split_encoded_docs, padding='post')\nnext_words = np.asarray(next_words) # we need the next word for each of these\nprint(\"Number of Sequences:\", len(padded_docs))\n\n# Vectorize the sequences\ny = np.zeros((len(padded_docs), vocab_size), dtype=np.bool)\nfor i, padded_doc in enumerate(padded_docs):\n y[i, next_words[i]] = 1\n\nnum_epochs = 20\n\nembedding_dim = 256\nmodel = Sequential()\nmodel.add(Embedding(vocab_size, embedding_dim, input_length=padded_docs.shape[1], mask_zero=True))\nmodel.add(Bidirectional(LSTM(128, dropout=0.1, recurrent_dropout=0.1, return_sequences=True)))\nmodel.add(Bidirectional(LSTM(128, dropout=0.1, recurrent_dropout=0.1, return_sequences=True)))\nmodel.add(Bidirectional(LSTM(128)))\nmodel.add(Dense(2048, kernel_regularizer=tf.keras.regularizers.l2(0.001), activation='relu'))\nmodel.add(Dense(vocab_size, activation='softmax'))\n\nfilename = \"./weights/weights-improvement-18-0.9652.hdf5\"\nmodel.load_weights(filename)\nmodel.compile(loss='categorical_crossentropy', optimizer='adam')\n\nword_index = tokenizer.word_index # dictionary of indices and words, where the keys are words\nindex_to_word = dict(\n (index, word) for word, index in word_index.items()) # the same dictionary, where the keys are indices\nmax_words = 11\nmaxlen = padded_docs.shape[1]\nstart_index = random.randint(0, len(text.split(' ')) - max_words - 1)\ngenerated_text = \" \".join(text.split(' ')[start_index: start_index + max_words])\ninteger_encoded_gen_text = tokenizer.texts_to_sequences([generated_text])\nreadable_gen_text = \" \".join(map(lambda key: index_to_word[key], integer_encoded_gen_text[0]))\nprint(\"Random Seed:\")\nprint(readable_gen_text)\nfor _ in 
range(50):\n integer_encoded_gen_text = tokenizer.texts_to_sequences([generated_text])\n padded_gen_text = pad_sequences(integer_encoded_gen_text, maxlen=maxlen, padding='pre')\n preds = model.predict(padded_gen_text, verbose=0)[0]\n next_index = sample(preds)\n\n most_probable_next_word = index_to_word[next_index]\n print('Generated:', generated_text, 'Next: ', most_probable_next_word)\n generated_text += \" \" + most_probable_next_word\n readable_gen_text += \" \" + most_probable_next_word\n generated_text = \" \".join(generated_text.split(' ')[1:])\n if most_probable_next_word in ('.', '?', '!'):\n break\n\nprint('\\nFull generated text:')\nprint(readable_gen_text)","sub_path":"8383/Shishkin/idz_Shishkin_Stepanov/gen.py","file_name":"gen.py","file_ext":"py","file_size_in_byte":6072,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"16693483","text":"#!/usr/bin/python3\n\n\ndef main():\n pieniadze = 0\n karty = 0\n n = int(input())\n x = input().split()\n c = input()\n\n for i in range(n):\n x[i] = int(x[i])\n if c[i] == '1':\n pieniadze += x[i]\n\n x.sort()\n\n while pieniadze > 0:\n if pieniadze >= x[karty]:\n pieniadze -= x[karty]\n karty += 1\n else:\n break\n print(karty)\n\nmain()","sub_path":"2020/05/16/karty.py","file_name":"karty.py","file_ext":"py","file_size_in_byte":416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"109401004","text":"import os\n\nfrom flask import Flask, request, session, g, redirect, url_for, abort, \\\n render_template, flash, jsonify\n\nfrom BigCalendar.utility.db_control import init_db, full_user_list, full_password_list, read_from_app_db, \\\n insert_into_app_db, entry_in_app_db, create_availability_entry, update_app_db, update_entries_availability\nfrom BigCalendar.utility.encryption import encrypt_sha256\nfrom BigCalendar.utility.id_handling import opposite_id, split_id\n\napp = Flask(__name__)\napp.config.from_object(__name__)\n\napp.config.update(dict(\n DATABASE=os.path.join(app.root_path, 'big_calendar.db'), # TODO: Instance Folders\n DEBUG=True,\n SECRET_KEY='development key', # TODO\n))\n\n\n@app.teardown_appcontext\ndef close_db(error):\n \"\"\"Closes the database again at the end of the request.\"\"\"\n if hasattr(g, 'sqlite_db'):\n g.sqlite_db.close()\n\n\n@app.cli.command('initdb')\ndef initdb_command():\n \"\"\"Initializes the database.\"\"\"\n init_db(app)\n print('Initialized the database.')\n\n\n@app.route('/')\ndef show_entries():\n add = request.args.get('add')\n entries = read_from_app_db(\n app=app,\n properties=['id', 'text', 'concert_date', 'available'],\n table='entries',\n additional=' order by concert_date asc'\n )\n return render_template('show_entries.html', entries=entries, add=add)\n\n\n@app.route('/add', methods=['POST'])\ndef add_entry():\n if not session.get('logged_in'):\n abort(401)\n insert_into_app_db(\n app=app,\n properties=['text', 'concert_date', 'available'],\n table='entries',\n values=[request.form['text'], request.form['date'], 2]\n )\n flash('Neuer Eintrag erfolgreich hinzugefuegt.')\n return redirect(url_for('show_entries'))\n\n\n@app.route('/add_true')\ndef add_true():\n return redirect(url_for('show_entries', add=True))\n\n\n@app.route('/login', methods=['GET', 'POST'])\ndef login():\n error = None\n if request.method == 'POST':\n database = app.config['DATABASE']\n if request.form['username'] not in full_user_list(database):\n error = 'Invalid username'\n elif 
encrypt_sha256(request.form['password']) not in full_password_list(database):\n error = 'Invalid password'\n else:\n session['logged_in'] = True\n session['user'] = request.form['username']\n flash('You were logged in')\n return redirect(url_for('show_entries'))\n return render_template('login.html', error=error)\n\n\ndef bool_conversion(checked):\n if checked == 'true' or checked is True:\n return True\n elif checked == 'false' or checked is False:\n return False\n\n\n@app.route('/checkbox_clicked/<id_>/<checked>', methods=['GET', 'POST'])\ndef checkbox_clicked(id_, checked):\n available = user_availability(id_, checked)\n id_number = int(split_id(id_)[1])\n id_exists = entry_in_app_db(app=app, table='availabilities', target={'id': id_number})\n if not id_exists:\n create_availability_entry(id_number, app)\n update_app_db(app=app, table='availabilities', property=session['user'], value=available,\n where='id = {}'.format(id_number))\n update_entries_availability(app, id_number)\n\n return jsonify(other_id=(opposite_id(id_)), id=id_)\n\n\ndef user_availability(id_, checked):\n checked = bool_conversion(checked)\n id_category = split_id(id_)[0]\n if (id_category == 'yes' and checked) or (id_category == 'no' and not checked):\n available = 1\n elif (id_category == 'yes' and not checked) or (id_category == 'no' and checked):\n available = 0\n return available\n\n\n@app.route('/logout')\ndef logout():\n session.pop('logged_in', None)\n session.pop('user', None)\n flash('You were logged out')\n return redirect(url_for('show_entries'))\n","sub_path":"BigCalendar/big_calendar.py","file_name":"big_calendar.py","file_ext":"py","file_size_in_byte":3801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"412521447","text":"import re\nfrom typing import Any, Optional\n\nimport pytest\n\nfrom snuba_sdk.column import Column\nfrom snuba_sdk.expressions import (\n Consistent,\n Debug,\n DryRun,\n Granularity,\n InvalidExpression,\n Legacy,\n Limit,\n Offset,\n Totals,\n Turbo,\n)\nfrom snuba_sdk.function import Function\nfrom snuba_sdk.orderby import Direction, LimitBy, OrderBy\n\nlimit_tests = [\n pytest.param(1, None),\n pytest.param(10, None),\n pytest.param(-1, InvalidExpression(\"limit '-1' must be at least 1\")),\n pytest.param(\"5\", InvalidExpression(\"limit '5' must be an integer\")),\n pytest.param(1.5, InvalidExpression(\"limit '1.5' must be an integer\")),\n pytest.param(10.0, InvalidExpression(\"limit '10.0' must be an integer\")),\n pytest.param(1000000, InvalidExpression(\"limit '1000000' is capped at 10,000\")),\n]\n\n\n@pytest.mark.parametrize(\"value, exception\", limit_tests)\ndef test_limit(value: Any, exception: Optional[Exception]) -> None:\n if exception is not None:\n with pytest.raises(type(exception), match=re.escape(str(exception))):\n Limit(value)\n else:\n assert Limit(value).limit == value\n\n\noffset_tests = [\n pytest.param(0, None),\n pytest.param(10, None),\n pytest.param(-1, InvalidExpression(\"offset '-1' must be at least 0\")),\n pytest.param(\"5\", InvalidExpression(\"offset '5' must be an integer\")),\n pytest.param(1.5, InvalidExpression(\"offset '1.5' must be an integer\")),\n pytest.param(10.0, InvalidExpression(\"offset '10.0' must be an integer\")),\n]\n\n\n@pytest.mark.parametrize(\"value, exception\", offset_tests)\ndef test_offset(value: Any, exception: Optional[Exception]) -> None:\n if exception is not None:\n with pytest.raises(type(exception), match=re.escape(str(exception))):\n Offset(value)\n else:\n assert 
Offset(value).offset == value\n\n\ngranularity_tests = [\n pytest.param(10, None),\n pytest.param(0, InvalidExpression(\"granularity '0' must be at least 1\")),\n pytest.param(\"5\", InvalidExpression(\"granularity '5' must be an integer\")),\n pytest.param(1.5, InvalidExpression(\"granularity '1.5' must be an integer\")),\n pytest.param(10.0, InvalidExpression(\"granularity '10.0' must be an integer\")),\n]\n\n\n@pytest.mark.parametrize(\"value, exception\", granularity_tests)\ndef test_granularity(value: Any, exception: Optional[Exception]) -> None:\n if exception is not None:\n with pytest.raises(type(exception), match=re.escape(str(exception))):\n Granularity(value)\n else:\n assert Granularity(value).granularity == value\n\n\norderby_tests = [\n pytest.param(Column(\"foo\"), Direction.ASC, None),\n pytest.param(Function(\"bar\", [Column(\"foo\")]), Direction.ASC, None),\n pytest.param(\n 0,\n Direction.DESC,\n InvalidExpression(\n \"OrderBy expression must be a Column, CurriedFunction or Function\"\n ),\n ),\n pytest.param(\n Column(\"foo\"), \"ASC\", InvalidExpression(\"OrderBy direction must be a Direction\")\n ),\n]\n\n\n@pytest.mark.parametrize(\"exp, direction, exception\", orderby_tests)\ndef test_orderby(exp: Any, direction: Any, exception: Optional[Exception]) -> None:\n if exception is not None:\n with pytest.raises(type(exception), match=re.escape(str(exception))):\n OrderBy(exp, direction)\n else:\n assert OrderBy(exp, direction)\n\n\nlimitby_tests = [\n pytest.param(Column(\"foo\"), 1, None),\n pytest.param(\"bar\", 1, InvalidExpression(\"LimitBy can only be used on a Column\")),\n pytest.param(\n Column(\"foo\"),\n \"1\",\n InvalidExpression(\"LimitBy count must be a positive integer (max 10,000)\"),\n ),\n pytest.param(\n Column(\"foo\"),\n -1,\n InvalidExpression(\"LimitBy count must be a positive integer (max 10,000)\"),\n ),\n pytest.param(\n Column(\"foo\"),\n 15000,\n InvalidExpression(\"LimitBy count must be a positive integer (max 10,000)\"),\n ),\n]\n\n\n@pytest.mark.parametrize(\"column, count, exception\", limitby_tests)\ndef test_limitby(column: Any, count: Any, exception: Optional[Exception]) -> None:\n if exception is not None:\n with pytest.raises(type(exception), match=re.escape(str(exception))):\n LimitBy(column, count)\n else:\n assert LimitBy(column, count).count == count\n\n\nboolean_tests = [\n pytest.param(\"totals\", Totals),\n pytest.param(\"consistent\", Consistent),\n pytest.param(\"turbo\", Turbo),\n pytest.param(\"debug\", Debug),\n pytest.param(\"dry_run\", DryRun),\n pytest.param(\"legacy\", Legacy),\n]\n\n\n@pytest.mark.parametrize(\"name, flag\", boolean_tests)\ndef test_boolean_flags(name: str, flag: Any) -> None:\n assert flag(True) is not None\n assert flag(False) is not None\n with pytest.raises(InvalidExpression, match=re.escape(f\"{name} must be a boolean\")):\n flag(0)\n","sub_path":"tests/test_expression.py","file_name":"test_expression.py","file_ext":"py","file_size_in_byte":4827,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"588537122","text":"import json\nimport logging\nfrom io import BytesIO\nfrom typing import List, Tuple\n\nfrom django.conf import settings\nfrom django.core.files.uploadedfile import TemporaryUploadedFile\nfrom django.utils.crypto import get_random_string\n\nimport requests\nfrom zgw_consumers.client import ZGWClient\n\nfrom bptl.tasks.base import WorkUnit, check_variable\nfrom bptl.tasks.registry import register\n\nfrom .client import DRCClientPool, 
get_client, require_validsign_service\nfrom .models import CreatedPackage\n\nlogger = logging.getLogger(__name__)\n\n\nclass NoService(Exception):\n pass\n\n\nclass NoAuth(Exception):\n pass\n\n\nclass DoesNotExist(Exception):\n pass\n\n\nclass ValidSignTask(WorkUnit):\n @property\n def client(self) -> ZGWClient:\n if not hasattr(self, \"_client\"):\n self._client = get_client(self.task)\n return self._client\n\n\n@register\n@require_validsign_service\nclass CreateValidSignPackageTask(ValidSignTask):\n \"\"\"Create a ValidSign package with signers and documents and send a signing request to the signers.\n\n **Required process variables**\n\n * ``documents``: List of strings. List of API URLs where the documents to be signed can be retrieved.\n The API must comply with the Documenten API 1.0.x (\n https://vng-realisatie.github.io/gemma-zaken/standaard/documenten/index).\n\n * ``signers``: JSON list with signers information. For ValidSign, the first name, the last name and the\n email address of each signer are required. Example ``signers``:\n\n .. code-block:: json\n\n [{\n \"email\": \"example.signer@example.com\",\n \"firstName\": \"ExampleFirstName\",\n \"lastName\": \"ExampleLastName\"\n },\n {\n \"email\": \"another.signer@example.com\",\n \"firstName\": \"AnotherFirstName\",\n \"lastName\": \"AnotherLastName\"\n }]\n\n * ``packageName``: string. Name of the ValidSign package that contains the documents to sign and the signers.\n This name appears in the notification-email that is sent to the signers.\n\n\n * ``services``: JSON Object of connection details for ZGW services:\n\n .. code-block:: json\n\n {\n \"<service-api-root>\": {\"jwt\": \"Bearer <jwt>\"},\n \"<other-service-api-root>\": {\"jwt\": \"Bearer <jwt>\"}\n }\n\n **Optional process variables**\n\n * ``bptlAppId``: the application ID of the app that caused this task to be executed.\n The app-specific credentials will be used for the API calls, if provided.\n\n * ``messageId``: string. The message ID to send back into the process when the\n package is signed by everyone. You can use this to continue process execution.\n If left empty, then no message will be sent.\n\n **Sets the process variables**\n\n * ``packageId``: string. 
ID of the ValidSign package created by the task.\n \"\"\"\n\n def format_signers(self, signers: List[dict]) -> List[dict]:\n \"\"\"Format the signer information into an array of JSON objects as needed by ValidSign.\"\"\"\n\n return [{\"type\": \"SIGNER\", \"signers\": [signer]} for signer in signers]\n\n def _get_documents_from_api(self) -> List[Tuple[str, BytesIO]]:\n \"\"\"Retrieve the documents and their content from the Documenten API.\"\"\"\n\n logger.debug(\"Retrieving documents from Documenten API\")\n\n variables = self.task.get_variables()\n document_urls = check_variable(variables, \"documents\")\n\n client_pool = DRCClientPool(variables)\n client_pool.populate_clients(self.task, document_urls)\n\n documents = []\n\n current_total_documents_size = 0\n for document_url in document_urls:\n # Getting the appropriate client\n document_client = client_pool.get_client_for(document_url)\n # Retrieving the document\n document_data = document_client.retrieve(\n resource=\"enkelvoudiginformatieobject\",\n url=document_url,\n )\n\n # Retrieving the content of the document\n # Need use requests directly instead of `document_client.request()` since the response is not in JSON format\n response = requests.get(\n document_data[\"inhoud\"],\n headers=document_client.auth_header,\n stream=True,\n )\n\n # Get the document size in bytes\n document_size = document_data[\"bestandsomvang\"]\n\n # If the size of the document is above the max size or if all the documents together have already reached\n # the maximum size, write the file content to a temporary file\n if (\n document_size > settings.MAX_DOCUMENT_SIZE\n or (current_total_documents_size + document_size)\n > settings.MAX_TOTAL_DOCUMENT_SIZE\n ):\n # The file is created with rb+ mode by default\n tmp_file_object = TemporaryUploadedFile(\n name=f\"{document_data['titel']}-{get_random_string(length=5)}.tempfile\",\n content_type=\"application/octet-stream\",\n size=document_size,\n charset=None, # Required argument in TemporaryUploadedFile, but not in parent class UploadedFile\n )\n for chunk in response.iter_content(chunk_size=settings.CHUNK_SIZE):\n tmp_file_object.write(chunk)\n tmp_file_object.flush()\n doc_tuple = (document_data[\"titel\"], tmp_file_object)\n else:\n doc_tuple = (document_data[\"titel\"], BytesIO(response.content))\n current_total_documents_size += document_size\n\n response.close()\n\n documents.append(doc_tuple)\n\n return documents\n\n def _get_signers_from_package(self, package: dict) -> List[dict]:\n \"\"\"Retrieve all the roles from a ValidSign package and return those that are signers.\"\"\"\n\n logger.debug(\"Retrieving the roles from validSign package '%s'\", package[\"id\"])\n\n response = self.client.request(\n path=f\"api/packages/{package['id']}/roles\",\n operation=\"api.packages._packageId.roles.get\",\n method=\"GET\",\n )\n\n roles = response[\"results\"]\n # Not all the roles are signers (one of them is the account owner)\n return [role for role in roles if role[\"type\"] == \"SIGNER\"]\n\n def _get_approvals(self, signers: List[dict]) -> List[dict]:\n \"\"\"Make approvals from signers\n\n The approval is a placeholder for where a signature from a signer will go.\n \"\"\"\n\n fields = [\n {\n \"type\": \"SIGNATURE\",\n \"subtype\": \"FULLNAME\",\n \"extractAnchor\": {\n \"anchorText\": \"Capture Signature\",\n \"index\": 0,\n \"characterIndex\": 0,\n \"anchorPoint\": \"BOTTOMLEFT\",\n \"leftOffset\": 0,\n \"topOffset\": 0,\n \"width\": 150,\n \"height\": 50,\n },\n }\n ]\n\n approvals = []\n for signer in 
signers:\n approvals.append({\"role\": f\"{signer['id']}\", \"fields\": fields})\n\n return approvals\n\n def create_package(self) -> dict:\n \"\"\"Create a ValidSign package with the name specified by the process variable and add the signers to it.\"\"\"\n\n logger.debug(\"Creating ValidSign package\")\n\n variables = self.task.get_variables()\n signers = self.format_signers(check_variable(variables, \"signers\"))\n package_name = check_variable(variables, \"packageName\")\n\n body = {\n \"name\": package_name,\n \"type\": \"PACKAGE\",\n \"roles\": signers,\n }\n\n package = self.client.request(\n path=\"api/packages\", operation=\"api.packages.post\", method=\"POST\", json=body\n )\n\n return package\n\n def add_documents_and_approvals_to_package(self, package: dict) -> List[dict]:\n \"\"\"Add documents and approvals to the package.\"\"\"\n\n logger.debug(\n \"Adding documents and approvals to ValidSign package '%s'\", package[\"id\"]\n )\n\n documents = self._get_documents_from_api()\n\n # Multiple files can be added in one request by passing the following 'files' parameter\n # to the request, but then not sure how to specify the filename yet...\n # files = [(\"files[]\", content) for name, content in documents]\n\n signers = self._get_signers_from_package(package)\n approvals = self._get_approvals(signers)\n\n attached_documents = []\n for doc_name, doc_content in documents:\n url = f\"{self.client.base_url}api/packages/{package['id']}/documents\"\n payload = {\"name\": doc_name, \"extract\": True, \"approvals\": approvals}\n body = {\"payload\": json.dumps(payload)}\n doc_content.seek(0)\n\n # if doc_content is a TemporaryUploadedFile, this does a streaming upload\n file = [(\"file\", doc_content)]\n\n # Not using validsign_client because the request doesn't get formatted properly,\n # since this a multipart/form-data call while zds_client only supports JSON.\n response = requests.post(\n url=url, headers=self.client.auth_header, data=body, files=file\n )\n doc_content.close()\n\n response.raise_for_status()\n attached_doc = response.json()\n attached_documents.append(attached_doc)\n\n return attached_documents\n\n def send_package(self, package: dict):\n \"\"\"Change the status of the package to 'SENT'\n\n When the status of the package is changed, an email is automatically sent to all the signers with a\n link where they can sign the documents.\n \"\"\"\n\n logger.debug(\"Setting the status of package '%s' to SENT\", package[\"id\"])\n body = {\"status\": \"SENT\"}\n\n self.client.request(\n path=f\"api/packages/{package['id']}\",\n operation=\"api.packages._packageId.post\",\n method=\"PUT\",\n json=body,\n )\n\n def perform(self) -> dict:\n\n package = self.create_package()\n self.add_documents_and_approvals_to_package(package)\n self.send_package(package)\n\n CreatedPackage.objects.create(package_id=package[\"id\"], task=self.task)\n\n return {\"packageId\": package[\"id\"]}\n\n\n@register\n@require_validsign_service\nclass ValidSignReminderTask(ValidSignTask):\n \"\"\"Email a reminder (with links) to signers that they need to sign documents through ValidSign.\n\n **Required process variables**\n\n * ``packageId``: string with the ValidSign Id of a package\n * ``email``: the email address of the signer who needs a reminder\n\n **Optional process variables**\n\n * ``bptlAppId``: the application ID of the app that caused this task to be executed.\n The app-specific credentials will be used for the API calls, if provided.\n\n **Sets no process variables**\n\n \"\"\"\n\n def 
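# Illustrative sketch (an aside): a hedged outline of how the two ValidSign work
# units above chain together. `make_task` is a hypothetical stand-in; in BPTL
# the task objects and their process variables come from the process engine,
# not from code like this.
def create_package_then_remind(make_task):
    create_unit = CreateValidSignPackageTask(make_task(
        documents=["https://documents.example.com/api/v1/enkelvoudiginformatieobjecten/123"],
        signers=[{"email": "example.signer@example.com", "firstName": "Example", "lastName": "Signer"}],
        packageName="Example package",
    ))
    # perform() creates the package, attaches documents/approvals and sets it to SENT
    package_id = create_unit.perform()["packageId"]
    # later, a reminder task with packageId/email variables can nudge a signer
    remind_unit = ValidSignReminderTask(make_task(packageId=package_id, email="example.signer@example.com"))
    remind_unit.perform()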
send_reminder(self, package_id: str, email: str):\n logger.debug(\"Sending a reminder to '%s' through ValidSign\", email)\n\n body = {\"email\": email}\n self.client.request(\n path=f\"api/packages/{package_id}/notifications\",\n operation=\"api.packages._packageId.notifications.post\",\n method=\"POST\",\n json=body,\n )\n\n def perform(self) -> dict:\n\n variables = self.task.get_variables()\n\n package_id = check_variable(variables, \"packageId\")\n email = check_variable(variables, \"email\")\n\n self.send_reminder(package_id, email)\n\n return {}\n","sub_path":"src/bptl/work_units/valid_sign/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":11785,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"470697535","text":"'''\nPlots with colormaps.\n'''\n\nfrom __future__ import absolute_import\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport matplotlib as mpl\n# from skimage import color\n\nfrom . import cm\nfrom . import tools\n\n\nmpl.rcParams.update({'font.size': 14})\nmpl.rcParams['font.sans-serif'] = 'Arev Sans, Bitstream Vera Sans, Lucida Grande, Verdana, Geneva, Lucid, Helvetica, Avant Garde, sans-serif'\nmpl.rcParams['mathtext.fontset'] = 'custom'\nmpl.rcParams['mathtext.cal'] = 'cursive'\nmpl.rcParams['mathtext.rm'] = 'sans'\nmpl.rcParams['mathtext.tt'] = 'monospace'\nmpl.rcParams['mathtext.it'] = 'sans:italic'\nmpl.rcParams['mathtext.bf'] = 'sans:bold'\nmpl.rcParams['mathtext.sf'] = 'sans'\nmpl.rcParams['mathtext.fallback_to_cm'] = 'True'\n\n# import pdb; pdb.set_trace()\n# # list of colormaps for several functions\n# cmaps = [cmocean.temp, cmocean.o2, cmocean.salinity, cmocean.chl,\n# cmocean.rho, cmocean.par, cmocean.turb, cmocean.cdom]\n # cmocean.bathy, cmocean.s, cmocean.v, cmocean.vort,\n # cmocean.eta]\n\ncmaps = cm.cmall\n\n\ndef plot_lightness(saveplot=False):\n '''Plot lightness of colormaps together.\n\n '''\n\n from colorspacious import cspace_converter\n\n dc = 1.\n x = np.linspace(0.0, 1.0, 256)\n locs = [] # locations for text labels\n\n fig = plt.figure(figsize=(16, 6))\n ax = fig.add_subplot(111)\n ax.set_xlim(-0.1, len(cmaps) + 0.1)\n ax.set_ylim(0, 100)\n ax.set_xlabel('Lightness for each colormap')\n\n for j, cmap in enumerate(cmaps):\n rgb = cmap(x)[np.newaxis, :, :3]\n lab = cspace_converter(\"sRGB1\", \"CAM02-UCS\")(rgb)\n # lab = color.rgb2lab(rgb)\n L = lab[0, :, 0]\n # import pdb; pdb.set_trace()\n # L = lab[0, :, 0]\n if L[-1] > L[0]:\n ax.scatter(x+j*dc, L, c=x, cmap=cmap, s=300, linewidths=0.)\n else:\n ax.scatter(x+j*dc, L[::-1], c=x[::-1], cmap=cmap, s=300, linewidths=0.)\n locs.append(x[-1]+j*dc) # store locations for colormap labels\n\n # Set up labels for colormaps\n ax.xaxis.set_ticks_position('top')\n ticker = mpl.ticker.FixedLocator(locs)\n ax.xaxis.set_major_locator(ticker)\n formatter = mpl.ticker.FixedFormatter([cmap.name for cmap in cmaps])\n ax.xaxis.set_major_formatter(formatter)\n labels = ax.get_xticklabels()\n for label in labels:\n label.set_rotation(60)\n\n if saveplot:\n fig.savefig('figures/lightness.png', bbox_inches='tight')\n fig.savefig('figures/lightness.pdf', bbox_inches='tight')\n\n plt.show()\n\n\ndef plot_gallery(saveplot=False):\n '''Make plot of colormaps and labels, like in the matplotlib\n gallery.\n\n :param saveplot=False: Whether to save the plot or not.\n\n '''\n\n from colorspacious import cspace_converter\n\n # don't have reverse colormaps built in yet\n rgb = tools.print_colormaps([cm.gray], returnrgb=True)\n gcmap = 
tools.cmap(rgb[::-1, :])\n\n gradient = np.linspace(0, 1, 256)\n gradient = np.vstack((gradient, gradient))\n x = np.linspace(0.0, 1.0, 256)\n\n fig, axes = plt.subplots(nrows=len(cmaps), ncols=1, figsize=(6, 12))\n fig.subplots_adjust(top=0.99, bottom=0.01, left=0.2, right=0.99, wspace=0.05)\n # fig.suptitle('Oceanography colormaps', fontsize=16, y=1.0, x=0.6)\n\n for ax, cmap in zip(axes, cmaps):\n\n rgb = cmap(x)[np.newaxis, :, :3]\n\n # Find a good conversion to grayscale\n jch = cspace_converter(\"sRGB1\", \"CAM02-UCS\")(rgb) # Not sure why to use JCh instead so using this.\n # jch = cspace_converter(\"sRGB1\", \"JCh\")(rgb)\n L = jch[0, :, 0]\n # # Get colormap in CIE LAB. We want the L here.\n # lab = color.rgb2lab(rgb)\n # L = lab[0, :, 0]\n L = np.float32(np.vstack((L, L, L)))\n\n ax.imshow(gradient, aspect='auto', cmap=cmap)\n\n pos1 = ax.get_position() # get the original position\n pos2 = [pos1.x0, pos1.y0, pos1.width, pos1.height / 3.0]\n axbw = fig.add_axes(pos2) # colorbar axes\n axbw.set_axis_off()\n axbw.imshow(L, aspect='auto', cmap=gcmap, vmin=0, vmax=100.)\n pos = list(ax.get_position().bounds)\n x_text = pos[0] - 0.01\n y_text = pos[1] + pos[3]/2.\n fig.text(x_text, y_text, cmap.name, va='center', ha='right')\n\n # Turn off *all* ticks & spines, not just the ones with colormaps.\n for ax in axes:\n ax.set_axis_off()\n\n if saveplot:\n fig.savefig('figures/gallery.pdf', bbox_inches='tight')\n fig.savefig('figures/gallery.png', bbox_inches='tight')\n\n plt.show()\n\n\ndef wrap_viscm(cmap, dpi=100, saveplot=False):\n '''Evaluate goodness of colormap using perceptual deltas.\n\n :param cmap: Colormap instance.\n :param dpi=100: dpi for saved image.\n :param saveplot=False: Whether to save the plot or not.\n\n '''\n\n from viscm import viscm\n\n viscm(cmap)\n fig = plt.gcf()\n fig.set_size_inches(22, 10)\n plt.show()\n\n if saveplot:\n fig.savefig('figures/eval_' + cmap.name + '.png', bbox_inches='tight', dpi=dpi)\n fig.savefig('figures/eval_' + cmap.name + '.pdf', bbox_inches='tight', dpi=dpi)\n\n\ndef test(cmap, fig=None, ax=None):\n '''Test colormap by plotting.\n\n :param cmap: A colormap instance. 
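# Illustrative sketch (an aside): a self-contained version of the lightness
# computation used in these plotting functions. It samples a colormap, converts
# sRGB to CAM02-UCS with the same colorspacious call as the surrounding code,
# and keeps the first (J', lightness-like) channel.
import numpy as np
import matplotlib.pyplot as plt
from colorspacious import cspace_converter

def colormap_lightness(cmap, n=256):
    x = np.linspace(0.0, 1.0, n)
    rgb = cmap(x)[np.newaxis, :, :3]  # shape (1, n, 3), sRGB values in [0, 1]
    jab = cspace_converter("sRGB1", "CAM02-UCS")(rgb)
    return jab[0, :, 0]  # J' rises (or falls) monotonically for perceptually uniform maps

# e.g. colormap_lightness(plt.get_cmap('viridis')) increases smoothly with x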
Use a named one with cm.get_cmap(colormap)\n\n '''\n\n from colorspacious import cspace_converter\n\n # indices to step through colormap\n x = np.linspace(0.0, 1.0, 100)\n\n # will plot colormap and lightness\n rgb = cmap(x)[np.newaxis, :, :3]\n # rgb = cm.get_cmap(cmap)(x)[np.newaxis,:,:3]\n lab = cspace_converter(\"sRGB1\", \"CAM02-UCS\")(rgb)\n # lab = color.rgb2lab(rgb)\n\n if ax is None:\n fig = plt.figure()\n ax = fig.add_subplot(111)\n ax.scatter(x, lab[0, :, 0], c=x, cmap=cmap, s=300, linewidths=0.)\n ax.set_title(cmap.name)\n ax.set_ylabel('Lightness')\n ax.set_xticks([])\n\n\ndef quick_plot(cmap, fname=None, fig=None, ax=None, N=10):\n '''Show quick test of a colormap.\n\n '''\n\n x = np.linspace(0, 10, N)\n X, _ = np.meshgrid(x, x)\n\n if ax is None:\n fig = plt.figure()\n ax = fig.add_subplot(111)\n mappable = ax.pcolor(X, cmap=cmap)\n ax.set_title(cmap.name)\n ax.set_xticks([])\n ax.set_yticks([])\n plt.colorbar(mappable)\n plt.show()\n\n if fname is not None:\n plt.savefig(fname + '.png', bbox_inches='tight')\n","sub_path":"cmocean/cmocean/plots.py","file_name":"plots.py","file_ext":"py","file_size_in_byte":6298,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"22095041","text":"from critics import critics as critics\r\nimport math\r\nimport matplotlib.pyplot as plt\r\nimport random\r\n\r\n\r\ndef sim_distance(prefs,person1,person2):\r\n si={}\r\n for item in prefs[person1]:\r\n if item in prefs[person2]:\r\n si[item]=1\r\n\r\n if len(si)==0: return 0\r\n\r\n sum_of_squares = 0\r\n for item in prefs[person1]:\r\n if item in prefs[person2]:\r\n square = pow(prefs[person1][item]-prefs[person2][item], 2)\r\n sum_of_squares = sum_of_squares + square\r\n\r\n distance = math.sqrt(sum_of_squares)\r\n similarity = 1 / (1 + distance)\r\n print('distance:', distance)\r\n print('similarity:', similarity)\r\n\r\n return similarity\r\n\r\n\r\ndef topMatches(prefs, person, n=5, similarity=sim_distance):\r\n scores=[(other, similarity(prefs, person, other)) for other in prefs if other != person]\r\n scores.sort()\r\n scores.reverse()\r\n return scores[0:n]\r\r\n\r\n\r\ndef perfsList(film):\r\n return {pers : critics[pers][film] if film in critics[pers] else 0 for pers in critics}\r\n\r\n\r\ndef getScat(film1, film2):\r\n xDict = perfsList(film1)\r\n yDict = perfsList(film2)\r\n \r\n x = list(xDict.values());\r\n y = list(yDict.values());\r\n\r\n plt.xlabel(film1); plt.ylabel(film2); plt.grid(True);\r\n plt.xticks(range(6))\r\n plt.yticks(range(6))\r\n plt.xlim(-1, 6)\r\n plt.ylim(-1, 6)\r\n plt.scatter(x, y, s=50, alpha=0.5)\r\n\r\n i = 0\r\n for key in xDict.keys():\r\n plt.annotate(s=key, xy=(x[i], y[i]),\r\n xytext=((-1)**len(key)*len(key)*3, (-1)**len(key)*len(key)),\r\n xycoords='data',\r\n arrowprops=dict(arrowstyle=\"->\", connectionstyle=\"arc3, rad=.2\"),\r\n textcoords='offset points') \r\n i += 1\r\n \r\n return plt\r\n\r\n\r\ndef getRecommendations(prefs, person, similarity=sim_distance):\r\n totals = {}\r\n simSums = {}\r\n for other in prefs:\r\n if other == person: continue\r\n\r\n sim = similarity(prefs, person, other)\r\n if sim <= 0: continue\r\n\r\n for item in prefs[other]:\r\n if item not in prefs[person] or prefs[person][item] == 0:\r\n totals.setdefault(item,0)\r\n totals[item] += prefs[other][item] * sim\r\n simSums.setdefault(item,0)\r\n simSums[item] += sim\r\n\r\n rankings = [(total/simSums[item], item) for item, total in totals.items()]\r\n rankings.sort( )\r\n rankings.reverse( )\r\n\r\n return 
rankings\r\n\r\n\"\"\"\r\nr = getRecommendations(critics, 'Toby')\r\nfor i in r:\r\n print(i)\r\n\"\"\"\r\n\r\ns = {\r\n 'A':{\r\n 'x':4,\r\n 'y':1\r\n },\r\n 'B':{\r\n 'x':6,\r\n 'y':3\r\n }\r\n}\r\n\r\nsim_distance(s, 'A', 'B')\r\n\r\n\"\"\"\r\nplt.close('all')\r\np = getScat('Lady in the Water', 'Snakes on a Plane')\r\np.show()\r\n\"\"\"\r\n\r\n\r\n","sub_path":"recommendations.py","file_name":"recommendations.py","file_ext":"py","file_size_in_byte":2787,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"292397036","text":"from enum import Enum, auto\n\nfrom pycram.designators.object_designator import ObjectDesignator\n\n\nOBJECT_HELD_LEFT = None\nOBJECT_HELD_RIGHT = None\n\ndef free_arms():\n if not OBJECT_HELD_LEFT and not OBJECT_HELD_RIGHT:\n return [Arms.RIGHT, Arms.LEFT]\n elif not OBJECT_HELD_LEFT:\n return [Arms.LEFT]\n elif not OBJECT_HELD_RIGHT:\n return [Arms.RIGHT]\n else:\n return []\n\ndef reach_position_generator(target):\n if type(target) is ObjectDesignator:\n if target.prop_value('name') in ['sink_area_left_middle_drawer', 'sink_area_left_upper_drawer']:\n yield [0.3, 0.9, 0], [0,0,0,1]\n yield [0.4, 0.9, 0], [0,0,0,1]\n yield [0.5, 0.9, 0], [0,0,0,1]\n yield [0.6, 0.9, 0], [0,0,0,1]\n elif target.prop_value('name') == 'iai_fridge':\n yield [0.5, -0.4, 0], [0, 0, -0.258819, 0.9659258]\n else:\n yield [0.6, 0.9, 0], [0,0,0,1]\n else:\n yield [-1.8, 1, 0], [0,0,0,1]\n yield [-0.4, 1, 0], [0,0,1,0]\n\ndef container_opening_distance_generator(object_designator):\n if object_designator:\n if object_designator.prop_value('name') == 'iai_fridge':\n yield 1.0\n elif object_designator.prop_value('name') == 'sink_area_left_upper_drawer':\n yield 0.1\n yield 0.25\n yield 0.3\n yield 0.4\n else:\n yield 0.4\n else:\n yield 0.4\n\ndef object_fetching_location_generator(object_designator):\n object_type = object_designator.prop_value('type')\n if object_type == \"spoon\":\n yield ObjectDesignator([('type', 'drawer'), ('name', 'sink_area_left_upper_drawer'), ('part-of', \"kitchen\")])\n elif object_type == \"bowl\":\n yield ObjectDesignator([('type', 'drawer'), ('name', 'sink_area_left_middle_drawer'), ('part-of', \"kitchen\")])\n elif object_type == \"milk\":\n yield ObjectDesignator([('type', 'fridge'), ('name', 'iai_fridge'), ('part-of', \"kitchen\")])\n yield [1.3, 0.8, 0.95] # Location on counter top\n elif object_type == \"cereal\":\n yield [1.3, 0.8, 0.95] # Location on counter top\n else:\n # Otherwise just look everywhere\n yield [1.3, 0.8, 0.95]\n yield ObjectDesignator([('type', 'drawer'), ('name', 'sink_area_left_upper_drawer'), ('part-of', \"kitchen\")])\n yield ObjectDesignator([('type', 'drawer'), ('name', 'sink_area_left_middle_drawer'), ('part-of', \"kitchen\")])\n yield ObjectDesignator([('type', 'fridge'), ('name', 'iai_fridge'), ('part-of', \"kitchen\")])\n\ndef object_placing_location_generator(object_designator, destination):\n if destination == \"kitchen_island_countertop\":\n object_type = object_designator.prop_value(\"type\")\n if object_type == \"milk\":\n yield [-1.15, 1.2, 0.95]\n yield [-1.05, 1.2, 0.95]\n elif object_type == \"cereal\":\n yield [-1.15, 1.0, 0.95]\n yield [-1.05, 1.0, 0.95]\n elif object_type == \"bowl\":\n yield [-1.35, 1.1, 0.95]\n yield [-0.9, 1.1, 0.95]\n elif object_type == \"spoon\":\n yield [-1.35, 0.95, 0.95]\n yield [-0.9, 1.3, 0.95]\n else:\n raise NotImplementedError(\"This is just a hack for now.\")\n\nclass Arms(Enum):\n LEFT = auto()\n RIGHT = auto()\n BOTH = 
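# Illustrative sketch (an aside, not part of the surrounding record): a
# companion to sim_distance in recommendations.py above. Pearson correlation
# similarity over the same prefs structure is less sensitive to systematically
# high or low raters than the Euclidean score; this helper is an addition.
from math import sqrt

def sim_pearson(prefs, p1, p2):
    shared = [item for item in prefs[p1] if item in prefs[p2]]
    n = len(shared)
    if n == 0:
        return 0
    sum1 = sum(prefs[p1][it] for it in shared)
    sum2 = sum(prefs[p2][it] for it in shared)
    sum1_sq = sum(prefs[p1][it] ** 2 for it in shared)
    sum2_sq = sum(prefs[p2][it] ** 2 for it in shared)
    p_sum = sum(prefs[p1][it] * prefs[p2][it] for it in shared)
    num = p_sum - (sum1 * sum2 / n)
    den = sqrt((sum1_sq - sum1 ** 2 / n) * (sum2_sq - sum2 ** 2 / n))
    return 0 if den == 0 else num / den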
auto()\n\nclass ArmConfiguration(Enum):\n PARK = auto()\n CARRY = auto()\n\nclass Grasp(Enum):\n TOP = auto()\n","sub_path":"demos/pycram_tasktree_demo/scripts/pr2_knowledge.py","file_name":"pr2_knowledge.py","file_ext":"py","file_size_in_byte":3430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"236773689","text":"\"\"\"\nFunction implementing the Linear Time Iteration algorithm for solving DSGE models\nin the spirit of P. Rendahl (2017)\nFinal version written by Michal Miktus, April 2019\n\"\"\"\n\nfrom torch import eye, abs, max, gesv, inverse, mm, matrix_power, zeros\n\n\ndef Linear_Time_Iteration(A, B, C, F_initial, mu, epsilon):\n \"\"\"\n This function will find the linear time iteration solution to the system of equations in the form of\n AX(-1) + BX + CE[X(+1)] + epsilon = 0\n with a recursive solution in the form of X = FX(-1) + Q*epsilon\n Parameters\n ----------\n A : torch, array_like, dtype=float\n The matrix of coefficients next to endogenous variables entering with a lag\n B : torch, array_like, dtype=float\n The matrix of coefficients next to endogenous, contemporanous variables\n C : torch, array_like, dtype=float\n The matrix of coefficients next to endogenous variables entering with a lead\n F : torch, array_like, dtype=float\n The initial guess for F\n mu : number, dtype=float\n Small positive real number to be multiplied by a conformable identity matrix\n epsilon : number, dtype=float\n Threshold value, should be set to a small value like 1e-16\n Returns\n -------\n F : torch, array_like, dtype=float\n The matrix of coefficients next to the endogenous variable in the solution\n Q : torch, array_like, dtype=float\n The matrix of coefficients next to the disturbance term in the solution\n Notes\n -----\n \"\"\"\n\n F = F_initial\n S = zeros(*A.shape)\n\n # F.requires_grad_()\n # S.requires_grad_()\n\n Id = eye(*A.shape) * mu\n Ch = C\n Bh = (B + 2 * mm(C, Id))\n Ah = (mm(C, matrix_power(Id, 2)) + mm(B, Id) + A)\n\n metric = 1\n iter = 1\n\n while metric > epsilon:\n if iter % 10000 == 0:\n print(iter)\n F = -gesv(Ah, (Bh + mm(Ch, F)))[0]\n S = -gesv(Ch, (Bh + mm(Ah, S)))[0]\n metric1 = max(abs(Ah + mm(Bh, F) + mm(Ch, (mm(F, F)))))\n metric2 = max(abs(mm(Ah, mm(S, S)) + mm(Bh, S) + Ch))\n metric = max(metric1, metric2)\n iter += 1\n if iter > 1000000:\n break\n\n # eig_F = max(abs(eig(F)[0]))\n # eig_S = max(abs(eig(S)[0]))\n # eig_stable = max(abs(eig(mm(inverse(mm(Ah, F) + Bh), Ah))[0]))\n\n # if (eig_F > 1) or (eig_S > 1) or (mu > 1-eig_S):\n # print('Conditions of Proposition 3 violated')\n\n # if (eig_F > 1) or (eig_stable > 1):\n # print('Conditions for stable and unique solution violated')\n\n F = F + Id\n Q = -inverse(B + mm(C, F))\n\n return F, Q\n","sub_path":"Thesis/Linear_Time_Iteration_PyTorch.py","file_name":"Linear_Time_Iteration_PyTorch.py","file_ext":"py","file_size_in_byte":2579,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"494810067","text":"###############################################################\n# SPDX-License-Identifier: BSD-2-Clause-Patent\n# SPDX-FileCopyrightText: 2020 the prplMesh contributors (see AUTHORS.md)\n# This code is subject to the terms of the BSD+Patent license.\n# See LICENSE file for more details.\n###############################################################\n\nimport os\nimport subprocess\n\nfrom opts import debug, err, opts, status\n\n\nclass Sniffer:\n '''Captures packets on an interface.'''\n def 
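# Illustrative sketch (an aside): how the candidate generators above are meant
# to be consumed -- try each yielded location until one succeeds. `try_location`
# is a hypothetical callable standing in for the robot's actual perception or
# navigation check; it is not defined in pr2_knowledge.py.
def first_reachable_location(object_designator, try_location):
    for location in object_fetching_location_generator(object_designator):
        if try_location(location):
            return location
    return None  # no candidate worked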
__init__(self, interface: str):\n self.interface = interface\n self.tcpdump_proc = None\n\n def start(self, outputfile_basename):\n '''Start tcpdump if enabled by config.'''\n if opts.tcpdump:\n debug(\"Starting tcpdump, output file {}.pcap\".format(outputfile_basename))\n os.makedirs(os.path.join(opts.tcpdump_dir, 'logs'), exist_ok=True)\n outputfile = os.path.join(opts.tcpdump_dir, outputfile_basename) + \".pcap\"\n command = [\"tcpdump\", \"-i\", self.interface, \"-w\", outputfile]\n self.tcpdump_proc = subprocess.Popen(command, stderr=subprocess.PIPE)\n # tcpdump takes a while to start up. Wait for the appropriate output before continuing.\n # poll() so we exit the loop if tcpdump terminates for any reason.\n while not self.tcpdump_proc.poll():\n line = self.tcpdump_proc.stderr.readline()\n debug(line.decode()[:-1]) # strip off newline\n if line.startswith(b\"tcpdump: listening on \" + self.interface.encode()):\n # Make sure it doesn't block due to stderr buffering\n self.tcpdump_proc.stderr.close()\n break\n else:\n err(\"tcpdump terminated\")\n self.tcpdump_proc = None\n\n def stop(self):\n '''Stop tcpdump if it is running.'''\n if self.tcpdump_proc:\n status(\"Terminating tcpdump\")\n self.tcpdump_proc.terminate()\n self.tcpdump_proc = None\n","sub_path":"tests/sniffer.py","file_name":"sniffer.py","file_ext":"py","file_size_in_byte":2025,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"438663146","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\n parameters for clip_board.py\n some junk and unimplemented parms, !! clean up\n unfortunately this is a moving target, will try to keep documentation up to date\n choose_mode is the first method as you may use it most often to change\n modes .... start with new_user_mode, you can then mess with it or copy it\n or other parts of the parameter file to make new modes\n\n\"\"\"\n\nimport logging\nfrom app_global import AppGlobal\nimport os\n\nimport running_on\n\n# ========================================\nclass Parameters( object ):\n \"\"\"\n manages parameter values: use like ini file but it is code\n \"\"\"\n # -------\n def choose_mode( self ):\n \"\"\"\n typically choose one mode and if you wish add the plus_test_mode\n if you comment all out you get the default mode which should\n run, perhaps while limping\n \"\"\"\n self.new_user_mode()\n\n #self.millhouse_1_mode()\n\n #self.russ_1_mode()\n\n # --- add on for testing, as desired\n #self.plus_test_mode()\n\n # -------\n def plus_test_mode( self ):\n \"\"\"\n scratch mode to add tests to other modes\n an add on mode\n \"\"\"\n self.mode = self.mode + \" + test\" # change with care\n\n self.logging_level = logging.DEBUG\n\n self.snippets_fn = [\"./snipsand/snippetts_test.txt\", \"./snipsand/snippetts_example.txt\" ,\"./snipsand/snippetts_1.txt\"]\n #self.snippets_fn = \"./snipsand/snippetts_test.txt\"\n\n # -------\n def new_user_mode( self ):\n \"\"\"\n for a new user to customize, as shipped pretty much a copy of russ_1\n an example mode\n new users should start here for making a mode, you may want to make a copy for reference\n see .default_mode() for some documentation of the variables.\n \"\"\"\n self.mode = \"New_user\"\n\n self.logging_level = logging.DEBUG\n\n # ----- snip or example files\n# self.snip_file_fn = r\"snips_file_test.txt\"\n# self.snip_file_fn = r\"snip_files_nov_18.txt\"\n# self._read_snip_files_( self.snip_file_fn )\n\n # ----- snippets\n self.snippets_fn = \"./snipsand/snippetts_example.txt\"\n self.snippets_sort = 
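# Illustrative sketch (an aside): minimal usage of the Sniffer class above,
# wrapping a test in a capture so a pcap is written when opts.tcpdump is
# enabled. The interface name "br-lan" is an assumption for illustration.
def run_with_capture(test_fn, interface="br-lan"):
    sniffer = Sniffer(interface)
    sniffer.start("test_capture")  # writes <tcpdump_dir>/test_capture.pcap
    try:
        test_fn()
    finally:
        sniffer.stop()  # safe even if tcpdump never started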
True\n\n\n # ------->> More methods: one for each mode\n # -------\n def russ_1_mode( self ):\n \"\"\"\n russ: first mode for theProf -- not documented\n \"\"\"\n self.mode = \"Russ_1\"\n\n self.logging_level = logging.DEBUG\n\n # ----- snip or example files\n\n# self.snippets_fn = [ \"./snipsand/snippetts_1.txt\", \"./snipsand/snippetts_1.txt\" ] # multiple snippet files\n self.snippets_sort = True\n # ================== snippets ============================\n self.snippets_sort = True # sort snippes on key, else in file order\n self.snippets_fn = \"./snipsand/snippetts_1.txt\"\n\n # ================== snips ============================\n self.snip_file_sort = True # sort make them easier to find in the GUI\n\n # next: this is prepended to a snip file prior to opening the file\n # so you can easily keep the snip files in a place you find convient.\n self.snip_file_path = r\"C:\\Russ\\0000\\python00\\python3\\_examples\"\n #\n # path prepended to all snip files\n self.snip_file_fn = \"./snipsand/snip_files_russ.txt\"\n\n self.snip_editor = r\"C:\\apps\\Anaconda3\\Scripts\\thonny.exe\" # editor used for opening snip files\n\n #self.snip_file_command = r\"c:\\apps\\Notepad++\\notepad++.exe\" #russwin10 opens snip files, nice if can run it\n\n # -------\n def millhouse_1_mode( self ):\n \"\"\"\n russ: first mode for millhouse -- not documented\n \"\"\"\n self.mode = \"millhouse_1\"\n\n self.logging_level = logging.DEBUG\n self.logging_level = logging.INFO\n # ----- snip or example files\n\n self.snip_file_fn = \"snip_files_1.txt\"\n self.snip_file_fn = r\"c:\\Russ\\0000\\python00\\python3\\_projects\\clipboard\\Ver3\\snips_file_test.txt\"\n self.snip_file_fn = r\"snips_file_test.txt\"\n #D:\\Russ\\0000\\python00\\python3\\_projects\\clipboard\\Ver3\\snip_files_nov_18.txt\n self.snip_file_fn = r\"snip_files_nov_18.txt\"\n# self.snip_file_fn = [ r\"snip_files_nov_18.txt\", r\"snip_files_nov_18.txt\" ] #\n\n # ----- snippets\n self.snippets_fn = \"./snipsand/snippetts_1.txt\"\n\n # self.snippets_fn = [ \"./snipsand/snippetts_1.txt\", \"./snipsand/snippetts_1.txt\" ]\n self.snippets_sort = True\n\n\n\n # -------\n def running_on_tweaks(self, ):\n \"\"\"\n not a mode, a tweak, see documentation\n use running on tweaks as a more sophisticated version of os_tweaks and computer name tweaks which\n may replace them\n \"\"\"\n computer_id = self.running_on.computer_id\n\n if computer_id == \"smithers\":\n self.win_geometry = '1450x700+20+20' # width x height position\n self.ex_editor = r\"D:\\apps\\Notepad++\\notepad++.exe\"\n self.db_file_name = \"smithers_db.db\"\n\n elif computer_id == \"millhouse\":\n self.ex_editor = r\"C:\\apps\\Notepad++\\notepad++.exe\"\n #self.win_geometry = '1300x600+20+20'\n self.db_file_name = \"millhouse_db.db\"\n\n elif computer_id == \"theprof\":\n self.ex_editor = r\"C:\\apps\\Notepad++\\notepad++.exe\"\n self.db_file_name = \"the_prof_db.db\"\n self.snip_file_path = r\"D:\\Russ\\0000\\python00\\python3\\_examples\"\n self.win_geometry = '1450x700+20+20' # width x height position\n\n elif computer_id == \"bulldog\":\n self.ex_editor = r\"gedit\"\n self.db_file_name = \"bulldog_db.db\"\n\n elif computer_id == \"bulldog-mint-russ\":\n self.ex_editor = r\"xed\"\n self.db_file_name = \"bulldog_db.db\"\n\n else:\n print( f\"In parameters: no special settings for computer_id {computer_id}\" )\n if self.running_on.os_is_win:\n self.ex_editor = r\"C:\\apps\\Notepad++\\notepad++.exe\"\n else:\n self.ex_editor = r\"leafpad\" # linux raspberry pi maybe\n\n # -------\n def 
os_tweaks( self ):\n \"\"\"\n this is an subroutine to tweak the default settings of \"default_mode\"\n for particular operating systems\n you may need to mess with this based on your os setup\n \"\"\"\n if self.os_win:\n pass\n self.icon = r\"./clipboard_b.ico\" # very dark greenhouse this has issues on rasPi\n self.icon = r\"./clipboard_b_red_GGV_icon.ico\" # looks same as clipboard_b_red_gimp.ico\n# self.icon = r\"./clipboard_b_red2.gif\" # looks same as clipboard_b_red_gimp.ico\n self.icon = r\"./clipboard_b_red_gimp.ico\" # pretty visible\n\n #self.icon = None # default gui icon\n\n else:\n pass\n\n # -------\n def computer_name_tweaks( self ):\n \"\"\"\n this is an subroutine to tweak the default settings of \"default_mode\"\n for particular computers. Put in settings for you computer if you wish\n these are for my computers, add what you want ( or nothing ) for your computes\n !! use computer name or id ??\n \"\"\"\n print(self.computername, flush=True)\n\n if self.computername == \"smithers\":\n self.win_geometry = '1250x700+20+20' # width x height position\n self.ex_editor = r\"c:\\apps\\Notepad++\\notepad++.exe\" # russ win 10 smithers\n\n elif self.computername == \"millhouse\":\n self.ex_editor = r\"C:\\apps\\Notepad++\\notepad++.exe\" # russ win 10 millhouse\n self.win_geometry = '1300x700+50+5' # width x height position\n self.pylogging_fn = \"millhouse_clipboard.py_log\" # file name for the python logging\n #self.snip_file_fn = r\"C:\\Russ\\0000\\python00\\python3\\_projects\\clipboard\\Ver3\\snips_file_auto.txt\"\n # need to associate with extension -- say a dict\n self.snip_file_command = r\"C:\\apps\\Notepad++\\notepad++.exe\" #russwin10 !! implement\n\n elif self.computername == \"theprof\":\n self.ex_editor = r\"c:\\apps\\Notepad++\\notepad++.exe\" # russ win 10 smithers\n\n # -------\n def __init__( self, ):\n \"\"\"\n Init for instance, usually not modified, except perhaps debug stuff ( if any )... but use plus_test_mode()\n may be down in listing because it should not be messed with.\n \"\"\"\n AppGlobal.parameters = self # register as a global\n self.default_mode()\n self.running_on_tweaks()\n self.choose_mode()\n\n print( self )\n #print( self ) # for debugging\n\n # ------->> default mode, always call\n # -------\n def default_mode( self ):\n \"\"\"\n sets up pretty much all settings\n documents the meaning of the modes\n call first, then override as necessary\n good chance these settings will at least let the app run\n \"\"\"\n self.mode = \"default\" # name your config, so it will show in app tilte, may be changed later\n\n #--------------- automatic settings -----------------\n self.running_on = running_on.RunningOn\n self.running_on.gather_data()\n\n # some of the next all?? 
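# Illustrative sketch (an aside): adding a mode of your own, following the
# new_user_mode pattern above -- define a method on Parameters, set the values
# you care about, and call it from choose_mode(). The values are placeholders.
def my_laptop_mode(self):
    self.mode = "My_laptop"  # shows up in the app title
    self.logging_level = logging.INFO
    self.snippets_fn = "./snipsand/snippetts_example.txt"
    self.snippets_sort = True
# then in choose_mode(): replace self.new_user_mode() with self.my_laptop_mode()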
should be moved over to RunningOn\n self.running_on.log_me( logger = None, logger_level = 10, print_flag = True )\n self.py_path = self.running_on.py_path # this is the path to the main.py program\n\n self.set_default_path_here = True # to make app location the default path in the app, Think True may always be best.\n # above may be tricky to reset, but we may have the original dir in running on\n # no easy way to ovride this ??\n if self.set_default_path_here: # Now change the directory to location of this file\n\n py_path = self.running_on.py_path\n\n print( f\"Directory: ( >>{os.getcwd()}<< switch if not '' to >>{py_path}<<\")\n if py_path != \"\":\n os.chdir( py_path )\n\n self.our_os = self.running_on.our_os # so we know our os could be \"linux\" or our_os == \"linux2\" \"darwin\"....\n self.os_win = self.running_on.os_win # boolean True if some version of windows\n self.computername = self.running_on.computername # a name of the computer if we can get it\n self.opening_dir = self.running_on.opening_dir # directory where app was opened, not where it resides\n\n self.platform = self.our_os # sometimes it matters which os but this value is redundant\n\n self.icon = r\"clipboard_c.ico\" # icon for running app -- not used in linux\n\n self.id_color = \"blue\" # to id the app - not implemented yet\n\n self.win_geometry = '1500x800+20+20' # size, position of app on opening\n self.win_geometry = '900x600+700+230' # width x height position x, y\n\n self.pylogging_fn = \"clipboard.py_log\" # file name for the python logging\n self.logging_level = logging.DEBUG # logging level DEBUG will log all captured text ! or logging.INFO\n #self.logging_level = logging.INFO\n self.logger_id = \"clip_board\" # id of app in logging file\n\n # ------------- file names -------------------\n\n self.snip_editor = r\"C:\\apps\\Anaconda3\\Scripts\\thonny.exe\" # editor used for opening snip files pick one that ->\n # will open file form command line\n\n # this is the name of a program: its executable with path info.\n # to be used in opening an external editor\n self.ex_editor = r\"D:\\apps\\Notepad++\\notepad++.exe\" # russ win 10\n\n # if we are writing scratch files to run in a shell or similar.\n self.scratch_bat = r\"scratch.bat\" # rel filename\n self.scratch_py = r\"scratch.py\" # rel filename\n\n self.run_py = r\"python.exe\" # program to run *>py commands !! not yet implemented\n\n # extensions of files for text editing\n self.text_extends = [ \".txt\", \".rsh\", \".ino\", \".py\", \".h\" , \".cpp\", \".py_log\", \".log\", ] # include the dot!\n\n # ========================= buttons initial state ======================\n\n #------------------------- default the named check box's see gui.py ---------------\n # not really implemented now... in process\n self.cmd_on = 1 # 1 is checked or on else 0\n self.auto_url_on = 0\n self.star_cmd_on = 0\n self.exe_file_on = 0\n #... not all may be named see gui.py\n\n #------------------------- default the named radio buttons see gui.py ---------------\n self.rb_num_on = 0 # which radio button on, number is not nice, but easy !! is working ???\n #... not all may be named see gui.py\n\n self.include_wiki_buttons = True # experimental flag, leave True\n\n # may not be best for text help file\n # help file can be web ( open with browser ), or txt ( open with self.editor ) or anything else ( will try to shell out may or may not work )\n self.help_file = \"help.txt\" # >>. 
this is the path to our main .py file self.py_path + \"/\" +\n #self.help_file = \"http://www.opencircuits.com/Python_Smart_ClipBoard\" # can be url or a local file -- change for clipboard !!\n self.help_file = \"https://opencircuits.com/index.php?title=ClipBoard_Help_File\"\n self.help_fn = self.help_file # old phase out !!\n\n # ================== snippets ============================\n self.snippets_sort = True # sort snippes on key, else in file order\n self.snippets_fn = \"./snipsand/snippetts_1.txt\" # file name with snippets, can also set as a list of strings\n\n # ================== snips ============================\n self.snip_file_sort = True # sort make them easier to find in the GUI\n\n # next: this is prepended to a snip file prior to opening the file\n # so you can easily keep the snip files in a place you find convient.\n self.snip_file_path = r\"./example_snips\"\n #\n # path prepended to all snip files\n self.snip_file_fn = \"./snipsand/snip_files_example.txt\"\n\n self.snip_file_command = r\"c:\\apps\\Notepad++\\notepad++.exe\" #russwin10 opens snip files, nice if can run it\n\n self.max_history = 9 # !! not implemented -- maybe never\n #---------------------------------------------------\n\n #self.transform = \"off\" #[\"\",\"\",] !! is what\n\n self.poll_delta_t = 200 # how often we poll for clip changes, in ms, think my computer works well as low as 10ms\n\n# -----------------------------------\n def __str__( self, ):\n \"\"\"\n sometimes it is hard to see where values have come out this may help if printed.\n not complete, add as needed -- compare across applications and code above\n \"\"\"\n a_str = f\">>>>>>>>>>* parameters (some) *<<<<<<<<<<<<\"\n a_str = f\"{a_str}\\n mode {self.mode}\"\n\n a_str = f\"{a_str}\\n logger_id {self.logger_id}\"\n a_str = f\"{a_str}\\n logging_level {self.logging_level}\"\n a_str = f\"{a_str}\\n pylogging_fn {self.pylogging_fn}\"\n\n a_str = f\"{a_str}\\n snippets_fn {self.snippets_fn}\"\n a_str = f\"{a_str}\\n snippets_sort {self.snippets_sort}\"\n\n a_str = f\"{a_str}\\n snip_file_fn {self.snip_file_fn}\"\n a_str = f\"{a_str}\\n snip_file_sort {self.snip_file_sort}\"\n a_str = f\"{a_str}\\n snip_file_command {self.snip_file_command}\"\n\n\n a_str = f\"{a_str}\\n snip_editor {self.snip_editor}\"\n a_str = f\"{a_str}\\n scratch_bat {self.scratch_bat}\"\n a_str = f\"{a_str}\\n scratch_py {self.scratch_py}\"\n a_str = f\"{a_str}\\n run_py {self.run_py}\"\n a_str = f\"{a_str}\\n ex_editor {self.ex_editor}\"\n\n a_str = f\"{a_str}\\n scratch_bat {self.scratch_bat}\"\n a_str = f\"{a_str}\\n scratch_py {self.scratch_py}\"\n\n a_str = f\"{a_str}\\n win_geometry {self.win_geometry}\"\n a_str = f\"{a_str}\\n computername {self.computername}\"\n a_str = f\"{a_str}\\n our_os {self.our_os}\"\n a_str = f\"{a_str}\\n and so much more... 
\\n\\n\"\n return a_str\n\n# =================================================\n\nif __name__ == \"__main__\":\n #----- run the full app\n import clip_board\n app = clip_board.App( None, None )\n\n# =================== eof ==============================\n\n\n\n","sub_path":"parameters_hide.py","file_name":"parameters_hide.py","file_ext":"py","file_size_in_byte":17526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"186021084","text":"from ...configuration.configuration import Configuration\nfrom ...exceptions.executorexceptions import CommandExecutionFailure\nfrom ...exceptions.tardisexceptions import TardisError\nfrom ...exceptions.tardisexceptions import TardisResourceStatusUpdateFailed\nfrom ...interfaces.siteadapter import SiteAdapter\nfrom ...interfaces.siteadapter import ResourceStatus\nfrom ...utilities.asynccachemap import AsyncCacheMap\nfrom ...utilities.attributedict import AttributeDict\nfrom ...utilities.staticmapping import StaticMapping\nfrom ...utilities.executors.shellexecutor import ShellExecutor\nfrom ...utilities.utils import htcondor_csv_parser\n\nfrom contextlib import contextmanager\nfrom datetime import datetime\nfrom functools import partial\n\nimport logging\nimport re\n\n\nasync def htcondor_queue_updater(executor):\n attributes = dict(Owner=\"Owner\", JobStatus=\"JobStatus\", ClusterId=\"ClusterId\", ProcId=\"ProcId\")\n attributes_string = \" \".join(attributes.values())\n queue_command = f\"condor_q -af:t {attributes_string}\"\n\n htcondor_queue = {}\n try:\n condor_queue = await executor.run_command(queue_command)\n except CommandExecutionFailure as cf:\n logging.error(f\"htcondor_queue_update failed: {cf}\")\n raise\n else:\n for row in htcondor_csv_parser(htcondor_input=condor_queue.stdout, fieldnames=tuple(attributes.keys()),\n delimiter='\\t', replacements=dict(undefined=None)):\n htcondor_queue[row['ClusterId']] = row\n return htcondor_queue\n\n\nhtcondor_status_codes = {'0': ResourceStatus.Error,\n '1': ResourceStatus.Booting,\n '2': ResourceStatus.Running,\n '3': ResourceStatus.Stopped,\n '4': ResourceStatus.Deleted,\n '5': ResourceStatus.Error,\n '6': ResourceStatus.Error}\n\nhtcondor_translate_resources = {'Cores': 'request_cpus',\n 'Memory': 'request_memory',\n 'Disk': 'request_disk'}\n\nhtcondor_translate_prefix_resources = {'Cores': 1,\n 'Memory': 1024,\n 'Disk': 1024}\n\n\nclass HTCondorAdapter(SiteAdapter):\n def __init__(self, machine_type, site_name):\n self.configuration = getattr(Configuration(), site_name)\n self._machine_type = machine_type\n self._site_name = site_name\n self._executor = getattr(self.configuration, 'executor', ShellExecutor())\n\n key_translator = StaticMapping(remote_resource_uuid='ClusterId', resource_status='JobStatus',\n created='created', updated='updated')\n\n # HTCondor uses digits to indicate job states and digit as variable names are not allowed in Python, therefore\n # the trick using an expanded htcondor_status_code dictionary is necessary. 
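# Illustrative sketch (an aside): the digit-to-status translation described in
# the comment above, reusing the htcondor_status_codes table from this module.
# The sample calls in the comments are assumptions for illustration.
translate_job_status = lambda x, translator=StaticMapping(**htcondor_status_codes): translator[x]
# translate_job_status('2') -> ResourceStatus.Running
# translate_job_status('4') -> ResourceStatus.Deleted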
Somehow ugly.\n translator_functions = StaticMapping(JobStatus=lambda x, translator=StaticMapping(**htcondor_status_codes):\n translator[x])\n\n self.handle_response = partial(self.handle_response, key_translator=key_translator,\n translator_functions=translator_functions)\n\n self._htcondor_queue = AsyncCacheMap(update_coroutine=partial(htcondor_queue_updater, self._executor),\n max_age=self.configuration.max_age * 60)\n\n async def deploy_resource(self, resource_attributes):\n submit_jdl = self.configuration.MachineTypeConfiguration[self._machine_type].jdl\n submit_resources_args = ''\n drone_resources = ''\n for resource in self.machine_meta_data:\n try:\n drone_resource_value = self.machine_meta_data[resource] * htcondor_translate_prefix_resources[resource]\n drone_resources += f';TardisDrone{resource}={drone_resource_value}'\n submit_resources_args += f'-a \"{htcondor_translate_resources[resource]} = {drone_resource_value}\" '\n except KeyError as e:\n logging.error(f\"deploy_resource failed: no translation known for {e}\")\n raise\n submit_command = (\n f'condor_submit '\n f'-append \"environment = TardisDroneUuid={resource_attributes.drone_uuid}{drone_resources}\"'\n f' {submit_resources_args}{submit_jdl}')\n response = await self._executor.run_command(submit_command)\n pattern = re.compile(r\"^.*?(?P\\d+).*?(?P\\d+).$\", flags=re.MULTILINE)\n response = AttributeDict(pattern.search(response.stdout).groupdict())\n response.update(self.create_timestamps())\n return self.handle_response(response)\n\n @property\n def machine_meta_data(self):\n return self.configuration.MachineMetaData[self._machine_type]\n\n @property\n def machine_type(self):\n return self._machine_type\n\n @property\n def site_name(self):\n return self._site_name\n\n async def resource_status(self, resource_attributes):\n await self._htcondor_queue.update_status()\n try:\n resource_status = self._htcondor_queue[resource_attributes.remote_resource_uuid]\n except KeyError:\n # In case the created timestamp is after last update timestamp of the asynccachemap,\n # no decision about the current state can be given, since map is updated asynchronously.\n if (self._htcondor_queue.last_update - resource_attributes.created).total_seconds() < 0:\n raise TardisResourceStatusUpdateFailed\n else:\n return AttributeDict(resource_status=ResourceStatus.Deleted)\n else:\n return self.handle_response(resource_status)\n\n async def stop_resource(self, resource_attributes):\n \"\"\"\"Stopping machines is not supported in HTCondor, therefore terminate is called!\"\"\"\n return await self.terminate_resource(resource_attributes)\n\n async def terminate_resource(self, resource_attributes):\n terminate_command = f\"condor_rm {resource_attributes.remote_resource_uuid}\"\n response = await self._executor.run_command(terminate_command)\n pattern = re.compile(r\"^.*?(?P\\d+).*$\", flags=re.MULTILINE)\n response = AttributeDict(pattern.search(response.stdout).groupdict())\n return self.handle_response(response)\n\n @staticmethod\n def create_timestamps():\n now = datetime.now()\n return AttributeDict(created=now, updated=now)\n\n @contextmanager\n def handle_exceptions(self):\n try:\n yield\n except TardisResourceStatusUpdateFailed:\n raise\n except Exception as ex:\n raise TardisError from ex\n","sub_path":"tardis/adapters/sites/htcondor.py","file_name":"htcondor.py","file_ext":"py","file_size_in_byte":6802,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"560949639","text":"from views import 
Console\nfrom helpers import RunCode\n\n\nclass MemberRegistry(Console):\n \"\"\"View handling the main menu\"\"\"\n\n def main(self, logged_in):\n \"\"\"Displays the main menu. Returns a runcode\"\"\"\n self._clear()\n menu = [(\"Exit program\", RunCode.exit_menu),\n (\"Display list\", RunCode.display_list),\n (\"Display verbose list\", RunCode.display_verbose_list),\n (\"Search member by name\", RunCode.search_member_name),\n (\"Complex search\", RunCode.complex_search)]\n if logged_in:\n menu.append((\"Add member\", RunCode.create))\n else:\n menu.append((\"Log in\", RunCode.login))\n return self._read_choice(menu)","sub_path":"member_registry/views/memberRegistry.py","file_name":"memberRegistry.py","file_ext":"py","file_size_in_byte":728,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"320349224","text":"# -*- coding: utf-8 -*-\r\nfrom flask import Blueprint, render_template, g, request, abort\r\nfrom dataviva.apps.general.views import get_locale\r\nfrom dataviva.translations.dictionary import dictionary\r\nfrom dataviva.apps.title.views import get_title\r\nimport urllib\r\nimport json\r\n\r\nmod = Blueprint('bar', __name__,\r\n template_folder='templates',\r\n url_prefix='//bar',\r\n static_folder='static')\r\n\r\n\r\n@mod.url_value_preprocessor\r\ndef pull_lang_code(endpoint, values):\r\n g.locale = values.pop('lang_code')\r\n\r\n\r\n@mod.url_defaults\r\ndef add_language_code(endpoint, values):\r\n values.setdefault('lang_code', get_locale())\r\n\r\n\r\n@mod.before_request\r\ndef before_request():\r\n g.page_type = mod.name\r\n\r\n\r\ndef location_service(id_ibge):\r\n locations = {\r\n 1: \"region\", #todo\r\n 2: \"state\",\r\n 4: \"mesoregion\",\r\n 5: \"microregion\",\r\n 7: \"municipality\"\r\n }\r\n\r\n return (locations[len(id_ibge)], id_ibge)\r\n\r\n\r\ndef product_service(product):\r\n if len(product) == 2:\r\n return ('product_section', product[:2])\r\n elif len(product) == 4:\r\n return ('product_chapter', product[2:4])\r\n else:\r\n return ('product', product[2:])\r\n\r\ndef wld_service(wld):\r\n if wld.isdigit():\r\n wld = '%03d' % int(wld)\r\n\r\n wlds = {\r\n 2: \"continent\",\r\n 3: \"country\"\r\n }\r\n\r\n return (wlds[len(wld)], wld)\r\n\r\ndef occupation_service(occupation):\r\n occupations = {\r\n 1: \"occupation_group\",\r\n 4: \"occupation_family\"\r\n }\r\n\r\n return (occupations[len(occupation)], occupation)\r\n\r\ndef industry_service(industry):\r\n if len(industry) == 1:\r\n return ('industry_section', industry)\r\n elif len(industry) == 3:\r\n return ('industry_division', industry[1:])\r\n else:\r\n return ('industry_class', industry[1:])\r\n\r\n\r\n@mod.route('///')\r\ndef index(dataset, x, y):\r\n product = request.args.get('product')\r\n id_ibge = request.args.get('id_ibge')\r\n type = request.args.get('type')\r\n wld = request.args.get('wld')\r\n excluded_year= request.args.get('excluded_year')\r\n establishment = request.args.get('establishment')\r\n occupation = request.args.get('occupation')\r\n industry = request.args.get('industry')\r\n counts = request.args.getlist('count')\r\n\r\n title_attrs = {}\r\n\r\n options = request.args.get('options')\r\n subtitle = request.args.get('subtitle', '')\r\n\r\n filters = []\r\n\r\n for count in counts:\r\n filters.append(('count', count))\r\n\r\n if type:\r\n filters.append(('type', type))\r\n title_attrs['type'] = type\r\n\r\n if excluded_year:\r\n filters.append(('year!', excluded_year))\r\n\r\n if request.args.get('filters'):\r\n 
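# Illustrative sketch (an aside): example calls for the id-length dispatch
# helpers defined above in bar/views.py. The sample ids are made up but follow
# the length conventions the helpers assume.
assert location_service('31') == ('state', '31')                   # 2 digits -> state
assert location_service('3106200') == ('municipality', '3106200')  # 7 digits -> municipality
assert wld_service('105') == ('country', '105')                    # 3 digits -> country
assert product_service('1001') == ('product_chapter', '01')        # 4 digits -> chapter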
filters.append(('filters', request.args.get('filters')))\r\n\r\n if wld:\r\n filters.append(wld_service(wld))\r\n title_attrs[wld_service(wld)[0]] = wld_service(wld)[1]\r\n\r\n if occupation:\r\n filters.append(occupation_service(occupation))\r\n title_attrs[occupation_service(occupation)[0]] = occupation_service(occupation)[1]\r\n\r\n if industry:\r\n filters.append(industry_service(industry))\r\n title_attrs[industry_service(industry)[0]] = industry_service(industry)[1]\r\n\r\n if product:\r\n filters.append(product_service(product))\r\n title_attrs[product_service(product)[0]] = product_service(product)[1]\r\n\r\n if id_ibge:\r\n filters.append(location_service(id_ibge))\r\n title_attrs[location_service(id_ibge)[0]] = location_service(id_ibge)[1]\r\n\r\n if establishment:\r\n filters.append(('establishment', establishment))\r\n\r\n filters = urllib.urlencode(filters)\r\n graph_title, graph_subtitle = get_title(dataset, y.split(',')[0], 'bar', title_attrs)\r\n\r\n return render_template('bar/index.html', dataset=dataset, x=x, y=y, filters=filters, options=options,\r\n subtitle=subtitle, graph_title=graph_title or '', graph_subtitle=graph_subtitle or '',\r\n dictionary=json.dumps(dictionary()))\r\n ","sub_path":"dataviva/apps/bar/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4070,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"272209773","text":"import sys\nfrom typing import List, Tuple\n\n\ndef find_similar_ids(ids: List[str]) -> Tuple[str, str, str]:\n pairs = []\n for index_a in range(len(ids)):\n for index_b in range(index_a + 1, len(ids)):\n pairs.append((ids[index_a], ids[index_b]))\n\n possibilities = []\n for id_a, id_b in pairs:\n similarity_count = 0\n for char_a, char_b in zip(id_a, id_b):\n if char_a == char_b:\n similarity_count += 1\n if similarity_count == len(id_a) - 1:\n possibilities.append((id_a, id_b))\n\n if 1 != len(possibilities):\n raise RuntimeError(\"More than one match found\")\n\n id_a, id_b = possibilities[0]\n similarities = \"\"\n for char_a, char_b in zip(id_a, id_b):\n if char_a == char_b:\n similarities += char_a\n\n return similarities, id_a, id_b\n\n\ndef main():\n with open(\"input.txt\", \"r\") as file:\n input_list = [line.strip() for line in file.readlines()]\n\n result = find_similar_ids(input_list)\n print(f\"Result: {result}\")\n\n\ndef tests():\n test_cases = [\n ((\"fgij\", \"fghij\", \"fguij\"), [\"abcde\", \"fghij\", \"klmno\", \"pqrst\", \"fguij\", \"axcye\", \"wvxyz\"])\n ]\n for expected_output, test_input in test_cases:\n output = find_similar_ids(test_input)\n if expected_output != output:\n print(ValueError(f\"Expected {expected_output}, got {output}. 
Input was {test_input}\"))\n else:\n print(\"Test passed!\")\n\n\nif __name__ == '__main__':\n if len(sys.argv) != 2:\n raise RuntimeError(\"Must provide run mode argument\")\n run_mode = sys.argv[1]\n test_mode = \"Test\"\n main_mode = \"Main\"\n if run_mode == test_mode:\n tests()\n elif run_mode == main_mode:\n main()\n else:\n raise RuntimeError(f\"Run run_mode must be either '{test_mode}' or '{main_mode}', was {run_mode}\")\n","sub_path":"AdventOfCode/2018/Day2/part2.py","file_name":"part2.py","file_ext":"py","file_size_in_byte":1865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"57940949","text":"#!/usr/bin/env python\n\nimport os\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom scipy import stats\nimport datetime as dt\nimport matplotlib.dates as mdates\nfrom time import time\n\nfrom sklearn import metrics\nfrom sklearn import preprocessing\nfrom sklearn.preprocessing import StandardScaler, MinMaxScaler\nfrom sklearn.metrics import classification_report\nfrom sklearn.model_selection import train_test_split\n\nfrom scipy import stats\nimport scipy.io\n\nimport pywt\nfrom scipy.fftpack import fft\nfrom collections import Counter\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\nimport matplotlib.patches as patches\nimport matplotlib.gridspec as gridspec\nfrom scipy.signal import welch\n\n# for anomalies\nfrom sklearn.decomposition import PCA\nfrom sklearn.cluster import KMeans\nfrom sklearn.covariance import EllipticEnvelope\nfrom sklearn.ensemble import IsolationForest\nfrom sklearn.svm import OneClassSVM\nfrom sklearn.metrics import mean_squared_error, mean_absolute_error\n\n\n# Helper functions for later\n\ndef add_date_features(df):\n '''\n Function for adding date features to any dataset.\n The function first resets the index considering the Timestamp\n is set as index.\n Adds hour, day, month, week, weekday and daylight features.\n Considers day from 7:00 am to 7:00 Pm.\n '''\n # df.reset_index(inplace=True)\n # take date features from timeseries\n df['hour'] = df['Timestamp'].dt.hour\n df['day'] = df['Timestamp'].dt.day\n df['month'] = df['Timestamp'].dt.month\n df['week'] = df['Timestamp'].dt.week\n df['weekday'] = df['Timestamp'].dt.weekday\n df['daylight'] = ((df['hour'] >= 7) & (df['hour'] <= 19)).astype(int)\n df.set_index('Timestamp', drop=True, inplace=True)\n return df\n\ndef drop_power(df, Powers, value=-9):\n '''\n for dropping the -9 dbm value\n Powers is a dictionary with the names\n of the columns. 
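# Illustrative sketch (an aside): small usage example for add_date_features
# above -- build a toy frame with a Timestamp column and get the calendar
# features back, with Timestamp set as the index.
import pandas as pd

toy = pd.DataFrame({
    'Timestamp': pd.date_range('2018-01-01', periods=4, freq='6H'),
    'RSSI': [-55, -57, -60, -58],
})
toy = add_date_features(toy)
# hour is 0, 6, 12, 18 and daylight is 0, 0, 1, 1 (daylight spans 07:00-19:00)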
Considers the original dataset.\n '''\n for ii in Powers:\n indexx = df[df[ii] == value].index\n df = df.drop(df.index[indexx], axis=0)\n df.reset_index(inplace=True, drop=True)\n return df\n\ndef get_radios(df):\n '''\n Get the names of the radio modules in the base dataframe.\n Returns a list of strings.\n '''\n Radios = df.Module.unique()[:7]\n Radios = np.append(Radios, df.Module.unique()[-1])\n return Radios\n\ndef get_receivers(df):\n '''\n Get the names of the receivers; these names differ from the others\n because they are hexadecimal (0x0xxx).\n '''\n return np.array(df['Receiver'].unique())\n\ndef create_RSSI_dataframe(df_powers, df, plot=True, start_date='2018-01-01', end_date='2018-02-16', raw=True, resample_time='60Min'):\n '''\n This function returns the entire dataset with the RSSI\n arranged by receiver.\n The original dataset is arranged by Neighbours; one Neighbour can have data from\n many different modules with different RSSI levels, which makes timeseries analysis\n harder. This function rearranges the dataset by receiver and therefore makes\n working with timeseries easier.\n\n df_powers: Dataset with RSSI values.\n df: Dataset with other features and module names (this can be improved)\n plot: if plot==True then plots the RSSI data from every receiver\n start_date: start date in case you want to take just a slice\n end_date: ending date for slice\n raw: if raw==True the data is plotted as raw values (only applies to plot)\n resample_time: in case raw==False this is the resample period. Default is 1 hour.\n\n return: dff; the dataset arranged by receivers with the raw values.\n\n '''\n receivers = get_receivers(df_powers)\n radios = get_radios(df)\n\n dff = pd.DataFrame(data=None)\n for i in range(len(receivers)):\n subset, serie, transmitters, Tx = arange_RSSI_serie(df_powers, receiver=i, start_date=start_date, end_date=end_date, plot=False, plot_entire=False)\n serie['Receiver'] = radios[i]\n dff = pd.concat([dff, serie])\n\n if plot==True:\n color=['b','r','m','g','c','orange', 'y', 'grey']\n fig, axes = plt.subplots(nrows=receivers.shape[0], ncols=1, figsize=(24,28), sharey=True, sharex=True)\n fig.suptitle('RSSI data from every receiver radio', x=0.5, y=1.02, fontsize=20)\n for ii, ax in zip(radios, axes):\n if raw == True:\n dff[dff['Receiver']==ii].dropna(axis=1, how='all').plot(ax=ax)\n else:\n dff[dff['Receiver']==ii].dropna(axis=1, how='all').resample(resample_time).mean().plot(ax=ax)\n ax.tick_params(labelrotation=0)\n ax.set_yticks(np.arange(-10, -100, step=-10))\n ax.set_title('RSSI from {}'.format(ii))\n ax.legend(loc='upper right')\n plt.tight_layout()\n\n return dff\n\ndef arange_RSSI_serie(df_powers, receiver=0, start_date='2018-01-01', end_date='2018-02-16', sharex=False, sharey=True, figsize=(24,14), joint=True, plot=True, plot_entire=True):\n '''\n This function returns a dataframe of RSSI data arranged by transmitters.\n Returns all the data transmitted to one single receiver.\n\n df_powers: Dataset with RSSI values.\n receiver = 0: The receiver selected. 
Default is Coordinator.\n    plot: if plot==True then plots the RSSI data from every receiver\n    start_date: start date in case you want to take just a slice\n    end_date: ending date for slice\n    sharex: if sharex==True then all plots share the same x axis; otherwise every graph\n    has its own x axis.\n    joint: if joint==True then all data will be plotted in a single graph\n    plot_entire: if True, also plots the original Neighbour so the difference can be seen.\n\n    return: subset, serie, transmitters, Tx; the RSSI data arranged by transmitters.\n    '''\n\n    Receivers = np.array(df_powers['Receiver'].unique())\n    subset = df_powers[df_powers['Receiver'] == Receivers[receiver]][start_date:end_date]\n    subset = subset.dropna(axis=1, how='all')\n\n    Tx, Ptx = [], []\n    for i in range(int(subset.columns[3:].shape[0]/2)):\n        Tx.append(subset.columns[3:][i*2])\n        Ptx.append(subset.columns[4:][i*2])\n\n    transmitters = []\n    for ii in Tx:\n        for i in range(len(subset[ii].value_counts().index)):\n            transmitters.append(subset[ii].value_counts().index[i])\n    transmitters = pd.Series(data=transmitters)\n    transmitters.drop_duplicates(inplace=True)\n    transmitters = transmitters.values\n    #print(transmitters, Tx, Ptx)\n\n    serie = pd.DataFrame(data=None, index=subset.index)\n    for t in transmitters:\n        dff = pd.DataFrame(data=None)\n        for ii,jj in zip(Tx, Ptx):\n            dff = pd.concat([dff, subset[subset[ii]==t][jj]])\n        dff[t] = dff\n        dff.drop(0, axis=1, inplace=True)\n        serie[t] = dff[t][~dff[t].index.duplicated()]\n\n    if plot==True:\n        if plot_entire == True:\n            plt.figure(figsize=(24,4))\n            subset['P_Tx1(dbm)'].plot()\n            plt.title('RSSI from Neighbour 1 over the entire period for Receiver {}'.format(subset['Receiver'].unique()[0]))\n            plt.xticks(rotation=0);\n            plt.tight_layout()\n\n        color=['b','r','m','g','c','black', 'y', 'grey']\n        if joint==False:\n            fig, axes = plt.subplots(nrows=transmitters.shape[0], ncols=1, figsize=figsize, sharex=sharex, sharey=sharey)\n            plt.xticks(rotation=0)\n            for ii, ax in zip(range(transmitters.shape[0]), axes.flat):\n                serie[transmitters[ii]].plot(ax=ax, color=color[ii], label=('Transmitter '+transmitters[ii]))\n                ax.tick_params(labelrotation=0)\n                ax.legend()\n            plt.tight_layout()\n        else:\n            fig, axes = plt.subplots(nrows=1, ncols=1, figsize=(24,4), sharex=True, sharey=True)\n            plt.xticks(rotation=0)\n            plt.title('RSSI from Transmitters to Rec {}'.format(Receivers[receiver]))\n            for ii in range(transmitters.shape[0]):\n                serie[transmitters[ii]].plot(ax=axes, color=color[ii], label=('Transmitter '+transmitters[ii]))\n            axes.tick_params(labelrotation=0)\n            axes.legend(loc='upper right')\n            plt.tight_layout()\n\n    return subset, serie, transmitters, Tx\n\ndef plot_count_transmitters(dff, subset):\n    plt.figure(figsize=(12,5))\n    plt.title('Amount of Data by Receiver')\n    sns.countplot(x='Receiver', data=dff, palette='Set3')\n    plt.tight_layout()\n\n    Tx, Ptx = [], []\n    for i in range(int(subset.columns[3:].shape[0]/2)):\n        Tx.append(subset.columns[3:][i*2])\n\n    fig, axx = plt.subplots(3,3, figsize=(30,14))\n    fig.delaxes(axx[2,1])\n    fig.delaxes(axx[2,2])\n    plt.suptitle('Transmitters in every Neighbour sending data to Receiver '+ subset['Receiver'].unique()[0], x=0.5, y=1.02, fontsize=20)\n    if len(Tx) < 4:\n        fig.delaxes(axx[2,0]);fig.delaxes(axx[1,2])\n        fig.delaxes(axx[1,1]);fig.delaxes(axx[1,0])\n    if len(Tx) == 4:\n        fig.delaxes(axx[2,0]);fig.delaxes(axx[1,2])\n        fig.delaxes(axx[1,1])\n    if len(Tx) == 5:\n        fig.delaxes(axx[2,0]);fig.delaxes(axx[1,2])\n    if len(Tx) == 6:\n        fig.delaxes(axx[2,0])\n    for ii,ax in zip(Tx, axx.flat):\n        
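# (added comment) one countplot of transmitter IDs per Neighbour column\n        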
sns.countplot(x=ii, data=subset, ax=ax, palette='Set2')\n plt.tight_layout()\n\ndef plot_reciprocal_RSSI(dff):\n RECEIVERS = np.array(df_powers['Receiver'].unique())\n color=['darkblue','c','g','y','r','m','orange', 'b']\n fig, axes = plt.subplots(nrows=RECEIVERS.shape[0], ncols=1, figsize=(24,28), sharey=True, sharex=True)\n fig.suptitle('Reciprocal RSSI measurement of all Radios', x=0.5, y=1.02, fontsize=20)\n for ii, ax in zip(RECEIVERS, axes):\n recipr = dff[['Receiver', ii]]\n rec = []\n for jj, c in zip(RADIOS, color):\n if recipr[recipr['Receiver']== jj].dropna(axis=1, how='all').columns.shape[0] != 1:\n recipr[recipr['Receiver']== jj].plot(ax=ax, color=c)\n ax.tick_params(labelrotation=0)\n ax.set_title('RSSI from Transmitter{}'.format(ii))\n rec.append(jj)\n ax.legend(labels=rec, loc='upper right')\n plt.tight_layout()\n\ndef dist_transmissions(df, receiver, Receivers, ax, start_date ='2018-01-01', end_date='2018-01-10'):\n '''\n Correct way to use it:\n for rec, ax in zip(range(len(RECEIVERS)), axes.flat):\n dist_transmissions(df_powers, rec, RECEIVERS, ax)\n\n Plots a Distribution of the RSSI of Transmitters to Every Receiver\n '''\n subset, serie, transmitters, Tx = arange_RSSI_serie(df, receiver, joint=True, plot=False)\n for i in serie.columns:\n sns.distplot(serie[i].dropna(), kde=False, label=i, bins=20, axlabel='Receiver '+ Receivers[receiver], ax=ax)\n ax.legend()\n ax.set_xticks(np.arange(-10, -100, step=-10))\n plt.tight_layout()\n\ndef boxplot_PowerMod_date(dff, Tx='0x0057FE05', palette='Set3', date_arr=['day','weekday','week','hour'], sharey=True):\n fig, axarr = plt.subplots(nrows=1, ncols=4, figsize=(24,4), sharey=sharey)\n fig.suptitle('Transmitter '+Tx+' over different date frames',y=1.03, fontsize=15)\n #sns.boxplot(x='month',y=Neigh, data=df, ax=axarr[0], palette='Set3')\n sns.boxplot(x=date_arr[0], y=Tx, data=dff, ax=axarr[0], palette='Set3')\n sns.boxplot(x=date_arr[1], y=Tx, data=dff, ax=axarr[1], palette='Set3')\n sns.boxplot(x=date_arr[2], y=Tx, data=dff, ax=axarr[2], palette='Set3')\n sns.boxplot(x=date_arr[3], y=Tx, data=dff, ax=axarr[3], palette='Set3')\n\ndef plot_by_date(dff, by='hour', nrows=2, ncols=4, figsize=(24,7)):\n fig, axes = plt.subplots(nrows=nrows, ncols=ncols, figsize=figsize, sharex=False, sharey=True)\n fig.suptitle('Mean Values by '+ by, x=0.5, y=1.02, fontsize=18)\n for i,ax in zip(dff.columns[1:9], axes.flat):\n ax.set_title('{} by {}'.format(i, by))\n data=dff.groupby(by).mean()[[i]]\n sns.lineplot(data=data, markers=True,ax=ax, err_style='bars')\n #ax.set_xticks(np.arange(0,24,2))\n plt.tight_layout()\n\ndef append_temperatures_toRSSI(df, df_powers, Modules, start_date, end_date, resample_time='30S'):\n '''\n append the temperatures of the receiver radio\n and the temperature from transmitters.\n '''\n subset, serie, transmitters, Tx = arange_RSSI_serie(df_powers, receiver=0,start_date=start_date, end_date=end_date, joint=True, plot=False)\n\n # get receiver name\n for i in Modules:\n if i[-2:] == subset.Receiver.unique()[0][-2:]:\n receiver = i\n\n # get Receiver temperature over entire period\n serie = serie.resample(resample_time).mean()\n resampled_df = df[df['Module']== receiver]['2018-01-01':'2018-01-18'].resample(resample_time).mean()[['Temp_Mod', 'VBus']]\n resampled_df = resampled_df.fillna(resampled_df.bfill())\n serie['Receiver'] = receiver\n serie[['Temp_Rece', 'VBus_Rec']] = resampled_df\n\n # need to do this because of the difference in the names\n mod = []\n for ii in serie.columns:\n last=ii[-2:]\n for i in 
Modules:\n if i[-2:] == last:\n mod.append(i)\n\n # transmitters temperatures\n for tx in mod:\n resampled_df = df[df['Module']== tx]['2018-01-01':'2018-01-18'].resample(resample_time).mean()[['Temp_Mod']]\n resampled_df = resampled_df.fillna(resampled_df.bfill())\n serie[['Temp_' + tx]] = resampled_df\n\n return serie\n","sub_path":"Notebooks/rssi.py","file_name":"rssi.py","file_ext":"py","file_size_in_byte":13340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"247217646","text":"from django.contrib import messages\nfrom django.contrib.messages.views import SuccessMessageMixin\nfrom django.shortcuts import get_object_or_404\nfrom django.views.generic.detail import DetailView\nfrom django.views.generic.list import ListView\nfrom django.views.generic.edit import CreateView, UpdateView, DeleteView\nfrom django.core.urlresolvers import reverse_lazy\nfrom .models import Company, Base, Product\nfrom .forms import (BaseForm, ProductUpdateForm, ProductCreateForm, \n\t\t\t\t\tCompanyCreateForm, CompanyUpdateForm, ImageUpdateForm)\nfrom utils.permissions import (SuperUserCheckMixin, ManagerCheckMixin,\n\t\t\t\t\t\t\t WarehouseAndManagerCheckMixin)\n \n# 1\nclass CompanyMixin(ManagerCheckMixin):\n\tmodel = Company\n\n\nclass CompanyAuthMixin(CompanyMixin, SuccessMessageMixin):\n\ttemplate_name = \"form.html\"\n\tsuccess_url = reverse_lazy('product:company_list')\n\n\nclass CompanyDetail(CompanyMixin, DetailView):\n\ttemplate_name = \"product/company_detail.html\"\n\n\nclass CompanyList(CompanyMixin, ListView):\n\ttemplate_name = \"product/company_list.html\"\n\n\tdef get_context_data(self, **kwargs):\n\t\tcontext = super(CompanyList, self).get_context_data(**kwargs)\n\t\tcontext['title'] = \"Suppliers\"\n\t\treturn context\n\n\nclass CompanyCreate(CompanyAuthMixin, CreateView):\n\tsuccess_message = \"%(company)s was successfully created\"\n\tform_class = CompanyCreateForm\n\n\tdef get_context_data(self, **kwargs):\n\t\tcontext = super(CompanyCreate, self).get_context_data(**kwargs)\n\t\tcontext['title'] = \"Supplier Create\"\n\t\treturn context\n\n\tdef get_success_message(self, cleaned_data):\n\t\treturn self.success_message % dict(\n\t\t\tcleaned_data,\n\t\t\tcompany=self.object.company,\n\t\t)\n\n\nclass CompanyUpdate(CompanyAuthMixin, UpdateView):\n\tsuccess_message = \"%(company)s was successfully updated\"\n\tform_class = CompanyUpdateForm\n\n\tdef get_context_data(self, **kwargs):\n\t\tcontext = super(CompanyUpdate, self).get_context_data(**kwargs)\n\t\tcontext['title'] = \"Supplier Update\"\n\t\treturn context\n\n\tdef get_success_message(self, cleaned_data):\n\t\treturn self.success_message % dict(\n\t\t\tcleaned_data,\n\t\t\tcompany=self.object.company,\n\t\t)\n\n\nclass CompanyDelete(SuperUserCheckMixin, DeleteView):\n\tmodel = Company\n\ttemplate_name = \"delete.html\"\n\tsuccess_url = reverse_lazy('product:company_list')\n\tsuccess_message = \"Company was successfully deleted\"\n\n\tdef delete(self, request, *args, **kwargs):\n\t\tmessages.success(self.request, self.success_message)\n\t\treturn super(CompanyDelete, self).delete(request, *args, **kwargs)\n\n# 2\nclass BaseMixin(WarehouseAndManagerCheckMixin, SuccessMessageMixin):\n\tmodel = Base\n\ttemplate_name = \"form.html\"\n\tform_class = BaseForm\n\tsuccess_url = reverse_lazy('product:base_product_list')\n\n\nclass BaseTypeDetail(WarehouseAndManagerCheckMixin, DetailView):\n\tmodel = Base\n\ttemplate_name = \"product/base_detail.html\"\n\n\nclass 
BaseTypeList(WarehouseAndManagerCheckMixin, ListView):\n\tmodel = Base\n\ttemplate_name = \"product/base_list.html\"\n\n\tdef get_context_data(self, **kwargs):\n\t\tcontext = super(BaseTypeList, self).get_context_data(**kwargs)\n\t\tcontext['title'] = \"Product Categories\"\n\t\treturn context\n\n\nclass BaseTypeCreate(BaseMixin, CreateView):\n\tsuccess_message = \"%(base)s was successfully created\"\n\n\tdef get_context_data(self, **kwargs):\n\t\tcontext = super(BaseTypeCreate, self).get_context_data(**kwargs)\n\t\tcontext['title'] = \"Product Category Create\"\n\t\treturn context\n\n\tdef get_success_message(self, cleaned_data):\n\t\treturn self.success_message % dict(\n\t\t\tcleaned_data,\n\t\t\tbase=self.object.category,\n\t\t)\n\n\nclass BaseTypeUpdate(BaseMixin, UpdateView):\n\tsuccess_message = \"%(base)s was successfully updated\"\n\n\tdef get_context_data(self, **kwargs):\n\t\tcontext = super(BaseTypeUpdate, self).get_context_data(**kwargs)\n\t\tcontext['title'] = \"Product Category Update\"\n\t\treturn context\n\n\tdef get_success_message(self, cleaned_data):\n\t\treturn self.success_message % dict(\n\t\t\tcleaned_data,\n\t\t\tbase=self.object.category,\n\t\t)\n\n\nclass BaseTypeDelete(SuperUserCheckMixin, DeleteView):\n\tmodel = Base\n\ttemplate_name = \"delete.html\"\n\tsuccess_url = reverse_lazy('product:base_product_list')\n\tsuccess_message = \"Category was successfully deleted\"\n\n\tdef delete(self, request, *args, **kwargs):\n\t\tmessages.success(self.request, self.success_message)\n\t\treturn super(BaseTypeDelete, self).delete(request, *args, **kwargs)\n\n# 3\nclass ItemCreateUpdateMixin(WarehouseAndManagerCheckMixin,\n\t\t\t\t\t\t\tSuccessMessageMixin):\n\tmodel = Product\n\tsuccess_url = reverse_lazy('product:item_list')\n\ttemplate_name = \"form.html\"\n\n\nclass ItemDetail(WarehouseAndManagerCheckMixin, DetailView):\n\tmodel = Product\n\ttemplate_name = \"product/item_detail.html\"\n\n\nclass ItemCreate(ItemCreateUpdateMixin, CreateView):\n\tsuccess_message = \"%(item)s was successfully created\"\n\tform_class = ProductCreateForm\n\n\tdef get_context_data(self, **kwargs):\n\t\tcontext = super(ItemCreate, self).get_context_data(**kwargs)\n\t\tcontext['title'] = \"Product Create\"\n\t\treturn context\n\n\tdef get_success_message(self, cleaned_data):\n\t\treturn self.success_message % dict(\n\t\t\tcleaned_data,\n\t\t\titem=self.object.item,\n\t\t)\n\n\nclass BaseItemCreate(ItemCreateUpdateMixin, CreateView):\n\tform_class = ProductCreateForm\n\n\tdef get_initial(self):\n\t\tbase = get_object_or_404(Base, slug=self.args[0])\n\t\treturn {\n\t\t\t'base':base\n\t\t}\n\n\nclass SupplierItemCreate(ItemCreateUpdateMixin, CreateView):\n\tform_class = ProductCreateForm\n\n\tdef get_initial(self):\n\t\tsupplier = get_object_or_404(Company, slug=self.args[0])\n\t\treturn {\n\t\t\t'supplier':supplier\n\t\t}\n\n\nclass ItemUpdate(ItemCreateUpdateMixin, UpdateView):\n\tsuccess_message = \"%(item)s was successfully updated\"\n\tform_class = ProductUpdateForm\n\n\tdef get_context_data(self, **kwargs):\n\t\tcontext = super(ItemUpdate, self).get_context_data(**kwargs)\n\t\tcontext['title'] = \"Product Update\"\n\t\treturn context\n\n\tdef get_success_message(self, cleaned_data):\n\t\treturn self.success_message % dict(\n\t\t\tcleaned_data,\n\t\t\titem=self.object.item,\n\t\t)\n\n\nclass ItemList(WarehouseAndManagerCheckMixin, ListView):\n\tmodel = Product\n\ttemplate_name = \"product/item_list.html\"\n\n\tdef get_context_data(self, **kwargs):\n\t\tcontext = super(ItemList, 
self).get_context_data(**kwargs)\n\t\tcontext['title'] = \"All Products\"\n\t\treturn context\n\n\nclass ItemCategoryList(ItemList):\n\n\tdef get_queryset(self):\n\t\tself.category = get_object_or_404(Base, slug=self.args[0])\n\t\treturn Product.objects.filter(base=self.category)\n\n\tdef get_context_data(self, **kwargs):\n\t\tcontext = super(ItemCategoryList, self).get_context_data(**kwargs)\n\t\tcontext['title'] = \"Products in {}\". format(self.category)\n\t\treturn context\n\n\nclass ItemSupplierList(ItemList):\n\n\tdef get_queryset(self):\n\t\tself.company = get_object_or_404(Company, slug=self.args[0])\n\t\treturn Product.objects.filter(supplier=self.company)\n\n\tdef get_context_data(self, **kwargs):\n\t\tcontext = super(ItemSupplierList, self).get_context_data(**kwargs)\n\t\tcontext['title'] = \"Products from {}\". format(self.company)\n\t\treturn context\n\n\nclass ItemDelete(SuperUserCheckMixin, DeleteView):\n\tmodel = Product\n\ttemplate_name = \"delete.html\"\n\tsuccess_url = reverse_lazy('product:item_list')\n\tsuccess_message = \"Item was successfully deleted\"\n\n\tdef delete(self, request, *args, **kwargs):\n\t\tmessages.success(self.request, self.success_message)\n\t\treturn super(ItemDelete, self).delete(request, *args, **kwargs)\n\n\nclass ImageUpdate(ItemCreateUpdateMixin, UpdateView):\n\tsuccess_message = \"%(item)s was successfully updated\"\n\tform_class = ImageUpdateForm\n\n\tdef get_context_data(self, **kwargs):\n\t\tcontext = super(ImageUpdate, self).get_context_data(**kwargs)\n\t\tcontext['title'] = \"Image Update\"\n\t\treturn context\n\n\tdef get_success_message(self, cleaned_data):\n\t\treturn self.success_message % dict(\n\t\t\tcleaned_data,\n\t\t\titem=self.object.item + \" image\"\n\t\t)\n\n\nclass ImageDetail(WarehouseAndManagerCheckMixin, DetailView):\n\tmodel = Product\n\ttemplate_name = \"product/image_detail.html\"","sub_path":"source/stock/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"271650588","text":"\nimport sys\n\narchmagic = {\n    'i386': 3, 3: 'i386' ,\n    'x86_64': 62, 62: 'x86_64',\n}\n\ndef hash_djb2(s):\n    h = 5381\n    for c in s:\n        h = (h * 33 + ord(c)) & 0xFFFFFFFF\n    return h\n\ndef eprintf(*args, **kwargs): print(*args, file=sys.stderr, **kwargs)\n\n","sub_path":"smolshared.py","file_name":"smolshared.py","file_ext":"py","file_size_in_byte":269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"426288725","text":"import numpy as np\nimport pandas as pd\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.metrics import mean_absolute_error\nimport matplotlib.pyplot as plt\nfrom math import ceil\nimport timeit\n\nstart = timeit.default_timer() #to measure runtime\n\ntime_slice = 2; #in seconds\nnum_dfs_tot = 0\nsec_to_cut = 15\n\n#to read multiple CSVs automatically\nimport glob\npath = r'C:\\Desktop stuff\\university\\camera captures\\hik pcaps n CSVs' \nallFolderPaths = glob.glob(path + \"/*\")\ntime_sliced = pd.DataFrame()\nnum_labels = len(allFolderPaths)\nlabels_dfs = [0]*num_labels\n\n\n#read all CSVs for label i, and cut 15 sec of start and end of each video.\nfor i in range(num_labels) :\n    labels_dfs[i] = [pd.read_csv(f) for f in glob.glob(allFolderPaths[i] + \"/*.csv\")]\n    num_dfs_tot += len(labels_dfs[i])\n    \n    for j in range(len(labels_dfs[i])) :\n        labels_dfs[i][j].drop(['Source' , 'Destination' , 
'Protocol' , 'Info' , 'No.'], axis=1, inplace=True)\n        labels_dfs[i][j] = labels_dfs[i][j][labels_dfs[i][j].Time > sec_to_cut]\n        labels_dfs[i][j] = labels_dfs[i][j][labels_dfs[i][j].Time < labels_dfs[i][j][\"Time\"].iloc[-1] - sec_to_cut]\n        labels_dfs[i][j]['Time'] -= sec_to_cut\n        labels_dfs[i][j]['Label'] = i\n        labels_dfs[i][j]['vidNum'] = j\n        \n        df = labels_dfs[i][j] \n        df['TimeSlice'] = (np.array(labels_dfs[i][j]['Time'])/time_slice).astype(int)\n        df = df.groupby('TimeSlice',axis=0 ,sort=False).agg(\n                {'Time' : ['count', 'std', 'mean'],\n                'Length' : ['mean', 'sum', 'std'],\n                'Label' : 'first',\n                'vidNum' : 'first'}).fillna(0) \n        time_sliced = time_sliced.append(df)\n\ntime_sliced.columns = [\"_\".join(x) for x in df.columns.ravel()]\n\ntwenty_per_in_dfs = round(0.2*(time_sliced[time_sliced['Label_first'] == \n                    0]['vidNum_first'].iloc[-1] +1)).astype(int)\nstop = timeit.default_timer() #to measure runtime\nprint('\\n\\nRunTime:', stop - start, 'sec')\n\ntrain_data = []\ntest_data = []\n#for i in range(len(time_sliced)) :\n    \n#    single_label_df = final_df[final_df.label == i]\ntrain_data.append(time_sliced[time_sliced['vidNum_first'] == 0]) #first 80% of data\ntrain_data.append(time_sliced[time_sliced['vidNum_first'] == 2]) #first 80% of data\ntest_data.append(time_sliced[time_sliced['vidNum_first'] == 1]) #last 20% of data\n\n\n# TODO: how to save vidNum numbers?\n#for i in range(num_labels):\n#    for j in range(len(labels_dfs[i])): \n#        single_label_df = final_df[final_df.label == i]\n#        vidNum_max = max(single_label_df['vidNum'])\n#        train_data.append(single_label_df[single_label_df['vidNum']>=0 and single_label_df['vidNum']<=vidNum_max-1]) #first 80% of data\n#        test_data.append(single_label_df[single_label_df['vidNum']==vidNum_max]) #last 20% of data\n\ntrain_data = pd.concat(train_data, axis=0) #first 80% of data for label i\ntest_data = pd.concat(test_data, axis=0) #last 20% of data for label i\ntrain_data.drop('vidNum_first', axis=1, inplace=True)\ntest_data.drop('vidNum_first', axis=1, inplace=True)\n\n\n#train_data = train_data.sample(frac=1).reset_index(drop=True) #mix the data!!!\n#test_data = test_data.sample(frac=1).reset_index(drop=True) #mix the data!!!\ntrain_labels = train_data['Label_first'] #get labels\ntest_labels = test_data['Label_first'] #get labels\ntrain_data = train_data.drop('Label_first',axis=1) #remove labels from data\ntest_data = test_data.drop('Label_first',axis=1) #remove labels from data \n    \n\n\n\n\n\n\n\n\nclf = RandomForestClassifier(n_estimators = 100)\nclf.fit(train_data ,train_labels)\npred = clf.predict(test_data)\n\nacc_score = \"{:.2f}\".format(accuracy_score(test_labels , pred))\nmean_absolute_err = \"{:.3f}\".format(mean_absolute_error(test_labels , pred))\nprint('time_slice:', time_slice, 'sec')\nprint ('accuracy_score:',acc_score)\nprint ('mean_absolute_error:', mean_absolute_err)\n\n#TODO: AUC ROC curve \n\n\n\n\n\n\n\n\n\n\n\n#%%\n\nfeatures = list(train_data.columns.values)\nimportances = clf.feature_importances_\nindices = np.argsort(importances)\n\nplt.figure(2)\nplt.title('Feature Importances')\nplt.barh(range(len(indices)), importances[indices], color='b', align='center')\nplt.yticks(range(len(indices)), [features[i] for i in indices])\nplt.xlabel('Relative Importance')\nplt.show()\n\n\n#%%\n\nimport itertools\nfrom sklearn.metrics import confusion_matrix\n\ndef plot_confusion_matrix(true_class, prediction,\n                          normalize=True,\n                          title='Confusion matrix',\n                          cmap=plt.cm.Blues):\n    \"\"\"\n    This function prints and plots the confusion matrix.\n    
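Illustrative call with made-up labels: plot_confusion_matrix([0, 1, 1, 0], [0, 1, 0, 0]) draws the normalized 2x2 matrix.\n    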
Normalization can be applied by setting `normalize=True`.\n \"\"\"\n cm = confusion_matrix(true_class, prediction) # HERE YA VICTOR\n classes = (np.sort(np.unique(true_class)))\n \n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n #print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n # print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n #plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n \n plt.show()\n \n return\n\n\nplt.figure(1)\nplot_confusion_matrix(test_labels, pred)","sub_path":"TimeSlices analysis fast + bitrate graphs + create pred vid/predict single timeslice with confusion mat and feature importance.py","file_name":"predict single timeslice with confusion mat and feature importance.py","file_ext":"py","file_size_in_byte":5770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"135173579","text":"import config\nimport random\n\ntestcases_files = []\n\nfor n in config.n_range:\n for k in config.k_possible_values:\n for i in range(config.number_of_testcases):\n testcase = \"{} {}\\n\".format(n, k)\n for j in range(n):\n testcase += str(random.randint(0, config.s_range)) + \" \"\n testcase += \"\\n\"\n testcase_filename = \"testcase_\" + str(len(testcases_files) + 1)\n file = open(testcase_filename, \"w\")\n file.write(testcase)\n file.close()\n testcases_files.append(testcase_filename)\n\nfor testcase in testcases_files:\n print(testcase)\n","sub_path":"repository/nonDivisibleSubset/testgenerator/testcaseGenerator.py","file_name":"testcaseGenerator.py","file_ext":"py","file_size_in_byte":636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"200457000","text":"from django.forms import ModelForm, Textarea\nfrom django import forms\nfrom .models import Listings, Categories, Bid\nfrom django.utils.translation import gettext_lazy as _\nfrom django.core.exceptions import ValidationError\n\n\nclass ListingsForm(forms.ModelForm):\n class Meta:\n model = Listings\n fields = \"__all__\"\n labels={\n 'start_bid':_('Starting Price'),\n 'image':_('URL')\n }\n widgets={\n 'description': Textarea(attrs={'cols':40, 'rows':5})\n }\n\nclass BidForm(forms.ModelForm):\n class Meta:\n model = Bid\n fields = [\"current_bid\"]\n labels={\n 'current_bid':_('Value Offer')\n }\n error_messages = {\n 'current_bid':{\n 'blank':'Sorry, cannot be blank',\n 'null':'No null allowed'\n\n },\n }\n\n ","sub_path":"auctions/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":870,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"54055529","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.8 (3413)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build\\bdist.win32\\egg\\elastic3rd\\post\\post.py\n# Compiled at: 2020-01-13 11:01:54\n# Size of source mod 2**32: 6306 bytes\nimport numpy as np\nimport elastic3rd.post.esfit as esfit\nimport 
elastic3rd.symmetry.symmetry as essym\nimport elastic3rd.esutils as esutils\nimport math\n\ndef get_cij(coef_fit, coef2, coef3, flag_se):\n flag_se = flag_se.lower()\n if flag_se == 'e':\n C3 = np.linalg.solve(coef3, 6.0 * coef_fit[:, 1])\n C2 = np.linalg.lstsq(coef2, (2.0 * coef_fit[:, 0]), rcond=None)[0]\n else:\n if flag_se == 's':\n pass\n return (\n C2, C3)\n\n\ndef get_cijall(coef_fit, coef, Ord=3, flag_se='e'):\n C = CCOEF(Ord)\n for i in range(2, int(Ord + 1)):\n a = 0\n if flag_se == 'e':\n a = math.factorial(i)\n else:\n if flag_se == 's':\n pass\n exec('NumCoef = coef.coef' + str(i) + '.shape[0]')\n exec('RankCoef = np.linalg.matrix_rank(coef.coef' + str(i) + ')')\n if NumCoef == RankCoef:\n exec('C.C' + str(int(i)) + '= np.linalg.solve(coef.coef' + str(i) + ', a*coef_fit[:, i - 2])')\n else:\n exec('C.C' + str(int(i)) + '= np.linalg.lstsq(coef.coef' + str(i) + ', a*coef_fit[:, i - 2], rcond = None)[0]')\n else:\n return C\n\n\ndef get_cij_2nd(coef_2nd, coef2):\n Cij2 = np.linalg.lstsq(coef2, (2.0 * coef_2nd), rcond=None)[0]\n return Cij2\n\n\ndef get_coef_2nd(s, e, V0):\n eVpmol2GPa = 160.21719175\n m, n = e.shape\n coef_2nd = np.zeros((n, 1))\n for i in range(0, n):\n e0 = e[int((m - 1) / 2)][i]\n ei = e[:, i]\n ei = (ei - e0) / V0 * eVpmol2GPa\n coefi, pcovi = esfit.esfit_2nd(s, ei)\n coef_2nd[i] = coefi\n else:\n return coef_2nd\n\n\ndef get_coef(s, e, V0, flag_se, flag):\n flag_se = flag_se.lower()\n eVpmol2GPa = 160.21719175\n m, n = e.shape\n if flag > 3:\n coef_fit = np.zeros((n, flag - 1))\n else:\n coef_fit = np.zeros((n, 2))\n n_d = int((m - 1) / 2)\n for i in range(0, n):\n e0 = e[n_d][i]\n ei = e[:, i]\n if flag_se == 'e':\n ei = (ei - e0) / V0 * eVpmol2GPa\n else:\n if flag_se == 's':\n pass\n if flag > 2:\n coefi, pcovi = esfit.esfit(s, ei, flag_se, flag)\n else:\n s2 = s\n s2[n_d] = 1\n if flag == 1:\n e2 = ei / s2 / s2\n s2 = np.delete(s2, n_d)\n e2 = np.delete(e2, n_d)\n coefi, pcovi = esfit.esfit(s2, e2, flag_se, flag)\n else:\n if flag == 2:\n e2 = ei / s2\n s2[n_d] = 0\n coefi, pcovi = esfit.esfit(s2, e2, flag_se, flag)\n coef_fit[i, :] = coefi\n else:\n return coef_fit\n\n\ndef read_e(EEnergy='EEnergy.txt'):\n e = np.loadtxt(EEnergy)\n return e\n\n\ndef escoef(CrystalType, Ord):\n if Ord == 3:\n coef3, StrainMode, coef2 = essym.gen_strain_mode(CrystalType, Ord)\n return (coef3, coef2, StrainMode)\n if Ord == 2:\n coef2, StrainMode = essym.gen_strain_mode(CrystalType, Ord)\n return coef2\n\n\ndef post_mode(V0, Flag_Fig=1, Flag_Ord=3, EEnergy='EEnergy.txt', INPUT='INPUT', STRAINMODE='STRAINMODE'):\n StrainIn = esutils.read_strainmode(STRAINMODE)\n CrystalType, Ord, flag_se, StrainList = get_post_param(INPUT)\n E = read_e(EEnergy)\n C2, C3 = post_single(StrainList / 100.0, E, StrainIn, V0, Flag_Fig, Flag_Ord, INPUT)\n return (C2, C3)\n\n\ndef post_single(x, E, StrainIn, V0, Flag_Fig=1, Flag_Ord=3, INPUT='INPUT'):\n CrystalType, Ord, flag_se, StrainList = get_post_param(INPUT)\n if flag_se == 'e':\n Cij_mode, coef_e, StrainMode = essym.CoefForSingleMode(CrystalType, Ord, StrainIn)\n else:\n if flag_se == 's':\n pass\n coef_fit = get_coef(x, E, V0, flag_se, Flag_Ord)\n if Flag_Fig == 1:\n esfit.multiesplot(x, E, coef_fit, flag_se, Flag_Ord, V0)\n else:\n coef2 = coef_e.coef2\n coef3 = coef_e.coef3\n n = coef3.shape[1]\n SMRank = np.linalg.matrix_rank(coef3)\n if SMRank < n:\n print(SMRank)\n print(n)\n C2 = np.zeros((1, coef2.shape[1]))\n C3 = np.zeros((1, n))\n else:\n C2, C3 = get_cij(coef_fit, coef2, coef3, flag_se)\n return (\n C2, C3)\n\n\ndef post(V0, 
Flag_Fig=1, Flag_Ord=3, EEnergy='EEnergy.txt', INPUT='INPUT'):\n CrystalType, Ord, flag_se, StrainList = get_post_param(INPUT)\n E = read_e(EEnergy)\n if flag_se == 'e':\n coef_e, StrainMode = essym.gen_strain_mode(CrystalType, Ord)\n else:\n if flag_se == 's':\n pass\n coef_fit = get_coef(StrainList / 100.0, E, V0, flag_se, Flag_Ord)\n coef2 = coef_e.coef2\n coef3 = coef_e.coef3\n C2, C3 = get_cij(coef_fit, coef2, coef3, flag_se)\n if Flag_Fig == 1:\n esfit.multiesplot(StrainList / 100.0, E, coef_fit, flag_se, Flag_Ord, V0)\n return (\n C2, C3)\n\n\ndef get_post_param(INPUT='INPUT'):\n ParaIn = esutils.read_input(INPUT)\n flag_se = ParaIn['FlagSE'].lower()\n CrystalType = ParaIn['CrystalType']\n Ord = ParaIn['Ord']\n StrainList = esutils.gen_strain_list(ParaIn)\n return (CrystalType, Ord, flag_se, StrainList)\n\n\nclass CCOEF:\n __doc__ = 'This is the structure for coefficients. The attaches is coef + i, \\n where i is 2 to Ord\\n Take Ord = 3 as an example, there are two attaches, coef2 and coef3'\n\n def __init__(self, Ord=3):\n for i in range(2, int(Ord) + 1):\n exec('self.C' + str(i) + ' = []')","sub_path":"pycfiles/ELASTIC3rd-2.4.2-py3.8/post.cpython-38.py","file_name":"post.cpython-38.py","file_ext":"py","file_size_in_byte":5667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"449180703","text":"'''\nSimple template rendering a molecule originating from a PDB file.\n\nThe animation demonstrates the molecule both rotating and traversing on a\ntrajectory using the move_to and rotate methods.\n\nNOTE: also shows how to do prototyping using multithreading; see the\nprototype.ini configuration file.\n\nUses a number of pre-defined Povray objects to simplify scene building\n'''\n\n__author__ = \"Marcel Kempenaar\"\n__status__ = \"Template\"\n__version__ = \"2017.3\"\n\nimport math\nimport copy\nfrom pypovray import pypovray, pdb, load_config, models, logger\nfrom vapory.vapory import Scene, LightSource, Sphere\n\nETHANOL = RAD_PER_SCENE = FRONT_LIGHT = TRACER = None\n\n\ndef scene_objects():\n \"\"\" Creates molecule objects and any other pre-calculated data \"\"\"\n global ETHANOL, RAD_PER_SCENE, FRONT_LIGHT, TRACER\n\n FRONT_LIGHT = LightSource([0, 14, -28], 'color', [1, 0.8, 0.4],\n 'fade_distance', 6, 'fade_power', 2,\n 'area_light', 3, 3, 12, 12,\n 'circular orient adaptive', 0)\n\n # Calculate the radians per scene (one full rotation)\n RAD_PER_SCENE = (math.pi / eval(pypovray.SETTINGS.NumberFrames)) * 2\n # Create a list holding the 'tracing' spheres that are drawn as the molecule moves\n TRACER = []\n # Read in a PDB file and construct a molecule\n ETHANOL = pdb.PDBMolecule('pdb/ethanol.pdb', center=False, offset=[-10, 8, -5])\n\n\ndef frame(step):\n \"\"\" Returns the scene at step number (1 step per frame) \"\"\"\n logger.info(\"@ Step %d\", step)\n\n # The Ethanol molecule is moved on a trajectory representing a 'figure 8' or the infinity\n # symbol by calculating the x- and y-coordinates using the lemniscate of Bernoulli.\n alpha = 9\n scale = alpha * math.sqrt(2)\n radians = step*RAD_PER_SCENE\n\n x = scale * math.cos(radians) / \\\n (math.sin(radians) ** 2 + 1)\n\n y = scale * math.cos(radians) * \\\n math.sin(radians) / \\\n (math.sin(radians)**2 + 1)\n\n # Draws spheres on each of the calculated x,y coordinates\n TRACER.append(Sphere([x, y, -4], 0.2, models.default_sphere_model))\n\n # Copying the full molecule - only needed for multithreading\n # This is required for multithreading\n ethanol = 
copy.deepcopy(ETHANOL)\n\n    # Move the molecule to the calculated coordinates\n    ethanol.move_to([x, y, -5])\n\n    # Rotate the molecule on x- and y-axes\n    # NOTE: default rotate does NOT work when using a thread-pool,\n    # use the molecule.rotate_by_step method instead\n    ethanol.rotate_by_step([1, 0, 0], RAD_PER_SCENE, step)\n\n    # Return a 'Scene' object containing -all- objects to render, i.e. the camera,\n    # lights and in this case, a molecule and a list of spheres (TRACER).\n    return Scene(models.default_camera,\n                 objects=[models.default_light, FRONT_LIGHT] + ethanol.povray_molecule + TRACER,\n                 included=['colors.inc'])\n\n\nif __name__ == '__main__':\n    # Load the prototyping settings instead of the default\n    pypovray.SETTINGS = load_config('prototype.ini')\n\n    # Create static objects\n    scene_objects()\n\n    # Render as an MP4 movie\n    pypovray.render_scene_to_mp4(frame, range(20, 40))\n\n    # Timing for running the current simulation including creating the movie:\n    # | Single-thread (s)   | Multi-threaded (s)  |\n    # |---------------------|---------------------|\n    # | 101.561             | 16.341              |\n    # |---------------------|---------------------|\n","sub_path":"template_pdb_mov_rot.py","file_name":"template_pdb_mov_rot.py","file_ext":"py","file_size_in_byte":3478,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"41701131","text":"import random\n\n#look at how the AI is running\n#pit how AI\n\nclass StickStack:\n    def __init__(self, how_many_sticks=20):\n        \"\"\"Creates instance of Stack of Sticks\"\"\"\n        self.stick_count = how_many_sticks\n\n\n    def take_away_sticks(self, how_many_sticks):\n        self.stick_count = self.stick_count - how_many_sticks\n        print(\"Took away {}, there are {} sticks in the stack now. \\n\".format(how_many_sticks, self.stick_count))\n        return self.stick_count\n\n\n    # Don't make a get_stick_count() method, instead call stick_stack.stick_count attribute\n\n\n    def add_sticks(self, how_many_sticks):\n        self.stick_count = self.stick_count + how_many_sticks\n        print(\"Debug: added {} sticks, stick_count now {}\".format(how_many_sticks, self.stick_count))\n        return self.stick_count\n\n\nclass Player:\n    \"\"\"Player keeps its score, its name, his sets of turns\"\"\"\n    def __init__(self, name=\"Hal\", player_type=\"human\"):\n        self.name = name\n        self.player_type = player_type # marker for Human or AI later\n        self.score = 0\n        self.picked_sets = []\n\n\n    def choose_number_of_sticks(self, stick_count):\n        print(\"Your turn, {}.\\n\".format(self.name))\n        if stick_count >= 3:\n            max_number = 3\n        else:\n            max_number = stick_count\n        choice = 0\n        while choice == 0 or choice > max_number:\n            print(\"Pick 1-{} sticks. \\n\".format(max_number))\n            choice = int(input(\"How many sticks do you want? > \\n\"))\n            if choice < 1 or choice > max_number:\n                print(\"Your choice is not acceptable. 
Please choose again.\")\n else:\n number_chosen = choice\n return number_chosen\n\n\nclass Game:\n def __init__(self, player1, player2):\n self.player1 = player1\n self.player2 = player2\n self.current_player = player1\n self.player = self.current_player\n self.stick_stack = StickStack()\n self.winner = None\n\n\n def change_current_player(self):\n if self.current_player == self.player1:\n self.current_player = self.player2\n self.player = self.current_player\n else:\n self.current_player = self.player1\n self.player = self.current_player\n\n\n def set_choice(self, number_chosen):\n # have that choice go to set_choice which updates stick_stack.count and turn count\n # update stick_stack\n self.stick_stack.take_away_sticks(number_chosen)\n\n\n def start(self):\n #creates stick_stack\n print(\"stick_stack stick_count is {}\".format(self.stick_stack.stick_count))\n # If stick stack is not 0, ask current player if they want to pick\n if self.stick_stack.stick_count != 0:\n #game.run() is going to call game.go() which will do most of the work\n self.run()\n\n def you_lose(self):\n print(\"Sorry, {}, you lose.\".format(self.current_player.name))\n if self.current_player == self.player1:\n self.winner = self.player2.name\n elif self.current_player == self.player2:\n self.winner = self.player1.name\n print(\"{} Wins!\".format(self.winner))\n\n\n def run(self):\n self.go() #this isolates the loop into one method for testing\n\n\n def go(self):\n #if player eligible\n while self.stick_stack.stick_count <= 20 and self.stick_stack.stick_count > 1:\n print(\"-- \" * self.stick_stack.stick_count)\n # tell player how many sticks in overall stick_stack\n print(\"There are {} sticks in the Stick Stack \\n\".format(self.stick_stack.stick_count))\n # ask player to choose number of sticks\n number_chosen = self.player.choose_number_of_sticks(self.stick_stack.stick_count)\n # have that choice go to set_choice which and updates stick_stack.count and turn count\n self.set_choice(number_chosen)\n #change players\n self.change_current_player()\n if self.stick_stack.stick_count < 1:\n self.you_lose()\n\n\n\n# class AIPlayer\n# It will inherit attributes and methods from Player with additional attributes and methods\n# pass\n\n\nif __name__ == '__main__':\n\n player1 = Player(\"Hal\")\n player2 = Player(\"Marvin\")\n game = Game(player1, player2)\n game.start()\n\n\n # new_stack = StickStack(20)\n # new_stack.take_away_sticks(3)\n # new_stack.add_sticks(4)\n","sub_path":"gameofsticks.py","file_name":"gameofsticks.py","file_ext":"py","file_size_in_byte":4412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"537036657","text":"\"\"\"\nGiven two strings, write a method 'longest_common_subsequence' that finds the list of longest common\nsubsequences (the longest group of characters that occur in both strings, in the same order).\nFor example, the list of longest common subsequences of 'methodiek' and 'katholiek' is ['thoiek']\n\nNote that two strings can have multiple common subsequences. For instance, the longest common subsequences of\n'methodiek' and 'ochtendgymnastiek' are ['ediek', 'etiek', 'hdiek', 'mtiek', 'odiek', 'tdiek']\n\nIf the strings do not have matching characters, the longest common subsequence is an empty string\n\nWrite this function as a RECURSIVE function. This is not enforced by the tests, but you should\nimplement it like this anyway. 
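A natural recursive shape (one possible approach, not prescribed by this exercise) is to recurse on xstr[1:] and/or ystr[1:] and combine the partial results.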
On the real exam, someone will actually look at your implementation\nto manually verify that it is indeed written recursively.\n\nIf you upload your solution before the deadline, your implementation will be graded by us. This grade\nis a direct (and automatically calculated) result of the output of the test cases in this file and an\nadditional set of test cases. Make sure you think about special and edge cases.\n\"\"\"\n\ndef longest_common_subsequence(xstr, ystr): # do not change the signature of this method (i.e. do not change its name, add/remove parameters, ...)\n \"\"\"\n Given two strings, this function returns the list of longest common subsequences of both strings\n \"\"\"\n if len(xstr) == 0 or len(ystr) == 0:\n return [\"\"]\n uitvoerlijst = []\n substring_set_xstr = all_substrings(xstr)\n substring_set_ystr = all_substrings(ystr)\n gemeenschappelijke_substrings = substring_set_xstr.intersection(substring_set_ystr)\n gemeenschappelijke_substrings_lijst = list(gemeenschappelijke_substrings)\n for element in gemeenschappelijke_substrings_lijst:\n if len(element) == lengte_langste_substring(gemeenschappelijke_substrings):\n uitvoerlijst.append(element)\n return uitvoerlijst\n\n\n pass\n\n\ndef all_substrings(word):\n substringset = set()\n for substr_len in range(1, len(word) + 1):\n for start_pos in range(len(word) - substr_len + 1):\n substringset.add(word[start_pos: start_pos + substr_len])\n return substringset\nprint(all_substrings('aapje'))\n\ndef lengte_langste_substring(setje):\n lijst = list(setje)\n maximale_lengte = len(lijst[0])\n for element in lijst:\n if len(element) > maximale_lengte:\n maximale_lengte = len(element)\n return maximale_lengte\n\n\n\n\n\n'''\n if len(xstr) == 0 or len(ystr) == 0:\n return [\"\"]\n if xstr[0] in ystr:\n longest_common_subsequence2(xstr, ystr)\n else:\n longest_common_subsequence(xstr[1:], ystr)\n\n return lijstenfunctie\n\ndef longest_common_subsequence2(xstr, ystr):\n substring = \"\"\n i = 0\n while xstr[i] == ystr[ystr.index(xstr[0])+i]:\n substring += xstr[i]\n i += 1\n lijstenfunctie(substring)\n\n\ndef lijstenfunctie(substring, lijst = []):\n lijst.append(substring)\n\n\n\n\n\n\n\n\n\n substring = \"\"\n if len(xstr) == 0 or len(ystr) == 0:\n return lijst\n else:\n if xstr[0] in ystr:\n substring += xstr[0]\n else:\n lijst.append(substring)\n return longest_common_subsequence(xstr[1:], ystr)'''\n\n\n\n\n\n\n\n\n#\n# DO NOT CHANGE ANYTHING BELOW THIS LINE !!!\n#\n\ndef main():\n data = (\"aapje\", \"banaan\", [\"aa\"])\n result = longest_common_subsequence(data[0], data[1]);\n print(f\"Testing with input '{data[0]}' and '{data[1]}'.\")\n print(f\"Received result: {result}\")\n\n if result == data[2]:\n print(\"Looking good! Now try to run the test suite to see if your solution works with other examples as well.\")\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"longest_common_subsequence2.py","file_name":"longest_common_subsequence2.py","file_ext":"py","file_size_in_byte":3718,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"552323217","text":"\"\"\"Functions to facilitate using GRASP.\"\"\"\nfrom __future__ import division, print_function, absolute_import, unicode_literals\n#***************************************************************************************************\n# Copyright 2015, 2019 National Technology & Engineering Solutions of Sandia, LLC (NTESS).\n# Under the terms of Contract DE-NA0003525 with NTESS, the U.S. 
Government retains certain rights\n# in this software.\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except\n# in compliance with the License. You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0 or in the LICENSE file in the root pyGSTi directory.\n#***************************************************************************************************\n\nimport itertools\nimport random\n\nimport numpy as _np\n\nfrom .. import objects as _objs\n\n\ndef get_swap_neighbors(weights, forcedWeights=None, shuffle=False):\n \"\"\"Return the list of weights in the neighborhood of a given weight vector.\n\n A weight vector is in the neighborhood of a given weight vector if it is\n only a single swap away from the given weight vector. There is an option to\n use `forcedWeights` to indicate elements you don't want to swap out.\n\n Parameters\n ----------\n weights : numpy.array\n Binary vector to find the neighborhood of.\n\n forcedWeights : numpy.array, optional\n Binary vector indicating elements that must be included in all\n neighboring vectors (these elements are assumed to already be present\n in `weights`.\n\n shuffle : bool, optional\n Whether the neighborhood should be presented to the optimizer in a\n random order (important if the local optimizer updates the solution to\n the first better solution it finds in the neighborhood instead of\n exhaustively searching the neighborhood for the best solution).\n\n Returns\n -------\n list of numpy.array\n List of binary vectors corresponding to all the neighbors of `weights`.\n\n \"\"\"\n if forcedWeights is None:\n forcedWeights = _np.zeros(len(weights))\n\n swap_out_idxs = _np.where(_np.logical_and(weights == 1,\n forcedWeights == 0))[0]\n swap_in_idxs = _np.where(weights == 0)[0]\n neighbors = []\n for swap_out, swap_in in itertools.product(swap_out_idxs, swap_in_idxs):\n neighbor = weights.copy()\n neighbor[swap_out] = 0\n neighbor[swap_in] = 1\n neighbors.append(neighbor)\n\n if shuffle:\n random.shuffle(neighbors)\n\n return neighbors\n\n\ndef grasp_greedy_construction(elements, scoreFn, rclFn, feasibleThreshold=None,\n feasibleFn=None, initialElements=None):\n \"\"\"\n Constructs a subset of `elements` that represents a feasible solution.\n\n This function performs the \"greedy-construction\" part of a grasp\n iteration (see :func:`do_grasp_iteration`). The returned solution\n subset is built up by repeating the following step until a feasible\n solution (using `feasibleThreshold` OR `feasibleFn`):\n\n 1. Build a candidate list from elements that haven't been chosen.\n 2. Based on the scores of the candidates (using `scoreFn`), construct a\n \"reduced candidate list\" (using `rclFn`) that need not (but could) be\n just the single best-scoring element.\n 3. 
Choose a random element from the reduced candidate list and add it\n to the solution subset.\n\n Parameters\n ----------\n elements : list\n A list containing some representation of the elements that can be used\n by the verious score functions.\n\n scoreFn : callable\n Function that takes a sublist of `elements` and returns a score to\n minimize that is comparable with other scores.\n\n rclFn : callable\n Function that takes a list of sublists of `elements` (that is, a list\n of candidate partial solutions) and returns the indices within that\n list of partial solutions to be included in the restricted candidate\n list.\n\n feasibleThreshold : score\n A value comparable with the return value of the various score functions\n against which a score may be compared to test if the solution is\n feasible (the solution is feasible iff\n ``solnScore < feasibleThreshold``). Overrides `feasibleFn` if set to a\n value other than ``None``.\n\n feasibleFn : callable\n Function that takes a sublist of `elements` defining a potential\n solution and returns ``True`` if that solution is feasible (otherwise\n should return ``False``). Not used if `feasibleThreshold` is not\n ``None``.\n\n initialElements : numpy.array\n Binary vector indicating whether the corresponding elements in\n `elements` should be automatically included at the start of this\n construction.\n\n Returns\n -------\n list\n A sub-list of `elements`.\n \"\"\"\n\n if initialElements is None:\n weights = _np.zeros(len(elements))\n else:\n if len(initialElements) != len(elements):\n raise ValueError('initialElements must have the same length as '\n 'elements ({}), not {}!'.format(len(elements),\n len(initialElements)))\n weights = _np.array(initialElements)\n\n soln = [elements[idx] for idx in _np.nonzero(weights)[0]]\n\n if feasibleThreshold is not None:\n feasibleTest = 'threshold'\n elif feasibleFn is not None:\n feasibleTest = 'function'\n else:\n raise ValueError('Must provide either feasibleFn or '\n 'feasibleThreshold!')\n\n feasible = False\n\n while _np.any(weights == 0) and not feasible:\n candidateIdxs = _np.where(weights == 0)[0]\n candidateSolns = [soln + [elements[idx]] for idx in candidateIdxs]\n candidateScores = _np.array([scoreFn(candidateSoln)\n for candidateSoln in candidateSolns])\n rclIdxs = rclFn(candidateScores)\n assert(len(rclIdxs) > 0), \"Empty reduced candidate list!\"\n chosenIdx = _np.random.choice(rclIdxs)\n soln = candidateSolns[chosenIdx]\n weights[candidateIdxs[chosenIdx]] = 1\n if feasibleTest == 'threshold':\n feasible = candidateScores[chosenIdx] <= feasibleThreshold\n elif feasibleTest == 'function':\n feasible = feasibleFn(soln)\n\n if not feasible:\n raise ValueError('No feasible solution found!')\n\n return soln\n\n\ndef grasp_local_search(initialSoln, scoreFn, elements, getNeighborsFn,\n feasibleThreshold=None, feasibleFn=None):\n \"\"\"\n Perfom the local-search part of a grasp iteration.\n\n Attempts to find a better (lower-scoring) solution based on successive\n \"local\" (as determined by `getNeighborsFn`) steps from `initialSolution`.\n\n Parameters\n ----------\n initialSoln : list\n A list of some (or all) of the items in `elements`, representing an\n initial solution. This solution must be \"feasbile\" as determined by\n `feasibleThreshold` or `feasibleFn`.\n\n scoreFn : callable\n Function that takes a sublist of `elements` and returns a score to\n minimize that is comparable with other scores. 
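For illustration only, something as simple as scoreFn=len satisfies this interface.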
Used by the local\n search to find a locally optimal feasible subset.\n\n elements : list\n A list containing some representation of the elements that can be used\n by the verious score functions.\n\n getNeighborsFn : callable\n Function that takes a binary vector indicating which members of\n `elements` are included in the current solution and returns a list\n of binary vectors indicating which potential solutions are in the\n neighborhood of the current solution for the purposes of local\n optimization.\n\n feasibleThreshold : score\n A value comparable with the return value of the various score functions\n against which a score may be compared to test if the solution is\n feasible (the solution is feasible iff\n ``solnScore < feasibleThreshold``). Overrides `feasibleFn` if set to a\n value other than ``None``.\n\n feasibleFn : callable\n Function that takes a sublist of `elements` defining a potential\n solution and returns ``True`` if that solution is feasible (otherwise\n should return ``False``). Not used if `feasibleThreshold` is not\n ``None``.\n\n Returns\n -------\n list\n A sub-list of `elements`, representing the locally-improved solution.\n \"\"\"\n if feasibleThreshold is not None:\n feasibleTest = 'threshold'\n elif feasibleFn is not None:\n feasibleTest = 'function'\n else:\n raise ValueError('Must provide either feasibleFn or '\n 'feasibleThreshold!')\n\n currentSoln = initialSoln\n currentWeights = _np.zeros(len(elements))\n for element in initialSoln:\n currentWeights[elements.index(element)] = 1\n currentScore = scoreFn(currentSoln)\n\n betterSolnFound = True\n\n while betterSolnFound:\n betterSolnFound = False\n weightsNeighbors = getNeighborsFn(currentWeights)\n neighborSolns = [[element for element\n in _np.array(elements)[_np.nonzero(weightsNeighbor)]]\n for weightsNeighbor in weightsNeighbors]\n if feasibleTest == 'function':\n feasibleNeighborSolns = [(idx, soln) for idx, soln\n in enumerate(neighborSolns)\n if feasibleFn(soln)]\n for idx, soln in feasibleNeighborSolns:\n solnScore = scoreFn(soln)\n if solnScore < currentScore:\n betterSolnFound = True\n currentScore = solnScore\n currentSoln = soln\n currentWeights = weightsNeighbors[idx]\n break\n\n elif feasibleTest == 'threshold':\n for idx, soln in enumerate(neighborSolns):\n solnScore = scoreFn(soln)\n # The current score is by construction below the threshold,\n # so we don't need to check that.\n if solnScore < currentScore:\n betterSolnFound = True\n currentScore = solnScore\n currentSoln = soln\n currentWeights = weightsNeighbors[idx]\n break\n\n return currentSoln\n\n\ndef do_grasp_iteration(elements, greedyScoreFn, rclFn, localScoreFn,\n getNeighborsFn, feasibleThreshold=None, feasibleFn=None,\n initialElements=None, seed=None, verbosity=0):\n \"\"\"Perform one iteration of GRASP (greedy construction and local search).\n\n Parameters\n ----------\n elements : list\n A list containing some representation of the elements that can be used\n by the verious score functions.\n\n greedyScoreFn : callable\n Function that takes a sublist of `elements` and returns a score to\n minimize that is comparable with other scores. 
Used by the greedy\n construction to construct the initial feasible subset.\n\n rclFn : callable\n Function that takes a list of sublists of `elements` (that is, a list\n of candidate partial solutions) and returns the indices within that\n list of partial solutions to be included in the restricted candidate\n list.\n\n localScoreFn : callable\n Function that takes a sublist of `elements` and returns a score to\n minimize that is comparable with other scores. Used by the local\n search to find a locally optimal feasible subset.\n\n getNeighborsFn : callable\n Function that takes a binary vector indicating which members of\n `elements` are included in the current solution and returns a list\n of binary vectors indicating which potential solutions are in the\n neighborhood of the current solution for the purposes of local\n optimization.\n\n feasibleThreshold : score\n A value comparable with the return value of the various score functions\n against which a score may be compared to test if the solution is\n feasible (the solution is feasible iff\n ``solnScore < feasibleThreshold``). Overrides `feasibleFn` if set to a\n value other than ``None``.\n\n feasibleFn : callable\n Function that takes a sublist of `elements` defining a potential\n solution and returns ``True`` if that solution is feasible (otherwise\n should return ``False``). Not used if `feasibleThreshold` is not\n ``None``.\n\n initialElements : numpy.array\n Binary vector indicating whether the corresponding elements in\n `elements` should be automatically included by the greedy construction\n routine at the start of its construction.\n\n seed : int\n Seed for the random number generator.\n\n verbosity : int\n Sets the level of logging messages the printer will display.\n\n Returns\n -------\n initialSoln : list\n The sublist of `elements` given by the greedy construction.\n\n localSoln : list\n The sublist of `elements` given by the local search.\n\n \"\"\"\n printer = _objs.VerbosityPrinter.build_printer(verbosity)\n\n initialSoln = grasp_greedy_construction(elements, greedyScoreFn, rclFn,\n feasibleThreshold, feasibleFn,\n initialElements)\n printer.log('Initial construction:', 1)\n def to_str(x): return x.str if isinstance(x, _objs.Circuit) else str(x)\n printer.log(str([to_str(element) for element in initialSoln]), 1)\n\n localSoln = grasp_local_search(initialSoln, localScoreFn, elements,\n getNeighborsFn, feasibleThreshold,\n feasibleFn)\n printer.log('Local optimum:', 1)\n printer.log(str([to_str(element) for element in localSoln]), 1)\n\n return initialSoln, localSoln\n\n\ndef do_grasp(elements, greedyScoreFn, rclFn, localScoreFn, getNeighborsFn,\n finalScoreFn, iterations, feasibleThreshold=None, feasibleFn=None,\n initialElements=None, seed=None, verbosity=0):\n \"\"\"Perform GRASP to come up with an optimal feasible set of elements.\n\n Parameters\n ----------\n elements : list\n A list containing some representation of the elements that can be used\n by the verious score functions.\n\n greedyScoreFn : callable\n Function that takes a sublist of `elements` and returns a score to\n minimize that is comparable with other scores. 
Used by the greedy\n construction to construct the initial feasible subset.\n\n rclFn : callable\n Function that takes a list of sublists of `elements` (that is, a list\n of candidate partial solutions) and returns the indices within that\n list of partial solutions to be included in the restricted candidate\n list.\n\n localScoreFn : callable\n Function that takes a sublist of `elements` and returns a score to\n minimize that is comparable with other scores. Used by the local\n search to find a locally optimal feasible subset.\n\n getNeighborsFn : callable\n Function that takes a binary vector indicating which members of\n `elements` are included in the current solution and returns a list\n of binary vectors indicating which potential solutions are in the\n neighborhood of the current solution for the purposes of local\n optimization.\n\n finalScoreFn : callable\n Function that takes a sublist of `elements` and returns a score to\n minimize that is comparable with other scores. Used to compare the\n solutions from various iterations in order to choose an optimum.\n\n iterations : int\n How many iterations of greedy construction followed by local search to\n perform.\n\n feasibleThreshold : score\n A value comparable with the return value of the various score functions\n against which a score may be compared to test if the solution is\n feasible (the solution is feasible iff\n ``solnScore < feasibleThreshold``). Overrides `feasibleFn` if set to a\n value other than ``None``.\n\n feasibleFn : callable\n Function that takes a sublist of `elements` defining a potential\n solution and returns ``True`` if that solution is feasible (otherwise\n should return ``False``). Not used if `feasibleThreshold` is not\n ``None``.\n\n initialElements : numpy.array\n Binary vector with 1s at indices corresponding to elements in\n `elements` that the greedy construction routine will include at the\n start of its construction.\n\n seed : int\n Seed for the random number generator.\n\n verbosity : int\n Sets the level of logging messages the printer will display.\n\n Returns\n -------\n list of Circuits\n The best germ set from all locally-optimal germ sets constructed.\n\n \"\"\"\n printer = _objs.VerbosityPrinter.build_printer(verbosity)\n\n bestSoln = None\n for iteration in range(iterations):\n printer.log('Iteration {}'.format(iteration), 1)\n _, localSoln = do_grasp_iteration(elements, greedyScoreFn,\n rclFn, localScoreFn,\n getNeighborsFn,\n feasibleThreshold,\n feasibleFn,\n initialElements, seed,\n verbosity)\n if bestSoln is None:\n bestSoln = localSoln\n elif finalScoreFn(localSoln) < finalScoreFn(bestSoln):\n bestSoln = localSoln\n\n return bestSoln\n","sub_path":"packages/pygsti/algorithms/grasp.py","file_name":"grasp.py","file_ext":"py","file_size_in_byte":17934,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"618449664","text":"'''\nIONet device serialiser.\n'''\nfrom rest_framework import serializers, validators\nfrom ionet_server.auth.models import Group\nfrom ionet_server.auth.utils import SerializerConstructor\nfrom ionet_server.auth import constants as auth_constants\nfrom ionet_server.dev import constants, models\nfrom ionet_server.dev.devices import (\n get_device_type_model, get_device_type_choices\n)\n\n\nclass DeviceSerializerConstructor(SerializerConstructor):\n DEVICE_WRITE_FIELDS = ('group', 'device_name', 'device_version',)\n DEVICE_FIELDS = DEVICE_WRITE_FIELDS + ('device_type',)\n USAGE_FIELDS = (\n 'usage_bandwidth', 
'usage_cost', 'usage_retention',\n 'usage_limit_enabled', 'usage_limit_units', 'usage_limit',\n )\n TASKS_FIELDS = ('tasks_retention',)\n LOGS_FIELDS = ('logs_enabled', 'logs_interval', 'logs_retention',)\n PARAMS_FIELDS = ('params_enabled', 'params_interval', 'params_retention',)\n WRITE_FIELDS = (\n DEVICE_WRITE_FIELDS + USAGE_FIELDS + TASKS_FIELDS + LOGS_FIELDS +\n PARAMS_FIELDS\n )\n FIELDS = DEVICE_FIELDS + WRITE_FIELDS\n\n PERM_KEY_FIELDS = {\n 'CREATE': {\n 'key': constants.PERM_DEV_CREATE,\n 'fields': FIELDS,\n },\n 'WRITE': {\n 'key': constants.PERM_DEV_WRITE,\n 'fields': WRITE_FIELDS,\n },\n 'RECURSIVE_WRITE': {\n 'key': constants.PERM_DEV_RECURSIVE_WRITE,\n 'fields': WRITE_FIELDS,\n }\n }\n\n def _construct(self, user, devices, groups, device=None, **fields_ro):\n '''\n Constructs the serialiser model for this user and device.\n '''\n # If device object, get the related type model class.\n if device:\n type_cls = get_device_type_model(device.device_type)\n\n # Relational fields should not provide a queryset if read_only is true.\n if fields_ro.get('group', True):\n group_kwargs = {'read_only': True}\n else:\n group_kwargs = {'queryset': groups}\n\n class DeviceSerializer(serializers.ModelSerializer):\n # GroupMixin fields.\n group = serializers.PrimaryKeyRelatedField(\n label=auth_constants.TR_GROUP_LABEL,\n help_text=constants.TR_DEVICE_GROUP_HELP,\n allow_null=False,\n required=False,\n **group_kwargs\n )\n\n # StatusCodeMixin fields.\n status_code = serializers.ReadOnlyField()\n\n # Device fields.\n device_name = serializers.CharField(\n label=constants.TR_DEVICE_NAME_LABEL,\n help_text=constants.TR_DEVICE_NAME_HELP,\n max_length=constants.DEVICE_NAME_LEN,\n read_only=fields_ro.get('device_name', True),\n allow_blank=False,\n )\n # Device type should not be modified after object creation, if read\n # only is represented as a CharField, else as a ChoiceField.\n if fields_ro.get('device_type', True):\n device_type = serializers.CharField(\n label=constants.TR_DEVICE_TYPE_LABEL,\n help_text=constants.TR_DEVICE_TYPE_HELP,\n max_length=constants.DEVICE_TYPE_LEN,\n read_only=True,\n )\n else:\n device_type = serializers.ChoiceField(\n label=constants.TR_DEVICE_TYPE_LABEL,\n help_text=constants.TR_DEVICE_TYPE_HELP,\n choices=get_device_type_choices(),\n required=True,\n )\n device_version = serializers.CharField(\n label=constants.TR_DEVICE_VERSION_LABEL,\n help_text=constants.TR_DEVICE_VERSION_HELP,\n max_length=constants.DEVICE_VERSION_LEN,\n read_only=fields_ro.get('device_version', True),\n allow_blank=False,\n )\n\n # UsageMixin fields.\n usage_bandwidth = serializers.ChoiceField(\n label=constants.TR_USAGE_BANDWIDTH_LABEL,\n help_text=constants.TR_USAGE_BANDWIDTH_HELP,\n choices=constants.USAGE_BANDWIDTH_CHOICES,\n read_only=fields_ro.get('usage_bandwidth', True),\n )\n usage_cost = serializers.ChoiceField(\n label=constants.TR_USAGE_COST_LABEL,\n help_text=constants.TR_USAGE_COST_HELP,\n choices=constants.USAGE_COST_CHOICES,\n read_only=fields_ro.get('usage_cost', True),\n )\n usage_retention = serializers.DurationField(\n label=constants.TR_USAGE_RETENTION_LABEL,\n help_text=constants.TR_USAGE_RETENTION_HELP,\n read_only=fields_ro.get('usage_retention', True),\n )\n usage_limit_enabled = serializers.BooleanField(\n label=constants.TR_USAGE_LIMIT_EN_LABEL,\n help_text=constants.TR_USAGE_LIMIT_EN_HELP,\n read_only=fields_ro.get('usage_limit_enabled', True),\n )\n usage_limit_units = serializers.ChoiceField(\n label=constants.TR_USAGE_LIMIT_UNITS_LABEL,\n 
help_text=constants.TR_USAGE_LIMIT_UNITS_HELP,\n choices=constants.USAGE_LIMIT_UNITS_CHOICES,\n read_only=fields_ro.get('usage_limit_units', True),\n )\n usage_limit = serializers.IntegerField(\n label=constants.TR_USAGE_LIMIT_LABEL,\n help_text=constants.TR_USAGE_LIMIT_HELP,\n read_only=fields_ro.get('usage_limit', True),\n min_value=0,\n )\n usage_current = serializers.ReadOnlyField()\n usage_reset = serializers.ReadOnlyField()\n\n # TasksMixin fields.\n tasks_retention = serializers.DurationField(\n label=constants.TR_TASKS_RETENTION_LABEL,\n help_text=constants.TR_TASKS_RETENTION_HELP,\n read_only=fields_ro.get('tasks_retention', True),\n )\n\n # LogsMixin fields.\n logs_enabled = serializers.BooleanField(\n label=constants.TR_LOGS_EN_LABEL,\n help_text=constants.TR_LOGS_EN_HELP,\n read_only=fields_ro.get('logs_enabled', True),\n )\n logs_interval = serializers.DurationField(\n label=constants.TR_LOGS_INTERVAL_LABEL,\n help_text=constants.TR_LOGS_INTERVAL_HELP,\n read_only=fields_ro.get('logs_interval', True),\n )\n logs_retention = serializers.DurationField(\n label=constants.TR_LOGS_RETENTION_LABEL,\n help_text=constants.TR_LOGS_RETENTION_HELP,\n read_only=fields_ro.get('logs_retention', True),\n )\n logs_collected = serializers.ReadOnlyField()\n\n # ParametersMixin fields.\n params_enabled = serializers.BooleanField(\n label=constants.TR_PARAMS_EN_LABEL,\n help_text=constants.TR_PARAMS_EN_HELP,\n read_only=fields_ro.get('params_enabled', True),\n )\n params_interval = serializers.DurationField(\n label=constants.TR_PARAMS_INTERVAL_LABEL,\n help_text=constants.TR_PARAMS_INTERVAL_HELP,\n read_only=fields_ro.get('params_interval', True),\n )\n params_retention = serializers.DurationField(\n label=constants.TR_PARAMS_RETENTION_LABEL,\n help_text=constants.TR_PARAMS_RETENTION_HELP,\n read_only=fields_ro.get('params_retention', True),\n )\n params_collected = serializers.ReadOnlyField()\n\n class Meta:\n model = models.Device\n validators = [\n validators.UniqueTogetherValidator(\n queryset=models.Device.objects.all(),\n fields=('device_name', 'group',)\n )\n ]\n fields = (\n 'id',\n 'created',\n 'updated',\n 'status_code',\n 'usage_current',\n 'usage_reset',\n 'logs_collected',\n 'params_collected',\n )\n fields += self.FIELDS\n # If this is an object view and have type class, get extra\n # fields using class method. 
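The serializer above is generated inside a method so that each field's read_only flag can depend on the requesting user's permissions (the fields_ro mapping). A minimal sketch of the same class-factory pattern follows; it is illustrative only, and the model argument and field names are hypothetical rather than part of this codebase.

from rest_framework import serializers

def make_note_serializer(fields_ro, model_cls):
    # Build a ModelSerializer class whose fields are writable or read-only
    # depending on the caller-supplied fields_ro mapping, e.g.
    # {'title': False} makes 'title' writable (hypothetical field names).
    class NoteSerializer(serializers.ModelSerializer):
        title = serializers.CharField(read_only=fields_ro.get('title', True))
        body = serializers.CharField(read_only=fields_ro.get('body', True))

        class Meta:
            model = model_cls
            fields = ('id', 'title', 'body')

    return NoteSerializer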
Add related fields list to save\n # type specific fields.\n if (device and type_cls):\n fields += type_cls.get_serializer_fields()\n related_fields = [device.device_type]\n else:\n related_fields = []\n\n def update(self, instance, validated_data):\n # Save device type fields.\n for related_obj_name in self.Meta.related_fields:\n data = validated_data.pop(related_obj_name)\n related_instance = getattr(instance, related_obj_name)\n\n for attr_name, value in data.items():\n setattr(related_instance, attr_name, value)\n related_instance.save()\n\n return super(DeviceSerializer, self).update(\n instance, validated_data\n )\n\n # If this is an object view and have type class, extend serialiser\n # class using class method.\n if (device and type_cls):\n return type_cls.get_serializer_class(\n DeviceSerializer, device.device_type, user\n )\n else:\n return DeviceSerializer\n\n def get_serializer(self, user, pk=None):\n '''\n Returns the device serializer class for this user and device.\n '''\n # Devices, groups this user is allowed to see.\n devices = models.Device.objects.get_user_devices(user)\n groups = Group.objects.get_user_groups(user)\n\n # No primary key argument, this is a list view.\n if (not pk):\n fields_ro = self.get_obj_group_fields(user)\n return self._construct(user, devices, groups, **fields_ro)\n\n # Else is a device object view.\n else:\n device = devices.get(pk=pk)\n fields_ro = self.get_obj_group_fields(user, device.group)\n return self._construct(user, devices, groups, device, **fields_ro)\n","sub_path":"ionet/src/ionet_server/ionet_server/dev/serializers/device.py","file_name":"device.py","file_ext":"py","file_size_in_byte":10534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"64020189","text":"#!/usr/bin/env python\r\n# -*- coding: utf8 -*-\r\n\r\nfrom BeautifulSoup import BeautifulSoup\r\nimport urllib, urllib2, argparse, textwrap\r\nimport lib.TextColors as TextColors\r\n\r\n__author__ = \"Arlo Emerson \"\r\n__version__ = \"1.0\"\r\n__date__ = \"8/8/2018\"\r\n\r\n\"\"\"\r\n\tSCRIPT: \r\n\t\"download_links.py\"\r\n\r\n\tSYNOPSIS:\r\n\tThis is an http workaround for sometimes slow FTP transfers of rendered files (if an alternative web/http link is provided). Basically we're just getting a list of images on a page and downloading them one by one.\r\n\r\n\tUSAGE:\r\n\t• Place this script and the lib folder in your render/output folder.\r\n\t• run python download_links.py -h for documentation and usage. \r\n\"\"\"\r\n\r\nclass Downloader():\r\n\r\n\tdef __init__(self):\r\n\t\tprint(\"Running \" + TextColors.HEADERLEFT3 + TextColors.INVERTED + self.__class__.__name__ + \" \" + TextColors.ENDC)\r\n\t\t\r\n\t\tself._url = \"\"\r\n\t\tself._listIndex = 1 # set to 1 to start at the 2nd link, this avoids trying to download the up dir link\r\n\r\n\t\thelpMessage = 'This script will attempt to download all links on a given page.'\r\n\r\n\t\tparser = argparse.ArgumentParser(description=helpMessage, \r\n\t\t\tepilog=textwrap.dedent('''Pass in a url containing a list of links to download. e.g. 
python download_links.py -url \"url-in-quotes\" -i 2'''), formatter_class=argparse.RawTextHelpFormatter)\r\n\t\tparser.add_argument('-u', '--url', dest='url', action=\"store\", required=True, help=\"The url containing the list of links.\")\r\n\t\tparser.add_argument('-i', '--index', dest='index', action=\"store\", required=False, help=\"The index of the list to start downloading at.\")\r\n\r\n\t\targs = parser.parse_args()\r\n\t\t\r\n\t\tif args.url:\r\n\t\t\tself._url = args.url\r\n\t\t\tprint(\"URL is \" + args.url)\r\n\t\telse:\r\n\t\t\tprint(\"You must pass in a URL.\")\r\n\r\n\t\tif args.index:\r\n\t\t\tself._listIndex = int(args.index)\r\n\r\n\tdef run(self):\t\r\n\t\treq = urllib2.Request(self._url)\r\n\r\n\t\tresponse = urllib2.urlopen(req)\r\n\t\tresponseHTML = response.read()\r\n\r\n\t\t# print(responseHTML)\r\n\t\tsoup = BeautifulSoup(responseHTML)\r\n\r\n\t\tlinks = []\r\n\t\tcounter = 0 # 0 will be the up link\r\n\t\tfor link in soup.findAll('a'):\r\n\t\t\tif counter >= self._listIndex:\r\n\t\t\t for foo in link:\r\n\t\t\t \tlinks.append(foo)\r\n\t\t\tcounter += 1\r\n\r\n\t\tfor link in links:\r\n\t\t\ttruncatedLink = link[ link.rfind(\"/\")+1: ]\r\n\t\t\tprint( truncatedLink )\r\n\t\t\tf = urllib.urlretrieve( self._url + link, truncatedLink )\r\n\r\nd = Downloader()\r\nd.run()","sub_path":"python/misc_util/download_links.py","file_name":"download_links.py","file_ext":"py","file_size_in_byte":2391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"207680307","text":"def get_earthquake_domain_list(startTm, endTm, filename):\n\n import urllib.request\n import platform\n\n os = platform.system()\n url = \"http://www.weather.go.kr/weather/earthquake_volcano/domesticlist_download.jsp?startSize=999&endSize=999&pNo=1&startLat=999.0&endLat=999.0&startLon=999.0&endLon=999.0&lat=999.0&lon=999.0&dist=999.0&keyword=&startTm={}&endTm={}\".format(startTm, endTm)\n \n if os == \"Windows\":\n saveFile = \"d:/workspace/hello/scraping/results/{}\".format(filename)\n elif os == \"Darwin\":\n saveFile = \"./results/{}\".format(filename)\n \n mem = urllib.request.urlopen(url).read()\n with open(saveFile, mode=\"wb\") as file:\n file.write(mem)\n\n print(\"OK!\")\n\nget_earthquake_domain_list('2019-01-01', '2019-01-08', 'earthquake_domain_2019.html')","sub_path":"scraping/scraping_test_table_0002.py","file_name":"scraping_test_table_0002.py","file_ext":"py","file_size_in_byte":802,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"439560312","text":"# GUI-Calculator.py\r\nfrom tkinter import *\r\nfrom tkinter import ttk, messagebox\r\nimport wikipedia\r\n#############CSV##############\r\nimport csv\r\nfrom datetime import datetime\r\ndef writetocsv(data, filename='data.csv'):\r\n with open(filename,'a',newline='',encoding='utf-8') as file:\r\n fw = csv.writer(file) # fw = file writer\r\n fw.writerow(data)\r\n\r\n#############GUI##############\r\nGUI = Tk()\r\nGUI.title('โปรแกรมจัดการ layout')\r\nGUI.iconbitmap('loong.ico')\r\n\r\nW = 1000\r\nH = 600\r\nMW = GUI.winfo_screenwidth() # Monitor Width\r\nMH = GUI.winfo_screenheight() # Monitor Height\r\nSX = (MW/2) - (W/2) # Start X\r\nSY = (MH/2) - (H/2) # Start Y\r\n#SY = SY - 50 # diff up\r\n\r\nprint(MW,MH,SX,SY)\r\nprint('{}x{}+{:.0f}+{:.0f}'.format(W,H,SX,SY))\r\nGUI.geometry('{}x{}+{:.0f}+{:.0f}'.format(W,H,SX,SY))\r\n\r\n############MENUBAR###############\r\nmenubar = Menu(GUI)\r\nGUI.config(menu=menubar)\r\n# 
-----------------------------------------\r\n# File Menu\r\nfilemenu = Menu(menubar,tearoff=0)\r\nmenubar.add_cascade(label='File',menu=filemenu)\r\n\r\ndef ExportDatabase():\r\n print('Export Database to CSV')\r\nfilemenu.add_command(label='Export',command=ExportDatabase)\r\nfilemenu.add_command(label='Exit',command=lambda: GUI.destroy())\r\n\r\n# -----------------------------------------\r\n# Member Menu\r\nmembermenu = Menu(menubar,tearoff=0)\r\nmenubar.add_cascade(label='Member',menu=membermenu)\r\n\r\n# -----------------------------------------\r\n# Help Menu\r\nimport webbrowser\r\n\r\nhelpmenu = Menu(menubar,tearoff=0)\r\nmenubar.add_cascade(label='Help',menu=helpmenu)\r\ncontact_url = 'https://uncle-engineer.com'\r\nhelpmenu.add_command(label='Contact US',command=lambda: webbrowser.open(contact_url))\r\n\r\ndef About():\r\n ABGUI = Toplevel()\r\n ABGUI.iconbitmap('loong.ico')\r\n W = 300\r\n H = 200\r\n MW = GUI.winfo_screenwidth() # Monitor Width\r\n MH = GUI.winfo_screenheight() # Monitor Height\r\n SX = (MW/2) - (W/2) # Start X\r\n SY = (MH/2) - (H/2) # Start Y\r\n ABGUI.geometry('{}x{}+{:.0f}+{:.0f}'.format(W,H,SX,SY))\r\n L = Label(ABGUI,text='โปรแกรมร้านกาแฟ',fg='green',font=('Angsana New',30)).pack()\r\n L = Label(ABGUI,text='พัฒนาโดย Uncle Engineer\\nhttps://uncle-engineer.com',font=('Angsana New',20)).pack()\r\n ABGUI.mainloop()\r\n\r\nhelpmenu.add_command(label='About',command=About)\r\n# -----------------------------------------\r\n#############TAB SETTING##############\r\nTab = ttk.Notebook(GUI)\r\nTab.pack(fill=BOTH,expand=1)\r\n\r\nT1 = Frame(Tab)\r\nT2 = Frame(Tab)\r\nT3 = Frame(Tab)\r\nT4 = Frame(Tab)\r\n\r\nicon_tab1 = PhotoImage(file='tab1.png')\r\nicon_tab2 = PhotoImage(file='tab2.png')\r\nicon_tab3 = PhotoImage(file='tab3.png')\r\nicon_tab4 = PhotoImage(file='tab4.png')\r\n\r\nTab.add(T1, text='กุ้ง',image=icon_tab1,compound='left')\r\nTab.add(T2, text='wiki',image=icon_tab2,compound='left')\r\nTab.add(T3, text='CAFE',image=icon_tab3,compound='left')\r\nTab.add(T4, text='Member',image=icon_tab4,compound='left')\r\n\r\n############TAB 1 - กุ้ง############\r\n\r\nL1 = Label(T1,text='กรอกจำนวนกุ้ง (กิโลกรัม)',font=('Angsana New',25))\r\nL1.pack()\r\n\r\nv_kilo = StringVar() #ตัวแปรพิเศษเอาไว้เก็บค่า\r\n\r\nE1 = ttk.Entry(T1, textvariable= v_kilo, width=10,justify='right',font=('impact',30))\r\nE1.pack(pady=20)\r\n\r\nE1.focus()\r\n\r\ndef Calc(event=None):\r\n print('กำลังคำนวณ...กรุณารอสักครู่')\r\n kilo = float(v_kilo.get()) # .get() ดึงข้อมูลจากตัวแปรที่เป็น StringVar\r\n print(kilo * 10)\r\n calc_result = kilo * 299\r\n date = datetime.now()\r\n year = date.year + 543\r\n stamp = date.strftime('{}-%m-%d %H:%M:%S'.format(year)) #Thai Year\r\n data = [stamp, 'กุ้ง', '{:,.2f}'.format(calc_result)]\r\n writetocsv(data)\r\n messagebox.showinfo('รวมราคาทั้งหมด','ลูกค้าต้องจ่ายตังค์ทั้งหมด: {:,.2f} บาท (กิโลกรัมละ 299 บาท)'.format(calc_result))\r\n\r\n\r\nB1 = ttk.Button(T1,text='คำนวณราคา',command=Calc)\r\nB1.pack(ipadx=40,ipady=30)\r\n\r\nE1.bind('',Calc) # ต้องใส่คำว่า event=None ไว้ในฟังชั่นด้วย\r\n\r\n############TAB 2 - Wiki ############\r\n\r\nFONT1 = ('Angsana New',25)\r\n\r\nL2 = Label(T2,text='ค้นหาข้อมูล wikipedia',font=('Angsana New',25))\r\nL2.pack()\r\n\r\nv_search = StringVar() # .get()=ดึงข้อมูล .set('hello') เซ็ตข้อความให้เป็นแบบนั้น\r\n\r\nE2 = ttk.Entry(T2, textvariable=v_search, font=FONT1)\r\nE2.pack(pady=10)\r\n\r\nwikipedia.set_lang('th') #ทำให้เป็นภาษาไทย\r\n\r\nv_link = StringVar()\r\n\r\ndef Search():\r\n try:\r\n search = v_search.get() 
#ดึงข้อความจากช่องกรอกมา\r\n # text = wikipedia.summary(search)\r\n text = wikipedia.page(search)\r\n print(text)\r\n v_result.set(text.content[:1000])\r\n print('LINK:',text.url)\r\n v_link.set(text.url)\r\n except:\r\n v_result.set('ไม่มีข้อมูล กรุณาค้นหาใหม่')\r\n\r\n # เพิ่มฟังชั่นสำหรับเด้งไปอ่านบทความฉบับเต็มในเว็บบราวเซอร์\r\n\r\nB2 = ttk.Button(T2,text='Search',image=icon_tab2,compound='left',command=Search)\r\nB2.pack()\r\n\r\nimport webbrowser\r\n\r\ndef readmore():\r\n webbrowser.open(v_link.get())\r\n\r\nB3 = ttk.Button(T2,text='อ่านต่อ',command=readmore)\r\nB3.place(x=800,y=50)\r\n\r\nv_result = StringVar()\r\nv_result.set('--------Result--------')\r\nresult = Label(T2,textvariable=v_result,wraplength=550, font=(None,15))\r\nresult.pack()\r\n\r\n############TAB 3 - Coffee ############\r\n\r\nBfont = ttk.Style()\r\nBfont.configure('TButton',font=('Angsana New',15))\r\n\r\nCF1 = Frame(T3)\r\nCF1.place(x=50,y=100)\r\n\r\n# ROW0\r\n# header = ['No.', 'title', 'quantity','price','total']\r\n\r\nallmenu = {}\r\n\r\nproduct = {'latte':{'name':'ลาเต้','price':30},\r\n 'cappuccino':{'name':'คาปูชิโน','price':35},\r\n 'espresso':{'name':'เอสเปรสโซ่','price':40},\r\n 'greentea':{'name':'ชาเขียว','price':20},\r\n 'icetea':{'name':'ชาเย็น','price':15},\r\n 'hottea':{'name':'ชาร้อน','price':10},}\r\n\r\ndef UpdateTable():\r\n table.delete(*table.get_children()) # แคลียร์ข้อมูลเก่าในตาราง\r\n for i,m in enumerate(allmenu.values(),start=1):\r\n # m = ['ลาเต้', 30, 1, 30]\r\n table.insert('','end',value=[ i ,m[0],m[1],m[2],m[3] ] )\r\n\r\n\r\ndef AddMenu(name='latte'):\r\n # name = 'latte'\r\n if name not in allmenu:\r\n allmenu[name] = [product[name]['name'],product[name]['price'],1,product[name]['price']]\r\n \r\n else:\r\n # {'latte': ['ลาเต้', 30, 1, 30]}\r\n quan = allmenu[name][2] + 1\r\n total = quan * product[name]['price']\r\n allmenu[name] = [product[name]['name'],product[name]['price'], quan ,total]\r\n print(allmenu)\r\n # ยอดรวม\r\n count = sum([ m[3] for m in allmenu.values()])\r\n v_total.set('{:,.2f}'.format(count))\r\n UpdateTable()\r\n\r\n\r\n\r\nB = ttk.Button(CF1,text='ลาเต้',image=icon_tab3,compound='top',command=lambda m='latte': AddMenu(m))\r\nB.grid(row=0,column=0,ipadx=20,ipady=10)\r\nB = ttk.Button(CF1,text='คาปูชิโน',image=icon_tab3,compound='top',command=lambda m='cappuccino': AddMenu(m))\r\nB.grid(row=0,column=1,ipadx=20,ipady=10)\r\nB = ttk.Button(CF1,text='เอสเปรสโซ่',image=icon_tab3,compound='top',command=lambda m='espresso': AddMenu(m))\r\nB.grid(row=0,column=2,ipadx=20,ipady=10)\r\n\r\n# ROW1\r\nB = ttk.Button(CF1,text='ชาเขียว',image=icon_tab3,compound='top',command=lambda m='greentea': AddMenu(m))\r\nB.grid(row=1,column=0,ipadx=20,ipady=10)\r\nB = ttk.Button(CF1,text='ชาเย็น',image=icon_tab3,compound='top',command=lambda m='icetea': AddMenu(m))\r\nB.grid(row=1,column=1,ipadx=20,ipady=10)\r\nB = ttk.Button(CF1,text='ชาร้อน',image=icon_tab3,compound='top',command=lambda m='hottea': AddMenu(m))\r\nB.grid(row=1,column=2,ipadx=20,ipady=10)\r\n\r\n\r\n\r\n\r\n######TABLE#######\r\nCF2 = Frame(T3)\r\nCF2.place(x=500,y=100)\r\n\r\nheader = ['No.', 'title', 'price','quantity','total']\r\nhwidth = [50,200,100,100,100]\r\n\r\ntable = ttk.Treeview(CF2,columns=header, show='headings',height=15)\r\ntable.pack()\r\n\r\nfor hd,hw in zip(header,hwidth):\r\n table.column(hd,width=hw)\r\n table.heading(hd,text=hd)\r\n\r\n# for hd in header:\r\n# table.heading(hd,text=hd)\r\n\r\n\r\nL = Label(T3,text='Total:', font=(None,15)).place(x=500,y=450)\r\n\r\nv_total = 
StringVar()\r\nv_total.set('0.0')\r\n\r\nLT = Label(T3,textvariable=v_total, font=(None,15))\r\nLT.place(x=600,y=450)\r\n\r\ndef Reset():\r\n global allmenu\r\n allmenu = {}\r\n table.delete(*table.get_children())\r\n v_total.set('0.0')\r\n trstamp = datetime.now().strftime('%y%m%d%H%M%S') #GEN Transaction\r\n v_transaction.set(trstamp)\r\n\r\nB = ttk.Button(T3,text='Clear',command=Reset).place(x=600,y=500)\r\n\r\n# Transaction ID\r\nv_transaction = StringVar()\r\ntrstamp = datetime.now().strftime('%y%m%d%H%M%S') #GEN Transaction\r\nv_transaction.set(trstamp)\r\nLTR = Label(T3,textvariable=v_transaction,font=(None,10)).place(x=950,y=70)\r\n\r\n\r\n# Save Button\r\nFB = Frame(T3)\r\nFB.place(x=890,y=450)\r\n\r\ndef AddTransaction():\r\n # writetocsv('transaction.csv')\r\n stamp = datetime.now().strftime('%Y-%m-%d %H:%M:%S')\r\n transaction = v_transaction.get()\r\n print(transaction, stamp, allmenu.values())\r\n for m in allmenu.values():\r\n # before: m = ['คาปูชิโน', 35, 1, 35]\r\n # after: m = ['12341234', '2022-02-17 21:04:19', 'คาปูชิโน', 35, 1, 35]\r\n m.insert(0,transaction)\r\n m.insert(1,stamp)\r\n writetocsv(m,'transaction.csv')\r\n Reset() #clear data\r\n\r\n\r\nB = ttk.Button(FB,text='บันทึก',command=AddTransaction)\r\nB.pack(ipadx=30,ipady=20)\r\n\r\n# History New Windows\r\n\r\ndef HistoryWindow(event):\r\n HIS = Toplevel() # คล้ายกับ GUI = Tk()\r\n HIS.geometry('750x500')\r\n\r\n L = Label(HIS,text='ประวัติการสั่งซื้อ', font=(None,15)).pack()\r\n\r\n # History\r\n header = ['ts-id','datetime', 'title', 'price','quantity','total']\r\n hwidth = [100,100,200,100,100,100]\r\n\r\n table_history = ttk.Treeview(HIS,columns=header, show='headings',height=15)\r\n table_history.pack()\r\n\r\n for hd,hw in zip(header,hwidth):\r\n table_history.column(hd,width=hw)\r\n table_history.heading(hd,text=hd)\r\n\r\n # Update from CSV\r\n with open('transaction.csv',newline='',encoding='utf-8') as file:\r\n fr = csv.reader(file) # file reader\r\n for row in fr:\r\n table_history.insert('',0,value=row)\r\n\r\n HIS.mainloop()\r\n\r\nGUI.bind('',HistoryWindow)\r\n\r\n#################TAB 4 Member#####################\r\ndef ET3(GUI,text,font=('Angsana New',20)):\r\n v_strvar = StringVar()\r\n T = Label(GUI,text=text,font=(None,15)).pack()\r\n E = ttk.Entry(GUI,textvariable=v_strvar,font=font)\r\n return (E,T,v_strvar)\r\n\r\n\r\nF41 = Frame(T4) # F41 = Frame in Tab4 , No.1\r\nF41.place(x=50,y=50)\r\n\r\nv_membercode = StringVar()\r\nv_membercode.set('M-1001')\r\nL = Label(T4,text='รหัสสมาชิก:',font=(None,13)).place(x=50,y=20)\r\nLCode = Label(T4,textvariable=v_membercode,font=(None,13)).place(x=150,y=20)\r\n\r\nE41,L,v_fullname = ET3(F41,'ชื่อ-สกุล') \r\nE41.pack()\r\n\r\nE42,L,v_tel = ET3(F41,'เบอร์โทร')\r\nE42.pack() \r\n\r\nE43,L,v_usertype = ET3(F41,'ประเภทสมาชิก')\r\nE43.pack()\r\nv_usertype.set('general')\r\n\r\nE44,L,v_point = ET3(F41,'คะแนนสะสม')\r\nE44.pack()\r\nv_point.set('0') # ใส่ค่า default ของ point\r\n\r\n# E43.bind('', lambda x: print(v_usertype.get()))\r\n\r\ndef SaveMember():\r\n code = v_membercode.get()\r\n fullname = v_fullname.get()\r\n tel = v_tel.get()\r\n usertype = v_usertype.get()\r\n point = v_point.get()\r\n print(fullname, tel, usertype, point)\r\n writetocsv([code, fullname, tel, usertype, point],'member.csv') #บันทึกสมาชิกใหม่\r\n table_member.insert('',0,value=[code, fullname, tel, usertype, point])\r\n UpdateTable_Member()\r\n\r\n v_fullname.set('')\r\n v_tel.set('')\r\n v_usertype.set('general')\r\n v_point.set('0')\r\n\r\n\r\nBSave = 
ttk.Button(F41,text='บันทึก',command=SaveMember)\r\nBSave.pack()\r\n\r\ndef EditMember():\r\n code = v_membercode.get()\r\n allmember[code][1] = v_fullname.get()\r\n allmember[code][2] = v_tel.get()\r\n allmember[code][3] = v_usertype.get()\r\n allmember[code][4] = v_point.get()\r\n UpdateCSV(list(allmember.values()),'member.csv')\r\n UpdateTable_Member()\r\n\r\n BEdit.state(['disabled']) # ปิดปุ่มแก้\r\n BSave.state(['!disabled']) # เปิดปุ่มบันทึก\r\n # set default\r\n v_fullname.set('')\r\n v_tel.set('')\r\n v_usertype.set('general')\r\n v_point.set('0')\r\n\r\nBEdit = ttk.Button(F41,text='แก้ไข',command=EditMember)\r\nBEdit.pack()\r\n\r\n\r\ndef NewMember():\r\n UpdateTable_Member()\r\n BEdit.state(['disabled']) # ปิดปุ่มแก้\r\n BSave.state(['!disabled']) # เปิดปุ่มบันทึก\r\n # set default\r\n v_fullname.set('')\r\n v_tel.set('')\r\n v_usertype.set('general')\r\n v_point.set('0')\r\n\r\n\r\nBNew = ttk.Button(F41,text='New',command=NewMember)\r\nBNew.pack()\r\n\r\n#########ตารางโชว์สมาชิก###########\r\nF42 = Frame(T4)\r\nF42.place(x=500,y=100)\r\n\r\nheader = ['Code', 'ชื่อ-สกุล', 'เบอร์โทร','ประเภทสมาชิก','คะแนนสะสม']\r\nhwidth = [50,200,100,100,100]\r\n\r\ntable_member = ttk.Treeview(F42,columns=header, show='headings',height=15)\r\ntable_member.pack()\r\n\r\nfor hd,hw in zip(header,hwidth):\r\n table_member.column(hd,width=hw)\r\n table_member.heading(hd,text=hd)\r\n\r\n###########################################\r\ndef UpdateCSV(data, filename='data.csv'):\r\n # data = [[a,b],[a,b]]\r\n with open(filename,'w',newline='',encoding='utf-8') as file:\r\n fw = csv.writer(file) # fw = file writer\r\n fw.writerows(data) # writerows = replace with list\r\n\r\n\r\n# Delete ข้อมูลในตารางที่เลือก\r\ndef DeleteMember(event=None):\r\n select = table_member.selection() #เลือก item \r\n print(select)\r\n if len(select) != 0:\r\n data = table_member.item(select)['values']\r\n print(data)\r\n del allmember[data[0]]\r\n UpdateCSV(list(allmember.values()),'member.csv')\r\n UpdateTable_Member()\r\n else:\r\n messagebox.showwarning('ไม่ได้เลือกรายการ','กรุณาเลือกรายการก่อนลบข้อมูล')\r\n\r\ntable_member.bind('',DeleteMember)\r\n\r\n\r\n# Update ข้อมูลสมาชิก\r\ndef UpdateMemberInfo(event=None):\r\n\r\n select = table_member.selection() #เลือก item \r\n if len(select) != 0:\r\n code = table_member.item(select)['values'][0]\r\n print(allmember[code])\r\n memberinfo = allmember[code]\r\n\r\n v_membercode.set(memberinfo[0])\r\n v_fullname.set(memberinfo[1])\r\n v_tel.set(memberinfo[2])\r\n v_usertype.set(memberinfo[3])\r\n v_point.set(memberinfo[4])\r\n\r\n BEdit.state(['!disabled']) # เปิดปุ่มแก้\r\n BSave.state(['disabled']) # ปิดปุ่มบันทึก\r\n else:\r\n messagebox.showwarning('ไม่ได้เลือกรายการ','กรุณาเลือกรายการก่อนแก้ไขข้อมูล')\r\n\r\ntable_member.bind('',UpdateMemberInfo)\r\n\r\n# Update Table\r\nlast_member = ''\r\nallmember = {}\r\n\r\ndef UpdateTable_Member():\r\n global last_member\r\n with open('member.csv',newline='',encoding='utf-8') as file:\r\n fr = csv.reader(file) # file reader\r\n table_member.delete(*table_member.get_children()) #clear table\r\n for row in fr:\r\n table_member.insert('',0,value=row)\r\n code = row[0] # ดึงรหัสมา\r\n allmember[code] = row\r\n \r\n print('Last ROW:',row)\r\n last_member = row[0]\r\n # M-1001\r\n # ['M',1001+1]\r\n next_member = int(last_member.split('-')[1]) + 1\r\n v_membercode.set('M-{}'.format(next_member))\r\n print(allmember)\r\n\r\n# POP UP Menu\r\nmember_rcmenu = Menu(GUI,tearoff=0) # rcmenu = right click menu\r\ntable_member.bind('',lambda event: 
member_rcmenu.post( event.x_root , event.y_root) )\r\nmember_rcmenu.add_command(label='Delete',command=DeleteMember)\r\nmember_rcmenu.add_command(label='Update',command=UpdateMemberInfo)\r\n\r\ndef SearchName():\r\n select = table_member.selection()\r\n name = table_member.item(select)['values'][1]\r\n print(name)\r\n url = 'https://www.google.com/search?q={}'.format(name)\r\n webbrowser.open(url)\r\n\r\nmember_rcmenu.add_command(label='Search Name',command=SearchName)\r\n\r\ndef SearchBCC():\r\n select = table_member.selection()\r\n name = table_member.item(select)['values'][1]\r\n print(name)\r\n url = 'https://www.bbc.co.uk/search?q={}'.format(name)\r\n webbrowser.open(url)\r\n\r\nmember_rcmenu.add_command(label='Search BCC',command=SearchBCC)\r\n# https://www.bbc.co.uk/search?q=putin\r\n\r\n\r\nBEdit.state(['disabled'])\r\nUpdateTable_Member()\r\nGUI.mainloop()","sub_path":"GUI-Member-menu.py","file_name":"GUI-Member-menu.py","file_ext":"py","file_size_in_byte":17690,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"106336794","text":"# -*- coding: utf-8 -*-\n##############################################################################\n#\n# OpenERP, Open Source Management Solution\n# Copyright (C) 2004-2010 Tiny SPRL ().\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. If not, see .\n#\n##############################################################################\n\nfrom datetime import datetime, timedelta\nfrom openerp.osv import fields, osv\nfrom openerp.tools.translate import _\nfrom openerp import SUPERUSER_ID\n\nclass event_type(osv.osv):\n \"\"\" Event Type \"\"\"\n _name = 'event.type'\n _description = __doc__\n _columns = {\n 'name': fields.char('Event Type', size=64, required=True),\n 'default_reply_to': fields.char('Default Reply-To', size=64,help=\"The email address of the organizer which is put in the 'Reply-To' of all emails sent automatically at event or registrations confirmation. 
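The tkinter member-management program above persists its Treeview rows through plain CSV files and reloads the whole table after every change. Below is a stripped-down sketch of that load-into-table pattern; the members.csv name and three-column layout are illustrative assumptions, not the app's actual schema.

import csv
from tkinter import Tk, ttk

def load_table(tree, filename='members.csv'):
    # Clear the Treeview, then re-insert every row from the CSV file
    # (silently does nothing if the file does not exist yet).
    tree.delete(*tree.get_children())
    try:
        with open(filename, newline='', encoding='utf-8') as f:
            for row in csv.reader(f):
                tree.insert('', 0, values=row)  # newest row on top
    except FileNotFoundError:
        pass

root = Tk()
columns = ('code', 'name', 'phone')
table = ttk.Treeview(root, columns=columns, show='headings')
for col in columns:
    table.heading(col, text=col)
table.pack()
load_table(table)
root.mainloop()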
You can also put your email address of your mail gateway if you use one.\" ),\n 'default_email_event': fields.many2one('email.template','Event Confirmation Email', help=\"It will select this default confirmation event mail value when you choose this event\"),\n 'default_email_registration': fields.many2one('email.template','Registration Confirmation Email', help=\"It will select this default confirmation registration mail value when you choose this event\"),\n 'default_registration_min': fields.integer('Default Minimum Registration', help=\"It will select this default minimum value when you choose this event\"),\n 'default_registration_max': fields.integer('Default Maximum Registration', help=\"It will select this default maximum value when you choose this event\"),\n }\n _defaults = {\n 'default_registration_min': 0,\n 'default_registration_max': 0,\n }\n\n\nclass event_event(osv.osv):\n \"\"\"Event\"\"\"\n _name = 'event.event'\n _description = __doc__\n _order = 'date_begin'\n _inherit = ['mail.thread', 'ir.needaction_mixin']\n\n def name_get(self, cr, uid, ids, context=None):\n if not ids:\n return []\n\n if isinstance(ids, (long, int)):\n ids = [ids]\n\n res = []\n for record in self.browse(cr, uid, ids, context=context):\n date = record.date_begin.split(\" \")[0]\n date_end = record.date_end.split(\" \")[0]\n if date != date_end:\n date += ' - ' + date_end\n display_name = record.name + ' (' + date + ')'\n res.append((record['id'], display_name))\n return res\n\n def copy(self, cr, uid, id, default=None, context=None):\n \"\"\" Reset the state and the registrations while copying an event\n \"\"\"\n if not default:\n default = {}\n default.update({\n 'state': 'draft',\n 'registration_ids': False,\n })\n return super(event_event, self).copy(cr, uid, id, default=default, context=context)\n\n def button_draft(self, cr, uid, ids, context=None):\n return self.write(cr, uid, ids, {'state': 'draft'}, context=context)\n\n def button_cancel(self, cr, uid, ids, context=None):\n registration = self.pool.get('event.registration')\n reg_ids = registration.search(cr, uid, [('event_id','in',ids)], context=context)\n for event_reg in registration.browse(cr,uid,reg_ids,context=context):\n if event_reg.state == 'done':\n raise osv.except_osv(_('Error!'),_(\"You have already set a registration for this event as 'Attended'. Please reset it to draft if you want to cancel this event.\") )\n registration.write(cr, uid, reg_ids, {'state': 'cancel'}, context=context)\n return self.write(cr, uid, ids, {'state': 'cancel'}, context=context)\n\n def button_done(self, cr, uid, ids, context=None):\n return self.write(cr, uid, ids, {'state': 'done'}, context=context)\n\n def check_registration_limits(self, cr, uid, ids, context=None):\n for self.event in self.browse(cr, uid, ids, context=context):\n total_confirmed = self.event.register_current\n if total_confirmed < self.event.register_min or total_confirmed > self.event.register_max and self.event.register_max!=0:\n raise osv.except_osv(_('Error!'),_(\"The total of confirmed registration for the event '%s' does not meet the expected minimum/maximum. 
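The registration-limit check above (and the seat-availability check that follows it) treats a register_max of 0 as "no limit". A small standalone restatement of that rule makes the edge cases easy to verify; this is a paraphrase of the logic for illustration, not code from this module.

def registrations_within_limits(confirmed, minimum, maximum):
    # A maximum of 0 means "unlimited"; a minimum of 0 means "no floor".
    if confirmed < minimum:
        return False
    if maximum != 0 and confirmed > maximum:
        return False
    return True

# Edge cases:
assert registrations_within_limits(5, 0, 0)        # no limits at all
assert registrations_within_limits(10, 2, 10)      # exactly at the cap
assert not registrations_within_limits(1, 2, 0)    # below the floor
assert not registrations_within_limits(11, 0, 10)  # above the cap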
Please reconsider those limits before going further.\") % (self.event.name))\n\n def check_registration_limits_before(self, cr, uid, ids, no_of_registration, context=None):\n for event in self.browse(cr, uid, ids, context=context):\n available_seats = event.register_avail\n if available_seats and no_of_registration > available_seats:\n raise osv.except_osv(_('Warning!'),_(\"Only %d Seats are Available!\") % (available_seats))\n elif available_seats == 0:\n raise osv.except_osv(_('Warning!'),_(\"No Tickets Available!\"))\n\n def confirm_event(self, cr, uid, ids, context=None):\n register_pool = self.pool.get('event.registration')\n if self.event.email_confirmation_id:\n #send reminder that will confirm the event for all the people that were already confirmed\n reg_ids = register_pool.search(cr, uid, [\n ('event_id', '=', self.event.id),\n ('state', 'not in', ['draft', 'cancel'])], context=context)\n register_pool.mail_user_confirm(cr, uid, reg_ids)\n return self.write(cr, uid, ids, {'state': 'confirm'}, context=context)\n\n def button_confirm(self, cr, uid, ids, context=None):\n \"\"\" Confirm Event and send confirmation email to all register peoples\n \"\"\"\n if isinstance(ids, (int, long)):\n ids = [ids]\n self.check_registration_limits(cr, uid, ids, context=context)\n return self.confirm_event(cr, uid, ids, context=context)\n\n def _get_register(self, cr, uid, ids, fields, args, context=None):\n \"\"\"Get Confirm or uncofirm register value.\n @param ids: List of Event registration type's id\n @param fields: List of function fields(register_current and register_prospect).\n @param context: A standard dictionary for contextual values\n @return: Dictionary of function fields value.\n \"\"\"\n res = {}\n for event in self.browse(cr, uid, ids, context=context):\n res[event.id] = {}\n reg_open = reg_done = reg_draft =0\n for registration in event.registration_ids:\n if registration.state == 'open':\n reg_open += registration.nb_register\n elif registration.state == 'done':\n reg_done += registration.nb_register\n elif registration.state == 'draft':\n reg_draft += registration.nb_register\n for field in fields:\n number = 0\n if field == 'register_current':\n number = reg_open\n elif field == 'register_attended':\n number = reg_done\n elif field == 'register_prospect':\n number = reg_draft\n elif field == 'register_avail':\n #the number of ticket is unlimited if the event.register_max field is not set.\n #In that cas we arbitrary set it to 9999, it is used in the kanban view to special case the display of the 'subscribe' button\n number = event.register_max - reg_open if event.register_max != 0 else 9999\n res[event.id][field] = number\n return res\n\n def _subscribe_fnc(self, cr, uid, ids, fields, args, context=None):\n \"\"\"This functional fields compute if the current user (uid) is already subscribed or not to the event passed in parameter (ids)\n \"\"\"\n register_pool = self.pool.get('event.registration')\n res = {}\n for event in self.browse(cr, uid, ids, context=context):\n res[event.id] = False\n curr_reg_id = register_pool.search(cr, uid, [('user_id', '=', uid), ('event_id', '=' ,event.id)])\n if curr_reg_id:\n for reg in register_pool.browse(cr, uid, curr_reg_id, context=context):\n if reg.state in ('open','done'):\n res[event.id]= True\n continue\n return res\n\n def _get_visibility_selection(self, cr, uid, context=None):\n return [('public', 'All Users'),\n ('employees', 'Employees Only')]\n # Lambda indirection method to avoid passing a copy of the overridable method when 
declaring the field\n _visibility_selection = lambda self, *args, **kwargs: self._get_visibility_selection(*args, **kwargs)\n\n _columns = {\n 'name': fields.char('Name', size=64, required=True, translate=True, readonly=False, states={'done': [('readonly', True)]}),\n 'user_id': fields.many2one('res.users', 'Responsible User', readonly=False, states={'done': [('readonly', True)]}),\n 'type': fields.many2one('event.type', 'Type of Event', readonly=False, states={'done': [('readonly', True)]}),\n 'register_max': fields.integer('Maximum Registrations', help=\"You can for each event define a maximum registration level. If you have too much registrations you are not able to confirm your event. (put 0 to ignore this rule )\", readonly=True, states={'draft': [('readonly', False)]}),\n 'register_min': fields.integer('Minimum Registrations', help=\"You can for each event define a minimum registration level. If you do not enough registrations you are not able to confirm your event. (put 0 to ignore this rule )\", readonly=True, states={'draft': [('readonly', False)]}),\n 'register_current': fields.function(_get_register, string='Confirmed Registrations', multi='register_numbers'),\n 'register_avail': fields.function(_get_register, string='Available Registrations', multi='register_numbers',type='integer'),\n 'register_prospect': fields.function(_get_register, string='Unconfirmed Registrations', multi='register_numbers'),\n 'register_attended': fields.function(_get_register, string='# of Participations', multi='register_numbers'),\n 'registration_ids': fields.one2many('event.registration', 'event_id', 'Registrations', readonly=False, states={'done': [('readonly', True)]}),\n 'date_begin': fields.datetime('Start Date', required=True, readonly=True, states={'draft': [('readonly', False)]}),\n 'date_end': fields.datetime('End Date', required=True, readonly=True, states={'draft': [('readonly', False)]}),\n 'state': fields.selection([\n ('draft', 'Unconfirmed'),\n ('cancel', 'Cancelled'),\n ('confirm', 'Confirmed'),\n ('done', 'Done')],\n 'Status', readonly=True, required=True,\n track_visibility='onchange',\n help='If event is created, the status is \\'Draft\\'.If event is confirmed for the particular dates the status is set to \\'Confirmed\\'. If the event is over, the status is set to \\'Done\\'.If event is cancelled the status is set to \\'Cancelled\\'.'),\n 'email_registration_id' : fields.many2one('email.template','Registration Confirmation Email', help='This field contains the template of the mail that will be automatically sent each time a registration for this event is confirmed.'),\n 'email_confirmation_id' : fields.many2one('email.template','Event Confirmation Email', help=\"If you set an email template, each participant will receive this email announcing the confirmation of the event.\"),\n 'reply_to': fields.char('Reply-To Email', size=64, readonly=False, states={'done': [('readonly', True)]}, help=\"The email address of the organizer is likely to be put here, with the effect to be in the 'Reply-To' of the mails sent automatically at event or registrations confirmation. 
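The _get_register function field above tallies each event's registrations by state and derives the remaining seats, with register_max == 0 treated as unlimited and arbitrarily reported as 9999 so the kanban view can special-case it. Here is the same bookkeeping as plain Python outside the ORM; the (state, nb_register) input shape is an assumption for illustration.

def tally_registrations(registrations, register_max):
    # registrations: iterable of (state, nb_register) pairs (assumed shape).
    open_, done, draft = 0, 0, 0
    for state, nb in registrations:
        if state == 'open':
            open_ += nb
        elif state == 'done':
            done += nb
        elif state == 'draft':
            draft += nb
    # 0 means "no maximum"; report 9999 so views can special-case it.
    avail = register_max - open_ if register_max != 0 else 9999
    return {'register_current': open_, 'register_attended': done,
            'register_prospect': draft, 'register_avail': avail}

print(tally_registrations([('open', 2), ('done', 1), ('draft', 4)], 10))
# {'register_current': 2, 'register_attended': 1, 'register_prospect': 4, 'register_avail': 8}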
You can also put the email address of your mail gateway if you use one.\"),\n 'main_speaker_id': fields.many2one('res.partner','Main Speaker', readonly=False, states={'done': [('readonly', True)]}, help=\"Speaker who will be giving speech at the event.\"),\n 'address_id': fields.many2one('res.partner','Location Address', readonly=False, states={'done': [('readonly', True)]}),\n 'street': fields.related('address_id','street',type='char',string='Street'),\n 'street2': fields.related('address_id','street2',type='char',string='Street2'),\n 'state_id': fields.related('address_id','state_id',type='many2one', relation=\"res.country.state\", string='State'),\n 'zip': fields.related('address_id','zip',type='char',string='zip'),\n 'city': fields.related('address_id','city',type='char',string='city'),\n 'speaker_confirmed': fields.boolean('Speaker Confirmed', readonly=False, states={'done': [('readonly', True)]}),\n 'country_id': fields.related('address_id', 'country_id',\n type='many2one', relation='res.country', string='Country', readonly=False, states={'done': [('readonly', True)]}),\n 'note': fields.text('Description', readonly=False, states={'done': [('readonly', True)]}),\n 'company_id': fields.many2one('res.company', 'Company', required=False, change_default=True, readonly=False, states={'done': [('readonly', True)]}),\n 'is_subscribed' : fields.function(_subscribe_fnc, type=\"boolean\", string='Subscribed'),\n 'visibility': fields.selection(_visibility_selection, 'Privacy / Visibility',\n select=True, required=True),\n }\n _defaults = {\n 'state': 'draft',\n 'company_id': lambda self,cr,uid,c: self.pool.get('res.company')._company_default_get(cr, uid, 'event.event', context=c),\n 'user_id': lambda obj, cr, uid, context: uid,\n 'visibility': 'employees',\n }\n\n def subscribe_to_event(self, cr, uid, ids, context=None):\n register_pool = self.pool.get('event.registration')\n user_pool = self.pool.get('res.users')\n num_of_seats = int(context.get('ticket', 1))\n self.check_registration_limits_before(cr, uid, ids, num_of_seats, context=context)\n user = user_pool.browse(cr, uid, uid, context=context)\n curr_reg_ids = register_pool.search(cr, uid, [('user_id', '=', user.id), ('event_id', '=' , ids[0])])\n #the subscription is done with SUPERUSER_ID because in case we share the kanban view, we want anyone to be able to subscribe\n if not curr_reg_ids:\n curr_reg_ids = [register_pool.create(cr, SUPERUSER_ID, {'event_id': ids[0] ,'email': user.email, 'name':user.name, 'user_id': user.id, 'nb_register': num_of_seats})]\n else:\n register_pool.write(cr, uid, curr_reg_ids, {'nb_register': num_of_seats}, context=context)\n return register_pool.confirm_registration(cr, SUPERUSER_ID, curr_reg_ids, context=context)\n\n def unsubscribe_to_event(self, cr, uid, ids, context=None):\n register_pool = self.pool.get('event.registration')\n #the unsubscription is done with SUPERUSER_ID because in case we share the kanban view, we want anyone to be able to unsubscribe\n curr_reg_ids = register_pool.search(cr, SUPERUSER_ID, [('user_id', '=', uid), ('event_id', '=', ids[0])])\n return register_pool.button_reg_cancel(cr, SUPERUSER_ID, curr_reg_ids, context=context)\n\n def _check_closing_date(self, cr, uid, ids, context=None):\n for event in self.browse(cr, uid, ids, context=context):\n if event.date_end < event.date_begin:\n return False\n return True\n\n _constraints = [\n (_check_closing_date, 'Error ! 
Closing Date cannot be set before Beginning Date.', ['date_end']),\n ]\n\n def onchange_event_type(self, cr, uid, ids, type_event, context=None):\n if type_event:\n type_info = self.pool.get('event.type').browse(cr,uid,type_event,context)\n dic ={\n 'reply_to': type_info.default_reply_to,\n 'email_registration_id': type_info.default_email_registration.id,\n 'email_confirmation_id': type_info.default_email_event.id,\n 'register_min': type_info.default_registration_min,\n 'register_max': type_info.default_registration_max,\n }\n return {'value': dic}\n\n def on_change_address_id(self, cr, uid, ids, address_id, context=None):\n values = {}\n if not address_id:\n return values\n address = self.pool.get('res.partner').browse(cr, uid, address_id, context=context)\n values.update({\n 'street' : address.street,\n 'street2' : address.street2,\n 'city' : address.city,\n 'country_id' : address.country_id and address.country_id.id or False,\n 'state_id' : address.state_id and address.state_id.id or False,\n 'zip' : address.zip,\n })\n return {'value' : values}\n\n def onchange_start_date(self, cr, uid, ids, date_begin=False, date_end=False, context=None):\n res = {'value':{}}\n if date_end:\n return res\n if date_begin and isinstance(date_begin, str):\n date_begin = datetime.strptime(date_begin, \"%Y-%m-%d %H:%M:%S\")\n date_end = date_begin + timedelta(hours=1)\n res['value'] = {'date_end': date_end.strftime(\"%Y-%m-%d %H:%M:%S\")}\n return res\n\n\nclass event_registration(osv.osv):\n \"\"\"Event Registration\"\"\"\n _name= 'event.registration'\n _description = __doc__\n _inherit = ['mail.thread', 'ir.needaction_mixin']\n _columns = {\n 'id': fields.integer('ID'),\n 'origin': fields.char('Source Document', size=124,readonly=True,help=\"Reference of the sales order which created the registration\"),\n 'nb_register': fields.integer('Number of Participants', required=True, readonly=True, states={'draft': [('readonly', False)]}),\n 'event_id': fields.many2one('event.event', 'Event', required=True, readonly=True, states={'draft': [('readonly', False)]}),\n 'partner_id': fields.many2one('res.partner', 'Partner', states={'done': [('readonly', True)]}),\n 'create_date': fields.datetime('Creation Date' , readonly=True),\n 'date_closed': fields.datetime('Attended Date', readonly=True),\n 'date_open': fields.datetime('Registration Date', readonly=True),\n 'reply_to': fields.related('event_id','reply_to',string='Reply-to Email', type='char', size=128, readonly=True,),\n 'log_ids': fields.one2many('mail.message', 'res_id', 'Logs', domain=[('model','=',_name)]),\n 'event_end_date': fields.related('event_id','date_end', type='datetime', string=\"Event End Date\", readonly=True),\n 'event_begin_date': fields.related('event_id', 'date_begin', type='datetime', string=\"Event Start Date\", readonly=True),\n 'user_id': fields.many2one('res.users', 'User', states={'done': [('readonly', True)]}),\n 'company_id': fields.related('event_id', 'company_id', type='many2one', relation='res.company', string='Company', store=True, readonly=True, states={'draft':[('readonly',False)]}),\n 'state': fields.selection([('draft', 'Unconfirmed'),\n ('cancel', 'Cancelled'),\n ('open', 'Confirmed'),\n ('done', 'Attended')], 'Status',\n track_visibility='onchange',\n size=16, readonly=True),\n 'email': fields.char('Email', size=64),\n 'phone': fields.char('Phone', size=64),\n 'name': fields.char('Name', size=128, select=True),\n }\n _defaults = {\n 'nb_register': 1,\n 'state': 'draft',\n }\n _order = 'name, create_date desc'\n\n def 
do_draft(self, cr, uid, ids, context=None):\n return self.write(cr, uid, ids, {'state': 'draft'}, context=context)\n\n def confirm_registration(self, cr, uid, ids, context=None):\n for reg in self.browse(cr, uid, ids, context=context or {}):\n self.pool.get('event.event').message_post(cr, uid, [reg.event_id.id], body=_('New registration confirmed: %s.') % (reg.name or '', ),subtype=\"event.mt_event_registration\", context=context)\n return self.write(cr, uid, ids, {'state': 'open'}, context=context)\n\n def registration_open(self, cr, uid, ids, context=None):\n \"\"\" Open Registration\n \"\"\"\n event_obj = self.pool.get('event.event')\n for register in self.browse(cr, uid, ids, context=context):\n event_id = register.event_id.id\n no_of_registration = register.nb_register\n event_obj.check_registration_limits_before(cr, uid, [event_id], no_of_registration, context=context)\n res = self.confirm_registration(cr, uid, ids, context=context)\n self.mail_user(cr, uid, ids, context=context)\n return res\n\n def button_reg_close(self, cr, uid, ids, context=None):\n \"\"\" Close Registration\n \"\"\"\n if context is None:\n context = {}\n today = fields.datetime.now()\n for registration in self.browse(cr, uid, ids, context=context):\n if today >= registration.event_id.date_begin:\n values = {'state': 'done', 'date_closed': today}\n self.write(cr, uid, ids, values)\n else:\n raise osv.except_osv(_('Error!'), _(\"You must wait for the starting day of the event to do this action.\"))\n return True\n\n def button_reg_cancel(self, cr, uid, ids, context=None, *args):\n return self.write(cr, uid, ids, {'state': 'cancel'})\n\n def mail_user(self, cr, uid, ids, context=None):\n \"\"\"\n Send email to user with email_template when registration is done\n \"\"\"\n for registration in self.browse(cr, uid, ids, context=context):\n if registration.event_id.state == 'confirm' and registration.event_id.email_confirmation_id.id:\n self.mail_user_confirm(cr, uid, ids, context=context)\n else:\n template_id = registration.event_id.email_registration_id.id\n if template_id:\n mail_message = self.pool.get('email.template').send_mail(cr,uid,template_id,registration.id)\n return True\n\n def mail_user_confirm(self, cr, uid, ids, context=None):\n \"\"\"\n Send email to user when the event is confirmed\n \"\"\"\n for registration in self.browse(cr, uid, ids, context=context):\n template_id = registration.event_id.email_confirmation_id.id\n if template_id:\n mail_message = self.pool.get('email.template').send_mail(cr,uid,template_id,registration.id)\n return True\n\n def onchange_contact_id(self, cr, uid, ids, contact, partner, context=None):\n if not contact:\n return {}\n addr_obj = self.pool.get('res.partner')\n contact_id = addr_obj.browse(cr, uid, contact, context=context)\n return {'value': {\n 'email':contact_id.email,\n 'name':contact_id.name,\n 'phone':contact_id.phone,\n }}\n\n def onchange_partner_id(self, cr, uid, ids, part, context=None):\n res_obj = self.pool.get('res.partner')\n data = {}\n if not part:\n return {'value': data}\n addr = res_obj.address_get(cr, uid, [part]).get('default', False)\n if addr:\n d = self.onchange_contact_id(cr, uid, ids, addr, part, context)\n data.update(d['value'])\n return {'value': data}\n\n# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:\n","sub_path":"event/event.py","file_name":"event.py","file_ext":"py","file_size_in_byte":24204,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"245551246","text":"from 
test_base import ToolboxTest\nimport pandas as pd\nimport numpy as np\n\nfrom toolbox.wbso import ingest_clockify, process_datapoints, easy_copyable_csv\n\n\nclass IngestClockifyTest(ToolboxTest):\n def test_ingest(self):\n df = ingest_clockify(self.test_data_dir / 'wbso' / 'demo.csv')\n\n self.assertTrue(set(df.columns) == {'date', 'project', 'hours'})\n self.assertGreater(df.shape[0], 1)\n\n\nclass ProcessDatapointsTest(ToolboxTest):\n def test_filter_meetings(self):\n df = pd.DataFrame(data={\n 'date': pd.to_datetime(['2019-01-01', '2019-01-02', '2019-01-03']),\n 'project': ['Load Forecasting', 'Meetings', 'Other non-WBSO'],\n 'hours': [1.0, 2.3, 5.3]\n })\n df = process_datapoints(df)\n expected = pd.DataFrame(data={\n 'date': pd.to_datetime(['2019-01-01']),\n 'hours': [np.ceil(1.0 * 0.8 * 4) / 4]\n }).set_index('date')\n\n pd.testing.assert_frame_equal(df, expected)\n\n def test_aggregate_days(self):\n df = pd.DataFrame(data={\n 'date': pd.to_datetime(['2019-01-01', '2019-01-02', '2019-01-02']),\n 'project': ['Load Forecasting', 'Baselines', 'Baselines'],\n 'hours': [1.0, 2.3, 5.3]\n })\n df = process_datapoints(df)\n\n expected = pd.DataFrame(data={\n 'date': pd.to_datetime(['2019-01-01', '2019-01-02']),\n 'hours': np.ceil(np.array([1.0, 7.6]) * 0.8 * 4) / 4\n }).set_index('date')\n\n pd.testing.assert_frame_equal(df, expected)\n\n def test_round_upper_90_percent(self):\n df = pd.DataFrame(data={\n 'date': pd.to_datetime(['2019-01-01', '2019-01-02', '2019-01-03']),\n 'project': ['baselines', 'baselines', 'baselines'],\n 'hours': [0.25, 0.26 / .8, .49 / .8]\n })\n df = process_datapoints(df)\n\n expected = pd.DataFrame(data={\n 'date': pd.to_datetime(['2019-01-01', '2019-01-02', '2019-01-03']),\n 'hours': [.25, .50, .50]\n }).set_index('date')\n\n pd.testing.assert_frame_equal(df, expected)\n\n\nclass EasyCopyableTest(ToolboxTest):\n def test_easy_copyable(self):\n df = pd.DataFrame(data={\n 'date': pd.to_datetime(['2019-01-01', '2019-02-02', '2019-03-01']),\n 'hours': [0.25, .5, .5]\n }).set_index('date')\n df = easy_copyable_csv(df)\n\n self.assertEqual(set(range(1, 4)), set(df.index.tolist()))\n self.assertEqual(set(range(1, 32)), set(df.columns))\n\n","sub_path":"src/unittest/python/toolbox_tests/wbso_tests.py","file_name":"wbso_tests.py","file_ext":"py","file_size_in_byte":2514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"376247226","text":"from dataclasses import dataclass\n\nfrom flask import jsonify\nimport state as st\n\ndef respond(kickup):\n if kickup is None:\n return jsonify( {\n 'response_type': 'in_channel',\n 'text': ':skull_and_crossbones: Sorry, this kickup is dead',\n })\n if kickup.state == st.CANCELLED:\n return jsonify( {\n 'response_type': 'in_channel',\n 'text': 'This pickup match was cancelled :crying_cat_face:',\n })\n else:\n return button_resp(kickup)\n\ndef button_resp(kickup):\n if kickup.state == st.OPEN:\n text = 'Join this pickup match!'\n elif kickup.state == st.RUNNING:\n text = 'Here is the pairing for this pickup match:'\n elif kickup.state == st.RESOLVED:\n text = 'Results are in:'\n else:\n text = '? 
?'\n return jsonify(\n {\n 'text': text,\n 'response_type': 'in_channel',\n \"attachments\": [\n *att_players(kickup),\n *att_buttons(kickup),\n *result(kickup),\n *att_footer(kickup),\n ]\n }\n)\n\ndef att_footer(kickup):\n warnings = kickup.process_warnings()\n if not warnings:\n return []\n warn_lines = \"\\n\".join([f':warning: { w }' for w in warnings])\n return[{\n \"fallback\": \"Can't display this here :(\",\n \"callback_id\": f\"{ kickup.num } \",\n \"footer\": warn_lines,\n \"color\": \"warning\",\n }]\n\ndef att_players(kickup):\n if kickup.state == st.OPEN:\n return candidate_list(kickup)\n elif kickup.state == st.RUNNING or kickup.state == st.RESOLVED:\n return pairing(kickup)\n else:\n return []\n\n\ndef pairing(kickup):\n return [\n {\n \"text\": f\":goal_net:<@{ kickup.pairing.red_goal.slack_id }>\\n:athletic_shoe:<@{ kickup.pairing.red_strike.slack_id }>\",\n \"fallback\": \"Can't display this here :(\",\n \"callback_id\": f\"{ kickup.num }\",\n \"color\": \"#FF0000\",\n \"attachment_type\": \"default\",\n },\n {\n \"text\": f\" VS \",\n \"fallback\": \"Can't display this here :(\",\n \"callback_id\": f\"{ kickup.num }\",\n \"color\": \"#000000\",\n \"attachment_type\": \"default\",\n },\n {\n \"text\": f\":athletic_shoe:<@{ kickup.pairing.blue_strike.slack_id }>\\n:goal_net:<@{ kickup.pairing.blue_goal.slack_id }>\",\n \"fallback\": \"Can't display this here :(\",\n \"callback_id\": f\"{ kickup.num }\",\n \"color\": \"#0000FF\",\n \"attachment_type\": \"default\",\n },\n ]\n\ndef candidate_list(kickup):\n player_list = '\\n'.join([f'{ p.name }' for p in kickup.players])\n return [{\n \"text\": f\"Current players:\\n{ player_list }\",\n \"fallback\": \"Can't display this here :(\",\n \"callback_id\": f\"{ kickup.num }\",\n \"color\": \"#3AA3E3\",\n \"attachment_type\": \"default\",\n }]\n\ndef result(kickup):\n if kickup.state != st.RESOLVED:\n return []\n return [{\n \"text\": emoji_score(kickup),\n \"fallback\": \"Can't display this here :(\",\n \"callback_id\": f\"{ kickup.num }\",\n \"color\": \"#33CC33\",\n \"attachment_type\": \"default\",\n }]\n\n\n@dataclass\nclass EmojiWinConfig:\n name: str\n winner_emoji: str\n loser_emoji: str\n\n\ndef emoji_score(kickup):\n emoji_config = {\n 6: EmojiWinConfig('DESTROYED', ':godmode:', ':sob:'),\n 5: EmojiWinConfig('KNOCKOUT', ':punch:', ':face_with_head_bandage:'),\n 4: EmojiWinConfig('MERCY', ':muscle:', ':dizzy:'),\n 3: EmojiWinConfig('DOMINATED', ':stuck_out_tongue_closed_eyes:', ':astonished:'),\n 2: EmojiWinConfig('NICE GAME', ':star-struck:', ':unamused:'),\n 1: EmojiWinConfig('NICE GAME', ':star-struck:', ':unamused:'),\n }[abs(kickup.score_blue - kickup.score_red)]\n\n red_won = kickup.score_red > kickup.score_blue\n red_emoji = emoji_config.winner_emoji if red_won else emoji_config.loser_emoji\n blue_emoji = emoji_config.winner_emoji if not red_won else emoji_config.loser_emoji\n\n return f\"*{ emoji_config.name }*: {red_emoji} { kickup.score_red }:{ kickup.score_blue } {blue_emoji}\"\n\n\ndef att_buttons(kickup):\n if kickup.state == st.OPEN:\n return [{\n \"callback_id\": f\"{ kickup.num }\",\n \"fallback\": \"OMG\",\n \"actions\": [\n {\n \"name\": \"kickup\",\n \"text\": \":arrow_down: Join\",\n \"type\": \"button\",\n \"value\": \"join\",\n \"style\": \"primary\",\n },\n {\n \"name\": \"kickup\",\n \"text\": \":soccer: Start\",\n \"type\": \"button\",\n \"value\": \"start\"\n },\n {\n \"name\": \"kickup\",\n \"text\": \":no_entry_sign: Cancel\",\n \"type\": \"button\",\n \"value\": \"cancel\",\n \"style\": 
\"danger\",\n },\n {\n \"name\": \"kickup\",\n \"text\": \"DummyAdd\",\n \"type\": \"button\",\n \"value\": \"dummyadd\"\n },\n ]\n }]\n elif kickup.state == st.RUNNING:\n return [{\n \"callback_id\": f\"{ kickup.num }\",\n \"fallback\": \"OMG\",\n \"actions\": [\n {\n \"name\": \"score_red\",\n \"text\": \"Score Red\",\n \"type\": \"select\",\n \"options\": [{'text': str(i), 'value': str(i)} for i in range(7)],\n \"selected_options\": [ {\n \"text\": str(kickup.score_red),\n \"value\": str(kickup.score_red),\n }],\n },\n {\n \"name\": \"score_blue\",\n \"text\": \"Score Blue\",\n \"type\": \"select\",\n \"options\": [{'text': str(i), 'value': str(i)} for i in range(7)],\n \"selected_options\": [ {\n \"text\": str(kickup.score_blue),\n \"value\": str(kickup.score_blue),\n }],\n },\n {\n \"name\": \"kickup\",\n \"text\": \":heavy_check_mark: Resolve\",\n \"type\": \"button\",\n \"value\": \"resolve\"\n },\n {\n \"name\": \"kickup\",\n \"text\": \":no_entry_sign: Cancel\",\n \"type\": \"button\",\n \"value\": \"cancel\"\n },\n ]}]\n else:\n return []\n\ndef elo_leaderboard_resp(leaderboard):\n c1 = 3\n c2 = 15\n c3 = 2\n c4 = 5\n lines = []\n\n for p, e in enumerate(leaderboard['board']):\n pos = str(p + 1) + '.'\n name = e[\"name\"][:c2]\n matchcount = e[\"matchcount\"]\n score = int(e[\"score\"])\n lines.append(f'{pos:>{c1}} {name:<{c2}} {matchcount:>{c3}} {score:>{c4}}')\n lb_text = \"\\n\".join(lines)\n\n last = list(filter(lambda e: e is not None, leaderboard['last']))\n pos_score = next(filter(lambda s: s >= 0, map(lambda e: e[1], last)),0)\n neg_score = next(filter(lambda s: s < 0, map(lambda e: e[1], last)),0)\n pos_names = \", \".join([e[0].name for e in filter(lambda e: e[1] >=0, last)])[0:50]\n neg_names = \", \".join([e[0].name for e in filter(lambda e: e[1] <0, last)])[0:50]\n\n pos_line = f'↗️ {pos_names:40} +{pos_score}'\n neg_line = f'↘️ {neg_names:40} -{abs(neg_score)}'\n\n res_text = f'*Elo Scores:*```\\n{lb_text}```\\n\\n*Last Result:*\\n```{pos_line}\\n{neg_line}```'\n\n return jsonify( {\n 'response_type': 'in_channel',\n 'text': res_text,\n })\n\ndef error_response(error_message):\n return jsonify( {\n 'response_type': 'ephemeral',\n 'text': f':warning: { error_message }',\n })\n","sub_path":"kickup/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":7645,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"286410294","text":"\nfrom django.shortcuts import render\nfrom .models import *\nfrom .forms import *\n# Create your views here.\n\ndef home(request):\n barangs = Barang.objects.all()\n return render(request, 'home.html', {'barangs':barangs})\n\ndef barang_detail(request, id_barang):\n barang_satuan = Barang.objects.get(id = id_barang)\n return render(request, 'barang_detail.html', {'barang_satuan': barang_satuan})\n\ndef tambah_barang(request):\n if request.method == \"POST\":\n form = InputBarang(request.POST)\n if form.is_valid():\n post = form.save(commit=False)\n post.save()\n else:\n form = InputBarang()\n return render(request, 'form_input_barang.html', {'form': form})","sub_path":"toko/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":705,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"494726828","text":"from decimal import Decimal\n\nfrom django.conf import settings\nfrom django.contrib.gis.db.models import PointField\nfrom django.db import models\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom 
django_fsm import FSMField, transition\nfrom model_utils import Choices\nfrom push_notifications.models import APNSDevice\n\nfrom apps.users.models import User\n\n\nclass Game(models.Model):\n Status = Choices(\n ('pending', _('Pending')),\n ('started', _('Started')),\n ('ended', _('Ended')),\n )\n\n owner = models.ForeignKey(settings.AUTH_USER_MODEL, related_name='games')\n center_point = PointField(srid=4326)\n radius = models.FloatField()\n buy_in = models.DecimalField(max_digits=12, decimal_places=8)\n status = FSMField(choices=Status, default=Status.pending)\n\n @transition(status, source=Status.pending, target=Status.started)\n def start(self):\n users = User.objects.filter(participant__game=self)\n APNSDevice.objects.filter(user__in=users).send_message(None, content_available=True, extra={'type': 'game_started'})\n\n @transition(status, source=Status.started, target=Status.ended)\n def end(self):\n users = User.objects.filter(participant__game=self)\n APNSDevice.objects.filter(user__in=users).send_message(None, content_available=True, extra={'type': 'game_ended'})\n\n\nclass Participant(models.Model):\n Status = Choices(\n ('invited', _('Invited')),\n ('joined', _('Joined')),\n ('tagged', _('Tagged')),\n )\n\n game = models.ForeignKey('Game', related_name='participants', on_delete=models.CASCADE)\n user = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE)\n status = FSMField(choices=Status, default=Status.invited)\n tagged_by = models.ForeignKey(settings.AUTH_USER_MODEL, blank=True, null=True, related_name='tagged', on_delete=models.CASCADE)\n\n @property\n def btc_earned(self):\n participant_count = self.game.participants.count()\n\n pool = self.game.buy_in * participant_count\n winner_pool = Decimal(20) * pool / Decimal(100)\n remaining_pool = pool - winner_pool\n tag_amount = remaining_pool / (participant_count - 1)\n\n earned = Decimal(0)\n earned = tag_amount * Participant.objects.filter(game=self.game, tagged_by=self.user).count()\n\n if self.game.status == Game.Status.ended and self.status != Participant.Status.tagged:\n earned += winner_pool\n\n return earned\n\n @transition(status, source=Status.invited, target=Status.joined)\n def join(self):\n users = User.objects.filter(participant__game=self.game)\n APNSDevice.objects.filter(user__in=users).send_message(None, content_available=True, extra={'type': 'participant_joined'})\n\n @transition(status, source=Status.joined, target=Status.tagged)\n def tag(self, tagged_by):\n self.tagged_by = tagged_by\n users = User.objects.filter(participant__game=self.game)\n APNSDevice.objects.filter(user__in=users).send_message(None, content_available=True, extra={'type': 'participant_tagged'})\n if Participant.objects.exclude(status=self.Status.tagged).count() == 1:\n self.game.end()\n self.game.save()\n","sub_path":"apps/games/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"297787717","text":"'''\nPackages used for the model\n'''\nimport time\nimport numpy as np\nimport cv2\nimport imutils\nfrom imutils.video import VideoStream\nfrom tensorflow import keras\nfrom tensorflow.python.keras.applications.mobilenet_v2 import preprocess_input\n\nimport os\nfrom keras_preprocessing.image import img_to_array\n\n\n\ndef preprocess_face_frame(face_frame):\n # convert to RGB\n face_frame = cv2.cvtColor(face_frame, cv2.COLOR_BGR2RGB)\n # preprocess input image for mobilenet\n face_frame_resized = 
cv2.resize(face_frame, (224, 224))\n face_frame_array = img_to_array(face_frame_resized)\n return face_frame_array\n\n\ndef decode_prediction(pred):\n (mask, no_mask) = pred\n mask_or_not = \"Mask\" if mask > no_mask else \"No mask\"\n confidence = f\"{(max(mask, no_mask) * 100):.2f}\"\n return mask_or_not, confidence\n\n\ndef write_bb(mask_or_not, confidence, box, frame):\n (x, y, w, h) = box\n color = (0, 255, 0) if mask_or_not == \"Mask\" else (0, 0, 255)\n label = f\"{mask_or_not}: {confidence}%\"\n\n cv2.putText(frame, label, (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.45, color, 2)\n cv2.rectangle(frame, (x, y), (x + w, y + h), color, 2)\n\n\ndef load_cascade_detector():\n cascade_path = os.path.dirname(cv2.__file__) + \"/data/haarcascade_frontalface_alt2.xml\"\n face_detector = cv2.CascadeClassifier(cascade_path)\n return face_detector\n\nmask_detection_model_path = 'models/final_mask_detection.h5'\ndefault_face_path = 'models/haarcascade_frontalface_default.xml'\nfrontal_face_alt2_path = 'models/frontal_face_alt2.xml'\nnose_path = 'models/Nariz.xml'\nmouth_path = 'models/Mouth.xml'\neyes_path = 'models/frontalEyes35x16.xml'\n#yolo_cfg_path = 'models/yolov4.cfg'\n#yolo_weights_path = 'models/yolov4.weights'\n\n#Classes that can be showed. \nclass_names= ['person','bicycle','car','motorcycle','airplane','bus','train','truck','boat',\n 'traffic light','fire hydrant','stop sign','parking meter','bench','bird','cat',\n 'dog','horse','sheep','cow','elephant','bear','zebra','giraffe','backpack','umbrella',\n 'handbag tie','suitcase','frisbee','skis','snowboard','sports ball','kite',\n 'baseball','bat','baseball glove','skateboard','surfboard','tennis racket','bottle',\n 'wine glass','cup','fork','knife','spoon','bowl','banana','apple','sandwich','orange',\n 'broccoli','carrot','hot dog','pizza','donut','cake','chair','couch','potted plant',\n 'bed','dining table','toilet','tv','laptop','mouse','remote','keyboard','cell phone',\n 'microwave','oven','toaster','sink','refrigerator','book','clock','vase','scissors',\n 'teddy bear','hair drier','toothbrush']\n \nbounding_colors = np.random.uniform(0,255,size = (len(class_names),3))\n\nlabels_dict={0:'MASK',1:'NO MASK',2:'WEAR MASK PROPERLY'}\ncolor_dict={0:(0,255,0),1:(0,0,255),2:(0,100,100)}\n\nmodel = keras.models.load_model(mask_detection_model_path)\nface_detector = load_cascade_detector()\n\n#network = cv2.dnn.readNet(yolo_weights_path, yolo_cfg_path) \n# Using pretrained yolov4 model to detect the objects\n\nface_clsfr=cv2.CascadeClassifier(default_face_path)\nnose_clsfr = cv2.CascadeClassifier(nose_path)\nmouth_clsfr = cv2.CascadeClassifier(mouth_path)\neyes_clsfr = cv2.CascadeClassifier(eyes_path)\nframes = []\n\nsize_fact = (320,320)\nmean = (0,0,0)\nscalefactor = 0.004\nyolo_predict = []\n\ndef output_layers(net):\n layer_names = net.getLayerNames()\n layers = [layer_names[i[0] - 1] for i in net.getUnconnectedOutLayers()]\n return layers\n\"\"\"\ndef yolov4_detections(features,width,height,prob_thresh):\n ids,object_probability,bounding_box= [],[],[]\n for feature in features:\n for obj_prob in feature:\n scores = obj_prob[5:]\n id = np.argmax(scores)\n probability = scores[id]\n if(probability > prob_thresh):\n w,h = int(obj_prob[2]*width),int(obj_prob[3]*height)\n x,y = int(obj_prob[0]*width) - w / 2,int(obj_prob[1]*height) - h / 2\n ids.append(id)\n object_probability.append(float(probability))\n bounding_box.append([x,y,w,h])\n return bounding_box,object_probability,ids\n\"\"\" \ndef get_label(img):\n if(True):\n if(True):\n faces = 
face_clsfr.detectMultiScale(img,1.3,4)\n for (x,y,w,h) in faces:\n face_img = img[y:y+h,x:x+w]\n resized = cv2.resize(face_img,(224,224))\n normalized = resized/255.0\n reshaped = np.reshape(normalized,(1,224,224,3))\n result = model.predict(reshaped)\n label = np.argmax(result,axis=1)[0]\n accuracy = \"{:.2f}\".format(np.max(result) * 100)\n return [label,float(accuracy)]\n# return 2\n\"\"\" resized = cv2.resize(img,(224,224))\n normalized = resized/255.0\n reshaped = np.reshape(normalized,(1,224,224,3))\n result = model.predict(reshaped)\n label = np.argmax(result,axis=1)[0]\n accuracy = \"{:.2f}\".format(np.max(result) * 100)\n return [label,float(accuracy)]\"\"\"\n\n\ndef fun (i):\n\tdict={0:'MASK',1:'NO MASK'}\n\treturn dict[i]\n\n\n\nimport unittest\nimport warnings \nimport logging\nlogging.basicConfig(filename='mask_logs.log',format='%(asctime)s %(message)s')\nlogger=logging.getLogger()\nlogger.setLevel(logging.INFO)\n#logging.info('This is an info message')\ndef get_image(i): \n\tpath = \"./sample_test_images/\"+str(i)+\".png\"\n\treturn cv2.imread(path) \nprint(\"Beginning Testing...\")\n\nclass TestModel(unittest.TestCase):\n def test1(self): \n result,r = get_label(get_image(1))\n print(\"Label of Img \",1,\":- \",result,\"with accuracy\",r,\", i.e Status : \",fun(result))\n logger.info(\"Label of Img 1:- %d with accuracy %f i.e Status : %s\",result,r,fun(result))\n self.assertEqual(result, 0)\n self.assertNotEqual(result, 1)\n \n def test2(self): \n result,r = get_label(get_image(2))\n print(\"Label of Img \",2,\":- \",result,\"with accuracy\",r,\", i.e Status : \",fun(result))\n logger.info(\"Label of Img 2:- %d with accuracy %f i.e Status : %s\",result,r,fun(result))\n self.assertEqual(result, 0)\n self.assertNotEqual(result, 1)\n \n def test3(self): \n result,r = get_label(get_image(3))\n print(\"Label of Img \",3,\":- \",result,\"with accuracy\",r,\", i.e Status : \",fun(result))\n logger.info(\"Label of Img 3:- %d with accuracy %f i.e Status : %s\",result,r,fun(result))\n self.assertEqual(result, 1)\n self.assertNotEqual(result, 0)\n\n \n def test4(self): \n result,r = get_label(get_image(4))\n print(\"Label of Img \",4,\":- \",result,\"with accuracy\",r,\", i.e Status : \",fun(result))\n logger.info(\"Label of Img 4:- %d with accuracy %f i.e Status : %s\",result,r,fun(result))\n self.assertEqual(result, 1)\n self.assertNotEqual(result, 0)\n \n def test5(self): \n result,r = get_label(get_image(5))\n print(\"Label of Img \",5,\":- \",result,\"with accuracy\",r,\", i.e Status : \",fun(result))\n logger.info(\"Label of Img 5:- %d with accuracy %f i.e Status : %s\",result,r,fun(result))\n self.assertEqual(result, 1)\n self.assertNotEqual(result, 0)\n \n def test6(self): \n result,r = get_label(get_image(6))\n print(\"Label of Img \",6,\":- \",result,\"with accuracy\",r,\", i.e Status : \",fun(result))\n logger.info(\"Label of Img 6:- %d with accuracy %f i.e Status : %s\",result,r,fun(result))\n self.assertEqual(result, 1)\n self.assertNotEqual(result, 0)\n \n def test7(self): \n result,r = get_label(get_image(7))\n print(\"Label of Img \",7,\":- \",result,\"with accuracy\",r,\", i.e Status : \",fun(result))\n logger.info(\"Label of Img 7:- %d with accuracy %f i.e Status : %s\",result,r,fun(result))\n self.assertEqual(result, 0)\n self.assertNotEqual(result, 1)\n \n\n \n \n\n \n\n\nif __name__ == '__main__':\n unittest.main()\n 
\n\n","sub_path":"source/test_model.py","file_name":"test_model.py","file_ext":"py","file_size_in_byte":8040,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"176022555","text":"import pandas as pd\nimport numpy as np\nimport thundersvm\nfrom sklearn import svm\nimport random\n\n\ntotalfilecount = 50\ntestfraction = 0.2\n\nif __name__ == \"__main__\":\n testblocks = random.sample(range(1, totalfilecount), int(totalfilecount*testfraction))\n trainblocks = list(set(range(1,totalfilecount)) - set(testblocks))\n\n # svm_classifier = thundersvm.SVC()\n svm_classifier = svm.SVC()\n # gamma='auto', kernel='linear'\n print(\"collecting data...\")\n all_features = None\n all_labels = None\n for block in trainblocks:\n data = pd.read_csv(\"AF_Filtered_Data/Data{}.csv\".format(block), sep=' ', header=0).values\n if data.shape[0] == 0:\n continue\n valids = np.isfinite(data[:,-1].reshape((data.shape[0],)))\n data = data[valids]\n features = data[:,range(data.shape[1]-1)]\n labels = data[:,-1]\n if all_features is None:\n all_features = features\n else:\n all_features = np.concatenate((all_features, features))\n\n if all_labels is None:\n all_labels = labels\n else:\n all_labels = np.concatenate((all_labels, labels))\n\n print(\"training classifier...\")\n\n svm_classifier.fit(all_features, all_labels)\n\n print(\"collecting test data\")\n all_features = None\n all_labels = None\n for block in testblocks:\n data = pd.read_csv(\"AF_Filtered_Data/Data{}.csv\".format(block), sep=' ', header=0).values\n if data.shape[0] == 0:\n continue\n valids = np.isfinite(data[:, -1].reshape((data.shape[0],)))\n data = data[valids]\n features = data[:, range(data.shape[1] - 1)]\n labels = data[:, -1]\n if all_features is None:\n all_features = features\n else:\n all_features = np.concatenate((all_features, features))\n\n if all_labels is None:\n all_labels = labels\n else:\n all_labels = np.concatenate((all_labels, labels))\n\n print(\"predicting...\")\n res = svm_classifier.predict(all_features)\n\n print(\"calculating scores...\")\n # print(all_labels)\n # print(sum([(a + b) % 2 for (a,b) in zip(res, all_labels) ]))\n tp = 0\n tn = 0\n fp = 0\n fn = 0\n for i in range(len(res)):\n if all_labels[i] != 0 and res[i] != 0:\n tp += 1\n elif all_labels[i] == 0 and res[i] == 0:\n tn += 1\n elif all_labels[i] != 0 and res[i] == 0:\n fn += 1\n elif all_labels[i] == 0 and res[i] != 0:\n fp += 1\n\n print(\"Confusion Matrix:\")\n print(\"{:4} | {:4}\\n------------\\n{:4} | {:4}\".format(tp, fn, fp, tn))\n print()\n print(\"Recall AF: {}\".format(tp / (tp + fn)))\n print(\"Precision AF: {}\".format(tp / (tp + fp)))","sub_path":"Project/svmclassifier.py","file_name":"svmclassifier.py","file_ext":"py","file_size_in_byte":2743,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"327905385","text":"# -*- coding: utf-8 -*-\nimport scrapy\n\n\nclass TripadvisorDalatSpider(scrapy.Spider):\n name = 'tripadvisor_Dalat'\n allowed_domains = ['tripadvisor.com.vn']\n start_urls = ['https://www.tripadvisor.com.vn/Hotels-g293922-Da_Lat_Lam_Dong_Province-Hotels.html/']\n\n custom_settings={ 'FEED_URI': \"tripadvisor_DaLat.csv\",\n 'FEED_FORMAT': 'csv'}\n\n def parse(self, response):\n\n print(\"procesing:\"+response.url)\n \n hotel_class=response.css(\".listing_title > a\")\n name_hotel=hotel_class.css(\"a::text\").extract()\n url_hotel=hotel_class.css(\"a::attr(href)\").extract()\n\n row_data=zip(name_hotel, url_hotel)\n\n #Making 
extracted data row wise\n for item in row_data:\n #create a dictionary to store the scraped info\n scraped_info = {\n #key:value\n 'name_hotel':item[0],\n 'url' : response.urljoin(item[1])\n }\n\n #yield or give the scraped info to scrapy\n yield scraped_info\n\n\n NEXT_PAGE_SELECTOR = \".ui_pagination > a.next::attr(href)\"\n next_page = response.css(NEXT_PAGE_SELECTOR).extract_first()\n if next_page:\n yield scrapy.Request(\n response.urljoin(next_page),\n callback=self.parse)\n","sub_path":"scrapy/Assigment_AIS/tripadvisor_pages/tripadvisor_pages/spiders/tripadvisor_Dalat.py","file_name":"tripadvisor_Dalat.py","file_ext":"py","file_size_in_byte":1313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"386509096","text":"#!/usr/bin/env python\n\nimport sys\nimport json\nimport requests\nimport output\n\nBASE_URL = 'http://localhost:8080'\n\n# -----------------------------------------------------------------------------\n# http://galaxy-central.readthedocs.org/en/latest/lib/galaxy.webapps.galaxy.api.html#galaxy.webapps.galaxy.api.histories.HistoriesController.index\n# lib/galaxy/webapps/galaxy/api/histories.py, def index\ndef list_histories():\n # listing/index of all histories\n full_url = BASE_URL + '/api/histories'\n\n params = {\n # read the key from the .api-key file\n 'key' : open( '.api-key' ).readline().strip(),\n }\n # GET plus resource == index/list of all resources of that kind\n return requests.get( full_url, params=params )\n # --> HTTP GET 'http://localhost:8080/api/histories?key='\n\n\n# -----------------------------------------------------------------------------\nif __name__ == '__main__':\n response = list_histories()\n output.output_response( response )\n","sub_path":"historical_exercises/api-scripts.exercises/list_histories.py","file_name":"list_histories.py","file_ext":"py","file_size_in_byte":1006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"219650664","text":"# ##### BEGIN GPL LICENSE BLOCK #####\n#\n# This program is free software; you can redistribute it and / or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software Foundation,\n# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110 - 1301, USA.\n#\n# ##### END GPL LICENSE BLOCK #####\n\n\nbl_info = {\n \"name\": \"TP Modifier\",\n \"author\": \"MKB\",\n \"version\": (0, 1, 2),\n \"blender\": (2, 7, 8),\n \"location\": \"View3D > Tool Shelf [T] or Property Shelf [N]\",\n \"description\": \"Modifier Tools Panel\",\n \"warning\": \"\",\n \"wiki_url\": \"\",\n \"tracker_url\": \"\",\n \"category\": \"ToolPlus\"}\n\n\nfrom toolplus_modifier.mods_menu import (VIEW3D_TP_Modifier_Menu)\nfrom toolplus_modifier.mods_stack_ui import (VIEW3D_TP_Modifier_Stack_Panel_UI)\nfrom toolplus_modifier.mods_stack_tools import (VIEW3D_TP_Modifier_Stack_Panel_TOOLS)\n\n##################################\n\nimport sys, os\nsys.path.append(os.path.join(os.path.dirname(__file__), '..', 'toolplus_modifier'))\n\nif \"bpy\" in locals():\n import imp\n imp.reload(mods_action)\n imp.reload(mods_automirror)\n imp.reload(mods_batch)\n imp.reload(mods_batch_atm)\n imp.reload(mods_display)\n imp.reload(mods_normals)\n imp.reload(mods_pivot)\n imp.reload(mods_remove)\n imp.reload(mods_show)\n imp.reload(mods_toall)\n imp.reload(mods_tools)\n\n\nelse:\n from . import mods_action \n from . import mods_automirror \n from . import mods_batch \n from . import mods_batch_atm \n from . import mods_display \n from . import mods_normals \n from . import mods_pivot \n from . import mods_remove \n from . import mods_show \n from . import mods_toall \n from . import mods_tools \n\n\n\nimport bpy\nfrom bpy import*\n\nimport bpy.utils.previews\nfrom bpy.types import AddonPreferences, PropertyGroup\nfrom bpy.props import* #(StringProperty, BoolProperty, FloatVectorProperty, FloatProperty, EnumProperty, IntProperty)\n\n\ndef update_panel_position(self, context):\n try:\n bpy.utils.unregister_class(VIEW3D_TP_Modifier_Panel_UI)\n \n bpy.utils.unregister_class(VIEW3D_TP_Modifier_Panel_TOOLS)\n \n except:\n pass\n \n try:\n bpy.utils.unregister_class(VIEW3D_TP_Modifier_Panel_UI)\n except:\n pass\n \n if context.user_preferences.addons[__name__].preferences.tab_location == 'tools':\n \n VIEW3D_TP_Modifier_Panel_TOOLS.bl_category = context.user_preferences.addons[__name__].preferences.tools_category\n \n bpy.utils.register_class(VIEW3D_TP_Modifier_Panel_TOOLS)\n \n if context.user_preferences.addons[__name__].preferences.tab_location == 'ui':\n bpy.utils.register_class(VIEW3D_TP_Modifier_Panel_UI)\n \n\n if context.user_preferences.addons[__name__].preferences.tab_location == 'off':\n pass\n\n\n\ndef update_panel_position_stack(self, context):\n try:\n bpy.utils.unregister_class(VIEW3D_TP_Modifier_Stack_Panel_UI)\n \n bpy.utils.unregister_class(VIEW3D_TP_Modifier_Stack_Panel_TOOLS)\n \n except:\n pass\n \n try:\n bpy.utils.unregister_class(VIEW3D_TP_Modifier_Stack_Panel_UI)\n except:\n pass\n \n if context.user_preferences.addons[__name__].preferences.tab_location_stack == 'tools':\n \n VIEW3D_TP_Modifier_Stack_Panel_TOOLS.bl_category = context.user_preferences.addons[__name__].preferences.tools_category_stack\n \n bpy.utils.register_class(VIEW3D_TP_Modifier_Stack_Panel_TOOLS)\n \n if context.user_preferences.addons[__name__].preferences.tab_location_stack == 'ui':\n bpy.utils.register_class(VIEW3D_TP_Modifier_Stack_Panel_UI)\n\n if context.user_preferences.addons[__name__].preferences.tab_location_stack == 'off':\n 
pass\n\n\n\n\ndef update_display_tools(self, context):\n\n if context.user_preferences.addons[__name__].preferences.tab_display_tools == 'on':\n return True\n\n else: \n if context.user_preferences.addons[__name__].preferences.tab_display_tools == 'off':\n return False \n\n\n\naddon_keymaps_menu = []\n\ndef update_menu(self, context):\n try:\n bpy.utils.unregister_class(VIEW3D_TP_Modifier_Menu)\n \n # Keymapping\n # remove keymaps when add-on is deactivated\n wm = bpy.context.window_manager\n for km in addon_keymaps_menu:\n wm.keyconfigs.addon.keymaps.remove(km)\n del addon_keymaps_menu[:]\n \n except:\n pass\n \n if context.user_preferences.addons[__name__].preferences.tab_menu_view == 'menu':\n \n VIEW3D_TP_Modifier_Menu.bl_category = context.user_preferences.addons[__name__].preferences.tools_category_menu\n \n bpy.utils.register_class(VIEW3D_TP_Modifier_Menu)\n \n # Keymapping \n wm = bpy.context.window_manager\n km = wm.keyconfigs.addon.keymaps.new(name='3D View', space_type='VIEW_3D')\n\n kmi = km.keymap_items.new('wm.call_menu', 'D', 'PRESS', ctrl=True) #,shift=True, alt=True, \n kmi.properties.name = 'VIEW3D_TP_Modifier_Menu'\n\n if context.user_preferences.addons[__name__].preferences.tab_menu_view == 'off':\n pass\n\n\n\n\n#Panel preferences\nclass TP_Panels_Preferences(AddonPreferences):\n bl_idname = __name__\n \n prefs_tabs = EnumProperty(\n items=(('info', \"Info\", \"Info\"),\n ('toolsets', \"Tools\", \"Tools\"),\n ('location', \"Location\", \"Location\"),\n ('keymap', \"Keymap\", \"Keymap\"), \n ('url', \"URLs\", \"URLs\")),\n default='info')\n\n #Tab Location \n\n tab_location = EnumProperty(\n name = 'Panel Location',\n description = 'save user settings and restart blender after switching the panel location',\n items=(('tools', 'Tool Shelf', 'place panel in the tool shelf [T]'),\n ('ui', 'Property Shelf', 'place panel in the property shelf [N]'),\n ('off', 'Off', 'on or off for panel in the shelfs')),\n default='tools', update = update_panel_position)\n\n tab_location_stack = EnumProperty(\n name = 'Panel Location',\n description = 'save user settings and restart blender after switching the panel location',\n items=(('tools', 'Tool Shelf', 'place panel in the tool shelf [T]'),\n ('ui', 'Property Shelf', 'place panel in the property shelf [N]'),\n ('off', 'Off', 'on or off for panel in the shelfs')),\n default='tools', update = update_panel_position_stack)\n\n tab_menu_view = EnumProperty(\n name = '3d View Menu',\n description = 'save user settings and restart blender after switching the panel location',\n items=(('menu', 'Menu on', 'enable menu for 3d view'),\n ('off', 'Menu off', 'enable or disable menu for 3d view')),\n default='menu', update = update_menu)\n\n # Panel\n tab_display_tools = EnumProperty(name = 'Display Tools', description = 'on / off',\n items=(('on', 'Display Tools on', 'enable tools in panel'), ('off', 'Display Tools off', 'disable tools in panel')), default='on', update = update_display_tools)\n\n tab_automirror = EnumProperty(name = 'Display Tools', description = 'on / off',\n items=(('on', 'AutoMirror on', 'enable tools in panel'), ('off', 'AutoMirror off', 'disable tools in panel')), default='on', update = update_display_tools)\n\n tab_mirror_cut = EnumProperty(name = 'Display Tools', description = 'on / off',\n items=(('on', 'MirrorCut on', 'enable tools in panel'), ('off', 'MirrorCut off', 'disable tools in panel')), default='on', update = update_display_tools)\n\n tab_mirror = EnumProperty(name = 'Display Tools', description = 'on / off',\n 
items=(('on', 'Mirror on', 'enable tools in panel'), ('off', 'Mirror off', 'disable tools in panel')), default='on', update = update_display_tools)\n\n tab_bevel = EnumProperty(name = 'Display Tools', description = 'on / off',\n items=(('on', 'Bevel on', 'enable tools in panel'), ('off', 'Bevel off', 'disable tools in panel')), default='on', update = update_display_tools)\n\n tab_subsurf = EnumProperty(name = 'Display Tools', description = 'on / off',\n items=(('on', 'Subsurf on', 'enable tools in panel'), ('off', 'Subsurf off', 'disable tools in panel')), default='on', update = update_display_tools)\n\n tab_solidify = EnumProperty(name = 'Display Tools', description = 'on / off',\n items=(('on', 'Solidify on', 'enable tools in panel'), ('off', 'Solidify off', 'disable tools in panel')), default='on', update = update_display_tools)\n\n tab_simple = EnumProperty(name = 'Display Tools', description = 'on / off',\n items=(('on', 'SDeform on', 'enable tools in panel'), ('off', 'SDeform off', 'disable tools in panel')), default='on', update = update_display_tools)\n\n tab_array = EnumProperty(name = 'Display Tools', description = 'on / off',\n items=(('on', 'Array on', 'enable tools in panel'), ('off', 'Array off', 'disable tools in panel')), default='on', update = update_display_tools)\n\n tab_transform = EnumProperty(name = 'Display Tools', description = 'on / off',\n items=(('on', 'Transform on', 'enable tools in panel'), ('off', 'Transform off', 'disable tools in panel')), default='on', update = update_display_tools)\n\n tab_shade = EnumProperty(name = 'Display Tools', description = 'on / off',\n items=(('on', 'Shade on', 'enable tools in panel'), ('off', 'Shade off', 'disable tools in panel')), default='on', update = update_display_tools)\n\n tab_remove_type = EnumProperty(name = 'Display Tools', description = 'on / off',\n items=(('on', 'Remove Type on', 'enable tools in panel'), ('off', 'Remove Type off', 'disable tools in panel')), default='on', update = update_display_tools)\n\n tab_history = EnumProperty(name = 'Display Tools', description = 'on / off',\n items=(('on', 'History on', 'enable tools in panel'), ('off', 'History off', 'disable tools in panel')), default='on', update = update_display_tools)\n\n # Menu\n tab_tp_menus = EnumProperty(name = 'Display Tools', description = 'on / off',\n items=(('on', 'Menus on', 'enable tools in menu'), ('off', 'Menus off', 'disable tools in menu')), default='on', update = update_display_tools)\n\n tab_tp_menus = EnumProperty(name = 'Display Tools', description = 'on / off',\n items=(('on', 'Menus on', 'enable tools in menu'), ('off', 'Menus off', 'disable tools in menu')), default='on', update = update_display_tools)\n\n tab_automirror_menu = EnumProperty(name = 'Display Tools', description = 'on / off',\n items=(('on', 'AutoMirror on', 'enable tools in menu'), ('off', 'AutoMirror off', 'disable tools in menu')), default='on', update = update_display_tools)\n\n tab_modstack_menu = EnumProperty(name = 'Display Tools', description = 'on / off',\n items=(('on', 'ModifierStack on', 'enable tools in menu'), ('off', 'ModifierStack off', 'disable tools in menu')), default='on', update = update_display_tools)\n\n tab_clear_menu = EnumProperty(name = 'Display Tools', description = 'on / off',\n items=(('on', 'ClearTools on', 'enable tools in menu'), ('off', 'ClearTools off', 'disable tools in menu')), default='on', update = update_display_tools)\n\n tab_hover_menu = EnumProperty(name = 'Display Tools', description = 'on / off',\n items=(('on', 
'HoverTools on', 'enable tools in menu'), ('off', 'HoverTools off', 'disable tools in menu')), default='on', update = update_display_tools)\n\n\n tools_category = StringProperty(name = \"TAB Category\", description = \"add name for a new category tab\", default = 'T+', update = update_panel_position)\n tools_category_stack = StringProperty(name = \"TAB Category\", description = \"add name for a new category tab\", default = 'T+', update = update_panel_position_stack)\n\n tools_category_menu = bpy.props.BoolProperty(name = \"Modifier Menu\", description = \"enable or disable menu\", default=True, update = update_menu)\n\n\n def draw(self, context):\n layout = self.layout\n \n #Info\n row= layout.row(align=True)\n row.prop(self, \"prefs_tabs\", expand=True)\n \n if self.prefs_tabs == 'info':\n \n box = layout.box().column(1)\n \n row = box.column(1) \n row.label(text=\"Welcome to Modifier!\") \n row.label(text=\"This custom addon is for editing.\")\n row.label(text=\"There are two ways to execute the tools:\") \n row.label(text=\"> use the functions in the panel\") \n row.label(text=\"> or the included menu\") \n row.label(text=\"Have Fun! :)\") \n\n\n #Tools\n if self.prefs_tabs == 'toolsets':\n \n box = layout.box().column(1)\n\n row = box.column_flow(4)\n row.prop(self, 'tab_subsurf', expand=True)\n row.prop(self, 'tab_automirror', expand=True)\n row.prop(self, 'tab_mirror_cut', expand=True)\n row.prop(self, 'tab_mirror', expand=True)\n row.prop(self, 'tab_bevel', expand=True)\n row.prop(self, 'tab_solidify', expand=True)\n row.prop(self, 'tab_simple', expand=True)\n row.prop(self, 'tab_array', expand=True)\n row.prop(self, 'tab_transform', expand=True)\n row.prop(self, 'tab_shade', expand=True)\n row.prop(self, 'tab_remove_type', expand=True)\n row.prop(self, 'tab_history', expand=True)\n\n row = layout.row()\n row.label(text=\"! save user settings for permant on/off !\", icon =\"INFO\")\n\n box.separator() \n \n\n #Location\n if self.prefs_tabs == 'location':\n \n box = layout.box().column(1)\n \n row = box.row(1) \n row.label(\"Location Modifier Sets:\")\n \n row = box.row(1)\n row.prop(self, 'tab_location', expand=True)\n \n box.separator()\n\n row = box.row(1) \n if self.tab_location == 'tools':\n \n box.separator() \n \n row.prop(self, \"tools_category\")\n\n box.separator()\n \n row = box.row(1) \n row.label(\"Location Modifier Stack:\") \n \n row = box.row(1) \n row.prop(self, 'tab_location_stack', expand=True)\n \n box.separator() \n \n row = box.row(1) \n if self.tab_location_stack == 'tools':\n \n box.separator() \n \n row.prop(self, \"tools_category_stack\")\n \n row = layout.row()\n row.label(text=\"! please reboot blender after changing the panel location !\", icon =\"INFO\")\n\n box.separator() \n\n\n #Keymap\n if self.prefs_tabs == 'keymap':\n\n box = layout.box().column(1)\n \n row = box.column(1) \n row.label(\"Modifier Menu:\", icon =\"COLLAPSEMENU\") \n \n row.separator() \n row.label(\"Menu: 'D', 'PRESS', ctrl=True\")\n\n row = box.row(1) \n row.prop(self, 'tab_menu_view', expand=True)\n \n if self.tab_menu_view == 'off':\n \n box.separator() \n \n row = box.row(1) \n row.label(text=\"! menu hidden with next reboot durably!\", icon =\"INFO\")\n\n box.separator() \n \n row.operator('wm.url_open', text = 'recommended: is key free addon', icon = 'PLUGIN').url = \"https://github.com/Antonioya/blender/tree/master/iskeyfree\"\n\n box.separator() \n \n row = box.row(1) \n row.label(text=\"! 
if needed change keys durably in TAB Input !\", icon =\"INFO\")\n\n \n box = layout.box().column(1)\n\n row = box.column_flow(3)\n row.prop(self, 'tab_tp_menus', expand=True)\n row.prop(self, 'tab_automirror_menu', expand=True)\n row.prop(self, 'tab_modstack_menu', expand=True)\n row.prop(self, 'tab_clear_menu', expand=True)\n row.prop(self, 'tab_hover_menu', expand=True)\n\n row = box.row()\n row.label(text=\"! save user settings for permant on/off !\", icon =\"INFO\")\n\n box.separator() \n\n\n #Weblinks\n if self.prefs_tabs == 'url':\n \n box = layout.box().column(1)\n \n row = box.column_flow(2)\n row.operator('wm.url_open', text = 'AutoMirror', icon = 'HELP').url = \"http://le-terrier-de-lapineige.over-blog.com/2014/07/automirror-mon-add-on-pour-symetriser-vos-objets-rapidement.html\"\n row.operator('wm.url_open', text = 'Copy To All', icon = 'HELP').url = \"https://www.artunchained.de/tiny-new-addon-to-all/\"\n row.operator('wm.url_open', text = 'Display Tools', icon = 'HELP').url = \"http://wiki.blender.org/index.php/Extensions:2.6/Py/Scripts/3D_interaction/Display_Tools\"\n row.operator('wm.url_open', text = 'Modifier Tools', icon = 'HELP').url = \"http://wiki.blender.org/index.php/Extensions:2.6/Py/Scripts/3D_interaction/modifier_tools\"\n row.operator('wm.url_open', text = 'Thread', icon = 'BLENDER').url = \"https://blenderartists.org/forum/showthread.php?411265-Addon-T-Modifier&p=3124733#post3124733\"\n\n\n\nclass Dropdown_TP_Modifier_Props(bpy.types.PropertyGroup):\n\n\n display_subsurf = bpy.props.BoolProperty(name=\"Open / Close\", description=\"Open / Close\", default=True) \n display_automirror = bpy.props.BoolProperty(name=\"Open / Close\", description=\"Open / Close\", default=False) \n display_mirror = bpy.props.BoolProperty(name=\"Open / Close\", description=\"Open / Close\", default=True) \n display_bevel = bpy.props.BoolProperty(name=\"Open / Close\", description=\"Open / Close\", default=True) \n display_solidify = bpy.props.BoolProperty(name=\"Open / Close\", description=\"Open / Close\", default=True) \n display_sdeform = bpy.props.BoolProperty(name=\"Open / Close\", description=\"Open / Close\", default=True) \n display_array = bpy.props.BoolProperty(name=\"Open / Close\", description=\"Open / Close\", default=True) \n display_apply = bpy.props.BoolProperty(name=\"Open / Close\", description=\"Open / Close\", default=False) \n display_display = bpy.props.BoolProperty(name=\"Open / Close\", description=\"Open / Close\", default=True) \n\n\n\ndef draw_modifier_panel_layout(self, context, layout):\n \n tp_props = context.window_manager.tp_collapse_menu_modifier\n \n ob = context.object \n obj = context.object\n scene = context.scene\n scn = context.scene\n rs = bpy.context.scene \n\n obj = context.active_object \n if obj:\n obj_type = obj.type\n \n if obj_type in {'MESH'}:\n box = layout.box()\n row = box.row(1) \n row.alignment = \"CENTER\"\n row.label(\"MESH\") \n \n if obj_type in {'LATTICE'}:\n box = layout.box()\n row = box.row(1) \n row.alignment = \"CENTER\"\n row.label(\"LATTICE\") \n\n if obj_type in {'CURVE'}:\n box = layout.box()\n row = box.row(1) \n row.alignment = \"CENTER\"\n row.label(\"CURVE\") \n \n if obj_type in {'SURFACE'}:\n box = layout.box()\n row = box.row(1) \n row.alignment = \"CENTER\"\n row.label(\"SURFACE\") \n \n if obj_type in {'META'}:\n box = layout.box()\n row = box.row(1) \n row.alignment = \"CENTER\"\n row.label(\"MBall\") \n \n if obj_type in {'FONT'}:\n box = layout.box()\n row = box.row(1) \n row.alignment = \"CENTER\"\n 
row.label(\"FONT\") \n \n if obj_type in {'ARMATURE'}:\n box = layout.box()\n row = box.row(1) \n row.alignment = \"CENTER\"\n row.label(\"ARMATURE\") \n\n if obj_type in {'EMPTY'}:\n box = layout.box()\n row = box.row(1) \n row.alignment = \"CENTER\"\n row.label(\"EMPTY\") \n\n if obj_type in {'CAMERA'}:\n box = layout.box()\n row = box.row(1) \n row.alignment = \"CENTER\"\n row.label(\"CAMERA\") \n\n if obj_type in {'LAMP'}:\n box = layout.box()\n row = box.row(1) \n row.alignment = \"CENTER\"\n row.label(\"LAMP\") \n\n if obj_type in {'SPEAKER'}:\n box = layout.box()\n row = box.row(1) \n row.alignment = \"CENTER\"\n row.label(\"SPEAKER\") \n\n\n box = layout.box()\n \n row = box.row(1) \n sub = row.row(1)\n sub.scale_x = 7\n\n sub.operator(\"tp_ops.pivot_bounding_box\", \"\", icon=\"ROTATE\")\n sub.operator(\"tp_ops.pivot_3d_cursor\", \"\", icon=\"CURSOR\")\n sub.operator(\"tp_ops.pivot_active\", \"\", icon=\"ROTACTIVE\")\n sub.operator(\"tp_ops.pivot_individual\", \"\", icon=\"ROTATECOLLECTION\")\n sub.operator(\"tp_ops.pivot_median\", \"\", icon=\"ROTATECENTER\") \n #row.menu(\"tp_ops.delete_menu\", \"\", icon=\"PANEL_CLOSE\") \n \n box = layout.box().column(1) \n \n row = box.row(1) \n row.operator_menu_enum(\"object.modifier_add\", \"type\",\" Add new Modifier\", icon=\"MODIFIER\") \n\n mod_list = context.active_object.modifiers\n if mod_list:\n \n row = box.row(1) \n row.operator(\"tp_ops.mods_render\",\" \", icon = 'RESTRICT_RENDER_OFF') \n row.operator(\"tp_ops.mods_view\",\" \", icon = 'RESTRICT_VIEW_OFF') \n row.operator(\"tp_ops.mods_edit\",\" \", icon='EDITMODE_HLT') \n row.operator(\"tp_ops.mods_cage\",\" \", icon='OUTLINER_OB_MESH') \n row.operator(\"tp_ops.remove_mod\", text=\" \", icon='X') \n row.operator(\"tp_ops.apply_mod\", text=\" \", icon='FILE_TICK') \n\n else:\n pass\n\n\n box.separator()\n \n Display_Subsurf = context.user_preferences.addons[__package__].preferences.tab_subsurf\n if Display_Subsurf == 'on':\n\n box = layout.box().column(1)\n \n row = box.row(1)\n if tp_props.display_subsurf: \n row.prop(tp_props, \"display_subsurf\", text=\"\", icon=\"MOD_SUBSURF\")\n else:\n row.prop(tp_props, \"display_subsurf\", text=\"\", icon=\"MOD_SUBSURF\")\n \n row.label(\"SubSurf\")\n\n box.separator() \n \n row = box.row(1)\n row.scale_x = 0.6 \n row.operator(\"tp_ops.subsurf_0\")\n row.operator(\"tp_ops.subsurf_1\")\n row.operator(\"tp_ops.subsurf_2\") \n row.operator(\"tp_ops.subsurf_3\")\n row.operator(\"tp_ops.subsurf_4\")\n row.operator(\"tp_ops.subsurf_5\")\n #row.operator(\"tp_ops.subsurf_6\")\n \n box.separator() \n \n if tp_props.display_subsurf: \n \n mo_types = []\n append = mo_types.append\n\n for mo in context.active_object.modifiers:\n if mo.type == 'SUBSURF':\n append(mo.type)\n\n #box.label(mo.name)\n\n row = box.row(1)\n row.prop(mo, \"use_subsurf_uv\",text=\"UVs\")\n row.prop(mo, \"show_only_control_edges\",text=\"Optimal\") \n #row.prop(mo, \"use_opensubdiv\",text=\"OPSubdiv\") \n #row.prop(system, \"opensubdiv_compute_type\", text=\"\")\n\n box.separator() \n\n\n\n Display_AutoMirror = context.user_preferences.addons[__package__].preferences.tab_automirror\n if Display_AutoMirror == 'on':\n\n obj = context.object\n if obj:\n if obj.type in {'MESH'}:\n \n box = layout.box().column(1)\n \n row = box.row(1)\n if tp_props.display_automirror: \n row.prop(tp_props, \"display_automirror\", text=\"\", icon=\"MOD_WIREFRAME\")\n else:\n row.prop(tp_props, \"display_automirror\", text=\"\", icon=\"MOD_WIREFRAME\")\n \n row.label(\"AutoMirror\")\n\n 
box.separator() \n \n row = box.row()\n row.prop(context.scene, \"AutoMirror_orientation\", text=\"\") \n row.prop(context.scene, \"AutoMirror_axis\", text=\"\") \n \n box.separator() \n \n row = box.row()\n row.prop(context.scene, \"AutoMirror_threshold\", text=\"Threshold\") \n row.operator(\"object.automirror\", text=\"Execute\") \n\n box.separator() \n\n if tp_props.display_automirror: \n \n box = layout.box().column(1) \n row = box.row(1)\n row.prop(context.scene, \"AutoMirror_toggle_edit\", text=\"Editmode\")\n row.prop(context.scene, \"AutoMirror_cut\", text=\"Cut+Mirror\")\n \n row = box.row(1)\n row.prop(context.scene, \"AutoMirror_use_clip\", text=\"Use Clip\")\n row.prop(context.scene, \"AutoMirror_show_on_cage\", text=\"Editable\") \n\n box.separator() \n\n \n Display_Mirror_Cut = context.user_preferences.addons[__package__].preferences.tab_mirror_cut\n if Display_Mirror_Cut == 'on':\n\n box = layout.box().column(1)\n \n row = box.row(1)\n row.label(\"\", icon=\"MOD_MESHDEFORM\") \n row.label(\"AutoCuts\") \n \n row = box.row(1) \n row.prop(context.scene, \"tp_axis\", text=\"\")\n sub = row.row(1)\n sub.scale_x = 0.5\n sub.prop(context.scene, \"tp_axis_cut\", text=\"\")\n row.operator(\"tp_ops.mods_autocut\", text=\"Execute\") \n \n box.separator() \n \n else:\n box = layout.box().column(1)\n \n row = box.row(1) \n row.label(\"nothing selected\", icon =\"INFO\") \n \n \n Display_Mirror = context.user_preferences.addons[__package__].preferences.tab_mirror\n if Display_Mirror == 'on':\n \n box = layout.box().column(1)\n \n row = box.row(1)\n if tp_props.display_mirror: \n row.prop(tp_props, \"display_mirror\", text=\"\", icon=\"MOD_MIRROR\")\n else:\n row.prop(tp_props, \"display_mirror\", text=\"\", icon=\"MOD_MIRROR\")\n \n row.label(\"Mirror\") \n \n sub = row.row(1)\n sub.scale_x = 0.3 \n sub.operator(\"tp_ops.mod_mirror_x\", \"Add\")\n \n box.separator() \n \n if tp_props.display_mirror: \n \n mo_types = []\n append = mo_types.append\n\n for mo in context.active_object.modifiers:\n \n if mo.type == 'MIRROR':\n append(mo.type)\n\n #box.label(mo.name)\n\n row = box.row(1)\n row.prop(mo, \"use_x\")\n row.prop(mo, \"use_y\")\n row.prop(mo, \"use_z\")\n \n row = box.row(1)\n row.prop(mo, \"use_mirror_merge\", text=\"Merge\")\n row.prop(mo, \"use_clip\", text=\"Clipping\")\n \n box.separator() \n\n \n Display_Bevel = context.user_preferences.addons[__package__].preferences.tab_bevel\n if Display_Bevel == 'on':\n \n if context.active_object.type in {'MESH'}:\n \n box = layout.box().column(1)\n \n row = box.row(1)\n if tp_props.display_bevel: \n row.prop(tp_props, \"display_bevel\", text=\"\", icon=\"MOD_BEVEL\")\n else:\n row.prop(tp_props, \"display_bevel\", text=\"\", icon=\"MOD_BEVEL\")\n \n row.label(\"Bevel\")\n \n sub = row.row(1)\n sub.scale_x = 0.3 \n sub.operator(\"tp_ops.mods_bevel\", text=\"Add\")\n \n box.separator() \n \n if tp_props.display_bevel: \n \n mo_types = []\n append = mo_types.append\n\n for mo in context.active_object.modifiers:\n \n if mo.type == 'BEVEL':\n \n append(mo.type)\n \n row = box.row(1) \n row.prop(mo, \"profile\", text=\"\")\n row.prop(mo, \"segments\", text=\"\")\n row.prop(mo, \"width\", text=\"\")\n\n row = box.row(1) \n row.label(text=\"profile\") \n row.label(text=\"segments\")\n row.label(text=\"width\")\n \n box.separator() \n\n\n Display_Solidify = context.user_preferences.addons[__package__].preferences.tab_solidify\n if Display_Solidify == 'on':\n \n if context.active_object.type in {'MESH'}:\n \n box = layout.box().column(1)\n \n row 
= box.row(1)\n if tp_props.display_solidify: \n row.prop(tp_props, \"display_solidify\", text=\"\", icon=\"MOD_SOLIDIFY\")\n else:\n row.prop(tp_props, \"display_solidify\", text=\"\", icon=\"MOD_SOLIDIFY\")\n \n row.label(\"Solidify\")\n \n sub = row.row(1)\n sub.scale_x = 0.3 \n sub.operator(\"tp_ops.mods_solidify\", text=\"Add\") \n \n box.separator() \n \n if tp_props.display_solidify: \n \n mo_types = []\n append = mo_types.append\n\n for mo in context.active_object.modifiers:\n \n if mo.type == 'SOLIDIFY':\n \n append(mo.type)\n \n row = box.column(1) \n row.prop(mo, \"thickness\")\n row.prop(mo, \"thickness_clamp\") \n row.prop(mo, \"offset\")\n \n row = box.row(1)\n row.prop(mo, \"use_rim\", text =\"Fill\")\n row.prop(mo, \"use_rim_only\", text =\"Rim\") \n row.prop(mo, \"use_even_offset\", text =\"Even\")\n \n box.separator() \n\n\n\n Display_Simple = context.user_preferences.addons[__package__].preferences.tab_simple\n if Display_Simple == 'on':\n \n if context.active_object.type in {'MESH'}:\n \n box = layout.box().column(1)\n \n row = box.row(1)\n if tp_props.display_sdeform: \n row.prop(tp_props, \"display_sdeform\", text=\"\", icon=\"MOD_SIMPLEDEFORM\")\n else:\n row.prop(tp_props, \"display_sdeform\", text=\"\", icon=\"MOD_SIMPLEDEFORM\")\n \n row.label(\"SDeform\")\n\n sub = row.row(1)\n sub.scale_x = 0.3 \n sub.operator(\"object.modifier_add\", text=\"Add\").type='SIMPLE_DEFORM' \n \n box.separator() \n\n if tp_props.display_sdeform: \n \n mo_types = []\n append = mo_types.append\n\n for mo in context.active_object.modifiers:\n \n if mo.type == 'SIMPLE_DEFORM':\n \n append(mo.type)\n \n row = box.row(1) \n row.prop(mo, \"deform_method\", expand=True)\n \n box.separator() \n \n row = box.row(1) \n row.prop_search(mo, \"vertex_group\", ob, \"vertex_groups\", text=\"VGrp\")\n row.prop(mo, \"invert_vertex_group\", text=\"\", icon='ARROW_LEFTRIGHT')\n\n row = box.row(1) \n row.prop(mo, \"origin\", text=\"Axis\")\n row.label(text=\"\", icon =\"BLANK1\")\n\n if mo.deform_method in {'TAPER', 'STRETCH', 'TWIST'}:\n \n row = box.row(1) \n row.prop(mo, \"lock_x\")\n row.prop(mo, \"lock_y\")\n\n box.separator() \n \n row = box.row(1) \n if mo.deform_method in {'TAPER', 'STRETCH'}:\n row.scale_x = 3\n row.prop(mo, \"factor\", text=\"Deform Factor:\")\n else:\n row.prop(mo, \"angle\", text=\"Deform Angle:\")\n \n box.separator() \n \n row = box.row(1) \n row.prop(mo, \"limits\", slider=True, text=\"Limits\")\n\n box.separator() \n \n \n \n Display_Array = context.user_preferences.addons[__name__].preferences.tab_array\n if Display_Array == 'on': \n\n box = layout.box().column(1)\n \n row = box.row(1)\n if tp_props.display_array: \n row.prop(tp_props, \"display_array\", text=\"\", icon=\"MOD_ARRAY\")\n else:\n row.prop(tp_props, \"display_array\", text=\"\", icon=\"MOD_ARRAY\")\n \n row.label(\"Array\") \n\n sub = row.row(1)\n sub.scale_x = 0.3 \n sub.operator(\"tp_ops.x_array\", text=\"X\")\n sub.operator(\"tp_ops.y_array\", text=\"Y\")\n sub.operator(\"tp_ops.z_array\", text=\"Z\")\n \n box.separator() \n \n if tp_props.display_array: \n \n mo_types = []\n append = mo_types.append\n\n for mo in context.active_object.modifiers:\n if mo.type == 'ARRAY':\n if mo.fit_type == 'FIXED_COUNT':\n append(mo.type)\n\n split = box.split()\n\n row = box.row(1)\n row.label(mo.name) \n row.prop(mo, \"count\")\n \n box.separator() \n \n row = box.row(1) \n row.prop(mo, \"relative_offset_displace\", text=\"\")\n \n row = box.row(1) \n row.prop(mo, \"start_cap\", text=\"\")\n row.prop(mo, \"end_cap\", 
text=\"\") \n \n box.separator() \n \n\n Display_Transform = context.user_preferences.addons[__package__].preferences.tab_transform\n if Display_Transform == 'on':\n \n if context.mode == 'OBJECT': \n \n box = layout.box().column(1)\n \n row = box.row(1)\n if tp_props.display_apply: \n row.prop(tp_props, \"display_apply\", text=\"\", icon=\"MANIPUL\")\n else:\n row.prop(tp_props, \"display_apply\", text=\"\", icon=\"MANIPUL\")\n \n row.label(\"Apply\") \n\n sub = row.row(1)\n sub.scale_x = 0.3 \n sub.operator(\"object.transform_apply\", text=\" \", icon =\"MAN_TRANS\").location=True\n sub.operator(\"object.transform_apply\", text=\" \", icon =\"MAN_ROT\").rotation=True \n sub.operator(\"object.transform_apply\", text=\" \", icon =\"MAN_SCALE\").scale=True \n \n if tp_props.display_apply: \n \n box = layout.box().column(1)\n \n row = box.column_flow(2)\n row.label(\"Transforms to Deltas\") \n row.operator(\"object.transforms_to_deltas\", text=\"Location\").mode='LOC'\n row.operator(\"object.transforms_to_deltas\", text=\"Rotation\").mode='ROT' \n row.operator(\"object.transforms_to_deltas\", text=\"All\").mode='ALL'\n row.operator(\"object.transforms_to_deltas\", text=\"Scale\").mode='SCALE' \n row.operator(\"object.anim_transforms_to_deltas\", text=\"Animated\")\n \n box.separator() \n \n row = box.column(1)\n row.operator(\"object.visual_transform_apply\")\n row.operator(\"object.duplicates_make_real\")\n \n box.separator() \n\n Display_Shade = context.user_preferences.addons[__name__].preferences.tab_shade\n if Display_Shade == 'on': \n\n box = layout.box().column(1)\n \n row = box.row(1)\n if tp_props.display_display: \n row.prop(tp_props, \"display_display\", text=\"\", icon=\"WORLD\")\n else:\n row.prop(tp_props, \"display_display\", text=\"\", icon=\"WORLD\")\n \n row.label(\"Display\")\n \n if tp_props.display_display: \n \n box.separator()\n \n row = box.row(1) \n row.operator(\"tp_ops.wire_all\", text=\"Wire all\", icon='WIRE')\n \n active_wire = bpy.context.object.show_wire \n if active_wire == True:\n row.operator(\"tp_ops.wire_off\", \"Wire Select\", icon = 'MESH_PLANE') \n else: \n row.operator(\"tp_ops.wire_on\", \"Wire Select\", icon = 'MESH_GRID') \n \n row = box.row(1)\n if context.object.draw_type == 'WIRE':\n row.operator(\"tp_ops.draw_solid\", text=\"Solid Shade\", icon='GHOST_DISABLED') \n else:\n row.operator(\"tp_ops.draw_wire\", text=\"Wire Shade\", icon='GHOST_ENABLED') \n\n row.prop(context.object, \"draw_type\", text=\"\")\n\n row = box.row(1)\n row.prop(context.object, \"show_bounds\", text=\"ShowBounds\", icon='STICKY_UVS_LOC') \n row.prop(context.object, \"draw_bounds_type\", text=\"\") \n \n if context.mode == 'EDIT_MESH': \n \n box.separator() \n \n row = box.row(1) \n row.operator(\"mesh.faces_shade_flat\", text=\"Flat\", icon=\"MESH_CIRCLE\") \n row.operator(\"mesh.faces_shade_smooth\", text=\"Smooth\", icon=\"SMOOTH\") \n \n row = box.row(1) \n row.operator(\"mesh.normals_make_consistent\", text=\"Consistent Normals\", icon=\"SNAP_NORMAL\") \n \n else: \n \n box.separator() \n \n if context.mode == 'OBJECT': \n \n row = box.row(1) \n row.operator(\"object.shade_flat\", text=\"Flat\", icon=\"MESH_CIRCLE\")\n row.operator(\"object.shade_smooth\", text=\"Smooth\", icon=\"SMOOTH\") \n \n row = box.row(1) \n row.operator(\"tp_ops.rec_normals\", text=\"Consistent Normals\", icon=\"SNAP_NORMAL\") \n\n box.separator() \n\n\n \n mod_list = context.active_object.modifiers\n if mod_list:\n \n if context.mode == 'OBJECT':\n box = layout.box().column(1)\n \n row = 
box.column(1)\n row.operator(\"scene.to_all\", text=\"copy active to selected\", icon='FRAME_NEXT').mode = \"modifier, selected\"\n row.operator(\"scene.to_all\", text=\"copy active to children\", icon='LINKED').mode = \"modifier, children\" \n \n box.separator() \n\n\n Display_RemoveType = context.user_preferences.addons[__package__].preferences.tab_remove_type\n if Display_RemoveType == 'on':\n\n box = layout.box().column(1)\n \n row = box.row(1)\n row.label(\"\", icon=\"COLLAPSEMENU\") \n row.label(\"Remove Type\") \n \n row = box.row(1) \n row.prop(context.scene, \"tp_mods_type\", text=\"\")\n row.operator(\"tp_ops.remove_mods_type\", text=\"Execute\") \n \n box.separator() \n\n else:\n\n box = layout.box().column(1) \n \n row = box.row(1) \n \n box.label('no modifier on active' , icon =\"ERROR\")\n \n box.separator()\n\n\n\n Display_History = context.user_preferences.addons[__name__].preferences.tab_history \n if Display_History == 'on':\n \n box = layout.box().column(1) \n\n row = box.row(1) \n row.operator(\"view3d.ruler\", text=\"Ruler\") \n \n row.operator(\"ed.undo_history\", text=\"History\")\n row.operator(\"ed.undo\", text=\"\", icon=\"LOOP_BACK\")\n row.operator(\"ed.redo\", text=\"\", icon=\"LOOP_FORWARDS\") \n \n box.separator() \n\n\n\n\nclass VIEW3D_TP_Modifier_Panel_TOOLS(bpy.types.Panel):\n bl_category = \"Origin\"\n bl_idname = \"VIEW3D_TP_Modifier_Panel_TOOLS\"\n bl_label = \"Modifier\"\n bl_space_type = 'VIEW_3D'\n bl_region_type = 'TOOLS'\n bl_options = {'DEFAULT_CLOSED'}\n\n @classmethod\n def poll(cls, context):\n isModelingMode = not (\n #context.sculpt_object or \n context.vertex_paint_object\n or context.weight_paint_object\n or context.image_paint_object)\n return (isModelingMode)\n\n def draw(self, context):\n layout = self.layout.column_flow(1) \n layout.operator_context = 'INVOKE_REGION_WIN'\n\n draw_modifier_panel_layout(self, context, layout) \n\n\n\nclass VIEW3D_TP_Modifier_Panel_UI(bpy.types.Panel):\n bl_idname = \"VIEW3D_TP_Modifier_Panel_UI\"\n bl_label = \"Modifier\"\n bl_space_type = 'VIEW_3D'\n bl_region_type = 'UI'\n bl_options = {'DEFAULT_CLOSED'}\n\n @classmethod\n def poll(cls, context):\n isModelingMode = not (\n #context.sculpt_object or \n context.vertex_paint_object\n or context.weight_paint_object\n or context.image_paint_object)\n return (isModelingMode)\n\n def draw(self, context):\n layout = self.layout.column_flow(1) \n layout.operator_context = 'INVOKE_REGION_WIN' \n\n draw_modifier_panel_layout(self, context, layout) \n\n\n\n\n# register\n\nimport traceback\n\nicon_collections = {}\n\ndef register():\n\n mkb_icons = bpy.utils.previews.new()\n\n icons_dir = os.path.join(os.path.dirname(__file__), \"icons\")\n\n mkb_icons.load(\"my_image1\", os.path.join(icons_dir, \"icon_image1.png\"), 'IMAGE')\n mkb_icons.load(\"my_image2\", os.path.join(icons_dir, \"icon_image2.png\"), 'IMAGE')\n\n icon_collections['main'] = mkb_icons\n \n try: bpy.utils.register_module(__name__)\n except: traceback.print_exc()\n\n bpy.types.WindowManager.tp_collapse_menu_modifier = bpy.props.PointerProperty(type = Dropdown_TP_Modifier_Props)\n\n \n update_menu(None, bpy.context)\n update_panel_position(None, bpy.context)\n update_panel_position_stack(None, bpy.context)\n\n\ndef unregister():\n\n for icon in icon_collections.values():\n bpy.utils.previews.remove(icon)\n icon_collections.clear()\n\n try: bpy.utils.unregister_module(__name__)\n except: traceback.print_exc()\n \n del bpy.types.WindowManager.tp_collapse_menu_modifier\n \nif __name__ == \"__main__\":\n 
register()\n    \n\n\n\n\n    \n","sub_path":"scripts/addons_extern/toolplus_modifier/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":48399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"}
{"seq_id":"217961121","text":"#sub_dir_search.py\n\nimport os\n\ndef search(dirname):\n    filenames = os.listdir(dirname) #os.listdir returns a list of the files in the given directory\n    #print(filenames)\n    for filename in filenames:\n        #print(filename)\n        full_filename = os.path.join(dirname, filename) #the file list contains only bare file names,\n        #so to get a file name including its path, the input dirname must be prepended\n        #the os.path.join function builds the full path by joining the directory and the file name\n        #i = os.path.splitext(full_filename)\n        #print(i) #('C:/$Recycle', '.Bin'), ('C:/bootTel', '.dat'), ('C:/swapfile', '.sys'), ...\n        ext = os.path.splitext(full_filename)[-1] #get the extension of this name ([-1] is the last element)\n        #print(ext) # .Bin, .dat, .sys, .log, ...\n        if ext == '.py':\n            print(full_filename)\n    \n\nsearch(\"C:/\") #the search function is written to take the starting directory as input\n\n","sub_path":"191217/sub_dir_search.py","file_name":"sub_dir_search.py","file_ext":"py","file_size_in_byte":1106,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"}
{"seq_id":"209241801","text":"import StringIO\nimport json\nimport tokenize\n\nfrom geomet import wkt\nimport logging\n\nfrom ancoris.gae import mysql, cache\n\n\nclass Config(mysql.Entity):\n    _db_table = 'tConfig'\n\n    def __init__(self, whitelist=None, mapCenter=None, mapZoom=None, namespaceLogo=None, appVariant=None):\n        self.whitelist = whitelist\n        self.mapCenter = mapCenter\n        self.mapZoom = mapZoom\n        self.namespaceLogo = namespaceLogo\n        self.appVariant = appVariant\n\n        logging.info('pppp-------------------------------------------')\n\n    @classmethod\n    def get(cls, conn):\n        config = cls()\n        logging.info('pppp-------------------------------------------')\n        config_cache_key = 'config'\n        namespace = conn.sql_user\n        config_json = cache.get(config_cache_key, namespace=namespace)\n\n        if not config_json:\n            conn.cursor.execute(\"\"\"SELECT `incomingEmailWhitelist`, AsWKT(mapCenter) AS `mapCenter`, `mapZoom`, `namespaceLogo`, `appVariant` FROM `vConfig`;\"\"\")\n            result = conn.cursor.fetchone()\n            cols = [i[0] for i in conn.cursor.description]\n            conn.close_cursor()\n\n            config_json = {}\n            if result:\n                whitelist = result[cols.index('incomingEmailWhitelist')]\n                if whitelist:\n                    whitelist = whitelist.decode('latin1').split(',')\n\n                mapCenter = result[cols.index('mapCenter')]\n                if mapCenter:\n                    mapCenter = wkt.loads(mapCenter)\n                    mapCenterCoordinates = mapCenter.get('coordinates', None)\n                    if mapCenterCoordinates:\n                        mapCenter = {'lat': mapCenterCoordinates[1], 'lng': mapCenterCoordinates[0]}\n                    else:\n                        mapCenter = None\n\n                mapZoom = result[cols.index('mapZoom')]\n                namespaceLogo = result[cols.index('namespaceLogo')]\n                appVariant = result[cols.index('appVariant')]\n\n                if whitelist:\n                    config_json['whitelist'] = whitelist\n                if mapCenter:\n                    config_json['mapCenter'] = mapCenter\n                if mapZoom:\n                    config_json['mapZoom'] = mapZoom\n                if namespaceLogo:\n                    config_json['namespaceLogo'] = namespaceLogo.decode('latin1')\n                if appVariant:\n                    config_json['appVariant'] = appVariant.decode('latin1')\n\n            if config_json:\n                config_json = json.dumps(config_json)\n                cache.set(config_cache_key, config_json, (60 * 60 * 72), namespace=namespace) # cache for 72 hours\n\n        if config_json:\n            config_json_obj = json.loads(config_json)\n            if config_json_obj:\n                config.whitelist = config_json_obj.get('whitelist', None)\n                
config.mapCenter = config_json_obj.get('mapCenter', None)\n config.mapZoom = config_json_obj.get('mapZoom', None)\n config.namespaceLogo = config_json_obj.get('namespaceLogo', None)\n config.appVariant = config_json_obj.get('appVariant', None)\n\n return config\n","sub_path":"src/cc/entities/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":3178,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"312984880","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nUtility functions.\n\"\"\"\n\n\n# ----------------------------------------------------------------------------\n# Imports\n# ----------------------------------------------------------------------------\n\n# Standard library modules\nimport base64\nimport pathlib\nimport sys\nfrom io import BytesIO\n\n# Third-party modules\nimport arrow\nfrom loguru import logger\nfrom PIL import Image\n\n# First-party modules\nfrom aim.core import configmanager\nfrom aim.core.constants import IMAGE_QUALITY_JPEG\n\n# ----------------------------------------------------------------------------\n# Metadata\n# ----------------------------------------------------------------------------\n\n__author__ = \"Markku Laine\"\n__date__ = \"2021-01-27\"\n__email__ = \"markku.laine@aalto.fi\"\n__version__ = \"1.0\"\n\n\n# ----------------------------------------------------------------------------\n# Utility functions\n# ----------------------------------------------------------------------------\n\n\ndef read_image(filepath: pathlib.Path) -> str:\n \"\"\"\n Read an image from a file.\n\n Args:\n filepath: Input image file path\n\n Returns:\n Image encoded in Base64\n \"\"\"\n with open(filepath, \"rb\") as f:\n image_base64: str = base64.b64encode(f.read()).decode(\"utf-8\")\n\n return image_base64\n\n\ndef write_image(image_base64: str, filepath: pathlib.Path):\n \"\"\"\n Write an image to a file.\n\n Args:\n image_base64: Image encoded in Base64\n filepath: Output image file path\n \"\"\"\n with open(filepath, \"wb\") as f:\n f.write(base64.b64decode(image_base64))\n\n\ndef convert_image(\n png_image: str, jpeg_image_quality: int = IMAGE_QUALITY_JPEG\n) -> str:\n \"\"\"\n Convert an image from PNG to JPEG, encoded in Base64.\n\n (Semi-)transparent pixels are replaced with (semi-)white pixels in\n the output JPEG image.\n\n Args:\n png_image: PNG image encoded in Base64\n\n Kwargs:\n jpeg_image_quality: JPEG image quality (defaults to 70)\n\n Returns:\n JPEG image encoded in Base64\n \"\"\"\n img_rgb: Image.Image = Image.open(\n BytesIO(base64.b64decode(png_image))\n ).convert(\"RGB\")\n buffered: BytesIO = BytesIO()\n img_rgb.save(buffered, format=\"JPEG\", quality=jpeg_image_quality)\n jpeg_image_base64: str = base64.b64encode(buffered.getvalue()).decode(\n \"utf-8\"\n )\n\n return jpeg_image_base64\n\n\ndef configure_logger():\n logger.configure(handlers=get_loguru_handlers())\n\n\ndef show_header(title, version):\n logger.info(\"{} {}\".format(title, version))\n logger.info(len(\"{} {}\".format(title, version)) * \"=\")\n logger.info(\"\")\n\n\ndef show_configurations():\n logger.debug(configmanager.options)\n logger.debug(\"\\n{}\".format(configmanager.parser.format_help()))\n logger.debug(\"\\n{}\".format(configmanager.parser.format_values()))\n\n\ndef custom_isoformat(datetime_obj):\n return (\n arrow.get(datetime_obj)\n .format(\"YYYY-MM-DDTHH:mm:ss.SSSZZ\")\n .replace(\"+00:00\", \"Z\")\n )\n\n\ndef format_string(record):\n return (\n \"{} | 
\".format(custom_isoformat(record[\"time\"]))\n + \"{level: <8} | \"\n + \"{name}:{function}:{line} | \"\n + \"{message}\\n{exception}\"\n )\n\n\ndef get_loguru_handlers():\n handlers = []\n if configmanager.options.loguru_stdout:\n handlers.append(\n {\n \"sink\": sys.stdout,\n \"format\": format_string,\n \"level\": configmanager.options.loguru_level,\n \"backtrace\": configmanager.options.loguru_backtrace,\n }\n )\n\n if configmanager.options.loguru_file:\n handlers.append(\n {\n \"sink\": \"logs/error.log\",\n \"format\": format_string,\n \"level\": configmanager.options.loguru_level,\n \"rotation\": \"100 MB\",\n \"retention\": \"3 months\",\n \"backtrace\": configmanager.options.loguru_backtrace,\n }\n )\n\n return handlers\n","sub_path":"aim2_metrics/aim/core/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4077,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"216550469","text":"def areMetaStrings(str1, str2):\n len1 = len(str1)\n len2 = len(str2)\n\n # Return false if both are not of equal length\n if (len1 != len2):\n return False\n\n # To store indexes of previously mismatched\n # characters\n prev = -1\n curr = -1\n\n count = 0\n i = 0\n while i < len1:\n\n # If current character doesn't match\n if (str1[i] != str2[i]):\n\n # Count number of unmatched character\n count = count + 1\n\n # If unmatched are greater than 2,\n # then return false\n if (count > 2):\n return False\n\n # Store both unmatched characters of\n # both strings\n prev = curr\n curr = i\n\n i = i + 1\n\n # Check if previous unmatched of string1\n # is equal to curr unmatched of string2\n # and also check for curr unmatched character,\n # if both are same, then return true\n return (count == 2 and str1[prev] == str2[curr]\n and str1[curr] == str2[prev])\n\n\nprint(areMetaStrings(\"geeks\", \"keegs\"))\n","sub_path":"src/main/scala/MetaStrings.py","file_name":"MetaStrings.py","file_ext":"py","file_size_in_byte":1065,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"511934483","text":"# REAL STATE HOUSING PRICE PREDICTION\n\nimport pandas as pd\nimport numpy as np\nhousing = pd.read_csv(\"housing_data.csv\")\n\n# print(housing.head()) # print top 5 rows\n\n# print(housing.info()) # give the info about out dataset\n\n# print(housing['CHAS'].value_counts()) # it will give the count of all values of any particular category data\n\n# print(housing.describe()) # it will describe all our data\n\nimport matplotlib.pyplot as plt\n\nhousing.hist(bins = 50,figsize = (20,15)) #histogram for our data representing\n# print(plt.show())\n\n# TRAIN TEST SPLITTING --------->\nfrom sklearn.model_selection import train_test_split\ntrain_set, test_set = train_test_split(housing , test_size = 0.2,random_state = 42) #normally use random_state = 42,\n# we are using random_state for fixing the shuffled value of our data set\n# print(\"Rows for testing -> \",len(train_set),\"\\nRows for training -> \",len(test_set))\n\n# if we want to fix the ratio of any particular feautre in train_set and test_set\n# then we can use stratified shuffle slit\nfrom sklearn.model_selection import StratifiedShuffleSplit\nsplit = StratifiedShuffleSplit(n_splits = 2, test_size = 0.2, random_state = 42) #n_split will be according to no. 
of categories for which we want to set the shuffle split\nfor train_index, test_index in split.split(housing,housing['CHAS'],housing['RAD']): # here we have added two features so, n_split = 2\n    strat_train_set = housing.loc[train_index]\n    strat_test_set = housing.loc[test_index]\n\n# Now, we will store the copy of strat_train_set in housing data\nhousing = strat_train_set.copy()\n\n# print(strat_test_set['CHAS'].value_counts())\n# print(strat_train_set['CHAS'].value_counts())\n# print(housing.info())\n# print(strat_test_set['RAD'].value_counts())\n# print(strat_train_set['RAD'].value_counts())\n\n\n# Now the ratio of 1 and 0 in both train_set and test_set will be the same\n# 0 95\n# 1 7\n# Name: CHAS, dtype: int64\n# 0 376\n# 1 28\n# Name: CHAS, dtype: int64\n\n# LOOKING FOR A CORRELATION--------------->\n# if we want to know which features in our dataset most strongly affect the price\n\ncorr_matrix = housing.corr()\nrel = corr_matrix['MEDV'].sort_values(ascending=False)\n# print(rel)\n# MEDV 1.000000\n# RM 0.695360 # According to this data RM is the most strongly affecting feature,\n# ZN 0.360445 # with which the price has a strongly positive correlation\n# B 0.333461\n# DIS 0.249929\n# CHAS 0.175260\n# AGE -0.376955\n# RAD -0.381626\n# CRIM -0.388305\n# NOX -0.427321\n# TAX -0.468536\n# INDUS -0.483725\n# PTRATIO -0.507787\n# LSTAT -0.737663\n\n# if we want to plot the graph of the correlated features-->\nfrom pandas.plotting import scatter_matrix\nattributes = [\"MEDV\",\"RM\",\"ZN\",\"LSTAT\"]\n# print(scatter_matrix(housing[attributes] , figsize = (12,8)))\n\n# TRY OUT THE ATTRIBUTES COMBINATION------------>\n# It means, if we want to add a new feature by combining two or more attributes then -->\n\nhousing[\"TAXRM\"] = housing[\"TAX\"]/housing[\"RM\"]\n# TAXRM has been added in our housing data\n# print(housing.head())\n# We can check the correlation of this feature\n# corr_matrix = housing.corr()\n# rel = corr_matrix['MEDV'].sort_values(ascending=False)\n# print(rel)\n\n# Plotting graph for TAXRM-->\nplot = housing.plot(kind =\"scatter\" , x = \"TAXRM\" , y = \"MEDV\",alpha = 0.8) # alpha sets the darkness of the points in our graph\n# print(plot)\n\nhousing = strat_train_set.drop(\"MEDV\" , axis = 1)\nhousing_labels = strat_train_set[\"MEDV\"].copy() # here we separate the data into housing and housing labels\n\n# MISSING DATA------------>\n# To take care of missing attributes, you have three options:\\n\",\n    # 1. Get rid of the missing data points\\n\", (getting rid of data means removing the data)\n    # 2. Get rid of the whole attribute\\n\",\n    # 3. 
Set the value to some value(0, mean or median)\"\n # First two option can affect our prediction so, we will use third option\n\n# option 1-->\na = housing.dropna(subset=[\"RM\"]) # oringinal dataframe will be unchanged\n# print(a.shape) # we can check the shape and size of RM\n \n# Option 2-->\nb = housing.drop(\"RM\",axis = 1) # Note that there will be no \"RM\" column\n# original housing dataframe will be unchanged\n# print(b)\n\n# Option 3 -->\n\nmedian = housing[\"RM\"].median()\nc = housing[\"RM\"].fillna(median) # Note that original dataframe will not be changed\n# print(c)\n\n# Now, we will do the third option using sklearn and fit into our original dataframe\n# print(housing.info())\nfrom sklearn.impute import SimpleImputer\nimputer = SimpleImputer(strategy=\"median\")\nimputer.fit(housing) # calculate some parameters\nx = imputer.transform(housing) # this is a numpy array and transform data in this\n\n# we will create a new dataframe for transformed dataset in which all missing values will be fit by the median values\nhousing_tr = pd.DataFrame(x , columns = housing.columns)\n# print(housing_tr[\"RM\"].describe()) \n\n\n\n# SCIKIT LEARN DESIGN ---------------->\n\n# \"Primarily, three types of objects\"--->\n# \"1. Estimators - It estimates some parameter based on a dataset.\n # Eg. imputer. It has a fit method and transform method. Fit method - Fits the dataset and calculates internal parameters\\n\",\n# \"2. Transformers - transform method take input and returns output based on the learnings\n # from fit(). It also has a convenience function called fit_transform() which fits and then transforms.\\n\",\n# \"3. Predictors - LinearRegression model is an example of predictor.\n # fit() and predict() are two common functions. It also gives score() function which will evaluate the predictions.\"\n\n# FEATURE SCALING ----->\n# Primarily, two types of feature scaling methods:--->\n# \"1. Min-max scaling (Normalization)\n# \" (value - min)/(max - min)\",\n# \" Sklearn provides a class called MinMaxScaler for this\",\n# \"2. 
Standardization\",\n# \" (value - mean)/std\",\n# \" Sklearn provides a class called StandardScaler for this\n\n# We will use pipeline for the series of steps-->\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.preprocessing import StandardScaler\nmy_pipeline = Pipeline([('imputer', SimpleImputer(strategy = 'median')),\n ('std_scalar',StandardScaler())]) # add as many as you want\n \n\nhousing_num_tr = my_pipeline.fit_transform(housing)\n# print(housing_num_tr)\n# print(housing_num_tr.shape)\n\n\n# SELECTING A MODEL --->\n# We will try different model to know that which one will give better result\nfrom sklearn.linear_model import LinearRegression\n# model = LinearRegression()\n# if we want to use decision tree regressor model\nfrom sklearn.tree import DecisionTreeRegressor\n# model = DecisionTreeRegressor() # if we use decision tree model\n# If we want to use randomforestRegressor then,\nfrom sklearn.ensemble import RandomForestRegressor # here ensemble means combine different regression and use it\nmodel = RandomForestRegressor()\nmodel.fit(housing_num_tr, housing_labels)\n\n# PREDICTION SOME DATA-->\nsome_data = housing.iloc[:5] # taking some data from housing data\n\nsome_labels = housing_labels.iloc[:5] # taking 5 lables\n\nprepared_data = my_pipeline.transform(some_data)\n\n# print(model.predict(prepared_data)) # these are pridicted values\n# Now we can compare/check the prediction values\n# print(list(some_labels)) # these are original values\n\n# EVALUATING THE MODEL--->\nfrom sklearn.metrics import mean_squared_error\nhousing_predictions = model.predict(housing_num_tr)\nmse = mean_squared_error(housing_labels,housing_predictions)\nrmse = np.sqrt(mse)\n# print(rmse) # so, this will be our root mean square error\n\n\n# USING BETTER EVALUATION TECHNIQUE - CROSS VALIDATION\n# how it works --> lets we have data -= 1 2 3 4 5 6 7 8 9 10\n# we will find the error by training and testing 1 by 1 \n# Use any one for testing and other for training finding the errors and repeat the process respectively\n\nfrom sklearn.model_selection import cross_val_score\nscores = cross_val_score(model , housing_num_tr , housing_labels, scoring = \"neg_mean_squared_error\" ,cv = 10)\nrmsc_scores = np.sqrt(-scores) # bcz we will get neg error - will convert it into pos\n# print(rmsc_scores)\n\n\ndef print_scores(scores):\n print(\"Scores : \" , scores)\n print(\"Mean : \" , scores.mean())\n print(\"Standard deviation : \" , scores.std())\n\n# print_scores(rmsc_scores) # This will give the values for decision tree model\n# we can use linear regeression and RandomForestRegressor model like this, we will use that one type of model which will give lowest error\n\n# So, now we got different values for mean and standard deviation for every model-->\n# For linear regresion-->\n # Mean : 5.033624671156926\n # Standard deviation : 1.0560992597577876\n# For Decision tree-->\n # Mean : 4.663916968421566\n # Standard deviation : 1.2175076398663032\n# For RandomForestRegressor-->\n # Mean : 3.3148356942800534\n # Standard deviation : 0.6172708981931212\n\n# As we can see randomforestRegressor model work better for this dataset\n# So, We can use it for better prediction\n\n# SAVING THE MODEL-->\nfrom joblib import dump , load\ndump(model , 'Dragon_Realstate_Model.joblib')\n# now, we can load our model anywhere by importing from joblib and make predictions\n\n# TESTING THE MODEL ON TEST DATA-->\nx_test = strat_test_set.drop(\"MEDV\",axis = 1)\ny_test = strat_test_set[\"MEDV\"].copy()\n\nx_test_prepared = 
my_pipeline.transform(x_test)\nfinal_prediction = model.predict(x_test_prepared)\nfinal_mse = mean_squared_error(y_test , final_prediction)\nfinal_rmse = np.sqrt(final_mse)\n# print(final_rmse)\n# print(final_prediction) # these are the predicted values of y_test\n# print(final_prediction , list(y_test)) # here, we can check by printing both\n","sub_path":"Real_Estate_price_prediction.py","file_name":"Real_Estate_price_prediction.py","file_ext":"py","file_size_in_byte":9690,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"}
+{"seq_id":"459854539","text":"import json\nfrom pprint import pprint\nimport pandas as pd\n\nwith open('./data/case_data.json') as f:\n    data = json.load(f)\n\n#pprint(data)\n\nDescriptions=[] # a list containing all the descriptions; note each description may have more than 1 sentence\nFunctions=[] #these are our labels\n\nall_data=data['data']\n#note: the data['data'] key is a list of dictionaries, we need to extract the value of description and function\nfor d in all_data:\n    Descriptions.append(d.get('description'))\n    Functions.append(d.get('function'))\n\nprint(len(Functions))\nprint(len(Descriptions))\nprint(Descriptions[0])\nprint(Functions[0])\n\n\n#create a csv file that we will use later in colab\n#store the csv in the data folder\n\n#1. create a dictionary\ndata_to_model= {\"Descriptions\":Descriptions, \"Functions\":Functions}\n# 2. create a df\ndf=pd.DataFrame(data_to_model)\n#3. create a csv file\ndf.to_csv(\"./data/data_to_model.csv\")\n\n","sub_path":"data_preparing.py","file_name":"data_preparing.py","file_ext":"py","file_size_in_byte":907,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"}
+{"seq_id":"357211604","text":"from sklearn.model_selection import RandomizedSearchCV\nvc = VotingClassifier([\n    ('lr', model_pl_lr), \n    ('svc', model_pl_svc), \n    ('tree', model_pl_tree), \n    ('knn', model_pl_knn)], \n    voting='soft', weights=[2, 2, 1, 1])\nweights = {'weights':mesh}\nnp.random.seed(42)\nrgs = RandomizedSearchCV(vc, param_distributions=weights, \n                         n_iter=30, cv=10, random_state=42)\nrgs.fit(X_train, y_train)\nprint('Training set prediction result', rgs.best_score_)\nprint('Test set prediction result',rgs.score(X_test, y_test))\nprint('Best weight choice',rgs.best_params_)","sub_path":"docs/cd/06443007程式碼/ch12/12-6.py","file_name":"12-6.py","file_ext":"py","file_size_in_byte":579,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"}
+{"seq_id":"560113531","text":"# Script (Python)\n# /article17/speciessummary/markConclusionAsUndeleted\n# params: 'id'\n## Script (Python) \"markConclusionAsUndeleted\"\n##bind container=container\n##bind context=context\n##bind namespace=\n##bind script=script\n##bind subpath=traverse_subpath\n##parameters=id\n##title=\n##\nfrom DateTime import DateTime\n\nrequest = container.REQUEST\nRESPONSE =  request.RESPONSE\n\nspecies, region, user, ms = request.get('id').split('###')\n\ncontext.sql_methods.update_undelete_conclusion(region=region,\n                                assesment_speciesname=context.string_decode(species),\n                                user=user,\n                                ms=ms,\n                                last_update=DateTime().strftime('%Y-%m-%d %H:%M'))\nreturn id\n","sub_path":"dump/markConclusionAsUndeleted.py","file_name":"markConclusionAsUndeleted.py","file_ext":"py","file_size_in_byte":739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"}
+{"seq_id":"225041353","text":"\n\n\n# Given a knight on an n * m chessboard (a binary matrix with 0 as empty and 1 as barrier). 
The knight's initial position is (0, 0) and it wants to reach position (n - 1, m - 1). The knight can only move from left to right. Find the shortest path to the destination position and return the length of the route. Return -1 if the knight cannot reach it.\n\n\nclass Solution:\n    \"\"\"\n    @param grid: a chessboard that includes 0 and 1\n    @return: the shortest path\n    \"\"\"\n    def shortestPath2(self, grid):\n        # write your code here\n        # BFS, time/space O(4^dist), dist is the step number from src to des\n        # The knight can only move from left to right, so it only has four directions to move.\n        if not grid or not grid[0] or grid[0][0] == 1 or grid[-1][-1] == 1:\n            return -1\n        \n        m, n = len(grid), len(grid[0])\n        dq = collections.deque([(0, 0)])\n        grid[0][0] = 1\n        step = 0\n        while dq:\n            length = len(dq)\n            for __ in range(length):\n                x, y = dq.popleft()\n                if (x, y) == (m - 1, n - 1):\n                    return step\n                for dx, dy in [(1, 2), (-1, 2), (2, 1), (-2, 1)]:\n                    nx, ny = x + dx, y + dy\n                    if 0 <= nx < m and 0 <= ny < n and grid[nx][ny] == 0:\n                        dq.append((nx, ny))\n                        grid[nx][ny] = 1\n            step += 1\n        \n        return -1\n\n\nclass Solution:\n    \"\"\"\n    @param grid: a chessboard that includes 0 and 1\n    @return: the shortest path\n    \"\"\"\n    def shortestPath2(self, grid):\n        # write your code here\n        # DP, time/space O(m * n)\n        # The knight can only move from left to right, so it only has four directions to move.\n        if not grid or not grid[0] or grid[0][0] == 1 or grid[-1][-1] == 1:\n            return -1\n        \n        m, n = len(grid), len(grid[0])\n        dp = [[float('inf')] * n for __ in range(m)]\n        dp[0][0] = 0 # src\n        for j in range(1, n): # traverse over column! can not go back\n            for i in range(m):\n                if grid[i][j] == 0:\n                    if i - 1 >= 0 and j - 2 >= 0 and dp[i - 1][j - 2] != float('inf'):\n                        dp[i][j] = min(dp[i][j], dp[i - 1][j - 2] + 1)\n                    if i + 1 < m and j - 2 >= 0 and dp[i + 1][j - 2] != float('inf'):\n                        dp[i][j] = min(dp[i][j], dp[i + 1][j - 2] + 1)\n                    if i - 2 >= 0 and j - 1 >= 0 and dp[i - 2][j - 1] != float('inf'):\n                        dp[i][j] = min(dp[i][j], dp[i - 2][j - 1] + 1)\n                    if i + 2 < m and j - 1 >= 0 and dp[i + 2][j - 1] != float('inf'):\n                        dp[i][j] = min(dp[i][j], dp[i + 2][j - 1] + 1)\n        \n        return dp[m - 1][n - 1] if dp[m - 1][n - 1] != float('inf') else -1","sub_path":"Knight Shortest Path II.py","file_name":"Knight Shortest Path II.py","file_ext":"py","file_size_in_byte":2814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"462461590","text":"import argparse\nimport collections\nimport copy\nimport datetime\nimport heapq\nimport json\nimport logging\nimport re\nimport time\nfrom typing import Dict, Optional, List, Any, Iterable, Union, Tuple\n\nimport pyodbc\nfrom tabulate import tabulate\n\nfrom . import clock_sync, kafka, tracked_tables, constants, options, validation, change_index, progress_tracking, \\\n    sql_query_subprocess, sql_queries\nfrom .metric_reporting import accumulator\n\nfrom typing import TYPE_CHECKING\nif TYPE_CHECKING:\n    from . 
import parsed_row\n\nlogger = logging.getLogger(__name__)\n\n\ndef run() -> None:\n opts: argparse.Namespace\n opts, reporters = options.get_options_and_metrics_reporters()\n\n logger.debug('Parsed configuration: %s', json.dumps(vars(opts)))\n\n if not (opts.schema_registry_url and opts.kafka_bootstrap_servers and opts.db_conn_string):\n raise Exception('Arguments schema_registry_url, kafka_bootstrap_servers, and db_conn_string are all required.')\n\n redo_snapshot_for_new_instance: bool = \\\n opts.new_capture_instance_snapshot_handling == options.NEW_CAPTURE_INSTANCE_SNAPSHOT_HANDLING_BEGIN_NEW\n publish_duplicate_changes_from_new_instance: bool = \\\n opts.new_capture_instance_overlap_handling == options.NEW_CAPTURE_INSTANCE_OVERLAP_HANDLING_REPUBLISH\n\n with sql_query_subprocess.get_db_conn(\n opts.db_conn_string\n ) as db_conn, sql_query_subprocess.SQLQueryProcessor(\n opts.db_conn_string\n ) as sql_query_processor:\n clock_syncer: clock_sync.ClockSync = clock_sync.ClockSync(db_conn)\n\n metrics_accumulator: accumulator.Accumulator = accumulator.Accumulator(\n db_conn, clock_syncer, opts.metrics_namespace, opts.process_hostname)\n\n capture_instances_by_fq_name: Dict[str, Dict[str, Any]] = get_latest_capture_instances_by_fq_name(\n db_conn, opts.capture_instance_version_strategy, opts.capture_instance_version_regex,\n opts.table_whitelist_regex, opts.table_blacklist_regex)\n\n if not capture_instances_by_fq_name:\n logger.error('No capture instances could be found.')\n exit(1)\n\n capture_instance_names: List[str] = [ci['capture_instance_name']\n for ci in capture_instances_by_fq_name.values()]\n\n tables: List[tracked_tables.TrackedTable] = build_tracked_tables_from_cdc_metadata(\n db_conn, clock_syncer, metrics_accumulator, opts.topic_name_template, opts.snapshot_table_whitelist_regex,\n opts.snapshot_table_blacklist_regex, opts.truncate_fields, capture_instance_names, sql_query_processor)\n\n topic_to_source_table_map: Dict[str, str] = {\n t.topic_name: t.fq_name for t in tables}\n topic_to_change_table_map: Dict[str, str] = {\n t.topic_name: f'{constants.CDC_DB_SCHEMA_NAME}.{t.change_table_name}' for t in tables}\n capture_instance_to_topic_map: Dict[str, str] = {\n t.capture_instance_name: t.topic_name for t in tables}\n\n with kafka.KafkaClient(\n metrics_accumulator, opts.kafka_bootstrap_servers, opts.schema_registry_url,\n opts.extra_kafka_consumer_config, opts.extra_kafka_producer_config, disable_writing=opts.run_validations\n ) as kafka_client, progress_tracking.ProgressTracker(\n kafka_client, opts.progress_topic_name, topic_to_source_table_map, topic_to_change_table_map\n ) as progress_tracker:\n\n kafka_client.register_delivery_callback((\n constants.SINGLE_TABLE_CHANGE_MESSAGE, constants.SINGLE_TABLE_SNAPSHOT_MESSAGE\n ), progress_tracker.kafka_delivery_callback)\n kafka_client.register_delivery_callback((\n constants.SINGLE_TABLE_CHANGE_MESSAGE, constants.UNIFIED_TOPIC_CHANGE_MESSAGE,\n constants.SINGLE_TABLE_SNAPSHOT_MESSAGE, constants.DELETION_CHANGE_TOMBSTONE_MESSAGE\n ), metrics_accumulator.kafka_delivery_callback)\n\n determine_start_points_and_finalize_tables(\n kafka_client, tables, progress_tracker, opts.lsn_gap_handling, opts.partition_count,\n opts.replication_factor, opts.extra_topic_config, opts.run_validations, redo_snapshot_for_new_instance,\n publish_duplicate_changes_from_new_instance)\n\n table_to_unified_topics_map: Dict[str, List[Tuple[str, int, int]]] = collections.defaultdict(list)\n unified_topic_to_tables_map: Dict[str, 
List[tracked_tables.TrackedTable]] = collections.defaultdict(list)\n\n # \"Unified\" topics contain change messages from multiple capture instances in a globally-consistent LSN\n # order. They don't contain snapshot messages.\n if opts.unified_topics:\n for unified_topic_name, included_tables_regex in opts.unified_topics.items():\n compiled_regex = re.compile(included_tables_regex, re.IGNORECASE)\n matched_tables = [table for table in tables if compiled_regex.match(table.fq_name)]\n if matched_tables:\n unified_value_schema = copy.deepcopy(constants.UNIFIED_TOPIC_VALUE_SCHEMA)\n unified_value_schema['fields'][1]['type'] = [t.value_schema for t in matched_tables]\n unified_key_schema_id, unified_value_schema_id = kafka_client.register_schemas(\n unified_topic_name, constants.UNIFIED_TOPIC_KEY_SCHEMA, unified_value_schema,\n value_schema_compatibility_level=constants.UNIFIED_TOPIC_VALUE_SCHEMA_COMPATIBILITY_LEVEL)\n unified_topic_tuple = (unified_topic_name, unified_key_schema_id, unified_value_schema_id)\n for matched_table in matched_tables:\n table_to_unified_topics_map[matched_table.fq_name].append(unified_topic_tuple)\n unified_topic_to_tables_map[unified_topic_name].append(matched_table)\n\n if table_to_unified_topics_map:\n logger.debug('Unified topics being produced to, by table: %s', table_to_unified_topics_map)\n\n # Validations will go through all messages in all topics and try to warn of any inconsistencies between\n # those and the source DB data. It take a while; probably don't run this on very large datasets!\n if opts.run_validations:\n validator: validation.Validator = validation.Validator(\n kafka_client, tables, progress_tracker, unified_topic_to_tables_map)\n validator.run()\n exit(0)\n\n last_metrics_emission_time: datetime.datetime = datetime.datetime.utcnow()\n last_capture_instance_check_time: datetime.datetime = datetime.datetime.utcnow()\n last_slow_table_heartbeat_time: datetime.datetime = datetime.datetime.utcnow()\n next_cdc_poll_allowed_time: datetime.datetime = datetime.datetime.utcnow()\n next_cdc_poll_due_time: datetime.datetime = datetime.datetime.utcnow()\n last_produced_row: Optional['parsed_row.ParsedRow'] = None\n last_topic_produces: Dict[str, datetime.datetime] = {}\n change_rows_queue: List['parsed_row.ParsedRow'] = []\n queued_change_row_counts: Dict[str, int] = {t.topic_name: 0 for t in tables}\n\n # Returned bool indicates whether the process should halt\n def poll_periodic_tasks() -> bool:\n nonlocal last_metrics_emission_time\n nonlocal last_slow_table_heartbeat_time\n nonlocal last_capture_instance_check_time\n\n if (datetime.datetime.utcnow() - last_metrics_emission_time) > constants.METRICS_REPORTING_INTERVAL:\n start_time = time.perf_counter()\n metrics = metrics_accumulator.end_and_get_values()\n for reporter in reporters:\n try:\n reporter.emit(metrics) # TODO: async this\n except Exception as e:\n logger.exception('Caught exception while reporting metrics', exc_info=e)\n elapsed = (time.perf_counter() - start_time)\n logger.debug('Metrics reporting completed in %s ms', elapsed * 1000)\n metrics_accumulator.reset_and_start()\n last_metrics_emission_time = datetime.datetime.utcnow()\n\n if (datetime.datetime.utcnow() - last_slow_table_heartbeat_time) > \\\n constants.SLOW_TABLE_PROGRESS_HEARTBEAT_INTERVAL:\n for t in tables:\n if queued_change_row_counts[t.topic_name] == 0:\n last_topic_produce = last_topic_produces.get(t.topic_name)\n if not last_topic_produce or (datetime.datetime.utcnow() - last_topic_produce) > \\\n 2 * 
constants.SLOW_TABLE_PROGRESS_HEARTBEAT_INTERVAL:\n logger.debug('Emitting heartbeat progress for slow table %s', t.fq_name)\n progress_tracker.emit_changes_progress_heartbeat(\n t.topic_name, t.max_polled_change_index)\n last_slow_table_heartbeat_time = datetime.datetime.utcnow()\n\n if opts.terminate_on_capture_instance_change and \\\n (datetime.datetime.utcnow() - last_capture_instance_check_time) > \\\n constants.CHANGED_CAPTURE_INSTANCES_CHECK_INTERVAL:\n if should_terminate_due_to_capture_instance_change(\n db_conn, progress_tracker, opts.capture_instance_version_strategy,\n opts.capture_instance_version_regex, capture_instance_to_topic_map,\n capture_instances_by_fq_name, opts.table_whitelist_regex,\n opts.table_blacklist_regex):\n return True\n last_capture_instance_check_time = datetime.datetime.utcnow()\n return False\n\n logger.info('Beginning processing for %s tracked table(s).', len(tables))\n metrics_accumulator.reset_and_start()\n\n # The above is all setup, now we come to the \"hot loop\":\n\n try:\n while True:\n snapshots_remain: bool = not all([t.snapshot_complete for t in tables])\n change_tables_lagging: bool = any([t.change_reads_are_lagging for t in tables])\n\n # ----- Poll for and produce snapshot data while change row queries run -----\n\n if snapshots_remain and not change_tables_lagging:\n while datetime.datetime.utcnow() < next_cdc_poll_due_time:\n for t in tables:\n if not t.snapshot_complete:\n for row in t.retrieve_snapshot_query_results():\n kafka_client.produce(row.destination_topic, row.key_dict,\n row.avro_key_schema_id, row.value_dict,\n row.avro_value_schema_id,\n constants.SINGLE_TABLE_SNAPSHOT_MESSAGE)\n t.enqueue_snapshot_query() # NB: results may not be retrieved until next cycle\n if datetime.datetime.utcnow() > next_cdc_poll_due_time:\n break\n if poll_periodic_tasks():\n break\n\n # ----- Wait for next poll window (if needed) and get ceiling LSN for cycle -----\n\n if not change_tables_lagging:\n wait_time = (next_cdc_poll_allowed_time - datetime.datetime.utcnow()).total_seconds()\n if wait_time > 0:\n time.sleep(wait_time)\n metrics_accumulator.register_sleep(wait_time)\n\n if poll_periodic_tasks():\n break\n\n with db_conn.cursor() as cursor:\n q, _ = sql_queries.get_max_lsn()\n cursor.execute(q)\n lsn_limit = cursor.fetchval()\n\n next_cdc_poll_allowed_time = (datetime.datetime.utcnow() + constants.MIN_CDC_POLLING_INTERVAL)\n next_cdc_poll_due_time = (datetime.datetime.utcnow() + constants.MAX_CDC_POLLING_INTERVAL)\n\n # ----- Query for change rows ----\n\n for t in tables:\n if queued_change_row_counts[t.topic_name] < constants.DB_ROW_BATCH_SIZE + 1:\n t.enqueue_changes_query(lsn_limit)\n\n common_lsn_limit: change_index.ChangeIndex = change_index.HIGHEST_CHANGE_INDEX\n\n if poll_periodic_tasks():\n break\n\n for t in tables:\n for row in t.retrieve_changes_query_results():\n queued_change_row_counts[t.topic_name] += 1\n heapq.heappush(change_rows_queue, row)\n if t.max_polled_change_index < common_lsn_limit:\n common_lsn_limit = t.max_polled_change_index\n\n if poll_periodic_tasks():\n break\n\n # ----- Produce change data to Kafka and commit progress -----\n\n while change_rows_queue:\n row: 'parsed_row.ParsedRow' = heapq.heappop(change_rows_queue)\n\n if row.change_idx > common_lsn_limit:\n heapq.heappush(change_rows_queue, row)\n break\n\n if last_produced_row and row.change_idx < last_produced_row.change_idx:\n raise Exception(f'Change rows are being produced to Kafka out of LSN order. There is '\n f'a bug. Fix it! 
Prior: {last_produced_row}, current: {row}')\n last_produced_row = row\n queued_change_row_counts[row.destination_topic] -= 1\n\n kafka_client.produce(row.destination_topic, row.key_dict, row.avro_key_schema_id,\n row.value_dict, row.avro_value_schema_id,\n constants.SINGLE_TABLE_CHANGE_MESSAGE)\n last_topic_produces[row.destination_topic] = datetime.datetime.utcnow()\n\n unified_topic_tuples = table_to_unified_topics_map.get(row.table_fq_name, [])\n\n if unified_topic_tuples:\n ut_key = {constants.LSN_NAME: row.change_idx.to_avro_ready_dict()[constants.LSN_NAME]}\n ut_value = {constants.UNIFIED_TOPIC_MSG_SOURCE_TABLE_NAME: row.table_fq_name,\n constants.UNIFIED_TOPIC_MSG_DATA_WRAPPER_NAME: row.value_dict}\n for ut_name, ut_key_schema_id, ut_value_schema_id in unified_topic_tuples:\n kafka_client.produce(ut_name, ut_key, ut_key_schema_id, ut_value,\n ut_value_schema_id, constants.UNIFIED_TOPIC_CHANGE_MESSAGE)\n\n if not opts.disable_deletion_tombstones and row.operation_name == \\\n constants.DELETE_OPERATION_NAME:\n kafka_client.produce(row.destination_topic, row.key_dict, row.avro_key_schema_id,\n None, row.avro_value_schema_id,\n constants.DELETION_CHANGE_TOMBSTONE_MESSAGE)\n\n progress_tracker.commit_progress()\n\n if poll_periodic_tasks():\n break\n except (KeyboardInterrupt, pyodbc.OperationalError):\n logger.info('Exiting due to external interrupt.')\n\n\n# This pulls the \"greatest\" capture instance running for each source table, in the event there is more than one.\ndef get_latest_capture_instances_by_fq_name(\n db_conn: pyodbc.Connection, capture_instance_version_strategy: str, capture_instance_version_regex: str,\n table_whitelist_regex: str, table_blacklist_regex: str\n) -> Dict[str, Dict[str, Any]]:\n if capture_instance_version_strategy == options.CAPTURE_INSTANCE_VERSION_STRATEGY_REGEX \\\n and not capture_instance_version_regex:\n raise Exception('Please provide a capture_instance_version_regex when specifying the `regex` '\n 'capture_instance_version_strategy.')\n result: Dict[str, Dict[str, Any]] = {}\n fq_name_to_capture_instances: Dict[str, List[Dict[str, Any]]] = collections.defaultdict(list)\n capture_instance_version_regex = capture_instance_version_regex and re.compile(capture_instance_version_regex)\n table_whitelist_regex = table_whitelist_regex and re.compile(table_whitelist_regex, re.IGNORECASE)\n table_blacklist_regex = table_blacklist_regex and re.compile(table_blacklist_regex, re.IGNORECASE)\n\n with db_conn.cursor() as cursor:\n q, p = sql_queries.get_cdc_capture_instances_metadata()\n cursor.execute(q)\n for row in cursor.fetchall():\n fq_table_name = f'{row[0]}.{row[1]}'\n\n if table_whitelist_regex and not table_whitelist_regex.match(fq_table_name):\n logger.debug('Table %s excluded by whitelist', fq_table_name)\n continue\n\n if table_blacklist_regex and table_blacklist_regex.match(fq_table_name):\n logger.debug('Table %s excluded by blacklist', fq_table_name)\n continue\n\n if row[3] is None or row[4] is None:\n logger.debug('Capture instance for %s appears to be brand-new; will evaluate again on '\n 'next pass', fq_table_name)\n continue\n\n as_dict = {\n 'fq_name': fq_table_name,\n 'capture_instance_name': row[2],\n 'start_lsn': row[3],\n 'create_date': row[4],\n }\n if capture_instance_version_regex:\n match = capture_instance_version_regex.match(row[1])\n as_dict['regex_matched_group'] = match and match.group(1) or ''\n fq_name_to_capture_instances[as_dict['fq_name']].append(as_dict)\n\n for fq_name, capture_instances in 
fq_name_to_capture_instances.items():\n if capture_instance_version_strategy == options.CAPTURE_INSTANCE_VERSION_STRATEGY_CREATE_DATE:\n latest_instance = sorted(capture_instances, key=lambda x: x['create_date'])[-1]\n elif capture_instance_version_strategy == options.CAPTURE_INSTANCE_VERSION_STRATEGY_REGEX:\n latest_instance = sorted(capture_instances, key=lambda x: x['regex_matched_group'])[-1]\n else:\n raise Exception(f'Capture instance version strategy \"{capture_instance_version_strategy}\" not recognized.')\n result[fq_name] = latest_instance\n\n logger.debug('Latest capture instance names determined by \"%s\" strategy: %s', capture_instance_version_strategy,\n sorted([v['capture_instance_name'] for v in result.values()]))\n\n return result\n\n\ndef build_tracked_tables_from_cdc_metadata(\n db_conn: pyodbc.Connection, clock_syncer: 'clock_sync.ClockSync', metrics_accumulator: 'accumulator.Accumulator',\n topic_name_template: str, snapshot_table_whitelist_regex: str, snapshot_table_blacklist_regex: str,\n truncate_fields: Dict[str, int], capture_instance_names: List[str],\n sql_query_processor: 'sql_query_subprocess.SQLQueryProcessor'\n) -> List[tracked_tables.TrackedTable]:\n result: List[tracked_tables.TrackedTable] = []\n\n truncate_fields = {k.lower(): v for k, v in truncate_fields.items()}\n\n snapshot_table_whitelist_regex = snapshot_table_whitelist_regex and re.compile(\n snapshot_table_whitelist_regex, re.IGNORECASE)\n snapshot_table_blacklist_regex = snapshot_table_blacklist_regex and re.compile(\n snapshot_table_blacklist_regex, re.IGNORECASE)\n\n name_to_meta_fields: Dict[Tuple, List[Tuple]] = collections.defaultdict(list)\n\n with db_conn.cursor() as cursor:\n q, p = sql_queries.get_cdc_tracked_tables_metadata(capture_instance_names)\n cursor.execute(q)\n for row in cursor.fetchall():\n # 0:4 gets schema name, table name, capture instance name, min captured LSN:\n name_to_meta_fields[tuple(row[0:4])].append(row[4:])\n\n for (schema_name, table_name, capture_instance_name, min_lsn), fields in name_to_meta_fields.items():\n fq_table_name = f'{schema_name}.{table_name}'\n\n can_snapshot = False\n\n if snapshot_table_whitelist_regex and snapshot_table_whitelist_regex.match(fq_table_name):\n logger.debug('Table %s WILL be snapshotted due to whitelisting', fq_table_name)\n can_snapshot = True\n\n if snapshot_table_blacklist_regex and snapshot_table_blacklist_regex.match(fq_table_name):\n logger.debug('Table %s will NOT be snapshotted due to blacklisting', fq_table_name)\n can_snapshot = False\n\n topic_name = topic_name_template.format(\n schema_name=schema_name, table_name=table_name, capture_instance_name=capture_instance_name)\n\n tracked_table = tracked_tables.TrackedTable(\n db_conn, clock_syncer, metrics_accumulator, sql_query_processor, schema_name, table_name,\n capture_instance_name, topic_name, min_lsn, can_snapshot)\n\n for (change_table_ordinal, column_name, sql_type_name, primary_key_ordinal, decimal_precision,\n decimal_scale) in fields:\n truncate_after = truncate_fields.get(f'{schema_name}.{table_name}.{column_name}'.lower())\n tracked_table.append_field(tracked_tables.TrackedField(\n column_name, sql_type_name, change_table_ordinal, primary_key_ordinal, decimal_precision,\n decimal_scale, truncate_after))\n\n result.append(tracked_table)\n\n return result\n\n\ndef determine_start_points_and_finalize_tables(\n kafka_client: kafka.KafkaClient, tables: Iterable[tracked_tables.TrackedTable],\n progress_tracker: progress_tracking.ProgressTracker, lsn_gap_handling: 
str,\n partition_count: int, replication_factor: int, extra_topic_config: Dict[str, Union[str, int]],\n validation_mode: bool = False, redo_snapshot_for_new_instance: bool = False,\n publish_duplicate_changes_from_new_instance: bool = False\n) -> None:\n topic_names: List[str] = [t.topic_name for t in tables]\n\n if validation_mode:\n for table in tables:\n table.snapshot_allowed = False\n table.finalize_table(change_index.LOWEST_CHANGE_INDEX, {}, lsn_gap_handling, kafka_client.register_schemas)\n return\n\n watermarks_by_topic = kafka_client.get_topic_watermarks(topic_names)\n first_check_watermarks_json = json.dumps(watermarks_by_topic)\n\n logger.info('Pausing briefly to ensure target topics are not receiving new messages from elsewhere...')\n time.sleep(constants.WATERMARK_STABILITY_CHECK_DELAY_SECS)\n\n watermarks_by_topic = kafka_client.get_topic_watermarks(topic_names)\n second_check_watermarks_json = json.dumps(watermarks_by_topic)\n\n if first_check_watermarks_json != second_check_watermarks_json:\n raise Exception(f'Watermarks for one or more target topics changed between successive checks. '\n f'Another process may be producing to the topic(s). Bailing.\\nFirst check: '\n f'{first_check_watermarks_json}\\nSecond check: {second_check_watermarks_json}')\n logger.debug('Topic watermarks: %s', second_check_watermarks_json)\n\n prior_progress_log_table_data = []\n prior_progress = progress_tracker.get_prior_progress_or_create_progress_topic()\n\n for table in tables:\n snapshot_progress, changes_progress = None, None\n\n if table.topic_name not in watermarks_by_topic: # new topic; create it\n if partition_count:\n this_topic_partition_count = partition_count\n else:\n per_second = table.get_change_rows_per_second()\n # one partition for each 10 rows/sec on average in the change table:\n this_topic_partition_count = max(1, int(per_second / 10))\n if this_topic_partition_count > 100:\n raise Exception(\n f'Automatic topic creation would create %{this_topic_partition_count} partitions for topic '\n f'{table.topic_name} based on a change table rows per second rate of {per_second}. This '\n f'seems excessive, so the program is exiting to prevent overwhelming your Kafka cluster. 
'\n f'Look at setting PARTITION_COUNT to take manual control of this.')\n logger.info('Creating topic %s with %s partition(s)', table.topic_name, this_topic_partition_count)\n kafka_client.create_topic(table.topic_name, this_topic_partition_count, replication_factor,\n extra_topic_config)\n else:\n snapshot_progress: Union[None, progress_tracking.ProgressEntry] = prior_progress.get(\n (table.topic_name, constants.SNAPSHOT_ROWS_KIND))\n changes_progress: Union[None, progress_tracking.ProgressEntry] = prior_progress.get(\n (table.topic_name, constants.CHANGE_ROWS_KIND))\n\n fq_change_table_name = f'{constants.CDC_DB_SCHEMA_NAME}.{table.change_table_name}'\n if snapshot_progress and (snapshot_progress.change_table_name != fq_change_table_name):\n logger.info('Found prior snapshot progress into topic %s, but from an older capture instance '\n '(prior progress instance: %s; current instance: %s)', table.topic_name,\n snapshot_progress.change_table_name, fq_change_table_name)\n if redo_snapshot_for_new_instance:\n logger.info('Will start new snapshot.')\n snapshot_progress = None\n else:\n logger.info('Will NOT start new snapshot.')\n\n if changes_progress and (changes_progress.change_table_name != fq_change_table_name):\n logger.info('Found prior change data progress into topic %s, but from an older capture instance '\n '(prior progress instance: %s; current instance: %s)', table.topic_name,\n changes_progress.change_table_name, fq_change_table_name)\n if publish_duplicate_changes_from_new_instance:\n logger.info('Will republish any change rows duplicated by the new capture instance.')\n changes_progress = None\n else:\n logger.info('Will NOT republish any change rows duplicated by the new capture instance.')\n\n starting_change_index = (changes_progress and changes_progress.change_index) \\\n or change_index.LOWEST_CHANGE_INDEX\n starting_snapshot_index = snapshot_progress and snapshot_progress.snapshot_index\n\n table.finalize_table(starting_change_index, starting_snapshot_index, lsn_gap_handling,\n kafka_client.register_schemas, progress_tracker.reset_all_progress)\n\n if not table.snapshot_allowed:\n snapshot_state = ''\n elif table.snapshot_complete:\n snapshot_state = ''\n elif table.last_read_key_for_snapshot_display is None:\n snapshot_state = ''\n else:\n snapshot_state = f'From {table.last_read_key_for_snapshot_display}'\n\n prior_progress_log_table_data.append((table.capture_instance_name, table.fq_name, table.topic_name,\n starting_change_index or '', snapshot_state))\n\n headers = ('Capture instance name', 'Source table name', 'Topic name', 'From change table index', 'Snapshots')\n table = tabulate(prior_progress_log_table_data, headers, tablefmt='fancy_grid')\n\n logger.info('Processing will proceed from the following positions based on the last message from each topic '\n 'and/or the snapshot progress committed in Kafka (NB: snapshot reads occur BACKWARDS from high to '\n 'low key column values):\\n%s', table)\n\n\ndef should_terminate_due_to_capture_instance_change(\n db_conn: pyodbc.Connection, progress_tracker: progress_tracking.ProgressTracker,\n capture_instance_version_strategy: str, capture_instance_version_regex: str,\n capture_instance_to_topic_map: Dict[str, str], current_capture_instances: Dict[str, Dict[str, Any]],\n table_whitelist_regex: str, table_blacklist_regex: str\n) -> bool:\n new_capture_instances = get_latest_capture_instances_by_fq_name(\n db_conn, capture_instance_version_strategy, capture_instance_version_regex, table_whitelist_regex,\n 
table_blacklist_regex)\n\n current = {k: v['capture_instance_name'] for k, v in current_capture_instances.items()}\n new = {k: v['capture_instance_name'] for k, v in new_capture_instances.items()}\n\n if new == current:\n logger.debug('Capture instances unchanged; continuing...')\n return False\n\n def better_json_serialize(obj):\n if isinstance(obj, (datetime.datetime, datetime.date)):\n return obj.isoformat()\n if isinstance(obj, (bytes,)):\n return f'0x{obj.hex()}'\n raise TypeError(\"Type %s not serializable\" % type(obj))\n\n logger.info('Change detected in capture instances. Current: %s New: %s',\n json.dumps(current_capture_instances, default=better_json_serialize),\n json.dumps(new_capture_instances, default=better_json_serialize))\n\n for fq_name, current_ci in current_capture_instances.items():\n if fq_name in new_capture_instances:\n new_ci = new_capture_instances[fq_name]\n last_recorded_progress = progress_tracker.get_last_recorded_progress_for_topic(\n capture_instance_to_topic_map[current_ci['capture_instance_name']])\n current_idx = last_recorded_progress and last_recorded_progress.change_index or \\\n change_index.LOWEST_CHANGE_INDEX\n new_ci_min_index = change_index.ChangeIndex(new_ci['start_lsn'], b'\\x00' * 10, 0)\n if not last_recorded_progress or (last_recorded_progress.change_index < new_ci_min_index):\n with db_conn.cursor() as cursor:\n ci_table_name = f\"[{constants.CDC_DB_SCHEMA_NAME}].[{current_ci['capture_instance_name']}_CT]\"\n cursor.execute(f\"SELECT TOP 1 1 FROM {ci_table_name} WITH (NOLOCK)\")\n has_rows = cursor.fetchval() is not None\n if has_rows:\n logger.info('Progress against existing capture instance (\"%s\") for table \"%s\" has reached index %s, '\n 'but the new capture instance (\"%s\") does not begin until index %s. Deferring termination '\n 'to maintain data integrity and will try again on next capture instance evaluation '\n 'iteration.', current_ci['capture_instance_name'], fq_name, current_idx,\n new_ci['capture_instance_name'], new_ci_min_index)\n return False\n\n logger.warning('Terminating process due to change in capture instances. 
This behavior can be controlled by '\n                   'changing option TERMINATE_ON_CAPTURE_INSTANCE_CHANGE.')\n    return True\n","sub_path":"cdc_kafka/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":32087,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"38145439","text":"\"\"\"Tests for estimator_noisers.\"\"\"\n\nimport numpy as np\nimport scipy.stats\n\nfrom wfa_cardinality_estimation_evaluation_framework.estimators import estimator_noisers\nfrom absl.testing import absltest\n\n\nclass FakeLaplaceRandomState:\n  def __init__(self, return_value):\n    self.return_value = np.array(return_value)\n\n  def laplace(self, size, scale):\n    return self.return_value\n\n\nclass EstimatorNoisersTest(absltest.TestCase):\n\n  def test_laplace_estimate_noiser_accepts_scalar_argument(self):\n    le = estimator_noisers.LaplaceEstimateNoiser(\n        1.0, random_state=FakeLaplaceRandomState([0.5]))\n    result = le(10.)\n    self.assertEqual(result, 10.5)\n\n  def test_laplace_estimate_noiser_accepts_array_argument(self):\n    le = estimator_noisers.LaplaceEstimateNoiser(\n        1.0, random_state=FakeLaplaceRandomState([0.5, -0.5]))\n    result = le(np.array([10., 20.]))\n    np.testing.assert_array_equal(result, [10.5, 19.5])\n\n  def test_geometric_estimate_noiser_accepts_scalar_argument(self):\n    le = estimator_noisers.GeometricEstimateNoiser(\n        1.0, random_state=FakeLaplaceRandomState([1.]))\n    result = le(10.)\n    self.assertEqual(result, 11)\n\n  def test_geometric_estimate_noiser_accepts_array_argument(self):\n    le = estimator_noisers.GeometricEstimateNoiser(\n        1.0, random_state=FakeLaplaceRandomState([1., -1.]))\n    result = le(np.array([10., 20.]))\n    np.testing.assert_array_equal(result, [11, 19])\n\n\nif __name__ == '__main__':\n  absltest.main()\n","sub_path":"src/estimators/tests/estimator_noisers_test.py","file_name":"estimator_noisers_test.py","file_ext":"py","file_size_in_byte":1478,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"74751392","text":"\"\"\"https://www.acmicpc.net/problem/10815\nNumber card range: [-10 million, +10 million]\n\"\"\"\ndef binary_search(array, target, start, end):\n    while start <= end:\n        mid = (start + end) // 2\n        if array[mid] == target:\n            return True\n        elif array[mid] > target:\n            end = mid - 1\n        else:\n            start = mid + 1\n    return False # not found\n\nn = int(input())\non_hand_list = list(map(int, input().split()))\non_hand_list.sort() # sort for binary search\n\nm = int(input())\ncheck_list = list(map(int, input().split()))\n\n\noutput = ''\nfor c in check_list:\n    start, end = 0, len(on_hand_list) - 1\n    existence = binary_search(\n        array=on_hand_list, \n        target=c, \n        start=start, \n        end=end\n    )\n\n    if existence:\n        output += '1 ' # if it exists\n    else:\n        output += '0 ' # if it does not exist\n\nprint(output.strip())\n","sub_path":"binary-search/10815_숫자카드.py","file_name":"10815_숫자카드.py","file_ext":"py","file_size_in_byte":905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"624178663","text":"#coding:utf-8\nimport urllib\nfrom urllib import request\n\ndata={}\ndata['work'] = 'cat'\n\nurl_values = urllib.parse.urlencode(data)\nurl = 'http://www.baidu.com/s?'\nfull_url = url+url_values\n\ndata = request.urlopen(full_url).read()\ndata = data.decode('UTF-8', 'ignore')\nprint(data)","sub_path":"chapter1/url_deal.py","file_name":"url_deal.py","file_ext":"py","file_size_in_byte":276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} 
+{"seq_id":"319118868","text":"import io\nimport os\nimport zipfile\nimport rcssmin\nimport rjsmin\n\npakName = input(\"Theme Name: \").replace(\" \", \"_\").lower()\n\nthemePak = zipfile.ZipFile(pakName +\".tpk\", \"w\")\n\nfor dirname, subdirs, files in os.walk(\"static\"):\n themePak.write(dirname)\n for filename in files:\n if not filename.endswith((\".css\", \".js\")):\n themePak.write(os.path.join(dirname, filename))\n\n if filename.endswith(\".css\"):\n cssMinified = io.StringIO()\n cssMinified.write(rcssmin.cssmin(filename, keep_bang_comments=True))\n themePak.writestr(os.path.join(dirname, filename), cssMinified.getvalue())\n\n if filename.endswith(\".js\"):\n jsMinified = io.StringIO()\n jsMinified.write(rjsmin.jsmin(filename, keep_bang_comments=True))\n themePak.writestr(os.path.join(dirname, filename), jsMinified.getvalue())\n\nthemePak.close()\n","sub_path":"static/minify_css_js.py","file_name":"minify_css_js.py","file_ext":"py","file_size_in_byte":896,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"103702030","text":"import sys\r\n\r\nimport os\r\n\r\n\r\nclass SrtFormatter():\r\n def _secs_to_minutes_hours(self , time):\r\n add_formatting = lambda a: '0' + a if len(a) == 1 else a\r\n milli_secs = time - int(time)\r\n secs = add_formatting(str(int(time)%60))\r\n mins = add_formatting((int(time)//60).__str__())\r\n hours = add_formatting((int(time)//3600).__str__())\r\n\r\n return f\"{hours}:{mins}:{secs},{int(milli_secs*1000)}\"\r\n def _format(self, transcript_data):\r\n prev_time = 0\r\n final_srt = ''\r\n for index,each in enumerate(transcript_data) :\r\n start_time = each['start']\r\n end_time = each['start'] + each['duration']\r\n final_srt += f\"{index+1}\\n\"\r\n final_srt += f'{self._secs_to_minutes_hours(start_time)} --> {self._secs_to_minutes_hours(end_time)}\\n'\r\n final_srt += each['text'] + '\\n\\n'\r\n return final_srt\r\n def format_and_save(self , transcript_data , location = os.getcwd() , file_name = r'Transcript'):\r\n file_name +=r'.srt'\r\n path_list = location.split(os.sep)\r\n final_path = os.sep.join(path_list) + \"/\" + file_name\r\n with open(final_path, 'w', encoding = \"utf-8\") as srt_file:\r\n final_srt = self._format(transcript_data)\r\n srt_file.write(final_srt)\r\n srt_file.close()\r\n","sub_path":"formatting.py","file_name":"formatting.py","file_ext":"py","file_size_in_byte":1343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"226917936","text":"from wand.image import Image\nfrom PIL import Image as PI\nimport pyocr\nimport pyocr.builders\nimport io\nimport glob, os\n\ndef get_ocr(file):\n\tprint(file.name)\n\ttool = pyocr.get_available_tools()[0]\n\tlang = tool.get_available_languages()[0]\n\n\treq_image = []\n\tfinal_text = []\n\t# image_pdf = Image(filename=\"./\"+file, resolution=300)\n\t# imagem_jpeg = image_pdf.convert('jpeg')\n\n\timage_pdf = Image(filename=file)\n\timagem_jpeg = image_pdf.convert('jpeg')\n\n\tfor img in imagem_jpeg.sequence:\n\t\timg_page = Image(image=img)\n\t\treq_image.append(img_page.make_blob('jpeg'))\n\n\tfor img in req_image:\n\t\ttxt = tool.image_to_string(\n\t\t\tPI.open(io.BytesIO(img)),\n\t\t\tlang=lang,\n\t\t\tbuilder=pyocr.builders.TextBuilder()\n\t\t\t)\n\t\tfinal_text.append(txt)\n\treturn final_text","sub_path":"ged/util/ocr.py","file_name":"ocr.py","file_ext":"py","file_size_in_byte":744,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} 
+{"seq_id":"77685745","text":"# -*- coding: utf-8 -*-\n\"\"\"\n fa.py\n ~~~~~\n Finite Automaton\n\"\"\"\n\nEPISILON = 'episilon'\n\n\nclass NFA:\n \"\"\"\n Nondeterministic finite automaton\n \"\"\"\n\n def __init__(self, states, input_symbol, trans_func, init_state, accept_states):\n self.states = states\n self.input_symbol = input_symbol\n self.trans_func = trans_func\n self.init_state = init_state\n self.accept_states = accept_states\n\n def __str__(self):\n return '\\n'.join([\n str(self.states),\n str(self.input_symbol),\n str(self.trans_func),\n str(self.init_state),\n str(self.accept_states)])\n\n def accept(self, word):\n cur_state = self.init_state\n for c in word:\n if c not in self.input_symbol:\n return\n cur_state = self.trans_func[cur_state][c]\n return cur_state in self.accept_states\n\n\n def to_dfa(self):\n import copy\n from collections import defaultdict\n\n def next_states(s, nc):\n if s not in self.trans_func:\n return set()\n else:\n return copy.deepcopy(self.trans_func[s].get(nc, set()))\n\n def episilo_closure(start, c):\n states = next_states(start, c)\n all = copy.deepcopy(states)\n while states:\n s = states.pop()\n all.add(s)\n\n for e in next_states(s, EPISILON):\n if e not in all:\n all.add(e)\n states.add(e)\n return all\n\n def convert_to_final_dfa(dfa_accept_states, dfa_input_symbol, dfa_states, dfa_trans_func):\n states_map = {dfa_states[i]: 'd' + str(i) for i in range(len(dfa_states))}\n dfa_new_states = set(states_map.values())\n dfa_new_trans_func = defaultdict(dict)\n for key, value in dfa_trans_func.items():\n for key2, value2 in value.items():\n dfa_new_trans_func[states_map[key]][key2] = states_map[value2]\n dfa_new_init_state = states_map[(self.init_state,)]\n dfa_new_accept_states = {states_map[key] for key in dfa_accept_states}\n dfa = NFA(dfa_new_states, dfa_input_symbol, dfa_new_trans_func,\n dfa_new_init_state, dfa_new_accept_states)\n return dfa\n\n dfa_trans_func = defaultdict(dict)\n dfa_input_symbol = set()\n dfa_accept_states = set()\n dfa_states = [(self.init_state, )]\n worklist = [(self.init_state, )]\n while worklist:\n workitems = worklist.pop()\n if set(workitems).intersection(self.accept_states):\n dfa_accept_states.add(workitems)\n\n # sorted to make a deterministic result\n for c in sorted(self.input_symbol):\n if c == EPISILON: continue\n\n workitems_c_next = set()\n for e in workitems:\n to_e = episilo_closure(e, c)\n if to_e:\n workitems_c_next = workitems_c_next.union(to_e)\n workitems_c_next = tuple(sorted(workitems_c_next))\n\n if workitems_c_next and workitems_c_next not in dfa_states:\n dfa_states.append(workitems_c_next)\n worklist.append(workitems_c_next)\n if workitems_c_next:\n dfa_trans_func[workitems][c] = workitems_c_next\n dfa_input_symbol.add(c)\n\n dfa = convert_to_final_dfa(dfa_accept_states, dfa_input_symbol, dfa_states, dfa_trans_func)\n\n return dfa","sub_path":"compiler/fa.py","file_name":"fa.py","file_ext":"py","file_size_in_byte":3647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"438356581","text":"#!/usr/bin/python\n\nimport unittest\nimport csv\nfrom string import maketrans\n\ndef containsDuplicates(aList):\n \"\"\"Does list contain a duplicate\n \n >>> containsDuplicates(['foo','bar','fum'])\n False\n >>> containsDuplicates(['foo','foo','fum'])\n True\n >>> containsDuplicates(['foo','fum','foo'])\n True\n >>> containsDuplicates(['bar','foo','bar'])\n True\n \"\"\"\n copyList = list(aList)\n\n copyList.sort()\n \n for i in 
range(len(copyList)-1):\n if copyList[i]==copyList[i+1]:\n return True\n return False\n \ndef firstNamesWithNCharsOfLastName(userList, indices, n):\n \"return list of new first names\"\n\n names = []\n for i in range(len(indices)):\n names.append(userList[indices[i]]['first'] + \"_\" + \n userList[indices[i]]['last'][0:n])\n \n return names\n\ndef disambiguateAllFirstNames(userList):\n newUserList = list(userList)\n firstDuplicateName = nameInFirstMatchingPairOfFirstNames(newUserList)\n while ( firstDuplicateName != False):\n print(\"Fixing duplicates for \",firstDuplicateName)\n indices = findIndicesOfMatchingFirstNames(newUserList,firstDuplicateName)\n newUserList = disambiguateFirstNamesOfTheseIndices(newUserList,indices)\n firstDuplicateName = nameInFirstMatchingPairOfFirstNames(newUserList)\n\n return newUserList\n \n\ndef disambiguateFirstNamesOfTheseIndices(userList,indices):\n \"return a new userList with certain first names disambiguated\"\n \n newList = list(userList)\n \n needed = 1; # Need up through 0 only (i.e. 1 char)\n \n firstNames = firstNamesWithNCharsOfLastName(userList,indices,needed)\n #print(\"firstNames=\",firstNames,\"needed=\",needed)\n \n while( containsDuplicates(firstNames) ):\n needed = needed + 1\n firstNames = firstNamesWithNCharsOfLastName(userList,indices,needed)\n #print(\"firstNames=\",firstNames,\"needed=\",needed)\n\n \n for i in range(len(indices)):\n newList[indices[i]]['first'] = firstNames[i]\n\n return newList\n\ndef nameInFirstMatchingPairOfFirstNames(userList):\n \"returns the first name that occurs more than once, or False if no dup first names\" \n\n for i in range(len(userList)):\n for j in range(i+1,len(userList)):\n if userList[i]['first'] == userList[j]['first']:\n return userList[i]['first']\n return False\n \ndef findIndicesOfMatchingFirstNames(userList,name):\n \"returns list of the indices of the elements in userList who first names match name\"\n\n indices = []\n for i in range(len(userList)):\n if userList[i]['first'] == name:\n indices.append(i)\n \n return indices\n \n\ndef makeUserDict(first,last,github,email,csil):\n return {'first': first, 'last': last, 'github': github.lower(), 'email':email.lower(), 'csil':csil.lower() }\n\n\ndef convertUserList(csvFile):\n userList = []\n for line in csvFile:\n userList.append(makeUserDict(line[\"First Name\"],\n line[\"Last Name\"],\n line[\"github userid\"],\n line[\"Umail address\"],\n line[\"CSIL userid\"]))\n \n\n for user in userList:\n user[\"first\"] = user[\"first\"].strip().translate(maketrans(\" \",\"_\"));\n\n return userList\n\ndef makeUserLookupDictByGithubId(userList):\n \"\"\"\n userList is a list of dictionaries with keys first,last,github,email,csil.\n returned value is a dictionary where the keys are the github ids,\n and the values are the original dictionaries with first,last,github,email,csil\n \"\"\"\n\n newDict = {}\n for user in userList:\n if user['github'] in newDict:\n raise Exception(\"duplicate github user {0}\".format(user['github']))\n newDict[user['github']]=user\n\n return newDict\n\ndef convertPairList(userList,csvFile):\n \"\"\"\n userList is a list of dictionaries with keys first,last,github,email,csil\n csvFile is a list of dictionaries with keys Partner1_GithubID,Partner2_GithubID,labnumber\n \n returned value should be a list of dictionaries with keys teamName,user1,user2, where user1 and user2 are the elements fromn userlist where the github ids match.\n \"\"\"\n\n pairList = []\n userLookupDict = makeUserLookupDictByGithubId(userList)\n for line in 
csvFile:\n line['Partner1_GithubID']=line['Partner1_GithubID'].lower().strip()\n line['Partner2_GithubID']=line['Partner2_GithubID'].lower().strip()\n if not (line['Partner1_GithubID'] in userLookupDict):\n raise Exception(\"Partner1_GithubID from pair file not found in user list: {0}\".format(line['Partner1_GithubID']))\n \n if not (line['Partner2_GithubID'] in userLookupDict):\n raise Exception(\"Partner2_GithubID from pair file not found in user list: {0}\".format(line['Partner2_GithubID']))\n \n team = {}\n user1 = userLookupDict[line['Partner1_GithubID']]\n user2 = userLookupDict[line['Partner2_GithubID']]\n if (user1[\"first\"] > user2[\"first\"]): \n # Swap if out of order\n temp = user1\n user1 = user2\n user2 = temp\n team[\"user1\"] = user1\n team[\"user2\"] = user2\n team[\"teamName\"]=\"Pair_\" + user1['first'] + \"_\" + user2['first']\n \n pairList.append(team)\n \n return pairList\n\ndef getUserList(csvFilename):\n\n with open(csvFilename,'r') as f:\n csvFile = csv.DictReader(f,delimiter=',', quotechar='\"')\n \n userList = convertUserList(csvFile)\n \n newUserList = disambiguateAllFirstNames(userList)\n\n return newUserList\n\ndef getPairList(userList,csvFilename):\n\n with open(csvFilename,'r') as f:\n csvFile = csv.DictReader(f,delimiter=',', quotechar='\"')\n \n pairList = convertPairList(userList,csvFile)\n \n return pairList\n\nclass TestSequenceFunctions(unittest.TestCase):\n\n\n\n def setUp(self):\n self.userList1 = [ makeUserDict('Chris','Jones','cj','cj@example.org','cj'),\n makeUserDict('Chris','Smith','cs','cs@example.org','cs'),\n makeUserDict('Mary Kay','Jones','mkj','mkj@example.org','mkj'),\n makeUserDict('Mary','Kay','mkay','mkay@example.org','mkay') ]\n\n self.userList1a = [ makeUserDict('Chris_J','Jones','cj','cj@example.org','cj'),\n makeUserDict('Chris_S','Smith','cs','cs@example.org','cs'),\n makeUserDict('Mary Kay','Jones','mkj','mkj@example.org','mkj'),\n makeUserDict('Mary','Kay','mkay','mkay@example.org','mkay') ]\n\n self.userList2 = [ makeUserDict('Chris_J','Jones','cj','cj@example.org','cj'),\n makeUserDict('Chris_S','Smith','cs','cs@example.org','cs'),\n makeUserDict('Mary Kay','Jones','mkj','mkj@example.org','mkj'),\n makeUserDict('Mary','Kay','mkay','mkay@example.org','mkay') ]\n\n self.userList3 = [ makeUserDict('Chris','Jones','cj','cj@example.org','cj'),\n makeUserDict('Chris','Smith','cs','cs@example.org','cs'),\n makeUserDict('Mary','Jones','mkj','mkj@example.org','mkj'),\n makeUserDict('Mary','Kay','mkay','mkay@example.org','mkay'),\n makeUserDict('Dave','Jones','dj','dk@example.org','dj'),\n makeUserDict('Dave','Kay','dk','dj@example.org','dk') ]\n\n self.userList3a = [ makeUserDict('Chris_J','Jones','cj','cj@example.org','cj'),\n makeUserDict('Chris_S','Smith','cs','cs@example.org','cs'),\n makeUserDict('Mary_J','Jones','mkj','mkj@example.org','mkj'),\n makeUserDict('Mary_K','Kay','mkay','mkay@example.org','mkay'),\n makeUserDict('Dave_J','Jones','dj','dk@example.org','dj'),\n makeUserDict('Dave_K','Kay','dk','dj@example.org','dk') ]\n\n\n\n self.userList4 = [ makeUserDict('Chris','Jones','cj','cj@example.org','cj'),\n makeUserDict('Mary','Jones','mkj','mkj@example.org','mkj'),\n makeUserDict('Dave','Kay','dk','dj@example.org','dk') ]\n\n self.userList5 = [ makeUserDict('Mary','Jones','mkj','mkj@example.org','mkj'),\n makeUserDict('Chris','Smyth','csmy','cj@example.org','cj'),\n makeUserDict('Chris','Smith','csmi','cs@example.org','cs'),\n makeUserDict('Mary','Kay','mkay','mkay@example.org','mkay'),\n 
makeUserDict('Dave','Jones','dj','dk@example.org','dj'),\n makeUserDict('Dave','Kay','dk','dj@example.org','dk') ]\n\n self.userList5a = [ makeUserDict('Mary_J','Jones','mkj','mkj@example.org','mkj'),\n makeUserDict('Chris_Smy','Smyth','csmy','cj@example.org','cj'),\n makeUserDict('Chris_Smi','Smith','csmi','cs@example.org','cs'),\n makeUserDict('Mary_K','Kay','mkay','mkay@example.org','mkay'),\n makeUserDict('Dave_J','Jones','dj','dk@example.org','dj'),\n makeUserDict('Dave_K','Kay','dk','dj@example.org','dk') ]\n\n\n def test_firstNamesWithNCharsOfLastName1(self):\n result = firstNamesWithNCharsOfLastName(self.userList1,[0,1,2,3],1)\n self.assertEqual(result, [\"Chris_J\",\"Chris_S\",\"Mary Kay_J\",\"Mary_K\"])\n\n def test_firstNamesWithNCharsOfLastName2(self):\n result = firstNamesWithNCharsOfLastName(self.userList1,[0,2],2)\n self.assertEqual(result, [\"Chris_Jo\",\"Mary Kay_Jo\"])\n\n def test_disambiguateFirstNamesOfTheseIndices(self):\n result = disambiguateFirstNamesOfTheseIndices(self.userList1,[0,1])\n self.assertEqual(result,self.userList2)\n\n\n def test_nameInFirstMatchingPairOfFirstNames1(self):\n result = nameInFirstMatchingPairOfFirstNames(self.userList1);\n self.assertEqual(result,\"Chris\");\n\n def test_nameInFirstMatchingPairOfFirstNames3(self):\n result = nameInFirstMatchingPairOfFirstNames(self.userList3);\n self.assertEqual(result,\"Chris\");\n\n def test_nameInFirstMatchingPairOfFirstNames4(self):\n result = nameInFirstMatchingPairOfFirstNames(self.userList4);\n self.assertFalse(result);\n\n def test_nameInFirstMatchingPairOfFirstNames5(self):\n result = nameInFirstMatchingPairOfFirstNames(self.userList5);\n self.assertEqual(result,\"Mary\");\n\n\n \n\n def test_findIndicesOfMatchingFirstNames1(self):\n result = findIndicesOfMatchingFirstNames(self.userList1,'Chris');\n self.assertEqual(result,[0,1]);\n\n def test_findIndicesOfMatchingFirstNames3(self):\n result = findIndicesOfMatchingFirstNames(self.userList3,'Mary');\n self.assertEqual(result,[2,3]);\n\n def test_findIndicesOfMatchingFirstNames4(self):\n result = findIndicesOfMatchingFirstNames(self.userList4,'Dave');\n self.assertEqual(result,[2]);\n\n def test_findIndicesOfMatchingFirstNames5a(self):\n result = findIndicesOfMatchingFirstNames(self.userList5,'Mary');\n self.assertEqual(result,[0,3]);\n\n def test_findIndicesOfMatchingFirstNames5b(self):\n result = findIndicesOfMatchingFirstNames(self.userList5,'Chris');\n self.assertEqual(result,[1,2]);\n\n def test_disambiguateAllFirstNames1(self):\n result = disambiguateAllFirstNames(self.userList1);\n self.assertEqual(result,self.userList1a);\n\n def test_disambiguateAllFirstNames3(self):\n result = disambiguateAllFirstNames(self.userList3);\n self.assertEqual(result,self.userList3a);\n\n def test_disambiguateAllFirstNames4(self):\n result = disambiguateAllFirstNames(self.userList4);\n self.assertEqual(result,self.userList4);\n\n def test_disambiguateAllFirstNames5(self):\n result = disambiguateAllFirstNames(self.userList5);\n self.assertEqual(result,self.userList5a);\n\n\nif __name__ == '__main__':\n unittest.main()\n import doctest\n doctest.testmod()\n\n","sub_path":"disambiguateFunctions.py","file_name":"disambiguateFunctions.py","file_ext":"py","file_size_in_byte":12280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"248090302","text":"import compiler\nimport copy\nfrom compiler.ast import *\n\nimport os.path\n\nclass Var():\n\tdef __init__(self, name, index):\n\t\tself.name = name\n\t\tself.index 
= index\n\t\tself.mapping = '-' + str(4 * (index + 1)) + '(%ebp)'\n\n\tdef __repr__(self):\n\t\treturn str(self.mapping)\n\nclass Statement(object):\n\tdef __init__(self, py_text, as_text):\n\t\tself.py_text = py_text\n\t\tself.as_text = as_text\n\n\tdef __repr__(self):\n\t\treturn str(self.as_text)\n\nclass Assignment(Statement):\n\tdef __init__(self, store, val):\n\t\tpy_text = str(store) + ' = ' + str(val)\n\t\ts_text = 'movl ' + str(val) + ', %ecx\\nmovl %ecx, ' + str(store)\n\n\t\tsuper(Assignment, self).__init__(py_text, s_text)\n\nclass Addition(Statement):\n\tdef __init__(self, store, val):\n\t\tpy_text = str (store) + ' = ' + str(store) + ' + ' + str(val)\n\t\ts_text = 'movl ' + str(val) + ', %ecx\\n'\n\t\ts_text += 'addl %ecx,' + str(store)\n\t\tsuper(Addition, self).__init__(py_text, s_text)\n\nclass Negate(Statement):\n\tdef __init__(self, store):\n\t\tpy_text = str (store) + ' = ' + '-' + str(store)\n\t\ts_text = 'negl ' + str(store)\n\t\tsuper(Negate, self).__init__(py_text, s_text)\n\nclass Print(Statement):\n\tdef __init__(self, val):\n\t\tpy_text = 'print ' + str(val)\n\t\ts_text = 'pushl ' + str(val) + '\\ncall print_int_nl\\naddl $4, %esp'\n\t\tsuper(Print, self).__init__(py_text, s_text)\n\nclass Input(Statement):\n\tdef __init__(self, store):\n\t\tpy_text = str(store) + \" = input()\"\n\t\ts_text = \"call input\\nmovl %eax, \" + str(store)\n\t\tsuper(Input, self).__init__(py_text, s_text)\n\nclass flattened_ast ():\n\tdef __init__(self, root):\n\t\tself.raw_ast = root\n\t\tself.vars = {}\n\t\tself.statements = []\n\t\tself.process(self.raw_ast)\n\n\tdef add_var(self, name):\n\t\tself.vars.update({name: Var(name, len(self.vars))})\n\t\treturn self.vars[name]\n\n\tdef add_tmp_var(self):\n\t\tvn = \"__tmp\" + str(len(self.vars)) + \"__\"\n\t\treturn self.add_var(vn)\n\n\tdef add_stmt(self, stmt):\n\t\tself.statements.append(stmt)\n\n\tdef process(self, n):\n\t\t# Certain Nodes should be Extracted into statements\n\t\t# Other nodes should be given variables instead of real nodes\n\n\t\t# First, Find Leaves\n\t\tif isinstance(n, Module):\n\t\t\tself.process(n.node)\n\t\telif isinstance(n, Stmt):\n\t\t\tfor i in n.nodes:\n\t\t\t\tself.process(i)\n\t\telif isinstance(n, Printnl):\n\t\t\tself.add_stmt(Print(self.process(n.nodes[0])))\n\t\telif isinstance(n, Assign):\n\t\t\tif not isinstance(n.nodes[0], AssName):\n\t\t\t\traise Exception (\"AssName not after Assign!\")\n\t\t\tname = n.nodes[0].name;\n\t\t\tif not name in self.vars:\n\t\t\t\tself.add_var(name)\n\t\t\t\n\t\t\tself.add_stmt(Assignment(self.vars[name], self.process(n.expr)))\n\n\t\t\t# Create new Var, assign process(expr) to it\n\t\telif isinstance (n, AssName): # Leaf and vars\n\t\t\treturn\n\t\telif isinstance(n, Discard):\n\t\t\tself.process(n.expr)\n\t\telif isinstance(n, Const): # Leaf\n\t\t\treturn '$' + str(hex(n.value))\n\t\telif isinstance(n, Name): # Leaf\n\t\t\tif not n.name in self.vars:\n\t\t\t\traise Exception(\"Using Var Before Assignment!\")\n\t\t\treturn self.vars[n.name]\n\t\telif isinstance(n, Add):\n\t\t\ttmpv = self.add_tmp_var()\n\t\t\tself.add_stmt(Assignment(tmpv, self.process(n.left)))\n\t\t\tself.add_stmt(Addition(tmpv, self.process(n.right)))\n\t\t\treturn tmpv\n\t\telif isinstance(n, UnarySub):\n\t\t\ttmpv = self.add_tmp_var()\n\t\t\tself.add_stmt(Assignment(tmpv, self.process(n.expr)))\n\t\t\tself.add_stmt(Negate(tmpv))\n\t\t\treturn tmpv\n\t\telif isinstance(n, CallFunc): # Leaf\n\t\t\tif not n.node.name == 'input':\n\t\t\t\traise Exception(\"Can only call 
input\")\n\t\t\telse:\n\t\t\t\ttmpv = self.add_tmp_var()\n\t\t\t\tself.statements.append(Input(tmpv))\n\t\t\t\treturn tmpv\n\t\telse:\n\t\t\traise Exception('Unknown AST Node: ' + repr(n))\n\n\n\n","sub_path":"Homework1/bin/dj_ast.py","file_name":"dj_ast.py","file_ext":"py","file_size_in_byte":3526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"584268327","text":"#!/usr/bin/env python3\n\nimport collections\nimport hashlib\nimport os\nimport pathlib\n\n\ndef print_return(s):\n    print(s + '\\r', end='')\n\n\ndef get_hash(file_path, buffer_size=65536):\n    with file_path.open(mode='rb') as f:\n        hasher = hashlib.sha256()\n        buffer = f.read(buffer_size)\n        while len(buffer) > 0:\n            hasher.update(buffer)\n            buffer = f.read(buffer_size)\n    return hasher.hexdigest()\n\n\ndef main():\n    file_list = []\n    hashes = {}\n    dupes = collections.defaultdict(list)\n\n    count = 0\n    for root, folders, files in os.walk(os.getcwd()):\n        root_path = pathlib.Path(root).resolve()\n        for fn in files:\n            count += 1\n            file_list.append(root_path / fn)\n            print_return('Collected {} files'.format(count))\n    print('Collected {} files'.format(count))\n\n    count = 0\n    for file in sorted(file_list):\n        count += 1\n        # file_hash = get_hash(file)\n        file_hash = file.name.lower()\n        if file_hash in hashes:\n            dupes[hashes[file_hash]].append(file)\n        else:\n            hashes[file_hash] = file\n        print_return('Scanned {} files'.format(count))\n    print('Scanned {} files\\n**'.format(count))\n\n    for first_file in sorted(dupes.keys()):\n        print(first_file)\n        for dupe_file in dupes[first_file]:\n            print(dupe_file)\n        input()\n\nif __name__ == '__main__':\n    main()\n","sub_path":"find_duplicates.py","file_name":"find_duplicates.py","file_ext":"py","file_size_in_byte":1420,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"143907803","text":"# -*- coding:utf-8 -*-\n\n\nimport cv2 as cv\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\n\npath = 'test2.jpg'  # white and blue bottles\nimg = cv.imread(path, 0)\n\n# global threshold\nret1, th1 = cv.threshold(img, 127, 255, cv.THRESH_BINARY)\n\n# Otsu's threshold\nret2, th2 = cv.threshold(img, 0, 255, cv.THRESH_BINARY+cv.THRESH_OTSU)\n\n#\n# (5, 5) is the Gaussian kernel size, 0 is the standard deviation\nblur = cv.GaussianBlur(img, (5, 5), 0)\n# the threshold must be set to 0\nret3, th3 = cv.threshold(blur, 0, 255, cv.THRESH_BINARY+cv.THRESH_OTSU)\n\n# plot all the images and their histograms\nimages = [img, 0, th1,\n          img, 0, th1,\n          blur, 0, th3]\ntitles = ['Original Noisy Image', 'Histogram', 'Global Thresholding (v=127)',\n          'Original Noisy Image', 'Histogram', \"Otsu's Thresholding\",\n          'Gaussian filtered Image', 'Histogram', \"Otsu's Thresholding\"]\n\n\n# for i in range(3):\n#     plt.subplot(3, 3, i*3+1), plt.imshow(images[i*3], 'gray')\n#     plt.title(titles[i*3]), plt.xticks([]), plt.yticks([])\n#\n#     plt.subplot(3, 3, i*3+2), plt.hist(images[i*3].ravel(), 256)\n#     plt.title(titles[i*3+1]), plt.xticks([]), plt.yticks([])\n#\n#     plt.subplot(3, 3, i*3+3), plt.imshow(images[i*3+2], 'gray')\n#     plt.title(titles[i*3+2]), plt.xticks([]), plt.yticks([])\n# plt.show()\n\n\n# plt.imshow(images[0*3+2])\n# plt.imshow(images[1*3+2])\nplt.imshow(images[2*3+2])\nplt.show()\n# ----------------------------------------\n","sub_path":"实验/deal_img2.0/test_save.py","file_name":"test_save.py","file_ext":"py","file_size_in_byte":1412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"462057438","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\n\r\nC2017-22\r\n\r\n\"\"\"\r\nimport 
scrapy\r\nfrom carbuisness.items import GasSinopecItem\r\nimport time\r\nfrom scrapy.conf import settings\r\nfrom scrapy.mail import MailSender\r\nimport logging\r\nimport json\r\nimport random\r\nimport re\r\nimport hashlib\r\nfrom hashlib import md5\r\n\r\nwebsite='sinopec_gas'\r\n\r\nclass CarSpider(scrapy.Spider):\r\n\r\n    name=website\r\n    start_urls=['http://www.sinopecsales.com/website/html/service/jiayouzhan.html']\r\n\r\n    def __init__(self,**kwargs):\r\n        super(CarSpider,self).__init__(**kwargs)\r\n        self.mailer=MailSender.from_settings(settings)\r\n        self.counts=0\r\n        self.carnum=50000\r\n\r\n        settings.set('CrawlCar_Num',self.carnum,priority='cmdline')\r\n        settings.set('MONGODB_DB','carbusiness',priority='cmdline')\r\n        settings.set('MONGODB_COLLECTION',website,priority='cmdline')\r\n\r\n    def parse(self, response):\r\n        #print \"do parse\"\r\n        time.sleep(1)\r\n        areas = response.xpath('//div[@class=\"serve_left\"]/ul/li')\r\n        for area in areas:\r\n            urlbase = area.xpath('a/@href').extract_first()\r\n            province = area.xpath('a/div/text()').extract_first().strip()\r\n            urltemp = re.findall(\"website(\\\S+)\", urlbase)[0]\r\n            url = \"http://www.sinopecsales.com/website\" + urltemp\r\n            metadata = {\"province\" : province}\r\n            yield scrapy.Request(url, meta={\"metadata\":metadata}, callback=self.parse_middle, dont_filter=True)\r\n\r\n    def parse_middle(self, response):\r\n        #print \"do parse_middle\"\r\n        metadata = response.meta['metadata']\r\n        urlfront = re.findall(\"(.*?)pageNo=\", response.url)[0] + \"pageNo=\"\r\n        #urlnum = re.findall(\"pageNo=(\\\d+)\", response.url)[0]\r\n        urlback = \"&stationCharge=\" + re.findall(\"stationCharge=(\\\S+)\", response.url)[0]\r\n        pagenumbase = response.xpath(u'//td[contains(text(), \"第 1 页\")]/text()').extract_first().strip()\r\n        pagenum = re.findall(u\"共 (\\\d+)\", pagenumbase)[0]\r\n        for i in range(1, int(pagenum)+1):\r\n            url = urlfront + str(i) + urlback\r\n            #print url\r\n            yield scrapy.Request(url, meta={\"metadata\":metadata}, callback=self.parse_info, dont_filter=True)\r\n\r\n    def parse_info(self, response):\r\n        #print \"parse_info\"\r\n        metadata = response.meta['metadata']\r\n        tbody = response.xpath(u'//th[contains(text(), \"序号\")]/../../tr')\r\n\r\n        flag = 0 # the first row is empty and needs to be filtered out\r\n        #print response.url\r\n        for dot in tbody:\r\n            if dot and flag!=0:\r\n                item = GasSinopecItem()\r\n\r\n                if dot.xpath('td[2]/text()'):\r\n                    item['dotname'] = dot.xpath('td[2]/text()').extract_first()\r\n                else:\r\n                    item['dotname'] = \"-\"\r\n\r\n                if dot.xpath('td[3]/text()'):\r\n                    item['location'] = dot.xpath('td[3]/text()').extract_first().strip()\r\n                else:\r\n                    item['location'] = \"-\"\r\n\r\n                if dot.xpath('td[4]/text()'):\r\n                    item['sell_card'] = dot.xpath('td[4]/text()').extract_first().strip()\r\n                else:\r\n                    item['sell_card'] = \"-\"\r\n\r\n                if dot.xpath('td[5]/text()'):\r\n                    item['phonenum'] = dot.xpath('td[5]/text()').extract_first().strip()\r\n                else:\r\n                    item['phonenum'] = \"-\"\r\n\r\n                if dot.xpath('td[6]/text()'):\r\n                    item['electronic_prepaid_card_invoice'] = dot.xpath('td[6]/text()').extract_first().strip()\r\n                else:\r\n                    item['electronic_prepaid_card_invoice'] = \"-\"\r\n\r\n                if dot.xpath('td[7]/text()'):\r\n                    item['valueadd_tax_invoice'] = dot.xpath('td[7]/text()').extract_first().strip()\r\n                else:\r\n                    item['valueadd_tax_invoice'] = \"-\"\r\n\r\n                # item['pagenum'] = re.findall(\"pageNo=(\\\d+)\", response.url)[0]\r\n                item['province'] = metadata['province']\r\n                item['url'] = response.url\r\n                item['website'] = website\r\n                item['grabtime'] = time.strftime('%Y-%m-%d %X', 
time.localtime())\r\n                item['status'] = str(dot.xpath('td[1]/text()').extract_first()) + response.url # index + url\r\n                yield item\r\n            else:\r\n                flag = 1\r\n","sub_path":"cagey/carbuisness/carbuisness/spiders/sinopec_gas.py","file_name":"sinopec_gas.py","file_ext":"py","file_size_in_byte":4380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"154949659","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Nov 22 15:54:23 2018\n\n@author: Ayush jain\n\"\"\"\nimport pandas as pd\nfrom urllib.parse import urlparse\nimport Preprocess\nimport pickle\n#import tldextract\nprint(\"traintestsplit.py\")\ndef ClaimsTrainTestSplit():\n    claimFrame = pd.read_csv('/media/sdb/sanjay/IR/debunking-fake-news/newstrust_final/claimCredibility.csv')\n    reviewsFrame = pd.read_csv('/media/sdb/sanjay/IR/debunking-fake-news/newstrust_final/claimReviews.csv')\n    claimTrainFrame = claimFrame.iloc[:40000][:]\n    claimTestFrame = claimFrame.iloc[40000:][:]\n    reviewsTrainFrame = reviewsFrame.loc[reviewsFrame['claimId'].isin(claimTrainFrame['claimId'])]\n    reviewsTestFrame = reviewsFrame.loc[reviewsFrame['claimId'].isin(claimTestFrame['claimId'])]\n    claimTrainFrame.to_csv('/media/sdb/sanjay/IR/debunking-fake-news/newstrust_final/Train/claimCredibility.csv')\n    claimTestFrame.to_csv('/media/sdb/sanjay/IR/debunking-fake-news/newstrust_final/Test/claimCredibility.csv')\n    reviewsTrainFrame.to_csv('/media/sdb/sanjay/IR/debunking-fake-news/newstrust_final/Train/claimReviews.csv')\n    reviewsTestFrame.to_csv('/media/sdb/sanjay/IR/debunking-fake-news/newstrust_final/Test/claimReviews.csv')\n    \n#ClaimsTrainTestSplit() \n\ndef getClaimTextFromLink(claimLink):\n    #if(not isinstance(claimLink,str)):\n    #    claimLink = claimLink.iloc[0]\n    claims = pd.read_csv('/media/sdb/sanjay/IR/debunking-fake-news/newstrust_final/claims.csv',index_col=0)\n    return claims.loc[claimLink,:].tolist()[0]\n\ndef getReviewsFromClaimId(claimId,reviewFile):\n    reviews = pd.read_csv(reviewFile)\n    reviews = reviews.loc[reviews['claimId'].isin([claimId])]\n    #print(reviews)\n    reviews = reviews.loc[:,['Review','Reviewer']]\n    reviewlist = []\n    reviewerslist = []\n    for i in reviews.index:\n        \n        review = reviews.loc[i,'Review']\n        reviewer = reviews.loc[i,'Reviewer']\n        reviewlist.append(review)\n        reviewerslist.append(reviewer)\n    return reviewlist,reviewerslist\n\ndef getClaimSourceAttrFromClaimLink(claimLink):\n    #if( not isinstance(claimLink,str)):\n    #    claimLink = claimLink.iloc[0]\n    domain = urlparse(claimLink).netloc\n    #print(domain)\n    #domain = tldextract.extract(claimLink).domain\n    sourceFrame = pd.read_csv('/media/sdb/sanjay/IR/debunking-fake-news/newstrust_final/claim_sources.csv',names=['source', 'a1','a2','a3','a4','a5','a6','a7'], header=None)\n    sourceFrame = sourceFrame.loc[sourceFrame['source'].str.contains(domain)] \n    sourceFrame = sourceFrame.drop_duplicates(subset=['source'], keep='last') #removing duplicate rows\n    return sourceFrame.loc[:,['a1','a2','a3','a4','a5','a6','a7']].values\n    \ndef getReviewerAttrFromName(name) : \n    name = name.strip(' ').lower()\n    name = name.replace(' ','-')\n    reviewer = pd.read_csv('/media/sdb/sanjay/IR/debunking-fake-news/newstrust_final/members.csv',index_col=0,names=['f1','f2','f3','f4','f5','f6','f7','f8','f9','f10','f11','f12'])\n    reviewer = reviewer[~reviewer.index.duplicated(keep='last')] #removing duplicate rows\n    return reviewer.loc[name,:].values\n\ndef getBatch(claimFile,reviewFile,batchSize):\n    claimFrame = pd.read_csv(claimFile,index_col=0) \n    claimFrame = 
claimFrame[~claimFrame.index.duplicated(keep='first')] #removing duplicate rows\n X =[]\n Y = []\n counter=0\n while True: \n for claimId in claimFrame.index.tolist():\n # print('caught!\\n')\n try:\n reviews,reviewers = getReviewsFromClaimId(claimId,reviewFile)\n claimSourceEmbedding = getClaimSourceAttrFromClaimLink(claimFrame.loc[claimId,'claimLink'])\n credibility = claimFrame.loc[claimId,'credibility']\n #if(isinstance(credibility,pd.Series)): #above statement may return a series if duplicate entries are there in claimframe\n # credibility = credibility.iloc[\n claimText = getClaimTextFromLink(claimFrame.loc[claimId,'claimLink'])\n except: \n continue \n claimEmbeddings = Preprocess.getUniversalSentenceEncoding(claimText)\n print(\"no. of reviews\"+str(len(reviews)))\n #print(reviews)\n if len(reviews) > 0:\n for i in range(len(reviews)):\n tokens = Preprocess.tokenizer(reviews[i])\n #articleTerms = tokens \n articleTermEmbeddings = Preprocess.getContextualisedWordEmbeddings(tokens)\n reviewerEmbedding = getReviewerAttrFromName(reviewers[i])\n x = {'claimId':claimId,'claim' : claimEmbeddings, 'article': articleTermEmbeddings,'claimSource':claimSourceEmbedding,'articleSource':reviewerEmbedding }\n y = credibility\n X.append(x)\n Y.append(y)\n counter = counter +1\n if(counter == batchSize):\n yield X,Y\n X=[]\n Y=[]\n counter = 0\n \n #while True:\n \n#getClaimTextFromLink(\"b'http://www.npr.org/templates/story/story.php?storyId=121529261'\")\nXTrain,YTrain = next(getBatch('/media/sdb/sanjay/IR/debunking-fake-news/newstrust_final/Train/claimCredibility.csv','/media/sdb/sanjay/IR/debunking-fake-news/newstrust_final/Train/claimReviews.csv',3000))\n#print(X)\n#print(Y)\n#ClaimsTrainTestSplit()\nwith open('/media/sdb/sanjay/IR/debunking-fake-news/newstrust_final/Train/XTrain.pickle', 'wb') as handle:\n pickle.dump(XTrain, handle, protocol=pickle.HIGHEST_PROTOCOL)\nwith open('/media/sdb/sanjay/IR/debunking-fake-news/newstrust_final/Train/YTrain.pickle', 'wb') as handle:\n\tpickle.dump(YTrain,handle, protocol=pickle.HIGHEST_PROTOCOL)\n##Test Data## \nXTest,YTest = next(getBatch('/media/sdb/sanjay/IR/debunking-fake-news/newstrust_final/Test/claimCredibility.csv','/media/sdb/sanjay/IR/debunking-fake-news/newstrust_final/Test/claimReviews.csv',1000))\n#print(X)\n#print(Y)\n#ClaimsTrainTestSplit()\nwith open('/media/sdb/sanjay/IR/debunking-fake-news/newstrust_final/Test/XTest.pickle', 'wb') as handle:\n pickle.dump(XTest, handle, protocol=pickle.HIGHEST_PROTOCOL)\nwith open('/media/sdb/sanjay/IR/debunking-fake-news/newstrust_final/Test/YTest.pickle', 'wb') as handle:\n pickle.dump(YTest,handle, protocol=pickle.HIGHEST_PROTOCOL)\n\n","sub_path":"runningTrainTestSplit.py","file_name":"runningTrainTestSplit.py","file_ext":"py","file_size_in_byte":6389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"104607029","text":"#\n# Copyright (c) nexB Inc. and others. 
All rights reserved.\n# http://nexb.com and https://github.com/nexB/vulnerablecode/\n# The VulnerableCode software is licensed under the Apache License version 2.0.\n# Data generated with VulnerableCode require an acknowledgment.\n#\n# You may not use this software except in compliance with the License.\n# You may obtain a copy of the License at: http://apache.org/licenses/LICENSE-2.0\n# Unless required by applicable law or agreed to in writing, software distributed\n# under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR\n# CONDITIONS OF ANY KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations under the License.\n#\n# When you publish or redistribute any data created with VulnerableCode or any VulnerableCode\n# derivative work, you must accompany this data with the following acknowledgment:\n#\n# Generated with VulnerableCode and provided on an \"AS IS\" BASIS, WITHOUT WARRANTIES\n# OR CONDITIONS OF ANY KIND, either express or implied. No content created from\n# VulnerableCode should be considered or used as legal advice. Consult an Attorney\n# for any legal advice.\n# VulnerableCode is a free software tool from nexB Inc. and others.\n# Visit https://github.com/nexB/vulnerablecode/ for support and download.\n\nfrom urllib.parse import unquote\nfrom typing import List\n\nfrom django.db.models import Q\nfrom django.urls import reverse\nfrom django_filters import rest_framework as filters\nfrom packageurl import PackageURL\nfrom rest_framework import serializers\nfrom rest_framework import viewsets\nfrom rest_framework.decorators import action\nfrom rest_framework.response import Response\nfrom drf_spectacular.utils import extend_schema, inline_serializer\nfrom drf_spectacular.types import OpenApiTypes\n\nfrom vulnerabilities.models import Package\nfrom vulnerabilities.models import Vulnerability\nfrom vulnerabilities.models import VulnerabilityReference\n\n# This serializer is used for the bulk apis, to prevent wrong auto documentation\n# TODO: Fix the swagger documentation for bulk apis\nplaceholder_serializer = inline_serializer(name=\"Placeholder\", fields={})\n\n\nclass VulnerabilityReferenceSerializer(serializers.ModelSerializer):\n class Meta:\n model = VulnerabilityReference\n fields = [\n \"source\",\n \"reference_id\",\n \"url\",\n ]\n\n\nclass HyperLinkedPackageSerializer(serializers.HyperlinkedModelSerializer):\n purl = serializers.CharField(source=\"package_url\")\n\n class Meta:\n model = Package\n fields = [\"url\", \"purl\"]\n\n\nclass HyperLinkedVulnerabilitySerializer(serializers.HyperlinkedModelSerializer):\n class Meta:\n model = Vulnerability\n fields = [\"url\", \"vulnerability_id\"]\n\n\nclass MinimalVulnerabilitySerializer(serializers.HyperlinkedModelSerializer):\n\n resolved_packages = HyperLinkedPackageSerializer(\n many=True, source=\"resolved_to\", read_only=True\n )\n unresolved_packages = HyperLinkedPackageSerializer(\n many=True, source=\"vulnerable_to\", read_only=True\n )\n\n class Meta:\n model = Vulnerability\n fields = [\"url\", \"unresolved_packages\", \"resolved_packages\"]\n\n\nclass VulnerabilitySerializer(MinimalVulnerabilitySerializer):\n references = VulnerabilityReferenceSerializer(many=True, source=\"vulnerabilityreference_set\")\n\n class Meta:\n model = Vulnerability\n fields = \"__all__\"\n\n\nclass MinimalPackageSerializer(serializers.HyperlinkedModelSerializer):\n unresolved_vulnerabilities = HyperLinkedVulnerabilitySerializer(\n many=True, 
source=\"vulnerable_to\", read_only=True\n )\n resolved_vulnerabilities = HyperLinkedVulnerabilitySerializer(\n many=True, source=\"resolved_to\", read_only=True\n )\n\n class Meta:\n model = Package\n fields = [\n \"resolved_vulnerabilities\",\n \"unresolved_vulnerabilities\",\n ]\n\n\nclass PackageSerializer(MinimalPackageSerializer):\n purl = serializers.CharField(source=\"package_url\")\n\n class Meta:\n model = Package\n exclude = [\"vulnerabilities\"]\n\n\nclass PackageFilterSet(filters.FilterSet):\n purl = filters.CharFilter(method=\"filter_purl\")\n\n class Meta:\n model = Package\n fields = [\"name\", \"type\", \"version\", \"subpath\", \"purl\"]\n\n def filter_purl(self, queryset, name, value):\n purl = unquote(value)\n try:\n purl = PackageURL.from_string(purl)\n\n except ValueError as ve:\n raise serializers.ValidationError(\n detail={\"error\": f'\"{purl}\" is not a valid Package URL: {ve}'},\n )\n\n attrs = {k: v for k, v in purl.to_dict().items() if v}\n return self.queryset.filter(**attrs)\n\n\nclass PackageViewSet(viewsets.ReadOnlyModelViewSet):\n queryset = Package.objects.all()\n serializer_class = PackageSerializer\n filter_backends = (filters.DjangoFilterBackend,)\n filterset_class = PackageFilterSet\n\n # TODO: Fix the swagger documentation for this endpoint\n @extend_schema(request=placeholder_serializer, responses=placeholder_serializer)\n @action(detail=False, methods=[\"post\"])\n def bulk_search(self, request):\n \"\"\"\n See https://github.com/nexB/vulnerablecode/pull/303#issuecomment-761801639 for docs\n \"\"\"\n filter_list = Q()\n response = {}\n if not isinstance(request.data.get(\"packages\"), list):\n return Response(\n status=400,\n data={\n \"Error\": \"Request needs to contain a key 'packages' which has the value of a list of package urls\" # nopep8\n },\n )\n for purl in request.data[\"packages\"]:\n try:\n filter_list |= Q(\n **{k: v for k, v in PackageURL.from_string(purl).to_dict().items() if v}\n )\n except ValueError as ve:\n return Response(status=400, data={\"Error\": str(ve)})\n\n # This handles the case when the said purl doesnt exist in db\n response[purl] = {}\n res = Package.objects.filter(filter_list)\n for p in res:\n response[p.package_url] = MinimalPackageSerializer(p, context={\"request\": request}).data\n\n return Response(response)\n\n\nclass VulnerabilityFilterSet(filters.FilterSet):\n class Meta:\n model = Vulnerability\n fields = [\"vulnerability_id\"]\n\n\nclass VulnerabilityViewSet(viewsets.ReadOnlyModelViewSet):\n queryset = Vulnerability.objects.all()\n serializer_class = VulnerabilitySerializer\n paginate_by = 50\n filter_backends = (filters.DjangoFilterBackend,)\n filterset_class = VulnerabilityFilterSet\n\n # TODO: Fix the swagger documentation for this endpoint\n @extend_schema(request=placeholder_serializer, responses=placeholder_serializer)\n @action(detail=False, methods=[\"post\"])\n def bulk_search(self, request):\n \"\"\"\n See https://github.com/nexB/vulnerablecode/pull/303#issuecomment-761801619 for docs\n \"\"\"\n filter_list = []\n response = {}\n if not isinstance(request.data.get(\"vulnerabilities\"), list):\n return Response(\n status=400,\n data={\n \"Error\": \"Request needs to contain a key 'vulnerabilities' which has the value of a list of vulnerability ids\" # nopep8\n },\n )\n\n for vulnerability_id in request.data[\"vulnerabilities\"]:\n filter_list.append(vulnerability_id)\n # This handles the case when the said cve doesnt exist in db\n response[vulnerability_id] = {}\n res = 
Vulnerability.objects.filter(vulnerability_id__in=filter_list)\n        for vuln in res:\n            response[vuln.vulnerability_id] = MinimalVulnerabilitySerializer(\n                vuln, context={\"request\": request}\n            ).data\n        return Response(response)\n","sub_path":"vulnerabilities/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":7946,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"397436724","text":"# 1. Write a method that combines two strings, by taking one character from the first string, then one from the second string and so on. Once one string has no characters left it should carry on with the other string.\nimport unittest\n\n\ndef mergeStrings(string1, string2):\n    string1List = []\n    string2List = []\n\n    if len(string1) == 0:\n        pass\n    else:\n        for char in string1:\n            string1List.append(char)\n\n    if len(string2) == 0:\n        pass\n    else:\n        for char in string2:\n            string2List.append(char)\n\n    mergedStringList = [j for i in zip(string1List, string2List) for j in i]\n\n    # once the shorter string runs out, carry on with the rest of the longer one\n    shorter = min(len(string1List), len(string2List))\n    mergedStringList += string1List[shorter:] + string2List[shorter:]\n\n    mergedString = ''.join(mergedStringList)\n    print(\"Your combined string is: \", mergedString)\n    return mergedString\n\n# mergeStrings()\n\n# 2. Check if a 3 digit number is an Armstrong number. An Armstrong number of three digits is an integer such that the sum of the cubes of its digits is equal to the number itself.\n\n\ndef armstrongNumber():\n    number = input(\"Input a 3 digit number: \")\n    numberList = []\n\n    if len(number) < 3:\n        print(\"Your number is not a 3 digit number.\")\n    else:\n        for char in number:\n            numberList.append(char)\n        number = int(number)\n\n        firstNumber = int(numberList[0])**3\n        secondNumber = int(numberList[1])**3\n        thirdNumber = int(numberList[2])**3\n        combinedNumbers = firstNumber + secondNumber + thirdNumber\n\n        if number == combinedNumbers:\n            print(\"Yes\")\n        else:\n            print(\"No\")\n\n\n# armstrongNumber()\n\nclass IsAnagramTests(unittest.TestCase):\n\n    def testOne(self):\n        self.assertEqual(mergeStrings(\"diana\", \"anaid\"), \"dainaaniad\")\n\n\ndef main():\n    unittest.main()\n\n\nif __name__ == '__main__':\n\n    main()\n","sub_path":"Week_1/Week1.py","file_name":"Week1.py","file_ext":"py","file_size_in_byte":1693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"118322132","text":"\"\"\" Name : Neil Barot\nCourse : CMPS 1500\nLab Section : Tuesday 2 -3.15 pm\nAssignment : hw7pr0.py\nDate : 10/28/14\n\"\"\"\n\ndef rearrange(aList, k):\n    \"\"\"\n    Sample input: [4, 18, 2 , 39, 20, 3 ,4, 3, 8], 7\n    Sample output: [4, 2, 3, 4, 3, 8, 20, 39, 18]\n    \"\"\"\n    #base case\n    if aList == []: \n        return []\n    #recursive case\n    if aList[0] > k:\n        return rearrange(aList[1:], k) + [aList[0]]\n    return [aList[0]] + rearrange(aList[1:], k)\n\n\n#Final Big O Notation: O(n)\n","sub_path":"Homework 7/hw7pr0.py","file_name":"hw7pr0.py","file_ext":"py","file_size_in_byte":487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"95497653","text":"import argparse\n\nimport tensorflow as tf\n\nARGS = None\nexample_queue = None\n\ndef parse_args():\n    '''Parses the command line arguments'''\n\n    global ARGS\n\n    #Optional arguments\n    parser = argparse.ArgumentParser(description='This is my description')\n    parser.add_argument('-e', '--epochs', default='10', type=int, help='The number of epochs')\n    parser.add_argument('-b', '--batch', default='128', type=int, help='The batch size')\n    parser.add_argument('--no-shuffle-inputs', default=False, action='store_true', help='Whether the input files shall be shuffled per epoch')\n    
parser.add_argument('--no-shuffle', default=False, action='store_true', help='Whether the examples shall be shuffled')\n    parser.add_argument('--input-threads', default=1, type=int, help='The number of threads producing the examples from the inputs')\n\n    #Required arguments\n    parser.add_argument('method', choices=['inf', 'train', 'eval'])\n    parser.add_argument('inputs', nargs='+')\n    \n    ARGS = parser.parse_args()\n\ndef input_pipeline():\n    '''Creates the input pipeline\n\n    The pipeline consists of the following stages:\n\n    1) Input files are enqueued into an input_queue\n    2) Readers dequeue input files from the input_queue and read the contained records\n    3) Each record read by a Reader gets transformed into an (example, label) pair\n    4) The example gets enqueued into an example_queue\n\n    Stage 1 and stages 2-4 are processed in different threads. For stage 1\n    usually only one thread is started. The number of threads performing stages\n    2-4 can be specified through the --input-threads command line argument.\n    '''\n\n    def normalize_record(key, record_string):\n        '''Generates (example, label) pairs used for training\n\n        Args:\n            key: The key of the record_string\n            record_string: The record read by the reader\n\n        Returns:\n            An (example, label) pair\n        '''\n        return (record_string, key)\n\n    def read_record(input_queue, example_queue):\n        '''Reads a record from an input file\n\n        Args:\n            input_queue: A queue containing input files that store records\n            example_queue: The queue into which (example, label) pairs are\n                enqueued\n\n        Returns:\n            An enqueue operation node\n        '''\n\n        reader = tf.TextLineReader()\n\n        key, record_string = reader.read(input_queue)\n        example, label = normalize_record(key, record_string)\n\n        return example_queue.enqueue((example, label)) \n\n    #Create the queue into which the examples to train the model are enqueued\n    global example_queue\n    if ARGS.no_shuffle:\n        # FIFOQueue needs a capacity and the element dtypes; mirror the\n        # RandomShuffleQueue settings below\n        example_queue = tf.FIFOQueue(capacity=ARGS.batch * 5, dtypes=(tf.string))\n    else:\n        example_queue = tf.RandomShuffleQueue(capacity=ARGS.batch * 5, min_after_dequeue=ARGS.batch * 3, dtypes=(tf.string))\n\n    #Create a string input producer, which creates a Queue that gets filled\n    #with the input files.\n    input_queue = tf.train.string_input_producer(ARGS.inputs, num_epochs=ARGS.epochs, shuffle=not ARGS.no_shuffle_inputs)\n\n    enqueue_ops = [read_record(input_queue, example_queue) for i in range(ARGS.input_threads)]\n\n    #Create the QueueRunner which starts the threads that are running the\n    #enqueue operations. \n    example_queue_runner = tf.train.QueueRunner(example_queue, enqueue_ops)\n    \n    #Add the queue runner to the QUEUE_RUNNER collection. 
This way we can \n #easily start the queue runner with a call to tf.start_queue_runners\n tf.train.add_queue_runner(example_queue_runner)\n\ndef inference():\n '''Create the model for inference only\n\n Returns:\n An inference operation node\n '''\n\n return example_queue.dequeue()\n\ndef train():\n '''Extends the graph with operations for training'''\n\n pass\n\ndef eval():\n pass\n\n\nif __name__ == '__main__':\n \n parse_args()\n\n with tf.name_scope('input_pipeline'):\n input_pipeline()\n\n with tf.name_scope('inference'):\n inf = inference()\n\n with tf.Session() as sess:\n\n summary_writer = tf.train.SummaryWriter('log', sess.graph)\n\n tf.initialize_all_variables().run()\n sess.graph.finalize()\n\n coord = tf.train.Coordinator()\n threads = tf.train.start_queue_runners(coord=coord)\n\n try:\n print(inf.eval())\n except Exception as e:\n print(e)\n coord.request_stop(e)\n finally:\n coord.request_stop()\n\n coord.join(threads)\n","sub_path":"nn/tensorflow-02.py","file_name":"tensorflow-02.py","file_ext":"py","file_size_in_byte":4463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"118945763","text":"import unittest\nfrom Wpp.WppCore import WppCore\nfrom Python.PyCore import PyCore\nfrom out.OutContextMemoryStream import OutContextMemoryStream\n\nclass TestPyForeach(unittest.TestCase):\n\tdef testSimple(self):\n\t\tsource = \"\"\"\nfunc public main\n\tvar myList: Array double = [0.1, 0.4, 0.8]\n\tvar summa: double = 0.0\n\tforeach myList => var item\n\t\tsumma += item\n\t\t\"\"\"\n\t\texpected = \"\"\"\ndef main():\n\tmyList = [0.1, 0.4, 0.8]\n\tsumma = 0.0\n\tfor item in myList:\n\t\tsumma += item\n\t\t\"\"\"\n\t\tsrcModule = WppCore.createMemModule(source, 'foreach.fake')\n\t\tdstModule = srcModule.cloneRoot(PyCore())\n\t\toutContext = OutContextMemoryStream()\n\t\tdstModule.export(outContext)\n\t\tself.assertEqual(str(outContext), expected.strip())\n\n\tdef testArrayIndex(self):\n\t\tsource = \"\"\"\nfunc public main\n\tvar myList: Array double = [0.1, 0.4, 0.8]\n\tvar summa: double = 0.0\n\tforeach myList => var item => var i\n\t\tsumma += item * (i + 1)\n\t\t\"\"\"\n\t\texpected = \"\"\"\ndef main():\n\tmyList = [0.1, 0.4, 0.8]\n\tsumma = 0.0\n\tfor i, item in enumerate(myList):\n\t\tsumma += item * (i + 1)\n\t\t\"\"\"\n\t\tsrcModule = WppCore.createMemModule(source, 'foreach.fake')\n\t\tdstModule = srcModule.cloneRoot(PyCore())\n\t\toutContext = OutContextMemoryStream()\n\t\tdstModule.export(outContext)\n\t\tself.assertEqual(str(outContext), expected.strip())\n\n\tdef testMapFull(self):\n\t\t\"\"\" Test for construction: for key, value in map.items() \"\"\"\n\t\tsource = \"\"\"\nfunc public toList: Array String\n\tparam map: const Map String, String\n\tvar result: Array String\n\tforeach map => var value => var key\n\t\tresult.push(key + \":\" + value)\n\tresult\n\t\t\"\"\"\n\t\texpected = \"\"\"\ndef toList(map):\n\tresult = []\n\tfor key, value in map.items():\n\t\tresult.append(key + ':' + value)\n\treturn result\n\t\t\"\"\"\n\t\tsrcModule = WppCore.createMemModule(source, 'foreach.fake')\n\t\tdstModule = srcModule.cloneRoot(PyCore())\n\n\t\ttoListOver = dstModule.dictionary['toList']\n\t\tself.assertEqual(toListOver.type, 'Overloads')\n\t\ttoList = toListOver.items[0]\n\t\tself.assertEqual(toList.type, 'Func')\n\t\tcmdFor = toList.getBody().items[1]\n\t\tself.assertEqual(cmdFor.type, 'Foreach')\n\t\tcmdCall = cmdFor.getBody().items[0]\n\t\tself.assertEqual(cmdCall.type, 'Call')\n\t\tcaller = 
cmdCall.getCaller()\n\t\tself.assertEqual(caller.type, 'BinOp')\n\t\tpush = caller.getRight()\n\t\tself.assertEqual(push.type, 'FieldExpr')\n\n\t\toutContext = OutContextMemoryStream()\n\t\tdstModule.export(outContext)\n\t\tself.assertEqual(str(outContext), expected.strip())\n\n\tdef testMapValues(self):\n\t\t\"\"\" Test for construction: for value in map.values() \"\"\"\n\t\tsource = \"\"\"\nfunc public sumValues: double\n\tparam map: const Map String, double\n\tvar result: double = 0.0\n\tforeach map => var value\n\t\tresult += value\n\tresult\n\t\t\"\"\"\n\t\texpected = \"\"\"\ndef sumValues(map):\n\tresult = 0.0\n\tfor value in map.values():\n\t\tresult += value\n\treturn result\n\t\t\"\"\"\n\t\tsrcModule = WppCore.createMemModule(source, 'values.fake')\n\t\tdstModule = srcModule.cloneRoot(PyCore())\n\t\toutContext = OutContextMemoryStream()\n\t\tdstModule.export(outContext)\n\t\tself.assertEqual(str(outContext), expected.strip())\n","sub_path":"src/Python/tests/testPyForeach.py","file_name":"testPyForeach.py","file_ext":"py","file_size_in_byte":2982,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"267106934","text":"from bs4 import BeautifulSoup # pip install beautifulsoup4 # pip install lxml\nimport requests # pip install requests\nimport csv\nimport sqlite3\n\n\ncon = sqlite3.connect('my_project_new.db')\ncur = con.cursor()\n\nstep = 1\nurl = ('https://www.imdb.com/search/title/?title_type=feature&'\n 'release_date=2000-01-01,2002-12-31&user_rating=1.0,&sort=year,asc&start={}&count=200')\n\nurl_cast = 'https://www.imdb.com/title/tt{}/fullcredits'\n\n\n\nfor page in range(0, step * 10, step):\n source = requests.get(url.format(801)).text\n soup = BeautifulSoup(source, 'lxml')\n # print(soup)\n imdb_table = {}\n if page == 1:\n break\n\n for film in soup.find_all('div', class_='lister-item-content'):\n title = film.h3.a.text\n idx = film.h3.a['href'].split('/')[2][2:]\n imdb_table[\"movie_id\"]= idx\n\n film_open = f\"https://www.imdb.com{film.h3.a['href']}\" # link\n print(film_open)\n film_open_source = requests.get(film_open).text # opened, entered in\n # print(film_open_source)\n f_soup = BeautifulSoup(film_open_source, 'lxml') # search\n if f_soup.find('div', class_=\"titleReviewBarItem\"):\n met = f_soup.find('div', class_=\"titleReviewBarItem\").text.strip().split(\"\\n\")\n meta_score = met[0]\n if meta_score.strip()==\"Reviews\":\n meta_score = \"None\"\n else:\n meta_score = \"None\"\n print(meta_score)\n # imdb_table[meta_score] = meta_score\n\n rate_open = f\"https://www.imdb.com/{film.h3.a['href']}ratings\"\n print(rate_open)\n rate_open_source = requests.get(rate_open).text # opened, entered in\n r_soup = BeautifulSoup(rate_open_source, 'lxml') # search\n # print(r_soup)\n test = r_soup.find('div', class_=\"allText\").text.strip().split(\"\\n\")\n # print(test)\n voters = test[0]\n print(voters)\n imdb_table[\"voters\"]=voters\n weighted_av = test[1].strip().split(\" \")\n wa = weighted_av[1].strip().split(\"/\")\n rating_wa = wa[0]\n imdb_table[\"rating_wa\"]=rating_wa\n print(rating_wa)\n if r_soup.find('div', {\"align\": \"center\"}).text.strip().split(\"\\n\"):\n med_whole = r_soup.find('div', {\"align\": \"center\"}).text.strip().split(\"\\n\")\n med_sep = med_whole[-1].strip()\n med = med_sep.split(\"=\")\n median = med[1].strip()\n print(median)\n else:\n median = \"None\"\n imdb_table[\"rating_median\"]= median\n male_whole = r_soup.findAll('div', class_=\"bigcell\")\n rating_male = male_whole[5].text\n 
rating_woman = male_whole[10].text\n        imdb_table[\"rating_female\"] = rating_woman\n        imdb_table[\"rating_male\"] = rating_male\n        print(rating_male)\n        print(rating_woman)\n\n        attrib_names = \", \".join(imdb_table.keys())\n        print(attrib_names)\n        attrib_values = \", \".join(\"?\" * len(imdb_table.keys()))\n        print(attrib_values)\n        sql = f'INSERT OR REPLACE INTO movie_imbd_rating ({attrib_names}) VALUES ({attrib_values})'\n        cur.execute(sql, list(imdb_table.values()))\n\n        print(imdb_table)\n        print(\"--------------\")\ncon.commit()\ncon.close()\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"movies_project/imdb_rating_scrapping.py","file_name":"imdb_rating_scrapping.py","file_ext":"py","file_size_in_byte":3246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"500040981","text":"from app import app\nfrom flask import render_template, request, json, Response\n\nsome_data = [{ \"name\": 'Item one', \"id\": \"1\" }, { \"name\": 'Item two', \"id\": \"2\" }]\n\n@app.route('/')\n@app.route('/home')\ndef home():\n    return render_template(\"index.html\", new_user=False, title=\"Home page\")\n\n\n@app.route('/thoughts')\ndef thoughts():\n    return render_template(\"thoughts.html\", title=\"Thoughts page\")\n\n\n@app.route('/data')\ndef data():\n\n    print(some_data)\n    return render_template(\"data.html\", some_data=some_data)\n\n\n@app.route('/people')\n@app.route('/people/<user>')\ndef people(user=\"Clarence\"):\n    return render_template(\"people.html\", user=user)\n\n@app.route('/add_person', methods=[\"GET\", \"POST\"])\ndef add_person():\n    name = request.form.get('name')\n    return render_template(\"add_person.html\", name=name)\n\n@app.route('/api')\n@app.route('/api/<id>')\ndef api(id=None): \n    if(id == None):\n        data = some_data\n    else:\n        data = list(filter(lambda x: x[\"id\"] == id, some_data))\n    print(data)\n\n\n    return Response(json.dumps(data), mimetype=\"application/json\")","sub_path":"app/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":1082,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"387125828","text":"#!/usr/bin/env python\n#\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n#\n# Michael A.G. 
Aivazis\n# California Institute of Technology\n# (C) 1998-2005 All Rights Reserved\n#\n# \n#\n# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n#\n\n\nfrom pyre.components.Component import Component\n\n\nclass Journal(Component):\n\n\n    class Inventory(Component.Inventory):\n\n\n        from ChannelFacility import ChannelFacility\n        from DeviceFacility import DeviceFacility\n\n        error = ChannelFacility(\"error\")\n        error.meta['tip'] = 'controls which error messages get printed'\n        \n        warning = ChannelFacility(\"warning\")\n        warning.meta['tip'] = 'controls which warnings get printed'\n        \n        info = ChannelFacility(\"info\")\n        info.meta['tip'] = 'controls which informational messages get printed'\n\n        debug = ChannelFacility(\"debug\")\n        debug.meta['tip'] = 'controls which debugging messages get printed'\n\n        firewall = ChannelFacility(\"firewall\")\n        firewall.meta['tip'] = 'controls which firewalls are checked'\n\n        device = DeviceFacility()\n        device.meta['tip'] = 'controls the output device used for printing the generated messages'\n\n\n    def device(self):\n        return self.inventory.device\n\n\n    def __init__(self, name=None):\n        if name is None:\n            name = 'journal'\n        \n        Component.__init__(self, name, facility=\"journal\")\n        return\n\n\n    def _init(self):\n        import journal\n        theJournal = journal.journal()\n\n        device = self.inventory.device.device\n        theJournal.device = device\n\n        Component._init(self)\n\n        return\n\n# version\n__id__ = \"$Id: Journal.py,v 1.1.1.1 2006-11-27 00:09:35 aivazis Exp $\"\n\n# End of file \n","sub_path":"python/journal/components/Journal.py","file_name":"Journal.py","file_ext":"py","file_size_in_byte":1884,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"650736805","text":"#!/usr/bin/env python3\n# -*-coding:utf-8 -*\n\n\nfrom geometry_msgs.msg import Twist\nimport rospy\n\n\ntopicName = '/cmd_vel'\n\nmaximasValues = {\n\t\"forward\" : 0.22,\n\t\"backward\" : -0.22,\n\t\"twist left\" : 2.84,\n\t\"twist right\" : -2.84\n}\nstopValues = {\n\t\"transversal\": (maximasValues[\"forward\"] + maximasValues[\"backward\"])/2,\n\t\"yaw\": (maximasValues[\"twist left\"] + maximasValues[\"twist right\"])/2,\n}\nminValues = {\n\t\"forward\": stopValues[\"transversal\"] + (maximasValues[\"forward\"] - stopValues[\"transversal\"])/5,\n\t\"backward\": stopValues[\"transversal\"] + (maximasValues[\"backward\"] - stopValues[\"transversal\"])/5,\n\t\"twist left\": stopValues[\"yaw\"] + (maximasValues[\"twist left\"] - stopValues[\"yaw\"])/5,\n\t\"twist right\": stopValues[\"yaw\"] + (maximasValues[\"twist right\"] - stopValues[\"yaw\"])/5\n}\n\nstopPosition = Twist()\nstopPosition.linear.x = stopValues[\"transversal\"]\nstopPosition.angular.z = stopValues[\"yaw\"]\n\npub = rospy.Publisher(topicName, Twist, queue_size = 1)\n\n\ndef writeOnTopic(\n\ttransversal = stopValues[\"transversal\"],\n\tyaw = stopValues[\"yaw\"],\n\tpubToWrite = pub,\n\tstop = False):\n\tif stop:\n\t\tpubToWrite.publish(stopPosition)\n\telse:\n\t\ttwist = Twist()\n\t\ttwist.linear.x = transversal\n\t\ttwist.angular.z = yaw\n\t\tpubToWrite.publish(twist)\n\n\nif __name__ == '__main__':\n\tpass\n","sub_path":"Programs/Config/topic_turtle_bot.py","file_name":"topic_turtle_bot.py","file_ext":"py","file_size_in_byte":1265,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"}
+{"seq_id":"575045167","text":"import cv2\nimport random\nimport scipy.io as sio\nimport tensorflow as tf\nimport numpy as np\nimport os\nimport 
pickle\nfrom tqdm import tqdm\nimport sys\n\n\nfrom datasets.dataset_utils import int64_feature, float_feature, bytes_feature\n\nVOC_LABELS = {\n 'none': (0, 'Background'),\n 'text': (1, 'text')\n}\n\nRANDOM_SEED = 200\nSAMPLES_PER_FILES = 500\n\n# convert wordBB to aabb boxes list\ndef convert_rotbbox_to_aabb(wordBB, image_size):\n if len(wordBB.shape) < 3:\n wordBB = np.expand_dims(wordBB, axis=-1)\n\n word_len = wordBB.shape[2]\n height, width = image_size\n aabb = np.zeros((word_len, 4), dtype=np.float32)\n for i in xrange(word_len):\n x_min = np.min(wordBB[0, :, i]) / float(width)\n x_max = np.max(wordBB[0, :, i]) / float(width)\n y_min = np.min(wordBB[1, : ,i]) / float(height)\n y_max = np.max(wordBB[1, : ,i]) / float(height)\n aabb[i,:] = np.array([y_min, x_min, y_max, x_max])\n\n return aabb\n\n\ndef run(dataset_dir, output_dir, split_ratio=0.9, shuffling=False):\n \"\"\"Runs the conversion operation.\n\n Args:\n dataset_dir: The dataset directory where the dataset is stored.\n output_dir: Output directory.\n \"\"\"\n \n def _process_image(directory, idx_of_img):\n \"\"\"Process a image and annotation file.\n\n Args:\n directory: the syntext data directory\n idx_of_img: int, the index of images in gt.mat\n Returns:\n image_data: string, JPEG encoding of RGB image.\n shape: the image shape\n bboxes: the bounding box of text\n labels: the label of text always 1, there is only one label\n \"\"\"\n # Read the image file.\n name = str(gt['imnames'][:,idx_of_img][0][0])\n filename = directory + name\n image_data = tf.gfile.FastGFile(filename, 'r').read()\n\n shape = image_shapes[name]\n\n wordBB = gt['wordBB'][:,idx_of_img][0]\n\n bboxes = convert_rotbbox_to_aabb(wordBB, shape[:2])\n labels = [VOC_LABELS['text'][0]] * len(bboxes)\n\n return image_data, shape, bboxes, labels\n \n def _convert_to_example(image_data, shape, bboxes, labels):\n \"\"\"Build an Example proto for an image example.\n\n Args:\n image_data: string, JPEG encoding of RGB image; Sythtext images are always .jpg\n labels: list of integers, identifier for the ground truth;\n bboxes: list of bounding boxes; each box is a list of integers;\n specifying [xmin, ymin, xmax, ymax]. 
All boxes are assumed to belong\n to the same label as the image label.\n shape: 3 integers, image shapes in pixels.\n Returns:\n Example proto\n \"\"\"\n xmin = []\n ymin = []\n xmax = []\n ymax = []\n for b in bboxes:\n assert len(b) == 4\n # pylint: disable=expression-not-assigned\n [l.append(point) for l, point in zip([ymin, xmin, ymax, xmax], b)]\n # pylint: enable=expression-not-assigned\n\n image_format = b'JPEG'\n example = tf.train.Example(features=tf.train.Features(feature={\n 'image/height': int64_feature(shape[0]),\n 'image/width': int64_feature(shape[1]),\n 'image/channels': int64_feature(shape[2]),\n 'image/shape': int64_feature(shape),\n 'image/object/bbox/xmin': float_feature(xmin),\n 'image/object/bbox/xmax': float_feature(xmax),\n 'image/object/bbox/ymin': float_feature(ymin),\n 'image/object/bbox/ymax': float_feature(ymax),\n 'image/object/bbox/label': int64_feature(labels),\n 'image/format': bytes_feature(image_format),\n 'image/encoded': bytes_feature(image_data)}))\n return example\n \n def _add_to_tfrecord(dataset_dir, idx, tfrecord_writer):\n \"\"\"Loads data from image and annotations files and add them to a TFRecord.\n\n Args:\n dataset_dir: Dataset directory;\n name: Image name to add to the TFRecord;\n tfrecord_writer: The TFRecord writer to use for writing.\n \"\"\"\n image_data, shape, bboxes, labels = \\\n _process_image(dataset_dir, idx)\n example = _convert_to_example(image_data, shape, bboxes, labels)\n tfrecord_writer.write(example.SerializeToString())\n \n def _get_output_filename(output_dir, split_name, idx):\n return '%s/syntext_%s_%04d.tfrecord' % (output_dir, split_name, idx)\n\n if not tf.gfile.Exists(output_dir):\n tf.gfile.MakeDirs(output_dir)\n\n print('read gt.mat file...')\n gt = sio.loadmat(os.path.join(dataset_dir, 'gt.mat'))\n \n print('read image_shape.pkl file...')\n with open(os.path.join(dataset_dir, 'image_shape.pkl')) as f:\n image_shapes = pickle.load(f)\n \n num_of_image = gt['imnames'].shape[1]\n \n assert num_of_image == len(image_shapes)\n \n # assert split_ratio belong to (0, 1)\n num_for_train = int(num_of_image * split_ratio)\n \n # shuffle the indices\n fileidxs = range(0, num_of_image)\n if shuffling:\n random.seed(RANDOM_SEED)\n random.shuffle(fileidxs)\n \n # pick up indices for train and test\n idxs_for_train = fileidxs[:num_for_train]\n idxs_for_test = fileidxs[num_for_train:]\n\n # Process dataset files for trains\n print('convert data for train.')\n\n i, fidx = 0, 0\n while i < len(idxs_for_train):\n tf_filename = _get_output_filename(output_dir, 'train', fidx)\n with tf.python_io.TFRecordWriter(tf_filename) as tfrecord_writer:\n j = 0\n while i < len(idxs_for_train) and j < SAMPLES_PER_FILES:\n sys.stdout.write('\\r>> Converting image %d/%d' % (i+1, len(idxs_for_train)))\n sys.stdout.flush()\n _add_to_tfrecord(dataset_dir, idxs_for_train[i], tfrecord_writer)\n i +=1\n j +=1\n \n fidx += 1\n \n # Process dataset files for test\n print('convert data for test.')\n\n i, fidx = 0, 0\n while i < len(idxs_for_test):\n tf_filename = _get_output_filename(output_dir, 'test', fidx)\n with tf.python_io.TFRecordWriter(tf_filename) as tfrecord_writer:\n j = 0\n while i < len(idxs_for_test) and j < SAMPLES_PER_FILES:\n sys.stdout.write('\\r>> Converting image %d/%d' % (i+1, len(idxs_for_test)))\n sys.stdout.flush()\n _add_to_tfrecord(dataset_dir, idxs_for_test[i], tfrecord_writer)\n i +=1\n j +=1\n \n fidx += 1\n \n print('\\nFinished converting the SynthText 
dataset!')\n","sub_path":"datasets/synthtext_to_tfrecords.py","file_name":"synthtext_to_tfrecords.py","file_ext":"py","file_size_in_byte":6654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"111258420","text":"class Solution:\n def reverseParentheses(self, s: str) -> str:\n\n def isChar(ch):\n return ch != '(' and ch != ')'\n\n i = 0\n stack = ['']\n N = len(s)\n while i < N:\n if isChar(s[i]):\n stack[-1] += s[i]\n elif s[i] == '(':\n stack.append('')\n else:\n rs0 = stack.pop()[::-1]\n stack[-1] += rs0\n i += 1\n return stack[-1]\n\n\nsol = Solution()\nret = sol.reverseParentheses(\"a(bcdefghijkl(mno)p)q\")\nprint(ret)\n","sub_path":"src/reverse-substrings-between-each-pair-of-parentheses.py","file_name":"reverse-substrings-between-each-pair-of-parentheses.py","file_ext":"py","file_size_in_byte":554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"75713889","text":"# coding:utf-8\nfrom __future__ import absolute_import, unicode_literals\n\nimport csv\nimport json\nimport uuid\nimport codecs\nimport chardet\nfrom io import StringIO\nfrom collections import defaultdict\n\nfrom django.conf import settings\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.views.generic import TemplateView, ListView, View\nfrom django.views.generic.edit import CreateView, DeleteView, FormView, UpdateView\nfrom django.urls import reverse_lazy\nfrom django.views.generic.detail import DetailView, SingleObjectMixin\nfrom django.http import HttpResponse, JsonResponse, HttpResponseRedirect\nfrom django.views.decorators.csrf import csrf_protect, csrf_exempt\nfrom django.utils.decorators import method_decorator\nfrom django.core.cache import cache\nfrom django.utils import timezone\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.shortcuts import get_object_or_404, redirect, reverse\n\nfrom common.mixins import JSONResponseMixin\nfrom common.utils import get_object_or_none\nfrom .. 
import forms\nfrom ..models import Asset, AssetGroup, AdminUser, IDC, SystemUser\nfrom ..hands import AdminUserRequiredMixin\nfrom ..tasks import update_assets_hardware_info\n\n\n__all__ = ['AssetListView', 'AssetCreateView', 'AssetUpdateView',\n 'UserAssetListView', 'AssetBulkUpdateView', 'AssetDetailView',\n 'AssetModalListView', 'AssetDeleteView', 'AssetExportView',\n 'BulkImportAssetView',\n ]\n\n\nclass AssetListView(AdminUserRequiredMixin, TemplateView):\n template_name = 'assets/asset_list.html'\n\n def get_context_data(self, **kwargs):\n context = {\n 'app': 'Assets',\n 'action': 'Asset list',\n 'groups': AssetGroup.objects.all(),\n 'system_users': SystemUser.objects.all(),\n # 'form': forms.AssetBulkUpdateForm(),\n }\n kwargs.update(context)\n return super(AssetListView, self).get_context_data(**kwargs)\n\n\nclass UserAssetListView(LoginRequiredMixin, TemplateView):\n template_name = 'assets/user_asset_list.html'\n\n def get_context_data(self, **kwargs):\n context = {\n 'app': 'Assets',\n 'action': 'Asset list',\n 'system_users': SystemUser.objects.all(),\n }\n kwargs.update(context)\n return super(UserAssetListView, self).get_context_data(**kwargs)\n\n\nclass AssetCreateView(AdminUserRequiredMixin, CreateView):\n model = Asset\n form_class = forms.AssetCreateForm\n template_name = 'assets/asset_create.html'\n success_url = reverse_lazy('assets:asset-list')\n\n def form_valid(self, form):\n self.asset = asset = form.save()\n asset.created_by = self.request.user.username or 'Admin'\n asset.date_created = timezone.now()\n asset.save()\n return super(AssetCreateView, self).form_valid(form)\n\n def get_context_data(self, **kwargs):\n context = {\n 'app': 'Assets',\n 'action': 'Create asset',\n }\n kwargs.update(context)\n return super(AssetCreateView, self).get_context_data(**kwargs)\n\n def get_success_url(self):\n update_assets_hardware_info.delay([self.asset._to_secret_json()])\n return super(AssetCreateView, self).get_success_url()\n\n\nclass AssetModalListView(AdminUserRequiredMixin, ListView):\n paginate_by = settings.CONFIG.DISPLAY_PER_PAGE\n model = Asset\n context_object_name = 'asset_modal_list'\n template_name = 'assets/asset_modal_list.html'\n\n def get_context_data(self, **kwargs):\n assets = Asset.objects.all()\n assets_id = self.request.GET.get('assets_id', '')\n assets_id_list = [i for i in assets_id.split(',') if i.isdigit()]\n context = {\n 'all_assets': assets_id_list,\n 'assets': assets\n }\n kwargs.update(context)\n return super(AssetModalListView, self).get_context_data(**kwargs)\n\n\nclass AssetBulkUpdateView(AdminUserRequiredMixin, ListView):\n model = Asset\n form_class = forms.AssetBulkUpdateForm\n template_name = 'assets/asset_bulk_update.html'\n success_url = reverse_lazy('assets:asset-list')\n\n def get(self, request, *args, **kwargs):\n assets_id = self.request.GET.get('assets_id', '')\n self.assets_id_list = [int(i) for i in assets_id.split(',') if i.isdigit()]\n\n if kwargs.get('form'):\n self.form = kwargs['form']\n elif assets_id:\n self.form = self.form_class(\n initial={'assets': self.assets_id_list}\n )\n else:\n self.form = self.form_class()\n return super(AssetBulkUpdateView, self).get(request, *args, **kwargs)\n\n def post(self, request, *args, **kwargs):\n form = self.form_class(request.POST)\n if form.is_valid():\n form.save()\n return redirect(self.success_url)\n else:\n return self.get(request, form=form, *args, **kwargs)\n\n def get_context_data(self, **kwargs):\n # assets_list = Asset.objects.filter(id__in=self.assets_id_list)\n context = 
{\n 'app': 'Assets',\n 'action': 'Bulk update asset',\n 'form': self.form,\n 'assets_selected': self.assets_id_list,\n 'assets': Asset.objects.all(),\n }\n kwargs.update(context)\n return super(AssetBulkUpdateView, self).get_context_data(**kwargs)\n\n\nclass AssetUpdateView(AdminUserRequiredMixin, UpdateView):\n model = Asset\n form_class = forms.AssetUpdateForm\n template_name = 'assets/asset_update.html'\n success_url = reverse_lazy('assets:asset-list')\n\n def get_context_data(self, **kwargs):\n context = {\n 'app': 'Assets',\n 'action': 'Update asset',\n }\n kwargs.update(context)\n return super(AssetUpdateView, self).get_context_data(**kwargs)\n\n def form_invalid(self, form):\n print(form.errors)\n return super(AssetUpdateView, self).form_invalid(form)\n\n\nclass AssetDeleteView(AdminUserRequiredMixin, DeleteView):\n model = Asset\n template_name = 'assets/delete_confirm.html'\n success_url = reverse_lazy('assets:asset-list')\n\n\nclass AssetDetailView(DetailView):\n model = Asset\n context_object_name = 'asset'\n template_name = 'assets/asset_detail.html'\n\n def get_context_data(self, **kwargs):\n asset_groups = self.object.groups.all()\n system_users = self.object.system_users.all()\n context = {\n 'app': 'Assets',\n 'action': 'Asset detail',\n 'asset_groups_remain': [asset_group for asset_group in AssetGroup.objects.all()\n if asset_group not in asset_groups],\n 'asset_groups': asset_groups,\n 'system_users_all': SystemUser.objects.all(),\n 'system_users': system_users,\n }\n kwargs.update(context)\n return super(AssetDetailView, self).get_context_data(**kwargs)\n\n\n@method_decorator(csrf_exempt, name='dispatch')\nclass AssetExportView(View):\n def get(self, request):\n spm = request.GET.get('spm', '')\n assets_id_default = [Asset.objects.first().id] if Asset.objects.first() else [1]\n assets_id = cache.get(spm, assets_id_default)\n fields = [\n field for field in Asset._meta.fields\n if field.name not in [\n 'date_created'\n ]\n ]\n filename = 'assets-{}.csv'.format(\n timezone.localtime(timezone.now()).strftime('%Y-%m-%d_%H-%M-%S'))\n response = HttpResponse(content_type='text/csv')\n response['Content-Disposition'] = 'attachment; filename=\"%s\"' % filename\n response.write(codecs.BOM_UTF8)\n assets = Asset.objects.filter(id__in=assets_id)\n writer = csv.writer(response, dialect='excel',\n quoting=csv.QUOTE_MINIMAL)\n\n header = [field.verbose_name for field in fields]\n header.append(_('Asset groups'))\n writer.writerow(header)\n\n for asset in assets:\n groups = ','.join([group.name for group in asset.groups.all()])\n data = [getattr(asset, field.name) for field in fields]\n data.append(groups)\n writer.writerow(data)\n return response\n\n def post(self, request, *args, **kwargs):\n try:\n assets_id = json.loads(request.body).get('assets_id', [])\n except ValueError:\n return HttpResponse('Json object not valid', status=400)\n spm = uuid.uuid4().hex\n cache.set(spm, assets_id, 300)\n url = reverse_lazy('assets:asset-export') + '?spm=%s' % spm\n return JsonResponse({'redirect': url})\n\n\nclass BulkImportAssetView(AdminUserRequiredMixin, JSONResponseMixin, FormView):\n form_class = forms.FileForm\n\n def form_valid(self, form):\n f = form.cleaned_data['file']\n det_result = chardet.detect(f.read())\n f.seek(0) # reset file seek index\n file_data = f.read().decode(det_result['encoding']).strip(codecs.BOM_UTF8.decode())\n csv_file = StringIO(file_data)\n reader = csv.reader(csv_file)\n csv_data = [row for row in reader]\n fields = [\n field for field in Asset._meta.fields\n if 
field.name not in [\n 'date_created'\n ]\n ]\n header_ = csv_data[0]\n mapping_reverse = {field.verbose_name: field.name for field in fields}\n mapping_reverse[_('Asset groups')] = 'groups'\n attr = [mapping_reverse.get(n, None) for n in header_]\n if None in attr:\n data = {'valid': False,\n 'msg': 'Must be same format as '\n 'template or export file'}\n return self.render_json_response(data)\n\n created, updated, failed = [], [], []\n assets = []\n for row in csv_data[1:]:\n if set(row) == {''}:\n continue\n\n asset_dict = dict(zip(attr, row))\n id_ = asset_dict.pop('id', 0)\n\n try:\n id_ = int(id_)\n except ValueError:\n id_ = 0\n\n asset = get_object_or_none(Asset, id=id_)\n for k, v in asset_dict.items():\n if k == 'idc':\n v = get_object_or_none(IDC, name=v)\n elif k == 'is_active':\n v = bool(v)\n elif k == 'admin_user':\n v = get_object_or_none(AdminUser, name=v)\n elif k in ['port', 'cabinet_pos', 'cpu_count', 'cpu_cores']:\n try:\n v = int(v)\n except ValueError:\n v = 0\n elif k == 'groups':\n groups_name = v.split(',')\n v = AssetGroup.objects.filter(name__in=groups_name)\n else:\n continue\n asset_dict[k] = v\n\n if not asset:\n try:\n groups = asset_dict.pop('groups')\n if len(Asset.objects.filter(hostname=asset_dict.get('hostname'))):\n raise Exception(_('already exists'))\n asset = Asset.objects.create(**asset_dict)\n asset.groups.set(groups)\n created.append(asset_dict['hostname'])\n assets.append(asset)\n except Exception as e:\n failed.append('%s: %s' % (asset_dict['hostname'], str(e)))\n else:\n for k, v in asset_dict.items():\n if k == 'groups':\n asset.groups.set(v)\n continue\n if v:\n setattr(asset, k, v)\n try:\n asset.save()\n updated.append(asset_dict['hostname'])\n except Exception as e:\n failed.append('%s: %s' % (asset_dict['hostname'], str(e)))\n\n if assets:\n update_assets_hardware_info.delay([asset._to_secret_json() for asset in assets])\n\n\n data = {\n 'created': created,\n 'created_info': 'Created {}'.format(len(created)),\n 'updated': updated,\n 'updated_info': 'Updated {}'.format(len(updated)),\n 'failed': failed,\n 'failed_info': 'Failed {}'.format(len(failed)),\n 'valid': True,\n 'msg': 'Created: {}. 
Updated: {}, Error: {}'.format(\n len(created), len(updated), len(failed))\n }\n return self.render_json_response(data)\n\n\n","sub_path":"jumpserver-dev/apps/assets/views/asset.py","file_name":"asset.py","file_ext":"py","file_size_in_byte":12568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"138772373","text":"#Parameters that do not change\ndays = 300\npop = 1.377e6\na_weight = 0.8\n#K*gamma = 3\n\n#Parameters that do change (possible values in list)\nalpha_list = [i*1e-4 for i in range(1,8)]\nbeta_list = [i*1e-8 for i in range(10,30,2)]\ngamma_list = [i*1e-4 for i in range(10,40,2)]\nL_list = [i for i in range(6,13)]\nK_gamma_list = [i*0.1 for i in range(10,105,5)]\nimmune_start_list = [i*0.01 for i in range(0,55,5)]\n\n'''\nHelp functions\n'''\ndef read_input(input_file='input_data/input.csv'):\n f = open(input_file,'r')\n content = f.read().strip().split('\\n')\n #content = f.readlines()\n a_inp = []\n for x in content[0].split(';'):\n out = ''\n for y in x:\n if y.isdigit(): out += y\n a_inp.append(float(out))\n d_inp = []\n for x in content[1].split(';'):\n out = ''\n for y in x:\n if y.isdigit(): out += y\n d_inp.append(float(out))\n #a_inp = list(map(lambda x:float(x.strip()),content[0].split(';')))\n #d_inp = list(map(lambda x:float(x.strip()),content[1].split(';')))\n days_inp = len(a_inp)\n assert len(a_inp) == len(d_inp), 'Unequal length of deaths and sick people in input.'\n f.close()\n return a_inp,d_inp\n\ndef plot(i,d,s,r,a,a_inp,d_inp):\n import matplotlib.pyplot as plt\n plt.figure()\n plt.semilogy(i,label='Immuna')\n plt.semilogy(d,label='Döda')\n plt.semilogy(s,label='Sjuka')\n plt.semilogy(r,label='Risk')\n plt.semilogy(a,label='Inlagda')\n plt.semilogy(a_inp,label='Inladga, verklig data')\n plt.semilogy(d_inp,label='Döda, verklig data')\n plt.legend()\n plt.title(\"Antal personer i olika kategorier över tid\")\n plt.xlabel(\"Dagar efter 10/3\")\n plt.ylabel(\"Antal individer i respektive grupp (logaritmisk skala)\")\n plt.show()\n\ndef mse(a,b):\n assert len(a) == len(b), \"MSE needs vectors of equal length.\"\n return sum([(a[i]-b[i])**2 for i in range(len(a))])/len(a)\n\ndef exp_mse(a,b,w):\n weights = [(1/w)**i for i in range(len(a))]\n s = sum(weights)\n weights = [w/s for w in weights]\n return sum([weights[i]*(a[i]-b[i])**2 for i in range(len(a))])\n\ndef score(a_inp,a,d_inp,d,a_weight=a_weight,exp_weight=1):\n return a_weight*exp_mse(a,a_inp,exp_weight) + (1-a_weight)*exp_mse(d,d_inp,exp_weight)\n # return a_weight*mse(a_inp,a) + (1-a_weight)*mse(d_inp,d)\n\ndef run_simul(alpha,beta,gamma,L,K,i_start,stop=None,bugfix=False):\n num_days = stop if stop != None else days\n s = [K]\n if bugfix: to_rem = [K/L for _ in range(L)]\n else: to_rem = [0]*L\n i,d,r = [pop*i_start],[0],[pop-s[0]-pop*i_start]\n a = [gamma*s[0]]\n for day in range(1,num_days+1):\n i.append(i[-1] + (1-alpha) * (to_rem[day-1] if day-L < 0 else (beta * r[day-L] * s[day-L])))\n d.append(d[-1] + alpha * (to_rem[day-1] if day-L < 0 else (beta * r[day-L] * s[day-L])))\n s.append(s[-1] + beta * r[-1] * s[-1] - (to_rem[day-1] if day-L < 0 else (beta * r[day-L] * s[day-L])))\n r.append(r[-1] - beta * r[-1] * s[-1])\n a.append(gamma * s[day])\n return s,i,d,r,a\n\ndef get_best_parameters(a_inp,d_inp,days=None,log_file='log_parameter_tidsserie.csv',exp_weight=1,bugfix=False):\n if days == None: days = len(a_inp)\n try:\n f = open(log_file,'r')\n content = f.read().strip().split('\\n')\n f.close()\n except:\n content = []\n results = []\n for x in 
content:\n if len(x) < 10: continue\n results.append(tuple(map(float,x.split(';'))))\n if results[-1][0] == days:\n return results[-1][1],results[-1][2],results[-1][3],results[-1][4],results[-1][5],results[-1][6],results[-1][7]\n no_best_models = 1\n\n BEST = (10**20,None)\n for beta in beta_list:\n for gamma in gamma_list:\n for kgamma in K_gamma_list:\n K = kgamma/gamma\n for L in L_list:\n for alpha in alpha_list:\n for immune_start in immune_start_list:\n s,i,d,r,a = run_simul(alpha,beta,gamma,L,K,immune_start,stop=days+4,bugfix=bugfix)\n sc = score(a_inp[:days],a[:days],d_inp[:days],d[:days],a_weight=a_weight,exp_weight=exp_weight)\n BEST = min(BEST,(sc,[alpha,beta,gamma,K,L,kgamma,immune_start]))\n\n sc,vals = BEST\n alpha,beta,gamma,K,L,kgamma,immune_start = vals[0],vals[1],vals[2],vals[3],vals[4],vals[5],vals[6]\n results.append((days,alpha,beta,gamma,K,L,kgamma,immune_start,sc))\n results.sort()\n f = open(log_file,'w')\n for x in results:\n f.write('{}'.format(int(x[0])))\n for y in x[1:]:\n f.write(';{}'.format(y))\n f.write('\\n')\n f.close()\n return alpha,beta,gamma,K,L,kgamma,immune_start\n","sub_path":"CoronaPrediktionHöst/corona.py","file_name":"corona.py","file_ext":"py","file_size_in_byte":4713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"400411542","text":"#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport rg\n\n\nclass Robot:\n\n def act(self, game):\n\n # field types: 'spawn', 'normal', 'obstacle', 'invalid'\n # rg.loc_types() - returns the type of a field\n # the function returns True if \"poz\" is a spawn point\n def czy_wejscie(poz):\n if 'spawn' in rg.loc_types(poz):\n return True\n return False\n # return ['guard']\n # return ['suicide']\n # return ['move', (4,5)]\n # return ['attack', (5,4)]\n # ilu_wrogow = 0\n lista_wrogow_obok = []\n\n for poz, robot in game.robots.iteritems():\n if robot.player_id != self.player_id: # enemy recognition\n if rg.dist(poz, self.location) <= 1:\n lista_wrogow_obok.append(poz)\n # ilu_wrogow += 1\n # return ['attack', poz]\n print(lista_wrogow_obok)\n\n # rg.dist() - distance between two locations\n # rg.toward() - shortest path between two locations\n\n if len(lista_wrogow_obok) > 2 and self.hp < 27:\n return ['suicide']\n elif len(lista_wrogow_obok):\n return ['attack', lista_wrogow_obok[0]]\n\n print(game.robots)\n\n if self.location == rg.CENTER_POINT:\n return ['guard']\n\n # go towards the center of the board, the default move\n if czy_wejscie(self.location):\n return ['move', rg.toward(self.location, rg.CENTER_POINT)]\n\n","sub_path":"robot01.py","file_name":"robot01.py","file_ext":"py","file_size_in_byte":1502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"2859332","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\n@version: v1.0\n@author: jayzhen\n@license: Apache Licence \n@email: jayzhen_testing@163.com\n@software: PyCharm\n@file: chart_report\n@time: 2018/4/11 17:27\n\"\"\"\n\nimport numpy as np\nfrom matplotlib import pyplot as plt\nfrom matplotlib import animation\n\npath = \"Nccut_TraceFile.log\"\nfile = open(path, 'r')\n\nAMat = [];BMat = [];XMat = [];YMat = [];ZMat = [];\n\nfor line in file.readlines():\n lineArr = line.strip().split()\n AMat.append(int(lineArr[0]))\n BMat.append(int(lineArr[1]))\n XMat.append(int(lineArr[2]))\n YMat.append(int(lineArr[3]))\n ZMat.append(int(lineArr[4]))\n\nfig = plt.figure()\naxA = fig.add_subplot(5, 1, 1, xlim=(0, 0.2), ylim=(0, 40))\naxB = 
fig.add_subplot(5, 1, 2, xlim=(0, 0.2), ylim=(0, 40))\naxX = fig.add_subplot(5, 1, 3, xlim=(0, 0.2), ylim=(0, 200))\naxY = fig.add_subplot(5, 1, 4, xlim=(0, 0.2), ylim=(0, 200))\naxZ = fig.add_subplot(5, 1, 5, xlim=(0, 0.2), ylim=(0, 200))\n\nlineA, = axA.plot([], [], lw=1)\nlineB, = axB.plot([], [], lw=1)\nlineX, = axX.plot([], [], lw=1)\nlineY, = axY.plot([], [], lw=1)\nlineZ, = axZ.plot([], [], lw=1)\n\n\ndef init():\n lineA.set_data([], [])\n lineB.set_data([], [])\n lineX.set_data([], [])\n lineY.set_data([], [])\n lineZ.set_data([], [])\n return lineA, lineB, lineX, lineY, lineZ\n\n\ndef animate(i):\n t = np.linspace(0, 0.2, 10)\n yA = AMat[i:10 + i]\n lineA.set_data(t, yA)\n\n yB = BMat[i:10 + i]\n lineB.set_data(t, yB)\n\n yX = XMat[i:10 + i]\n lineX.set_data(t, yX)\n\n yY = YMat[i:10 + i]\n lineY.set_data(t, yY)\n\n yZ = ZMat[i:10 + i]\n lineZ.set_data(t, yZ)\n\n return lineA, lineB, lineX, lineY, lineZ\n\n\nanim1 = animation.FuncAnimation(fig, animate, init_func=init, frames=len(XMat) - 10, interval=2)\nplt.show()\n","sub_path":"app/api_v1_2/reporter/chart_report.py","file_name":"chart_report.py","file_ext":"py","file_size_in_byte":1772,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"595812384","text":"# 'bnn' / 'vanilla'\nmodel_type = 'bnn'\n# 'dresden' / 'RAISE'\ndatabase = 'dresden'\neven_database = False\nimage_root = 'data/'\n\nif database == 'dresden':\n # 6 models\n # brands = ['Canon', 'Canon', 'Nikon', 'Nikon', 'Sony', 'Agfa']\n # models = ['Ixus70', 'Ixus55', 'D200', 'D70', 'DSC-H50', 'DC-733s']\n # 5 models\n brands = ['Canon', 'Canon', 'Nikon', 'Nikon', 'Sony']\n models = ['Ixus70', 'Ixus55', 'D200', 'D70', 'DSC-H50']\n # 4 models\n # brands = ['Canon', 'Canon', 'Nikon', 'Nikon']\n # models = ['Ixus70', 'Ixus55', 'D200', 'D70']\n # 3 models\n # brands = []\n # models = []\n\n unseen_brands = ['Agfa', 'Canon', 'Sony', 'Samsung', 'Nikon']\n unseen_models = ['DC-830i', 'PowerShotA640', 'DSC-W170', 'L74wide', 'CoolPixS710']\n ds_csv = 'data/dresden.csv'\n ds_image_dir = image_root + 'dresden'\n patch_dir = image_root + ('even_dresden_base'\n if even_database else 'dresden_base')\n unseen_dir = image_root + 'dresden_unseen'\n kaggle_dir = image_root + 'kaggle_unseen'\n print_fig_step = 50\n\n\nelif database == 'RAISE':\n # RAISE brand model\n brands = ['Nikon', 'Nikon']\n models = ['D90', 'D7000']\n # unseen can be changed here\n unseen_brands = ['Nikon']\n unseen_models = ['D40']\n ds_csv = 'data/RAISE_2k.csv'\n ds_image_dir = image_root + 'RAISE'\n patch_dir = image_root + ('even_RAISE_base' \n if even_database else 'RAISE_base')\n unseen_dir = image_root + 'RAISE_unseen'\n print_fig_step = 10\n\nbrand_models = ['_'.join([b, m]) for (b, m) in zip(brands, models)]\nunseen_brand_models = ['_'.join([b, m]) for (b, m) in zip(unseen_brands, unseen_models)]\n\nIMG_HEIGHT = 256\nIMG_WIDTH = 256\npatch_num = 25\npatch_span = 256 * 5\nadaptive_span = True\nBATCH_SIZE = 64\nNUM_EPOCHS = 100\nNUM_CLASSES = len(brand_models)\nnum_monte_carlo = 10\npatience = 5\n# restore training\nrestore = False\n\nHParams = {'init_learning_rate':0.001,\n 'init_prior_scale_mean':-1,\n 'init_prior_scale_std':.1, \n 'std_prior_scale':1.5}\n\n","sub_path":"params.py","file_name":"params.py","file_ext":"py","file_size_in_byte":2044,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"232860419","text":"# -*- coding: UTF-8 -*-\nimport os\nimport time\nimport tkinter\nimport datetime\nfrom tkinter 
import ttk\nfrom threading import Thread\nimport verify\nimport library\n\n\nclass Main_win(object):\n def __init__(self):\n self.windows_width = 1920\n self.windows_height = 1080\n self.table_lines = 25\n self.channel_list = [['169.254.1.21', 7600],\n ['169.254.1.22', 7600],\n ['169.254.1.23', 7600],\n ['169.254.1.24', 7600]]\n self.button_color = ['systemButtonface', 'royalblue']\n self.time_flag = -1\n self.time_color = library.colors\n self.channel = self.channel_list[3][0]\n self.port = self.channel_list[3][1]\n self.psu = ''\n self.option = ''\n self.selected_ch = []\n self.info = ''\n self.psu1_flag = 0\n self.psu2_flag = 0\n self.vol_flag = 0\n self.cur_flag = 0\n self.res_flag = 0\n self.run_flag = [False, 'None']\n self.client = None\n self.thread = None\n self.time_thread = None\n self.run_timer = -1\n self.dir = None\n self.log_file = None\n self.log_file_name = ''\n self.win = tkinter.Tk()\n\n # 主窗口\n self.win.title('verify & test')\n self.win_width = 1205\n self.win_height = 711\n self.win.geometry(str(self.win_width) + 'x'\n + str(self.win_height) + '+'\n + str(int((self.windows_width-self.win_width)/2)) + '+'\n + str(int((self.windows_height-self.win_height-80)/2)))\n self.win.resizable(width=False, height=False)\n self.win.iconbitmap('.\\\\verify.ico')\n\n # 菜单条\n self.menubar = tkinter.Menu(self.win)\n self.win.config(menu=self.menubar)\n self.menu1 = tkinter.Menu(self.menubar, tearoff=False)\n self.menu1.add_command(label='打开log目录', command=lambda: os.startfile(os.getcwd()+'\\\\'+self.dir.replace('/', '\\\\')))\n # 添加分隔线\n self.menu1.add_separator()\n self.menu1.add_command(label='退出', command=lambda: self.win.quit())\n # 向菜单条上添加菜单选项\n self.menubar.add_cascade(label='文件', menu=self.menu1)\n\n # option\n self.option_height = 115\n self.frame_option = tkinter.Frame(self.win, width=self.win.winfo_width(), height=self.option_height)\n self.frame_option.pack(side=tkinter.TOP)\n\n # scale\n self.frame_scale = tkinter.Frame(self.win, width=self.win.winfo_width(), height=25)\n self.frame_scale.pack()\n\n # table\n self.frame_table = tkinter.Frame(self.win, width=self.win.winfo_width(),\n height=self.win.winfo_height() - (self.option_height+105))\n self.frame_table.pack()\n\n # status\n self.frame_status = tkinter.Frame(self.win, width=self.win.winfo_width(), height=24)\n self.frame_status.pack(side=tkinter.BOTTOM)\n\n # option title\n self.label_title = tkinter.Label(self.frame_option, bitmap='gray12', compound=tkinter.CENTER, width=115,\n height=115, text='B444 FCT\\n校准 测试\\n应用程序', font=('Arial', 18),\n bg='DarkKhaki', fg='DarkRed')\n self.label_title.place(x=0, y=0)\n\n # option time\n self.label_time = tkinter.Label(self.frame_option, bitmap='gray12', compound=tkinter.CENTER, width=423,\n height=115, font=('Helvetica', 20), text='准备就绪')\n self.label_time.place(x=121, y=0)\n\n # option channel\n self.frame_channel = tkinter.Frame(self.frame_option, width=140, height=100)\n self.frame_channel.place(x=self.win_width-655, y=6)\n\n # option PSU1\n self.button_psu1 = tkinter.Button(self.frame_option,\n bitmap='gray12',\n compound=tkinter.BOTTOM,\n height=100,\n width=45,\n font=('Fixdsys', 16, 'bold'),\n text='PSU1',\n bd=4,\n bg=self.button_color[self.psu1_flag],\n command=self.psu1_click)\n self.button_psu1.place(x=self.win_width-522, y=0)\n\n # option PSU2\n self.button_psu2 = tkinter.Button(self.frame_option,\n bitmap='gray12',\n compound=tkinter.BOTTOM,\n height=100,\n width=45,\n font=('Fixdsys', 16, 'bold'),\n text='PSU2',\n bd=4,\n bg=self.button_color[self.psu2_flag],\n 
command=self.psu2_click)\n self.button_psu2.place(x=self.win_width-462, y=0)\n\n # option voltage\n self.button_vol = tkinter.Button(self.frame_option,\n bitmap='gray12',\n compound=tkinter.LEFT,\n height=24,\n width=100,\n font=('Fixdsys', 16, 'bold'),\n text='电压',\n bd=4,\n bg=self.button_color[self.vol_flag],\n command=self.vol_click)\n self.button_vol.place(x=self.win_width-402, y=0)\n\n # option current\n self.button_cur = tkinter.Button(self.frame_option,\n bitmap='gray12',\n compound=tkinter.LEFT,\n height=24,\n width=100,\n font=('Fixdsys', 16, 'bold'),\n text='电流',\n bd=4,\n bg=self.button_color[self.cur_flag],\n command=self.cur_click)\n self.button_cur.place(x=self.win_width-402, y=38)\n\n # option resistance\n self.button_res = tkinter.Button(self.frame_option,\n bitmap='gray12',\n compound=tkinter.LEFT,\n height=24,\n width=100,\n font=('Fixdsys', 16, 'bold'),\n text='电阻',\n bd=4,\n state=tkinter.DISABLED,\n bg=self.button_color[self.res_flag],\n command=self.res_click)\n self.button_res.place(x=self.win_width-402, y=76)\n\n # option ch\n self.frame_cur_ch = tkinter.Frame(self.frame_option, width=110, height=165)\n self.frame_cur_ch.place(x=self.win_width-290, y=5)\n\n # option verify\n self.button_verify = tkinter.Button(self.frame_option,\n bitmap='gray12',\n height=42,\n width=100,\n compound=tkinter.CENTER,\n text='校准',\n font=('Fixdsys', 18, 'bold'),\n bd=5,\n command=self.verify)\n self.button_verify.place(x=self.win_width-192, y=0)\n\n # option test\n self.button_test = tkinter.Button(self.frame_option,\n bitmap='gray12',\n height=42,\n width=100,\n compound=tkinter.CENTER,\n text='测试',\n font=('Fixdsys', 18, 'bold'),\n bd=5,\n command=self.test)\n self.button_test.place(x=self.win_width-192, y=58)\n\n # option abrot\n self.button_abrot = tkinter.Button(self.frame_option,\n bitmap='gray12',\n compound=tkinter.CENTER,\n height=45,\n width=58,\n font=('Fixdsys', 18, 'bold'),\n text='中止',\n bd=3,\n command=self.stop)\n self.button_abrot.place(x=self.win_width-75, y=0)\n\n # option restart\n self.button_restart = tkinter.Button(self.frame_option,\n bitmap='gray12',\n compound=tkinter.CENTER,\n height=18,\n width=58,\n font=('Fixdsys', 10, 'bold'),\n text='重启进程',\n bd=3,\n command=self.restart_click)\n self.button_restart.place(x=self.win_width-75, y=58)\n\n # option reboot\n self.button_reboot = tkinter.Button(self.frame_option,\n bitmap='gray12',\n compound=tkinter.CENTER,\n height=18,\n width=58,\n font=('Fixdsys', 10, 'bold'),\n text='重启ZYNQ',\n bd=3,\n command=self.reboot_click)\n self.button_reboot.place(x=self.win_width-75, y=86)\n\n # 一组单选框要绑定同一个变量\n self.r = tkinter.IntVar()\n self.aisle_1 = tkinter.Radiobutton(self.frame_channel,\n text='169.254.1.21:7600',\n variable=self.r,\n value=0,\n command=self.get_channel)\n self.aisle_1.place(x=5, y=1)\n self.aisle_2 = tkinter.Radiobutton(self.frame_channel,\n text='169.254.1.22:7600',\n variable=self.r,\n value=1,\n command=self.get_channel)\n self.aisle_2.place(x=5, y=27)\n self.aisle_3 = tkinter.Radiobutton(self.frame_channel,\n text='169.254.1.23:7600',\n variable=self.r,\n value=2,\n command=self.get_channel)\n self.aisle_3.place(x=5, y=53)\n self.aisle_4 = tkinter.Radiobutton(self.frame_channel,\n text='169.254.1.24:7600',\n variable=self.r,\n value=3,\n command=self.get_channel)\n self.aisle_4.place(x=5, y=79)\n\n self.ch1 = tkinter.BooleanVar()\n self.channel_1 = tkinter.Checkbutton(self.frame_cur_ch,\n text='ch1 0.1R',\n state=tkinter.DISABLED,\n variable=self.ch1,\n command=self.ch_select_1)\n 
self.channel_1.place(x=5, y=0)\n self.ch2 = tkinter.BooleanVar()\n self.channel_2 = tkinter.Checkbutton(self.frame_cur_ch,\n text='ch2 2R',\n state=tkinter.DISABLED,\n variable=self.ch2,\n command=self.ch_select_2)\n self.channel_2.place(x=5, y=20)\n self.ch3 = tkinter.BooleanVar()\n self.channel_3 = tkinter.Checkbutton(self.frame_cur_ch,\n text='ch3 200R',\n state=tkinter.DISABLED,\n variable=self.ch3,\n command=self.ch_select_3)\n self.channel_3.place(x=5, y=40)\n self.ch4 = tkinter.BooleanVar()\n self.channel_4 = tkinter.Checkbutton(self.frame_cur_ch,\n text='ch4 10000R',\n state=tkinter.DISABLED,\n variable=self.ch4,\n command=self.ch_select_4)\n self.channel_4.place(x=5, y=60)\n self.ch_all = tkinter.BooleanVar()\n self.channel_all = tkinter.Checkbutton(self.frame_cur_ch,\n text='全选',\n variable=self.ch_all,\n state=tkinter.DISABLED,\n command=self.ch_select_all)\n self.channel_all.place(x=5, y=80)\n\n # 创建一个背景色为白色的矩形\n self.canvas = tkinter.Canvas(self.frame_scale, width=703, height=19, bg=\"white\")\n # 创建一个矩形外边框(距离左边,距离顶部,矩形宽度,矩形高度),线型宽度,颜色\n self.out_line = self.canvas.create_rectangle(2, 2, 703, 20, width=1, outline=\"black\")\n self.canvas.place(x=1, y=1)\n self.fill_line = self.canvas.create_rectangle(2, 2, 0, 20, width=0, fill=\"blue\")\n\n # scale label\n self.scale_label = tkinter.Text(self.frame_scale, height=1, width=61, font=(16,))\n self.scale_label.place(x=self.win_width-495, y=2)\n\n # table\n self.table = ttk.Treeview(self.frame_table, show=\"headings\", height=self.table_lines)\n self.table.pack(side=tkinter.LEFT, fill=tkinter.Y)\n self.vbar = ttk.Scrollbar(self.frame_table, orient=tkinter.VERTICAL, command=self.table.yview)\n self.vbar.pack(side=tkinter.RIGHT, fill=tkinter.Y)\n # 定义树形结构与滚动条\n self.table.config(yscrollcommand=self.vbar.set)\n\n # status message\n self.status_message = tkinter.Text(self.frame_status, height=1, width=139, font=(16,))\n self.status_message.place(x=2, y=3)\n\n # status log\n self.status_log = tkinter.Button(self.frame_status,\n bitmap='gray12',\n compound=tkinter.RIGHT,\n height=14,\n width=73,\n font=('Fixdsys', 14, 'bold'),\n text='log',\n command=self.open_log)\n self.status_log.place(x=self.win_width-84, y=2)\n\n self.set_table()\n self.init_dir()\n self.win.mainloop()\n\n def set_table(self):\n self.table['columns'] = ('1', '2', '3', '4', '5', '6', '7', '8', '9', '10', '11', '12', '13')\n self.table.column('1', anchor='w', width=50)\n self.table.column('2', anchor='w', width=150)\n self.table.column('3', anchor='w', width=70)\n self.table.column('4', anchor='w', width=80)\n self.table.column('5', anchor='w', width=70)\n self.table.column('6', anchor='w', width=80)\n self.table.column('7', anchor='w', width=70)\n self.table.column('8', anchor='w', width=80)\n self.table.column('9', anchor='w', width=100)\n self.table.column('10', anchor='w', width=142)\n self.table.column('11', anchor='w', width=100)\n self.table.column('12', anchor='w', width=142)\n self.table.column('13', anchor='w', width=51) # 1185\n # 设置表头\n self.table.heading('1', text='Item')\n self.table.heading('2', text='Verify Name')\n self.table.heading('3', text='dac addr')\n self.table.heading('4', text='dac')\n self.table.heading('5', text='adc addr')\n self.table.heading('6', text='adc')\n self.table.heading('7', text='ref addr')\n self.table.heading('8', text='ref')\n self.table.heading('9', text='sub1(adc-ref)')\n self.table.heading('10', text='rate1(sub1/dac)(‰)')\n self.table.heading('11', text='sub2(adc-dac)')\n self.table.heading('12', 
text='rate2(sub2/dac)(‰)')\n self.table.heading('13', text='Judge')\n\n self.r.set(3)\n\n def init_dir(self):\n now = str(datetime.datetime.now())[:-7]\n index = now.find(' ')\n date = now[:index]\n date = date.replace('-', '')\n self.dir = 'log/' + date\n if not os.path.exists(self.dir):\n os.mkdir(self.dir)\n\n def create_dir(self, mode):\n client = library.connect(self.channel, self.port, self.mes_status, [True])\n if client is False:\n return\n client.send('[-1]eeprom string read(Uart_board, at16, 0x64, 12)\\r\\n'.encode('utf-8'))\n response = client.recv(1024).decode('utf-8')\n client.close()\n start = response.find('(\"') + 2\n end = response.find('\";DONE')\n fixture_id = response[start:end]\n temp_dir = fixture_id + '_' + mode\n if fixture_id in self.dir and mode not in self.dir:\n end = self.dir.rfind('/')+1\n self.dir = self.dir[:end] + temp_dir\n if not os.path.exists(self.dir):\n os.mkdir(self.dir)\n elif fixture_id not in self.dir:\n if fixture_id[:3] in self.dir:\n end = self.dir.rfind('/')\n self.dir = self.dir[:end]\n self.dir = self.dir + '/' + temp_dir\n if not os.path.exists(self.dir):\n os.mkdir(self.dir)\n\n def get_channel(self):\n self.channel = self.channel_list[self.r.get()][0]\n self.port = self.channel_list[self.r.get()][1]\n self.update_info()\n\n def get_button_state(self):\n self.selected_ch.clear()\n if self.psu1_flag:\n self.psu = 'PSU1'\n self.button_res['state'] = tkinter.NORMAL\n elif self.psu2_flag:\n self.psu = 'PSU2'\n self.button_res['state'] = tkinter.DISABLED\n else:\n self.button_res['state'] = tkinter.DISABLED\n self.psu = ''\n if self.vol_flag:\n self.option = '电压'\n self.ch1.set(False)\n self.ch2.set(False)\n self.ch3.set(False)\n self.ch4.set(False)\n self.ch_all.set(False)\n self.channel_1['state'] = tkinter.DISABLED\n self.channel_2['state'] = tkinter.DISABLED\n self.channel_3['state'] = tkinter.DISABLED\n self.channel_4['state'] = tkinter.DISABLED\n self.channel_all['state'] = tkinter.DISABLED\n elif self.cur_flag:\n self.option = '电流'\n self.channel_1['state'] = tkinter.NORMAL\n self.channel_2['state'] = tkinter.NORMAL\n self.channel_3['state'] = tkinter.NORMAL\n self.channel_4['state'] = tkinter.NORMAL\n self.channel_all['state'] = tkinter.NORMAL\n elif self.res_flag:\n self.option = '电阻'\n self.channel_1['state'] = tkinter.NORMAL\n self.channel_2['state'] = tkinter.NORMAL\n self.channel_3['state'] = tkinter.NORMAL\n self.channel_4['state'] = tkinter.DISABLED\n self.ch4.set(False)\n self.channel_all['state'] = tkinter.NORMAL\n else:\n self.option = ''\n if self.ch1.get():\n self.selected_ch.append(1)\n if len(self.selected_ch) == 4:\n self.ch_all.set(True)\n if self.ch2.get():\n self.selected_ch.append(2)\n if len(self.selected_ch) == 4:\n self.ch_all.set(True)\n if self.ch3.get():\n self.selected_ch.append(3)\n if len(self.selected_ch) == 4:\n self.ch_all.set(True)\n if self.ch4.get() and self.channel_4['state'] == tkinter.NORMAL:\n self.selected_ch.append(4)\n if len(self.selected_ch) == 4:\n self.ch_all.set(True)\n if self.ch_all.get():\n if self.res_flag:\n self.selected_ch = [1, 2, 3]\n elif self.cur_flag and self.psu1_flag:\n self.selected_ch = [1, 2, 3, 4]\n self.update_info()\n\n def psu1_click(self):\n self.psu1_flag = (self.psu1_flag+1) % 2\n if self.psu1_flag:\n self.psu2_flag = 0\n self.button_psu2['bg'] = self.button_color[0]\n if self.cur_flag:\n self.ch1.set(True)\n self.ch2.set(True)\n self.ch3.set(True)\n self.ch4.set(True)\n self.button_psu1['bg'] = self.button_color[self.psu1_flag]\n self.get_button_state()\n\n def 
psu2_click(self):\n self.psu2_flag = (self.psu2_flag+1) % 2\n if self.psu2_flag:\n self.psu1_flag = 0\n self.button_psu1['bg'] = self.button_color[0]\n self.res_flag = 0\n self.button_res['bg'] = self.button_color[0]\n if self.cur_flag:\n self.ch1.set(True)\n self.ch2.set(True)\n self.ch3.set(True)\n self.ch4.set(True)\n self.button_psu2['bg'] = self.button_color[self.psu2_flag]\n self.get_button_state()\n\n def vol_click(self):\n self.vol_flag = (self.vol_flag+1) % 2\n if self.vol_flag:\n self.cur_flag = 0\n self.button_cur['bg'] = self.button_color[0]\n self.res_flag = 0\n self.button_res['bg'] = self.button_color[0]\n self.ch_all.set(False)\n self.button_vol['bg'] = self.button_color[self.vol_flag]\n self.get_button_state()\n\n def cur_click(self):\n self.cur_flag = (self.cur_flag+1) % 2\n if self.cur_flag:\n self.vol_flag = 0\n self.button_vol['bg'] = self.button_color[0]\n self.res_flag = 0\n self.button_res['bg'] = self.button_color[0]\n self.ch1.set(True)\n self.ch2.set(True)\n self.ch3.set(True)\n self.ch4.set(True)\n self.button_cur['bg'] = self.button_color[self.cur_flag]\n self.get_button_state()\n\n def res_click(self):\n self.res_flag = (self.res_flag+1) % 2\n if self.res_flag:\n self.vol_flag = 0\n self.button_vol['bg'] = self.button_color[0]\n self.cur_flag = 0\n self.button_cur['bg'] = self.button_color[0]\n if self.ch1.get() and self.ch2.get() and self.ch3.get():\n self.ch_all.set(True)\n self.button_res['bg'] = self.button_color[self.res_flag]\n self.get_button_state()\n\n def restart_click(self):\n client = library.connect(self.channel, self.port, self.mes_status, [True])\n client.send('[0]RESTART()\\r\\n'.encode('utf-8'))\n i = 1\n while True:\n try:\n print('第%d次尝试连接cmd进程......' % i)\n client = library.connect(self.channel, self.port, self.mes_status, [True])\n client.send('[]SYS_DEFAULT()\\r\\n'.encode('utf-8'))\n response = client.recv(1024).decode('utf-8', 'ignore')\n if 'DONE' in response:\n break\n except Exception as e:\n time.sleep(0.5)\n i += 1\n self.mes_status('重启' + self.channel + ':' + str(self.port) + ' cmd进程成功!')\n return\n\n def reboot_click(self):\n client = library.connect(self.channel, self.port, self.mes_status, [True])\n client.send('[0]REBOOT()\\r\\n'.encode('utf-8'))\n time.sleep(3)\n i = 1\n while True:\n try:\n print('第%d次尝试连接ZYNQ板......' 
% i)\n client = library.connect(self.channel, self.port, self.mes_status, [True])\n client.send('[]SYS_DEFAULT()\\r\\n'.encode('utf-8'))\n response = client.recv(1024).decode('utf-8', 'ignore')\n if 'DONE' in response:\n break\n except Exception as e:\n time.sleep(1)\n i += 1\n self.mes_status('重启' + self.channel + ' ZYNQ板成功!')\n return\n\n def ch_select_1(self):\n if self.ch1.get() is False:\n self.ch_all.set(False)\n self.get_button_state()\n\n def ch_select_2(self):\n if self.ch2.get() is False:\n self.ch_all.set(False)\n self.get_button_state()\n\n def ch_select_3(self):\n if self.ch3.get() is False:\n self.ch_all.set(False)\n self.get_button_state()\n\n def ch_select_4(self):\n if self.ch4.get() is False:\n self.ch_all.set(False)\n self.get_button_state()\n\n def ch_select_all(self):\n if self.ch_all.get() is False:\n self.ch1.set(False)\n self.ch2.set(False)\n self.ch3.set(False)\n self.ch4.set(False)\n else:\n self.ch1.set(True)\n self.ch2.set(True)\n self.ch3.set(True)\n if self.channel_4['state'] == tkinter.NORMAL:\n self.ch4.set(True)\n self.get_button_state()\n\n def update_info(self):\n # 清除Text中的所有内容\n self.scale_label.delete(0.0, tkinter.END)\n self.info = self.channel + ':' + str(self.port) + ' 》 ' + self.psu + ' 》 ' + self.option\n for i in self.selected_ch:\n self.info += ' 》 ch' + str(i)\n self.scale_label.insert(tkinter.INSERT, self.info)\n\n def mes_status(self, mes):\n self.status_message.delete(0.0, tkinter.END)\n self.status_message.insert(tkinter.INSERT, mes)\n\n def message(self, mes):\n self.mes_status(mes)\n times = str(datetime.datetime.now())[:-3]\n if self.log_file is None:\n self.log_file = open(self.log_file_name, 'a+', encoding='utf-8')\n print('重新打开log文件')\n if '接收' in mes:\n index = mes.find('接收')\n mes = mes[:index-4] + '\\r\\n\\t\\t\\t ' + mes[index:]\n self.log_file.write('\\n' + times + ' ' + mes + '\\r\\n')\n elif '万用表回复' in mes:\n index = mes.find('万用表回复')\n mes = mes[:index-4] + '\\r\\n\\t\\t\\t ' + mes[index:]\n self.log_file.write('\\n' + times + ' ' + mes + '\\r\\n')\n else:\n self.log_file.write(mes + '\\r\\n')\n\n def write_log(self, mes):\n self.log_file.write(str(datetime.datetime.now())[:-3] + mes + '\\r\\n')\n\n def open_log(self):\n if self.log_file:\n self.log_file.close()\n self.log_file = None\n file_name = os.getcwd() + '\\\\' + self.log_file_name.replace('/', '\\\\')\n os.startfile(file_name)\n self.log_file = open(self.log_file_name, 'a+', encoding='utf-8')\n\n def timer(self, start, flag):\n while flag[0]:\n self.run_timer = time.time()-start\n self.time_flag = (self.time_flag+1) % len(self.time_color)\n self.label_time['text'] = str(int(self.run_timer))\n self.label_time['bg'] = self.time_color[self.time_flag]\n time.sleep(1)\n\n def canvas_update(self, n):\n # 以矩形的长度作为变量值更新\n self.canvas.coords(self.fill_line, (0, 0, n, 20))\n self.frame_scale.update()\n\n def run(self):\n self.empty_table()\n self.run_flag[0] = True\n self.aisle_1['state'] = tkinter.DISABLED\n self.aisle_2['state'] = tkinter.DISABLED\n self.aisle_3['state'] = tkinter.DISABLED\n self.aisle_4['state'] = tkinter.DISABLED\n self.button_psu1['state'] = tkinter.DISABLED\n self.button_psu2['state'] = tkinter.DISABLED\n self.button_vol['state'] = tkinter.DISABLED\n self.button_cur['state'] = tkinter.DISABLED\n self.button_res['state'] = tkinter.DISABLED\n self.channel_1['state'] = tkinter.DISABLED\n self.channel_2['state'] = tkinter.DISABLED\n self.channel_3['state'] = tkinter.DISABLED\n self.channel_4['state'] = tkinter.DISABLED\n self.channel_all['state'] = 
tkinter.DISABLED\n self.button_verify['state'] = tkinter.DISABLED\n self.button_test['state'] = tkinter.DISABLED\n self.run_flag[1] = None\n self.time_thread = Thread(target=self.timer, args=(time.time(), self.run_flag))\n self.time_thread.setDaemon(True)\n self.time_thread.start()\n now = str(datetime.datetime.now())[:-7]\n index = now.find(' ')\n times = now[index+1:]\n name = times.replace(':', '')\n self.log_file_name = self.dir + '/' + name + '.log'\n self.log_file = open(self.log_file_name, 'w', encoding='utf-8')\n\n def stop(self):\n self.run_flag[0] = False\n self.aisle_1['state'] = tkinter.NORMAL\n self.aisle_2['state'] = tkinter.NORMAL\n self.aisle_3['state'] = tkinter.NORMAL\n self.aisle_4['state'] = tkinter.NORMAL\n self.button_psu1['state'] = tkinter.NORMAL\n self.button_psu2['state'] = tkinter.NORMAL\n self.button_vol['state'] = tkinter.NORMAL\n self.button_cur['state'] = tkinter.NORMAL\n if self.psu1_flag:\n self.button_res['state'] = tkinter.NORMAL\n if self.cur_flag:\n self.channel_1['state'] = tkinter.NORMAL\n self.channel_2['state'] = tkinter.NORMAL\n self.channel_3['state'] = tkinter.NORMAL\n self.channel_4['state'] = tkinter.NORMAL\n self.channel_all['state'] = tkinter.NORMAL\n if self.res_flag:\n self.channel_1['state'] = tkinter.NORMAL\n self.channel_2['state'] = tkinter.NORMAL\n self.channel_3['state'] = tkinter.NORMAL\n self.channel_all['state'] = tkinter.NORMAL\n self.button_verify['state'] = tkinter.NORMAL\n self.button_verify['bg'] = self.button_color[0]\n self.button_test['state'] = tkinter.NORMAL\n self.button_test['bg'] = self.button_color[0]\n if len(self.table.get_children()) > 0:\n self.table.see(self.table.get_children()[0])\n if self.time_thread and self.time_thread.is_alive():\n self.time_thread.join()\n self.label_time['text'] = str(round(self.run_timer, 2))\n self.run_timer = -1\n if self.run_flag[1] == 'Fail':\n self.label_time['text'] += '\\r\\nFAIL'\n self.label_time['bg'] = 'red'\n elif self.run_flag[1] == 'Pass':\n self.label_time['text'] += '\\r\\nPASS'\n self.label_time['bg'] = 'lightgreen'\n elif self.run_flag[1] == 'None':\n self.label_time['bg'] = 'systemButtonface'\n if self.log_file:\n self.log_file.close()\n if os.path.getsize(self.log_file_name) == 0:\n os.remove(self.log_file_name)\n self.status_message.delete(0.0, tkinter.END)\n self.status_message.insert(tkinter.INSERT, '删除空log :' + self.log_file_name)\n self.log_file = None\n\n def empty_table(self):\n item_list = self.table.get_children()\n for item in item_list:\n self.table.delete(item)\n\n def verify(self):\n if self.vol_flag:\n if self.psu1_flag or self.psu2_flag:\n self.create_dir('verify')\n self.button_verify['bg'] = self.button_color[1]\n self.run()\n self.thread = Thread(target=verify.vol_verify,\n args=(self.channel, self.port, self.psu, self.table, self.run_flag,\n self.message, self.stop, self.write_log, self.log_file_name,\n self.canvas_update))\n self.thread.setDaemon(True)\n self.thread.start()\n elif self.cur_flag:\n if len(self.selected_ch) == 0:\n self.status_message.delete(0.0, tkinter.END)\n self.status_message.insert(tkinter.INSERT, '未选择档位')\n else:\n self.create_dir('verify')\n self.button_verify['bg'] = self.button_color[1]\n self.run()\n self.thread = Thread(target=verify.cur_verify,\n args=(self.channel, self.port, self.psu, self.selected_ch, self.table,\n self.run_flag, self.message, self.stop, self.write_log, self.log_file_name,\n self.canvas_update))\n self.thread.setDaemon(True)\n self.thread.start()\n elif self.res_flag:\n if len(self.selected_ch) == 
0:\n self.status_message.delete(0.0, tkinter.END)\n self.status_message.insert(tkinter.INSERT, '未选择档位')\n else:\n self.thread = Thread(target=verify.res, args=(self.table, self.run_flag, self.stop,\n self.canvas_update))\n self.thread.setDaemon(True)\n self.thread.start()\n self.button_verify['bg'] = self.button_color[1]\n self.run()\n\n def test(self):\n if self.vol_flag:\n if self.psu1_flag or self.psu2_flag:\n self.create_dir('test')\n self.button_test['bg'] = self.button_color[1]\n self.run()\n self.thread = Thread(target=verify.vol_test,\n args=(self.channel, self.port, self.psu, self.table, self.run_flag,\n self.message, self.stop, self.write_log, self.log_file_name,\n self.canvas_update))\n self.thread.setDaemon(True)\n self.thread.start()\n elif self.cur_flag:\n if len(self.selected_ch) == 0:\n self.status_message.delete(0.0, tkinter.END)\n self.status_message.insert(tkinter.INSERT, '未选择档位')\n else:\n self.create_dir('test')\n self.button_test['bg'] = self.button_color[1]\n self.run()\n self.thread = Thread(target=verify.cur_test,\n args=(self.channel, self.port, self.psu, self.selected_ch, self.table,\n self.run_flag, self.message, self.stop, self.write_log, self.log_file_name,\n self.canvas_update))\n self.thread.setDaemon(True)\n self.thread.start()\n elif self.res_flag:\n self.thread = Thread(target=verify.res, args=(self.table, self.run_flag, self.stop, self.canvas_update))\n self.thread.setDaemon(True)\n self.thread.start()\n self.button_test['bg'] = self.button_color[1]\n self.run()\n\n\nif __name__ == '__main__':\n win = Main_win()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":36324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"155022920","text":"BOT_NAME = 'proxy_pool'\nSPIDER_MODULES = ['proxy_pool.spiders']\nNEWSPIDER_MODULE = 'proxy_pool.spiders'\n\n# js rendering via scrapy_splash\nSPLASH_URL = 'http://127.0.0.1:8050'\n\n# robots.txt rules\nROBOTSTXT_OBEY = False\n\n# Redis\nREDIS_HOST = \"127.0.0.1\"\n# redis port\nREDIS_PORT = 6379\n# redis password\nREDIS_PASSWORD = \"\"\n# redis set key\nREDIS_KEY = \"proxies:ranking\"\n# maximum number of connections in the redis connection pool\nREDIS_MAX_CONNECTION = 20\n# REDIS SCORE maximum score\nMAX_SCORE = 10\n# REDIS SCORE minimum score\nMIN_SCORE = 0\n# REDIS SCORE initial score\nINIT_SCORE = 9\n\n# middlewares\nDOWNLOADER_MIDDLEWARES = {\n # 'proxy_pool.middlewares.ProxyPollDownloaderMiddleware': 543,\n 'scrapy_splash.SplashCookiesMiddleware': 723,\n 'scrapy_splash.SplashMiddleware': 725,\n 'scrapy.downloadermiddlewares.httpcompression.HttpCompressionMiddleware': 810,\n}\n\n# Configure item pipelines\nITEM_PIPELINES = {\n 'proxy_pool.pipelines.ProxyPollPipeline': 300,\n}\n\n# crawler run cycle (minutes)\nCRAWLER_RUN_CYCLE = 30\n# validator run cycle (minutes)\nVALIDATOR_RUN_CYCLE = 15\n# validator test site; can be pointed at the site you want to crawl, e.g. Sina or Zhihu\nVALIDATOR_BASE_URL = \"https://httpbin.org/get?show_env=1\"\n# request timeout\nREQUEST_TIMEOUT = 8","sub_path":"proxy_pool/settings_example.py","file_name":"settings_example.py","file_ext":"py","file_size_in_byte":1240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"644626744","text":"from models import app, Guitar\nfrom flask import jsonify, request\nfrom crud.guitar_crud import get_all_guitars, get_guitar, create_guitar, update_guitar, destroy_guitar\n\n@app.errorhandler(Exception)\ndef unhandled_exception(e):\n app.logger.error(f'Unhandled Exception: {e}')\n message_str = e.__str__()\n return jsonify(message=message_str.split(':')[0])\n\n@app.route('/guitars', methods=['GET', 'POST'])\ndef 
guitar_index_create():\n if request.method == 'GET':\n return get_all_guitars()\n else:\n return create_guitar(**request.form)\n\n@app.route('/guitars/<id>', methods=['GET', 'PUT', 'DELETE'])\ndef guitar_show_put_delete(id):\n if request.method == 'GET':\n return get_guitar(id)\n if request.method == 'PUT':\n return update_guitar(id, **request.form)\n if request.method == 'DELETE':\n return destroy_guitar(id)","sub_path":"api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"302907928","text":"\"\"\" Helper functions and classes\"\"\"\n\nimport random\nfrom collections import deque\n\nimport numpy as np\nimport tensorflow as tf\n\n\nclass DDQN_Solver():\n def __init__(self, gamma, memory_size, min_memory_size, learning_rate_adam, HL_1_size, HL_2_size, batch_size,\n epsilon_all):\n # Load important parameters\n self.gamma = gamma # discount rate\n self.memory_size = memory_size # size of the memory buffer\n self.HL_1_size = HL_1_size # number of nodes in the first hidden layer\n self.HL_2_size = HL_2_size # number of nodes in the second hidden layer\n self.learning_rate_adam = learning_rate_adam # learning rate for Adam optimizer\n self.batch_size = batch_size # batch size for training\n self.min_memory_size = max(self.batch_size, min_memory_size) # minimal memory size before we start training\n self.epsilon_initial = epsilon_all['initial'] # epsilon-greedy policy - initial value\n self.epsilon_decay = epsilon_all['decay'] # decay after each time step\n self.epsilon_min = epsilon_all['min'] # minimal value of epsilon\n\n # Initialize attributes\n self.replay_buffer = deque()\n self.global_step = 0 # counts the number of times we have trained our model = sum_{episode} timesteps_episode\n self.most_recent_score = tf.Variable(0, dtype=tf.int32) # most recent score - visualized in tensorboard\n tf.summary.scalar('most_recent_score', self.most_recent_score)\n self.epsilon = self.epsilon_initial # we initialize our epsilon\n self.epsilon_tensor = tf.Variable(self.epsilon, dtype=tf.float32) # for tensorboard\n tf.summary.scalar('epsilon', self.epsilon_tensor)\n\n # Build online and target networks\n self.__build_Q_net()\n\n # Merge summaries\n self.overall_summary = tf.summary.merge_all()\n\n # Initialize variables and summary writer\n self.__init_session()\n self.summary_writer = tf.summary.FileWriter('/Users/jankrepl/Desktop/ddqn_summaries', self.session.graph)\n\n # Synchronize Online and Target Network\n self.update_target_network()\n\n def __build_Q_net(self):\n # Placeholders\n self.input_state = tf.placeholder(tf.float32, [None, 4], 'Input_state')\n self.input_action = tf.placeholder(tf.float32, [None, 2], 'Input_action')\n self.target = tf.placeholder(tf.float32, [None], 'Target')\n\n # Variables - Online Network\n self.W1_on = tf.Variable(tf.truncated_normal([4, self.HL_1_size]))\n self.b1_on = tf.Variable(tf.constant(0.1, shape=[self.HL_1_size]))\n self.HL_1_on = tf.nn.relu(tf.matmul(self.input_state, self.W1_on) + self.b1_on)\n self.W2_on = tf.Variable(tf.truncated_normal([self.HL_1_size, self.HL_2_size]))\n self.b2_on = tf.Variable(tf.constant(0.1, shape=[self.HL_2_size]))\n self.HL_2_on = tf.nn.relu(tf.matmul(self.HL_1_on, self.W2_on) + self.b2_on)\n self.W3_on = tf.Variable(tf.truncated_normal([self.HL_2_size, 2]))\n self.b3_on = tf.Variable(tf.constant(0.1, shape=[2]))\n self.Q_ohr_on = tf.matmul(self.HL_2_on, self.W3_on) + self.b3_on\n\n # Variables - Target 
Network\n self.W1_tn = tf.Variable(tf.truncated_normal([4, self.HL_1_size]))\n self.b1_tn = tf.Variable(tf.constant(0.1, shape=[self.HL_1_size]))\n self.HL_1_tn = tf.nn.relu(tf.matmul(self.input_state, self.W1_tn) + self.b1_tn)\n self.W2_tn = tf.Variable(tf.truncated_normal([self.HL_1_size, self.HL_2_size]))\n self.b2_tn = tf.Variable(tf.constant(0.1, shape=[self.HL_2_size]))\n self.HL_2_tn = tf.nn.relu(tf.matmul(self.HL_1_tn, self.W2_tn) + self.b2_tn)\n self.W3_tn = tf.Variable(tf.truncated_normal([self.HL_2_size, 2]))\n self.b3_tn = tf.Variable(tf.constant(0.1, shape=[2]))\n self.Q_ohr_tn = tf.matmul(self.HL_2_tn, self.W3_tn) + self.b3_tn\n\n # Q function and loss\n self.Q_on = tf.reduce_sum(tf.multiply(self.Q_ohr_on, self.input_action), reduction_indices=1)\n\n # Loss\n self.loss = tf.reduce_mean(tf.square(self.target - self.Q_on), name='loss')\n tf.summary.scalar(\"loss\", self.loss)\n\n # Train operations\n self.train_op = tf.train.AdamOptimizer(self.learning_rate_adam).minimize(self.loss)\n\n def __init_session(self):\n self.session = tf.InteractiveSession()\n self.session.run(tf.global_variables_initializer())\n\n def train(self):\n \"\"\" Samples a minibatch from the memory and based on it trains the network\n \"\"\"\n self.global_step += 1\n\n # Just make sure that it breaks at the beginning when memory is not big enough < min_memory_size\n if len(self.replay_buffer) < self.min_memory_size:\n print('The memory is too small to train')\n return\n\n # Sample from memory\n\n mini_batch = random.sample(self.replay_buffer, self.batch_size) # sampling without replacement\n batch_s_old = [element[0] for element in mini_batch]\n batch_a = [element[1] for element in mini_batch]\n batch_r = [element[2] for element in mini_batch]\n batch_s_new = [element[3] for element in mini_batch]\n batch_d = [element[4] for element in mini_batch]\n\n # Generating targets\n Q_new_on = self.Q_ohr_on.eval(feed_dict={self.input_state: batch_s_new}) # forward pass - ONLINE NETWORK\n Q_new_tn = self.Q_ohr_tn.eval(feed_dict={self.input_state: batch_s_new}) # forward pass - TARGET NETWORK\n argmax = np.argmax(Q_new_on, axis=1)\n Q_target = np.reshape(np.array([Q_new_tn[i][argmax[i]] for i in range(self.batch_size)]),\n newshape=self.batch_size)\n\n # Generate targets\n batch_target = []\n for i in range(self.batch_size):\n if batch_d[i]:\n # The new state is the end game - its target Q value is definitely 0\n batch_target.append(batch_r[i])\n else:\n batch_target.append(batch_r[i] + self.gamma * Q_target[i])\n\n # Train and write summary\n _, summary_str = self.session.run([self.train_op, self.overall_summary], feed_dict={\n self.target: batch_target,\n self.input_state: batch_s_old,\n self.input_action: batch_a,\n })\n self.summary_writer.add_summary(summary_str, self.global_step)\n\n # Decay epsilon\n self.__decay_epsilon()\n\n def update_target_network(self):\n # We simply copy online network values into the target network\n ops_list = []\n ops_list.append(self.W1_tn.assign(self.W1_on))\n ops_list.append(self.b1_tn.assign(self.b1_on))\n ops_list.append(self.W2_tn.assign(self.W2_on))\n ops_list.append(self.b2_tn.assign(self.b2_on))\n ops_list.append(self.W3_tn.assign(self.W3_on))\n ops_list.append(self.b3_tn.assign(self.b3_on))\n\n self.session.run(ops_list)\n\n\n def __decay_epsilon(self, printme=False):\n \"\"\" Decays epsilon based on epsilon_decay\n\n :param printme: print current value of epsilon\n :type printme: bool\n \"\"\"\n if self.epsilon > self.epsilon_min:\n self.epsilon = max(self.epsilon_min, 
self.epsilon * self.epsilon_decay)\n\n if printme:\n print('The current value of epsilon is ' + str(self.epsilon))\n\n def memorize(self, s_old, action, reward, s_new, done):\n \"\"\" Inserts the most recent SARS and done into the memory - a is saved in the one hot representation\n\n :param s_old: old state\n :type s_old: ndarray\n :param action: 0 or 1\n :type action: int\n :param reward: reward\n :type reward: int\n :param s_new: new state\n :type s_new: ndarray\n :param done: is finished\n :type done: bool\n \"\"\"\n # Convert action to one hot representation\n a_ohr = np.zeros(2)\n a_ohr[action] = 1\n\n # Make sure they have the right dimensions\n s_old.shape = (4,)\n a_ohr.shape = (2,)\n s_new.shape = (4,)\n\n # Add into replay_buffer and if necessary pop oldest memory\n memory_element = tuple((s_old, a_ohr, reward, s_new, done))\n self.replay_buffer.append(memory_element)\n if len(self.replay_buffer) > self.memory_size:\n self.replay_buffer.popleft()\n\n def choose_action(self, s_old, policy_from_online):\n \"\"\" Epsilon greedy policy\n \n :param s_old: old observation\n :type s_old: ndarray\n :param policy_from_online: if True, online network as the policy, if False, target network as the policy\n :type policy_from_online: bool\n :return: 0 or 1\n :rtype: int\n \"\"\"\n # just a forward pass and max\n if np.random.rand() < self.epsilon:\n # Explore\n return np.random.choice([0, 1], 1)[0]\n else:\n # Exploit\n s_old.shape = (1, 4) # make sure it matches the placeholder shape (None, 4)\n if policy_from_online:\n return np.argmax(self.Q_ohr_on.eval(feed_dict={self.input_state: s_old}))\n else:\n return np.argmax(self.Q_ohr_tn.eval(feed_dict={self.input_state: s_old}))\n\n def feed_most_recent_score(self, score):\n \"\"\" Feeds the most recent score into our solver class so that we can visualize it in tensorboard together\n with epsilon\n\n :param score: most recent score\n :type score: int\n \"\"\"\n op1 = self.most_recent_score.assign(score)\n op2 = self.epsilon_tensor.assign(self.epsilon)\n self.session.run([op1, op2])\n","sub_path":"foo.py","file_name":"foo.py","file_ext":"py","file_size_in_byte":9646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"67123690","text":"\"\"\"\nPipeline manager module that provides functionality to add, modify and delete pipelines.\n\"\"\"\n\nimport os\nimport sys\nfrom argparse import ArgumentParser\nfrom chrisstoreclient.client import StoreClient\n\nif \"DJANGO_SETTINGS_MODULE\" not in os.environ:\n # django needs to be loaded (eg. 
when this script is run from the command line)\n sys.path.append(os.path.join(os.path.dirname(__file__), '../../'))\n os.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"config.settings.local\")\n import django\n django.setup()\n\nfrom django.utils import timezone\n\nfrom plugins.models import Plugin\nfrom plugins.models import ComputeResource\nfrom plugins.serializers import PluginSerializer, PluginParameterSerializer\nfrom plugins.serializers import DEFAULT_PARAMETER_SERIALIZERS\nfrom pipelines.models import Pipeline\n\n\nclass PipelineManager(object):\n\n def __init__(self):\n parser = ArgumentParser(description='Manage pipelines')\n subparsers = parser.add_subparsers(dest='subparser_name', title='subcommands',\n description='valid subcommands',\n help='sub-command help')\n\n # create the parser for the \"add\" command\n parser_add = subparsers.add_parser('add', help='Add a new plugin')\n parser_add.add_argument('name', help=\"Plugin's name\")\n parser_add.add_argument('computeresource',\n help=\"Compute resource where the plugin's instances run\")\n parser_add.add_argument('storeurl',\n help=\"Url of ChRIS store where the plugin is registered\")\n parser_add.add_argument('--storeusername', help=\"Username for the ChRIS store\")\n parser_add.add_argument('--storepassword', help=\"Password for the ChRIS store\")\n parser_add.add_argument('--storetimeout', help=\"ChRIS store request timeout\")\n\n # create the parser for the \"modify\" command\n parser_modify = subparsers.add_parser('modify', help='Modify existing plugin')\n parser_modify.add_argument('name', help=\"Plugin's name\")\n parser_modify.add_argument('--computeresource',\n help=\"Compute resource where the plugin's instances run\")\n parser_modify.add_argument('--storeurl',\n help=\"Url of ChRIS store where the plugin is registered\")\n parser_modify.add_argument('--storeusername', help=\"Username for the ChRIS store\")\n parser_modify.add_argument('--storepassword', help=\"Password for the ChRIS store\")\n parser_modify.add_argument('--storetimeout', help=\"ChRIS store request timeout\")\n\n # create the parser for the \"remove\" command\n parser_remove = subparsers.add_parser('remove', help='Remove an existing plugin')\n parser_remove.add_argument('name', help=\"Plugin's name\")\n\n self.parser = parser\n self.str_service = ''\n\n # Debug specifications\n self.b_quiet = False\n self.b_useDebug = True\n self.str_debugFile = '%s/tmp/debug-charm.log' % os.environ['HOME']\n\n def add_pipeline(self, args):\n \"\"\"\n Register/add a new plugin to the system.\n \"\"\"\n timeout = 30\n if args.storetimeout:\n timeout = args.storetimeout\n plg_repr = self.get_plugin_representation_from_store(args.name, args.storeurl,\n args.storeusername,\n args.storepassword, timeout)\n parameters_data = plg_repr['parameters']\n del plg_repr['parameters']\n plg_serializer = PluginSerializer(data=plg_repr)\n plg_serializer.is_valid(raise_exception=True)\n (compute_resource, tf) = ComputeResource.objects.get_or_create(\n compute_resource_identifier=args.computeresource)\n plugin = plg_serializer.save(compute_resource=compute_resource)\n # collect parameters and validate and save them to the DB\n for parameter in parameters_data:\n default = parameter['default'] if 'default' in parameter else None\n del parameter['default']\n parameter_serializer = PluginParameterSerializer(data=parameter)\n parameter_serializer.is_valid(raise_exception=True)\n param = parameter_serializer.save(plugin=plugin)\n if default is not None:\n default_param_serializer = 
DEFAULT_PARAMETER_SERIALIZERS[param.type](\n                    data={'value': default})\n                default_param_serializer.is_valid(raise_exception=True)\n                default_param_serializer.save(plugin_param=param)\n\n    def modify_pipeline(self, args):\n        \"\"\"\n        Modify an existing/registered plugin and add the current date as a new plugin\n        modification date.\n        \"\"\"\n        plugin = self.get_plugin(args.name)\n        compute_resource = None\n        plg_repr = None\n        if args.computeresource:\n            (compute_resource, tf) = ComputeResource.objects.get_or_create(\n                compute_resource_identifier=args.computeresource)\n        if args.storeurl:\n            timeout = 30\n            if args.storetimeout:\n                timeout = args.storetimeout\n            plg_repr = self.get_plugin_representation_from_store(args.name, args.storeurl,\n                                                                 args.storeusername,\n                                                                 args.storepassword,\n                                                                 timeout)\n        if plg_repr:\n            parameters_data = plg_repr['parameters']\n            del plg_repr['parameters']\n            plg_serializer = PluginSerializer(plugin, data=plg_repr)\n            plg_serializer.is_valid(raise_exception=True)\n            plugin = plg_serializer.save(compute_resource=compute_resource)\n            # collect existing and new parameters and validate and save them to the DB\n            db_parameters = plugin.parameters.all()\n            for parameter in parameters_data:\n                default = parameter['default'] if 'default' in parameter else None\n                del parameter['default']\n                db_param = [p for p in db_parameters if p.name == parameter['name']]\n                if db_param:\n                    parameter_serializer = PluginParameterSerializer(db_param[0],\n                                                                     data=parameter)\n                else:\n                    parameter_serializer = PluginParameterSerializer(data=parameter)\n                parameter_serializer.is_valid(raise_exception=True)\n                param = parameter_serializer.save(plugin=plugin)\n                if default is not None:\n                    db_default = param.get_default()\n                    if db_default is not None: # check if there is already a default in DB\n                        default_param_serializer = DEFAULT_PARAMETER_SERIALIZERS[\n                            param.type](db_default, data={'value': default})\n                    else:\n                        default_param_serializer = DEFAULT_PARAMETER_SERIALIZERS[\n                            param.type](data={'value': default})\n                    default_param_serializer.is_valid(raise_exception=True)\n                    default_param_serializer.save(plugin_param=param)\n        elif compute_resource:\n            plg_serializer = PluginSerializer(plugin)\n            plugin = plg_serializer.save(compute_resource=compute_resource)\n\n        if plg_repr or compute_resource:\n            plugin.modification_date = timezone.now()\n            plugin.save()\n\n    def remove_pipeline(self, args):\n        \"\"\"\n        Remove an existing/registered plugin from the system.\n        \"\"\"\n        plugin = self.get_plugin(args.name)\n        plugin.delete()\n\n    def run(self, args=None):\n        \"\"\"\n        Parse the arguments passed to the manager and perform the appropriate action.\n        \"\"\"\n        options = self.parser.parse_args(args)\n        if options.subparser_name == 'add':\n            self.add_pipeline(options)\n        elif options.subparser_name == 'modify':\n            self.modify_pipeline(options)\n        elif options.subparser_name == 'remove':\n            self.remove_pipeline(options)\n\n    @staticmethod\n    def get_plugin_representation_from_store(name, store_url, username=None,\n                                             password=None, timeout=30):\n        \"\"\"\n        Get a plugin app representation from the ChRIS store.\n        \"\"\"\n        store_client = StoreClient(store_url, username, password, timeout)\n        return store_client.get_plugin(name)\n\n    @staticmethod\n    def get_plugin(name):\n        \"\"\"\n        Get an existing plugin.\n        \"\"\"\n        try:\n            plugin = Plugin.objects.get(name=name)\n        except Plugin.DoesNotExist:\n            raise NameError(\"Couldn't find '%s' plugin in the system\" % name)\n        return plugin\n\n\n# ENTRYPOINT\nif __name__ == \"__main__\":\n    manager = PipelineManager()\n    
manager.run()\n","sub_path":"chris_backend/pipelines/services/manager.py","file_name":"manager.py","file_ext":"py","file_size_in_byte":9117,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"267486693","text":"import numpy as np\r\nfrom component import *\r\n\r\n# TODO\r\n# Class to implement the logic to integrate both MeanModel and GPModel in the same container\r\n# Purpose is to abstract the logic of the likelihood calculation when dealing with parameters of both models together\r\nclass Model(object):\r\n\t\r\n\tdef __init__(self, mean_model, gp_model, data, include_errors=False):\r\n\t\t# If include errors try to get all data from data array\r\n\t\tif include_errors:\r\n\t\t\ttry:\r\n\t\t\t\tself.time, self.flux, self.error = data\r\n\t\t\texcept ValueError:\r\n\t\t\t\tprint(\"Data needs to have errors to include them\")\r\n\t\t# If not include errors, handle their possible existence in data without making the attribute\t\t\t\r\n\t\telse:\r\n\t\t\ttry:\r\n\t\t\t\tself.time, self.flux = data\r\n\t\t\texcept ValueError:\r\n\t\t\t\ttry:\r\n\t\t\t\t\tself.time, self.flux, _ = data\r\n\t\t\t\texcept ValueError:\r\n\t\t\t\t\tprint(\"Data needs to have at least time and flux arrays\")\r\n\r\n\t\tif isinstance(mean_model, MeanModel):\r\n\t\t\tself.mean_model = mean_model\r\n\t\telse:\r\n\t\t\traise ValueError(\"First argument must be of type MeanModel\")\r\n\r\n\t\tif isinstance(gp_model, GPModel):\r\n\t\t\tself.gp_model = gp_model\r\n\t\t\tif hasattr(self, 'error'):\r\n\t\t\t\tself.gp = GP(self.gp_model, self.time, self.error)\r\n\t\t\telse:\r\n\t\t\t\tself.gp = GP(self.gp_model, self.time)\r\n\t\telse:\r\n\t\t\traise ValueError(\"Second argument must be of type GPModel\")\r\n\r\n\tdef log_likelihood(self, params):\r\n\t\tself.gp.gp_model.set_parameters(params)\r\n\t\tlnprior = self.gp.gp_model.prior_evaluate()\r\n\t\tif not np.isfinite(lnprior):\r\n\t\t\treturn -np.inf\r\n\t\tself.gp.set_parameters()\r\n\r\n\t\tlnlikelihood = self.gp.log_likelihood(self.flux - self.mean_model.eval(params, self.time, self.flux))\r\n\t\t\r\n\t\treturn lnprior + lnlikelihood\r\n\r\n\r\n# TODO\r\n# Parametric model of the data that has a defined functional form\r\nclass MeanModel(object):\r\n\t\r\n\tdef eval(self, params, time, flux):\r\n\t\treturn 0\r\n\r\n# Model that contains the components of the GP. 
Might be joined with GP in the future.\r\nclass GPModel(object):\r\n\tcomponent_array = []\r\n\r\n\tdef __repr__(self):\r\n\t\tstring = 'Model with {0} components:\\n'.format(len(self.component_array))\r\n\t\tfor component in self.component_array:\r\n\t\t\tstring += repr(component) + '\\n'\r\n\t\treturn string\r\n\r\n\tdef\t__init__(self, *args):\r\n\t\tif len(args):\r\n\t\t\tfor arg in args:\r\n\t\t\t\tif isinstance(arg, Component):\r\n\t\t\t\t\tself.component_array.append(arg)\r\n\t\t\t\telse:\r\n\t\t\t\t\traise ValueError(\"Args must be of type Component\")\r\n\t\telse:\r\n\t\t\traise ValueError(\"Model must have at least one component\")\r\n\r\n\tdef add(self, *args):\r\n\t\tif len(args):\r\n\t\t\tfor arg in args:\r\n\t\t\t\tif isinstance(arg, Component):\r\n\t\t\t\t\tself.component_array.append(arg)\r\n\t\t\t\telse:\r\n\t\t\t\t\traise ValueError(\"Args must be of type Component\")\r\n\t\telse:\r\n\t\t\traise ValueError(\"Must add at least one component to model\")\r\n\r\n\tdef set_parameters(self, params):\r\n\t\ti = 0\r\n\t\tfor component in self.component_array:\r\n\t\t\tcomponent.parameter_array = params[i:i+component.npars]\r\n\t\t\ti += component.npars\r\n\r\n\tdef get_parameters(self):\r\n\t\treturn np.hstack([component.parameter_array for component in self.component_array])\r\n\r\n\tdef get_parameters_celerite(self):\r\n\t\treturn np.hstack([component.get_parameters_celerite() for component in self.component_array])\r\n\t\t\r\n\tdef get_parameters_names(self):\r\n\t\treturn np.hstack([component.parameter_names for component in self.component_array])\r\n\r\n\tdef get_parameters_latex(self):\r\n\t\treturn np.hstack([component.parameter_latex_names for component in self.component_array])\r\n\r\n\tdef get_parameters_units(self):\r\n\t\treturn np.hstack([component.parameter_names for component in self.component_array])\r\n\r\n\r\n\tdef prior_evaluate(self):\r\n\t\tprior = sum([component.eval_prior() for component in self.component_array])\r\n\t\tif not np.isfinite(prior):\r\n\t\t\treturn -np.inf\r\n\t\treturn prior\r\n\r\n\tdef prior_sample(self, num=1):\r\n\t\treturn np.hstack([component.sample_prior(num) for component in self.component_array])\r\n\r\n\tdef get_kernel(self):\r\n\t\tkernel = celerite.terms.TermSum()\r\n\t\tfor component in self.component_array:\r\n\t\t\tkernel += component.get_kernel()\r\n\t\treturn kernel\r\n\r\n\tdef get_psd(self, time):\r\n\t\tnyquist = (1 / (2*(time[1]-time[0]))) * 1e6\r\n\t\tf_sampling = 1 / (27.4*24*3600 / 1e6)\r\n\t\tfreq = np.linspace(0.0, nyquist, (nyquist/f_sampling)+1)\r\n\r\n\t\tpsd_dict = [component.get_psd(freq, time.size) for component in self.component_array]\r\n\t\treturn [freq, psd_dict]\r\n\r\n\tdef get_kernel_list(self):\r\n\t\tpass\r\n\r\n# Class that implements the GP methods of the GPModel and interfaces with celerite methods\r\nclass GP(object):\r\n\tdef __init__(self, gp_model, time, error=1.123e-12): # yerr same as celerite\r\n\t\tif isinstance(gp_model, GPModel):\r\n\t\t\tself.gp_model = gp_model\r\n\t\telse:\r\n\t\t\traise ValueError(\"model arg must be of type GPModel\")\r\n\t\tself.gp = celerite.GP(self.gp_model.get_kernel())\r\n\t\tself.gp.compute(time/1e6, yerr=error)\r\n\r\n\tdef set_parameters(self):\r\n\t\tcelerite_params = self.gp_model.get_parameters_celerite()\r\n\t\tself.gp.set_parameter_vector(celerite_params)\r\n\r\n\tdef log_likelihood(self, residuals):\r\n\t\treturn self.gp.log_likelihood(residuals)\r\n\r\n\tdef predict(self, y, t=None, return_cov=True, return_var=False):\r\n\t\treturn self.gp.predict(y, t, 
return_cov, return_var)","sub_path":"gptransits/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":5092,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"452190094","text":"import requests\r\nimport json\r\nimport pandas as pd\r\n#from recommender import Recommender\r\nfrom GeoFilter import GeoFilter\r\n\r\nclass Visualizations:\r\n def __init__(self):\r\n self.api_key = \"p5XJewjpIhpiwegBmHG-dETXNCdQst3XaDxu6iubyTvAWjYGrHoVLQxmcIVfTDRVwLkzujm1M8_8Dz2LhKh8etCa3HzwwAdm4zEFWDcDL-ebocrgJzLJZYWHxWu2XXYx\"\r\n self.weekdays = {}\r\n self.weekdays[0] = 'Monday'\r\n self.weekdays[1] = 'Tuesday'\r\n self.weekdays[2] = 'Wednesday'\r\n self.weekdays[3] = 'Thursday'\r\n self.weekdays[4] = 'Friday'\r\n self.weekdays[5] = 'Saturday'\r\n self.weekdays[6] = 'Sunday'\r\n\r\n def retrieve_data(self, user_id):\r\n #recommender = Recommender('Las Vegas')\r\n #recommendations = recommender.recommend(user_id)\r\n #business_list = recommendations['restaurant_id'].to_list()\r\n #business_list = business_list[0:3]\r\n\r\n # read user choice\r\n user_choice = pd.read_csv('user_choice.csv')\r\n user = user_choice['UserID'][0]\r\n start_lat = user_choice['Start_Lat'][0]\r\n start_long = user_choice['Start_Long'][0]\r\n end_lat = user_choice['End_Lat'][0]\r\n end_long = user_choice['End_Long'][0]\r\n\r\n # new geofilter added\r\n recommender = GeoFilter('Las Vegas', start_lat, start_long, end_lat, end_long)\r\n recommendations = recommender.filter_by_user(user)\r\n business_list = recommendations['restaurant_id'].to_list()\r\n business_list = business_list[0:3]\r\n\r\n # Get Business Id for 3 Restaurants\r\n # business_id = [\"SeNOJ2zYHziptxLuiRINLg\", \"vHz2RLtfUMVRPFmd7VBEHA\", \"I6EDDi4-Eq_XlFghcDCUhw\"]\r\n\r\n headers = {'Authorization': 'Bearer %s' % self.api_key}\r\n name = list()\r\n website = list()\r\n price = list()\r\n review_count = list()\r\n stars = list()\r\n categories = list()\r\n hours = list() # nested list of hours\r\n longitude = list()\r\n latitude = list()\r\n\r\n for i in range(len(business_list)):\r\n url = 'https://api.yelp.com/v3/businesses/' + business_list[i]\r\n req = requests.get(url, headers=headers)\r\n if req.status_code != 200:\r\n quit()\r\n restaurant = json.loads(req.text)\r\n\r\n name.append(restaurant['name'])\r\n website.append(restaurant['url'])\r\n price.append(restaurant['price'])\r\n review_count.append(restaurant['review_count'])\r\n stars.append(restaurant['rating'])\r\n categories.append([i['title'] for i in restaurant['categories']])\r\n # format business hours\r\n hours_list = restaurant['hours'][0]['open']\r\n hours_list1 = [[i['day'], i['start'], i['end']] for i in hours_list]\r\n for v in hours_list1:\r\n v[0] = self.weekdays[v[0]]\r\n v[1] = \": \" + v[1][:-2] + ':' + v[1][-2:] + \" - \"\r\n v[2] = v[2][:-2] + ':' + v[2][-2:]\r\n hours_list2 = [''.join(i) for i in hours_list1]\r\n hours.append(hours_list2) # list of hours\r\n latitude.append(restaurant['coordinates']['latitude'])\r\n longitude.append(restaurant['coordinates']['longitude'])\r\n df1 = pd.DataFrame(list(zip(name, website, price, review_count, stars, categories, hours, latitude, longitude)),\r\n columns=['name', 'website', 'price', 'review_count', 'stars', 'categories', 'hours', 'latitude', 'longitude'])\r\n df1.to_csv(r'data_viz.csv', index=False)\r\n\r\ndef main():\r\n vis = Visualizations()\r\n vis.retrieve_data('U4INQZOPSUaj8hMjLlZ3KA')\r\n\r\nif __name__ == '__main__':\r\n 
main()\r\n","sub_path":"YR/visualizations.py","file_name":"visualizations.py","file_ext":"py","file_size_in_byte":3664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"543172372","text":"from selenium import webdriver\r\nfrom selenium.webdriver.chrome.options import Options\r\nfrom selenium.common.exceptions import TimeoutException\r\nimport time\r\nimport requests\r\nfrom bs4 import BeautifulSoup\r\nimport re\r\nfrom MWA_webapp.models import Commodities\r\nfrom datetime import datetime\r\n\r\n\r\ndef scrape_ALLORDS(driver):\r\n \"\"\" ALL ORDINARIES\"\"\"\r\n try:\r\n page = driver.get('https://www.marketindex.com.au/all-ordinaries')\r\n except TimeoutException:\r\n pass\r\n soup = BeautifulSoup(driver.page_source, 'html.parser')\r\n\r\n allords_last_price = float(soup.find(\"p\",{'class':'quoteapi-number'}).text.strip().replace(\",\",\"\"))\r\n allords_last_movement_nominal = float(soup.find(\"span\",{'class':'quoteapi-change'}).text.strip().replace(\",\",\"\"))\r\n allords_last_movement_percentage = float(soup.find(\"span\",{'class':'quoteapi-pct-change'}).text.strip().replace(\",\",\"\").replace(\"%\",\"\").replace(\"(\",\"\").replace(\")\",\"\"))\r\n\r\n ALLORDS = [allords_last_price,allords_last_movement_nominal,allords_last_movement_percentage]\r\n return(ALLORDS)\r\n\r\n\r\ndef scrape_OIL(driver):\r\n \"\"\" CRUDE OIL (WTI)\"\"\"\r\n try:\r\n page = driver.get('https://www.marketindex.com.au/crude-oil')\r\n except TimeoutException:\r\n pass\r\n soup = BeautifulSoup(driver.page_source, 'html.parser')\r\n\r\n WTIoil_last_price = float(re.sub(\"[^0-9.]\", \"\", soup.find(\"div\",{'class':'price-wrapper'}).find(\"p\").text))\r\n if soup.find(\"div\",{'class':'movement-wrapper'}).find('div',{'class':'negative'}) is None:\r\n WTIoil_last_movement_nominal = float(re.sub(\"[^0-9.]\", \"\", soup.find(\"div\",{'class':'movement-wrapper'}).text.split(\"(\")[0]))\r\n WTIoil_last_movement_percentage = float(re.sub(\"[^0-9.]\", \"\", soup.find(\"div\",{'class':'movement-wrapper'}).text.split(\"(\")[1]))\r\n else:\r\n WTIoil_last_movement_nominal = -1*abs(float(re.sub(\"[^0-9.]\", \"\", soup.find(\"div\",{'class':'movement-wrapper'}).text.split(\"(\")[0])))\r\n WTIoil_last_movement_percentage = -1*abs(float(re.sub(\"[^0-9.]\", \"\", soup.find(\"div\",{'class':'movement-wrapper'}).text.split(\"(\")[1])))\r\n\r\n WTIOIL = [WTIoil_last_price,WTIoil_last_movement_nominal,WTIoil_last_movement_percentage]\r\n return(WTIOIL)\r\n\r\n\r\ndef scrape_GOLD(driver):\r\n \"\"\" GOLD \"\"\"\r\n try:\r\n page = driver.get('https://www.marketindex.com.au/gold')\r\n except TimeoutException:\r\n pass\r\n soup = BeautifulSoup(driver.page_source, 'html.parser')\r\n\r\n gold_last_price = float(re.sub(\"[^0-9.]\", \"\", soup.find(\"div\",{'class':'price-wrapper'}).find(\"p\").text))\r\n if soup.find(\"div\",{'class':'movement-wrapper'}).find('div',{'class':'negative'}) is None:\r\n gold_last_movement_nominal = float(re.sub(\"[^0-9.]\", \"\", soup.find(\"div\",{'class':'movement-wrapper'}).text.split(\"(\")[0]))\r\n gold_last_movement_percentage = float(re.sub(\"[^0-9.]\", \"\", soup.find(\"div\",{'class':'movement-wrapper'}).text.split(\"(\")[1]))\r\n else:\r\n gold_last_movement_nominal = -1*abs(float(re.sub(\"[^0-9.]\", \"\", soup.find(\"div\",{'class':'movement-wrapper'}).text.split(\"(\")[0])))\r\n gold_last_movement_percentage = -1*abs(float(re.sub(\"[^0-9.]\", \"\", soup.find(\"div\",{'class':'movement-wrapper'}).text.split(\"(\")[1])))\r\n\r\n GOLD = 
[gold_last_price,gold_last_movement_nominal,gold_last_movement_percentage]\r\n return(GOLD)\r\n\r\n\r\ndef scrape_SILVER(driver):\r\n \"\"\" SILVER \"\"\"\r\n try:\r\n page = driver.get('https://www.marketindex.com.au/silver')\r\n except TimeoutException:\r\n pass\r\n soup = BeautifulSoup(driver.page_source, 'html.parser')\r\n\r\n silver_last_price = float(re.sub(\"[^0-9.]\", \"\", soup.find(\"div\",{'class':'price-wrapper'}).find(\"p\").text))\r\n if soup.find(\"div\",{'class':'movement-wrapper'}).find('div',{'class':'negative'}) is None:\r\n silver_last_movement_nominal = float(re.sub(\"[^0-9.]\", \"\", soup.find(\"div\",{'class':'movement-wrapper'}).text.split(\"(\")[0]))\r\n silver_last_movement_percentage = float(re.sub(\"[^0-9.]\", \"\", soup.find(\"div\",{'class':'movement-wrapper'}).text.split(\"(\")[1]))\r\n else:\r\n silver_last_movement_nominal = -1*abs(float(re.sub(\"[^0-9.]\", \"\", soup.find(\"div\",{'class':'movement-wrapper'}).text.split(\"(\")[0])))\r\n silver_last_movement_percentage = -1*abs(float(re.sub(\"[^0-9.]\", \"\", soup.find(\"div\",{'class':'movement-wrapper'}).text.split(\"(\")[1])))\r\n\r\n SILVER = [silver_last_price,silver_last_movement_nominal,silver_last_movement_percentage]\r\n return(SILVER)\r\n\r\n\r\ndef scrape_BITCOIN(driver):\r\n \"\"\" BITCOIN \"\"\"\r\n try:\r\n page = driver.get('https://coinmarketcap.com/currencies/bitcoin/')\r\n except TimeoutException:\r\n pass\r\n soup = BeautifulSoup(driver.page_source, 'html.parser')\r\n\r\n bitcoin_last_price = float(re.sub(\"[^0-9.]\", \"\", soup.find(\"span\",{'class':'details-panel-item--price__value'}).text))\r\n\r\n if soup.find('span',{'class':'negative_change'}) is None:\r\n bitcoin_last_movement_percentage = abs(float(re.sub(\"[^0-9.]\", \"\", soup.find(\"span\",{'data-format-percentage':True})['data-format-value'])))\r\n else:\r\n bitcoin_last_movement_percentage = -1*abs(float(re.sub(\"[^0-9.]\", \"\", soup.find(\"span\",{'data-format-percentage':True})['data-format-value'])))\r\n\r\n silver_last_movement_nominal = bitcoin_last_price*bitcoin_last_movement_percentage/100\r\n\r\n BITCOIN = [bitcoin_last_price,silver_last_movement_nominal,bitcoin_last_movement_percentage]\r\n return(BITCOIN)\r\n\r\n\r\ndef scrape_all():\r\n\r\n options = Options()\r\n options.add_argument(\"--disable-notifications\")\r\n options.add_argument(\"--no-sandbox\")\r\n options.add_argument(\"--disable-dev-shm-usage\")\r\n options.headless = True\r\n\r\n user_agent = 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.50 Safari/537.36'\r\n options.add_argument('user-agent={0}'.format(user_agent))\r\n\r\n\r\n driver = webdriver.Chrome(chrome_options=options)\r\n driver.set_page_load_timeout(15)\r\n\r\n try:\r\n r = requests.get('https://www.alphavantage.co/query?function=CURRENCY_EXCHANGE_RATE&from_currency=AUD&to_currency=USD&apikey=M4TA2P8B2OU22SPC')\r\n json_response = r.json()\r\n\r\n Commodities.objects.filter(commodity_name=\"AUD\").update(date_last_scraped=datetime.now(), last_price=json_response[\"Realtime Currency Exchange Rate\"][\"5. 
Exchange Rate\"])\r\n except Exception as e:\r\n print('error scraping AUD: ', e)\r\n\r\n try:\r\n update = scrape_ALLORDS(driver)\r\n exch_rate = Commodities.objects.get(commodity_name=\"AUD\").last_price\r\n update[0] = update[0]*exch_rate\r\n update[1] = update[1]*exch_rate\r\n\r\n Commodities.objects.filter(commodity_name=\"All Ords\").update(date_last_scraped=datetime.now(), last_price=update[0], last_movement_nominal=update[1], last_movement_percentage=update[2])\r\n except Exception as e:\r\n print('error scraping ALLORDS: ', e)\r\n\r\n try:\r\n update = scrape_OIL(driver)\r\n Commodities.objects.filter(commodity_name=\"Oil\").update(date_last_scraped=datetime.now(), last_price=update[0], last_movement_nominal=update[1], last_movement_percentage=update[2])\r\n except Exception as e:\r\n print('error scraping OIL: ', e)\r\n\r\n try:\r\n update = scrape_BITCOIN(driver)\r\n Commodities.objects.filter(commodity_name=\"Bitcoin\").update(date_last_scraped=datetime.now(), last_price=update[0], last_movement_nominal=update[1], last_movement_percentage=update[2])\r\n except Exception as e:\r\n print('error scraping BITCOIN: ', e)\r\n\r\n try:\r\n update = scrape_GOLD(driver)\r\n Commodities.objects.filter(commodity_name=\"Gold\").update(date_last_scraped=datetime.now(), last_price=update[0], last_movement_nominal=update[1], last_movement_percentage=update[2])\r\n except Exception as e:\r\n print('error scraping GOLD: ', e)\r\n\r\n try:\r\n update = scrape_SILVER(driver)\r\n Commodities.objects.filter(commodity_name=\"Silver\").update(date_last_scraped=datetime.now(), last_price=update[0], last_movement_nominal=update[1], last_movement_percentage=update[2])\r\n except Exception as e:\r\n print('error scraping SILVER: ', e)\r\n\r\n\r\n driver.quit()\r\n del(driver)\r\n","sub_path":"mywealthanalyst_django/scripts/scrape_livedata.py","file_name":"scrape_livedata.py","file_ext":"py","file_size_in_byte":8167,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"586922922","text":"\nimport pyrax\nimport ConfigParser\n\nclass Challenge:\n \n def getInifile(self):\n config = ConfigParser.ConfigParser()\n \n from os.path import expanduser\n home = expanduser(\"~\")\n \n config.read(home + \"/.rackspace_cloud_credentials\")\n return config\n \n def getUsername(self):\n d = self.getInifile()\n return d.get('credentials','username')\n \n def getApiKey(self):\n d = self.getInifile()\n return d.get('credentials','api_key')\n \n def getPyrax(self):\n cls = pyrax.utils.import_class('pyrax.identity.rax_identity.RaxIdentity')\n pyrax.identity = cls()\n pyrax.set_credentials(self.getUsername(), self.getApiKey(), region='DFW')\n return pyrax\n","sub_path":"python-pyrax/src/org/mbs3/pyrax/devops/Challenge.py","file_name":"Challenge.py","file_ext":"py","file_size_in_byte":759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"569334920","text":"from django.shortcuts import render_to_response\nfrom django.core.mail import send_mail\nfrom django.template import RequestContext\nfrom django.http import HttpResponseRedirect\nfrom .forms import ContactForm\n\ndef contact(request):\n if request.method == 'POST':\n form = ContactForm(request.POST)\n if form.is_valid():\n subject = form.cleaned_data['subject']\n message = form.cleaned_data['message']\n sender = form.cleaned_data['sender']\n cc_myself = form.cleaned_data['cc_myself']\n recipients = ['leandroc@inatel.br']\n if cc_myself:\n recipients.append(sender)\n 
send_mail(subject, message, sender, recipients)\n return HttpResponseRedirect('/servicedesk/thanks')\n else:\n form = ContactForm()\n return render_to_response('contact.html', {'form': form,}, \\\n RequestContext(request))\n","sub_path":"servicedesk/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":949,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"152943239","text":"import datetime\nimport shortuuid\nimport hashlib\nimport requests\nfrom sqlalchemy.dialects.postgresql import JSONB\n\nfrom app import db\nfrom source import make_event_source\nfrom event import CedEvent\nfrom event import UnpaywallEvent\n\n\n\nclass Doi(object):\n\n def __init__(self, doi):\n self.doi = doi\n self.metadata = CrossrefMetadata(self.doi)\n self.open_access = OaDoi(self.doi)\n self.altmetrics = AltmetricsForDoi(self.doi)\n\n def get(self):\n self.metadata.get()\n self.open_access.get()\n self.altmetrics.get()\n\n def altmetrics_dict_including_unpaywall_views(self):\n altmetrics_value = self.altmetrics.to_dict()\n return altmetrics_value\n\n def to_dict(self):\n\n altmetrics = self.altmetrics.to_dict()\n\n\n ret = {\n \"doi\": self.doi,\n \"altmetrics_sources\": altmetrics[\"sources\"],\n \"crossref_event_data_url\": altmetrics[\"crossref_event_data_url\"],\n \"metadata\": self.metadata.to_dict(),\n \"open_access\": self.open_access.to_dict()\n }\n return ret\n\n\n\nclass AltmetricsForDoi(object):\n def __init__(self, doi):\n self.doi = doi\n self.ced_url = \"https://api.eventdata.crossref.org/v1/events?rows=10000&filter=from-collected-date:1990-01-01,until-collected-date:2099-01-01,obj-id:{}\".format(\n self.doi\n )\n self.events = []\n self.sources = []\n\n def get(self):\n # \"\"\"\n # Handy test data:\n # 10.2190/EC.43.3.f # no events\n # 10.1371/journal.pone.0000308 # many events, incl lots of wiki\n # \"\"\"\n ced_events = CedEvent.query.filter(CedEvent.doi==self.doi).limit(2500).all()\n for ced_event in ced_events:\n self.add_event(ced_event)\n\n def add_event(self, ced_event):\n source_id = ced_event.source.id\n\n # get the correct source for this event\n my_source = None\n for source in self.sources:\n if source.id == source_id:\n my_source = source\n break\n\n # we don't have this source for this DOI.\n # make it and add it to the list\n if my_source is None:\n my_source = make_event_source(ced_event.source)\n self.sources.append(my_source)\n\n # this source exists now for sure because we either found it or made it\n # add the event to the source.\n my_source.add_event(ced_event.api_raw)\n\n def to_dict(self):\n ret = {\n \"crossref_event_data_url\": self.ced_url,\n \"sources\": [s.to_dict() for s in self.sources]\n }\n return ret\n\n\n\n# currently unused, since we're moving Unpaywall to its own API\nclass UnpaywallViewsForDoi(object):\n def __init__(self, doi):\n self.doi = doi\n\n def get(self):\n event_objs = UnpaywallEvent.query.filter(UnpaywallEvent.doi==self.doi).all()\n event_dicts = [event.api_dict() for event in event_objs]\n return event_dicts\n\n def to_dict(self):\n ret = self.get()\n return ret\n\n\nclass OaDoi(object):\n def __init__(self, doi):\n self.doi = doi\n self.url = u\"https://api.oadoi.org/v2/{}\".format(doi)\n self.data = {}\n\n def get(self):\n r = requests.get(self.url)\n if r.status_code == 200:\n self.data = r.json()\n\n def to_dict(self):\n self.data[\"oadoi_url\"] = self.url\n return self.data\n\n\n\nclass CrossrefMetadata(object):\n def __init__(self, doi):\n self.doi = doi\n self.url = 
u\"https://api.crossref.org/works/{}/transform/application/vnd.citationstyles.csl+json\".format(doi)\n self.data = {}\n\n def get(self):\n r = requests.get(self.url)\n if r.status_code == 200:\n self.data = r.json()\n\n def to_dict(self):\n self.data[\"crossref_url\"] = self.url\n return self.data\n\n","sub_path":"doi.py","file_name":"doi.py","file_ext":"py","file_size_in_byte":3920,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"133697778","text":"import random\nimport re\nimport requests\nimport logging\n\nfrom django.conf import settings\nfrom django.contrib.sites.models import Site\nfrom django.template.loader import render_to_string\nfrom django.urls import reverse\nfrom django.utils.crypto import get_random_string\nfrom django.utils.translation import ugettext as _\nimport dns.resolver\n\nfrom dusken.tasks import send_mail_task\n\nlogger = logging.getLogger(__name__)\n\n\ndef generate_username(first_name, last_name):\n \"\"\" Generate a fairly unique username based on first and last name.\n Example: nikolakr1234\n \"\"\"\n whitespace_re = re.compile(r'\\s')\n first_name = first_name.encode('ascii', 'ignore').lower()[:6].decode('utf-8')\n last_name = last_name.encode('ascii', 'ignore').lower()[:2].decode('utf-8')\n random_number = random.randint(1, 9999)\n username = '{}{}{:04d}'.format(first_name, last_name, random_number)\n username = whitespace_re.sub('', username)\n return username\n\n\nclass InlineClass(object):\n def __init__(self, dictionary):\n self.__dict__ = dictionary\n\n\ndef send_validation_email(user):\n if user.email_is_confirmed:\n # Bail\n return\n\n site = Site.objects.get(pk=settings.SITE_ID)\n url_kwargs = {'slug': str(user.uuid), 'email_key': user.email_key}\n\n context = {\n 'user': user,\n 'validation_url': 'https://{}{}'.format(site.domain, reverse('user-email-validate', kwargs=url_kwargs)),\n 'site_name': site.name\n }\n\n message = render_to_string('dusken/emails/validation_email.txt', context)\n html_message = render_to_string('dusken/emails/validation_email.html', context)\n\n from_email = settings.DEFAULT_FROM_EMAIL\n\n send_mail_task.delay(\n _('Confirm your email address at Chateau Neuf'),\n from_email,\n message,\n [user.email],\n html_message=html_message)\n\n\ndef create_email_key():\n return get_random_string()\n\n\ndef send_sms(to, message):\n if settings.TESTING:\n return True\n url = '{}send'.format(settings.TEKSTMELDING_API_URL)\n payload = {\n 'to': str(to),\n 'message': message\n }\n headers = {\n 'Authorization': 'Token ' + settings.TEKSTMELDING_API_KEY\n }\n response = requests.post(url, json=payload, headers=headers)\n if response.status_code != 200:\n logger.warning('Failed to send SMS, status_code={} payload={}'.format(response.status_code, payload))\n return\n\n return response.json().get('outgoing_id')\n\n\ndef send_validation_sms(user):\n if user.phone_number_confirmed:\n # Bail\n return\n\n # Create a key if needed\n if not user.phone_number_key:\n user.phone_number_key = create_phone_key()\n user.save()\n\n message = _('Confirm your phone number at Chateau Neuf with this code:')\n message = message + ' ' + user.phone_number_key\n\n return send_sms(to=user.phone_number, message=message)\n\n\ndef create_phone_key(length=6):\n return ''.join([random.choice('0123456789') for i in range(length)])\n\n\ndef email_exists(email):\n from dusken.models import DuskenUser\n return DuskenUser.objects.filter(email=email).exists()\n\n\ndef phone_number_exist(phone_number):\n from dusken.models 
import DuskenUser\n return DuskenUser.objects.filter(phone_number=phone_number).exists()\n\n\ndef mx_record_exists(domain):\n \"\"\"True if the domain exists and if it has an MX record, False otherwise\"\"\"\n try:\n dns.resolver.query(domain, 'MX')\n return True\n except (dns.resolver.NXDOMAIN, dns.resolver.NoAnswer):\n return False\n","sub_path":"dusken/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"300193363","text":"import argparse\nimport os\n\nfrom .dataset import TxtDataset, XMLDataset\nfrom .embedding import Embedding\n\n\ndef get_arguments():\n \"\"\"\n Defines and reads command line arguments\n :return: Command line arguments\n \"\"\"\n parser = argparse.ArgumentParser(description='Generate word embedding vectors')\n parser.add_argument('-v', '--verbose', help='verbose mode', action='count', default=0)\n parser.add_argument('dir', type=str,\n help='directory to read files')\n parser.add_argument('--filetype', type=str, help='raw data filetype', default='txt', choices=['txt', 'xml'])\n parser.add_argument('--dim', type=int, help='dimensions of word embedding vectors', default=200)\n # parser.add_argument('--ontology', type=str, help='UMLS ontology for semantic mapping and key', default='oncology')\n parser.add_argument('--apikey', type=str, help='API key to access UMLS ontology', default='oncology')\n parser.add_argument('--categories', type=str, help='categories within samples to keep')\n parser.add_argument('--model', type=str, help='choice of word embedding model', default='word2vec', choices=['word2vec', 'fasttext'])\n parser.add_argument('--workers', type=int, help='number of workers to parallelise training of word embedding model',\n default=1)\n parser.add_argument('--visualise', help='make tSNE plot of trained word embedding model', action = 'count', default=0)\n\n return parser.parse_args()\n\n\ndef main():\n \"\"\"\n dataset -> transformed dataset -> word embedding vector\n :return: None\n \"\"\"\n\n args = get_arguments()\n\n if not os.listdir(args.dir):\n raise ValueError('No files found in file directory')\n\n\n if args.filetype == 'txt':\n dataset = TxtDataset(args.dir, args.verbose, args.categories)\n else:\n dataset = XMLDataset(args.dir, args.verbose, args.categories)\n\n dataset.preprocess()\n embedding = Embedding(args.verbose)\n embedding.generate(args.model, args.dim, args.workers)\n if args.visualise:\n embedding.tSNE()\n","sub_path":"medembed/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2065,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"168319905","text":"import pygame\nimport math\n\nfrom pygame.locals import *\nfrom OpenGL.GL import *\nfrom OpenGL.GLU import *\n\nSCREEN_WIDTH = 640\nSCREEN_HEIGHT = 480\nFPS = 60\n\ncubeVertices = ((1,1,1),(1,1,-1),(1,-1,-1),(1,-1,1),(-1,1,1),(-1,-1,-1),(-1,-1,1),(-1,1,-1))\ncubeEdges = ((0,1),(0,3),(0,4),(1,2),(1,7),(2,5),(2,3),(3,6),(4,6),(4,7),(5,6),(5,7))\ncubeQuads = ((0,3,6,4),(2,5,6,3),(1,2,5,7),(1,0,4,7),(7,4,6,5),(2,3,0,1))\n\ndef WireCube():\n glBegin(GL_LINES)\n for cubeEdge in cubeEdges:\n for cubeVertex in cubeEdge:\n glVertex3fv(cubeVertices[cubeVertex])\n glEnd()\n\ndef SolidCube():\n glBegin(GL_QUADS)\n for cubeQuad in cubeQuads:\n for cubeVertex in cubeQuad:\n glVertex3fv(cubeVertices[cubeVertex])\n glEnd()\n\ndef Ship():\n glBegin(GL_LINES)\n for shipEdge in shipEdges:\n for vertex in shipEdge:\n 
glVertex3fv(shipVertices[vertex])\n glEnd()\n\nclass GameState():\n def __init__(self):\n self.shipVertices = ((-0.25,0,0), (0,1,0), (0.25,0,0))\n self.shipEdges = ((0,1),(0,2),(1,2))\n self.player_max_speed = 10\n self.player_speed = 0\n self.player_rotation = 0\n self.daccl = 0.01\n\n def ship(self):\n glMatrixMode(GL_MODELVIEW)\n glPushMatrix()\n glRotatef(self.player_rotation,0,0,1)\n glBegin(GL_LINES)\n for shipEdge in self.shipEdges:\n for vertex in shipEdge:\n glVertex3fv(self.shipVertices[vertex])\n glEnd()\n glPopMatrix()\n\n\n def update(self, rotation, accl):\n self.player_rotation += rotation\n self.player_speed += accl\n if self.player_speed > self.player_max_speed:\n self.player_speed = self.player_max_speed\n self.player_speed += self.daccl\n\nclass App:\n\n def __init__(self):\n self._running = True\n self._window = None\n self._gluPerspective = None\n self._gameState = GameState()\n self.size = self.width, self.height = SCREEN_WIDTH, SCREEN_HEIGHT\n self.cameraPos = [0,0,-20]\n\n def on_init(self):\n pygame.init()\n pygame.display.set_caption(\"Dank memes and tough dreams\")\n self._window = pygame.display.set_mode(self.size, DOUBLEBUF|OPENGL)\n self._running = True\n self._gluPerspective = gluPerspective(34, (self.width/self.height), 0.1, 50.0)\n glTranslatef(0,0,-20)\n\n def on_event(self, event=None):\n keys = pygame.key.get_pressed()\n if event.type == pygame.QUIT:\n self._running = False\n if keys[pygame.K_ESCAPE]:\n self._running = False\n if keys[pygame.K_w]:\n self.cameraPos[1] += 0.01\n if keys[pygame.K_s]:\n self.cameraPos[1] -= 0.01\n if keys[pygame.K_a]:\n self._gameState.update(0.5,0)\n if keys[pygame.K_d]:\n self._gameState.update(-0.5,0)\n\n def on_loop(self):\n pass\n\n def on_render_player(self):\n self._gameState.ship()\n \n def on_render(self):\n glMatrixMode(GL_MODELVIEW)\n glPushMatrix()\n glTranslatef(self.cameraPos[0],self.cameraPos[1],self.cameraPos[2])\n WireCube()\n glPopMatrix()\n \n def on_cleanup(self):\n pygame.quit() \n\n def on_execute(self):\n if self.on_init() == False:\n self._running = False\n while( self._running ):\n glClear(GL_COLOR_BUFFER_BIT|GL_DEPTH_BUFFER_BIT)\n for event in pygame.event.get():\n self.on_event(event)\n if pygame.key.get_focused:\n self.on_event(event)\n self.on_loop()\n self.on_render_player()\n glPushMatrix() \n self.on_render()\n glPopMatrix()\n pygame.display.flip()\n\n self.on_cleanup()\n\nif __name__ == \"__main__\" :\n app = App()\n app.on_execute()","sub_path":"base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":3858,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"265822042","text":"\"\"\"Forms for paylogic customizations.\"\"\"\nfrom django import forms\n\nfrom django.core.urlresolvers import reverse\nfrom django_select2.fields import HeavySelect2TagField\n\n\nclass GatekeeperApprove(forms.Form):\n \"\"\"Gatekeeper approval form.\"\"\"\n target_branch = HeavySelect2TagField(\n 'target_branch',\n data_view='lookup_target_branches',\n )\n\n def __init__(self, case_id, *args, **kwargs):\n \"\"\"Set the lookup url according to a given Fogbugz case_id.\n\n :param case_id: `int` Fogbugz case id.\n \"\"\"\n super(GatekeeperApprove, self).__init__(*args, **kwargs)\n widget = self.fields['target_branch'].widget\n widget.options['minimumInputLength'] = 0\n widget.options['maximumSelectionSize'] = 1\n widget.options['width'] = '200px'\n\n widget.url = widget.options['ajax']['url'] = reverse(\n self.fields['target_branch'].widget.view, 
kwargs=dict(case_id=case_id))\n\n def clean_target_branch(self):\n \"\"\"Make single value out of multiple.\"\"\"\n value = self.cleaned_data['target_branch']\n if value:\n return value[0]\n","sub_path":"paylogic/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1127,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"336896678","text":"import argparse\nimport sys\nfrom pht.internal import AbstractTrain, StationRuntimeInfo\n\n\ndef _fprint(msg: str):\n print(msg, flush=True)\n\n\ndef cmd_for_train(train: AbstractTrain):\n _describe = 'describe'\n _run = 'run'\n\n parser = argparse.ArgumentParser(description='Command-line interface for running a train')\n parser.add_argument('TOOL', type=str, choices=['describe', 'run'])\n parser.add_argument(\n '--station-id', type=int, required=True, help='The Station Id where the train is run at')\n parser.add_argument(\n '--track-info', type=str, required=False, help='Info on the Track that the Train is currently running on')\n parser.add_argument(\n '--user-data', type=str, required=False, help='Custom User data')\n\n args = parser.parse_args()\n info = StationRuntimeInfo(station_id=args.station_id , track_info=args.track_info, user_data=args.user_data)\n tool = args.TOOL\n if tool == _run:\n _fprint(train.run(info).to_json_string())\n elif tool == _describe:\n _fprint(train.describe(info).to_json_string())\n else:\n sys.exit(1)\n","sub_path":"pht/entrypoint.py","file_name":"entrypoint.py","file_ext":"py","file_size_in_byte":1104,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"484438221","text":"import numpy as np\nimport itertools\nfrom ....Functions.Simulation.VarSimu.run_single_simu import run_single_simu\n\n\ndef run(self):\n \"\"\"Run each simulation contained\"\"\"\n # Check var_simu parameters\n self.check_param()\n\n # Get xoutput to store results\n xoutput = self.parent.parent\n\n # Extact simulation list and ParamExplorerValue list\n simu_dict = self.get_simulations()\n\n # Fill input_parameters\n xoutput.nb_simu = simu_dict[\"nb_simu\"]\n xoutput.paramexplorer_list = simu_dict[\"paramexplorer_list\"]\n simulation_list = simu_dict[\"simulation_list\"]\n\n # Construct results\n for datakeeper in self.datakeeper_list:\n xoutput.xoutput_dict[datakeeper.symbol] = datakeeper\n datakeeper.result = [None] * self.nb_simu\n\n # Execute the reference simulation it is included in the simulation list\n # Otherwise, the reference simulation is already executed in the simulation.run method\n nb_simu = self.nb_simu\n ref_simu_index = self.ref_simu_index\n index_list = list(range(nb_simu))\n\n ref_simu_in_multsim = isinstance(self.ref_simu_index, int)\n\n if ref_simu_in_multsim:\n logger = self.get_logger()\n logger.info(\"Computing reference simulation\")\n\n simulation = simulation_list.pop(ref_simu_index)\n index_list.pop(ref_simu_index)\n xoutput.simu = simulation\n\n # Run the simulation handling errors\n run_single_simu(\n xoutput,\n self.datakeeper_list,\n simulation,\n ref_simu_index,\n self.stop_if_error,\n self.ref_simu_index,\n self.is_keep_all_output,\n )\n\n # Set back the var_simu\n simulation.var_simu = self\n print(\n \"\\r[\"\n + \"=\" * (50 * (1) // (nb_simu))\n + \" \" * (50 - ((50) // (nb_simu)))\n + \"] {:3d}%\".format(((100 * 1) // (nb_simu))),\n end=\"\",\n )\n\n # Reuse some intermediate results from reference simulation (if requested)\n for simu in simulation_list:\n self.set_reused_data(simu, xoutput)\n\n 
# Execute the other simulations\n nb_simu = self.nb_simu\n for idx, [i, simulation] in zip(index_list, enumerate(simulation_list)):\n # Run the simulation handling errors\n run_single_simu(\n xoutput,\n self.datakeeper_list,\n simulation,\n idx,\n self.stop_if_error,\n self.ref_simu_index,\n self.is_keep_all_output,\n )\n\n print(\n \"\\r[\"\n + \"=\" * (50 * (i + 1 + ref_simu_in_multsim) // (nb_simu))\n + \" \" * (50 - ((50 * (i + 1 + ref_simu_in_multsim)) // (nb_simu)))\n + \"] {:3d}%\".format(((100 * (i + 1 + ref_simu_in_multsim)) // (nb_simu))),\n end=\"\",\n )\n\n # Running postprocessings\n if self.postproc_list:\n logger.info(\"Running var_simu postprocessings...\")\n for postproc in self.postproc_list:\n postproc.run(xoutput)\n","sub_path":"pyleecan/Methods/Simulation/VarSimu/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":2989,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"281648038","text":"import logging\nimport re\nfrom typing import List\n\nfrom nio import (\n AsyncClient,\n InviteMemberEvent,\n JoinError,\n MatrixRoom,\n MegolmEvent,\n RoomMessageText,\n)\n\nfrom matrix_reminder_bot.bot_commands import Command\nfrom matrix_reminder_bot.config import CONFIG\nfrom matrix_reminder_bot.errors import CommandError\nfrom matrix_reminder_bot.functions import send_text_to_room\nfrom matrix_reminder_bot.storage import Storage\n\nlogger = logging.getLogger(__name__)\n\n\nclass Callbacks(object):\n \"\"\"Callback methods that fire on certain matrix events\n\n Args:\n client: nio client used to interact with matrix\n store: Bot storage\n \"\"\"\n\n def __init__(self, client: AsyncClient, store: Storage):\n self.client = client\n self.store = store\n\n @staticmethod\n def str_strip(s: str, phrases: List[str]) -> str:\n \"\"\"\n Strip instances of a string in leading and trailing positions around another string.\n Like str.rstrip but with strings instead of individual characters.\n Also runs str.strip on s.\n\n Args:\n s: The string to strip.\n phrases: A list of strings to strip from s.\n \"\"\"\n # Strip the string of whitespace\n s = s.strip()\n\n for phrase in phrases:\n # Use a regex to strip leading strings from another string\n #\n # We use re.S to treat the input text as one line (aka not strip leading\n # phrases from every line of the message.\n match = re.match(f\"({phrase})*(.*)\", s, flags=re.S)\n\n # Extract the text between the parentheses in the pattern above\n # Note that the above pattern is guaranteed to find a match, even with an empty str\n s = match.group(2)\n\n # Now attempt to strip trailing strings.\n match = re.match(f\"(.*)({phrase})$\", s, flags=re.S)\n if match:\n s = match.group(1)\n\n # After attempting to strip leading and trailing phrases from the string, return it\n return s\n\n async def message(self, room: MatrixRoom, event: RoomMessageText):\n \"\"\"Callback for when a message event is received\"\"\"\n # Ignore messages from ourselves\n if event.sender == self.client.user:\n return\n\n # Ignore broken events\n if not event.body:\n return\n\n # We do some stripping just to remove any surrounding formatting\n formatting_chars = [\"
<p>\", \"\\\\n\", \"
</p>\"]\n        body = self.str_strip(event.body, formatting_chars)\n        formatted_body = (\n            self.str_strip(event.formatted_body, formatting_chars)\n            if event.formatted_body\n            else None\n        )\n\n        # Use the formatted message text, or the basic text if no formatting is available\n        msg = formatted_body or body\n        if not msg:\n            logger.info(\"No msg!\")\n            return\n\n        # Check whether this is a command\n        #\n        # We use event.body here as formatted bodies can start with <p>
    instead of the\n # command prefix\n if not body.startswith(CONFIG.command_prefix):\n return\n\n logger.debug(\"Command received: %s\", msg)\n\n # Assume this is a command and attempt to process\n command = Command(self.client, self.store, msg, room, event)\n\n try:\n await command.process()\n except CommandError as e:\n # An expected error occurred. Inform the user\n msg = f\"Error: {e.msg}\"\n await send_text_to_room(self.client, room.room_id, msg)\n\n # Print traceback\n logger.exception(\"CommandError while processing command:\")\n except Exception as e:\n # An unknown error occurred. Inform the user\n msg = f\"An unknown error occurred: {e}\"\n await send_text_to_room(self.client, room.room_id, msg)\n\n # Print traceback\n logger.exception(\"Unknown error while processing command:\")\n\n async def invite(self, room: MatrixRoom, event: InviteMemberEvent):\n \"\"\"Callback for when an invite is received. Join the room specified in the invite\"\"\"\n logger.debug(f\"Got invite to {room.room_id} from {event.sender}.\")\n\n # Attempt to join 3 times before giving up\n for attempt in range(3):\n result = await self.client.join(room.room_id)\n if type(result) == JoinError:\n logger.error(\n f\"Error joining room {room.room_id} (attempt %d): %s\",\n attempt,\n result.message,\n )\n else:\n logger.info(f\"Joined {room.room_id}\")\n break\n\n async def decryption_failure(self, room: MatrixRoom, event: MegolmEvent):\n \"\"\"Callback for when an event fails to decrypt. Inform the user\"\"\"\n logger.error(\n f\"Failed to decrypt event '{event.event_id}' in room '{room.room_id}'!\"\n f\"\\n\\n\"\n f\"Tip: try using a different device ID in your config file and restart.\"\n f\"\\n\\n\"\n f\"If all else fails, delete your store directory and let the bot recreate \"\n f\"it (your reminders will NOT be deleted, but the bot may respond to existing \"\n f\"commands a second time).\"\n )\n\n user_msg = (\n \"Unable to decrypt this message. \"\n \"Check whether you've chosen to only encrypt to trusted devices.\"\n )\n\n await send_text_to_room(\n self.client,\n room.room_id,\n user_msg,\n reply_to_event_id=event.event_id,\n )\n","sub_path":"matrix_reminder_bot/callbacks.py","file_name":"callbacks.py","file_ext":"py","file_size_in_byte":5644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"30408528","text":"from higher_lower_art import logo, vs\nfrom higher_lower_game_data import data\nfrom random import randint\nimport random\n# asign_a = random.choice(data)\n#print(f\"{asign_a['name']}, {asign_a['description']}, {asign_a['country']}\")\n# asign_b = data[random.randint(0,len(data))]\n# print(asign_b)\n# print(f\"{asign_b['name']}, {asign_b['description']}, {asign_b['country']}\")\n\ndef asign():\n global data\n a = random.randint(0,len(data))\n x = data[a]\n del(data[a])\n return x\n# def compare(a, b):\n# if a[\"follower_count\"] > b[\"follower_count\"]:\n# a = a\n# b = asign()\n# i += 1\n# print(f\"You are right! Current score : {i}\")\n# elif a[\"follower_count\"] < b[\"follower_count\"]:\n# prin(\"You Lose!\")\nprint(logo)\ni = 0\nasign_a = asign()\nasign_b = asign()\nwhile True:\n print(f\"Compare A: {asign_a['name']}, {asign_a['description']}, {asign_a['country']}\")\n print(vs)\n print(f\"Compare B: {asign_b['name']}, {asign_b['description']}, {asign_b['country']}\")\n who = input(\"Who has more followers? 
Type A or Type B :\").upper()\n if who == \"A\":\n if asign_a[\"follower_count\"] > asign_b[\"follower_count\"]:\n asign_a = asign_a\n asign_b = asign()\n i += 1\n print(f\"You are right! Current score : {i}\")\n elif asign_a[\"follower_count\"] < asign_b[\"follower_count\"]:\n print(f\"Sorry, you lose. Your Score {i}\")\n break\n elif who == \"B\":\n if asign_b[\"follower_count\"] > asign_a[\"follower_count\"]:\n asign_a = asign_b\n asign_b = asign()\n i += 1\n print(f\"You are right! Current score : {i}\")\n elif asign_b[\"follower_count\"] < asign_a[\"follower_count\"]:\n print(f\"Sorry, you lose. Your Score {i}\")\n break\n elif i == 48:\n print(\"you are a Legend 😎\")\n break","sub_path":"higher_lower_game.py","file_name":"higher_lower_game.py","file_ext":"py","file_size_in_byte":1810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"348144704","text":"# -*- coding: utf-8 -*-\nimport random\nimport numpy as np\ndef MakeScheduleForClass(clas, n):\n schedule = []\n i = n\n while i < len(data):\n j = 0\n while j < int(data[i][1]):\n schedule.append([clas, data[i][0], data[i][2], data[i][3]])\n j += 1\n i += 1\n for i in schedule:\n if i[2] == '0':\n for j in data:\n if j[0] == clas:\n i[2] = j[1]\n break\n if i[3] == '0':\n i[3] = i[0]\n return random.sample(schedule, len(schedule))\ndef CountCoincidence(schedule, n):\n errors = []\n suberrors = []\n i = 0\n while i < len(schedule[0]):\n for j in range(n):\n lst = [schedule[j][i], schedule[j][i + 1], schedule[j][i + 2], schedule[j][i + 3]]\n for s, k in enumerate(lst):\n if lst.count(k) > 1:\n suberrors.append(j + i)\n break\n i += 4\n arr = np.array(schedule)\n arr = arr.transpose()\n for q, j in enumerate(arr[2]):\n for w, k in enumerate(j):\n if list(j).count(k) > 1:\n errors.append([w, q])\n break\n return [errors, suberrors]\ndef Mutate(schedule, errors):\n if len(errors) == 1:\n for j in range(3):\n gy = random.randint(8,11)\n schedule[j][errors[0]], schedule[j][gy] = schedule[j][gy], schedule[j][errors[0]]\n for i in range(len(errors)):\n for j in range(3):\n gy = random.randint(4,7)\n schedule[j][errors[i]], schedule[j][i + gy] = schedule[j][i + gy], schedule[j][errors[i]]\n return\ndef Crossover(schedule, errors):\n for i in errors:\n schedule[i[0]][i[1]], schedule[i[0]][random.randint(0, 19)] = schedule[i[0]][random.randint(0, 19)], schedule[i[0]][i[1]]\n return\ndef PrettyOutput(schedule, n):\n f = open(\"schedule.txt\", \"w\")\n i = 0\n if len(schedule) < 19:\n while i < len(schedule[0]):\n if i == 0: f.write(\"Monday:\\n\")\n elif i == 4: f.write(\"Tuesday:\\n\")\n elif i == 8: f.write(\"Wednesday:\\n\")\n elif i == 12: f.write(\"Thursday:\\n\")\n elif i == 16: f.write(\"Friday:\\n\")\n if i % 4 == 0: f.write(\"1 lesson:\\n\")\n elif i % 4 == 1: f.write(\"2 lesson:\\n\")\n elif i % 4 == 2: f.write(\"3 lesson:\\n\")\n elif i % 4 == 3: f.write(\"4 lesson:\\n\")\n for j in range(n):\n for k in schedule[j][i]:\n if k == 'Reading' or k == 'PT' or k == 'English' or k == 'Arts':\n f.write(k + '\\t\\t')\n else: f.write(k + '\\t')\n f.write('\\n')\n i += 1\n f.close()\nf = open('data.txt', 'r')\nn = 3\ndata = []\nfor line in f:\n data.append(line.strip().split(', '))\nf.close()\nwhile 1:\n schedule = []\n i = 0\n while i < n:\n schedule.append(MakeScheduleForClass(data[i][0], n))\n i += 1\n while CountCoincidence(schedule, n)[0] != []:\n Crossover(schedule, CountCoincidence(schedule, n)[0])\n temp = schedule[:]\n while CountCoincidence(schedule, n)[1] != []:\n if i > 100:\n 
break\n Mutate(schedule, CountCoincidence(schedule, n)[1])\n #print(CountCoincidence(schedule, n)[1])\n i += 1\n if CountCoincidence(schedule, n)[1] == []: break\n else: continue\n#for i in schedule: print(i)\nPrettyOutput(schedule, n)","sub_path":"Lab_6/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":3429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"8857486","text":"# Cross-situational learning\r\n\r\n# Data available to learning:\r\n# Two objects are present in each situation, one is always missing\r\n# Either, one of the two objects is named, by producing its word; or else, the distractor is produced\r\n\r\n# Word 0 -> object 0\r\n# Word 1 -> object 1\r\n# Word 2 -> object 2\r\n# and Word 3 is a distractor.\r\n#\r\n# Our learning is invariant to the order of presentation, so there's no point\r\n# in simulating lots of long dialogues... the only thing that matters is the\r\n# relative proportions of different examples, and there the law of large\r\n# numbers will kick in. So we just evaluate the posterior once.\r\n#\r\n# NOTE: this example really pushes the limits of the importance sampling\r\n# (notice that the below graph used 100,000 particles, and still has visible\r\n# asymmetries caused by the sampling; when I tried it with 10,000 particles it\r\n# got the wrong answer! 100,000 particles takes a few minutes on my\r\n# laptop.). This is a perfect place to use MCMC instead.\r\n\r\nfrom nips import *\r\n\r\nCS_OBJECTS = 3\r\nCS_WORDS = 4\r\ndata_points = []\r\nfor missing_obj in xrange(CS_OBJECTS):\r\n present_objs = range(CS_OBJECTS)\r\n present_objs.remove(missing_obj)\r\n object_prior = np.empty(CS_OBJECTS)\r\n object_prior[missing_obj] = 0.01\r\n object_prior[present_objs] = (1 - 0.01) / 2\r\n for target_obj in present_objs:\r\n data_points.append(conpact2.LDataUtt(LISTENER_DEPTH - 1,\r\n target_obj,\r\n np.log(object_prior)))\r\n for distractor_word in xrange(CS_OBJECTS, CS_WORDS):\r\n data_points.append(conpact2.LDataUtt(LISTENER_DEPTH - 1,\r\n distractor_word,\r\n np.log(object_prior)))\r\n\r\nd = dom(adjectives=4, objects=3)\r\nlearner = conpact2.FixedSupportImportanceSampler(d, 23, PARTICLES * 1000,\r\n data=data_points * 10)\r\nmatshow(np.exp(learner.marginal_dist_n(LISTENER_DEPTH)), cmap=\"binary\",\r\n vmin=0, vmax=1)\r\nxlabel(\"Objects\")\r\nylabel(\"Words\")\r\ngcf().savefig(\"cross-sit.pdf\", bbox_inches=\"tight\")\r\n","sub_path":"Model/other_wrappers/cross-sit.py","file_name":"cross-sit.py","file_ext":"py","file_size_in_byte":2171,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"216064289","text":"import requests #아나콘다 깔 때 같이 깔림 \nfrom bs4 import BeautifulSoup\n\nURL = 'http://192.168.41.3:7070/HELLOWEB/tel_list.jsp' \nresponse = requests.get(URL) \n\nhtml = response.text\nsoup = BeautifulSoup(html, 'html.parser')\n\n\n#print(soup.select('td')) #200:제대로 나옴\n\ntds = soup.find_all('td')\n\nfor i in tds: #배열 안에 있는것들을 뽑아서 i에 삽입\n print(i.text) #태그를 제외한 텍스트 출력\n\n\n\n\n\n\n","sub_path":"basic_python/HELLOPYTHON/day09/mysoup02.py","file_name":"mysoup02.py","file_ext":"py","file_size_in_byte":441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"221692418","text":"import cv2 as cv\nimport numpy as np\n\n\nclass yolo:\n def __init__(self):\n # Initialize the parameters\n self.confThreshold = 0.5 # Confidence threshold\n self.nmsThreshold = 0.4 # Non-maximum suppression threshold\n\n # 
self.inpWidth = 416 # 608\n        # self.inpHeight = 416 # 608\n        self.inpWidth = 320 # 608\n        self.inpHeight = 320 # 608\n\n        self.modelConfiguration = 'yolov3-KD.cfg'\n        self.modelWeights = 'yolov3-KD_last.weights'\n\n        self.net = cv.dnn.readNetFromDarknet(self.modelConfiguration, self.modelWeights)\n\n        self.net.setPreferableBackend(cv.dnn.DNN_BACKEND_CUDA)\n        self.net.setPreferableTarget(cv.dnn.DNN_TARGET_CUDA)\n        # self.net.setPreferableBackend(cv.dnn.DNN_BACKEND_OPENCV)\n        # self.net.setPreferableTarget(cv.dnn.DNN_TARGET_CPU)\n\n    def getOutputsNames(self, net):\n        layersNames = net.getLayerNames()\n        return [layersNames[i[0] - 1] for i in net.getUnconnectedOutLayers()]\n\n    def drawPred(self, left, top, right, bottom, frame):\n        \"\"\"\n        Draw the license plate bounding box\n        :param left: left boundary\n        :param top: top boundary\n        :param right: right boundary\n        :param bottom: bottom boundary\n        :param frame: image\n        :return: the frame with the outline drawn from the four coordinates\n        \"\"\"\n        frame = cv.rectangle(frame, (left, top), (right, bottom), (0, 255, 0), 3)\n        return frame\n\n\n    def returnPred(self, frame, left, top, right, bottom):\n        \"\"\"\n        Return the license plate image\n        :param frame: image\n        :param left: left boundary\n        :param top: top boundary\n        :param right: right boundary\n        :param bottom: bottom boundary\n        :return: the plate image cropped from the frame along the given boundaries\n        \"\"\"\n        targ = frame[top:bottom, left:right]\n        return targ\n\n\n    def postprocess(self, frame, outs):\n        frameHeight = frame.shape[0]\n        frameWidth = frame.shape[1]\n\n        classIds = []\n        confidences = []\n        boxes = []\n        for out in outs:\n            for detection in out:\n                scores = detection[5:]\n                classId = np.argmax(scores)\n                confidence = scores[classId]\n                if confidence > self.confThreshold:\n                    center_x = int(detection[0] * frameWidth)\n                    center_y = int(detection[1] * frameHeight)\n                    width = int(detection[2] * frameWidth)\n                    height = int(detection[3] * frameHeight)\n                    left = int(center_x - width / 2)\n                    top = int(center_y - height / 2)\n                    classIds.append(classId)\n                    confidences.append(float(confidence))\n                    boxes.append([left, top, width, height])\n\n        indices = cv.dnn.NMSBoxes(boxes, confidences, self.confThreshold, self.nmsThreshold)\n        plate_list = []\n        for i in indices:\n            i = i[0]\n            box = boxes[i]\n            left = box[0]\n            top = box[1]\n            width = box[2]\n            height = box[3]\n            frame = self.drawPred(left, top, left + width, top + height, frame)\n            plate_list.append(self.returnPred(frame, left, top, left + width, top + height))\n        return frame, plate_list\n\n    def return_frame(self, frame):\n        \"\"\"\n        Receive an image and return the image with the plates marked plus a list of plate images\n        :param frame: a frame captured by the camera\n        :return:\n        self.postprocess() returns two values: the image with the plates marked and the list of plate images\n        \"\"\"\n        blob = cv.dnn.blobFromImage(frame, 1 / 255, (self.inpWidth, self.inpHeight), [0, 0, 0], 1, crop=False)\n        self.net.setInput(blob)\n        outs = self.net.forward(self.getOutputsNames(self.net))\n        return self.postprocess(frame, outs)\n","sub_path":"plateDetect.py","file_name":"plateDetect.py","file_ext":"py","file_size_in_byte":3843,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"233401825","text":"import math\nimport os\nimport random\nimport re\nimport sys\n\nsys.stdin = open('input.txt', 'r')\nsys.stdout = open('output.txt', 'w')\n\n# Complete the plusMinus function below.\ndef plusMinus(arr):\n    zero = 0\n    pos = 0\n    neg = 0\n    for e in arr:\n        if e == 0:\n            zero +=1\n        elif e > 0:\n            pos += 1\n        else:\n            neg +=1\n    res_p = round((pos / n), 6)\n    res_n = round((neg / n), 6)\n    res_z = round((zero / n), 6)\n    print(res_p)\n    print(res_n)\n    print(res_z)\n\n\nif __name__ == '__main__':\n    n = int(input())\n\n    arr = list(map(int, input().rstrip().split()))\n\n    
plusMinus(arr)\n\n","sub_path":"Plus_Minus/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"322503876","text":"#!/anaconda3/bin/python\n\nimport sys\n\n# Definition for singly-linked list.\nclass ListNode(object):\n def __init__(self, x):\n self.val = x\n self.next = None\n\nclass Solution(object):\n def detectCycle(self, head):\n if not head or not head.next:\n return None\n fast = slow = head\n while slow and fast and fast.next:\n fast = fast.next.next\n slow = slow.next\n if fast == slow:\n break\n if fast != slow:\n return None\n fast = head\n while fast != slow:\n fast = fast.next\n slow = slow.next\n fast.next = None\n return fast\n\nif __name__ == \"__main__\":\n head = ListNode(3)\n cur = head\n cur.next = ListNode(2)\n repeat = cur = cur.next\n \n for i in [0,-4]:\n cur.next = ListNode(i)\n cur = cur.next\n cur.next = repeat\n a = Solution()\n res = a.detectCycle(head)\n print(res.val)\n \n","sub_path":"q142.py","file_name":"q142.py","file_ext":"py","file_size_in_byte":974,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} +{"seq_id":"211698936","text":"# Demo script to show how to use tenon\n\n# Define global variables\nimport sys, os\npwd = os.getcwd()\nrootdir = os.path.join(pwd, '../../..') # This folder is /examples/demo_lsp/\ntenonpath = os.path.join(rootdir, 'code')\noutputdir = os.path.join(rootdir, 'cache/lsp_synthesized')\nprint(tenonpath)\nsys.path.append(os.path.expanduser(tenonpath))\n\nimport tenon\nsys.path.append(pwd)\nimport lsppose\n\nimport argparse\n\ndef main():\n util = lsppose.Util()\n models = lsppose.Models()\n util.rootdir = rootdir\n\n import tenon.logging as L\n # outputdir = '//../cache/lsp_synthesized'\n L.setLevel(tenon.logging.INFO)\n L.info('Switch logging level to INFO')\n tenon.render.write('init.png')\n\n camera = tenon.obj.get('Camera') # Unused in this demo\n scene = util.setup_scene()\n\n objs = [\n # models.humanModel(),\n models.bodyMesh(),\n models.upperCloth(),\n models.lowerCloth(),\n models.hair(),\n models.eye()\n ]\n\n schedule_idx = range(1, 20001)\n schedule_idx = [2]\n for i in schedule_idx:\n util.update_scene(scene, i)\n\n # imgfilename = os.path.join(outputdir, 'imgs/%04d.png' % i)\n # tenon.render.write(imgfilename)\n # L.info('Synthetic image: %s' % L.prettify_filename(imgfilename))\n\n # depth_filename = os.path.join(outputdir, 'depth/%04d.png' % i)\n # tenon.render.DepthMode.enable()\n # tenon.render.write(depth_filename)\n # L.info('Depth: %s' % L.prettify_filename(depth_filename))\n # tenon.render.DepthMode.disable()\n\n # paint_filename = os.path.join(outputdir, 'parts/%04d.png' % i)\n # for obj in objs:\n # tenon.render.PaintMode.enable(obj)\n # tenon.render.write(paint_filename)\n # L.info('Semantic parts: %s' % L.prettify_filename(paint_filename))\n # for obj in objs:\n # tenon.render.PaintMode.disable(obj)\n\n # # Also save the joint annotation and part annotation\n # joint_filename = os.path.join(outputdir, 'joints/%04d.csv' % i)\n # joints = lsppose.JointInfo.export()\n # lsppose.JointInfo.serializeJointInfo(joint_filename, joints)\n\n import tenon.util as U\n radius = camera.location.length # Keep the radius fixed\n el = 0\n # for angle in [0, 90, 180, 270]:\n # for angle in range(0, 360, 10):\n for angle in [240]:\n for el in range(0, 90, 10):\n loc = U.sphere_location(radius, angle, el)\n camera.location = loc\n imgfilename = 
os.path.join(outputdir, 'imgs/%04d_%d_%d.png' % (i, angle, el))\n                tenon.render.write(imgfilename)\n                L.info('Synthetic image: %s' % L.prettify_filename(imgfilename))\n\n\nif __name__ == '__main__':\n    # Avoid execution during module import\n    if not tenon.inblender():\n        parser = argparse.ArgumentParser()\n        parser.add_argument('-f', '--blendfile', default = os.path.join(rootdir, 'data/fully_annotated.blend'))\n\n        args = parser.parse_args()\n        tenon.run(__file__, args.blendfile)\n    else:\n        main()\n","sub_path":"examples/demo_lsp/thesis_demo_viewpoint.py","file_name":"thesis_demo_viewpoint.py","file_ext":"py","file_size_in_byte":3100,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"61"} {"seq_id":"281148435","text":"import datetime\nimport json\n\nfrom django.db import transaction\nfrom django.http import HttpResponseNotAllowed, HttpResponseBadRequest, HttpResponse\nfrom django.shortcuts import render\nfrom django.views.decorators.csrf import csrf_exempt\n\nfrom TryIT.settings_global import EDITION_YEAR\n# from volunteers.models import RegisterVolunteers\nfrom editions.models import Edition\nfrom tickets.models import School, Degree\nfrom volunteers.forms import VolunteerForm\nfrom volunteers.models import Schedule, Volunteer, VolunteerSchedule\n\n\n@csrf_exempt\n@transaction.atomic\ndef submit(request):\n    if request.method == 'POST':\n        data = json.loads(request.body.decode('utf-8'))\n        form = VolunteerForm(data)\n        if form.is_valid():\n            volunteer = Volunteer()\n            volunteer.name = data['name'].strip()\n            volunteer.surname = data['lastname'].strip()\n            volunteer.email = data['email'].strip()\n            volunteer.expedient = data['expedient'].strip()\n            volunteer.phone = data['phone'].strip()\n            volunteer.shirt_size = data['shirt']\n            volunteer.android_phone = data['android']\n\n            if 'commentary' in data:\n                volunteer.commentary = data['commentary'].strip()\n\n            # School and degree\n            volunteer.school = School.objects.get(code=data['college'])\n            volunteer.degree = Degree.objects.get(code=data['degree'])\n\n            volunteer.save()\n\n            # Insert schedules\n            for schedule in data['schedule']:\n                volunteer_schedule = VolunteerSchedule()\n                volunteer_schedule.schedule = Schedule.objects.get(pk=schedule[4:])\n                volunteer_schedule.volunteer = volunteer\n\n                # Calculate schedule day\n                date = Edition.objects.get(year=EDITION_YEAR).start_date\n                volunteer_schedule.day = datetime.date(year=EDITION_YEAR, month=date.month, day=int(schedule[1:3]))\n\n                volunteer_schedule.save()\n\n            return HttpResponse()\n        else:\n            error = {'id': 1, 'message': 'Validation error'}\n            return HttpResponseBadRequest(json.dumps(error))\n    else:\n        return HttpResponseNotAllowed(permitted_methods=['POST'])\n\n\ndef volunteers(request):\n    day_list = []\n\n    edition = Edition.objects.get(year=EDITION_YEAR)\n    schedule_list = Schedule.objects.filter(edition=edition)\n    school_data = School.objects.all()\n\n    # Convert to JSON\n    school_list = [{'code': school.code, 'name': school.name, 'degrees': [\n        {'code': degree.code, 'name': degree.degree} for degree in school.degree_set.all()\n    ]} for school in school_data]\n\n    start_date = edition.start_date\n    end_date = edition.end_date\n    # Calculate the difference between the two dates. The difference between 26 and 23 is 3, so we need to add 1\n    ndays = int((end_date - start_date).days)\n\n    # calculate the days of the event, excluding weekends\n    for day in range(0, ndays + 1):\n        day_event = start_date + datetime.timedelta(days=day)\n        # .weekday returns a number between 0 and 6. 
If the difference is negative, the day is a weekday rather than a Saturday or Sunday\n        if int(day_event.weekday()) - 5 < 0:\n            day_list.append(day_event)\n\n    context = {"day_list": day_list,\n               "schedule_list": schedule_list,\n               "school_list": json.dumps(school_list)\n               }\n\n    return render(request, 'volunteers/volunteers.html', context)\n","sub_path":"volunteers/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} {"seq_id":"20874482","text":"#!/usr/bin/env python\r\nimport cPickle\r\nimport math\r\n\r\n\r\nclass completer(object):\r\n\r\n    def __init__(self, fName):\r\n        self.tree = self.loadTree(fName)\r\n        self.MAX_SCORE = 10\r\n        self.CA_CODE = {\r\n            \"01\": \"AB\",\r\n            \"02\": \"BC\",\r\n            \"03\": \"MB\",\r\n            \"04\": \"NB\",\r\n            \"05\": \"NL\",\r\n            \"07\": \"NS\",\r\n            \"08\": \"ON\",\r\n            \"09\": \"PE\",\r\n            \"10\": \"QC\",\r\n            \"11\": \"SK\",\r\n            \"12\": \"YT\",\r\n            \"13\": \"NT\",\r\n            \"14\": \"NU\"\r\n        }\r\n\r\n    def loadTree(self, fName):\r\n        try:\r\n            with open(fName, \"rb\") as g:\r\n                unpickler = cPickle.Unpickler(g)\r\n                tree = unpickler.load()\r\n                return tree\r\n        except IOError:\r\n            return None\r\n\r\n    # here we use a rather simple distance score function\r\n    def getStringScore(self, queryKey, resKey):\r\n        # max 6 min 0\r\n        d = max(6-(len(resKey) - len(queryKey)), 0)\r\n\r\n        return d\r\n\r\n    def getGeoDistance(self, origin, destination):\r\n        \"\"\"\r\n        Calculate the Haversine distance.\r\n\r\n        Parameters\r\n        ----------\r\n        origin : tuple of float\r\n            (lat, long)\r\n        destination : tuple of float\r\n            (lat, long)\r\n\r\n        Returns\r\n        -------\r\n        distance_in_km : float\r\n\r\n        \"\"\"\r\n        lat1, lon1 = origin\r\n        lat2, lon2 = destination\r\n        radius = 6371  # earth radius in km\r\n\r\n        dlat = math.radians(lat2 - lat1)\r\n        dlon = math.radians(lon2 - lon1)\r\n        a = (math.sin(dlat / 2) * math.sin(dlat / 2) +\r\n             math.cos(math.radians(lat1)) * math.cos(math.radians(lat2)) *\r\n             math.sin(dlon / 2) * math.sin(dlon / 2))\r\n        c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))\r\n        d = radius * c\r\n\r\n        return abs(round(d, 1))\r\n\r\n    def getGeoScore(self, origin, destination):\r\n        d = self.getGeoDistance(origin, destination)\r\n        if d < 100:\r\n            score = 3.0\r\n        elif d >= 100 and d < 200:\r\n            score = 1.5\r\n        elif d >= 200 and d < 500:\r\n            score = 0.75\r\n        else:\r\n            score = 0.0\r\n        return score\r\n\r\n    # higher score for bigger city\r\n    def getPopulationScore(self, population):\r\n        if population > 5000 and population < 15000:\r\n            score = 0.3\r\n        elif population > 15000 and population < 50000:\r\n            score = 0.6\r\n        elif population > 50000:\r\n            score = 1.0\r\n        else:\r\n            score = 0.0\r\n        return score\r\n\r\n    # a wrapper to provide the required function\r\n    # def predict(self, qString=None, lat=None, lng=None, radius=None):\r\n    #     return self.complete(qString, lat, lng, radius)\r\n    def complete(self, qString, latitude=None, longitude=None, radius=None):\r\n        if(not self.tree or len(qString) < 1):\r\n            return {}\r\n        # in case the user input is not capitalized\r\n        qString = qString.title()\r\n\r\n        # get all suffixes starting with the prefix qString\r\n        raw = self.tree.findSuffix(qString)\r\n        \r\n        temp = []\r\n        for name in raw.keys():\r\n            for data in raw[name]:\r\n                # if radius is used we filter out the out of range places\r\n                if (radius and self.getGeoDistance(\r\n                        (latitude, longitude),\r\n                        (float(data[0]),\r\n                         float(data[1]))\r\n                ) > radius):\r\n                    continue\r\n                cName = \"Canada\" if data[2] in self.CA_CODE.values() else \"USA\"\r\n                uniqueName = name + \", \" + data[2]+\", \"+cName\r\n                score = 0\r\n                if(latitude and longitude):\r\n                    score = self.getGeoScore(\r\n                        (latitude, longitude),\r\n                        (float(data[0]),\r\n                         float(data[1]))\r\n                    )\r\n\r\n                score += self.getStringScore(qString, name)\r\n                score += 
self.getPopulationScore(int(data[-1]))\r\n                # ensure score is positive\r\n                score = round(score / self.MAX_SCORE, 2)\r\n                i = 0\r\n                # insert while maintaining descending order on score\r\n                while (i < len(temp)):\r\n                    if temp[i][\"score\"] < score:\r\n                        break\r\n                    i += 1\r\n                temp.insert(i,\r\n                            {\"name\": uniqueName,\r\n                             \"latitude\": data[0],\r\n                             \"longitude\": data[1],\r\n                             \"score\": score\r\n                             }\r\n                            )\r\n        return temp\r\n","sub_path":"auto_complete.py","file_name":"auto_complete.py","file_ext":"py","file_size_in_byte":4966,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} {"seq_id":"86379319","text":"from collections import deque\n\ndy = [-1, 1, 0, 0]\ndx = [0, 0, -1, 1]\n\n\ndef bomb(w):\n    global H, board\n\n    stack = []\n    for i in range(H):\n        if board[i][w]!=0:\n            stack.append([i, w, board[i][w]])\n            break\n    \n    while stack:\n        s = stack.pop()\n        board[s[0]][s[1]] = 0\n        for i in range(4):\n            for j in range(1, s[2]):\n                ny = s[0] + dy[i] * j\n                nx = s[1] + dx[i] * j\n                \n                if 0<=ny<H and 0<=nx<W and board[ny][nx] > 1:\n                    stack.append([ny, nx, board[ny][nx]])\n                    board[ny][nx] = 0\n    \ndef down():\n    global board\n\n    rot_board = []\n    for b in zip(*board):\n        tmp = [0 for _ in range(H)]\n        h = H - 1\n        for block in reversed(b):\n            if block>0:\n                tmp[h] = block\n                h -= 1\n        \n        rot_board.append(tmp)\n\n    board = [list(z) for z in zip(*rot_board)]\n    \n\ndef dfs(n, tmp):\n    global N, board, result, comb\n    if result==0:\n        return\n    \n    if n==N:\n        tmp = [b[:] for b in board]\n        \n        for c in comb:\n            bomb(c)\n            down()\n        \n        count = 0\n        for b in board:\n            #print(b)\n            for block in b:\n                if block>0:\n                    count += 1\n        result = min(result, count)\n        board = tmp\n        \n        return\n    \n    for i in range(W):\n        comb[n] = i\n        dfs(n+1, tmp) \n    \n    \n\n    \nT = int(input())\n\nfor tc in range(1, T+1):\n    N, W, H = map(int, input().split())\n    board = [list(map(int, input().split())) for _ in range(H)]\n    comb = [-1 for _ in range(N)]\n    \n    result = float(\"inf\")\n    dfs(0, board)\n    print(\"#%s %s\" % (tc, result))\n","sub_path":"res/python/5656_벽돌깨기.py","file_name":"5656_벽돌깨기.py","file_ext":"py","file_size_in_byte":1828,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} {"seq_id":"383192145","text":"import ws\nimport time\nimport multiprocessing as mp\nfrom multiprocessing import Process\nfrom ctypes import c_double\nfrom typing import Tuple\n\n\ndef SocketLoopFactory() -> Tuple[mp.Process, mp.Value]:\n    \"\"\"\n    Used to reset the Process and Value for our socket upon loss of connection. 
\n    Necessary because multiprocessing processes cannot be started more than once\n    \n    v (value) is a shared C double that tracks a time value, \n    return: tuple of listening process and the variable we will store the time of last heartbeat in\n    \"\"\"\n    v = mp.Value(c_double, time.time())\n    return mp.Process(target=ws.main, kwargs={\"shared_beat\": v}), v\n\n\ndef main():\n    # p is actual ws process, beat is shared variable (between processes) to store last heartbeat time\n    p, beat = SocketLoopFactory()\n    try:\n        p.start()\n        while p.is_alive():\n            time.sleep(1)\n            # if time between last heartbeat and current time is over 10,\n            # probable loss of heartbeat, must kill child and spawn new process\n            if time.time() - beat.value > 10 and p.is_alive():\n                p.terminate()\n                p, beat = SocketLoopFactory()\n                p.start()\n    except Exception as e:\n        print(e)\n        p.terminate()\n\n\nif __name__ == \"__main__\":\n    main()","sub_path":"driver.py","file_name":"driver.py","file_ext":"py","file_size_in_byte":1331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} {"seq_id":"639260961","text":"import sys\nfrom os import mkdir\nfrom os.path import exists, dirname, basename, join, split\nfrom textx.metamodel import metamodel_from_file\nfrom textx.model import get_children_of_type\nfrom textx.exceptions import TextXSemanticError\nfrom textx.scoping import providers\nfrom copy import copy, deepcopy\n\nthis_folder = dirname(__file__)\n\n# set of actions used in the model\nactions = set([])\n\n# Classes for SLCO statements\nclass Assignment(object):\n\tdef __init__(self, parent, left, right):\n\t\tself.parent = parent\n\t\tself.left = left\n\t\tself.right = right\n\nclass Composite(object):\n\tdef __init__(self, parent, guard, assignments):\n\t\tself.parent = parent\n\t\tself.guard = guard\n\t\tself.assignments = assignments\n\nclass Expression(object):\n\tdef __init__(self, parent, left, op, right):\n\t\tself.parent = parent\n\t\tself.left = left\n\t\tself.op = op\n\t\tself.right = right\n\nclass ExprPrec1(object):\n\tdef __init__(self, parent, left, op, right):\n\t\tself.parent = parent\n\t\tself.left = left\n\t\tself.op = op\n\t\tself.right = right\n\nclass ExprPrec2(object):\n\tdef __init__(self, parent, left, op, right):\n\t\tself.parent = parent\n\t\tself.left = left\n\t\tself.op = op\n\t\tself.right = right\n\nclass ExprPrec3(object):\n\tdef __init__(self, parent, left, op, right):\n\t\tself.parent = parent\n\t\tself.left = left\n\t\tself.op = op\n\t\tself.right = right\n\nclass ExprPrec4(object):\n\tdef __init__(self, parent, left, op, right):\n\t\tself.parent = parent\n\t\tself.left = left\n\t\tself.op = op\n\t\tself.right = right\n\nclass Primary(object):\n\tdef __init__(self, parent, sign, value, body, ref):\n\t\tself.parent = parent\n\t\tself.sign = sign\n\t\tself.value = value\n\t\tself.body = body\n\t\tself.ref = ref\n\nclass ExpressionRef(object):\n\tdef __init__(self, parent, ref, index):\n\t\tself.parent = parent\n\t\tself.ref = ref\n\t\tself.index = index\n\nclass Variable(object):\n\tdef __init__(self, parent, type, name, defvalue, defvalues):\n\t\tself.parent = parent\n\t\tself.type = type\n\t\tself.name = name\n\t\tself.defvalue = defvalue\n\t\tself.defvalues = defvalues\n\nclass VariableRef(object):\n\tdef __init__(self, parent, var, index):\n\t\tself.parent = parent\n\t\tself.var = var\n\t\tself.index = index\n\nclass Type(object):\n\tdef __init__(self, parent, base, size):\n\t\tself.parent = parent\n\t\tself.base = base\n\t\tself.size = 
size\n\nclass Action(object):\n\tdef __init__(self, parent, name):\n\t\tself.parent = parent\n\t\tself.name = name\n\n# extra class to identify references to Actions\nclass ActionRef(object):\n\tdef __init__(self, parent, act):\n\t\tself.parent = parent\n\t\tself.act = act\n\n# extra classes for AL-SLCO models\nclass AL_Assignment(object):\n\tdef __init__(self, parent, left, right, cached):\n\t\tself.parent = parent\n\t\tself.left = left\n\t\tself.right = right\n\t\tself.cached = cached\n\nclass AL_Expression(object):\n\tdef __init__(self, parent, left, op, right, cached):\n\t\tself.parent = parent\n\t\tself.left = left\n\t\tself.op = op\n\t\tself.right = right\n\t\tself.cached = cached\n\nclass AL_ReadInstruction(object):\n\tdef __init__(self, parent, ref):\n\t\tself.parent = parent\n\t\tself.ref = ref\n\nclass WriteInstruction(object):\n\tdef __init__(self, parent, ref):\n\t\tself.parent = parent\n\t\tself.ref = ref\n\n# method to raise semantic error\ndef raise_semantic_error(S, s, model):\n\t\"\"\"S is error message string.\n\ts is object in which error occurs.\"\"\"\n\tline, col = model._tx_parser.pos_to_linecol(s._tx_position)\n\tS += ' at (\"%s\", \"%s\")' % (line, col)\n\traise TextXSemanticError(S)\n\n# FUNCTIONS TO CREATE INSTANCES OF SLCO CONCEPTS\n\ndef create_smlocal_var(sm, v, type, size):\n\t\"\"\"Create and return a variable object for the variable named v of given type and size, associated to the given state machine sm\"\"\"\n\tnewv = Variable(sm, '', v, None, [])\n\tif type == \"Boolean\":\n\t\tt = Type(newv, 'Boolean', size)\n\telif type == \"Byte\":\n\t\tt = Type(newv, 'Byte', size)\n\telse:\n\t\tt = Type(newv, 'Integer', size)\n\tnewv.type = t\n\t# add variable to list of state machine-local variables\n\tsm.variables.append(newv)\n\treturn newv\n\ndef create_var_expression(st, v, i):\n\t\"\"\"Create and return a new expression containing only a reference to the given variable name (with possibly index i). 
Associate the expression with the given statement st\"\"\"\n\te = Expression(st, '', '', '')\n\te4 = ExprPrec4(e, '', '', '')\n\te3 = ExprPrec3(e4, '', '', '')\n\te2 = ExprPrec2(e3, '', '', '')\n\te1 = ExprPrec1(e2, '', '', '')\n\tp = Primary(e1, '', '', '', '')\n\tr = ExpressionRef(p, v, i)\n\te.left = e4\n\te4.left = e3\n\te3.left = e2\n\te2.left = e1\n\te1.left = p\n\tp.ref = r\n\treturn e\n\n# *** MODEL PROCESSORS ***\n\n# model processor to create the set of actions\ndef construct_action_set(model, metamodel):\n\tglobal actions\n\t# construct the set of actions as they appear in the model\n\tactions = set([])\n\tfor a in model.actions:\n\t\tactions.add(a.name)\n\ndef getlabel_colored(s):\n\t\"\"\"Get the label for the given statement s\"\"\"\n\tresult = ''\n\tif s.__class__.__name__ == \"Assignment\":\n\t\tresult += s.left.var.name\n\t\tif s.left.index != None:\n\t\t\tresult += \"[\" + getlabel(s.left.index) + \"]\"\n\t\tresult += \" := \" + getlabel(s.right)\n\telif s.__class__.__name__ == \"Composite\":\n\t\tresult += \"[\"\n\t\tif s.guard != None:\n\t\t\tresult += getlabel(s.guard)\n\t\t\tresult += \";\"\n\t\tfor i in range(0,len(s.assignments)):\n\t\t\tresult += \" \" + getlabel(s.assignments[i])\n\t\t\tif i < len(s.assignments)-1:\n\t\t\t\tresult += \";\"\n\t\tresult += \"]\"\n\telif s.__class__.__name__ == \"Delay\":\n\t\tresult += \"after \" + str(s.length) + \" ms\"\n\telif s.__class__.__name__ == \"SendSignal\":\n\t\tresult += \"send \" + s.signal + \"(\"\n\t\tfirst = True\n\t\tfor p in s.params:\n\t\t\tif not first:\n\t\t\t\tresult += \",\"\n\t\t\telse:\n\t\t\t\tfirst = False\n\t\t\tresult += getlabel(p)\n\t\tresult += \") to \" + s.target.name\n\telif s.__class__.__name__ == \"ReceiveSignal\":\n\t\tresult += \"receive \" + s.signal + \"(\"\n\t\tfirst = True\n\t\tfor p in s.params:\n\t\t\tif not first:\n\t\t\t\tresult += \",\"\n\t\t\telse:\n\t\t\t\tfirst = False\n\t\t\tresult += getlabel(p)\n\t\tif s.guard != None:\n\t\t\tresult += \" | \" + getlabel(s.guard)\n\t\tresult += \") from \" + s.target.name\n\telif s.__class__.__name__ == \"Expression\" or s.__class__.__name__ == \"ExprPrec4\" or s.__class__.__name__ == \"ExprPrec3\" or s.__class__.__name__ == \"ExprPrec2\" or s.__class__.__name__ == \"ExprPrec1\":\n\t\tif s.op != '':\n\t\t\tresult += getlabel(s.left) + \" \" + s.op + \" \" + getlabel(s.right)\n\t\telse:\n\t\t\tresult += getlabel(s.left)\n\telif s.__class__.__name__ == \"Primary\":\n\t\tresult += s.sign\n\t\tif s.sign == \"not\":\n\t\t\tresult += \" \"\n\t\tif s.value != None:\n\t\t\tnewvalue = s.value\n\t\t\tresult += str(newvalue)\n\t\telif s.ref != None:\n\t\t\tresult += s.ref.ref\n\t\t\tif s.ref.index != None:\n\t\t\t\tresult += \"[\" + getlabel(s.ref.index) + \"]\"\n\t\telse:\n\t\t\tresult += '(' + getlabel(s.body) + ')'\n\telif s.__class__.__name__ == \"VariableRef\":\n\t\tresult += s.var.name\n\t\tif s.index != None:\n\t\t\tresult += \"[\" + getlabel(s.index) + \"]\"\n\treturn result\n\ndef getlabel(s):\n\t\"\"\"Get the label for the given statement s\"\"\"\n\tresult = ''\n\tif s.__class__.__name__ == \"Assignment\":\n\t\tresult += s.left.var.name\n\t\tif s.left.index != None:\n\t\t\tresult += \"[\" + getlabel(s.left.index) + \"]\"\n\t\tresult += \" := \" + getlabel(s.right)\n\telif s.__class__.__name__ == \"Composite\":\n\t\tresult += \"[\"\n\t\tif s.guard != None:\n\t\t\tresult += getlabel(s.guard)\n\t\t\tresult += \";\"\n\t\tfor i in range(0,len(s.assignments)):\n\t\t\tresult += \" \" + getlabel(s.assignments[i])\n\t\t\tif i < 
len(s.assignments)-1:\n\t\t\t\tresult += \";\"\n\t\tresult += \"]\"\n\telif s.__class__.__name__ == \"Delay\":\n\t\tresult += \"after \" + str(s.length) + \" ms\"\n\telif s.__class__.__name__ == \"SendSignal\":\n\t\tresult += \"send \" + s.signal + \"(\"\n\t\tfirst = True\n\t\tfor p in s.params:\n\t\t\tif not first:\n\t\t\t\tresult += \",\"\n\t\t\telse:\n\t\t\t\tfirst = False\n\t\t\tresult += getlabel(p)\n\t\tresult += \") to \" + s.target.name\n\telif s.__class__.__name__ == \"ReceiveSignal\":\n\t\tresult += \"receive \" + s.signal + \"(\"\n\t\tfirst = True\n\t\tfor p in s.params:\n\t\t\tif not first:\n\t\t\t\tresult += \",\"\n\t\t\telse:\n\t\t\t\tfirst = False\n\t\t\tresult += getlabel(p)\n\t\tif s.guard != None:\n\t\t\tresult += \" | \" + getlabel(s.guard)\n\t\tresult += \") from \" + s.target.name\n\telif s.__class__.__name__ == \"Expression\" or s.__class__.__name__ == \"ExprPrec4\" or s.__class__.__name__ == \"ExprPrec3\" or s.__class__.__name__ == \"ExprPrec2\" or s.__class__.__name__ == \"ExprPrec1\":\n\t\tif s.op != '':\n\t\t\tresult += getlabel(s.left) + \" \" + s.op + \" \" + getlabel(s.right)\n\t\telse:\n\t\t\tresult += getlabel(s.left)\n\telif s.__class__.__name__ == \"Primary\":\n\t\tresult += s.sign\n\t\tif s.sign == \"not\":\n\t\t\tresult += \" \"\n\t\tif s.value != None:\n\t\t\tnewvalue = s.value\n\t\t\tresult += str(newvalue)\n\t\telif s.ref != None:\n\t\t\tresult += s.ref.ref\n\t\t\tif s.ref.index != None:\n\t\t\t\tresult += \"[\" + getlabel(s.ref.index) + \"]\"\n\t\telse:\n\t\t\tresult += '(' + getlabel(s.body) + ')'\n\telif s.__class__.__name__ == \"VariableRef\":\n\t\tresult += s.var.name\n\t\tif s.index != None:\n\t\t\tresult += \"[\" + getlabel(s.index) + \"]\"\n\telif s.__class__.__name__ == \"Variable\":\n\t\tresult += s.name\n\telif s.__class__.__name__ == \"ActionRef\":\n\t\tresult += s.act.name\n\treturn result\n\n# model processor to check for name clashes and remove duplicates in lists\n# invalid name clashes:\n# - action names cannot be used for variable names\n# - classes need to have unique names\n# - objects need to have unique names\ndef check_names(model, metamodel):\n\t# actions\n\tactlist = []\n\tactset = set([])\n\tfor a in model.actions:\n\t\tif a.name not in actset:\n\t\t\tactset.add(a.name)\n\t\t\tactlist.append(a)\n\tmodel.actions = actlist\n\n\t# classes\n\tcnames = set([])\n\tfor c in model.classes:\n\t\tif c.name in cnames:\n\t\t\terror = 'Name clash: \"%s\" used for multiple classes, once' % c.name\n\t\t\traise_semantic_error(error, c, model)\n\t\telse:\n\t\t\tcnames.add(c.name)\n\n\t# check inside each class for variable names\n\tfor c in model.classes:\n\t\t# variables\n\t\tvarlist = []\n\t\tvarset = set([])\n\t\tfor v in c.variables:\n\t\t\tif v.name in actset:\n\t\t\t\terror = 'Name clash: \"%s\" used for both a variable and an action' % v.name\n\t\t\t\traise_semantic_error(error, v, model)\n\t\t\telse:\n\t\t\t\tif v.name not in varset:\n\t\t\t\t\tvarset.add(v.name)\n\t\t\t\t\tvarlist.append(v)\n\t\tc.variables = varlist\n\t\t# ports\n\t\tportlist = []\n\t\ttmp = set([])\n\t\tfor p in c.ports:\n\t\t\tif p.name not in tmp:\n\t\t\t\ttmp.add(p.name)\n\t\t\t\tportlist.append(p)\n\t\tc.ports = portlist\n\t\t# state machines\n\t\tfor sm in c.statemachines:\n\t\t\tvarlist = []\n\t\t\tvarset = set([])\n\t\t\tfor v in sm.variables:\n\t\t\t\tif v.name in actset:\n\t\t\t\t\terror = 'Name clash: \"%s\" used for both a variable and an action' % v.name\n\t\t\t\t\traise_semantic_error(error, v, model)\n\t\t\t\telse:\n\t\t\t\t\tif v.name not in 
varset:\n\t\t\t\t\t\tvarset.add(v.name)\n\t\t\t\t\t\tvarlist.append(v)\n\t\t\tsm.variables = varlist\n\t\t\t# states\n\t\t\tstatelist = []\n\t\t\tstateset = set([sm.initialstate.name])\n\t\t\tfor s in sm.states:\n\t\t\t\tif s.name not in stateset:\n\t\t\t\t\tstateset.add(s.name)\n\t\t\t\t\tstatelist.append(s)\n\t\t\tsm.states = statelist\n\n\tonames = set([])\n\tfor o in model.objects:\n\t\tif o.name in onames:\n\t\t\terror = 'Name clash: \"%s\" used for multiple objects, once' % o.name\n\t\t\traise_semantic_error(error, o, model)\n\t\telse:\n\t\t\tonames.add(o.name)\n\n# model processor adding initial state to list of states\ndef add_initial_to_states(model, metamodel):\n\tfor c in model.classes:\n\t\tfor stm in c.statemachines:\n\t\t\tstm.states = [stm.initialstate] + stm.states\n\n# model processor filling in types of variables\ndef add_variable_types(model, metamodel):\n\tfor c in model.classes:\n\t\tfor i in range(0,len(c.variables)):\n\t\t\tif c.variables[i].type == None:\n\t\t\t\tc.variables[i].type = c.variables[i-1].type\n\t\tfor sm in c.statemachines:\n\t\t\tfor i in range(0,len(sm.variables)):\n\t\t\t\tif sm.variables[i].type == None:\n\t\t\t\t\tsm.variables[i].type = sm.variables[i-1].type\n\n# model processor setting sizes of types by default to one\ndef set_default_type_size(model, metamodel):\n\tfor c in model.classes:\n\t\tfor i in range(0,len(c.variables)):\n\t\t\tif c.variables[i].type != None:\n\t\t\t\tif c.variables[i].type.size != None:\n\t\t\t\t\tif c.variables[i].type.size == 0:\n\t\t\t\t\t\tc.variables[i].type.size = 1\n\t\tfor sm in c.statemachines:\n\t\t\tfor i in range(0,len(sm.variables)):\n\t\t\t\tif sm.variables[i].type != None:\n\t\t\t\t\tif sm.variables[i].type.size != None:\n\t\t\t\t\t\tif sm.variables[i].type.size == 0:\n\t\t\t\t\t\t\tsm.variables[i].type.size = 1\n\tfor ch in model.channels:\n\t\tfor t in ch.type:\n\t\t\tif t.size != None:\n\t\t\t\tif t.size == 0:\n\t\t\t\t\tt.size = 1\n\n# model processor setting sizes of channels by default to one\ndef set_default_channel_size(model, metamodel):\n\tfor ch in model.channels:\n\t\tif ch.size == 0:\n\t\t\tch.size = 1\n\n# model processor adding tau action to transitions without statements\ndef add_taus(model, metamodel):\n\ttau_needed = False\n\tfor c in model.classes:\n\t\tfor stm in c.statemachines:\n\t\t\tfor trn in stm.transitions:\n\t\t\t\tif len(trn.statements) == 0:\n\t\t\t\t\ttau_needed = True\n\t\t\t\t\tbreak\n\t\t\tif tau_needed:\n\t\t\t\tbreak\n\t\tif tau_needed:\n\t\t\tbreak\n\tif tau_needed:\n\t\t# add tau to list of actions\n\t\tta = Action(model, \"tau\")\n\t\tmodel.actions.append(ta)\n\t\t# add reference to tau action to all transitions without statements\n\t\tfor c in model.classes:\n\t\t\tfor stm in c.statemachines:\n\t\t\t\tfor trn in stm.transitions:\n\t\t\t\t\tif len(trn.statements) == 0:\n\t\t\t\t\t\ttrn.statements.append(ActionRef(trn, ta))\n\n# model processor to fix and check references\ndef fix_references(model, metamodel):\n\tfor c in model.classes:\n\t\tVc = {}\n\t\tfor v in c.variables:\n\t\t\tVc[v.name] = v\n\t\tfor sm in c.statemachines:\n\t\t\tV = copy(Vc)\n\t\t\tfor v in sm.variables:\n\t\t\t\tV[v.name] = v\n\t\t\tstatedict = {sm.initialstate.name: sm.initialstate}\n\t\t\tfor s in sm.states:\n\t\t\t\tstatedict[s.name] = s\n\t\t\tfor tr in sm.transitions:\n\t\t\t\t# check state references of transitions\n\t\t\t\tsref = statedict.get(tr.source.name)\n\t\t\t\tif sref == None:\n\t\t\t\t\terror = 'Source state \"%s\" mentioned in transition is not defined in state machine' % 
tr.source.name\n\t\t\t\t\traise_semantic_error(error, tr, model)\n\t\t\t\telse:\n\t\t\t\t\ttr.source = sref\n\t\t\t\tsref = statedict.get(tr.target.name)\n\t\t\t\tif sref == None:\n\t\t\t\t\terror = 'Target state \"%s\" mentioned in transition is not defined in state machine' % tr.target.name\n\t\t\t\t\traise_semantic_error(error, tr, model)\n\t\t\t\telse:\n\t\t\t\t\ttr.target = sref\n\t\t\t\t# check references to actions and variables in statements\n\t\t\t\tfor st in tr.statements:\n\t\t\t\t\tstatement_check_refs(st, V, model)\n\n# # model processor to check initialisation of variables: TODO\n# def check_inits(model, metamodel):\n# \tfor o in model.objects:\n# \t\tfor i in o.assignments:\n# \t\t\tt = i.left.type\n# \t\t\tif t.base == 'Integer' and t.size == 0 and not i.right == \n\ndef statement_check_refs(s, V, model):\n\t\"\"\"Auxiliary function used to check references in statements.\n\tV is a dictionary of references to variables in the current scope.\"\"\"\n\tglobal actions\n\n\tif s.__class__.__name__ == \"Assignment\":\n\t\tstatement_check_refs(s.left, V, model)\n\t\tstatement_check_refs(s.right, V, model)\n\telif s.__class__.__name__ == \"Composite\":\n\t\tif s.guard != None:\n\t\t\tstatement_check_refs(s.guard, V, model)\n\t\tfor a in s.assignments:\n\t\t\tstatement_check_refs(a, V, model)\n\telif s.__class__.__name__ == \"ReceiveSignal\":\n\t\tfor p in s.params:\n\t\t\tstatement_check_refs(p, V, model)\n\t\tif s.guard != None:\n\t\t\tstatement_check_refs(s.guard, V, model)\n\telif s.__class__.__name__ == \"SendSignal\":\n\t\tfor p in s.params:\n\t\t\tstatement_check_refs(p, V, model)\n\telif s.__class__.__name__ == \"VariableRef\":\n\t\tvref = V.get(s.var.name)\n\t\tif vref == None:\n\t\t\terror = 'There is a reference to a variable \"%s\" that does not exist in that scope' % s.var.name\n\t\t\traise_semantic_error(error, s, model)\n\t\telse:\n\t\t\t# possibly fix reference to take scope into account\n\t\t\ts.var = vref\n\t\t\t# if variable is of Array type, an index must be provided\n\t\t\tif vref.type.size > 1 and s.index == None:\n\t\t\t\terror = 'There is a reference to an Array variable \"%s\" where an index is mandatory, but missing,' % s.var.name\n\t\t\t\traise_semantic_error(error, s, model)\n\telif s.__class__.__name__ != \"Primary\" and s.__class__.__name__ != \"ActionRef\":\n\t\tstatement_check_refs(s.left, V, model)\n\t\tif s.op != '':\n\t\t\tstatement_check_refs(s.right, V, model)\n\telif s.__class__.__name__ != \"ActionRef\":\n\t\tif s.ref != None:\n\t\t\tif s.ref.ref not in actions:\n\t\t\t\tvref = V.get(s.ref.ref)\n\t\t\t\tif vref == None:\n\t\t\t\t\terror = 'There is a reference to a variable \"%s\" that does not exist in that scope' % s.ref.ref\n\t\t\t\t\traise_semantic_error(error, s, model)\n\t\t\t\telse:\n\t\t\t\t\t# if variable is of Array type, an index must be provided\n\t\t\t\t\tif vref.type.size > 1 and s.ref.index == None:\n\t\t\t\t\t\terror = 'There is a reference to an Array variable \"%s\" where an index is mandatory, but missing,' % s.ref.ref\n\t\t\t\t\t\traise_semantic_error(error, s, model)\n\n# model processor to simplify action references\n#def simplify_statements(model, metamodel):\n#\tfor c in model.classes:\n#\t\tfor sm in c.statemachines:\n#\t\t\tfor tr in sm.transitions:\n#\t\t\t\tfor st in tr.statements:\n#\t\t\t\t\tif expression_is_actionref(st):\n\t\t\t\t\t\t# replace with a single class of type ActionRef\n\t\t\t\t\t\t# TODO\n\ndef statement_is_actionref(s):\n\tif s.__class__.__name__ == \"ActionRef\":\n\t\treturn 
True\n\telse:\n\t\treturn expression_is_actionref(s)\n\ndef expression_is_actionref(s):\n\t\"\"\"Determine whether the given expression is an action reference\"\"\"\n\tglobal actions\n\tif s.__class__.__name__ == \"Expression\":\n\t\tif s.op == '':\n\t\t\tsnext = s.left\n\t\t\tif snext.op == '':\n\t\t\t\tsnext = snext.left\n\t\t\t\tif snext.op == '':\n\t\t\t\t\tsnext = snext.left\n\t\t\t\t\tif snext.op == '':\n\t\t\t\t\t\tsnext = snext.left\n\t\t\t\t\t\tif snext.op == '':\n\t\t\t\t\t\t\tsnext = snext.left\n\t\t\t\t\t\t\tif snext.ref != None and snext.sign == '':\n\t\t\t\t\t\t\t\tsnext = snext.ref\n\t\t\t\t\t\t\t\tif snext.ref in actions:\n\t\t\t\t\t\t\t\t\treturn True\n\treturn False\n\ndef read_SLCO_model(m):\n\t\"\"\"Read, post process, and type check an SLCO model\"\"\"\n\n\t# create meta-model\n\tslco_mm = metamodel_from_file(join(this_folder,'../textx_grammars/slco2.tx'), autokwd=True, classes=[Assignment, Composite, Expression, ExprPrec1, ExprPrec2, ExprPrec3, ExprPrec4, Primary, ExpressionRef, Variable, VariableRef, Type, Action])\n\n\t# register processors\n\tslco_mm.register_model_processor(construct_action_set)\n\tslco_mm.register_model_processor(check_names)\n\tslco_mm.register_model_processor(add_initial_to_states)\n\tslco_mm.register_model_processor(add_variable_types)\n\t#slco_mm.register_model_processor(set_default_type_size)\n\tslco_mm.register_model_processor(set_default_channel_size)\n\tslco_mm.register_model_processor(add_taus)\n\tslco_mm.register_model_processor(fix_references)\n\t#slco_mm.register_model_processor(simplify_statements)\n\n# To do: Check receive statements for not receiving multiple values in the same variable\n# To do: Check for absence of arrays (not single elements) as part of messages\n\n\tslco_mm.register_scope_providers({\n\t\t\"*.*\": providers.FQN(),\n\t\t\"Initialisation.left\": providers.RelativeName(\"parent.type.variables\"),\n\t\t\"Channel.port0\": providers.RelativeName(\"source.type.ports\"),\n\t\t\"Channel.port1\": providers.RelativeName(\"target.type.ports\"),\n\t\t\"ReceiveSignal.from\": providers.RelativeName(\"parent.parent.parent.type.ports\"),\n\t\t\"SendSignal.to\": providers.RelativeName(\"parent.parent.parent.type.ports\"),\n\t})\n\n\t# parse and return the model\n\treturn slco_mm.model_from_file(m)\n","sub_path":"libraries/slcolib.py","file_name":"slcolib.py","file_ext":"py","file_size_in_byte":18735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"46376270","text":"import pygame\n\nfrom src.Config import*\n\nclass Snake:\n def __init__(self, display):\n self.x_pos = (WIDTH -30) / 2\n self.y_pos = (HEIGHT - 30) / 2\n self.display = display\n # self.body = []\n # self.max_size = 0\n\n # def eat(self):\n # self.max_size += 1\n\n def draw(self):\n return pygame.draw.rect(\n self.display, \n YELLOW,\n [\n self.x_pos,\n self.y_pos,\n SNAKE_HEIGHT,\n SNAKE_WIDTH\n ]\n )\n\n # def draw_body(self):\n # for item in self.body:\n # pygame.draw.rect(\n # self.display, \n # YELLOW,\n # [\n # item[0],\n # item[1],\n # SNAKE_WIDTH,\n # SNAKE_HEIGHT\n # ]\n # )\n\n def move(self, x_change, y_change):\n # self.body.append((self.x_pos, self.y_pos))\n self.x_pos += x_change\n self.y_pos += y_change\n\n # if len(self.body) > self.max_size:\n # del(self.body[0])\n","sub_path":"Ssssss111/src/Snake.py","file_name":"Snake.py","file_ext":"py","file_size_in_byte":1124,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"570944164","text":"import 
tensorflow as tf\r\nimport tensorlayer as tl\r\nimport cv2\r\nimport numpy as np\r\nfrom tensorlayer.layers import *\r\nimport matplotlib.pyplot as plt\r\nimport os\r\nfrom tqdm import tqdm\r\n\r\nfrom net import Gb_all_layer_out, ResLayer, RouteLayer, upsample, conv2d_unit, detection\r\n\r\n# net_out = [tf.zeros(shape=(1, 52, 52, 3, 85)), tf.zeros(shape=(1, 26, 26, 3, 85)), tf.zeros(shape=(1, 13, 13, 3, 85))]\r\ncheckpoint_dir = './ckpt/'\r\nckpt_name = 'ep000-step46000-loss2.157-46000'\r\nlabel = ['knot']\r\nanchors = tf.constant([125, 311, 127, 192, 212, 378, 273, 178, 324, 490, 362, 865, 404, 292, 513, 505, 639, 727],\r\n # [10, 13, 16, 30, 33, 23, 30, 61, 62, 45, 59, 119, 116, 90, 156, 198, 373, 326],\r\n dtype='float', shape=[1, 1, 1, 9, 2])\r\nn_class = len(label)\r\n\r\ninput_pb = tf.placeholder(tf.float32, [None, 416, 416, 3])\r\nnet = InputLayer(input_pb, name='input')\r\nnet = conv2d_unit(net, filters=32, kernels=3, strides=1, bn=True, name='0')\r\nnet = conv2d_unit(net, filters=64, kernels=3, strides=2, bn=True, name='1')\r\nnet = conv2d_unit(net, filters=32, kernels=1, strides=1, bn=True, name='2')\r\nnet = conv2d_unit(net, filters=64, kernels=3, strides=1, bn=True, name='3')\r\nnet = ResLayer(net, res=1, name='4')\r\nnet = conv2d_unit(net, filters=128, kernels=3, strides=2, bn=True, name='5')\r\nnet = conv2d_unit(net, filters=64, kernels=1, strides=1, bn=True, name='6')\r\nnet = conv2d_unit(net, filters=128, kernels=3, strides=1, bn=True, name='7')\r\nnet = ResLayer(net, res=5, name='8')\r\nnet = conv2d_unit(net, filters=64, kernels=1, strides=1, bn=True, name='9')\r\nnet = conv2d_unit(net, filters=128, kernels=3, strides=1, bn=True, name='10')\r\nnet = ResLayer(net, res=8, name='11')\r\nnet = conv2d_unit(net, filters=256, kernels=3, strides=2, bn=True, name='12')\r\nnet = conv2d_unit(net, filters=128, kernels=1, strides=1, bn=True, name='13')\r\nnet = conv2d_unit(net, filters=256, kernels=3, strides=1, bn=True, name='14')\r\nnet = ResLayer(net, res=12, name='15')\r\nnet = conv2d_unit(net, filters=128, kernels=1, strides=1, bn=True, name='16')\r\nnet = conv2d_unit(net, filters=256, kernels=3, strides=1, bn=True, name='17')\r\nnet = ResLayer(net, res=15, name='18')\r\nnet = conv2d_unit(net, filters=128, kernels=1, strides=1, bn=True, name='19')\r\nnet = conv2d_unit(net, filters=256, kernels=3, strides=1, bn=True, name='20')\r\nnet = ResLayer(net, res=18, name='21')\r\nnet = conv2d_unit(net, filters=128, kernels=1, strides=1, bn=True, name='22')\r\nnet = conv2d_unit(net, filters=256, kernels=3, strides=1, bn=True, name='23')\r\nnet = ResLayer(net, res=21, name='24')\r\nnet = conv2d_unit(net, filters=128, kernels=1, strides=1, bn=True, name='25')\r\nnet = conv2d_unit(net, filters=256, kernels=3, strides=1, bn=True, name='26')\r\nnet = ResLayer(net, res=24, name='27')\r\nnet = conv2d_unit(net, filters=128, kernels=1, strides=1, bn=True, name='28')\r\nnet = conv2d_unit(net, filters=256, kernels=3, strides=1, bn=True, name='29')\r\nnet = ResLayer(net, res=27, name='30')\r\nnet = conv2d_unit(net, filters=128, kernels=1, strides=1, bn=True, name='31')\r\nnet = conv2d_unit(net, filters=256, kernels=3, strides=1, bn=True, name='32')\r\nnet = ResLayer(net, res=30, name='33')\r\nnet = conv2d_unit(net, filters=128, kernels=1, strides=1, bn=True, name='34')\r\nnet = conv2d_unit(net, filters=256, kernels=3, strides=1, bn=True, name='35')\r\nnet = ResLayer(net, res=33, name='36')\r\nnet = conv2d_unit(net, filters=512, kernels=3, strides=2, bn=True, name='37')\r\nnet = conv2d_unit(net, 
filters=256, kernels=1, strides=1, bn=True, name='38')\r\nnet = conv2d_unit(net, filters=512, kernels=3, strides=1, bn=True, name='39')\r\nnet = ResLayer(net, res=37, name='40')\r\nnet = conv2d_unit(net, filters=256, kernels=1, strides=1, bn=True, name='41')\r\nnet = conv2d_unit(net, filters=512, kernels=3, strides=1, bn=True, name='42')\r\nnet = ResLayer(net, res=40, name='43')\r\nnet = conv2d_unit(net, filters=256, kernels=1, strides=1, bn=True, name='44')\r\nnet = conv2d_unit(net, filters=512, kernels=3, strides=1, bn=True, name='45')\r\nnet = ResLayer(net, res=43, name='46')\r\nnet = conv2d_unit(net, filters=256, kernels=1, strides=1, bn=True, name='47')\r\nnet = conv2d_unit(net, filters=512, kernels=3, strides=1, bn=True, name='48')\r\nnet = ResLayer(net, res=46, name='49')\r\nnet = conv2d_unit(net, filters=256, kernels=1, strides=1, bn=True, name='50')\r\nnet = conv2d_unit(net, filters=512, kernels=3, strides=1, bn=True, name='51')\r\nnet = ResLayer(net, res=49, name='52')\r\nnet = conv2d_unit(net, filters=256, kernels=1, strides=1, bn=True, name='53')\r\nnet = conv2d_unit(net, filters=512, kernels=3, strides=1, bn=True, name='54')\r\nnet = ResLayer(net, res=52, name='55')\r\nnet = conv2d_unit(net, filters=256, kernels=1, strides=1, bn=True, name='56')\r\nnet = conv2d_unit(net, filters=512, kernels=3, strides=1, bn=True, name='57')\r\nnet = ResLayer(net, res=55, name='58')\r\nnet = conv2d_unit(net, filters=256, kernels=1, strides=1, bn=True, name='59')\r\nnet = conv2d_unit(net, filters=512, kernels=3, strides=1, bn=True, name='60')\r\nnet = ResLayer(net, res=58, name='61')\r\nnet = conv2d_unit(net, filters=1024, kernels=3, strides=2, bn=True, name='62')\r\nnet = conv2d_unit(net, filters=512, kernels=1, strides=1, bn=True, name='63')\r\nnet = conv2d_unit(net, filters=1024, kernels=3, strides=1, bn=True, name='64')\r\nnet = ResLayer(net, res=62, name='65')\r\nnet = conv2d_unit(net, filters=512, kernels=1, strides=1, bn=True, name='66')\r\nnet = conv2d_unit(net, filters=1024, kernels=3, strides=1, bn=True, name='67')\r\nnet = ResLayer(net, res=65, name='68')\r\nnet = conv2d_unit(net, filters=512, kernels=1, strides=1, bn=True, name='69')\r\nnet = conv2d_unit(net, filters=1024, kernels=3, strides=1, bn=True, name='70')\r\nnet = ResLayer(net, res=68, name='71')\r\nnet = conv2d_unit(net, filters=512, kernels=1, strides=1, bn=True, name='72')\r\nnet = conv2d_unit(net, filters=1024, kernels=3, strides=1, bn=True, name='73')\r\nnet = ResLayer(net, res=71, name='74')\r\nnet = conv2d_unit(net, filters=512, kernels=1, strides=1, bn=True, name='75')\r\nnet = conv2d_unit(net, filters=1024, kernels=3, strides=1, bn=True, name='76')\r\nnet = conv2d_unit(net, filters=512, kernels=1, strides=1, bn=True, name='77')\r\nnet = conv2d_unit(net, filters=1024, kernels=3, strides=1, bn=True, name='78')\r\nnet = conv2d_unit(net, filters=512, kernels=1, strides=1, bn=True, name='79')\r\nnet = conv2d_unit(net, filters=1024, kernels=3, strides=1, bn=True, name='80')\r\nnet = conv2d_unit(net, filters=3 * (5 + n_class), kernels=1, strides=1, act='liner', bn=False, name='81')\r\ndetection(net, '82')\r\nnet = RouteLayer(net, [79], name='83')\r\nnet = conv2d_unit(net, filters=256, kernels=1, strides=1, bn=True, name='84')\r\nnet = upsample(net, scale=2, name='85')\r\nnet = RouteLayer(net, [85, 61], name='86')\r\nnet = conv2d_unit(net, filters=256, kernels=1, strides=1, bn=True, name='87')\r\nnet = conv2d_unit(net, filters=512, kernels=3, strides=1, bn=True, name='88')\r\nnet = conv2d_unit(net, filters=256, kernels=1, 
strides=1, bn=True, name='89')\r\nnet = conv2d_unit(net, filters=512, kernels=3, strides=1, bn=True, name='90')\r\nnet = conv2d_unit(net, filters=256, kernels=1, strides=1, bn=True, name='91')\r\nnet = conv2d_unit(net, filters=512, kernels=3, strides=1, bn=True, name='92')\r\nnet = conv2d_unit(net, filters=3 * (5 + n_class), kernels=1, strides=1, act='liner', bn=False, name='93')\r\ndetection(net, '94')\r\nnet = RouteLayer(net, [91], name='95')\r\nnet = conv2d_unit(net, filters=128, kernels=1, strides=1, bn=True, name='96')\r\nnet = upsample(net, scale=2, name='97')\r\nnet = RouteLayer(net, [97, 36], name='98')\r\nnet = conv2d_unit(net, filters=128, kernels=1, strides=1, bn=True, name='99')\r\nnet = conv2d_unit(net, filters=256, kernels=3, strides=1, bn=True, name='100')\r\nnet = conv2d_unit(net, filters=128, kernels=1, strides=1, bn=True, name='101')\r\nnet = conv2d_unit(net, filters=256, kernels=3, strides=1, bn=True, name='102')\r\nnet = conv2d_unit(net, filters=128, kernels=1, strides=1, bn=True, name='103')\r\nnet = conv2d_unit(net, filters=256, kernels=3, strides=1, bn=True, name='104')\r\nnet = conv2d_unit(net, filters=3 * (5 + n_class), kernels=1, strides=1, act='liner', bn=False, name='105')\r\ndetection(net, '106')\r\nnet_out = [Gb_all_layer_out[106], Gb_all_layer_out[94], Gb_all_layer_out[82]]\r\n\r\n# load the parameters saved in the ckpt\r\nsess = tf.InteractiveSession()\r\nsaver = tf.train.Saver()\r\n# if a 'checkpoint' file exists you can add the line below; with only a single ckpt file this 'if' is unnecessary\r\nif tf.train.get_checkpoint_state(checkpoint_dir):  # confirm that it exists\r\n    saver.restore(sess, checkpoint_dir + ckpt_name)\r\n    print(\"load ok!\")\r\nelse:\r\n    print(\"ckpt file does not exist\")\r\n\r\n# tensor = tf.global_variables('layer_0_conv')\r\n# b = sess.run(tensor)\r\n# c = sess.run(net_out, feed_dict={input_pb: image_data})\r\nif not os.path.exists('out'):\r\n    os.mkdir('out')\r\nfile_name = input('Input image filedir:')\r\nimg_path = os.listdir(file_name)\r\nfor path in tqdm(img_path):\r\n    abs_path = file_name + path\r\n    img = cv2.imread(abs_path)\r\n# while True:\r\n#     file_name = input('Input image filename:')\r\n#     img = cv2.imread(file_name)\r\n    img = img[:, :, ::-1]  # RGB image\r\n    img_shape = img.shape[0:2][::-1]\r\n\r\n    _scale = min(416 / img_shape[0], 416 / img_shape[1])\r\n    _new_shape = (int(img_shape[0] * _scale), int(img_shape[1] * _scale))\r\n    im_sized = cv2.resize(img, _new_shape)\r\n    im_sized = np.pad(im_sized,\r\n                      (\r\n                          (int((416 - _new_shape[1]) / 2), 416 - _new_shape[1] - int((416 - _new_shape[1]) / 2)),\r\n                          (int((416 - _new_shape[0]) / 2), 416 - _new_shape[0] - int((416 - _new_shape[0]) / 2)),\r\n                          (0, 0)\r\n                      ),\r\n                      mode='constant')\r\n    image_data = np.array(im_sized, dtype='float32')\r\n    image_data /= 255.\r\n    image_data = np.expand_dims(image_data, 0)  # Add batch dimension.\r\n\r\n    input_shape = tf.cast(tf.shape(net_out[2])[1:3] * 32, dtype='float32')[::-1]  # hw\r\n    image_shape = tf.cast(img_shape, dtype='float32')[::-1]  # hw\r\n    new_shape = tf.round(image_shape * tf.reduce_min(input_shape / image_shape))\r\n    offset = (input_shape - new_shape) / 2. 
/ input_shape\r\n scale = input_shape / new_shape\r\n\r\n # with tf.Session() as sess:\r\n # a = sess.run(scale)\r\n\r\n boxes = list()\r\n box_scores = list()\r\n\r\n cellbase_x = tf.to_float(tf.reshape(tf.tile(tf.range(52), [52]), (1, 52, 52, 1, 1)))\r\n cellbase_y = tf.transpose(cellbase_x, (0, 2, 1, 3, 4))\r\n cellbase_grid = tf.tile(tf.concat([cellbase_x, cellbase_y], -1), [1, 1, 1, 3, 1])\r\n # classes = list()\r\n for i in range(3): # 52 26 13\r\n anchor = anchors[..., 3 * i:3 * (i + 1), :]\r\n # feats = model.output[i]\r\n feats = net_out[i]\r\n\r\n grid_w = tf.shape(feats)[1] # 13\r\n grid_h = tf.shape(feats)[2] # 13\r\n grid_factor = tf.reshape(tf.cast([grid_w, grid_h], tf.float32), [1, 1, 1, 1, 2])\r\n\r\n feats = tf.reshape(feats, [-1, grid_w, grid_h, 3, n_class + 5])\r\n\r\n # Adjust preditions to each spatial grid point and anchor size.\r\n box_xy = (tf.sigmoid(feats[..., :2]) + cellbase_grid[:, :grid_w, :grid_h, :, :]) / tf.cast(grid_factor[::-1],\r\n 'float32')\r\n box_wh = tf.exp(feats[..., 2:4]) * anchor / tf.cast(input_shape[::-1], 'float32')\r\n box_confidence = tf.sigmoid(feats[..., 4:5])\r\n box_class_probs = tf.sigmoid(feats[..., 5:])\r\n\r\n box_yx = box_xy[..., ::-1]\r\n box_hw = box_wh[..., ::-1]\r\n box_yx = (box_yx - offset) * scale\r\n box_hw *= scale\r\n box_mins = box_yx - (box_hw / 2.)\r\n box_maxes = box_yx + (box_hw / 2.)\r\n _boxes = tf.concat([\r\n box_mins[..., 0:1], # y_min\r\n box_mins[..., 1:2], # x_min\r\n box_maxes[..., 0:1], # y_max\r\n box_maxes[..., 1:2] # x_max\r\n ], axis=-1)\r\n\r\n # Scale boxes back to original image shape.\r\n _boxes *= tf.concat([tf.cast(image_shape, 'float32'), tf.cast(image_shape, 'float32')], axis=-1)\r\n _boxes = tf.reshape(_boxes, [-1, 4])\r\n\r\n _box_scores = box_confidence * box_class_probs\r\n _box_scores = tf.reshape(_box_scores, [-1, n_class])\r\n boxes.append(_boxes)\r\n box_scores.append(_box_scores)\r\n boxes = tf.concat(boxes, axis=0)\r\n box_scores = tf.concat(box_scores, axis=0)\r\n\r\n mask = box_scores >= 0.3\r\n max_num_boxes = tf.constant(20, dtype='int32')\r\n\r\n boxes_ = []\r\n scores_ = []\r\n classes_ = []\r\n for c in range(n_class):\r\n class_boxes = tf.boolean_mask(boxes, mask[:, c])\r\n class_box_scores = tf.boolean_mask(box_scores[:, c], mask[:, c])\r\n nms_index = tf.image.non_max_suppression(\r\n class_boxes, class_box_scores, max_num_boxes, iou_threshold=0.5)\r\n class_boxes = tf.gather(class_boxes, nms_index)\r\n class_box_scores = tf.gather(class_box_scores, nms_index)\r\n classes = tf.ones_like(class_box_scores, 'int32') * c\r\n boxes_.append(class_boxes)\r\n scores_.append(class_box_scores)\r\n classes_.append(classes)\r\n boxes_ = tf.concat(boxes_, axis=0)\r\n scores_ = tf.concat(scores_, axis=0)\r\n classes_ = tf.concat(classes_, axis=0)\r\n\r\n b, s, c = sess.run([boxes_, scores_, classes_], feed_dict={input_pb: image_data})\r\n\r\n # plt.cla()\r\n # plt.imshow(img)\r\n # for i, obj in enumerate(b):\r\n # x1 = obj[1]\r\n # x2 = obj[3]\r\n # y1 = obj[0]\r\n # y2 = obj[2]\r\n #\r\n # # TODO: change the color of text\r\n # plt.text(x1, y1 - 10, round(s[i], 2))\r\n # plt.text(x2 - 30, y1 - 10, label[c[i]])\r\n # plt.hlines(y1, x1, x2, colors='red')\r\n # plt.hlines(y2, x1, x2, colors='red')\r\n # plt.vlines(x1, y1, y2, colors='red')\r\n # plt.vlines(x2, y1, y2, colors='red')\r\n # plt.show()\r\n img = img[:, :, ::-1]\r\n file = open(\"./out/\" + path.rstrip('.jpg') + '.txt', 'w')\r\n for i, obj in enumerate(b):\r\n cv2.rectangle(img, (obj[1], obj[0]), (obj[3], obj[2]), (0, 0, 255), 
3)\r\n        cv2.putText(img, str(round(s[i], 2)), (int(obj[1]), int(obj[0]) - 10), cv2.FONT_HERSHEY_COMPLEX, 2, (0, 0, 255),\r\n                    3)\r\n        cv2.putText(img, str(label[c[i]]), (int(obj[3]) - 100, int(obj[0]) - 10), cv2.FONT_HERSHEY_COMPLEX, 2,\r\n                    (0, 0, 255), 3)\r\n\r\n        file.write('{0} {1} '.format(label[c[i]], s[i]))\r\n        file.write('{0} {1} {2} {3}'.format(obj[1], obj[0], obj[3], obj[2]))\r\n        file.write('\\n')\r\n    file.close()\r\n    cv2.imwrite(\"./out/\" + path, img)\r\n    # cv2.imwrite(\"1.jpg\", img)\r\n","sub_path":"detection.py","file_name":"detection.py","file_ext":"py","file_size_in_byte":14644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} {"seq_id":"374282176","text":"import as_weatherstation.app as mod_ws_app\nimport as_weatherstation.read.csvfile as mod_ws_read_csvfile\n\n\nclass AS_WS_READER_LOG(mod_ws_read_csvfile.AS_WS_READER_CSVFILE):\n    \"\"\" Log file reader \"\"\"\n\n\n    def __init__(self, wsApp, sLabel=None):\n\n        super(AS_WS_READER_LOG, self).__init__(wsApp)\n\n        # No station specified so try to find one in the station list.\n        # There must be one station, and only one, configured\n        # for this to work, otherwise we will throw an error.\n        if sLabel == None:\n            for label in wsApp.stations:\n                if not sLabel is None:\n                    raise ValueError('Multiple weather stations configured. Please manually specify which configuration to use.')\n                sLabel = label\n\n        if sLabel is None:\n            raise ValueError('No stations found in configuration.')\n        \n        self.station = self.app.stations[sLabel]\n\n\n        # Figure out which sensor and inputs to read from\n        \"\"\"\n        fm = self.app.fieldMap[mod_ws_app.STYPE_PWS]\n        self.sensors = {}\n        self.inputs = {}\n        for field in fm:\n            if fm[field] is None:\n                continue\n            if fm[field][0] == mod_ws_app.PWS_IO_SENSOR:\n                self.sensors[field] = getattr(self.station, fm[field][1])\n            elif fm[field][0] == mod_ws_app.PWS_IO_INPUT:\n                self.inputs[field] = getattr(self.station, fm[field][1])\n        \"\"\"\n\n\n\n\n    def read(self, log=None, lines=0):\n\n        \"\"\" Read the station log file \"\"\"\n\n        if log is None:\n            log = self.station.logFile\n\n        return super(AS_WS_READER_LOG, self).read(log, lines)\n\n\n\n    def parseCSVLine(self, line):\n        \"\"\" Override the parent's method \"\"\"\n        \n        from time import strptime\n\n        fm = self.app.fieldMap['log']\n\n        sample = mod_ws_app.AS_WS_SAMPLE(strptime(line[0], \"%Y-%m-%d %H:%M:%S\"))\n        for i in range(1, len(line)):\n            mtype = fm[i-1]\n            measurement = mod_ws_app.AS_WS_SAMPLE.createMeasurement(mtype, line[i])\n            sample.setMeasurement(measurement)\n\n        return sample\n\n\n\n    def aggregateMeasurements(self, mtype, measurements):\n\n        # For digital inputs the aggregate is the sum of the value sampled\n        # (that is, the events are additive).\n        if mtype in self.inputs:\n            return sum(measurements)\n        else:\n            return super(AS_WS_READER_LOG, self).aggregateMeasurements(mtype, measurements)\n\n\n\n","sub_path":"as_weatherstation/read/log.py","file_name":"log.py","file_ext":"py","file_size_in_byte":2495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} {"seq_id":"289587359","text":"from com_cheese_api.ext.db import url, db, openSession, engine\n# from com_cheese_api.util.file import FileReader\nfrom com_cheese_api.cmm.utl.file import FileReader\n\nfrom flask import request\nfrom flask import Response, jsonify\nfrom flask_restful import Resource, reqparse\n\nfrom sqlalchemy import func\nfrom sqlalchemy import and_, or_\n\nfrom sklearn.ensemble import RandomForestClassifier # rforest\nfrom sklearn.tree 
import DecisionTreeClassifier # dtree\nfrom sklearn.naive_bayes import GaussianNB # nb\nfrom sklearn.neighbors import KNeighborsClassifier # knn\nfrom sklearn.svm import SVC # svm\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.model_selection import KFold # k value is understood as count\nfrom sklearn.model_selection import cross_val_score\n\nimport pandas as pd\nimport numpy as np\nimport json\nimport os\nimport sys\nfrom typing import List\nfrom pathlib import Path\n\n\n# Files generated after running this code:\n# cheese_fin.csv, cheese_train.csv, cheese_test.csv\n\nclass CheeseDfo(object):\n    def __init__(self):\n        self.fileReader = FileReader()\n        #self.data = os.path.join(os.path.abspath(os.path.dirname(__file__))+'/data')\n        self.data = os.path.join('com_cheese_api/cop/itm/cheese/data')\n        self.odf = None\n\n    def new(self):\n        cheese = 'cheese_data.csv'\n        this = self.fileReader\n        this.cheese = self.new_model(cheese)\n        # print(f'===== cheese data insert ===== this.cheese')\n        # print(this)\n\n        print(this.cheese.isnull().sum())\n\n        print(f'\\n===================cheese_dfo.py 111\\n')\n\n        this = CheeseDfo.ranking_ordinal(this)\n        this = CheeseDfo.cheese_texture_nominal(this)\n        this = CheeseDfo.types_nominal(this)\n        this = CheeseDfo.cheese_category_nominal(this)\n\n        print(f'===================cheese_dfo.py 222')\n\n        cheese_split = CheeseDfo.df_split(this.cheese)\n\n        # train, test data\n        train = 'cheese_train.csv'\n        test = 'cheese_test.csv'\n\n        this = self.fileReader\n        this.train = self.new_model(train) # payload\n        this.test = self.new_model(test) # payload\n\n        print(this)\n\n\n        self.odf = pd.DataFrame(\n            {\n                'ranking' : this.train.ranking,\n                'cheese_id' : this.train.cheese_id,\n                'brand' : this.train.brand,\n                'category' : this.train.category,\n                'types': this.train.types\n            }\n        )\n\n\n        this.id = this.test['name']\n        # print(f'Preprocessing Train Variable : {this.train.columns}')\n        # print(f'Preprocessing Test Variable : {this.test.columns}') \n        this = CheeseDfo.drop_feature(this, 'country')\n        this = CheeseDfo.drop_feature(this, 'price')\n        this = CheeseDfo.drop_feature(this, 'content')\n        this = CheeseDfo.drop_feature(this, 'cheese_id')\n        # print(f'Post-Drop Variable : {this.train.columns}')\n\n\n        # # print(f'Preprocessing Train Result: {this.train.head()}')\n        # # print(f'Preprocessing Test Result: {this.test.head()}')\n        # # print(f'Train NA Check: {this.train.isnull().sum()}')\n        # # print(f'Test NA Check: {this.test.isnull().sum()}') \n\n        this.label = CheeseDfo.create_label(this) # payload\n        this.train = CheeseDfo.create_train(this) # payload\n\n        # # print(f'Train Variable: {this.train.columns}')\n        # # print(f'Test Varibale: {this.test.columns}')\n        # clf = RandomForestClassifier()\n        # clf.fit(this.train, this.label)\n        # prediction = clf.predict(this.test)\n\n        # print(this)\n\n\n        df = pd.DataFrame(\n            {\n                'texture': this.train.texture,\n                # 'matching' : this.train.matching,\n                'img' : this.train.img\n            }\n        )\n\n        # print(self.odf)\n        # print(df)\n        # concatenate the odf and df DataFrames\n        sumdf = pd.concat([self.odf, df], axis=1)\n        print('=========cheese_dfo.py======111')\n        print(sumdf)\n        print('=========cheese_dfo.py======222')\n        print(sumdf.isnull().sum())\n        print(list(sumdf))\n        sumdf.to_csv(os.path.join('com_cheese_api/cop/itm/cheese/data', 'cheese_fin.csv'), index=False, encoding='utf-8-sig')\n        \n        return sumdf\n\n\n\n    def new_model(self, payload) -> object:\n        this = self.fileReader\n        this.data = self.data\n        this.fname = payload\n        print(f'{self.data}')\n        print(f'{this.fname}')\n        return pd.read_csv(Path(self.data, 
this.fname)) \n\n @staticmethod\n def create_train(this) -> object:\n return this.train.drop('name', axis = 1)\n \n\n @staticmethod\n def create_label(this) -> object:\n return this.train['name'] # Label is the answer.\n\n @staticmethod\n def drop_feature(this, feature) -> object:\n this.train = this.train.drop([feature], axis = 1)\n this.test = this.test.drop([feature], axis = 1)\n return this\n\n @staticmethod\n def ranking_ordinal(this) -> object:\n return this\n\n @staticmethod\n def cheese_texture_nominal(this) -> object:\n this.cheese['texture'] = this.cheese['texture'].map({\n '후레쉬치즈': 1,\n '세미하드치즈': 2,\n '세미하드': 2,\n '하드치즈': 3,\n '소프트치즈': 4,\n '연성치즈': 5,\n '경성치즈': 6\n })\n return this\n\n @staticmethod\n def types_nominal(this) -> object:\n types_mapping = {'가공치즈':0, '자연치즈':1}\n this.cheese ['types'] = this.cheese['types'].map(types_mapping)\n this.cheese = this.cheese\n return this\n\n @staticmethod\n def cheese_category_nominal(this) -> object:\n category_map = {\n '모짜렐라': 1,\n '블루치즈': 2,\n '리코타': 3,\n '체다': 4,\n '파르미지아노 레지아노': 5,\n '고다': 6,\n '까망베르': 7,\n '브리': 8,\n '만체고': 9,\n '에멘탈': 10,\n '부라타': 11\n }\n this.cheese['category'] = this.cheese['category'].map(category_map)\n return this\n\n\n @staticmethod\n def df_split(data):\n cheese_train, cheese_test = train_test_split(data, test_size = 0.3, random_state = 32)\n cheese_train.to_csv(os.path.join('com_cheese_api/cop/itm/cheese/data', 'cheese_train.csv'), index=False)\n cheese_test.to_csv(os.path.join('com_cheese_api/cop/itm/cheese/data', 'cheese_test.csv'), index=False) \n return cheese_train, cheese_test\n\n\nif __name__ == '__main__' :\n df = CheeseDfo()\n df.new() \n\n\n\n'''\n ranking cheese_id brand category types texture img\n0 33 p33 샴피뇽 2 1 4 https://img-cf.kurly.com/shop/data/goods/15954...\n1 48 p48 푸글리제 3 1 1 https://img-cf.kurly.com/shop/data/goods/15319...\n2 16 p16 zott 1 1 1 https://img-cf.kurly.com/shop/data/goods/15266...\n3 57 p57 라 콘타디나 3 1 1 https://img-cf.kurly.com/shop/data/goods/15235...\n4 47 p47 란다나 6 1 2 https://img-cf.kurly.com/shop/data/goods/15777...\n5 32 p32 안젤로 2 1 2 https://img-cf.kurly.com/shop/data/goods/15107...\n6 61 p61 사토리 4 1 2 https://img-cf.kurly.com/shop/data/goods/15311...\n7 54 p54 퀘소로시난테 9 1 3 https://img-cf.kurly.com/shop/data/goods/15978...\n8 49 p49 베르기어 6 1 2 https://img-cf.kurly.com/shop/data/goods/15281...\n9 69 p69 그랑도르즈 7 1 4 https://img-cf.kurly.com/shop/data/goods/14775...\n10 67 p67 사토리 4 1 3 https://img-cf.kurly.com/shop/data/goods/15639...\n[49 rows x 7 columns]\n'''","sub_path":"proj/cheese-emp-ai/com_cheese_api/cop/itm/cheese_dummy/cheese_dfo_201110.py","file_name":"cheese_dfo_201110.py","file_ext":"py","file_size_in_byte":8122,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"177816830","text":"from collections import OrderedDict\nfrom pandas import DataFrame\nfrom altair import Chart\nimport altair as alt\nimport streamlit as st\nimport requests\n\n# caching for fast load times during local tests\n# not suitable for deployed app because it doesn't \n# account for a scholar's changing metrics as add more publications\n# @st.cache(allow_output_mutation=True, suppress_st_warning=True)\ndef validate_url(url: str) -> bool:\n \"\"\"Uses the requests library to check if a given URL returns a 200 response\n\n Args:\n url (str): URL string given by the user\n\n Returns:\n bool: Returns True if it got a 200 response, False if not\n \"\"\"\n try:\n response = requests.get(url)\n return response.status_code == 
requests.codes.ok\n\n except:\n return False\n\n# @st.cache(allow_output_mutation=True, suppress_st_warning=True)\ndef hit_scraper_api(url: str) -> dict:\n \"\"\"Sends a post request to the scraper API and gets back a json response as a Python dictionary\n\n Args:\n url (str): A verified URL given by the user\n\n Returns:\n dict: Either an error message, or the scraped metrics for a given author\n \"\"\"\n payload = {\"url\": url}\n # scraper_url = \"http://0.0.0.0:8080\" # local deploy\n scraper_url = \"https://scholarscraper-st2oqocqiq-uw.a.run.app\" # deployed API on Cloud Run\n response = requests.post(url=scraper_url, json=payload) \n\n return response.json()\n\n\ndef order_one(dct: dict) -> OrderedDict:\n \"\"\"Sorts the scholar's metrics alphanumerically, then preserves that order\n\n Args:\n dct (dict): scraped scholar metrics\n\n Returns:\n OrderedDict: the properly arranged scholar metrics\n \"\"\"\n return OrderedDict(sorted(dct.items()))\n\n\ndef order_dicts(dict1: dict, dict2: dict) -> OrderedDict:\n \"\"\"Sorts the metrics contained in two dictionaries\n\n Args:\n dict1 (dict): publication counts for author position\n dict2 (dict): citation counts for each author position\n\n Returns:\n OrderedDict: the properly sorted scholar metrics\n \"\"\"\n return order_one(dict1), order_one(dict2)\n\n# @st.cache(allow_output_mutation=True, suppress_st_warning=True)\ndef make_chart(df: DataFrame) -> Chart:\n \"\"\"Generates a horizontal bar chart of percentages of total citations by author position\n\n Args:\n df (DataFrame): dataframe of decimals by author position\n\n Returns:\n Chart: the rendered visualization\n \"\"\"\n return alt.Chart(df).mark_bar().encode(\n alt.X(\n 'portion_of_citations', \n axis=alt.Axis(\n title=\"percentage of citations\", \n tickCount=5, \n format='%'\n )\n ),\n alt.Y(\n 'positions', \n axis=alt.Axis(title=\"author position\"), \n sort=None\n ),\n color=alt.Color(\n \"positions\", \n scale=alt.Scale(scheme=\"greenblue\"), \n legend=None\n )\n ).properties(\n title='citations by author position'\n ).configure_axisX(\n labelAngle=0\n ).configure_axis(\n grid=False\n ).configure_view(\n strokeWidth=0\n )","sub_path":"dashboard/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3209,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"228027133","text":"import torch\n\nclass IMDBReviewDataset(torch.utils.data.Dataset):\n\n def __init__(self, reviews, labels, tokenizer, max_len):\n self.reviews = reviews\n self.labels = labels\n self.tokenizer = tokenizer\n self.max_len = max_len\n\n def __len__(self):\n return len(self.reviews)\n \n\n def __getitem__(self, idx):\n review = self.reviews[idx]\n\n encoding = self.tokenizer.encode_plus(\n review,\n max_length = self.max_len,\n add_special_tokens = True,\n padding='max_length',\n return_attention_mask = True,\n truncation = True,\n return_token_type_ids = False,\n return_tensors = 'pt'\n )\n\n return {\n 'review_text': review,\n 'input_ids' : encoding['input_ids'].flatten(),\n 'attention_mask': encoding['attention_mask'].flatten(),\n 'label': torch.tensor(self.labels[idx], dtype = torch.long)\n }\n\ndef createDataLoader(reviews, labels, tokenizer, max_len, batch_size):\n dataset = IMDBReviewDataset(\n reviews = reviews,\n labels = labels,\n tokenizer = tokenizer,\n max_len = max_len\n )\n return torch.utils.data.DataLoader(dataset, batch_size=batch_size, num_workers = 2)\n 
","sub_path":"dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":1299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"348629210","text":"import pandas as pd\nimport glob\nimport os\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import f1_score\nimport joblib\nfrom sklearn.tree import DecisionTreeClassifier\n\n\npath = os.getcwd()\nreport_path = path + \"/report\"\noutput_path = path + \"/output\"\nconfig_path = path + \"/config\"\nmodel_file = path + \"/savedmodel/DTree.pkl\"\n\ndf = pd.read_csv(output_path + \"/alertsAnalyzed.csv\")\n\nos.chdir(config_path)\nvariables = glob.glob('*.txt')\ni=0\nfor fileName in variables:\n file = open(fileName, \"r\")\n varname = str.replace(fileName, \".txt\", \"\")\n varname = str.lower(varname)\n variables[i]=varname\n i = i+ 1\nvariables.append('Cluster')\n\ndfClassify = df[variables]\nvariables.pop()\n\nX_train, X_test, y_train, y_test = train_test_split(dfClassify[variables], dfClassify['Cluster'], test_size=0.3,random_state=109)\n\ntree = DecisionTreeClassifier(criterion=\"gini\",max_depth=5)\nDTModel = tree.fit(X_train, y_train)\nDTPred = tree.predict(X_test)\njoblib.dump(DTModel, model_file)\n\n# model accuracy for X_test\naccuracy = DTModel.score(X_test, y_test)\nprint(accuracy)\n\nscore=f1_score(y_test, DTPred,average='weighted')\nprint(score)\n","sub_path":"failurePredectionModel.py","file_name":"failurePredectionModel.py","file_ext":"py","file_size_in_byte":1151,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"555621315","text":"#!/usr/bin/env python\n\nimport pika\n\nconnection = pika.BlockingConnection(pika.ConnectionParameters(host='localhost'))\n\nchannel = connection.channel()\nchannel.queue_declare(queue='hello')\n\nbody = b'Hello World!'\n\nchannel.basic_publish(exchange='', routing_key='hello', body=body)\nprint(\" [x] Sent %r\" % body)\nconnection.close()\n","sub_path":"send.py","file_name":"send.py","file_ext":"py","file_size_in_byte":327,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"616278979","text":"import importlib\nimport traceback\n\n_primitives = [\n 'bool',\n 'int8',\n 'uint8',\n 'int16',\n 'uint16',\n 'int32',\n 'uint32',\n 'int64',\n 'uint64',\n 'float32',\n 'float64',\n 'boolean',\n 'float',\n 'double',\n 'string',\n 'time',\n 'duration']\n\nclass MsgException(Exception):\n _msg = 'MsgException'\n\n def addInfo(self, i, line):\n self._i = i\n self._line = line\n\n def __str__(self):\n return '%s(line=%d, str=%s)' % (self._msg, self._i, self._line)\n\nclass InvalidInnerTypeName(MsgException):\n _msg = 'InvalidInnerTypeName'\n pass\n \nclass InvalidPrimitiveTypeName(MsgException):\n _msg = 'InvalidPrimitiveTypeName'\n pass\n\nclass InvalidArgument(MsgException):\n def __init__(self, msg):\n self._msg = msg\n\n def __str__(self):\n return self._msg\n\nclass MsgMemberList(object):\n \n def __init__(self):\n self._m = []\n\n def append(self, m):\n self._m.append(m)\n\n def forEach(self, func):\n for m in self._m:\n func(m)\n\n def map(self, func):\n return [func(m) for m in self._m]\n \n def filter(self, func):\n return [m for m in self._m if func(m)]\n \n def findByName(self, name):\n rs = self.filter(lambda m:m.name == name)\n if len(rs) == 0: return None\n elif len(rs) == 1: return rs[0]\n else: raise InvalidArgument('Find multiple members by one name(%s)' % name)\n\n\nclass MsgType(object):\n def 
__init__(self, name):\n        self._is_primitive = self._check_typename(name)\n        self._pkgName = '' if self.is_primitive else name.split('/')[0]\n        self._name = name if self.is_primitive else name.split('/')[1]\n        \n    @property\n    def is_primitive(self):\n        return self._is_primitive\n\n    @property\n    def fullName(self):\n        return str(self)\n\n    @property\n    def name(self):\n        return self._name\n\n    @property\n    def packageName(self):\n        return self._pkgName\n\n    def _check_typename(self, n):\n        if n.find('/') >= 0:\n            ns = n.split('/')\n            if len(ns) < 2:\n                raise InvalidInnerTypeName()\n            elif len(ns) > 2:\n                raise InvalidInnerTypeName()\n            return False\n        \n        if n in _primitives:\n#            raise InvalidPrimitiveTypeName\n            self._is_primitive = True\n            return True\n\n    def __str__(self):\n        if self.is_primitive: return self.name\n        return self.packageName + '/' + self.name\n\n\nclass MsgMember(object):\n    \n    def __init__(self, _type, name, comment='', constant_value=None):\n        self._type = _type\n        self._name = name\n        self._comment = comment\n        self._value = constant_value\n\n    @property\n    def name(self):\n        return self._name\n\n    @property\n    def type(self):\n        return self._type\n\n    def setType(self, t):\n        self._type = t\n\n    def __str__(self):\n        return self.name + ': ' + self.type.name\n\n    @property\n    def comment(self):\n        return self._comment\n\n    @property\n    def is_constant(self):\n        return not self._value is None\n\n    @property\n    def value_str(self):\n        return self._value\n\n    @property\n    def value(self):\n        if not self.type.is_primitive:\n            return self._value\n        if self.type.fullName.find('int') >= 0:\n            return int(self._value)\n        elif self.type.fullName.find('float') >= 0:\n            return float(self._value)\n        elif self.type.fullName.find('bool') >= 0:\n            return self._value == 'true' or self._value == 'True' or self._value == 'TRUE'\n        elif self.type.fullName == 'string':\n            return self._value\n        else:\n            return self._value\n    \n    \n    \n    \nclass ROSStruct2(MsgType):\n\n    def __init__(self, pkgName, name=None):\n        self._is_primitive = False\n        if name is None: # only one value passed.\n            self._check_typename(pkgName)\n            if self._is_primitive:\n                name = pkgName\n                pkgName = ''\n            else:\n                if len(pkgName.split('/')) == 2:\n                    name = pkgName.split('/')[1]\n                    pkgName = pkgName.split('/')[0]\n                else:\n                    name = pkgName\n                    pkgName = ''\n        MsgType.__init__(self, name if self._is_primitive else pkgName + '/' + name)\n        # self._name = name\n        # self._pkgName = pkgName\n        self._members = MsgMemberList()\n        self._comment = ''\n\n    def addComment(self, c):\n        self._comment = c\n        \n    def addMember(self, m):\n        self._members.append(m)\n\n    @property\n    def comment(self):\n        return self._comment\n    \n    @property\n    def members(self):\n        return self._members\n\n\n# The rest of this module refers to ROSStruct; keep it as an alias of\n# ROSStruct2 so those references resolve instead of raising NameError.\nROSStruct = ROSStruct2\n\n\nclass SrvObject(object):\n\n    def __init__(self, pkgName, name):\n        self._name = name\n        self._pkgName = pkgName\n        self._comment = ''\n        self.request = ROSStruct(pkgName, name + '_request')\n        self.response = ROSStruct(pkgName, name + '_response')\n    \n    def addComment(self, c):\n        self._comment = c\n\n    @property\n    def comment(self):\n        return self._comment\n    \n    @property\n    def packageName(self):\n        return self._pkgName\n\n    @property\n    def name(self):\n        return self._name\n\n\nclass ActionObject(object):\n\n    def __init__(self, pkgName, name):\n        self._name = name\n        self._pkgName = pkgName\n        self._comment = ''\n        self.goal = ROSStruct(pkgName, name + '_goal')\n        self.result = ROSStruct(pkgName, name + '_result')\n        self.feedback = ROSStruct(pkgName, name + '_feedback')\n    \n    def addComment(self, c):\n        self._comment = c\n\n    @property\n    def comment(self):\n        return 
self._comment\n \n @property\n def packageName(self):\n return self._pkgName\n\n @property\n def name(self):\n return self._name\n\n\n\nclass Parser2(object):\n\n def __init__(self, ignore_header=True, ignore_goal_id=False):\n\n self._ignore_header = ignore_header\n self._ignore_goal_id = ignore_goal_id\n pass\n\n def __parse_name(self, name):\n p = name.split('/')\n if len(p) != 2:\n raise InvalidArgument('Invalid Package/Filename format(%s)' % name)\n return p\n \n def parse_action_str(self, name, argstr, typeDict={}):\n p = self.__parse_name(name)\n m = ActionObject(p[0], p[1])\n\n comment = None\n commentLines = []\n commentPhase = 'action'\n parsePhase = 'goal'\n for i, line in enumerate(argstr.split('\\n')):\n try:\n line = line.strip()\n if line.startswith('#') and (not commentPhase.startswith('end')):\n commentLines.append(line[1:].strip())\n continue\n\n if len(commentLines) == 0 and len(line) == 0:\n continue\n\n if self._ignore_header:\n if line.startswith('Header'):\n continue\n if self._ignore_goal_id:\n if line.startswith('actionlib_msgs/GoalID'):\n continue\n \n \n if comment is None:\n comment = '\\n'.join(commentLines)\n \n if commentPhase == 'action':\n m.addComment(comment if comment else '')\n commentPhase = 'goal'\n comment = None\n commentLines = []\n elif commentPhase == 'goal':\n m.goal.addComment(comment if comment else '')\n commentPhase = 'end_goal'\n comment = None\n commentLines = []\n elif commentPhase == 'result':\n m.result.addComment(comment if comment else '')\n commentPhase = 'end_result'\n comment = None\n commentLines = []\n elif commentPhase == 'feedback':\n m.feedback.addComment(comment if comment else '')\n commentPhase = 'end'\n \n if line.startswith('-'):\n comment = None\n if parsePhase == 'goal': parsePhase = 'result'\n elif parsePhase == 'result': parsePhase = 'feedback'\n if commentPhase == 'end_result' or commentPhase == 'result':\n commentPhase = 'feedback'\n elif commentPhase == 'goal' or commentPhase == 'end_goal':\n commentPhase = 'result'\n continue\n\n if len(line) == 0: continue\n\n value_comment = ''\n tokens = line.strip().split('#')\n if len(tokens) > 1:\n line = tokens[0]\n value_comment = ''.join(tokens[1:]).strip()\n ms = line.strip().split()\n if len(ms) != 2:\n raise InvalidArgument('Invalid Syntax(lineNum=%s, line=\"%s\",ms=\"%s\", len(line)=%d)' % (i, line, ms, len(line)))\n mem = MsgMember(self.create_ros_struct(ms[0], typeDict), ms[1], value_comment)\n if parsePhase == 'goal':\n m.goal.addMember(mem)\n elif parsePhase == 'result':\n m.result.addMember(mem)\n elif parsePhase == 'feedback':\n m.feedback.addMember(mem)\n except MsgException as e:\n traceback.print_exc()\n e.addInfo(i, line)\n raise e\n\n return m\n\n def parse_srv_str(self, name, argstr, typeDict={}):\n p = self.__parse_name(name)\n srv = SrvObject(p[0], p[1])\n\n comment = None\n commentLines = []\n commentPhase = 'srv'\n argparsing = True\n for i, line in enumerate(argstr.split('\\n')):\n try:\n line = line.strip()\n if line.startswith('#') and (not commentPhase is 'end'):\n commentLines.append(line[1:].strip())\n continue\n\n if len(commentLines) == 0 and len(line) == 0:\n continue\n \n if comment is None:\n comment = '\\n'.join(commentLines)\n \n if commentPhase == 'srv':\n srv.addComment(comment if comment else '')\n commentPhase = 'request'\n comment = None\n commentLines = []\n elif commentPhase == 'request':\n srv.request.addComment(comment if comment else '')\n commentPhase = 'end'\n comment = None\n commentLines = []\n elif commentPhase == 
'response':\n srv.response.addComment(comment if comment else '')\n commentPhase = 'end'\n \n if line.startswith('-'):\n comment = None\n commentPhase = 'response'\n argparsing = False\n continue\n\n if len(line) == 0: continue\n\n value_comment = ''\n tokens = line.strip().split('#')\n if len(tokens) > 1:\n line = tokens[0]\n value_comment = ''.join(tokens[1:]).strip()\n ms = line.strip().split()\n if len(ms) != 2:\n raise InvalidArgument('Invalid Syntax(lineNum=%s, line=\"%s\",ms=\"%s\", len(line)=%d)' % (i, line, ms, len(line)))\n m = MsgMember(self.create_ros_struct(ms[0], typeDict=typeDict), ms[1], value_comment)\n if argparsing:\n srv.request.addMember(m)\n else:\n srv.response.addMember(m)\n except MsgException as e:\n traceback.print_exc()\n e.addInfo(i, line)\n raise e\n\n return srv\n\n\n def parse_str(self, name, argstr, typeDict={}):\n return self.parse_msg_str(name, argstr, typeDict)\n\n def parse_msg_str(self, name, argstr, typeDict={}):\n # print 'parse_msg_str(', name,',ignore_header=', self._ignore_header, ')'\n p = self.__parse_name(name)\n\n msg = ROSStruct(p[0], p[1])\n comment = None\n commentLines = []\n for i, line in enumerate(argstr.split('\\n')):\n try:\n orgLine = line\n line = line.strip()\n if line.startswith('#'):\n if comment is None:\n commentLines.append(line[1:].strip())\n continue\n \n if len(commentLines) == 0 and len(line) == 0:\n continue\n\n if len(line) == 0: # empty line\n if comment is None:\n comment = '\\n'.join(commentLines)\n continue\n if comment is None:\n comment = '\\n'.join(commentLines)\n\n value_comment = ''\n tokens = line.strip().split('#')\n if len(tokens) > 1:\n line = tokens[0]\n value_comment = tokens[1].strip()\n tokens = line.strip().split('=')\n is_constant_definition = False\n constant_value = ''\n if len(tokens) > 1: # Constant Definition\n line = tokens[0]\n is_constant_definition = True\n constant_value = tokens[1].strip()\n pass\n \n ms = line.strip().split()\n if len(ms) != 2:\n # print 'argstr:', argstr\n raise InvalidArgument('Invalid Syntax(line=%s, linedata=\"%s\", len(line)=\"%d\", value_comment=\"%s\")' % (i, orgLine, len(line), value_comment))\n if not is_constant_definition:\n if self._ignore_header:\n if line.startswith('Header'):\n continue\n if self._ignore_goal_id:\n #print line\n if line.startswith('actionlib_msgs/GoalID'):\n continue\n # print 'MsgMember creating...', ms\n m = MsgMember(self.create_ros_struct(ms[0], typeDict, p[0]), ms[1], value_comment)\n msg.addMember(m)\n else:\n m = MsgMember(self.create_ros_struct(ms[0], typeDict, p[0]), ms[1], value_comment, constant_value)\n msg.addMember(m)\n except MsgException as e:\n e.addInfo(i, line)\n raise e\n msg.addComment(comment if comment else '')\n return msg\n\n def create_ros_struct(self, typeName, typeDict={}, current_package=''):\n # print 'create_ros_struct(', typeName, ')'\n if typeName in _primitives:\n return ROSStruct(typeName)\n else:\n if typeName in typeDict.keys():\n return self.parse_msg_str(typeName, typeDict[typeName], typeDict)\n elif typeName == 'Header':\n typeName2 = 'std_msgs/Header'\n return self.parse_msg_str(typeName2, typeDict[typeName2], typeDict)\n else:\n typeName2 = current_package + '/' + typeName\n if typeName2 in typeDict.keys():\n return self.parse_msg_str(typeName2, typeDict[typeName2], typeDict)\n else:\n print('No type (', typeName2, ') found.')\n return ROSStruct(typeName)\n\n def parse_type_dictionary(self, full_text):\n ft = []\n lines = full_text.split('\\n')\n line_buf = []\n for line in lines:\n if 
line.startswith('==='):\n                ft.append('\\n'.join(line_buf))\n                line_buf = []\n            else:\n                line_buf.append(line)\n        ft.append('\\n'.join(line_buf))\n        subtypes = {}\n        if len(ft) > 1:\n            for f in ft[1:]:\n                lines = [l for l in f.split('\\n') if len(l.strip()) > 0]\n                if not lines[0].startswith('MSG:'):\n                    raise InvalidArgument('This member is not available to parse')\n                name = lines[0][4:].strip()\n                value = '\\n'.join(lines[1:])\n                subtypes[name] = value\n        return ft[0], subtypes\n\n    def parse_type_dictionary2(self, cls):\n        return (cls.__module__.split('.')[0] + '/' + cls.__name__, \"\", {})\n\n    def parse_msg_class_str2(self, type_fullpath):\n        if type_fullpath in _primitives:\n            return MsgType(type_fullpath)\n        if type_fullpath.startswith('sequence'):\n            print('ROS2 msg parser currently does not support sequence class.')\n            return None\n        else:\n            pkg, name = [None, None]\n            try:\n                print(type_fullpath)\n                pkg, name = type_fullpath.split('/')\n            except Exception as e:  # bind the exception so the handler below can use it\n                traceback.print_exc()\n                print('Exception: ' + type_fullpath + ' for ' + str(e))\n                raise e\n            metaclass = getattr(importlib.import_module(pkg + '.msg'), name)\n            return self.parse_msg_class2(metaclass)\n    \n    def parse_msg_class2(self, cls):\n        msg = ROSStruct2(cls.__module__.split('.')[0], cls.__name__)\n        for name, type_fullpath in cls.get_fields_and_field_types().items():\n            metaclass = self.parse_msg_member2(type_fullpath, name)\n            if metaclass:\n                msg.addMember(metaclass)\n        return msg\n    \n    def parse_msg_member2(self, type_fullpath, name):\n        metaclass = self.parse_msg_class_str2(type_fullpath)\n        if metaclass:\n            return MsgMember(metaclass, name)\n        return None\n\n    def parse_srv_class(self, cls):\n        p = self.__parse_name(cls._type)\n        srv = SrvObject(p[0], p[1])\n        srv.request = self.parse_msg_class(cls._request_class)\n        srv.response = self.parse_msg_class(cls._response_class)\n        return srv\n\n    def parse_action_class(self, cls):\n\n        clsText, typeDict = self.parse_type_dictionary(cls._full_text)\n        # return self.parse_action_str(cls._type, clsText, typeDict)\n        return self.create_ros_struct(cls._type + 'Goal', typeDict)\n\n\n    def parse_msg_str2(self, name, argstr, typeDict={}):\n        # print 'parse_msg_str(', name,',ignore_header=', self._ignore_header, ')'\n        p = self.__parse_name(name)\n\n        msg = ROSStruct(p[0], p[1])\n        comment = None\n        commentLines = []\n        for i, line in enumerate(argstr.split('\\n')):\n            try:\n                orgLine = line\n                line = line.strip()\n                if line.startswith('#'):\n                    if comment is None:\n                        commentLines.append(line[1:].strip())\n                    continue\n                \n                if len(commentLines) == 0 and len(line) == 0:\n                    continue\n\n                if len(line) == 0: # empty line\n                    if comment is None:\n                        comment = '\\n'.join(commentLines)\n                    continue\n                if comment is None:\n                    comment = '\\n'.join(commentLines)\n\n                value_comment = ''\n                tokens = line.strip().split('#')\n                if len(tokens) > 1:\n                    line = tokens[0]\n                    value_comment = tokens[1].strip()\n                tokens = line.strip().split('=')\n                is_constant_definition = False\n                constant_value = ''\n                if len(tokens) > 1: # Constant Definition\n                    line = tokens[0]\n                    is_constant_definition = True\n                    constant_value = tokens[1].strip()\n                    pass\n                \n                ms = line.strip().split()\n                if len(ms) != 2:\n                    # print 'argstr:', argstr\n                    raise InvalidArgument('Invalid Syntax(line=%s, linedata=\"%s\", len(line)=\"%d\", value_comment=\"%s\")' % (i, orgLine, len(line), value_comment))\n                if not is_constant_definition:\n                    if self._ignore_header:\n                        if line.startswith('Header'):\n                            continue\n                    if self._ignore_goal_id:\n                        #print line\n                        if line.startswith('actionlib_msgs/GoalID'):\n                            continue\n                    # print 'MsgMember creating...', ms\n                    
m = MsgMember(self.create_ros_struct(ms[0], typeDict, p[0]), ms[1], value_comment)\n msg.addMember(m)\n else:\n m = MsgMember(self.create_ros_struct(ms[0], typeDict, p[0]), ms[1], value_comment, constant_value)\n msg.addMember(m)\n except MsgException as e:\n e.addInfo(i, line)\n raise e\n msg.addComment(comment if comment else '')\n return msg\n","sub_path":"msg_parser/parser2.py","file_name":"parser2.py","file_ext":"py","file_size_in_byte":21268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"74477356","text":"from models.discriminators.GeneralDiscriminator import GeneralDiscriminator\nfrom models.embedders.GeneralEmbedder import GeneralEmbedder\nfrom models.generators.GeneralGenerator import GeneralGenerator\nfrom utils.constants import *\nimport importlib\nimport os\nfrom utils.general_utils import ensure_current_directory\nimport torch.optim as opt\n\nLOSS_DIR = \"losses\"\nEMBED_DIR = \"embedders\"\nGEN_DIR = \"generators\"\nOPTIMS = \"optim\"\nDIS_DIR = \"discriminators\"\n\ntypes = [DIS_DIR, EMBED_DIR, GEN_DIR, LOSS_DIR]\nmodels = {x: {} for x in types}\n\ndef _read_all_classnames():\n \"\"\"\n private function that imports all class references in a dictionary\n\n :return:\n \"\"\"\n\n for typ in types:\n for name in os.listdir(f\"./models/{typ}\"):\n if (not \"__\" in name):\n short_name = name.split(\".\")[0]\n module = importlib.import_module(f\"models.{typ}.{short_name}\")\n class_reference = getattr(module, short_name)\n models[typ][short_name] = class_reference\n\n models[OPTIMS] = {}\n models[OPTIMS][\"Adam\"] = opt.Adam\n models[OPTIMS][\"RMSprop\"] = opt.RMSprop\n models[OPTIMS][\"SGD\"] = opt.SGD\n\n\n\ndef find_right_model(type: str, name: str, **kwargs):\n \"\"\"\n returns model with arguments given a string name-tag\n\n :param type:\n :param name:\n :param kwargs:\n :return:\n \"\"\"\n\n return models[type][name](**kwargs)\n\n\ndef save_models(discriminator: GeneralDiscriminator, generator: GeneralGenerator, embedder: GeneralEmbedder,\n suffix: str):\n \"\"\"\n Saves current state of models\n\n :param discriminator:\n :param generator:\n :param embedder:\n :param suffix: determines file name\n :param data_manager: from training (contains the right date_stamp_directory)\n :return:\n \"\"\"\n save_dict = {\"discriminator\": discriminator.state_dict(), \"generator\": generator.state_dict(),\n \"embedder\": embedder.state_dict()}\n\n DATA_MANAGER.save_python_obj(save_dict, f\"{DATA_MANAGER.stamp}/{MODELS_DIR}/{suffix}\")\n\n\ndef load_models_and_state(discriminator: GeneralDiscriminator, generator: GeneralGenerator, embedder: GeneralEmbedder,\n suffix: str, stamp: str):\n \"\"\"\n Loads saved models given a suffix and then also loads in state dicts already\n\n :param discriminator: fully initialized\n :param generator: fully initialized\n :param embedder: fully initialized\n :param suffix: filename\n :param stamp: date_Stamp_directory\n :return:\n \"\"\"\n\n models = DATA_MANAGER.load_python_obj(f\"{stamp}/{MODELS_DIR}/{suffix}\")\n\n # discriminator.load_state_dict(models[\"discriminator\"])\n embedder.load_state_dict(models[\"embedder\"])\n generator.load_state_dict(models[\"generator\"])\n\n discriminator.to(DEVICE)\n embedder.to(DEVICE)\n generator.to(DEVICE)\n\n return discriminator, generator, embedder\n\n\ndef load_states(suffix: str, stamp: str):\n \"\"\"\n Only loads state dicts\n\n :param suffix: filename\n :param stamp: date_stamp\n :return:\n \"\"\"\n\n return 
DATA_MANAGER.load_python_obj(f\"{stamp}/{MODELS_DIR}/{suffix}\")\n\n\n# needed to load in class references\n_read_all_classnames()\n\nif __name__ == '__main__':\n # unit-test functions here\n ensure_current_directory()\n _read_all_classnames()\n z = find_right_model(\"losses\", \"GeneralLoss\")\n print(type(z))\n","sub_path":"utils/model_utils.py","file_name":"model_utils.py","file_ext":"py","file_size_in_byte":3359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"519192344","text":"from pyScoreParser import xml_matching\n\nimport numpy as np\n\ndef feature_output_to_midi(prediction, note_locations, xml_doc, xml_notes,means, stds, multi_instruments=False):\n prediction = scale_model_prediction_to_original(prediction, means, stds)\n\n output_features = xml_matching.model_prediction_to_feature(prediction)\n output_features = xml_matching.add_note_location_to_features(output_features, note_locations)\n\n output_xml = xml_matching.apply_tempo_perform_features(xml_doc, xml_notes, output_features, start_time=1,\n predicted=True)\n output_midi, midi_pedals = xml_matching.xml_notes_to_midi(output_xml, multi_instruments)\n return output_midi, midi_pedals, output_features\n\ndef scale_model_prediction_to_original(prediction, MEANS, STDS):\n for i in range(len(STDS)):\n for j in range(len(STDS[i])):\n if STDS[i][j] < 1e-4:\n STDS[i][j] = 1\n prediction = np.squeeze(np.asarray(prediction.cpu()))\n num_notes = len(prediction)\n for i in range(11):\n prediction[:, i] *= STDS[1][i]\n prediction[:, i] += MEANS[1][i]\n # for i in range(11, 15):\n # prediction[:, i] *= STDS[1][i+4]\n # prediction[:, i] += MEANS[1][i+4]\n return prediction","sub_path":"src/data/post_processing.py","file_name":"post_processing.py","file_ext":"py","file_size_in_byte":1284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"263989125","text":"#from falcon_rest.auth import auth\nfrom falcon_rest.conf import settings\nimport falcon\n\nfrom sqlalchemy.orm import sessionmaker\nSession = sessionmaker(bind=settings.DB_ENGINE)\n\nclass CoreMiddleWare:\n \n def process_request(self,req,resp):\n req.context['session'] = Session()\n\n def process_response(self,req,resp,resource,req_succeeded):\n try:\n if req_succeeded:\n req.context['session'].commit()\n else:\n req.context['session'].rollback()\n except:\n pass\n finally:\n req.context['session'].close()\n\n\nclass CORSMiddleWare:\n def process_response(self, req, resp, resource, req_succeeded):\n resp.set_header('Access-Control-Allow-Origin','*')\n if (req.method == 'OPTIONS' and req.get_header('Access-Control-Request-Method')): #req_succeeded and \n #preflight CORS request\n\n allow = resp.get_header('Allow')\n resp.delete_header('Allow')\n\n allow_headers = req.get_header(\n 'Access-Control-Request-Headers', default='*'\n )\n\n resp.set_headers((\n ('Access-Control-Allow-Methods', allow),\n ('Access-Control-Allow-Headers', allow_headers),\n ('Access-Control-Max-Age', '86400'), # 24 hours\n ))\n\n\n\n\"\"\"\nclass AuthMiddleWare:\n \n def process_resource(self,req,resp,resource,params):\n \n if req.method != 'OPTIONS':\n \n must_login = True\n auth_token_type = 'Bearer'\n\n try:\n must_login = resource.login_required #we expect login_required = False\n except AttributeError:\n pass\n \n try:\n auth_token_type = resource.auth_token_type #we expect login_required = False\n except AttributeError:\n pass\n \n\n if must_login:\n auth_data = None\n\n if auth_token_type == 
'Bearer':\n                \n\n                    auth_data = auth.validate_jwt_token( bearer_token = req.auth)\n                    \n                    if auth_data is None:\n                        raise falcon.HTTPUnauthorized(description = 'Login Required')\n                    \n                    req.context['auth'] = auth_data\n    \n    def get_secret_key(self,req):\n        return self.secret_key\n\"\"\"","sub_path":"falcon_rest/middlewares.py","file_name":"middlewares.py","file_ext":"py","file_size_in_byte":2423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"438058881","text":"import requests\nfrom bs4 import BeautifulSoup\nimport datetime\nimport pandas as pd\nfrom pandas import ExcelWriter\nimport re\n\n\ndef get_dop_data(url):\n    print(url)\n    r = requests.get(url)\n    page = BeautifulSoup(r.text, 'html.parser')\n    fragment_time = page.find('table', class_='table table-bordered table-mini where-pass-tests-table')\n    fragment_time.th.decompose()\n    fragment_time = fragment_time.find('tr')\n    work_time = BeautifulSoup(fragment_time.prettify(), 'html.parser')\n    td = [[td.get_text(strip=True) for td in tr.find_all('td')] for tr in work_time.find_all('tr')][0]\n    th = [[th.get_text(strip=True) for th in th.find_all('th')] for th in work_time.find_all('tr')][0]\n    end_res = dict(zip(th, td))\n    end_text = str(end_res).replace('{','').replace('}','').replace(',',';').replace(\"'\",'')\n    return end_text\n\n\ndef search_script():\n    url = 'https://www.cmd-online.ru/patsientam/gde-sdat-analizy/#region1'\n    r = requests.get(url)\n    page = BeautifulSoup(r.text, 'html.parser')\n    all_scripts = page.find_all('script')\n    for script in all_scripts:\n        if 'coord_lat' in script.get_text():\n            return script\n\n\ndef get_page():\n    fragmen_page = search_script()\n    fragmen_page = fragmen_page.prettify().split(\"['id']\")\n    good_data = []\n    for data in fragmen_page:\n        new_data = \"['id']\" + data\n        id = re.findall(r\"'id'] = '(.*)';\", new_data)\n        coord_lat = re.findall(r\"'coord_lat'] = '(.*)';\", new_data)\n        coord_long = re.findall(r\"'coord_long'] = '(.*)';\", new_data)\n        address = re.findall(r\"'address'] = '(.*)';\", new_data)\n        url = re.findall(r\"'url'] = '(.*)';\", new_data)\n\n        good_row = {}\n        if id != []:\n            if 'Скоро открытие' in address[0]:\n                good_row['address'] = address[0].replace(' - Скоро открытие', '')\n                good_row['country'] = 'Russian Federation'\n                try:\n                    good_row['working_time'] = get_dop_data('https://www.cmd-online.ru' + url[0])\n                except:\n                    pass\n\n                good_row['x'] = coord_long[0]\n                good_row['y'] = coord_lat[0]\n\n                good_row['status'] = 'Construction'\n                good_row['status_id'] = '2'\n                good_row['Подходящие выражения'] = 'Скоро открытие'\n\n                good_row['brand_name'] = 'CMD'\n                good_row['holding_name'] = 'CMD'\n                good_row['website'] = 'https://www.cmd-online.ru/'\n                good_row['date_review'] = datetime.datetime.now()\n                print(good_row)\n                good_data.append(good_row)\n\n            else:\n                good_row['address'] = address[0]\n                good_row['country'] = 'Russian Federation'\n                try:\n                    good_row['working_time'] = get_dop_data('https://www.cmd-online.ru' + url[0])\n                except:\n                    pass\n\n                good_row['x'] = coord_long[0]\n                good_row['y'] = coord_lat[0]\n\n                good_row['logo'] = None\n                good_row['status'] = 'Open'\n                good_row['status_id'] = '1'\n\n                good_row['brand_name'] = 'CMD'\n                good_row['holding_name'] = 'CMD'\n                good_row['website'] = 'https://www.cmd-online.ru/'\n\n                good_row['date_review'] = datetime.datetime.now()\n\n                print(good_row)\n                good_data.append(good_row)\n        else:\n            pass\n\n    return good_data\n\n\n# Function for writing to XLSX; not called here (commented out), see 
maria_pd_data\ndef write_xlsx(df, name_file):\n    writer = ExcelWriter(f'{name_file}.xlsx')\n    df.to_excel(writer, 'Sheet1')\n    writer.save()\n    return 'FILE SAVED'\n\ndef cmd_pd_data():\n    \"\"\"\n    We use requests and BS4\n    1. Request the page https://www.cmd-online.ru/patsientam/gde-sdat-analizy/#region1\n\n    2. Using the search_script() function:\n    3. Look for the script tag with the data we need by checking whether it contains the line\n        if 'coord_lat' in script.get_text():\n        if it is the right fragment, return it (script) to the get_page() function\n\n    4. where we extract part of the data with regular expressions:\n        id = re.findall(r\"'id'] = '(.*)';\", new_data)\n        city = re.findall(r\"'city'] = '(.*)';\", new_data)\n        coord_lat = re.findall(r\"'coord_lat'] = '(.*)';\", new_data)\n        coord_long = re.findall(r\"'coord_long'] = '(.*)';\", new_data)\n        address = re.findall(r\"'address'] = '(.*)';\", new_data)\n        url = re.findall(r\"'url'] = '(.*)';\", new_data)\n        If the address contains the words \"Скоро открытие\" (\"Opening soon\"), change the status and its id\n    5. To get the working hours, open the page of the specific laboratory\n        - Use BS4 to find the table with the data\n        fragment_time = page.find('table', class_='table table-bordered table-mini where-pass-tests-table')\n        - Parse the table and build the working-hours string\n    6. Build good_row and append it to good_data\n    7. Build a DF from good_data\n    :return:\n    \"\"\"\n    good_data = get_page()\n    df = pd.DataFrame(good_data)\n    print(df)\n    write_xlsx(df,'cmd')\n    return df\n\ncmd_pd_data()","sub_path":"crawlers/laboratoy/cmd.py","file_name":"cmd.py","file_ext":"py","file_size_in_byte":5759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"626360654","text":"# -*- coding: utf-8 -*-\n\"\"\"\n@File : test04.py\n@Time : 12/10/20 4:42 PM\n@Author : Mingqiang Ning\n@Email : ningmq_cv@foxmail.com\n@Modify Time @Version @Description\n------------ -------- -----------\n12/10/20 4:42 PM 1.0 None\n# @Software: PyCharm\n\"\"\"\nimport torch\nfrom torchvision import models\nfrom tensorboardX import SummaryWriter\na=torch.randn(3,3)\nb=models.vgg16()\nif torch.cuda.is_available():\n    a=a.cuda()\n    b=b.cuda()\ndevice=torch.device('cuda:0')\nc=torch.randn(3,3,device=device,requires_grad=True)\n\nwriter=SummaryWriter()\nfor i in range(10):\n    writer.add_scalar('quadratic',i**2,global_step=i)\n    writer.add_scalar('exponential', 2 ** i, global_step=i)\n\n\n\n\n\n","sub_path":"D20201210/test04.py","file_name":"test04.py","file_ext":"py","file_size_in_byte":713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"254268600","text":"# Leetcode problem link : https://leetcode.com/problems/reverse-linked-list/\n# Time Complexity : O(n)\n# Space Complexity : O(1)\n# Did this code successfully run on Leetcode : Yes\n# Any problem you faced while coding this : No\n\n# Your code here along with comments explaining your approach\n'''\n    Basic approach : O(n) space => Copy to another linked list by creating new nodes\n    \n    Optimized approach: O(n) => 1. Maintain 3 pointers\n                                2. current will keep track of current node\n                                3. Prev and Next will track current's previous node and current's next node respectively\n                                4. When current becomes null, prev will be pointing to the last element, which after reversing becomes the new head.\n                                5. 
Return previous pointer\n \n'''\n\n# Definition for singly-linked list.\n# class ListNode:\n# def __init__(self, val=0, next=None):\n# self.val = val\n# self.next = next\n\nclass Solution:\n def reverseList(self, head: ListNode) -> ListNode:\n if not head:\n return None\n \n prev = None\n curr = head\n next = None\n \n while(curr):\n next = curr.next\n curr.next = prev\n prev = curr\n curr = next\n \n \n return prev\n ","sub_path":"206_Reverse_Linked_List.py","file_name":"206_Reverse_Linked_List.py","file_ext":"py","file_size_in_byte":1289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"44875332","text":"__author__ = 'Austin'\nfrom DataSet import *\nimport numpy as np\nfrom Polynomial import *\nimport random\nimport time\nimport cmath\n\n\nmin = None\nstart = time.time()\narsq = 0\ndeg = 0\nmovavg = 0\nlastmovavg = 0\nendrange = 100\nfor i in range(0,1000):\n data1 = DataSet([\"x\",\"y\"])\n y = random.randrange(-100,100)\n avaar = []\n for x in range(0,endrange+1):\n y += random.randrange(-50,51)\n if x > 4:\n lastmovavg = movavg\n movavg = data1.getMovAvg(\"y\",5)\n else:\n movavg = y\n data1.appendDataPoint([x,y])\n avaar.append(movavg) \n fitname, fit, rsq = data1.getCurveFitEasy(\"x\",\"y\")\n minmax = fit.getMinMax([0,51], scale = True)\n kmeans = data1.getKmeans(\"y\")\n stats = data1.getStats(\"y\")\n data1.addDataVariable(\"kl\", [kmeans[0][0]]*data1.lenData + kmeans[0][1])\n data1.addDataVariable(\"kh\", [kmeans[1][0]]*data1.lenData - kmeans[1][1])\n data1.addDataVariable(\"mvav\", avaar)\n arsq += rsq\n deg += fit.degree\n yvals = data1.getDataVariable(\"y\")\n deriv = fit.differentiate().evaluate(endrange-1,scale=True)\n print(fit.degree, rsq)\n print(kmeans)\n print(stats)\n print(movavg)\n print(minmax)\n print(yvals[endrange] - yvals[endrange-1], deriv, movavg - lastmovavg)\n data1.plotData(\"x\",[\"y\",fitname,\"kl\",\"kh\"])\n print(\"\")\narsq /= 1000\ndeg /= 1000\nend = time.time()\ntotal = end - start\nprint(\"Time Elapsed: \", total, \"\\nTime per: \", (total/1000))\nprint(arsq, deg)\n","sub_path":"stringtest.py","file_name":"stringtest.py","file_ext":"py","file_size_in_byte":1479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"234618491","text":"\n\n#calss header\nclass _INCUMBENCY():\n\tdef __init__(self,): \n\t\tself.name = \"INCUMBENCY\"\n\t\tself.definitions = [u'the period during which someone has a particular official position: ']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'nouns'\n\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/nouns/_incumbency.py","file_name":"_incumbency.py","file_ext":"py","file_size_in_byte":356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"193002060","text":"from stix2 import FileSystemSource\nfs = FileSystemSource('./enterprise-attack')\n\nfrom stix2 import Filter\nfilt = Filter('type', '=', 'attack-pattern')\n\nmalwares = fs.query(Filter(\"type\", \"=\", 'malware'))\n[print(m) for m in malwares if m.name == 'Emotet']\n# print(malwares[3].name)\n\n# * Query relationships\nall_rs = fs.query(Filter(\"type\", \"=\", 'relationship'))\n# print(all_rs[3])\nrelationships = [r for r in all_rs if r.source_ref == 'malware--32066e94-3112-48ca-b9eb-ba2b59d2f023']\nprint(relationships)\n# print(type(relationships))\n\n# * Query relationships\n# 
all_rs = fs.query(Filter(\"type\", \"=\", 'relationship'))\n# [print(r) for r in all_rs if r.target_ref == 'malware--32066e94-3112-48ca-b9eb-ba2b59d2f023']\n\n# * Query techniques\n# techniques = fs.query([filt])\n# print(techniques[0].x_mitre_data_sources)\n# [print(t) for t in techniques]\n\n# * Query software\n# from itertools import chain\n\n# def get_all_software(src):\n# filts = [\n# [Filter('type', '=', 'malware')],\n# [Filter('type', '=', 'tool')]\n# ]\n# return list(chain.from_iterable(\n# src.query(f) for f in filts\n# ))\n \n# l = get_all_software(fs)\n# print(l[0])","sub_path":"query.py","file_name":"query.py","file_ext":"py","file_size_in_byte":1161,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"288134493","text":"# -*- coding: utf-8 -*-\n\nfrom odoo import fields, models, api, _\n\n\nclass PosUseService(models.Model):\n _inherit = 'pos.use.service'\n\n signature = fields.Binary(\"Signature\", attachment=True, store=True)\n customer_confirm = fields.Boolean(related='pos_session_id.config_id.module_izi_pos_customer_confirm',\n string=\"Customer Confirm\")\n # custmer_confirm_id = fields.Many2one('pos.customer.confirm', \"Customer Confirm\")\n\n @api.multi\n def action_done(self):\n if self.pos_session_id.config_id.module_izi_pos_customer_confirm is False:\n return self._action_done()\n else:\n view = self.env.ref('izi_pos_customer_confirm.izi_pos_use_service_confirm_form')\n return {\n 'name': _('Sign Customer?'),\n 'type': 'ir.actions.act_window',\n 'view_type': 'form',\n 'view_mode': 'form',\n 'res_model': 'pos.use.service',\n 'views': [(view.id, 'form')],\n 'view_id': view.id,\n 'target': 'new',\n 'res_id': self.id,\n 'context': self.env.context,\n }\n\n @api.multi\n def action_customer_signature(self):\n self.write({\n 'signature': self.signature\n })\n if self.pos_order_id:\n self.pos_order_id.write({\n 'x_signature': self.signature\n })\n return self._action_done()\n\n\nclass PosUseServiceLine(models.Model):\n _inherit = 'pos.use.service.line'\n\n customer_rate = fields.Selection([(0, 'Normal'), (1, 'Good'), (2, \"Excellent\")], default=2)\n # custmer_confirm_id = fields.Many2one('pos.customer.confirm', \"Customer Confirm\")\n","sub_path":"izi_pos_customer_confirm/models/pos_use_service.py","file_name":"pos_use_service.py","file_ext":"py","file_size_in_byte":1740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"} +{"seq_id":"150944830","text":"#!/usr/bin/env python\n# Name: Jesse Pannekeet\n# Student number: 10151494\n\"\"\"\nThis script scrapes IMDB and outputs a CSV file with highest rated movies.\n\"\"\"\n\nimport csv\nfrom requests import get\nfrom requests.exceptions import RequestException\nfrom contextlib import closing\nfrom bs4 import BeautifulSoup\n\nTARGET_URL = \"https://www.imdb.com/search/title?title_type=feature&release_date=2008-01-01,2018-01-01&num_votes=5000,&sort=user_rating,desc\"\nBACKUP_HTML = 'movies.html'\nOUTPUT_CSV = 'movies.csv'\n\ndef extract_movies(dom):\n \"\"\"\n Extract a list of highest rated movies from DOM (of IMDB page).\n Each movie entry should contain the following fields:\n - Title\n - Rating\n - Year of release (only a number!)\n - Actors/actresses (comma separated if more than one)\n - Runtime (only a number!)\n \"\"\"\n # creates list of lists of actors in each movie\n # initialises moviesactors list for storage\n moviesactors = []\n # determines when the program should start adding to the moviesactors list\n actortime = False\n 
for i in dom.find_all('p'):\n actors = []\n # leaves first value for each movie out of the list as this is the director\n first = True\n director_and_actors = i.find_all('a', href=True)\n for director_or_actor in director_and_actors:\n if director_or_actor.string != None and first == False:\n actors.append(director_or_actor.string)\n first = False\n # only appends to moviesactors list if first movie is found and actors are found\n if len(actors) > 0:\n if actortime == True:\n moviesactors.append(actors)\n if actors[0] == 'Community':\n actortime=True\n\n # finds all movie ratings and puts them in list\n allratings = []\n # determines when movies are found\n ratings = False\n for rating in dom.find_all('strong'):\n # appends rating to ratings list when first movie is found\n if ratings == True:\n allratings.append(rating.string)\n if 'User Rating' == rating.string:\n ratings = True\n\n # adds all relevant movie details to the movie list\n movies = []\n # moves over each movie's rating, year, title, actors and runtime\n for rating, year, title, actors, runtime in zip(allratings,\n dom.find_all('span', 'lister-item-year text-muted unbold'),\n dom.find_all('h3'), moviesactors,\n dom.find_all('span', 'runtime')):\n # removes unused characters from release year\n if '(I)' in year.string:\n year.string = year.string[5:9]\n elif '(II)' in year.string:\n year.string = year.string[6:10]\n else:\n year.string = year.string[1:5]\n # removes unused characters from runtime\n runtime.string = runtime.string[:3]\n\n # adds relevant movie date to a list of movies\n movie = title.a.string, rating, year.string, actors, runtime.string\n movies.append(movie)\n\n\n # HIGHEST RATED MOVIES\n # NOTE: FOR THIS EXERCISE YOU ARE ALLOWED (BUT NOT REQUIRED) TO IGNORE\n # UNICODE CHARACTERS AND SIMPLY LEAVE THEM OUT OF THE OUTPUT\n return[movies] # REPLACE THIS LINE AS WELL IF APPROPRIATE\n\ndef has_class_but_no_id(tag):\n return tag.has_attr('href')\n\ndef save_csv(outfile, movies):\n \"\"\"\n Output a CSV file containing highest rated movies.\n \"\"\"\n writer = csv.writer(outfile)\n writer.writerow(['Title', 'Rating', 'Year', 'Actors', 'Runtime'])\n\n\n # adds all movies to the csv file\n for movie in movies:\n for value in movie:\n writer.writerow(value)\n\n\ndef simple_get(url):\n \"\"\"\n Attempts to get the content at `url` by making an HTTP GET request.\n If the content-type of response is some kind of HTML/XML, return the\n text content, otherwise return None\n \"\"\"\n try:\n with closing(get(url, stream=True)) as resp:\n if is_good_response(resp):\n return resp.content\n else:\n return None\n except RequestException as e:\n print('The following error occurred during HTTP GET request to {0} : {1}'.format(url, str(e)))\n return None\n\n\ndef is_good_response(resp):\n \"\"\"\n Returns true if the response seems to be HTML, false otherwise\n \"\"\"\n content_type = resp.headers['Content-Type'].lower()\n return (resp.status_code == 200\n and content_type is not None\n and content_type.find('html') > -1)\n\n\nif __name__ == \"__main__\":\n\n # get HTML content at target URL\n html = simple_get(TARGET_URL)\n\n # save a copy to disk in the current directory, this serves as an backup\n # of the original HTML, will be used in grading.\n with open(BACKUP_HTML, 'wb') as f:\n f.write(html)\n\n # parse the HTML file into a DOM representation\n dom = BeautifulSoup(html, 'html.parser')\n\n # extract the movies (using the function you implemented)\n movies = extract_movies(dom)\n\n # write the CSV file to disk (including a 
header)\n with open(OUTPUT_CSV, 'w', newline='') as output_file:\n save_csv(output_file, movies)\n","sub_path":"Homework/Week_1/moviescraper.py","file_name":"moviescraper.py","file_ext":"py","file_size_in_byte":5251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"70"}