diff --git "a/6335.jsonl" "b/6335.jsonl" new file mode 100644--- /dev/null +++ "b/6335.jsonl" @@ -0,0 +1,684 @@ +{"seq_id":"511061619","text":"# ========================================\n# properties\n\n# ========================================\nimport math\nimport numpy as np\n# ==========================================sediment\n# from book <>, 3.12 means eq number in the book\nhmin =0.02 #decide threhold wet/dry\ndiased=0.0003 #sediment diameter\nspec=2.65 #specific gravity\nporo=0.4 #porosity\nrhos=2650 #sendiment specific weight\nrhow=1025 #water specific weight\nrhob=rhos*(1-poro)+rhow*poro #total specific weight\namiu=0.000001 #kinetic visocity\nmanning=0.018\ndt0=0.6\n\nzs_int1=0.0001 #upstream\nzs_int2=0.0001 #downstream\ninitzs=1.8\n# zbmin1=0.1\n# zbmin1=1.0\n# cohesion=5000\n# cohesion=10000\ncohesion=15000\nfric=0.4\n\nzbmin1=1.0\n# zbmin1=3.0\nerokd = 0.0000104\namiud=amiu/diased\nwset=math.sqrt((13.95*amiud)**2+1.09*(spec-1.0)*9.81*diased)-13.95*amiud # settling speed 3.12\nustarcr2=0.03*1.65*9.81*diased #critical shear velocity not in the book but similar to 3.27\ncqb=math.sqrt(1.65*9.81*diased**3) # for suspended transport 3.102\n# ==========================================sediment\n\n# =========== dam\ndamheight=2.3\ndamwidth=7.4\n# damslope1=1\ndamslope1=0.3333333333\n# damslope2=1\ndamslope2=0.3333333333\n\nstreamslope1=0\n# streamslope2=0.05\n# streamslope2=0\n#! 0.01 ok now\n# streamslope2=0.01\nstreamslope2=0.05\n# streamslope2=0.001\n\n# =========== notch\ncreastlength=4.6\nnotchdepth=0.463\n# notchwidth=0.5\nnotchwidth=1.83\nnotchslope1=0.3333333333\n# notchslope1=1.0\nnotchslope2=0.3333333333\n# notchslope2=1.0\n# notch location\n# def notchlocation(method): method,1 - customize location 2- middle location\nmethod=2\nif method==1:\n notchcorner1=1\nelif method==2:\n notchcorner1=damwidth/2-(notchwidth/2+notchdepth/notchslope1)\n\n\n# ================ dam location here damtoe=0\ndamdis1=damheight/damslope1\ndamdis2=damheight/damslope2\ndamcorner1=damdis1\ndamcorner2=damdis1+creastlength\ndamtoe2=damdis1+damdis2+creastlength\ndamtoe1=0.0\ndamcenter = 0.5*(damtoe2 + damtoe1)\n\nnotchtoe1=notchcorner1+notchdepth/notchslope1\nnotchtoe2=notchtoe1+notchwidth\nnotchcorner2=notchtoe2+notchdepth/notchslope2\nnotchheight=damheight-notchdepth\n\n\n# ======================================================= repose\n# reposedry=1.0\n# reposewet=1.0\nreposedry = 100\n# reposewet = 0.65\nreposewet = 1.0\n# reposewet = 100\n\n\n# slope=1\n# d\n\n\n\n# ======================================================= cross direction\n# print('notchheight=',notchheight)","sub_path":"src_py/prot.py","file_name":"prot.py","file_ext":"py","file_size_in_byte":2410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"544758055","text":"import os\nimport copy\nimport re\n\nimport anchor_txt\nimport six\n\nfrom . import code\nfrom . import artifact\n\nNAME_REFERENCE_STR = '@?' 
+ code.NAME_FULL_STR\nNAME_REFERENCE_RE = re.compile(NAME_REFERENCE_STR, re.I)\n\n\ndef dump_project(project, with_links=True):\n \"\"\"Dump the artifact project with fresh reference links.\"\"\"\n # make a copy so we can mutate things\n project = copy.deepcopy(project)\n\n # scrub all sections of things like reference links\n scrub_sections_recurse(project.root_section)\n\n lines = project.root_section.to_lines()\n strip_empty_lines(lines)\n\n if with_links:\n # add our own references at the end\n links = get_reference_links(project)\n if lines or links:\n lines.append('')\n lines.extend(links)\n\n return lines\n\n\ndef strip_empty_lines(lines):\n for i in reversed(range(len(lines))):\n if not lines[i].strip():\n lines.pop(i)\n else:\n break\n\n\ndef get_reference_links(project):\n if project.settings.code_url is None:\n return\n\n reference_links = []\n\n for artifact in project.artifacts:\n name = artifact.name\n reference_links.append(reference_link_inline(project.settings, name))\n for subpart in artifact.subparts:\n reference_links.append(\n reference_link_inline(project.settings, name, subpart=subpart))\n\n for name, impl in six.iteritems(project.impls):\n if impl.primary:\n reference_links.append(\n reference_link_code(project.settings, name, impl.primary[0]))\n\n subparts = sorted(six.iteritems(impl.secondary), key=lambda x: x[0])\n for subpart, codelocs in subparts:\n reference_links.append(\n reference_link_code(project.settings,\n name,\n codelocs[0],\n subpart=subpart))\n\n lines = []\n\n for reflink in reference_links:\n lines.extend(reflink.to_lines())\n\n lines.sort()\n\n if lines:\n lines.append('')\n\n return lines\n\n\ndef reference_link_inline(settings, name, subpart=None):\n reference = reference_str(name, subpart)\n\n return anchor_txt.ReferenceLink.from_parts(\n reference=reference,\n link='#' + reference,\n )\n\n\ndef reference_link_code(settings, name, codeloc, subpart=None):\n reference = reference_str(name, subpart)\n\n link = settings.code_url.format(\n file=settings.relpath(codeloc.file),\n line=codeloc.line + 1,\n )\n return anchor_txt.ReferenceLink.from_parts(\n reference='@' + reference,\n link=link,\n )\n\n\ndef reference_str(name, subpart=None):\n if subpart is None:\n return name.raw\n else:\n return '{}.{}'.format(name.raw, subpart.raw)\n\n\ndef get_last_section(project):\n last = None\n if project.sections:\n last = project.sections[-1]\n if last is None:\n return None\n return _last_section_recurse(last)\n\n\ndef _last_section_recurse(section):\n if isinstance(section, artifact.Artifact):\n section = section.section\n if section.sections:\n return _last_section_recurse(section.sections[-1])\n return section\n\n\ndef scrub_sections_recurse(section):\n section.contents = [\n c for c in section.contents if not _is_artifact_reference(c)\n ]\n\n for child in section.sections:\n scrub_sections_recurse(child)\n\n\ndef _is_artifact_reference(content):\n return (isinstance(content, anchor_txt.ReferenceLink)\n and NAME_REFERENCE_RE.match(content.reference))\n","sub_path":"artifact_py/dump.py","file_name":"dump.py","file_ext":"py","file_size_in_byte":3587,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"523988791","text":"import re\nfrom .lineinfo import MultiLineInfo, LineInfo\nfrom ..util import ReportableException\n\nclass LexError(ReportableException): pass\n\nclass Lexer:\n\tdef __init__(self):\n\t\tself.closed = False\n\t\tself.mode = None\n\t\tself.initial_mode = None\n\t\tself.patterns = 
{}\n\t\tself.eof = {}\n\n\tdef reg(self, regex, action):\n\t\tself.patterns.setdefault(self.mode, []).append((re.compile(regex), action))\n\n\tdef symbols(self, literals, symbols):\n\t\tdef symbol(lit, sym):\n\t\t\tself.reg(lit, lambda s: [(sym, None)])\n\t\t\n\t\tfor lit, sym in zip(literals, symbols):\n\t\t\tsymbol(lit, sym)\n\n\tdef\tprocess_all(self, buffer_, fname=\"\"):\n\t\tself.lines = buffer_.split(\"\\n\")\n\t\tself.fname = fname\n\t\tself.lineno = 0\n\t\tself.linepos = 0\n\t\tself.prev_success = None\n\n\t\tself.mode = self.initial_mode\n\t\tself.out = []\n\t\tself.buffer = buffer_\n\t\tself.closed = True\n\t\twhile self.buffer:\n\t\t\tself.process()\n\t\treturn self.out, self.process()\n\n\tdef process(self):\n\t\tlongest_match = None\n\t\tlongest_match_action = None\n\n\t\tdef new_lineinfo(width):\n\t\t\tinfo = LineInfo()\n\t\t\tinfo.filename = self.fname\n\t\t\tinfo.lines = self.lines\n\t\t\tinfo.lineno = self.lineno\n\t\t\tinfo.pos = self.linepos\n\t\t\tinfo.num_marked = width\n\t\t\treturn info\n\n\t\tif len(self.buffer) == 0:\n\t\t\treturn new_lineinfo(0)\n\n\t\tfor pat, action in self.patterns[self.mode]:\n\t\t\tmatch = pat.match(self.buffer)\n\t\t\tif not match:\n\t\t\t\tcontinue\n\n\t\t\tif match.end() > (longest_match.end() if longest_match else 0):\n\t\t\t\tlongest_match = match\n\t\t\t\tlongest_match_action = action\n\n\t\tif longest_match == None:\n\t\t\trest_of_line = self.buffer.find(\"\\n\")\n\t\t\tif rest_of_line == -1: rest_of_line = len(self.buffer)\n\t\t\traise LexError(MultiLineInfo([(\"Lexical error\", new_lineinfo(rest_of_line))]))\n\n\t\tif longest_match.end() == len(self.buffer) and not self.closed:\n\t\t\treturn # can't be sure this is the end yet\n\n\t\ts = longest_match.group()\n\n\t\tinfo = new_lineinfo(longest_match.end())\n\t\tself.prev_success = info\n\n\t\tself.lineno += s.count(\"\\n\")\n\t\ti = s.rsplit(\"\\n\", 1)\n\t\tif len(i) > 1:\n\t\t\tself.linepos = len(i[-1])\n\t\telse:\n\t\t\tself.linepos += len(i[-1])\n\n\t\toutsyms = longest_match_action(s)\n\t\toutsyms = [(a, b, info) for a, b in outsyms]\n\t\tself.out.extend(outsyms)\n\t\tself.buffer = self.buffer[longest_match.end():]\n\n\t\tif self.closed == True and len(self.buffer) == 0:\n\t\t\tself.eof[self.mode]()\n\n\tdef set_mode(self, mode):\n\t\tself.mode = mode\n\n\ndef codea_lexer():\n\n\tlex = Lexer()\n\n\tkw = (\n\t\t\"auto break case char const continue default do double else enum extern float for goto if \"+\n\t \"int long register return short signed sizeof static struct switch typedef union unsigned \"+\n\t \"void volatile while == != <= >= -> == != --\"\n\t)\n\n\tinitial = object()\n\tin_comment = object()\n\n\tlast_comment = [None]\n\n\tdef comment_begin(s):\n\t\tlast_comment[0] = lex.prev_success\n\n\t\tlex.set_mode(in_comment)\n\t\treturn []\n\n\tdef comment_end(s):\n\t\tlex.set_mode(initial)\n\t\treturn []\n\n\tdef comment_at_eof():\n\t\tlc = last_comment[0]\n\t\traise LexError(MultiLineInfo([(\"Unclosed comment\", lc)]))\n\n\tlex.mode = initial\n\tlex.symbols(kw.split(\" \"), kw.split(\" \"))\n\tlex.symbols([r\"\\+\\+\", r\"\\&\\&\", r\"\\|\\|\"], [\"++\", \"&&\", \"||\"])\n\tlex.reg(r\"[0-9]+\", lambda s: [(\"num\", int(s))]) # decnumber\n\tlex.reg(r\"0x[0-9a-fA-F]*\", lambda s: [(\"num\", int(s[:-1], 16))]) # hexnumber\n\tlex.reg(r\"'.'\", lambda s: [(\"char_const\", s[1:-1])]) # character\n\tlex.reg(r'\"([^\"]|\\\")*?\"', lambda s: [(\"string\", s)]) # strings\n\tlex.reg(r\"[\\n\\t ]\", lambda s: []) # whitespace\n\tlex.reg(r\"[_a-zA-Z][a-zA-Z0-9_]*\", lambda s: [(\"id\", 
s)]) # identifier\n\tlex.reg(r\"[<>:;,().+\\-*={}/\\[\\]%&]\", lambda s: [(s, None)]) # operand\n\tlex.reg(r\"/\\*\", comment_begin)\n\n\tlex.mode = in_comment\n\tlex.reg(r\"\\*\", lambda s: []) # eat the lone star\n\tlex.reg(r\"[^*\\n]+\", lambda s: []) # eat comment in chunks\n\tlex.reg(r\"\\n\", lambda s: []) # newlines are ok\n\tlex.reg(r\"\\*/\", comment_end)\n\n\tlex.eof[initial] = lambda: None\n\tlex.eof[in_comment] = lambda: comment_at_eof()\n\n\tlex.initial_mode = initial\n\n\treturn lex\n\ndef codea_lexer_test():\n\tcal = codea_lexer()\n\tcal.buffer = \"func x() return not-not-x; end;\"\n\tcal.closed = True\n\twhile cal.buffer:\n\t\tcal.process()\n\t\tprint(cal.out)\n\nif __name__ == '__main__':\n\t#codea_lexer_test()\n\tisel_lexer_test()","sub_path":"vin/parsing/lex.py","file_name":"lex.py","file_ext":"py","file_size_in_byte":4323,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"62839678","text":"\"\"\"\n Single Number\n -------------\n https://leetcode.com/explore/interview/card/top-interview-questions-easy/92/array/549/\n\"\"\"\n\n\nclass Solution:\n def __init__(self, nums):\n self.nums = nums\n\n def single_number_use_sort(self):\n \"\"\"\n :time: O(n*log(n) + n)\n :space: O(n)\n \"\"\"\n self.nums.sort()\n self.nums = ['a'] + self.nums + ['b']\n for i in range(1, len(self.nums)-1):\n if self.nums[i-1] != self.nums[i] != self.nums[i+1]:\n return self.nums[i]\n\n def single_number_use_counter(self):\n from collections import Counter\n counter = Counter(self.nums)\n for num, count in counter.items():\n if count == 1:\n return num\n\n def single_number_use_list(self):\n \"\"\"\n :time: O(n^2)\n :space: O(n)\n \"\"\"\n no_dup_lst = []\n for num in self.nums:\n if num not in no_dup_lst:\n no_dup_lst.append(num)\n else:\n no_dup_lst.remove(num)\n return no_dup_lst[0]\n\n def single_number_use_dict(self):\n # from collections import defaultdict\n # dct = defaultdict(int)\n # for num in self.nums:\n # dct[num] += 1\n dct = {}\n for num in self.nums:\n dct[num] = dct.get(num, 0) + 1\n for num, count in dct.items():\n if count == 1:\n return num\n\n def single_number_use_dict2(self):\n dct = {}\n for num in self.nums:\n # try:\n # dct.pop(num)\n # except:\n # dct[num] = 1\n if num in dct:\n dct.pop(num)\n else:\n dct[num] = 1\n return dct.popitem()[0]\n\n def single_number_use_set(self):\n record = set()\n for num in self.nums:\n if num in record:\n record.remove(num)\n else:\n record.add(num)\n return record.pop()\n\n def single_number_use_math(self):\n \"\"\"\"\n :time complexity: O(n+n)\n :space complexity: O(n+n)\n \"\"\"\n return 2 * sum(set(self.nums)) - sum(self.nums)\n\n def single_number_use_bit_manipulation(self):\n from functools import reduce\n return reduce(lambda x, y: x ^ y, self.nums)\n\n def single_number_use_bit_manipulation2(self):\n from operator import xor\n from functools import reduce\n return reduce(xor, self.nums)\n\n def single_number_use_bit_manipulation3(self):\n \"\"\"\"\n :time complexity: O(n)\n :space complexity: O(1)\n \"\"\"\n res = 0\n for num in self.nums:\n res ^= num\n return res\n\n\nif __name__ == '__main__':\n solution = Solution([4, 1, 2, 1, 2])\n print(solution.single_number_use_bit_manipulation3())\n","sub_path":"src/leetcode/array/single_number.py","file_name":"single_number.py","file_ext":"py","file_size_in_byte":2843,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"519490392","text":"import json\nimport os\n\ndata = 
json.load(open(os.path.join(\"..\", \"data\", \"features\" ,\"all_features_3.json\")))\n\ndictionary_map = {}\n\nfor entry in data:\n for term in entry[\"features\"][\"tokens_symbols\"]:\n if term not in dictionary_map:\n dictionary_map[term] = len(dictionary_map)\n\nN = 20\n\nprint(\"Dictionary is buit, size: %s, start to work n = %s\" % (len(dictionary_map), N))\n\nfor entry in data:\n tokens = entry[\"features\"][\"tokens_symbols\"]\n num_tokens = len(tokens)\n context_vectors = []\n for num in entry[\"features\"][\"numbers\"]:\n context = [0] * len(dictionary_map)\n token_indx = num[\"index_symbols\"]\n left_indx = max(0, token_indx - N)\n right_indx = min(num_tokens - 1, token_indx + N + 1)\n indises = list(range(left_indx, token_indx)) + list(range(token_indx + 1, right_indx))\n for i in indises:\n context[dictionary_map[tokens[i]]] += 1\n context_vectors.append(context)\n entry[\"features\"][\"target_num_symbols_context_vectors\"] = context_vectors\n\njson.dump(data, open(os.path.join(\"..\", \"data\", \"features\" ,\"all_features_4.json\"), \"w\"))\n","sub_path":"feature_extraction/02_03_context_symbols.py","file_name":"02_03_context_symbols.py","file_ext":"py","file_size_in_byte":1133,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"629925142","text":"import tornado.websocket\n\nfrom tornado.ioloop import IOLoop\nfrom tornado.iostream import _ERRNO_CONNRESET\nfrom tornado.util import errno_from_exception\n\n\nclass Worker(object):\n def __init__(self, ssh, channel, dest_addr):\n self.loop = IOLoop.current()\n self.ssh = ssh\n self.channel = channel\n self.dest_addr = dest_addr\n self.fd = channel.fileno()\n self.id = str(id(self))\n self.data_to_dst = []\n self.handler = None\n self.mode = IOLoop.READ\n\n\n def __call__(self, fd, events):\n if events & IOLoop.READ:\n self.on_read()\n if events & IOLoop.WRITE:\n self.on_write()\n if events & IOLoop.ERROR:\n self.close()\n\n\n def set_handler(self, handler):\n if not self.handler:\n self.handler = handler\n\n\n def update_handler(self, mode):\n if self.mode != mode:\n self.loop.update_handler(self.fd, mode)\n self.mode = mode\n\n\n def on_read(self):\n try:\n data = self.channel.recv(1024)\n except (OSError, IOError) as e:\n if errno_from_exception(e) in _ERRNO_CONNRESET:\n self.close()\n else:\n if not data:\n self.close()\n return\n\n try:\n self.handler.write_message(data)\n except tornado.websocket.WebSocketClosedError:\n self.close()\n\n\n def on_write(self):\n if not self.data_to_dst:\n return\n\n data = ''.join(self.data_to_dst)\n\n try:\n sent = self.channel.send(data)\n except (OSError, IOError) as e:\n if errno_from_exception(e) in _ERRNO_CONNRESET:\n self.close()\n else:\n self.update_handler(IOLoop.WRITE)\n else:\n self.data_to_dst = []\n data = data[sent:]\n if data:\n self.data_to_dst.append(data)\n self.update_handler(IOLoop.WRITE)\n else:\n self.update_handler(IOLoop.READ)\n\n\n def close(self):\n if self.handler:\n self.loop.remove_handler(self.fd)\n self.handler.close()\n\n self.channel.close()\n self.ssh.close()\n","sub_path":"app/main/worker.py","file_name":"worker.py","file_ext":"py","file_size_in_byte":2253,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"354578386","text":"import pyecharts.options as opts\nfrom pyecharts.charts import Pie\n\n# 饼形图数据\nx1 = [\"北京\", \"上海\", \"广州\"]\ny1 = [1168, 890,578]\ndata1 = [list(z) for z in zip(x1,y1)]\n# 环形图数据\nx2 = [\"北京\", \"上海\", \"河南省\", \"广州\", \"湖南省\", 
\"四川省\", \"湖北省\", \"河北省\", \"江苏省\", \"浙江省\"]\ny2 = [1168, 890, 234, 578, 345, 225, 188, 101,999,1300]\ndata2 = [list(z) for z in zip(x2,y2)]\n# 饼形图与环形图组合\n(\n Pie(init_opts=opts.InitOpts(width=\"1000px\", height=\"600px\"))\n # 饼形图\n .add(\n series_name=\"销售地区\",\n data_pair=data1,\n radius=[0, \"30%\"],\n label_opts=opts.LabelOpts(position=\"inner\"), # 饼形图标签\n )\n # 环形图\n .add(\n series_name=\"销售地区\",\n radius=[\"40%\", \"55%\"],\n data_pair=data2,\n # 环形图标签\n label_opts=opts.LabelOpts(\n position=\"outside\", # 标签位置\n # 标签格式化\n formatter=\"{a|{a}}{bg|}\\n{hr|}\\n {b|{b}: }{c} {per|{d}%} \",\n background_color=\"#FAFAD2\", # 背景色\n border_color=\"#FFA500\", # 边框颜色\n border_width=1, # 边框宽度\n border_radius=4, # 边框半径\n # 利用富文本样式,定义标签效果\n rich={\n \"a\": {\"color\": \"black\", \"lineHeight\": 22, \"align\": \"center\"},\n \"bg\": {\n \"backgroundColor\": \"#FFA500\",\n \"width\": \"100%\",\n \"align\": \"right\",\n \"height\": 22,\n \"borderRadius\": [4, 4, 0, 0],\n },\n \"hr\": {\n \"borderColor\": \"#aaa\",\n \"width\": \"100%\",\n \"borderWidth\": 0.5,\n \"height\": 0,\n },\n \"b\": {\"fontSize\": 14, \"lineHeight\": 33},\n \"per\": {\n \"color\": \"#eee\",\n \"backgroundColor\": \"#334455\",\n \"padding\": [2, 4],\n \"borderRadius\": 2,\n },\n },\n ),\n )\n .set_global_opts(legend_opts=opts.LegendOpts(pos_left=\"left\", orient=\"vertical\"))\n .set_series_opts(\n tooltip_opts=opts.TooltipOpts(\n trigger=\"item\", formatter=\"{a}
{b}: {c} ({d}%)\"\n )\n )\n .render(\"mypies.html\")\n)\n\n","sub_path":"Python数据分析从入门到精通/MR/Code/07/example/03/demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":2387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"522955840","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jul 30 12:16:50 2019\n\n@author: AnnaGrr\n\"\"\"\n\n#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jul 19 13:59:05 2019\n\n@author: AnnaGrr\n\"\"\"\n\nimport numpy as np\nimport gcsfs\n\n\ndef preprocess(train_file,harmonic,seq_len,augment):\n## Load data and separate into input and output training, evaluation and test sets.\n \n# fs = gcsfs.GCSFileSystem(project='jovial-hawk-246922')\n# with fs.open(train_file) as f:\n# df = pd.read_csv(f)\n if harmonic == 'harmonic':\n# Augmented/\n folder = 'Harmonic/'\n else:\n folder = 'Non-Harmonic/'\n if augment == 'Augment':\n augment = 'Augmented/'\n dataset = train_file+augment+folder\n \n X_train_file = dataset+'X_train'+str(seq_len)+'.npy'\n X_val_file = dataset+'X_val'+str(seq_len)+'.npy'\n X_test_file = dataset+'X_test'+str(seq_len)+'.npy'\n \n Y_train_file = dataset+'Y_train'+str(seq_len)+'.npy'\n Y_val_file = dataset+'Y_val'+str(seq_len)+'.npy'\n Y_test_file = dataset+'Y_test'+str(seq_len)+'.npy'\n \n \n fs = gcsfs.GCSFileSystem(project='jovial-hawk-246922')\n with fs.open(X_train_file) as f:\n X_train = np.load(f)\n with fs.open(X_val_file) as f:\n X_val = np.load(f)\n with fs.open(X_test_file) as f:\n X_test = np.load(f)\n with fs.open(Y_train_file) as f:\n Y_train = np.load(f)\n with fs.open(Y_val_file) as f:\n Y_val = np.load(f)\n with fs.open(Y_test_file) as f:\n Y_test = np.load(f)\n \n return X_train, X_val, X_test, Y_train, Y_val, Y_test\n\n \n ","sub_path":"OneHot/Augment/LSTM/trainer/get_data.py","file_name":"get_data.py","file_ext":"py","file_size_in_byte":1610,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"587722695","text":"import json\nimport time\nimport utils\nimport logging\n\nlogging.basicConfig(format='%(levelname)s %(asctime)s %(filename)s %(lineno)d: %(message)s')\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.INFO)\n\n\nMAXLEN = 1400\n\n\ndef callback(ch, method, properties, body):\n global MODEL, VOCAB, VOCAB_SIZE, CHECK\n data = json.loads(body)\n if data['predicted_relevancy'] == 1:\n logger.info('Started processing content. {}'.format(data['pipeline_key']))\n\n process(data, MODEL, VOCAB, VOCAB_SIZE, CHECK)\n\n logger.info('Finished quad tagging. {}'.format(data['pipeline_key']))\n else:\n logger.info('Irrelevant content. 
{}'.format(data['pipeline_key']))\n pass\n ch.basic_ack(delivery_tag=method.delivery_tag)\n\n\ndef process(data, model, vocab, vocab_size, check):\n publish = 'quad'\n rabbit_publish = utils.RabbitClient(queue=publish,\n host='rabbitmq')\n\n sents = data['sents']\n data['event_info'] = {}\n for sid, sent in sents.iteritems():\n try:\n logger.info('Processing sent {} for content {}'.format(sid,\n data['pipeline_key']))\n mat = utils.encode_data([sent], MAXLEN, vocab, vocab_size, check)\n pred = model.predict(mat)\n pred_class = pred.argmax(1)[0]\n pred_score = pred[0][pred_class]\n data['event_info'][sid] = {}\n data['event_info'][sid]['predicted_class'] = {'class': pred_class,\n 'score': str(pred_score)}\n data['event_info'][sid]['sent'] = sent\n except Exception as e:\n # If something goes wrong, log it and return nothing\n logger.info(e)\n # Make sure to update this line if you change the variable names\n data['event_info'][sid]['predicted_class'] = {}\n\n rabbit_publish.send(data, publish)\n\n\ndef main():\n logger.info('... waiting ...')\n time.sleep(30)\n logger.info('... done ...')\n\n consume = 'relevancy'\n rabbit_consume = utils.RabbitClient(queue=consume,\n host='rabbitmq')\n\n rabbit_consume.receive(callback)\n\n\nif __name__ == '__main__':\n args = utils.parse_arguments()\n\n logger.info('Loading model...')\n MODEL, VOCAB = utils.load_model(args)\n VOCAB_SIZE = len(VOCAB.keys())\n CHECK = set(VOCAB.keys())\n\n main()\n","sub_path":"quad/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"628876297","text":"# 2. 写一个函数 input_number\n# def input_number():\n# ....\n# 此函数用来获取用户循环输入的整数,当用户输入负数时结束输入\n# 将用户输入数以列表的形式返回,再用内建函数max, min, sum 示出用户输入的最大值,最小值及和:\n# 如:\n# L = input_number()\n# print(L) # 打印此列表\n# print(\"用户输入的最大数是:\", max(L))\n# print(\"用户输入的最小数是:\", min(L))\n# print(\"用户输入的和是:\", sum(L))\n\n\ndef input_number():\n L = []\n while True:\n n = int(input(\"请输入: \"))\n if n < 0:\n return L # 如果n为负数,把之前输入的值返回\n L.append(n) # 如果n为大于等于0的数,则加入列表L\n\ndef main():\n L = input_number()\n print(L) # 打印此列表\n print(\"用户输入的最大数是:\", max(L))\n print(\"用户输入的最小数是:\", min(L))\n print(\"用户输入的和是:\", sum(L))\n\nmain()","sub_path":"第一阶段/3. 
Python02/day03/exercise/02_input_number2.py","file_name":"02_input_number2.py","file_ext":"py","file_size_in_byte":999,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"588808076","text":"#!/anaconda3/bin/python\nimport pandas as pd\n# from mysql.connector import errorcode\n# import mysql.connector\n# from mysql.connector import (connection)\n# import psycopg2\n\n\n# # cnx = mysql.connector.connect(user='root',)\n# # cnx.close()\n\n# try:\n# connection = psycopg2.connect(user=\"ganasene\",\n# host=\"127.0.0.1\",\n# port=\"5432\",\n# database=\"mabase\")\n\n# cursor = connection.cursor()\n# # Print PostgreSQL Connection properties\n# print(connection.get_dsn_parameters(), \"\\n\")\n\n# # Print PostgreSQL version\n# cursor.execute(\"SELECT version();\")\n# record = cursor.fetchone()\n# print(\"You are connected to - \", record, \"\\n\")\n\n# except (Exception, psycopg2.Error) as error:\n# print(\"Error while connecting to PostgreSQL\", error)\n# finally:\n# #closing database connection.\n# if(connection):\n# cursor.close()\n# connection.close()\n# print(\"PostgreSQL connection is closed\")\n\n# # print(dir(psycopg2))\n\n\ndf_path = pd.read_csv(\"/Users/ganasene/Desktop/resultats_xml/job_path.csv\")\ndf_stage = pd.read_csv(\"/Users/ganasene/Desktop/resultats_xml/job_stage.csv\")\n\n#### path datafrae\n# print(df_path.head())\n# cols=[col for col in df_stage]\n# print(cols)\n# rename columns\n\ndf_path.rename(columns={\"jobName\":\"jobName_path\",\"logFile\":\"logFile_path\",\\\n \"recordType\":\"recordType_path\", \"projectName\":\"projectName_path\"}, inplace=True)\n\ndf_stage.rename(columns={\"jobName\": \"jobName_stage\", \"logFile\": \"logFile_stage\",\n \"recordType\": \"recordType_stage\", \"projectName\": \"projectName_stage\"}, inplace=True)\n\n\ndf_stage.to_json('/Users/ganasene/Desktop/resultats_xml/job_stage.json')\ndf_path.to_json('/Users/ganasene/Desktop/resultats_xml/job_path.json')\n\ndf_path.to_\n\n\n\n\n\n\n\n\n\n\n\n\n\n# # print(len(tuple_Job_logfile_file_trueFile_attr))\n# # print(len(tuple1_Job_logfile))\n\n\n# tuple_Job_logfile_file_trueFile_attr = [\n# (\"tableEnfant\", \"fanzonelog.txt\", '#dir#rep#ok.csv','/dir/rep/ok.csv' ,\"input\"),\n# (\"tabParent\", \"emploilog.txt\", '#home#rep#ok', '/home/rep/ok.csv', \"input\"),\n# (\"tabParents\", \"emploilog.txt\", '#home#rep#ok', '/home/rep/ok.csv', \"output\"),\n# (\"tableEcodel\", \"permilog.txt\", '#dir#rep#ok', '/dir/rep/ok.csv', \"output\"),\n# (\"epongAutre\", \"fanzoneslog.txt\", '#home#rep#ok', '/dir/rep/ok.csv', \"input\"),\n# (\"epongAutre\", \"fanzonelog.txt\", '#home#rep#ok', '/dir/rep/ok.csv', \"input\"),\n# (\"Enfant\", \"fanzonelog.txt\", '#dir#deskto#ok', '/dir/rep/ok.txt', \"input\"),\n# ]\n\n# tuple1_Job_logfile_attr = [\n# (\"tableEnfant\", \"fanzonelog.txt\",\"input\"),\n# (\"tableEnfant\", \"fanzonelog.txt\",\"input\"),\n# (\"tableEnfant\", \"fanzonelog.txt\",\"input\"),\n# (\"tableEnfant\", \"fanzonelog.txt\",\"input\"),\n# (\"tabParent\", \"emploilog.txt\",\"input\"),\n# (\"tabParent\", \"emploilog.txt\",\"input\"),\n# (\"tabParents\", \"emploilog.txt\",\"output\"),\n# (\"tabParents\", \"emploilog.txt\",\"output\"),\n# (\"tabParents\", \"emploilog.txt\", \"output\"),\n# (\"tabParents\", \"emploilog.txt\", \"output\"),\n# (\"tableEcodel\", \"permilog.txt\",\"output\"),\n# (\"tableEcodel\", \"permilog.txt\",\"output\"),\n# (\"tableEcodel\", \"permilog.txt\",\"output\",),\n# (\"epongAutre\", \"fanzoneslog.txt\",\"input\"),\n# (\"epongAutre\", 
\"fanzonelog.txt\",\"input\"),\n# (\"epongAutre\", \"fanzonelog.txt\",\"input\"),\n# (\"epongAutre\", \"fanzonelog.txt\",\"input\"),\n# (\"epongAutre\", \"fanzonelog.txt\",\"input\"),\n# (\"Enfant\", \"fanzonelog.txt\",\"input\")\n\n# ]\n# print(tuple1_Job_logfile_attr)\n# print(len(tuple1_Job_logfile_attr))\n\n# print('')\n\n# newTuple =[]\n# for h in range(len(tuple_Job_logfile_file_trueFile_attr)):\n# t0 = tuple_Job_logfile_file_trueFile_attr[h][0]\n# t1 = tuple_Job_logfile_file_trueFile_attr[h][1]\n# t4 = tuple_Job_logfile_file_trueFile_attr[h][4]\n\n# t3= tuple_Job_logfile_file_trueFile_attr[h][3]\n# # print(t0_1_4)\n# t0_1_4 = (t0, t1, t4)\n# for j in tuple1_Job_logfile_attr:\n# if t0_1_4 == j:\n# print(t0_1_4)\n# print(j)\n# newTuple.append(t3)\n# # else:\n# # newTuple.append(\"NaN\")\n\n\n# # print(t0_1)\n# # print(t4)\n# print(newTuple)\n# print(len(newTuple))\n","sub_path":"test5.py","file_name":"test5.py","file_ext":"py","file_size_in_byte":4263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"368367514","text":"# topics = [\"图\", \"拓扑排序\", \"广度优先搜索\"]\n\nfrom collections import deque\nfrom typing import Dict, List\n\n\n# BFS 实现\nclass Solution:\n def findOrder(self, numCourses: int, prerequisites: List[List[int]]) -> List[int]:\n # 邻接表\n edges: Dict[int, List[int]] = {}\n # 所有顶点的入度表\n indeg = [0] * numCourses\n\n for i, j in prerequisites:\n indeg[i] += 1\n if j not in edges:\n edges.setdefault(j, [i])\n else:\n edges[j].append(i)\n\n # 入度为 0 的顶点加入队列\n q = deque(i for (i, v) in enumerate(indeg) if v == 0)\n res: List[int] = []\n\n while q:\n node = q.popleft()\n for v in edges.get(node, []):\n indeg[v] -= 1\n if indeg[v] == 0:\n q.append(v)\n res.append(node)\n\n return res if len(res) == numCourses else []\n","sub_path":"algorithms/[210]课程表 II/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":957,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"245961792","text":"import cv2\n\npic = cv2.imread('C:/Users/Krishna/PycharmProjects/FaceDetection-using-OpenCv/resources/kristy.jpg', 0)\n\nmatrix = (7, 7)\n\nblur = cv2.GaussianBlur(pic, matrix, 0)\n\ncv2.imshow('blur', blur)\n\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n","sub_path":"image/filtering/Blur.py","file_name":"Blur.py","file_ext":"py","file_size_in_byte":240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"360472372","text":"import sys\nsys.setrecursionlimit(10**6)\n\ncountList = []\nT = int(sys.stdin.readline())\n\ndef dfs(mat, y, x):\n mat[y][x] = 0\n\n dx = [-1, 1, 0, 0]\n dy = [0, 0, -1, 1]\n\n for i in range(4):\n nx = x + dx[i]\n ny = y + dy[i]\n\n if nx >= 0 and nx < M and ny >= 0 and ny < N:\n if mat[ny][nx] == 1:\n dfs(mat, ny, nx)\n\n\nfor _ in range(T):\n\n # M: x, N: y\n M, N, K = map(int, sys.stdin.readline().split())\n\n inputList = [list(map(int, sys.stdin.readline().split())) for _ in range(0, K)]\n\n mat = [ [0]*M for _ in range(N)]\n\n for x, y in inputList:\n mat[y][x] = 1\n\n cnt = 0\n for y in range(0, N):\n for x in range(0, M):\n if mat[y][x] == 1:\n dfs(mat, y, x)\n cnt += 1\n\n countList.append(cnt)\n\nfor i in countList:\n print(i)\n\n","sub_path":"DFS_BFS/유기농배추.py","file_name":"유기농배추.py","file_ext":"py","file_size_in_byte":846,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"393149877","text":"# Here we will read xml file using python.\n\n# Importing 
libraries/modules\nimport os\nimport codecs\nimport csv\nimport bz2\nimport time\nimport json\nimport logging\nimport argparse\n\n\nclass Requirements():\n def __init__(self, args):\n dump_path = args.dump_path\n if dump_path is None:\n dump_path = os.path.join(r\".\", \"Raw\")\n\n latest_all_json = args.file_name\n if latest_all_json is None:\n latest_all_json = \"latest-all.json.bz2\"\n \n self.filename = os.path.join(dump_path, latest_all_json)\n\n save_path = args.save_path\n if save_path is None:\n save_path = os.path.join(r\".\", \"CSV\")\n\n self.encoding = args.encode\n if self.encoding is None:\n self.encoding = \"utf-8\"\n\n self.save_log = args.save_log\n if self.save_log:\n logging.basicConfig(filename=\"1_WikiData_Main_Dump_Parser.log\"\n , level=\"DEBUG\", filemode=\"a\"\n , format=\"%(asctime)s - %(levelname)s: %(message)s\"\n , datefmt=\"%m/%d/%Y %I:%M:%S %p\")\n \n self.display_message = args.display_message\n \n self.file_identification = os.path.join(save_path, \"WD_identification_item.csv\")\n self.file_wikibase_entityid = os.path.join(save_path, \"WD_wikibase_entityid.csv\")\n self.file_quantity = os.path.join(save_path, \"WD_quantity.csv\")\n self.file_globecoordinate = os.path.join(save_path, \"WD_globecoordinate.csv\")\n self.file_time = os.path.join(save_path, \"WD_time.csv\")\n \n @staticmethod \n def hms_string(sec_elapsed):\n h = int(sec_elapsed / (60 * 60))\n m = int((sec_elapsed % (60 * 60)) / 60)\n s = sec_elapsed % 60\n return \"{}:{:>02}:{:>05.2f}\".format(h, m, s)\n \n @staticmethod\n def ent_values(ent):\n wd_type = ent[\"type\"]\n wd_item = ent[\"id\"]\n \n if ent[\"labels\"].get(\"en\", \"not found\") == \"not found\":\n wd_label = \"\"\n else:\n wd_label = ent[\"labels\"][\"en\"][\"value\"]\n \n if ent[\"descriptions\"].get(\"en\", \"not found\") == \"not found\":\n wd_desc = \"\"\n else:\n wd_desc = ent[\"descriptions\"][\"en\"][\"value\"]\n \n if ent[\"sitelinks\"].get(\"enwiki\", \"not found\") == \"not found\":\n wd_title = \"\"\n else:\n wd_title = ent[\"sitelinks\"][\"enwiki\"][\"title\"]\n \n return([wd_type, wd_item, wd_label, wd_desc, wd_title])\n \n @staticmethod\n def concat_claims(claims):\n for rel_id, rel_claims in claims.items():\n for claim in rel_claims:\n yield claim\n \n def __repr__(self):\n return \"all requirements saved in this object\"\n\n\ndef main():\n parser = argparse.ArgumentParser()\n \n parser.add_argument(\"-d\",\"--dump_path\"\n , help = \"Provide a path containing WikiData JSON data dump. Default Option: a 'Raw' folder within the existing directory.\"\n , type=str)\n parser.add_argument(\"-f\",\"--file_name\"\n , help = \"Provide filename for WikiData JSON data dump. Default Option: 'latest-all.json.bz2'.\"\n , type=str)\n parser.add_argument(\"-s\",\"--save_path\"\n , help = \"Provide a path to save output csv files. Default Option: a 'CSV' folder within the existing directory.\"\n , type=str)\n parser.add_argument(\"-c\",\"--encode\"\n , help = \"Provide a encoding code. 
Default Option: 'utf-8'.\"\n , type=str)\n parser.add_argument(\"-l\", \"--save_log\"\n , help=\"Save log flag.\"\n , action=\"store_true\")\n parser.add_argument(\"-m\", \"--display_message\"\n , help=\"Display messsage to the consol flag.\"\n , action=\"store_true\")\n \n args = parser.parse_args()\n \n req = Requirements(args)\n \n i = 0\n \n start_time = time.time()\n \n with codecs.open(req.file_identification, \"w\", req.encoding) as op_identification \\\n ,codecs.open(req.file_wikibase_entityid, \"w\", req.encoding) as op_wikibase_entityid \\\n ,codecs.open(req.file_quantity, \"w\", req.encoding) as op_quantity \\\n ,codecs.open(req.file_globecoordinate, \"w\", req.encoding) as op_globecoordinate \\\n ,codecs.open(req.file_time, \"w\", req.encoding) as op_time:\n \n opw_identification = csv.writer(op_identification, quoting=csv.QUOTE_MINIMAL)\n opw_identification.writerow([\"WD_Type\", \"WD_WikiData_Item\", \"WD_Label\", \"WD_Description\", \"WD_Title\"])\n \n opw_wikibase_entityid = csv.writer(op_wikibase_entityid, quoting=csv.QUOTE_MINIMAL)\n opw_wikibase_entityid.writerow([\"WD_Subject\",\"WD_Predicate\",\"WD_Object\"]) \n \n opw_quantity = csv.writer(op_quantity, quoting=csv.QUOTE_MINIMAL)\n opw_quantity.writerow([\"WD_Subject\",\"WD_Predicate\",\"WD_Object\",\"WD_Units\"]) \n \n opw_globecoordinate = csv.writer(op_globecoordinate, quoting=csv.QUOTE_MINIMAL)\n opw_globecoordinate.writerow([\"WD_Subject\",\"WD_Predicate\",\"WD_Object\",\"WD_Precision\"])\n \n opw_time = csv.writer(op_time, quoting=csv.QUOTE_MINIMAL)\n opw_time.writerow([\"WD_Subject\",\"WD_Predicate\",\"WD_Object\",\"WD_Precision\"])\n \n \n with bz2.BZ2File(req.filename, \"rb\") as f:\n for line in f:\n try:\n line = line.decode(req.encoding, errors=\"ignore\")\n if line in (\"[\\n\", \"]\\n\"):\n pass\n else:\n ent = json.loads(line.rstrip('\\n,'))\n \n if ent[\"type\"] != \"item\":\n continue\n \n opw_identification.writerow(req.ent_values(ent))\n \n claims = req.concat_claims(ent[\"claims\"])\n e1 = ent[\"id\"]\n \n for claim in claims:\n mainsnak = claim[\"mainsnak\"]\n rel = mainsnak[\"property\"]\n snak_datatype = mainsnak[\"datatype\"]\n \n if mainsnak['snaktype'] == \"value\":\n snak_value = mainsnak[\"datavalue\"][\"value\"]\n \n if snak_datatype in (\"wikibase-item\", \"wikibase-property\"):\n opw_wikibase_entityid.writerow([e1, rel, snak_value[\"id\"]])\n \n elif snak_datatype == \"quantity\":\n e2 = (snak_value[\"amount\"],snak_value[\"unit\"].strip(r\"http://www.wikidata.org/entity/\"))\n opw_quantity.writerow([e1, rel, e2[0],e2[1]])\n \n elif snak_datatype == \"globe-coordinate\":\n e2 = ((snak_value[\"latitude\"],snak_value[\"longitude\"]),snak_value[\"precision\"])\n opw_globecoordinate.writerow([e1, rel, e2[0], e2[1]])\n \n elif snak_datatype == \"time\":\n e2 = (snak_value[\"time\"],snak_value[\"precision\"])\n opw_time.writerow([e1, rel, e2[0],e2[1]])\n \n else:\n pass \n \n i = i + 1\n if i%1000000 == 0 & req.display_message:\n print(\"{} number of item processed\".format(i))\n except:\n if req.save_log:\n logging.exception(\"Exception occurred\", exc_info=True)\n else:\n pass\n \n elapsed_time = time.time() - start_time\n msg = msg = \"Total item processed: {:,} \\n Elapsed time: {}\".format(i-1, req.hms_string(elapsed_time))\n if req.display_message:\n print(msg)\n if req.save_log:\n logging.info(msg)\n\nif __name__ == \"__main__\":\n 
main()\n","sub_path":"WikiData_AllRequiredDump_Parse.py","file_name":"WikiData_AllRequiredDump_Parse.py","file_ext":"py","file_size_in_byte":8142,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"38969972","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Jesse Rubin - project Euler\n\"\"\"\nIdempotents\nProblem 407\nIf we calculate a^2 mod 6 for 0 ≤ a ≤ 5 we get: 0,1,4,3,4,1.\n\nThe largest value of a such that a^2 ≡ a mod 6 is 4.\nLet's call M(n) the largest value of a < n such that a^2 ≡ a (mod n).\nSo M(6) = 4.\n\nFind ∑M(n) for 1 ≤ n ≤ 10^7.\n\"\"\"\nfrom tqdm import tqdm\nfrom bisect import bisect_left\nfrom bib.amazon_prime import prime_gen\n\n\ndef S_M(max_n):\n primes = [p for p in prime_gen(max_n)]\n squares = [n*n for n in range(max_n+1)]\n\n def M(n):\n # print(\"_\")\n # print(n)\n if primes[-1] >= n == primes[bisect_left(primes, n)]:\n return 1\n if squares[bisect_left(squares, n)] == n:\n return 1\n for a in range(n-1, -1, -1):\n right = (a%n)\n # left = (a*a)%n\n sq = squares[a]\n left = squares[a]%n\n if right == left:\n return a\n\n assert 4 == M(6)\n\n # for n in range(1, 1000):\n # print(M(n))\n\n m_sum = 0\n for n in tqdm(range(1, max_n+1)):\n # print(\"_\")\n # print(n, M(n)**2, M(n))\n m_sum += M(n)\n m_sum -= 1\n print(\"MSUM\", m_sum)\n return m_sum\n\n\nS_M(20)\nS_M(10) # 17\nS_M(10**2) # 2549\nS_M(50) # 538\n# S_M(10**3)\nS_M(10**4)\nS_M(10**5)\nS_M(10**6)\n","sub_path":"not_done/euler_407.py","file_name":"euler_407.py","file_ext":"py","file_size_in_byte":1330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"50362469","text":"import numpy as np\nfrom scipy.integrate import ode\nfrom collections import Counter\n\nclass Environ(object):\n \"\"\"\n Environ holds all the 'global' variables\n \"\"\"\n\n def __init__(self,initvars = None):\n if initvars is None:\n CPG = {\"C\":2.4 , \"ep\":2.5, \"de\":0.051, \"gKS\":0.19, \"Iext\":37., \"Syn\":{\"CPG\":{\"vSyn\":0.,\"kSyn\":0.8, \"tauSyn\":5., \"gSyn\":0.2}, \"Df\":{\"vSyn\":0., \"kSyn\":0.8, \"tauSyn\":5., \"gSyn\":0.6}, \"Ds\":{\"vSyn\":-10., \"kSyn\":0.8, \"tauSyn\":5., \"gSyn\":2.}}}\n Df = {\"C\":2.4 , \"ep\":2.5, \"de\":0.051, \"gKS\":0.25, \"Iext\":37.}\n Ds = {\"C\":2.4 , \"ep\":2, \"de\":0.0002, \"gKS\":0.5, \"Iext\":50.}\n initvars={\"gK\":9.0 , \"gCa\":4.4, \"gL\":2.0, \"EK\":-80., \"ECa\":120., \"EL\":-60., \"VCa\":-1.2, \n \"kCa\":1./18., \"VK\":2., \"kK\":1./10., \"kc\":0.7, \"Vc\":-25., \"neu\":{\"CPG\":CPG,\"Df\":Df, \"Ds\":Ds}}\n vars(self).update(initvars)\n\n\nclass Model(Environ):\n \"\"\"\n Model represents the model in Ghiglia, Holmes paper:\n Cv' = - [ICa + IK + IL + IKS] + Iext\n m' = ep/tm(v)*(mInf(v)-m)\n c' = del/tc(v)*(cInf(v)-c)\n\n ICa = gCa*nInf(v)(v-ECa)\n IL = gL*(v-EK)\n IK = gK*m*(v-EK)\n IKS = gKS*c*(v-EK)\n wInf(v) = 1/(1+exp(-kw(v-vwth))) = 0.5*(1+tanh(-kw(v-vwth)/2)) (w = m,c,n)\n tw(v) = sech(kw(v-vwth)) (w = m,c,n)\n \"\"\"\n\n def __init__(self, cls, initvars = None):\n super(Model, self).__init__(initvars)\n initvars2 = self.neu.get(cls)\n vars(self).update(initvars2)\n self.cls = cls\n \n def singledyn(self,t,V,m,c,Iadd = 0):\n\n mInf = 0.5*(1+np.tanh((V-self.VK)*self.kK))\n cInf = 0.5*(1+np.tanh((V-self.Vc)*self.kc))\n nInf = 0.5*(1+np.tanh((V-self.VCa)*self.kCa))\n #tm = 1./cosh((V-self.env.V3)/(2*self.env.V4))\n #tc = 1./cosh((V-self.env.Vc)*self.env.kK/4)\n ICa = self.gCa*nInf*(V-self.ECa)\n IK = self.gK*m*(V-self.EK)\n IKS = self.gKS*c*(V-self.EK)\n IL = 
self.gL*(V-self.EL) #EK in paper\n Iext = self.Iext + Iadd\n Vd = -1./self.C*(ICa + IK + IL + IKS) + Iext/self.C\n md = self.ep*(mInf-m)*np.cosh(self.kK*(V-self.VK)/2)\n cd = self.de*(cInf-c)*np.cosh(self.kK*(V-self.Vc)/2)\n \n return [Vd, md, cd]\n \nclass Synapse(object):\n \"\"\"\n Synapse is an object that contains the model for s12\n \"\"\"\n \n def __init__(self,cls1,cls2,initvars12 = None):\n self.mod1 = Model(cls1)\n self.mod2 = Model(cls2)\n if initvars12 is None:\n initvars12 = self.mod1.Syn.get(cls2)\n vars(self).update(initvars12)\n def synapsedyn(self,V1,s):\n Sinf =\t1./(1+np.exp(-self.kSyn*(V1-self.vSyn)));\n sd\t=\t1./self.tauSyn*(Sinf*(1-s)-s);\n return sd\n \nclass Neuron(Model):\n \"\"\"\n Neuron is an object that contains the model for V, w and c\n \"\"\"\n \n def dyn(self,t,x):\n [V,m,c] = x\n xd = self.singledyn(t,V,m,c)\n return np.array([xd])\n \n def sym(self,t0,x0,tmax,dt):\n\n r = ode(self.dyn).set_integrator('dopri5')\n r.set_initial_value(np.array(x0),t0) \n t = [t0]\n xd = [np.array(x0)]\n while r.successful() and r.t < tmax:\n r.integrate(r.t + dt)\n t.append(r.t)\n xd.append(r.y)\n # print(\"%g %g %g %g\" %(r.t, r.y[0],r.y[1],r.y[2]))\n print(r.successful())\n return [t, xd]\n\nclass System(object):\n \"\"\"\n System represents the system of neurons and synapses that we will simulate\n \"\"\"\n def __init__(self,cls,connect):\n self.neurons = cls\n aux = Counter(cls)\n self.models = {key : Model(key) for key in aux}\n self.N = connect.shape[0]\n self.syns = {}\n for i in range(self.N):\n for j in range(self.N):\n if connect[i,j] == 1:\n self.syns.setdefault((cls[i],cls[j]),[]).append(np.array([i,j]))\n self.syns = {key:np.vstack(self.syns[key]) for key in self.syns}\n self.synmodels = {key: Synapse(*key) for key in self.syns}\n self.xd = np.hstack((np.zeros((self.N,1)),np.zeros((self.N,1)),\n np.zeros((self.N,1)),np.zeros((self.N,self.N))))\n def dyn(self,t,x):\n y= x.reshape(self.N,3+self.N)\n Iadd = np.array([sum([getattr(self.synmodels.get((self.neurons[i],self.neurons[j])),'gSyn',0)*y[i,j+3]*(y[j,0]-getattr(self.synmodels.get((self.neurons[i],self.neurons[j])),'vSyn',0)) for i in range(self.N)]) for j in range(self.N)]) #all the synaptic info that comes from other neurons is added here to form the exterior current that will be included in the dynamics\n for key in self.models:\n [V,m,c]=self.models[key].singledyn(t,y[np.where([i == key for i in self.neurons]),0],y[np.where([i == key for i in self.neurons]),1], y[np.where([i == key for i in self.neurons]),2], Iadd = Iadd[np.where([i == key for i in self.neurons])])\n [self.xd[np.where([i == key for i in self.neurons]),0],self.xd[np.where([i == key for i in self.neurons]),1],self.xd[np.where([i == key for i in self.neurons]),2]] = [V,m,c]\n for key in self.syns:\n self.xd[self.syns.get(key)[:,0],self.syns.get(key)[:,1]+3] = self.synmodels.get(key).synapsedyn(y[self.syns.get(key)[:,0],0],y[self.syns.get(key)[:,0],self.syns.get(key)[:,1]+3])\n return self.xd.reshape(-1)\n \n def sym(self,t0,x0,tmax,dt):\n r = ode(self.dyn).set_integrator('dopri5')\n r.set_initial_value(x0.reshape(-1),t0) \n t = [t0]\n x = [x0.reshape(-1)]\n while r.successful() and r.t < tmax:\n r.integrate(r.t + dt)\n t.append(r.t)\n x.append(r.y)\n # print(\"%g %g %g %g\" %(r.t, r.y[0],r.y[1],r.y[2]))\n print(r.successful())\n return [t, x]\n ","sub_path":"neuropy/neuron.py","file_name":"neuron.py","file_ext":"py","file_size_in_byte":5311,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} 
+{"seq_id":"58862929","text":"import argparse\nimport pytesseract\nimport numpy as np\nfrom scipy import misc\nfrom scipy.ndimage.interpolation import rotate\nfrom skimage.feature import canny\nfrom skimage.transform import (hough_line, hough_line_peaks)\n\nalignment_algorithm = {\n 0: 'horizontal',\n 1: 'hough'\n}\n\n\ndef main(arguments):\n print(\"Using {} mode.\".format(alignment_algorithm[arguments.mode]))\n\n original_image = read_image(arguments.input_image_path)\n original_gray_image = read_image(arguments.input_image_path, gray_scale=True)\n\n print(\"Original image text: {}\".format(pytesseract.image_to_string(original_image)))\n\n if alignment_algorithm[arguments.mode] == 'horizontal':\n angle = horizontal_projection_method(original_gray_image)\n else:\n angle = hough_method(original_gray_image)\n\n print(\"The image should be fixed by {:.2f} degrees.\".format(angle))\n\n fixed_image = rotate_image(original_image, angle)\n misc.imsave(arguments.output_image, fixed_image)\n print(\"Fixed image generated!\")\n print(\"Fixed image text: {}\".format(pytesseract.image_to_string(fixed_image)))\n\n\ndef horizontal_projection_method(image):\n copy_image = image.copy()\n copy_image[copy_image <= 128] = 1\n copy_image[copy_image > 128] = 0\n original_hp = np.sum(copy_image, 1)\n\n best_angle = 0\n max_dist = -1\n for i in range(-90, 91):\n rotated_image = rotate_image(image, i)\n # binarize the image\n rotated_image[rotated_image <= 128] = 1\n rotated_image[rotated_image > 128] = 0\n\n horizontal_profile = np.sum(rotated_image, 1)\n result = ssd(original_hp, horizontal_profile)\n if result > max_dist:\n best_angle = i\n max_dist = result\n\n return best_angle\n\n\ndef ssd(a, b):\n return ((a - b) ** 2).sum()\n\n\ndef hough_method(image):\n edges = canny(image, 2, 1, 25)\n h, theta, d = hough_line(edges)\n angles = []\n for _, angle, _ in zip(*hough_line_peaks(h, theta, d)):\n angles.append(np.rad2deg(angle))\n\n angle = np.median(angles)\n\n return angle - 90 if angle >= 0 else angle + 90\n\n\ndef read_image(path, gray_scale=False):\n # Open desired image\n input_image = None\n try:\n input_image = misc.imread(path, flatten=gray_scale)\n except FileNotFoundError:\n print(\"Image {} not found.\".format(path))\n exit(0)\n\n return input_image\n\n\ndef rotate_image(image, angle):\n return rotate(image, angle, reshape=False, cval=255)\n\n\nif __name__ == \"__main__\":\n # Parse the arguments\n parser = argparse.ArgumentParser()\n parser.add_argument('input_image_path')\n parser.add_argument('mode', type=int, choices=range(2), metavar=\"[0-1]\",\n help=\"0 for horizontal projection mode and 1 for Hough mode\")\n parser.add_argument('output_image')\n args = parser.parse_args()\n\n main(args)\n","sub_path":"trabalho3/src/trabalho3.py","file_name":"trabalho3.py","file_ext":"py","file_size_in_byte":2848,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"516083811","text":"import pyodbc\nimport connections as conn\ncursor_new = conn.conn_new.cursor()\ncursor_old = conn.conn_old.cursor()\noldMovies = cursor_old.execute('Select M.Movie_ID, M.Movie_Title,M.Movie_Genre_ID, MG.Movie_Genre_Name, M.Movie_Studio_ID,MS.Movie_Studio_Name From Movies as M join Movie_Genres as MG on M.Movie_Genre_ID = MG.Movie_Genre_ID join Movie_Studios as MS on M.Movie_Studio_ID = MS.Movie_Studio_ID')\n\ncount = 0\n\nfor row in oldMovies:\n K_Cat_ID = 0\n Category = cursor_new.execute('SELECT Cat_ID FROM Category Where Cat_Name = ?', row[3])\n \n for Categoryrow in 
Category:\n K_Cat_ID = Categoryrow[0]\n\n if(K_Cat_ID == 0):\n cursor_new.execute('Insert Into Category Values(5,?)', row[3])\n Category = cursor_new.execute(\n 'SELECT Cat_ID FROM Category Where Cat_Name = ?', row[3])\n for Categoryrow in Category:\n K_Cat_ID = Categoryrow[0]\n\n k_Brand_ID = 0\n Brand = cursor_new.execute('SELECT Brand_ID FROM Brand Where Brand_Name = ?', row[5])\n \n for Brandrow in Brand:\n k_Brand_ID = Brandrow[0]\n\n if(k_Brand_ID == 0):\n cursor_new.execute('Insert Into Brand Values(?)', row[5])\n Brand = cursor_new.execute(\n 'SELECT Brand_ID FROM Brand Where Brand_Name = ?', row[5])\n for Brandrow in Brand:\n k_Brand_ID = Brandrow[0]\n\n Name = row[1].strip()\n\n cursor_new.execute('Insert Into Product_Info (Prod_SKU, Dept_ID, Cat_ID, Brand_ID, Prod_Name) Values (?,5,?,?,?)',\n row[0], K_Cat_ID, k_Brand_ID, Name)\n count = count + 1\n\nconn.conn_new.commit()\nprint(str(count) + ' rows interted successfully!')\n\n\n","sub_path":"data/DataMigrationScripts/5_Movies.py","file_name":"5_Movies.py","file_ext":"py","file_size_in_byte":1648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"156632374","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('compta', '0006_auto_20150417_0445'),\n ]\n\n operations = [\n migrations.AlterModelOptions(\n name='billpart',\n options={'verbose_name': 'Partie de facture', 'verbose_name_plural': 'Parties de facture'},\n ),\n migrations.AlterModelOptions(\n name='subscriptionbillpart',\n options={'verbose_name': 'Partie de facture (subscription)', 'verbose_name_plural': 'Parties de facture (souscription)'},\n ),\n ]\n","sub_path":"compta/migrations/0007_auto_20150417_0511.py","file_name":"0007_auto_20150417_0511.py","file_ext":"py","file_size_in_byte":654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"275944887","text":"CSV_FIELDNAMES = ('rank', 'name', 'date', 'pace', 'HR', 'VAM', 'Time')\n\ntry:\n from local_config import *\nexcept ImportError:\n pass\n\nAUTH_DATA = {\n 'authenticity_token': '',\n 'email': AUTH_EMAIL,\n 'password': AUTH_PASSWORD,\n 'utf8': '✓',\n 'plan': ''\n}\n\nSTRAVA_SESSION = 'https://www.strava.com/session'\nSTRAVA_LOGIN = 'https://www.strava.com/login'\nSTRAVA_SEGMENT_LEADERBOARD_URL = 'https://www.strava.com/api/v3/segments/{}/leaderboard'\n\n\nclass AthletheConfig:\n AGE_CLASS = {0: '0_19', 20: '20_24', 25: '25_34', 35: '35_44', 45: '45_54', 55: '55_64', 65: '65_69', 70: '70_74',\n 75: '75_plus'}\n WEIGHT_CLASS = {\n 0: '0_124', 125: '125_149', 150: '150_164', 165: '165_179', 180: '180_199',\n 200: '200_224', 225: '225_249', 250: '250_plus'\n }\n WEIGHT_SCALE = [0, 125, 150, 165, 180, 200, 250]\n AGE_SCALE = [0, 20, 35, 45, 55, 65, 70, 75]\n\n @classmethod\n def validate_weight(cls, weight):\n \"\"\"\n Returns weight range for according to weight param\n :param weight: in lbs\n :return: weight category as query param\n \"\"\"\n try:\n weight_int = int(weight)\n try:\n goal_weight = next(filter(lambda x: x > weight_int, AthletheConfig.WEIGHT_SCALE))\n except StopIteration:\n goal_weight = AthletheConfig.WEIGHT_SCALE[-1]\n return AthletheConfig.WEIGHT_CLASS[goal_weight]\n except ValueError:\n raise RuntimeError(f'The value of weight - {weight} - is not valid')\n\n @classmethod\n def validate_age(cls, age):\n \"\"\"\n Returns weight range for according to weight param\n 
:param weight: in lbs\n :return: weight category as query param\n \"\"\"\n try:\n age_int = int(age)\n try:\n goal_age = next(filter(lambda x: x > age_int, AthletheConfig.AGE_CLASS))\n except StopIteration:\n goal_age = AthletheConfig.WEIGHT_SCALE[-1]\n return AthletheConfig.AGE_CLASS[goal_age]\n\n except ValueError:\n raise RuntimeError(f'The value of age - {age} - is not valid')\n\n @classmethod\n def validate_params(cls, age=None, weight=None):\n return {'age_group': cls.validate_age(age) if age is not None else None,\n 'weight_class': cls.validate_weight(weight) if weight is not None else None}\n\n\nADDITIONAL_HEADERS = {\n # 'Authorization': 'Bearer {}'.format(STRAVA_API_KEY),\n 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.90 Safari/537.36',\n}\n\nif __name__ == '__main__':\n print(AthletheConfig.validate_weight(260))\n","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":2692,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"193036172","text":"from flights.models import MapData\nimport re\nfrom django.db.models import Q\nfrom django.core.cache import cache\nimport json\n\ndef get_map_data(object_list):\n\n airport_list = []\n latlngs = []\n\n object_list = set(object_list) #remove duplicate routes\n\n #get map points\n for item in object_list:\n route = re.split('\\W+', item.route) #separate individual codes\n for airport in route:\n #create new dict object inside loop for each airport\n feature = {\"type\":\"Feature\",\"properties\":{\"icao\": \"\",\"iata\": \"\", \"name\": \"\", \"city\": \"\", \"state\": \"\", \"country\": \"\", \"elevation\": \"\"},\"geometry\":{\"type\":\"Point\",\"coordinates\":['','']}}\n if airport == '':\n pass\n else:\n iata_kwargs = {'iata' : airport}\n icao_kwargs = {'icao' : airport}\n map_object = (MapData.objects.filter(**iata_kwargs) | MapData.objects.filter(**icao_kwargs)).first()\n\n latlngs.append([map_object.latitude, map_object.longitude]) #assemble polyline\n\n #assemble geojson\n feature[\"properties\"][\"icao\"] = map_object.icao\n feature[\"properties\"][\"iata\"] = map_object.iata\n feature[\"properties\"][\"name\"] = map_object.name\n feature[\"properties\"][\"city\"] = map_object.city\n feature[\"properties\"][\"state\"] = map_object.state\n feature[\"properties\"][\"country\"] = map_object.country\n feature[\"properties\"][\"elevation\"] = map_object.elevation\n feature[\"geometry\"][\"coordinates\"] = [map_object.longitude, map_object.latitude]\n airport_list.append(feature)#add current dict object to list for feature_collection\n airport_list = list({v[\"properties\"][\"icao\"]:v for v in airport_list}.values()) #removes duplicate airports\n\n feature_collection = {\"type\":\"FeatureCollection\",\"features\":airport_list}\n latlngs = str(latlngs)\n cache.set('routes', latlngs, 5*60)\n cache.set('airports', feature_collection, 5*60)\n","sub_path":"flights/get_map_data.py","file_name":"get_map_data.py","file_ext":"py","file_size_in_byte":2061,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"46151169","text":"\nimport numpy as np\n\nclass PairwiseAligner:\n\n \"\"\"\n Parent class for :class:`align.algs.SmithWaterman` and :class:`align.algs.NeedlemanWunsch` that \n contains shared alignment functions and variables\n\n :param gapB_mat: Matrix to keep track of scores for adding gaps to seqB \n :type gapB_mat: np.ndarray \n :param back: Matrix 
that keeps track of pointers for backtracking through match matrix\n :type back: np.ndarray\n :param back_A: Matrix that keeps track of pointers for backtracking through gap A matrix\n :type back_A: np.ndarray\n :param back_B: Matrix that keeps track of pointers for backtracking through gap B matrix\n :type back_B: np.ndarray\n :param opt_score: Variable to keep track of final alignment score\n :type opt_score: int\n :param seqA: Query sequence A\n :type seqA: str\n :param seqB: Query sequence B\n :type seqB: str\n :param seqA_align: Aligned version of query sequence A\n :type seqA_align: str\n :param seqB_align: Aligned version of query sequence B\n :type seqB_align: str\n :param D_open: Opening gap penalty\n :type D_open: int\n :param D_extend: Extension gap penalty\n :type D_extend: int\n \n \"\"\"\n \n def __init__( self, scoring_mat ):\n \n \"\"\"\n Constructor\n\n :param scoring_mat: Pathlike str to .mat file with substitution matrix \n :type scoring_mat: str\n \n \"\"\"\n\n # init alignment and gap matrices\n self.align_mat = None\n self.gapA_mat = None\n self.gapB_mat = None\n \n # init backtrace matrices\n self.back = None\n self.back_A = None\n self.back_B = None\n \n # optimal score\n self.opt_score = 0\n \n # sequences\n self.seqA = \"\"\n self.seqB = \"\"\n \n # alignments\n self.seqA_align = \"\"\n self.seqB_align = \"\"\n \n # penalties\n self.D_open = -12\n self.D_extend = -2\n \n # read in scoring matrix and create scoring dictionary\n if isinstance(scoring_mat, str): \n self.scoring_mat, self.alphabet = self.read_scoring_file(scoring_mat)\n self.scores_dict = self.init_scoring_dict()\n elif isinstance(scoring_mat, dict):\n self.scores_dict = scoring_mat\n else: \n self.scoring_mat = None\n self.alphabet = None\n print(\"set scoring matrix and alphabet manually\")\n \n \n def read_scoring_file(self, scores_file):\n \n \"\"\"\n Reads in file containing scoring matrix\n \n :param scores_file: Pathlike string to .mat file with substitution matrix \n :type scores_file: str\n\n :return: tuple containing n x n substitution matrix and a list of n amino acids\n\n \"\"\"\n \n # read in file for scoring matrix\n with open(scores_file, 'r') as f:\n scores_mat = [line for line in f.read().splitlines() if \"#\" not in line]\n \n # extract out header characters\n alphabet = [char for char in scores_mat[0] if \" \" not in char]\n scores_mat = scores_mat[1:]\n \n # create list with transition matrix values\n for i in range(len(scores_mat)):\n scores_mat[i] = [int(score) for score in scores_mat[i].split(\" \") if len(score) > 0]\n \n # convert to nparray\n scores_mat = np.array(scores_mat)\n \n return scores_mat, alphabet\n\n def init_scoring_dict(self, scoring_mat=None, alphabet=None):\n \"\"\"\n Read in scoring file and creating dictionary \n \n :param scoring_mat: n x n substitution matrix\n :type scoring_mat: np.ndarray\n :param alphabet: list of n amino acids\n :type alphabet: list\n\n :return: dictionary that maps tuple of amino acid (aa1, aa2) to score\n\n \"\"\"\n \n # option to update scoring matrix\n if scoring_mat is not None: \n self.scoring_mat = scoring_mat\n \n # option to update alphabet\n if alphabet is not None: \n self.alphabet = alphabet\n \n # check scoring matrix type and create scoring dictionary\n scores_dict = {}\n if isinstance(self.scoring_mat, np.ndarray):\n \n # step through each character and add each pair to dict\n for i in range(len(self.alphabet)):\n for j in range(len(self.alphabet)): \n scores_dict[self.alphabet[i],self.alphabet[j]] = 
self.scoring_mat[i][j] \n \n return scores_dict\n\n return \"Scoring matrix invalid\"\n \n def clean_sequence(self, seq, remove_unknown=False):\n \"\"\"\n Makes sequence uppercase and replaces unknown characters with * (or removes them if remove_unknown=True)\n \n :param seq: sequence to clean\n :type seq: str \n :param remove_unknown: Whether to replace or remove unknown characters in query sequence\n :type remove_unknown: bool, default=False \n\n :return: cleaned sequence with all uppercase letters\n\n \"\"\"\n\n # check for alphabet\n if self.alphabet is None: \n return \"Error: add an alphabet\"\n \n # check for alphabet\n seq = seq.upper()\n seq = \"\".join([i if i in self.alphabet else \"!\" for i in seq])\n \n # check for alphabet\n if \"!\" in seq:\n if remove_unknown: seq = seq.replace(\"!\", \"*\")\n else: seq = seq.replace(\"!\", \"\")\n \n return seq\n \n def align(self, seqA, seqB, sw=False):\n \"\"\"\n Populates scoring and backtracing matrices with alignment scores and pointers\n \n :param seqA: First query sequence\n :type seqA: str\n :param seqB: Second query sequence\n :type seqB: str\n :param sw: whether to perform Smith-Waterman alignment (default, values clipped to 0)\n :type sw: bool, default = False\n\n \"\"\"\n # clean and store sequences\n self.seqA = self.clean_sequence(seqA)\n self.seqB = self.clean_sequence(seqB)\n \n # reset alignment\n self.seqA_align = \"\"\n self.seqB_align = \"\"\n \n # initialize first row of gapA and backtrace matrices\n # since once i is 0, only gaps can be added to A\n for j in range(self.gapA_mat.shape[1]): \n self.gapA_mat[0][j] = self.D_open + j * self.D_extend\n self.back_A[0][j] = 1\n self.back[0][j] = 1 \n\n # initalize first col of gapB and back\n # since once j is 0, only gaps can be added to B\n for i in range(self.gapB_mat.shape[0]): \n self.gapB_mat[i][0] = self.D_open + i * self.D_extend\n self.back_B[i][0] = 2\n self.back[i][0] = 2\n\n # initalize corner of align_mat\n self.align_mat[0][0] = 0 \n \n # begin heuristic with backtrace values stored as \n # 0 = diag, 1 = left, 2 = up, 3 = end (sw only)\n for i in range(1, len(self.seqA) + 1): \n for j in range(1, len(self.seqB) + 1):\n \n # update I(A) and I(A) backtrace matrix values to \n # keep track of whether gaps should be added to seqA\n _currScores = [self.align_mat[i, j-1] + self.D_open, # open gap in seqA\n self.gapA_mat[i, j-1] + self.D_extend] # extend gap in seqA\n \n self.gapA_mat[i, j] = max(_currScores) #I(A) \n self.back_A[i, j] = np.argmax(_currScores) #backtrace\n\n # update I(B) and I(B) backtrace matrix values to \n # keep track of whether gaps should be added to seqB\n _currScores = [self.align_mat[i-1, j] + self.D_open, # open gap in seqB\n -np.inf, # ignored\n self.gapB_mat[i-1, j] + self.D_extend] # extend gap in seqB\n \n self.gapB_mat[i,j] = max(_currScores) #I(B) \n self.back_B[i, j] = np.argmax(_currScores) #backtrace \n \n # update M and M backtrace matrix values\n s = self.scores_dict[self.seqA[i-1], self.seqB[j-1]]\n \n _currScores = [self.align_mat[i-1, j-1] + s, #match (diag)\n self.gapA_mat[i, j], #gap seqA (left)\n self.gapB_mat[i, j]] #gap seqB (up)\n \n self.align_mat[i,j] = max(_currScores) #M\n \n if sw and (max(_currScores) <= 0): \n self.back[i, j] = 3 #add marker for end for sw\n else:\n self.back[i, j] = np.argmax(_currScores) #backtrace\n\n\n def backtrace(self, i, j, curr_back):\n \"\"\"\n Backtracking steps shared by SW and NW algorithms\n \n :param i: current row value\n :type i: int\n :param j: current col value\n :type j: int\n 
:param curr_back: current pointer matrix to consider\n :type curr_back: np.ndarray\n \n :return: tuple (i, j) containing updated row and column values\n \"\"\"\n\n # update the sequence\n # if pointing to M, match sequence and move diagonally\n if curr_back is self.back: \n self.seqA_align = self.seqA[i-1] + self.seqA_align\n self.seqB_align = self.seqB[j-1] + self.seqB_align\n i -= 1\n j -= 1\n\n # if pointing to gapA, add a gap to sequence A, \n # get current character in B, and move left\n elif curr_back is self.back_A:\n self.seqA_align = \"-\" + self.seqA_align\n self.seqB_align = self.seqB[j-1] + self.seqB_align\n j -= 1\n\n # if pointing to gapB, add a gap to sequence B and move up\n elif curr_back is self.back_B: \n self.seqB_align = \"-\" + self.seqB_align\n self.seqA_align = self.seqA[i-1] + self.seqA_align\n i -= 1\n \n return i, j\n\nclass NeedlemanWunsch(PairwiseAligner):\n \"\"\"\n Needleman-Wunsch global alignment algorithm\n \"\"\"\n\n def __init__( self, scoring_mat):\n \"\"\"\n Constructor\n\n :param scoring_mat: Pathlike str to .mat file with substitution matrix passed to parent :class:`align.algs.PairwiseAligner`\n :type str or matrix\n \"\"\"\n\n PairwiseAligner.__init__(self, scoring_mat) \n\n def align(self, seqA, seqB, score_only=False):\n\n \"\"\"\n Perform Needleman-Wunch global alignment with affine gap scoring.\n Initializes matrices and calls parent align method for scoring\n \n :param seqA: First query sequence\n :type seqA: str\n :param seqB: Second query sequence\n :type seqA: str\n :param score_only: Whether to return the aligned sequences with the score (default) or just the score\n :type score_only: bool, default=False\n\n :return: Gapped version of seqA, gapped version of seqB, alignment score; if score_only = True is passed, only the alignment score is returned\n \n \"\"\"\n\n # create matrices for alignment scores and gaps \n self.align_mat = np.ones((len(seqA) + 1, len(seqB) + 1)) * -np.inf\n self.gapA_mat = np.ones((len(seqA) + 1, len(seqB) + 1)) * -np.inf\n self.gapB_mat = np.ones((len(seqA) + 1, len(seqB) + 1)) * -np.inf\n\n # create matrices for backtracing pointers\n self.back = np.ones((len(seqA) + 1, len(seqB) + 1)) * -np.inf\n self.back_A = np.ones((len(seqA) + 1, len(seqB) + 1)) * -np.inf\n self.back_B = np.ones((len(seqA) + 1, len(seqB) + 1)) * -np.inf\n \n # initialize other variables with parent class align\n super(NeedlemanWunsch, self).align(seqA, seqB, sw=False)\n \n return self.backtrace(score_only=score_only)\n \n def backtrace(self, score_only=False):\n\n \"\"\"\n Backtracing through scoring matrices using parameters for Needleman-Wunch global alignment parameters\n \n :param score_only: Whether to return the aligned sequences with the score (default) or just the score\n :type score_only: bool, default=False\n\n :return: Gapped version of seqA, gapped version of seqB, alignment score; if score_only = True is passed, only the alignment score is returned\n\n \"\"\"\n\n # Use list to keep track of what the value of the backtrace matrix points to, with\n # the index corresponding to the pointers used (0=diag=M, 1=left=IA, 2=up=IB). 
\n all_mat = [self.align_mat, self.gapA_mat, self.gapB_mat]\n all_back = [self.back, self.back_A, self.back_B]\n \n # get pointer to matrix with largest optimal value\n mat_ind = np.argmax([self.align_mat[-1][-1], self.gapA_mat[-1][-1], self.gapB_mat[-1][-1]])\n \n # get matrix containing maximum score\n curr_mat = all_mat[mat_ind]\n curr_back = all_back[mat_ind]\n \n # get starting index\n i, j = curr_mat.shape[0] - 1, curr_mat.shape[1] - 1\n \n # update optimal score and return if not backtracking\n self.opt_score = int(curr_mat[i][j])\n if score_only: return self.opt_score\n \n # begin backtracing\n while (i > 0 or j > 0):\n pointer = curr_back[i, j] # store current pointer\n i, j = super(NeedlemanWunsch, self).backtrace(i, j, curr_back) # use parent backtrack function\n curr_back = all_back[int(pointer)] # use pointer to select next backtrace matrix\n \n # return the alignments and score\n return (self.seqA_align, self.seqB_align, self.opt_score)\n \nclass SmithWaterman(PairwiseAligner):\n \"\"\"\n Smith-Waterman local alignment algorithm\n\n \"\"\"\n\n def __init__( self, scoring_mat):\n \"\"\"\n Constructor\n \n :param scoring_mat: Pathlike str to .mat file with substitution matrix passed to parent :class:`align.algs.PairwiseAligner`\n :type str or matrix\n \"\"\"\n PairwiseAligner.__init__(self, scoring_mat) \n\n def align(self, seqA, seqB, score_only=False):\n\n \"\"\"\n Perform Smith-Waterman local alignment with affine gap scoring.\n Initializes matrices and calls parent align method for scoring\n \n :param seqA: First query sequence\n :type seqA: str\n :param seqB: Second query sequence\n :type seqA: str\n :param score_only: Whether to return the aligned sequences with the score (default) or just the score\n :type score_only: bool, default=False\n\n :return: Gapped version of seqA, gapped version of seqB, alignment score; if score_only = True is passed, only the alignment score is returned\n\n \"\"\"\n\n # create matrices for alignment scores and gaps \n self.align_mat = np.zeros((len(seqA) + 1, len(seqB) + 1))\n self.gapA_mat = np.zeros((len(seqA) + 1, len(seqB) + 1))\n self.gapB_mat = np.zeros((len(seqA) + 1, len(seqB) + 1))\n\n # create matrices for backtracing pointers\n self.back = np.zeros((len(seqA) + 1, len(seqB) + 1))\n self.back_A = np.zeros((len(seqA) + 1, len(seqB) + 1))\n self.back_B = np.zeros((len(seqA) + 1, len(seqB) + 1))\n \n # initialize other variables with parent class align\n super(SmithWaterman, self).align(seqA, seqB, sw=True)\n \n # backtracking\n return self.backtrace(score_only)\n \n def backtrace(self, score_only=False):\n \"\"\"\n Backtracing through scoring matrices using parameters for Smith-Waterman local alignment parameters\n \n :param score_only: Whether to return the aligned sequences with the score (default) or just the score\n :type score_only: bool, default=False\n\n :return: Gapped version of seqA, gapped version of seqB, alignment score; if score_only = True is passed, only the alignment score is returned\n\n \"\"\"\n\n # Use list to keep track of what the value of the backtrace matrix points to, with\n # the index corresponding to the pointers used (0=diag=M, 1=left=IA, 2=up=IB). 
\n all_mat = [self.align_mat, self.gapA_mat, self.gapB_mat]\n all_back = [self.back, self.back_A, self.back_B]\n \n # get pointer to matrix with largest optimal value\n mat_ind = np.argmax([np.max(self.align_mat), np.max(self.gapA_mat), np.max(self.gapB_mat)])\n \n # get matrix containing maximum score\n curr_mat = all_mat[mat_ind]\n curr_back = all_back[mat_ind]\n \n # get index of largest value\n max_ind = np.where(curr_mat == np.amax(curr_mat))\n i, j = max_ind[0][0], max_ind[1][0]\n \n # update optimal score and return if not backtracking\n self.opt_score = int(curr_mat[i][j])\n if score_only: return self.opt_score\n \n # begin backtracing\n while (i > 0 and j > 0) and (curr_back[i,j] < 3):\n pointer = curr_back[i, j] # store current pointer\n i, j = super(SmithWaterman, self).backtrace(i, j, curr_back) # use parent backtrack function\n curr_back = all_back[int(pointer)] # use pointer to select next backtrace matrix\n \n # return the aligned sequences and score\n return (self.seqA_align, self.seqB_align, self.opt_score)\n\n# input fasta files of alignments\ndef read_fasta(input_file):\n \"\"\"\n Parses fasta file to retrieve fasta sequence\n \n :param input_file: Path to fasta file\n :type input_file: str\n\n :return: Gapped version of seqA, gapped version of seqB, alignment score; if score_only = True is passed, only the alignment score is returned\n\n \"\"\"\n\n # read \n with open(input_file, 'r') as f:\n fasta = [line for line in f.read().splitlines()]\n return fasta[0], \"\".join(fasta[1:])\n \n\n\n\n\n\n","sub_path":"align/algs.py","file_name":"algs.py","file_ext":"py","file_size_in_byte":17896,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"270181832","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Jun 19 16:58:06 2019\r\n\r\n@author: Masaya Muramatsu\r\n\"\"\"\r\n\r\nimport numpy as np\r\nfrom scipy import linalg\r\n\r\nclass ESN():\r\n \r\n \r\n def __init__(self, ninput, ninternal, noutput, W, W_in, W_fb, W_out, \r\n activation, out_activation, invout_activation, encode,\r\n spectral_radius,\r\n dynamics, regression,\r\n noise_level,delta,C,leakage\r\n ):\r\n \r\n \"\"\"\r\n W: ninternal x ninternal\r\n W_in: ninteral x ninput\r\n W_fb: ninteral x noutput\r\n W_out: noutput x ninternal \r\n \"\"\"\r\n \r\n self.ninput = ninput #number of nodes in input layer\r\n self.ninternal = ninternal #number of nodes in internal layer\r\n self.noutput = noutput #number of nodes in output layer\r\n self.ntotal = ninput + ninternal + noutput #number of nodes in all layers\r\n self.spectral_radius = spectral_radius #spectral radius\r\n \r\n \"\"\"\r\n def init_internal_weights():\r\n internal_weights = np.random.normal(0, 1, (ninternal,ninternal))\r\n maxval = max(abs(linalg.eigvals(internal_weights)))\r\n internal_weights = internal_weights / maxval * self.spectral_radius\r\n return internal_weights\r\n \"\"\"\r\n \r\n self.W = W # (ninternal x ninternal) \r\n self.W_in = W_in # (ninternal x ninput)\r\n self.W_fb = W_fb # (ninternal x ninput)\r\n self.W_out = W_out # noutput x (ninternal + ninput)\r\n self.activation = activation #activation function\r\n self.out_activation = out_activation #output activation fanction \r\n self.invout_activation = invout_activation #inverse of output activation function\r\n self.encode = encode\r\n \r\n dynamics_options = {'leaky': self.leaky, 'plain': self.plain,\r\n 'opt_leaky': self.leaky_optical, 'opt_proposal': self.leaky_optical_proposal} #reservoir renewal rule\r\n if dynamics in 
dynamics_options:\r\n self._update = dynamics_options[dynamics]\r\n else:\r\n self._update = dynamics\r\n\r\n self.noise_level = noise_level\r\n self.regression = regression\r\n self.trained = False\r\n self._last_input = np.zeros((self.ninput, 1)) \r\n self._last_state = np.zeros((self.ninternal, 1))\r\n self._last_output = np.zeros((self.noutput, 1)) \r\n self.delta = delta\r\n self.C = C\r\n self.leakage = leakage\r\n\r\n \r\n ##学習させる\r\n def fit(self, inputs, outputs, nforget):\r\n \"\"\"\r\n inputs : ninput x ntime\r\n outputs: noutput x ntime\r\n nforget: 最初のnforget分だけ回帰するときに無視する\r\n \"\"\"\r\n \r\n ntime = inputs.shape[1]\r\n \r\n #収集\r\n states = np.zeros((self.ninternal, ntime))\r\n for t in range(1, ntime):\r\n states[:, t] = self._update(states[:, t - 1], inputs[:, t], outputs[:, t - 1])\r\n \r\n S = np.vstack((states, inputs)).T[nforget:]\r\n D = self.invout_activation(outputs.T[nforget:])\r\n self.W_out = self.regression(S, D)\r\n \r\n # 最後のstateを覚えておく\r\n self._last_input = inputs[:, -1]\r\n self._last_state = states[:, -1]\r\n self._last_output = outputs[:, -1]\r\n\r\n self.trained = True\r\n\r\n return states\r\n \r\n def trained_outputs(self,inputs,outputs):\r\n \r\n ntime = inputs.shape[1]\r\n \r\n trained_outputs = np.zeros((self.noutput,ntime))\r\n states = np.zeros((self.ninternal,ntime))\r\n for t in range(1,ntime):\r\n states[:,t] = self._update(states[:, t - 1], inputs[:, t], outputs[:, t - 1])\r\n trained_outputs[:,t] = self.out_activation(self.W_out @ np.hstack((states[:, t], inputs[:, t])))\r\n \r\n return trained_outputs\r\n \r\n\r\n def predict(self, inputs, turnoff_noise=False, continuing=True):\r\n \"\"\"\r\n inputs: ninput x ntime\r\n continuing: 最後の訓練したstateでつづけるか\r\n turnoff_noise: ノイズを消すかどうか\r\n Return: outputs: noutput x ntime\r\n \"\"\"\r\n \r\n if turnoff_noise:\r\n self.noise_level = 0\r\n if not continuing:\r\n self._last_input = np.zeros((self.ninput, 1))\r\n self._last_state = np.zeros((self.ninternal, 1))\r\n self._last_output = np.zeros((self.noutput, 1))\r\n\r\n ntime = inputs.shape[1]\r\n outputs = np.zeros((self.noutput, ntime))\r\n\r\n states = np.zeros((self.ninternal, ntime))\r\n states[:, 0] = self._update(self._last_state, inputs[:, 0], self._last_output)\r\n outputs[:, 0] = self.out_activation(self.W_out @ np.hstack((states[:, 0], inputs[:, 0])))\r\n for t in range(1, ntime):\r\n states[:, t] = self._update(states[:, t - 1], inputs[:, t], outputs[:, t - 1])\r\n outputs[:, t] = self.out_activation(self.W_out @ np.hstack((states[:, t], inputs[:, t])))\r\n\r\n return outputs\r\n \r\n \r\n \r\n def leaky(self, previous_internal, new_input, previous_output):\r\n\r\n new_internal = (1 - self.delta * self.C * self.leakage) * previous_internal \\\r\n + self.delta * self.C *self.activation(self.W_in @ new_input\r\n + self.W @ previous_internal\r\n + self.W_fb @ previous_output\r\n + self.noise_level) \\\r\n \r\n return new_internal\r\n \r\n def plain(self, previous_internal, new_input, previous_output):\r\n \r\n new_internal = self.activation(self.W_in @ new_input\r\n + self.W @ previous_internal\r\n + self.W_fb @ previous_output)\\\r\n + self.noise_level * (np.random.rand(self.ninternal)-0.5)\r\n return new_internal\r\n \r\n\r\n def leaky_optical(self, previous_internal, new_input, previous_output):\r\n new_internal = (1-self.delta * self.C * self.leakage) * previous_internal \\\r\n + self.delta * self.C *self.activation(self.W_in @ self.encode(new_input)\r\n + self.W @ self.encode(previous_internal)\r\n + self.noise_level) \r\n return 
new_internal\r\n \r\n def leaky_optical_proposal(self, previous_internal, new_input, previous_output):\r\n new_internal = (1-self.delta * self.C * self.leakage) * previous_internal \\\r\n + self.delta * self.C *self.activation(self.W_in @ self.encode(new_input)\r\n + self.W @ self.encode(previous_internal)\r\n + self.W_fb @ self.encode(previous_output)\r\n + self.noise_level) \r\n return new_internal ","sub_path":"echo_state_network_optical.py","file_name":"echo_state_network_optical.py","file_ext":"py","file_size_in_byte":7005,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"175252147","text":"import RPi.GPIO as gpio\nimport time\n \ngpio.setmode(gpio.BOARD)\ngpio.setup(26, gpio.IN, pull_up_down=gpio.PUD_UP)\ngpio.setup(11, gpio.OUT)\n \ndef action(channel):\n print(\"Motion detected\")\n for i in range(10):\n gpio.output(11, True)\n time.sleep(0.2)\n gpio.output(11, False)\n time.sleep(0.2)\n \ntry:\n gpio.add_event_detect(26, gpio.RISING, callback=action, bouncetime=200)\n while True:\n time.sleep(1)\nexcept:\n gpio.cleanup()","sub_path":"09_RpiSensor/PIRSensor.py","file_name":"PIRSensor.py","file_ext":"py","file_size_in_byte":470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"522578373","text":"import sys\n\nfrom oslo_config import cfg\nfrom oslo_log import log as logging\n\nfrom neutron.common import config as common_config\nfrom neutron.common import utils as neutron_utils\nfrom neutron.i18n import _LE, _LI, _LW\nfrom neutron.plugins.linuxbridge.agent import linuxbridge_neutron_agent as lna\n\nfrom altocumulus.discovery import DiscoveryManager\nfrom altocumulus.utils import Shell\n\nDEFAULT_ROOT_HELPER = 'sudo'\n\nLOG = logging.getLogger(__name__)\n\n\nclass HPBLinuxBridgeNeutronAgentRPC(lna.LinuxBridgeNeutronAgentRPC):\n def __init__(self, interface_mappings, polling_interval):\n super(HPBLinuxBridgeNeutronAgentRPC, self).__init__(\n interface_mappings,\n polling_interval\n )\n\n dm = DiscoveryManager(Shell(DEFAULT_ROOT_HELPER))\n\n for physnet, interface in interface_mappings.iteritems():\n neighbor = dm.find_neighbor_for_interface(interface)\n if neighbor:\n self.agent_state['configurations']['switch_name'] = \\\n neighbor['name']\n self.agent_state['configurations']['switch_mgmt_ip'] = \\\n neighbor['mgmt-ip']\n break\n else:\n LOG.error(\n _LE('Unable to find %s neighbor for interface %s'),\n physnet,\n interface\n )\n\n\ndef main():\n common_config.init(sys.argv[1:])\n\n common_config.setup_logging()\n try:\n interface_mappings = neutron_utils.parse_mappings(\n cfg.CONF.LINUX_BRIDGE.physical_interface_mappings)\n except ValueError as e:\n LOG.error(_LE(\"Parsing physical_interface_mappings failed: %s. \"\n \"Agent terminated!\"), e)\n sys.exit(1)\n LOG.info(_LI(\"Interface mappings: %s\"), interface_mappings)\n\n polling_interval = cfg.CONF.AGENT.polling_interval\n agent = HPBLinuxBridgeNeutronAgentRPC(interface_mappings,\n polling_interval)\n LOG.info(_LI(\"Agent initialized successfully, now running... 
\"))\n agent.daemon_loop()\n sys.exit(0)\n\n","sub_path":"altocumulus/ml2/hpb_bridge_agent.py","file_name":"hpb_bridge_agent.py","file_ext":"py","file_size_in_byte":2079,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"543177152","text":"import os\nimport scipy\nprint('scipy: %s' % scipy.__version__)\nimport numpy as np\nprint('numpy: %s' % np.__version__)\nimport matplotlib\nprint('matplotlib: %s' % matplotlib.__version__)\nimport matplotlib.pyplot as plt\nimport pandas as pd\nprint('pandas: %s' % pd.__version__)\nimport sklearn\n# import statsmodels\nfrom pandas import Series\nimport datetime as dt\nimport pickle\nimport pytrends\nimport lxml\nfrom pytrends.request import TrendReq\n\nprint(os.getcwd())\n\ndef main():\n print(\"First Module's Name: {}\".format(__name__))\n print('OS:', os.name)\n os.chdir('..')\n\n if os.name == 'posix':\n sl = '/'\n elif os.name == 'nt':\n sl = '\\\\'\n\n# timezone 360 = US CST\npytrends = TrendReq(hl='en-US', tz=360)\nkw_list = [\"Bitcoin\"]\n\nsingle_frames = {\n 0: '2017-08-01 2017-08-31',\n 1: '2017-09-01 2017-09-30',\n 2: '2017-08-01 2017-09-30',\n}\n\nz = 1\ndt_pd_google_segments = pd.DataFrame(columns = ['Bitcoin', 'segment'])\nfor x in single_frames:\n pytrends.build_payload(kw_list, cat=0, timeframe=single_frames[x], geo='', gprop='')\n dt_pd_google_tmp = pytrends.interest_over_time()\n print(x)\n if x > 0:\n print('x>0')\n dt_pd_google_tmp['segment'] = x\n dt_pd_google_segments = dt_pd_google_segments.append(dt_pd_google_tmp)\n else:\n print('x = 0')\n dt_pd_google_tmp['segment'] = x\n dt_pd_google_segments = dt_pd_google_segments.append(dt_pd_google_tmp)\n print('retrieve frame', x, 'done - ', z/len(single_frames)*100,'%')\n z = z + 1\n\n# dt_pd_google_daily = dt_pd_google_daily_un.groupby(dt_pd_google_daily_un.index).first()\ndt_pd_google_segments.rename(columns={'Bitcoin': 'google_tr'}, inplace=True)\n\ndt_pd_google_segments.to_pickle('dt_pd_google_ex_unadj.pickle')\n\nprint('google trend ex download done')\n\nif __name__ == '__main__':\n main()\nelse:\n print(\"Run From Import\")","sub_path":"P_Python/A_Archive/dt_google_segments_ex_unadj.py","file_name":"dt_google_segments_ex_unadj.py","file_ext":"py","file_size_in_byte":1857,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"631590386","text":"import json\n\nfrom django.urls import reverse\n\nfrom django.test import TestCase\n\n\nclass TagsTestCase(TestCase):\n def post_url(self, name, url):\n return self.client.post(\n reverse(name),\n json.dumps({\n 'url': url\n }),\n 'application/json'\n )\n\n def test_page_tags_success(self):\n resp = self.post_url('tags', 'https://google.com.ua/')\n self.assertEqual(resp.status_code, 201)\n\n def test_page_tags_validation(self):\n resp = self.post_url('tags', 'http// not valid . url')\n self.assertEqual(resp.status_code, 400)\n\n def test_page_tags_error(self):\n resp = self.post_url('tags', reverse('tags'))\n self.assertEqual(resp.status_code, 400)\n","sub_path":"apps/tags/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"450431490","text":"# -*- coding:utf-8 -*-\r\n\r\nimport wx\r\nfrom . 
import panel\r\n\r\nfrom .listctrl import HistoryListCtrl as HisList\r\nfrom .historyactionpanel import HistoryActionPanel as HaPanel\r\n\r\nclass HistoryPanel(wx.Panel):\r\n\tdef __init__(self, *a, **k):\r\n\t\tsuper(HistoryPanel, self).__init__(*a, **k)\r\n\t\t\r\n\t\tself.listctrl = HisList(self, wx.ID_ANY, \\\r\n\t\t\tstyle = wx.LC_REPORT | wx.LC_SINGLE_SEL | wx.NO_BORDER)\r\n\t\t\r\n\t\tself.actionpanel = HaPanel(self, wx.ID_ANY)\r\n\t\t\r\n\t\tvbox = wx.BoxSizer(wx.VERTICAL)\r\n\t\tvbox.Add(self.actionpanel, \\\r\n\t\t\tflag = wx.TOP | wx.LEFT | wx.RIGHT | wx.EXPAND | wx.ALIGN_TOP)\r\n\t\tvbox.Add(self.listctrl, \\\r\n\t\t\tproportion = 1, \\\r\n\t\t\tflag = wx.ALL | wx.EXPAND | wx.ALIGN_BOTTOM)\r\n\t\t\r\n\t\tself.actionpanel.Redo_callback = self.listctrl.Redo\r\n\t\tself.actionpanel.Undo_callback = self.listctrl.Undo\r\n\t\tself.actionpanel.Clear_callback = self.listctrl.Clear\r\n\t\t\r\n\t\t\r\n\t\tself.SetSizer(vbox)\r\n\t\r\n\t\r\nclass Panel(panel.NotebookPanel):\r\n\tdef __init__(self, *a, **k):\r\n\t\tsuper(Panel, self).__init__(*a, **k)\r\n\t\tself.historypanel = HistoryPanel(self.notebook, wx.ID_ANY)\r\n\t\tself.listctrl = self.historypanel.listctrl\r\n\t\tself.BuildPages()\r\n\t\t\r\n\tdef BuildPages(self):\r\n\t\tself.notebook.AddPage(self.historypanel, 'History')\r\n\t\r\n\tdef insert(self, func):\r\n\t\tself.listctrl.insert(func)\n","sub_path":"trunk/pytune/pytune/historypanel.py","file_name":"historypanel.py","file_ext":"py","file_size_in_byte":1267,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"306809072","text":"#!/usr/bin/env python3.4\nfrom test_tools import *\n\n\n@runtest\ndef run():\n \"\"\"The test checks for restriction to set permissions on the file.\"\"\"\n test = Testfile()\n acl = Acl()\n # Create a file with standart UNIX permissions 777. SUCCESS.\n test.del_testfile()\n test.create_testfile()\n # Set the permission (NFSv4 Access Control Lists) 'ALLOW x' for 'TEST_UID'\n #(nfs4_setfacl -a A::TEST_UID:x). SUCCESS.\n ace = Ace('A', '', TEST_UID, 'x')\n acl.modify_acl(ace)\n acl.is_ace_set(ace)\n acl.is_acl_correct()\n # Set permissions (NFSv4 Access Control Lists) 'DENY x' for 'EVERYONE@'\n #(nfs4_setfacl -a D::EVERYONE@:x). SUCCESS.\n ace = Ace('D', '', 'EVERYONE@', 'x')\n acl.modify_acl(ace)\n acl.is_acl_correct()\n # Check the permission (NFSv4 Access Control Lists)\n #'ALLOW x' for 'TEST_UID' (A::TEST_UID:x). 
FAIL.\n ace = Ace('A', '', TEST_UID, 'x')\n acl.is_ace_set(ace, success=False)\n\n\nif __name__ == '__main__':\n print('START TEST'.center(72,'*'))\n try:\n run()\n print('TEST SECCESSFUL'.center(72,'*'))\n except Exception as e:\n print('ERROR', e)\n print('TEST FAILED'.center(72,'*'))\n","sub_path":"test2.py","file_name":"test2.py","file_ext":"py","file_size_in_byte":1174,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"546792419","text":"import sys\nimport os.path\nimport argparse\nimport itertools\nfrom typing import List, Dict\n\nimport yaml\nfrom scipy.spatial import distance\nfrom matplotlib import pyplot\nimport matplotlib.gridspec as gridspec\n\nfrom .data import IOMetricsWriteVda, add_iop_size, TSDBCreds, parse_db_creds, CPUMetrics, disk_metric\nfrom .db import LocalDBNP, load_nodes, LocalDB\nfrom .ml import get_metric_slice, metric_distance, sort_by_hdistance, get_cross_vm_dist, get_cross_dist, moving_average\nfrom .plot import plot_cross_vm_dist, plot_metrics, plot_vm_lifetime, plot_cross_dist\nfrom .influx_exporter import copy_data_from_influx, lookup_vms, connect_to_ts_database, most_loaded_vms\n\n\nclass Config:\n instances = None # type: List[str]\n sqlite3db = None # type: str\n sqlite_cls = None # type: str\n ts_db = None # type: str\n sync_metrics = None # type: Dict[str, Dict[str, str]]\n\n\ndef connect_to_sqlite(cfg: Config) -> LocalDB:\n clss = [LocalDB, LocalDBNP]\n for cls in clss:\n if cls.__name__ == cfg.sqlite_cls:\n return cls(cfg.sqlite3db)\n raise ValueError(\"Unknown db class {!r}\".format(cfg.sqlite_cls))\n\n\ndef sync_date_from_influx(cfg: Config):\n ts_client = connect_to_ts_database(cfg.ts_db)\n with connect_to_sqlite(cfg) as db:\n db.prepare()\n items = [(instance, None, 'virt_cpu_time') for instance in cfg.instances]\n vda_metrics = ['virt_disk_octets_write', 'virt_disk_octets_read', 'virt_disk_ops_write', 'virt_disk_ops_read']\n items += list(itertools.product(cfg.instances, ['vda'], vda_metrics))\n copy_data_from_influx(db, ts_client, items, np=True)\n\n\ndef parse_args(argv):\n # root passwd == masterkey\n descr = \"Monitoring result analyze tool\"\n parser = argparse.ArgumentParser(prog='mira-ml', description=descr)\n parser.add_argument(\"-l\", '--log-level', default='DEBUG', choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'SILENT'],\n help=\"Set log level\")\n parser.add_argument(\"-c\", '--config', default='~/.config/mira-ml.yaml', help=\"Config file path\")\n subparsers = parser.add_subparsers(dest='subparser_name')\n # ---------------------------------------------------------------------\n\n subparsers.add_parser('ls', help='list all vms')\n subparsers.add_parser('sync2sqlite', help='Sync data to sqlite')\n subparsers.add_parser('vm_count')\n subparsers.add_parser('lifetime')\n\n vm_load_corr = subparsers.add_parser('vm_load_corr')\n vm_load_corr.add_argument(\"-r\", '--ref-vm', help=\"Reference vm id\")\n vm_load_corr.add_argument('--start-time', help=\"Corr start time\")\n vm_load_corr.add_argument('--stop-time', help=\"Corr stop time\")\n\n node_vm_load_corr = subparsers.add_parser('node_vm_load_corr')\n node_vm_load_corr.add_argument(\"-r\", '--ref-vm', help=\"Reference vm id\")\n node_vm_load_corr.add_argument('--start-time', help=\"Corr start time\")\n node_vm_load_corr.add_argument('--stop-time', help=\"Corr stop time\")\n\n return parser.parse_args(argv)\n\n\n# Посчитать корреляции hw-нода <-> виртуалка, найти на какой ноде живут виртуалки, сравнить с реальными данными\n# Посчитать 
корреляции vm <-> vm, проверить как бы они агрегировались\n\n\ndef main(argv):\n opts = parse_args(argv[1:])\n cfg = Config()\n cfg.__dict__.update(yaml.load(open(os.path.expanduser(opts.config))))\n\n if opts.subparser_name == 'ls':\n client = connect_to_ts_database(cfg.ts_db)\n # list all vm in db\n\n vms = lookup_vms(client, cfg.instances[:32])\n vms_l = sorted(vms.values(), key=lambda x: x.lifetime)\n min_start_time = min(vm.start_time for vm in vms_l)\n max_stop_time = max(vm.stop_time for vm in vms_l)\n print(min_start_time, max_stop_time, max_stop_time - min_start_time)\n # vms_sl = vms_l[-10:]\n # fill_vms_io_stats(client, vms_sl)\n # most_loaded_vms(client)\n elif opts.subparser_name == 'sync2sqlite':\n db = connect_to_sqlite(cfg)\n db.prepare()\n copy_data_from_influx(db, cfg.ts_db, cfg.sync_metrics, cfg.instances[:32], True)\n\n elif opts.subparser_name == 'vm_count':\n with connect_to_sqlite(cfg) as db:\n vms = load_nodes(db, vm=True)\n\n vm_start_times = {vm.start_time for vm in vms}\n vm_stop_times = {vm.stop_time for vm in vms}\n all_change_times = vm_start_times.union(vm_stop_times)\n timeline = {} # type: Dict[float, int]\n curr = 0\n for tm in sorted(all_change_times):\n curr += 1 if tm in vm_start_times else -1\n timeline[tm] = curr\n\n x, y = zip(*sorted(timeline.items()))\n x = [(i - x[0]) / 3600 for i in x]\n ax = pyplot.figure().add_subplot(111)\n ax.plot(x, y)\n pyplot.show()\n\n elif opts.subparser_name == 'lifetime':\n with connect_to_sqlite(cfg) as db:\n vms = load_nodes(db, vm=True)\n ax = pyplot.figure().add_subplot(111)\n plot_vm_lifetime(vms, ax)\n pyplot.show()\n\n elif opts.subparser_name == 'vm_load_corr':\n reference_vm_id = opts.ref_vm\n start_time = int(opts.start_time)\n stop_time = int(opts.stop_time)\n\n with connect_to_sqlite(cfg) as db:\n vms = load_nodes(db, vm=True)\n vms = [vm for vm in vms if vm.stop_time >= stop_time and vm.start_time <= start_time]\n\n # metr = CPUMetrics()\n # metric_slice = get_metric_slice(metr.device, metr.metric, start_time, stop_time)\n metr = disk_metric('vda', 'virt_disk_octets_write')\n metric_slice = get_metric_slice(metr.device, metr.metric, start_time, stop_time)\n dist_func = distance.euclidean\n\n base_vms = [vm for vm in vms if vm.name.startswith(reference_vm_id)]\n assert base_vms, \"Can't find vm\"\n assert len(base_vms) == 1, \"Ambigious vm id\"\n\n base_vm = base_vms[0]\n vms_l = [base_vm] + [vm for vm in vms if vm is not base_vm]\n fig = pyplot.figure(figsize=(18, 12))\n fig.set_tight_layout(True)\n\n gs = gridspec.GridSpec(1, 2)\n ax = pyplot.subplot(gs[0, 0])\n\n plot_metrics(vms_l, metric_slice, ax)\n ax.set_title(\"Serie values\", fontsize='xx-large')\n\n ax = pyplot.subplot(gs[0, 1])\n vm_metric_dist_f = metric_distance(metric_slice, dist_func)\n vm_metric_dist = get_cross_vm_dist(vms_l, vm_metric_dist_f, sort=True)\n plot_cross_vm_dist(vm_metric_dist, [vm.name_short for vm in vms_l], ax)\n ax.set_title(\"Serie metrics\", fontsize='xx-large')\n ax.title.set_position([.5, 1.1])\n\n pyplot.show()\n\n elif opts.subparser_name == 'node_vm_load_corr':\n reference_vm_id = opts.ref_vm\n start_time = int(opts.start_time)\n stop_time = int(opts.stop_time)\n\n with connect_to_sqlite(cfg) as db:\n hw_nodes = load_nodes(db, vm=False)\n vms = [vm for vm in load_nodes(db, vm=True)\n if vm.stop_time >= stop_time and vm.start_time <= start_time]\n\n base_vms = [vm for vm in vms if vm.name.startswith(reference_vm_id)]\n assert base_vms, \"Can't find vm\"\n assert len(base_vms) == 1, \"Ambigious vm id\"\n base_vm = 
base_vms[0]\n vms = [base_vm] + [vm for vm in vms if vm is not base_vm]\n vms = vms[:5]\n\n fig = pyplot.figure(figsize=(18, 12))\n fig.set_tight_layout(True)\n ax = fig.add_subplot(111)\n\n dist_func = distance.correlation\n # dist_func = distance.cosine\n # dist_func = distance.euclidean\n\n vm_metr = disk_metric('vda', 'virt_disk_octets_write')\n vm_metric_slice = get_metric_slice(vm_metr.device, vm_metr.metric, start_time, stop_time)\n hw_metr = disk_metric('sdb', 'disk_octets_write')\n hw_metric_slice = get_metric_slice(hw_metr.device, hw_metr.metric, start_time, stop_time)\n\n hw_nodes.sort(key=lambda x: x.name)\n hw_vecs = list(map(hw_metric_slice, hw_nodes))\n vm_vecs = list(map(vm_metric_slice, vms))\n\n min_sz = min([len(i) for i in hw_vecs] + [len(i) for i in vm_vecs])\n hw_vecs = [vec[:min_sz] for vec in hw_vecs]\n hw_vecs, hw_nodes_s = zip(*[(vec, node) for vec, node in zip(hw_vecs, hw_nodes) if vec.sum() > 1.0])\n\n vm_vecs = [vec[:min_sz] for vec in vm_vecs]\n\n # hw_vecs = [vec - vec.mean() for vec in hw_vecs]\n # vm_vecs = [vec - vec.mean() for vec in vm_vecs]\n\n # import IPython\n # IPython.embed()\n\n # vm_metric_dist = get_cross_dist(vm_vecs, hw_vecs, dist_func, sort=True)\n # plot_cross_dist(vm_metric_dist, [node.name_short for node in hw_nodes_s], [vm.name_short for vm in vms], ax)\n\n for vec, node in zip(hw_vecs, hw_nodes_s):\n vec = moving_average(vec, 90)\n ax.plot(vec, color='blue', label=node.name_short)\n\n for vec, vm in zip(vm_vecs, vms):\n vec = moving_average(vec, 90)\n ax.plot(vec, color='red', label=vm.name_short)\n\n ax.legend()\n ax.set_title(\"Serie metrics\", fontsize='xx-large')\n ax.title.set_position([.5, 1.1])\n\n pyplot.show()\n else:\n # histo_func = metr.get_histo()\n # vms_l.remove(base_vm)\n # vms_l, dists = sort_by_hdistance(base_vm, vms_l, histo_func, dist_func)\n # vms_l.insert(0, base_vm)\n # dists.insert(0, 0)\n\n # fig = pyplot.figure(figsize=(36, 24))\n # reference_vm_id = '5b0f6842-a501-4c01-b78f-130530c81fa0'\n # reference_vm_id = 'e71bf537-1741-4466-acbd-6231fd083b9e'\n # reference_vm_id = 'f802ef20-f943-4d7b-890f-919b4d3edea2'\n # dist_func = distance.euclidean\n\n # client = InfluxDBClient('localhost', 8086, 'lma', 'lma', 'lma')\n # list_all_vms(client)\n # sync_date_from_influx()\n\n min_lifespan = [1.499 * 10**9, 1.49975 * 10 ** 9]\n\n # metr = CPUMetrics()\n # metr = IOMetricsWriteWszVda()\n metr = IOMetricsWriteVda()\n histo_func = metr.get_histo()\n # histo_func = metr2.get_histo()\n\n with connect_to_sqlite(cfg) as db:\n # with LocalDB(\"/home/koder/workspace/mira-ml/vms.db\") as db:\n vms = load_vms(db)\n\n vms = {vm_id: vm for vm_id, vm in vms.items()\n if vm.stop_time >= min_lifespan[1] and vm.start_time <= min_lifespan[0]}\n\n for vm in vms.values():\n add_iop_size(vm, metr.device)\n\n start_time = max(vm.start_time for vm in vms.values())\n stop_time = min(vm.stop_time for vm in vms.values())\n\n metric_slice = get_metric_slice(metr.device, metr.metric, start_time, stop_time)\n vm_metric_dist_f = metric_distance(metric_slice, dist_func)\n\n # metr_f2 = get_metric_slice(CPUMetrics.device, CPUMetrics.metric, start_time, stop_time)\n # vm_metric_dist_f_cpu = metric_distance(metr_f2, dist_func)\n\n base_vm = vms[reference_vm_id]\n vms_l = list(vms.values())\n vms_l.remove(base_vm)\n vms_l, dists = sort_by_hdistance(base_vm, vms_l, histo_func, dist_func)\n vms_l.insert(0, base_vm)\n dists.insert(0, 0)\n\n # fig = pyplot.figure(figsize=(36, 24))\n fig = pyplot.figure(figsize=(18, 12))\n fig.set_tight_layout(True)\n\n gs = 
gridspec.GridSpec(1, 2)\n ax = pyplot.subplot(gs[0, 0])\n\n plot_metrics(vms_l, metric_slice, ax)\n ax.set_title(\"Serie values\", fontsize='xx-large')\n\n ax = pyplot.subplot(gs[0, 1])\n vm_metric_dist = get_cross_vm_dist(vms_l, vm_metric_dist_f)\n plot_cross_vm_dist(vm_metric_dist, [vm.vm_id_short for vm in vms_l], ax)\n ax.set_title(\"Serie metrics\", fontsize='xx-large')\n ax.title.set_position([.5, 1.1])\n\n # histo_gs = gridspec.GridSpecFromSubplotSpec(4, 4, subplot_spec=gs[0, 1])\n # ax_func = lambda idx: pyplot.subplot(histo_gs[idx // 4, idx % 4])\n # plot_histos(vms_l, map(histo_func1d, vms_l), dists, ax_func)\n\n # dists1d = pair_distances(vms_l, histo_func1d, dist_func)\n # dists2d = pair_distances(vms_l, histo_func2d, dist_func)\n # plot_cross_vm_dist(dists1d, [vm.vm_id_short for vm in vms_l], ax)\n # ax.set_title(\"Histo metrics\")\n\n # base_vm = vms_l.pop(0)\n # vms_l, dists = sort_by_hdistance(base_vm, vms_l, histo_func2d, dist_func)\n # vms_l.insert(0, base_vm)\n # dists.insert(0, 0)\n\n # ax_func = lambda idx: pyplot.subplot(gs[5 + idx // 4, 4 + idx % 4])\n # plot_io(vms_l, dists, ax_func, metr2.device)\n\n pyplot.show()\n return 0\n\n\nif __name__ == \"__main__\":\n exit(main(sys.argv))\n","sub_path":"mira_ml/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":12934,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"496980362","text":"''' Macro module for store related macros '''\nfrom client.store import catalog\n\nfrom logger import Logger\n\n\ndef buy(connection, item_id, val):\n ''' Buys a specific item from the store '''\n data = {\n \"items\": [\n {\n \"itemKey\": {\n \"inventoryType\": \"CHAMPION\",\n \"itemId\": item_id\n },\n \"purchaseCurrencyInfo\": {\n \"currencyType\": \"IP\",\n \"price\": val,\n \"purchasable\": True,\n },\n \"quantity\": 1\n }\n ]\n }\n res = connection.post('/lol-purchase-widget/v1/purchaseItems', json=data)\n res_json = res.json()\n if res.status_code == 200:\n return \"success\"\n return res_json[\"errorDetails\"].popitem()[0] if 'errorDetails' in res_json else 'error'\n\n\ndef buy_champ_by_be(logger: Logger, connection, blue_essence):\n ''' Buys all the champions of specific blue essence value '''\n logger.log(f\"Getting champions at costs {blue_essence} BE\")\n res_json = catalog(connection, \"CHAMPION\")\n filtered = list(filter(lambda m: m[\"prices\"][0][\"cost\"] == blue_essence, res_json))\n for champ in filtered:\n name = champ[\"localizations\"][\"en_GB\"][\"name\"]\n logger.log(f'Buying {name}...')\n result = buy(connection, champ[\"itemId\"], champ[\"prices\"][0][\"cost\"])\n if result == \"error\":\n logger.log(\"Error buying champion.\")\n continue\n if result == \"validation.item.owned\":\n logger.log(\"Champion already owned\")\n continue\n if result == \"validation.item.not.enough.currency\":\n logger.log(\"Not enough BE to buy champion\")\n break\n","sub_path":"macro/store.py","file_name":"store.py","file_ext":"py","file_size_in_byte":1742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"623138374","text":"import os\r\nimport treetaggerwrapper\r\nfrom nltk.tokenize import word_tokenize\r\nfrom string import punctuation\r\nfrom config.config import TAG_PAR_FILE, TAGDIR\r\n\r\n# On veut garder certains caractères de ponctuation\r\npunctuation = [p for p in punctuation if p not in \"!?$#%&+-\"]\r\n\r\n\r\ntagger = treetaggerwrapper.TreeTagger(TAGLANG='fr', TAGPARFILE=TAG_PAR_FILE, 
TAGDIR=TAGDIR)\r\nexclude_type = [\r\n \"ADV\", \"DET:ART\", \"DET:POS\", \"KON\", \"NUM\", \"PRO\", \"PRO:DEM\",\r\n \"SENT\", \"PRO:IND\", \"PRO:PER\", \"PRO:POS\", \"PRO:REL\", \"PRP\",\r\n \"PRP:det\", \"PUN\", \"PUN:cit\", \"SYM\"]\r\n\r\n\r\ndef processing(text, return_token=False):\r\n text = text.replace('#', '# ')\r\n text_tagged = tagger.tag_text(text)\r\n text_tagged = treetaggerwrapper.make_tags(\r\n text_tagged, exclude_nottags=True)\r\n list_lemm = [\r\n tag.lemma for tag in text_tagged if tag.pos not in exclude_type]\r\n # liste = reduc(liste)\r\n new_text = ' '.join(list_lemm)\r\n # new_text = new_text.replace(u\"url-remplacée\",u\"\")\r\n # new_text = new_text.replace(u\"email-remplacé\",u\"\")\r\n # new_text = new_text.replace(u\"dns-remplacé\",u\"\")\r\n for p in punctuation:\r\n new_text = new_text.replace(p, \"\")\r\n new_text = new_text.replace(\" \", \" \").replace(\" \", \" \").lower()\r\n if new_text == \"\" or new_text is None:\r\n new_text = \"empty-tweet\"\r\n\r\n if return_token:\r\n new_text = word_tokenize(new_text)\r\n\r\n return new_text\r\n\r\n\r\ndef format_sentence(sent):\r\n \"\"\"Format data for classification\"\"\"\r\n sent = processing(sent)\r\n return {word: True for word in word_tokenize(sent)}\r\n\r\n\r\nif __name__ == '__main__':\r\n pass\r\n # processing(\"\"\" @soissonschris Bugatti Chiron #Mondial2018 https://t.co/b0MATr1iJC \"\"\")\r\n # processing(\"\"\" \"Découvrez des assurances qui s'adaptent à vos nouvelles attentes...\r\n # https://t.co/O7PlaPzPvP https://t.co/T0U6Y7MLfV\" \"\"\")\r\n # processing(\"\"\" \"Ce qui est atypique chez nous, c'est que l'on a mis les paysans et les commerçants autour de la même table\" 🎙 Clau… https://t.co/rKBp9SzQBM \"\"\")\r\n # processing(\"\"\" @sophie_merle @m6info La #spiruline est aussi une arme contre la #malnutrition ! Excellent complément alimentaire,… https://t.co/kOQxSh8vBT \"\"\")\r\n # processing(\"\"\" L’#éducation participe à l’amélioration du processus démocratique et à l’exercice de ses droits civiques. Nous mili… https://t.co/8E6YHIg01B \"\"\")\r\n # processing(\"\"\" Courir dimanche ? Oui au #Trail du four à chaux à #Nandy ! 
https://t.co/WFOlDph4R3 #scleroseenplaques @LaRep77… https://t.co/8I5yqDk3Tj \"\"\")\r\n # processing(\"\"\" [Chiffres-Clé du jour] 2 270 000 m3 de capacité de stockage d'eau recyclée, 12,5 km de canalisations et 4 000 hecta… https://t.co/M2zHNVwpNs \"\"\")\r\n # processing(\"\"\" \"ACC\r\n # @assocoeurcouleur\r\n # #lavoixdesrares\r\n\r\n # Plateforme d'expertise Maladies Rares Paris-Sud\r\n # @RaresParisSud\r\n\r\n # #CaféMR… https://t.co/3rEXa1KoRR\" \"\"\")\r\n # processing(\"\"\" Tarbes : la navette gratuite du centre-ville se renverse, 3 blessés légers https://t.co/XFGVq17XIE \"\"\")\r\n # processing(\"\"\" Le Germoir🌱 weekend de #coconstruction pour les #bénévoles dirigeants + salarié.e.s @e_graine venu.e.s de toute la… https://t.co/zf6MJzzt3t \"\"\")\r\n # processing(\"\"\" Les #femmes contribuent de manière significative à la mise en œuvre de l’« Agenda 2030 » et à l’atteinte de ses 17… https://t.co/ed8mDxoeFS \"\"\")\r\n","sub_path":"utils/text_processing.py","file_name":"text_processing.py","file_ext":"py","file_size_in_byte":3425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"104035590","text":"import intra.session\nimport requests\n\ndef load(url):\n\theaders = {\n\t\t'User-Agent': 'Mozilla/5.0 Python-urllib3/1.19 (Linux armv7; Raspbian/8.0, like Debian) (compatible; IntraBot/1.0; +http://roslyn.epi.codes/intra-bot.html)'\n\t}\n\tr = requests.get(url, headers=headers, cookies=intra.session.jar, allow_redirects=False, timeout=30)\n\tintra.session.jar.update(r.cookies)\n\treturn r","sub_path":"calculator/intra/request.py","file_name":"request.py","file_ext":"py","file_size_in_byte":376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"61106886","text":"import copy\nimport math\nimport numpy as np\nimport scipy\nfrom scipy import interpolate\nimport scipy.sparse as sp\nimport functions\n\nclass agent:\n def __init__(self,par, z0):\n self.z = z0\n self.zDot = 0\n self.zDotDot = 0\n self.dV = 0\n self.maxStepsize = par.maxStepsize\n self.derivZ = 0\n\n def stateDynamics(self, z, zDot, dV, u):\n dVNew = dV + u\n zDotDotNew = - (1.3*zDot*abs(zDot) + 1000*9.81*dVNew)/(1.1*0.47)\n zDotNew = zDot + zDotDotNew\n zNew = z - zDotNew\n return np.array([zNew, zDotNew, zDotDotNew, dVNew])\n\n\n def trajectoryFromControl(self, u):\n stateTraj = np.zeros((len(u),4))\n stateTraj[0,0] = self.z\n stateTraj[0,1] = self.zDot\n stateTraj[0,2] = self.zDotDot\n stateTraj[0,3] = self.dV\n for i in range(len(u) - 1):\n stateTraj[i + 1,:] = self.stateDynamics(stateTraj[i,0],stateTraj[i,1],stateTraj[i,3], u[i]).reshape(4)\n return stateTraj\n\nclass GP:\n def __init__(self, kernelPar):\n self.kernelPar = kernelPar\n self.emptyData = True\n self.trainInput = None\n self.trainOutput = None\n\n def kernel(self, z1, z2):\n squaredDistance = np.linalg.norm(z1 - z2,2)**2\n return np.exp(-.5 * 1 / self.kernelPar * squaredDistance)\n\n def getKernelMatrix(self, vec1, vec2):\n n = vec1.shape[0]\n N = vec2.shape[0]\n K = np.zeros((n, N))\n for i in range(n):\n for j in range(N):\n K[i, j] = self.kernel(vec1[i,:], vec2[j,:])\n return K\n\n def update(self, inputData, outputData):\n if self.emptyData:\n self.trainInput = inputData*np.eye(1)\n self.trainOutput = outputData*np.eye(1)\n self.emptyData = False\n else:\n self.trainInput = np.vstack((self.trainInput, inputData))\n self.trainOutput = np.vstack((self.trainOutput, outputData))\n\n def predict(self, inputData):\n # according to 
https://www.cs.ubc.ca/~nando/540-2013/lectures/l6.pdf\n K = self.getKernelMatrix(self.trainInput, self.trainInput)\n L = np.linalg.cholesky(K+0.00001*np.eye(len(K)))\n\n # Compute mean\n Lk = np.linalg.solve(L, self.getKernelMatrix(self.trainInput, inputData))\n mu = np.dot(Lk.T, np.linalg.solve(L, self.trainOutput))\n\n # Compute variance\n KStar = self.getKernelMatrix(inputData, inputData)\n var = KStar - np.dot(Lk.T, Lk)\n\n return mu, var\n\nclass trueField:\n def __init__(self, par, fieldType):\n self.fieldType = fieldType\n self.par = par\n\n \"\"\"1D Sine\"\"\"\n self.SineAmp = 1\n\n \"\"\"Gaussian\"\"\"\n nG = 10\n kernelPar = 0.001\n self.zGauss = np.linspace(self.par.zMin-0.2, self.par.zMax+0.2, nG)\n GP1 = GP(kernelPar)\n kernelMat = GP1.getKernelMatrix(self.zGauss.reshape(nG,1),self.zGauss.reshape(nG,1))\n cholKernelMat = scipy.linalg.cholesky(kernelMat)\n valGauss = 5*np.ones(nG) + np.dot(cholKernelMat,np.random.randn(nG))\n self.fGauss = interpolate.interp1d(self.zGauss,valGauss,'cubic')\n\n \"\"\"Random\"\"\"\n nR = 50\n maxRandomValue = 10\n zRand = np.linspace(self.par.zMin-0.2, self.par.zMax+0.2, nR)\n valRand = np.random.rand(nR)*maxRandomValue\n self.fRand = interpolate.interp1d(zRand,valRand,'cubic')\n\n \"\"\"Predefined\"\"\"\n self.zDef = np.array([0,-0.1,-0.3,-0.5,-0.7,-0.9,-1])\n self.fDef = np.array([3,8,8,2,7,6,4])\n self.fDefInterp = interpolate.interp1d(self.zDef, self.fDef, 'cubic')\n\n minMaxtestIn = np.linspace(par.zMin,par.zMax,1000)\n minMaxTestOut = self.fDefInterp(minMaxtestIn)\n\n if fieldType == 'sine':\n self.fieldLimit = [-self.SineAmp,self.SineAmp]\n elif fieldType == 'random':\n self.fieldLimit = [0,maxRandomValue]\n elif fieldType == 'predefined':\n self.fieldLimit = [np.min(minMaxTestOut)-0.2, np.max(minMaxTestOut)+0.2]\n elif fieldType == 'gauss':\n self.fieldLimit = [np.min(valGauss)-1, np.max(valGauss)+1]\n elif fieldType == 'peak':\n self.fieldLimit = [0,11]\n\n def getField(self,z):\n if self.fieldType == 'sine':\n f = self.SineAmp * np.sin(z)\n elif self.fieldType == 'random':\n f = self.fRand(z)\n elif self.fieldType == 'predefined':\n f = self.fDefInterp(z)\n elif self.fieldType == 'gauss':\n f = self.fGauss(z)\n elif self.fieldType == 'peak':\n if isinstance(z,float):\n return 8*math.exp(-((z+0.5)/1e-2) **2)*np.eye(1)+2\n else:\n f = np.zeros(z.shape)\n for i in range(len(z)):\n f[i] = 8*math.exp(-((z[i]+0.5)/1e-2) **2)+2\n return f\n\n def updateField(self, par, t):\n if t < par.pulseTime:\n self.SineAmp = np.cos(10*math.pi * t / par.pulseTime)\n\n fDefNew = copy.deepcopy(self.fDef)\n for i in range(len(self.zDef)):\n if self.par.varTimeKernelLoc[0] <= self.zDef[i] <= self.par.varTimeKernelLoc[1]:\n fDefNew[i] = self.fDef[i] * (1 + 0.3 * np.sin(10*math.pi * t / par.pulseTime))\n self.fDefInterp = interpolate.interp1d(self.zDef, fDefNew, 'cubic')\n\nclass gmrf:\n def __init__(self,par,nGridZ,nEdge):\n \"GMRF properties\"\n self.par = par\n\n self.zMin = par.zMin\n self.zMax = par.zMax\n\n self.nGridZ = nGridZ\n self.nEdge = nEdge\n\n self.valueT = par.valueT # Precision value for beta regression\n self.dt = par.dt\n\n \"Distance between two vertices in x and y without edges\"\n self.dz = round((self.zMax - self.zMin) / (self.nGridZ - 1),5)\n\n self.nZ = self.nGridZ + 2 * self.nEdge # Total number of vertices in y with edges\n self.nP = self.nZ\n\n self.zMinEdge = self.zMin - self.nEdge * self.dz\n self.zMaxEdge = self.zMax + self.nEdge * self.dz\n\n self.z = np.linspace(self.zMinEdge, self.zMaxEdge, self.nZ) # Vector of z grid values\n\n 
\"Precision matrix for z values (without regression variable beta)\"\n self.Lambda = functions.getPrecisionMatrix(self)\n\n \"Mean augmented bayesian regression\"\n self.nBeta = par.nBeta\n\n F = np.ones((self.nP, self.nBeta))\n FSparse = sp.csr_matrix(F)\n FTSparse = sp.csr_matrix(F.T)\n T = self.valueT * np.eye(self.nBeta)\n Tinv = np.linalg.inv(T)\n TSparse = sp.csr_matrix(T)\n TinvSparse = sp.csr_matrix(Tinv)\n\n \"Augmented prior precision matrix\"\n precPriorUpperRight = self.Lambda.dot(-1 * FSparse)\n precPriorLowerLeft = -1 * FTSparse.dot(self.Lambda)\n precPriorLowerRight = sp.csr_matrix.dot(FTSparse, self.Lambda.dot(FSparse)) + TSparse\n precH1 = sp.hstack([self.Lambda, precPriorUpperRight])\n precH2 = sp.hstack([precPriorLowerLeft, precPriorLowerRight])\n self.precCondSparse = sp.vstack([precH1, precH2]).tocsr()\n\n \"Augmented prior covariance matrix\"\n covPriorUpperLeft = sp.linalg.inv(self.Lambda.tocsc()) + sp.csr_matrix.dot(FSparse,TinvSparse.dot(FTSparse))\n covPriorUpperRight = FSparse.dot(Tinv)\n covPriorLowerLeft = covPriorUpperRight.T\n covPriorLowerRight = TinvSparse\n covH1 = sp.hstack([covPriorUpperLeft, covPriorUpperRight])\n covH2 = sp.hstack([covPriorLowerLeft, covPriorLowerRight])\n self.covCond = np.array(sp.vstack([covH1, covH2]).todense())\n\n self.diagCovCond = self.covCond.diagonal().reshape(self.nP + self.nBeta, 1)\n\n \"Prior and conditioned mean\"\n self.meanPrior = np.zeros((self.nP + self.nBeta, 1))\n self.meanCond = np.zeros((self.nP + self.nBeta, 1))\n\n self.covLevels = np.linspace(-0.2, min(np.amax(self.diagCovCond) + 0.1, 5), 20) # TODO Adapt\n\n \"Sequential bayesian regression\"\n self.bSeq = np.zeros((self.nP + self.nBeta, 1))\n\n def bayesianUpdate(self, fMeas, Phi):\n \"\"\"Update conditioned precision matrix\"\"\"\n R = np.dot(Phi, np.dot(self.covPrior, Phi.T)) + self.par.ov2 * np.eye(\n len(fMeas)) # covariance matrix of measurements\n temp1 = np.dot(Phi, self.covPrior)\n temp2 = np.dot(np.linalg.inv(R), temp1)\n temp3 = np.dot(Phi.T, temp2)\n\n self.covCond = self.covPrior - np.dot(self.covPrior, temp3)\n # self.covCond = np.linalg.inv((np.linalg.inv(self.covPrior)+1/self.par.ov2*np.dot(Phi.T,Phi))) # alternative way\n self.diagCovCond = self.covCond.diagonal().reshape(self.nP + self.nBeta, 1)\n\n \"Update mean\"\n if self.par.belief == 'regBayesTrunc':\n self.meanCond = self.meanPrior + 1 / self.par.ov2 * np.dot(self.covCond,\n np.dot(Phi.T, fMeas - np.dot(Phi, self.meanPrior)))\n else:\n self.meanCond = np.dot(self.covPrior, np.dot(Phi.T, np.dot(np.linalg.inv(R), fMeas)))\n\n # Also update bSeq and precCond in case seq. 
belief update is used for planning\n PhiT = Phi.T\n PhiTSparse = sp.csr_matrix(PhiT)\n self.bSeq = self.bSeq + 1 / self.par.ov2 * np.dot(PhiT,fMeas) # sequential update canonical mean\n self.precCondSparse = self.precCondSparse + 1 / self.par.ov2 * PhiTSparse.dot(PhiTSparse.T)\n\n def seqBayesianUpdate(self, fMeas, Phi):\n PhiT = Phi.T\n PhiTSparse = sp.csr_matrix(PhiT)\n\n hSeq = sp.linalg.spsolve(self.precCondSparse, PhiTSparse).T\n self.bSeq = self.bSeq + fMeas/self.par.ov2 * PhiT # sequential update canonical mean\n self.precCondSparse = self.precCondSparse + 1 / self.par.ov2 * PhiTSparse.dot(PhiTSparse.T) # sequential update of precision matrix\n self.meanCond = sp.linalg.spsolve(self.precCondSparse, self.bSeq).reshape(self.nP+self.nBeta,1)\n self.diagCovCond = np.subtract(self.diagCovCond,np.multiply(hSeq,hSeq).reshape(self.nP+self.nBeta,1) / (self.par.ov2 + np.dot(Phi, hSeq)[0]))\n \"\"\" Works too:\n self.covCond = np.linalg.inv(self.precCond)\n self.diagCovCond = self.covCond.diagonal().reshape(self.nP + self.nBeta, 1)\n \"\"\"\n\nclass node:\n def __init__(self, par, gmrf, auv):\n self.par = par\n self.gmrf = gmrf\n self.auv = copy.deepcopy(auv)\n self.rewardToNode = 0\n self.accReward = 0\n self.actionToNode = []\n self.depth = 0\n self.parent = []\n self.children = []\n self.visits = 1\n self.D = []\n self.GP = GP(par.BOKernelPar)\n\n def getFeasibleInput(self):\n thetaUB = (self.par.maxStepsize - self.auv.derivZ) / (2 * self.par.nTrajPoints)\n thetaLB = (-self.par.maxStepsize - self.auv.derivZ) / (2 * self.par.nTrajPoints)\n return thetaLB,thetaUB\n\n def action2theta(self,action):\n thetaLB, thetaUB = self.getFeasibleInput()\n return (action * self.par.maxStepsize/(2*self.par.nTrajPoints) + (thetaLB+thetaUB)/2)\n\n def theta2action(self,theta):\n thetaLB, thetaUB = self.getFeasibleInput()\n return ((theta-(thetaLB+thetaUB)/2) * 2*self.par.nTrajPoints/self.par.maxStepsize)\n","sub_path":"NanoPi_Neo_Air/classes.py","file_name":"classes.py","file_ext":"py","file_size_in_byte":11181,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"389170155","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Aug 4 14:37:45 2020\n\neyeball_from_file.py\n--------------------\nThis script allows you to replot any of the eyeballing plots from scratch using \nsaved data for original, deterended, lowess-fitted lcs and periododram period/powers\n\n\n@author: mbattley\n\"\"\"\n\nfrom lc_download_methods_new import lc_from_csv\n\nsave_path = ''\ntic = ''\n\n# Open detrended data for lightcurve\ndetrended_lc = lc_from_csv(save_path + 'Detrended_lcs/{}_detrended_lc.csv'.format(tic))\ntime = detrended_lc.time\ndetrended_flux = detrended_lc.flux","sub_path":"eyeball_from_file.py","file_name":"eyeball_from_file.py","file_ext":"py","file_size_in_byte":579,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"325975650","text":"from django.shortcuts import render\nfrom django.contrib.auth.forms import AuthenticationForm\n\ndef check_log_in(request):\n username = None\n if request.user.is_authenticated():\n user = request.user\n if user.is_staff == False:\n message = \"You are not an authorized staff member.\"\n form = AuthenticationForm()\n context = {'form': form,\n 'message': message}\n return render(request, 'obits/login.html', context)\n else:\n message = \"Please log-in to add a new record.\"\n form = AuthenticationForm()\n context = {'form': form,\n 'message': message}\n 
return render(request, 'obits/login.html', context)\n","sub_path":"obits/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"458878053","text":"\"\"\"\nThis script would convert a pre-trained TF model to a servable version for TF Serving.\n\nCommand: python3 models/objectdetection/mobilenet/export.py \\\n --export_dir \\\n --version \\\n --frozen_graph \n\nReturns:\n * A TF Servable model\n\n* Run the command from root dir after setting the PYTHONPATH to the root dir\n\"\"\"\nfrom __future__ import print_function\n\nimport os\nimport tensorflow as tf\nimport argparse\nfrom logger import logger\n\n# Load frozen graph utils\nfrom tensorflow.python.util import compat\nfrom tensorflow.python.platform import gfile\n\n# TF Libraries to export model into .pb file\nfrom tensorflow.python.client import session\nfrom tensorflow.python.saved_model import signature_constants\nfrom tensorflow.core.protobuf import rewriter_config_pb2\nfrom tensorflow.tools.graph_transforms import TransformGraph\n\n\ndef load_graph_from_pb(model_filename):\n logger.info(\"Loading the model.\")\n with tf.Session() as sess:\n try:\n with gfile.FastGFile(model_filename, 'rb') as f:\n data = compat.as_bytes(f.read())\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(data)\n return graph_def\n except FileNotFoundError as e:\n logger.error(e)\n logger.error('Loading model {} failed.'.format(model_filename))\n\n\nif __name__ == \"__main__\":\n try:\n # Parse args\n PARSER = argparse.ArgumentParser(\n description='Exporting the object detection model')\n\n PARSER.add_argument('--version', default='1', type=str)\n PARSER.add_argument('--frozen_graph', default='weights/ssdlite_mobilenet_v2_coco_2018_05_09/frozen_inference_graph.pb', type=str)\n PARSER.add_argument('--export_root', default='exports/objectdetection/mobilenet/', type=str)\n ARGS = PARSER.parse_args()\n # Define Consts\n VERSION = ARGS.version\n FROZEN_GRAPH = ARGS.frozen_graph\n EXPORT_PATH_BASE = ARGS.export_root\n EXPORT_PATH = os.path.join(\n tf.compat.as_bytes(EXPORT_PATH_BASE),\n tf.compat.as_bytes(VERSION))\n\n logger.info(\"Exporting model {} to {} version {}\".format(FROZEN_GRAPH, EXPORT_PATH, VERSION))\n\n input_names = 'image_tensor'\n output_names = ['detection_boxes', 'detection_classes', 'detection_scores', 'num_detections']\n\n with tf.Session() as sess:\n input_tensor = tf.placeholder(dtype=tf.uint8, shape=(None, None, None, 3), name=input_names)\n\n graph_def = load_graph_from_pb(FROZEN_GRAPH)\n outputs = tf.import_graph_def(graph_def,\n input_map={'image_tensor': input_tensor},\n return_elements=output_names,\n name='')\n outputs = [sess.graph.get_tensor_by_name(ops.name + ':0')for ops in outputs]\n outputs = dict(zip(output_names, outputs))\n\n transforms = [\"add_default_attributes\",\n \"quantize_weights\", \"round_weights\",\n \"fold_batch_norms\", \"fold_old_batch_norms\"]\n logger.info('Quantizing model')\n quantized_graph = TransformGraph(input_graph_def=graph_def,\n inputs=input_names,\n outputs=output_names,\n transforms=transforms)\n\n with tf.Graph().as_default():\n tf.import_graph_def(quantized_graph, name='')\n\n # Optimizing graph ## PRESENT: optimize_tensor_layout=True\n rewrite_options = rewriter_config_pb2.RewriterConfig()\n rewrite_options.optimizers.append('pruning')\n rewrite_options.optimizers.append('constfold')\n rewrite_options.optimizers.append('layout')\n graph_options = 
tf.GraphOptions(rewrite_options=rewrite_options, infer_shapes=True)\n\n # Build model for TF Serving\n config = tf.ConfigProto(graph_options=graph_options)\n\n with session.Session(config=config) as sess:\n\n logger.info('Exporting trained model to {}'.format(EXPORT_PATH))\n\n builder = tf.saved_model.builder.SavedModelBuilder(EXPORT_PATH)\n tensor_info_inputs = {'inputs': tf.saved_model.utils.build_tensor_info(input_tensor)}\n tensor_info_outputs = {}\n for k, v in outputs.items():\n tensor_info_outputs[k] = tf.saved_model.utils.build_tensor_info(v)\n\n detection_signature = (\n tf.saved_model.signature_def_utils.build_signature_def(\n inputs=tensor_info_inputs,\n outputs=tensor_info_outputs,\n method_name= signature_constants.PREDICT_METHOD_NAME))\n\n builder.add_meta_graph_and_variables(\n sess, [tf.saved_model.tag_constants.SERVING],\n signature_def_map={'predict_images': detection_signature,\n signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY: detection_signature,\n },\n )\n builder.save()\n logger.info(\"Exporting Succeded. (saved at {}/saved_model.pb)\".format(EXPORT_PATH))\n except Exception as e:\n logger.error(e)\n logger.error('Exporting model failed.')\n","sub_path":"Disha-Pattani/models/objectdetection/export.py","file_name":"export.py","file_ext":"py","file_size_in_byte":5548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"171441429","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.6 (3379)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /usr/local/lib/python3.6/dist-packages/calcutils/__init__.py\n# Compiled at: 2019-11-02 01:58:26\n# Size of source mod 2**32: 114 bytes\nfrom calcutils.calcutils import evalall\nall_values = ''\nfor exp in evalall():\n all_values += '%s;\\n' % exp","sub_path":"pycfiles/calcutils-pkg-0.0.1.linux-x86_64.tar/__init__.cpython-36.py","file_name":"__init__.cpython-36.py","file_ext":"py","file_size_in_byte":401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"557379464","text":"__author__ = \"Dohoon Lee\"\r\n__copyright__ = \"Copyright 2019, Dohoon Lee\"\r\n__email__ = \"dohlee.bioinfo@gmail.com\"\r\n__license__ = \"MIT\"\r\n\r\nimport itertools\r\n\r\nfrom os import path, listdir\r\nfrom snakemake.shell import shell\r\n\r\n# Define utility function.\r\ndef get_common_prefixes(strings):\r\n all_same = lambda x: all(x[0] == y for y in x)\r\n\r\n prefix_tuples = itertools.takewhile(all_same, zip(*strings))\r\n return ''.join(x[0] for x in prefix_tuples).strip('.')\r\n\r\ndef is_defined_by_user(*params):\r\n extra = snakemake.params.get('extra', '')\r\n for param in params:\r\n if param in extra:\r\n return True\r\n return False\r\n\r\n# Extract log.\r\nlog = snakemake.log_fmt_shell(stdout=False, stderr=True)\r\n\r\n# Extract parameters.\r\nextra = snakemake.params.get('extra', '')\r\n\r\n# Extract required inputs.\r\nreads = snakemake.input.reads\r\nif len(reads) == 2:\r\n read_command = '-1 %s -2 %s' % (reads[0], reads[1])\r\nelse:\r\n read_command = '%s' % (reads[0])\r\n\r\nindex_dir = snakemake.input.index_dir\r\nprefix = get_common_prefixes(listdir(index_dir))\r\nindex_command = path.join(index_dir, prefix)\r\n\r\n# Extract required outputs.\r\noutput = snakemake.output[0]\r\nif output.endswith('.sorted.bam'):\r\n if 'mapq_cutoff' in snakemake.params:\r\n postprocess_command = '| samtools view -bS -q %d - | samtools sort > %s' % (snakemake.params.mapq_cutoff, 
output)\r\n else:\r\n postprocess_command = '| samtools view -bS - | samtools sort > %s' % (output)\r\nelse:\r\n if 'mapq_cutoff' in snakemake.params:\r\n postprocess_command = '| samtools view -bS -q %d - > %s' % (snakemake.params.mapq_cutoff, output)\r\n else:\r\n postprocess_command = '| samtools view -bS - > %s' % (output)\r\n\r\nsam_command = '--sam' if not is_defined_by_user('--sam', '-S') else ''\r\n\r\n# Execute shell command.\r\nshell(\r\n \"(\"\r\n \"bowtie \"\r\n \"{index_command} \"\r\n \"{read_command} \"\r\n \"{sam_command} \"\r\n \"{extra} \"\r\n \"--threads {snakemake.threads} \"\r\n \"{postprocess_command}) \"\r\n \"{log}\"\r\n)\r\n","sub_path":"bowtie/wrapper.py","file_name":"wrapper.py","file_ext":"py","file_size_in_byte":2015,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"548630927","text":"import string\nimport sys\nimport math\nimport os\n\nfrom PIL import Image, ImageDraw, ImageFont, ImageChops, ImageStat, ImageOps\n\nimport log\n\nargs = sys.argv[1:]\n# get the image\nif len(args) >= 1:\n\tbase = Image.open(args[-1]).convert('L')\nelse:\n\tsys.exit()\n\nbase_w, base_h = base.size\nfont_size = 16\ncell_width = 9.1\ncell_height = 19\n\n# base dimensions in pixels\nbase_w, base_h = base.size\n\n# base dimensions in cells\nwidth = math.floor(base_w / cell_width)\nheight = math.floor(base_h / cell_height)\n\n# target dimension in cells\ntarget_cw = width\n\n# invert image ?\ninverted = False\n\nfor a in args[:-1]:\n\t# print(a)\n\tif a == \"--invert\":\n\t\tinverted = True\n\telif a.startswith(\"--width=\"):\n\t\tsegs = a.split(\"=\", 1)\n\t\ttarget_cw = int(segs[1])\n\t# elif a.startswith(\"--height=\"):\n\t# \tsegs = a.split(\"=\", 1)\n\t# \ttarget_ch = int(segs[1])\n\nif inverted:\n\tbase = ImageOps.invert(base)\n\n# scale image\ntarget_pw = target_cw*cell_width\nwpercent = (target_pw/float(base.size[0]))\ntarget_ph = int((float(base.size[1])*float(wpercent)))\nbase = base.resize((int(target_pw), int(target_ph)), Image.ANTIALIAS)\n\n# base dimensions in cells\nwidth = math.floor(target_pw / cell_width)\nheight = math.floor(target_ph / cell_height)\n\n# get a font\nfnt = ImageFont.truetype('fira_code.ttf', font_size)\n\nlog.pushOrigin(\"Ascii Maker\")\n\ndictionary = \" 0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ!\\\"#$%&\\'()*+,-./:;?@[\\\\]^_`{|}~<=>\"\n\nimages = {}\nlog.printLogNormal(\"Rendering stamps\")\nfor character in dictionary:\n\tnew_image = Image.new('L', (int(cell_width), int(cell_height)), (255))\n\tnew_draw = ImageDraw.Draw(new_image)\n\tnew_draw.text((0,0), character, font=fnt, fill=(0))\n\timages[character] = new_image\n\n\ndef best_character_at(x, y):\n\tbest_score = sys.maxsize\n\tbest_char = \" \"\n\txx = x * cell_width\n\tyy = y * cell_height\n\treference = base.crop((xx, yy, xx+cell_width, yy+cell_height))\n\n\tfor c in dictionary:\n\t\tdifference = ImageChops.difference(reference, images[c])\n\t\tstat = ImageStat.Stat(difference)\n\t\tscore = stat.sum[0]\n\t\tif score < best_score:\n\t\t\tbest_score = score\n\t\t\tbest_char = c\n\t\t\tif score == 0:\n\t\t\t\treturn best_char\n\n\treturn best_char\n\n\nlog.printLogNormal(\"Rendering image\")\nfor y in range(height):\n\tfor x in range(width):\n\t\tsys.stdout.write(best_character_at(x, y))\n\tsys.stdout.write(\"\\n\")\n\nlog.popOrigin()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} 
+{"seq_id":"241650098","text":"from flask import Flask, request, render_template\nfrom chatterbot import ChatBot\nfrom chatterbot.trainers import ChatterBotCorpusTrainer\nfrom chatterbot.trainers import ListTrainer\n\napp = Flask(__name__)\nbot = ChatBot(\n 'Hedwig',\n storage_adapter='chatterbot.storage.SQLStorageAdapter',\n logic_adapters=[\n 'chatterbot.logic.MathematicalEvaluation',\n 'chatterbot.logic.TimeLogicAdapter'\n ],\n database_uri='sqlite:///database.sqlite3'\n)\n\n@app.route(\"/\")\ndef introduce():\n # from data.about import bot\n return render_template(\"index.html\", data = \"Hi\")\n\n@app.route(\"/get\")\ndef get_bot_response():\n if request.method == \"POST\":\n userText = request.args.get('chat-input')\n result = bot.get_response(userText)\n return render_template(\"base.html\", data = result)\n else :\n return render_template(\"base.html\")","sub_path":"todo_app/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"548558669","text":"from django import forms\n\nfrom .widgets import (\n MarkdownxWidget,\n AdminMarkdownxWidget,\n)\n\n\nclass MarkdownxFormField(forms.CharField):\n\n def __init__(self, *args, **kwargs):\n super(MarkdownxFormField, self).__init__(*args, **kwargs)\n\n if issubclass(self.widget.__class__, forms.widgets.MultiWidget):\n if not any([\n issubclass(x.__class__, MarkdownxWidget)\n for x in self.widget.widgets\n ]):\n self.widget = MarkdownxWidget()\n elif not issubclass(self.widget.__class__, MarkdownxWidget):\n self.widget = MarkdownxWidget()\n","sub_path":"markdownx/fields.py","file_name":"fields.py","file_ext":"py","file_size_in_byte":629,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"257049346","text":"import pytest\nfrom datacube import Datacube\n\nfrom datacube_ows.cube_pool import get_cube, pool_size, release_cube\n\n\ndef test_basic_cube_pool():\n dc_1 = get_cube(app=\"test\")\n dc_2 = get_cube(app=\"test\")\n assert dc_1 != dc_2\n release_cube(dc_1, app=\"test\")\n release_cube(dc_2, app=\"test\")\n assert pool_size(app=\"test\") >= 2\n\n\ndef test_release_nonalloc():\n dc_alloc = get_cube(app=\"test\")\n dc_unalloc = Datacube(app=\"test\")\n assert dc_alloc != dc_unalloc\n release_cube(dc_alloc, app=\"test\")\n with pytest.raises(Exception) as e:\n release_cube(dc_unalloc, app=\"test\")\n assert \"non-pool datacube\" in str(e.value)\n\n","sub_path":"integration_tests/test_cube_pool.py","file_name":"test_cube_pool.py","file_ext":"py","file_size_in_byte":651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"483765767","text":"import pandas as pd\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\n\ndef load_dataset(train_size=0.8):\n \"\"\"\n Loads dataset, standardizes features and splits into training and validation sets,\n which contain tuples of (input, target)\n \"\"\"\n wine = pd.read_csv('dataset/winequality-red.csv')\n features = wine.drop('quality', axis=1)\n # in the dataset quality is in range of [3, 8]\n # so let's change it to [0, 5]\n targets = wine['quality'] - 3\n\n standardized_features = (features - features.mean()) / features.std()\n dataset = pd.concat([standardized_features, targets], axis=1)\n \n train, validation = train_test_split(dataset, test_size=1-train_size)\n\n # separating input and target for training set\n train_inp = train.drop('quality', axis=1)\n 
train_inp = np.expand_dims(train_inp, axis=2)\n train_targ = train['quality'].values\n # creates one-hot representation for each target\n encoded = np.zeros((len(train_targ), 6, 1))\n encoded[np.arange(len(train_targ)), train_targ] = 1\n # zips input and target\n training_set = zip(train_inp, encoded)\n\n # same for validation set\n valid_inp = validation.drop('quality', axis=1)\n valid_inp = np.expand_dims(valid_inp, axis=2)\n valid_targ = validation['quality'].values\n encoded = np.zeros((len(valid_targ), 6, 1))\n encoded[np.arange(len(valid_targ)), valid_targ] = 1\n validation_set = zip(valid_inp, encoded)\n\n return list(training_set), list(validation_set)\n","sub_path":"load.py","file_name":"load.py","file_ext":"py","file_size_in_byte":1509,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"439570068","text":"#!/usr/bin/env python3\n\n# MIT License\n# \n# Copyright (c) 2021, Alex M. Maldonado\n# \n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n# \n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n__version__ = '0.0.1'\n\n\"\"\"\n# Partitioner\n\nA partition, in this contex, is one or more molecules from a larger atomic\ncluster. 
For example, a dimer partition of a tetramer would be one of the\npossible combinations containing two molecules out of the possible four\nmolecules.\n\nThis script will take a single structure or trajectory, identify every possible\npartition up to a specified size, then write xyz coordinates for each partition.\nAlso, a set of energy+gradient calculations using ORCA 4 can be prepared; these\ncalculations are pertinent for mbGDML data sets.\n\nIf multiple structures are desired, the atoms must be in the same order.\n\n## Requirements\n- mbgdml\n\"\"\"\n\nimport os\nimport argparse\nfrom mbgdml.utils import write_xyz\nfrom mbgdml.partition import partition_structures\nfrom mbgdml.data import mbGDMLDataset, structure\nfrom mbgdml.calculate import partition_engrad\n\n\n### Universal Information ###\n\ncalc_theory = 'MP2'\ncalc_basis = 'def2-TZVP'\ncalc_options = 'TightSCF'\n\ndef main():\n\n parser = argparse.ArgumentParser(\n description='Partitions structure(s) for energy+gradient calculations.'\n )\n parser.add_argument(\n 'xyz_file', metavar='xyz_file', type=str, nargs='?',\n help='Path to xyz file with one or multiple structures to partition.'\n )\n parser.add_argument(\n '--dir_name', metavar='dir_name', type=str, nargs='?', default='partitions',\n help='Name of the top-level directory for all partitions.'\n )\n parser.add_argument(\n '--save_dir', metavar='save_dir', type=str, nargs='?', default='.',\n help='Path to save npz data set. Defaults to current directory.'\n )\n parser.add_argument(\n '-o', '--overwrite', action='store_true', help='Overwrite npz data set.'\n )\n parser.add_argument(\n '-s', '--size', metavar='size', type=int, nargs='?', default=4,\n help='Maximum size of partitions.'\n )\n parser.add_argument(\n '--calcs', action='store_true',\n help='Write MP2/def2-TZVP ORCA 4 energy+gradient calculations.'\n )\n parser.add_argument(\n '--num_calcs', metavar='num_calcs', type=int, nargs='?', default=100,\n help='Number of structures to include in the energy+gradient calculations.'\n )\n parser.add_argument(\n '--cluster_name', metavar='cluster_name', type=str, nargs='?', default='cluster',\n help='Base name describing the original cluster.'\n )\n parser.add_argument(\n '--r_unit', metavar='r_unit', type=str, nargs='?', default='Angstrom',\n help='Units of distance for the structure(s): Angstrom or bohr.'\n )\n\n\n\n args = parser.parse_args()\n\n print(f'\\nPartitioner v{__version__}')\n print('Written by Alex M. Maldonado (@aalexmmaldonado)\\n')\n\n # Ensures paths end in a '/'.\n save_dir = args.save_dir\n if save_dir[-1] != '/':\n save_dir += '/'\n \n # Checks to see if partitions already exists.\n partitions_dir_path = f'{save_dir}{args.dir_name}/'\n if os.path.isdir(partitions_dir_path) and not args.overwrite:\n print(f'{partitions_dir_path} already exists and overwrite is False.\\n')\n raise FileExistsError\n \n # Prepare to partition.\n print('Preparing partition directories ...')\n os.makedirs(partitions_dir_path, exist_ok=True)\n os.chdir(partitions_dir_path)\n\n # Parse file into mbGDML data set.\n print('Parsing XYZ file ... ')\n cluster_dataset = mbGDMLDataset()\n cluster_dataset.read_xyz(\n args.xyz_file, 'coords', r_unit=args.r_unit, energy_comments=False\n )\n print(f'Found {cluster_dataset.R.shape[0]} structure(s) '\n f'containing {cluster_dataset.z.shape[0]} atoms')\n \n # Partition all structures in the data set.\n print('Partitioning structure(s) ... 
')\n cluster_partitions = partition_structures(\n cluster_dataset.z, cluster_dataset.R, args.size\n )\n print(f'Identified {len(cluster_partitions)} partitions')\n partition_sizes = []\n for size in range(1, args.size + 1):\n partition_sizes.append(\n len([i for i in cluster_partitions if len(i.split(',')) == size])\n )\n print(f' {partition_sizes[-1]} {size}mer partitions')\n \n # Write all partitions and possibly energy+gradient calculations\n for partition in cluster_partitions:\n partition_size = len(partition.split(','))\n partition_name = f'{args.cluster_name}.mol{partition}'\n\n print(f'Working on {partition_name} partition')\n \n z = cluster_partitions[partition]['z']\n R = cluster_partitions[partition]['R']\n\n # XYZ file\n xyz_dir = f'{save_dir}{args.cluster_name}-{partition_size}.partitions/{partition_name}/'\n os.makedirs(xyz_dir, exist_ok=True)\n write_xyz(z, R, xyz_dir, partition_name)\n \n\n # Energy+Gradient calculation\n if args.calcs:\n # Adds '.first' if the number of calcs is not the all\n # of the structures.\n if R.shape != R[:args.num_calcs].shape:\n partition_name += f'.first{args.num_calcs}'\n \n partition_dir = f'{xyz_dir}{partition_name}-calcs/'\n os.makedirs(partition_dir, exist_ok=True)\n calculation_name = f'{partition_name}-orca.engrad-mp2.def2tzvp.frozencore'\n\n partition_engrad(\n 'orca',\n z,\n R[:args.num_calcs],\n partition_name,\n calculation_name,\n calculation_name,\n 'MP2',\n 'def2-TZVP',\n 0,\n 1,\n 'smp',\n 1,\n 6,\n 2,\n 00,\n options='TightSCF',\n control_blocks=(\n '%maxcore 8000\\n\\n'\n '%scf\\n ConvForced true\\nend'),\n calc_dir=partition_dir\n )\n\nif __name__ == \"__main__\":\n main()","sub_path":"scripts/partitioner.py","file_name":"partitioner.py","file_ext":"py","file_size_in_byte":7089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"182518848","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"\n\n MoEDAL and CERN@school - Plotting NOD information.\n\n See the README.md file and the GitHub wiki for more information.\n\n http://cernatschool.web.cern.ch\n\n\"\"\"\n\n# Import the code needed to manage files.\nimport os, glob\n\n#...for parsing the arguments.\nimport argparse\n\n#...for the logging.\nimport logging as lg\n\n# The NOD wrapper class.\nfrom wrappers.nod import NOD\n\nif __name__ == \"__main__\":\n\n print(\"*\")\n print(\"*==================================================*\")\n print(\"* MoEDAL and CERN@school: Plotting NOD information *\")\n print(\"*==================================================*\")\n print(\"*\")\n\n # Get the datafile path from the command line.\n parser = argparse.ArgumentParser()\n parser.add_argument(\"dataPath\", help=\"Path to the input dataset.\")\n parser.add_argument(\"-v\", \"--verbose\", help=\"Increase output verbosity\", action=\"store_true\")\n args = parser.parse_args()\n\n ## The root data path.\n data_path = args.dataPath\n\n # Check if the input file exists. 
If it doesn't, quit.\n if not os.path.exists(data_path):\n raise IOError(\"* ERROR: '%s' input file does not exist!\" % (data_path))\n\n ## The number of outer rings data path.\n nod_path = os.path.join(data_path, \"NOD\")\n if not os.path.isdir(nod_path):\n raise IOError(\"* ERROR: '%s' does not exist - no input data!\" % (nod_path))\n\n # Set the logging level.\n if args.verbose:\n level=lg.DEBUG\n else:\n level=lg.INFO\n\n # Configure the logging.\n lg.basicConfig(filename=os.path.join('./.', 'log_plot_nod.log'), filemode='w', level=level)\n\n lg.info(\" *\")\n lg.info(\" *==================================================*\")\n lg.info(\" * MoEDAL and CERN@school: Plotting NOD information *\")\n lg.info(\" *==================================================*\")\n lg.info(\" *\")\n lg.info(\" * Plotting number of oddities information in : '%s'\" % (nod_path))\n lg.info(\" *\")\n\n # Loop over the found oddity information.\n for i, nod_csv_path in enumerate(sorted(glob.glob(os.path.join(nod_path, \"*.csv\")))):\n\n ## The subject ID.\n sub_id = os.path.basename(nod_csv_path)[:-4]\n\n ## The NOD wrapper object.\n nod = NOD(nod_csv_path)\n\n ## The path of the plot image.\n nod_plot_image_path = os.path.join(nod_path, \"%s.png\" % (sub_id))\n\n # Make the plot.\n nod.make_frequency_histogram(nod_plot_image_path)\n","sub_path":"plot_nod.py","file_name":"plot_nod.py","file_ext":"py","file_size_in_byte":2475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"500983523","text":"#!/usr/bin/env python3\n\nimport math\n\n\nclass Error(Exception):\n \"\"\"Base exception for Calculator\n\n All other exceptions are based on this one.\n In case code needs to catch only Calculator's produced exceptions.\n \"\"\"\n pass\n\n\nclass BadOperator(Error):\n \"\"\"Bad operator exception for Calculator\"\"\"\n pass\n\n\nclass BadValue(Error):\n \"\"\"Bad value exception for Calculator\"\"\"\n pass\n\n\nclass OperatorHandler(object):\n \"\"\"Base for all operator handlers\n\n Extend implementing handle_*() method\n \"\"\"\n\n def handle(self, op, *args, **kwargs):\n \"\"\"Base handler for all other unary operators\n\n Expects operator and value as a string\n Returns value as a string\n \"\"\"\n f = getattr(self, 'handle_{op}'.format(op=op), None)\n if not f:\n raise BadOperator('Bad operator: {}'.format(op))\n return f(*args, **kwargs)\n\n def operations(self):\n return [v[7:] for v in dir(self) if v.startswith('handle_')]\n\n\nclass UnaryOperatorHandler(OperatorHandler):\n \"\"\"Unary operator handler\n \"\"\"\n\n def handle_sqrt(self, v):\n if v < 0:\n raise BadValue('Cannot sqrt: {}'.format(v))\n return math.sqrt(v)\n\n def handle_pow2(self, v):\n return math.pow(v, 2)\n\n def handle_reciproc(self, v):\n if v == 0:\n raise BadValue('Cannot divide by 0: {}'.format(v))\n\n\nclass BinaryOperatorHandler(OperatorHandler):\n \"\"\"Binary operator handler\n \"\"\"\n\n def handle_add(self, v1, v2):\n return v1 + v2\n\n def handle_sub(self, v1, v2):\n return v1 - v2\n\n def handle_mul(self, v1, v2):\n return v1 * v2\n\n def handle_div(self, v1, v2):\n if v2 == 0:\n raise BadValue('Cannot divide by 0: {}'.format(v2))\n return v1 / v2\n\n\nclass Calculator(object):\n \"\"\"Dummy calculator\n\n It can be used as a class (delegate) for GUI or CUI calculator.\n These should have some kind of input methanism (event based or raw_input)\n and execute `on_` methods accordingly.\n\n Error handling is done in simple way.\n If more sophisticated error handling is needed 
`error()` may be overriden.\n\n Exceptions may be used also, to do so - set strict = True after\n initialization.\n\n No memory operations are implemented, but this is not a neuroscience.\n \"\"\"\n\n def __init__(self, uoh=None, boh=None):\n \"\"\"\n If needed, handlers for Unary operators and Binary operators\n may be extended to support Decimal or something else.\n All is needed it to pass new handlers to __init__ (delegate)\n \"\"\"\n\n self.stored_op = ''\n self.result = '0'\n\n self.value = '0'\n self.clear_value = True\n\n if uoh is None:\n uoh = UnaryOperatorHandler()\n self.uoh = uoh\n\n if boh is None:\n boh = BinaryOperatorHandler()\n self.boh = boh\n\n self.strict = False\n\n def get_value(self):\n \"\"\"Returns value on screen as number\"\"\"\n value = float(self.value)\n\n if value % 1 == 0:\n return int(value)\n return value\n\n def get_result(self):\n \"\"\"Returns current result as number\"\"\"\n result = float(self.result)\n\n if result % 1 == 0:\n return int(result)\n return result\n\n def error(self, msg, reason=None):\n print('Error: {}'.format(msg))\n\n def on_digit(self, digit):\n if digit == '0' and self.value == '0':\n return\n\n if self.clear_value:\n self.value = ''\n self.clear_value = False\n\n self.value += str(digit)\n\n def on_sep(self):\n if '.' in self.value:\n self.error('Cannot have value with two seps')\n if self.strict:\n raise BadValue('Cannot have value with two seps')\n return\n\n if self.value == '':\n self.value = '0'\n\n self.value += '.'\n\n def on_sign(self):\n if self.value[0] == '-':\n self.value = self.value[1:]\n else:\n self.value = '-' + self.value\n\n def on_number(self, n):\n if n == '0' and self.value == '0':\n return\n\n if n % 1 == 0:\n n = int(n)\n\n self.value = str(n)\n self.clear_value = True\n\n def on_unary_op(self, op):\n try:\n value = self.uoh.handle(\n op,\n self.get_value())\n self.value = str(value)\n self.clear_value = True\n except Error as e:\n self.error(e)\n if self.strict:\n raise\n\n def on_binary_op(self, op):\n try:\n if self.stored_op != '':\n value = self.boh.handle(\n self.stored_op,\n self.get_result(),\n self.get_value())\n self.value = str(value)\n self.result = self.value\n self.clear_value = True\n self.stored_op = op\n except Error as e:\n self.error(e)\n if self.strict:\n raise\n\n def on_equal(self):\n try:\n if self.stored_op != '':\n value = self.boh.handle(\n self.stored_op,\n self.get_result(),\n self.get_value())\n self.value = str(value)\n\n self.result = self.value\n self.stored_op = ''\n self.clear_value = True\n except Error as e:\n self.error(e)\n if self.strict:\n raise\n\n def on_clear(self):\n if self.clear_value:\n return\n\n self.value = '0'\n self.clear_value = True\n\n def on_clear_all(self):\n self.stored_op = ''\n self.result = '0'\n\n self.value = '0'\n self.clear_value = True\n\n\nclass DiscoStyleUI(object):\n \"\"\"This is for fun.\n\n If I would need to do CUI I would use click, curses or even urwid.\n If this is possible GUI is also possible.\n \"\"\"\n debug = False\n\n def __init__(self, calc=None):\n \"\"\"\n Calculator() may be extended and supplied throught `calc` argument\n to this UI e.g. 
Delegation pattern\n \"\"\"\n\n if calc is None:\n calc = Calculator()\n self.calc = calc\n\n def menu_main(self):\n while True:\n if self.debug:\n print('Calculator result={}, '\n 'stored op={!r}, '\n 'clear_value={!r}'.format(self.calc.result,\n self.calc.stored_op,\n self.calc.clear_value))\n\n print('Calculator value: {}'.format(self.calc.get_value()))\n print('Calculator menu:')\n print('\\t1. Digit...')\n print('\\t2. Whole number...')\n print('\\t3. Separator')\n print('\\t4. Sign')\n print('\\t5. Unary Op...')\n print('\\t6. Binary Op...')\n print('\\t7. Equal')\n print('\\t8. Clear')\n print('\\t9. Clear All')\n print('\\t0. Quit')\n\n menu = input('Enter menu #: ')\n\n if menu == '1':\n self.menu_digit_input()\n elif menu == '2':\n self.menu_number_input()\n elif menu == '3':\n self.calc.on_sep()\n elif menu == '4':\n self.calc.on_sign()\n elif menu == '5':\n self.menu_unary_op()\n elif menu == '6':\n self.menu_binary_op()\n elif menu == '7':\n self.calc.on_equal()\n elif menu == '8':\n self.calc.on_clear()\n elif menu == '9':\n self.calc.on_clear_all()\n elif menu == '0' or menu.lower() == 'q':\n break\n else:\n print('ERROR! Bad menu #, try again')\n\n def menu_digit_input(self):\n while True:\n print('Calculator::Digit sumbenu:')\n digit = input('Enter the digit (ENTER to get back): ')\n if len(digit) == 0:\n break\n\n if not digit.isdigit():\n print('ERROR! Input is not a digit, try again')\n continue\n\n if len(digit) != 1:\n print('ERROR! Only one digit please, try again')\n continue\n\n self.calc.on_digit(digit)\n break\n\n def menu_number_input(self):\n while True:\n print('Calculator::Number sumbenu:')\n number = input('Enter the number (ENTER to get back): ')\n if len(number) == 0:\n break\n\n try:\n v = float(number)\n except ValueError:\n print('ERROR! Input is not a number, try again')\n continue\n\n self.calc.on_number(v)\n break\n\n def menu_unary_op(self):\n while True:\n print('Calculator::Unary operator sumbenu:')\n print('\\t1. Square root')\n print('\\t2. Power of two')\n print('\\t3. 1/x')\n print('\\t4. <-Back')\n\n submenu = input('Enter submenu #: ')\n\n if submenu == '1':\n self.calc.on_unary_op('sqrt')\n break\n elif submenu == '2':\n self.calc.on_unary_op('pow2')\n break\n elif submenu == '3':\n self.calc.on_unary_op('reciproc')\n break\n elif submenu == '4':\n break\n else:\n print('ERROR! Bad submenu #, try again')\n\n def menu_binary_op(self):\n while True:\n print('Calculator::Binary operator sumbenu:')\n print('\\t1. Add')\n print('\\t2. Subtract')\n print('\\t3. Multiply')\n print('\\t4. Divide')\n print('\\t5. <-Back')\n\n submenu = input('Enter submenu #: ')\n\n if submenu == '1':\n self.calc.on_binary_op('add')\n break\n elif submenu == '2':\n self.calc.on_binary_op('sub')\n break\n elif submenu == '3':\n self.calc.on_binary_op('mul')\n break\n elif submenu == '4':\n self.calc.on_binary_op('div')\n break\n elif submenu == '4':\n break\n else:\n print('ERROR! 
Bad submenu #, try again')\n\n def run(self):\n return self.menu_main()\n\n\nif __name__ == \"__main__\":\n import argparse\n\n parser = argparse.ArgumentParser(\n description='Calculator with Disco Style UI')\n\n parser.add_argument(\n '-d',\n '--debug',\n action='store_true',\n dest='debug',\n default=False,\n help='print debug')\n\n parser.add_argument(\n '-s',\n '--strict',\n action='store_true',\n dest='strict',\n default=False,\n help='raise exceptions on error')\n\n args = parser.parse_args()\n\n cui = DiscoStyleUI()\n\n cui.debug = args.debug\n cui.strict = args.strict\n cui.run()\n","sub_path":"calc.py","file_name":"calc.py","file_ext":"py","file_size_in_byte":11139,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"144536790","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\nfrom django.shortcuts import render\nfrom .models import Tweet\n\n\n# Create your views here.\ndef tweet_list(request):\n\tqueryset_list = Tweet.objects.all()#GET alle tweetene i databasen\n\t#Paginator kode\n\tpaginator = Paginator(queryset_list, 10) # Show 10 contacts per page\n\tpage_request_var = 'page'\n\tpage = request.GET.get(page_request_var)\n\ttry:\n\t\tqueryset = paginator.page(page)\n\texcept PageNotAnInteger:\n\t\t# If page is not an integer, deliver first page.\n\t\tqueryset = paginator.page(1)\n\texcept EmptyPage:\n\t\t# If page is out of range (e.g. 9999), deliver last page of results.\n\t\tqueryset = paginator.page(paginator.num_pages)\n\n\t\n\tcontext = {\n\t\t'object_list': queryset,\n\t\t'title': 'List',\n\t\t'page_request_var':page_request_var,\n\t}\n\treturn render(request,'tweet_list.html',context)","sub_path":"gatherer/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":916,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"520621417","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Aug 8 16:55:20 2017\n\nTests for the models creation module.\n\n@author: Álvaro Barbero Jiménez\n\"\"\"\n\nimport tensorflow as tf\nimport numpy as np\n\nfrom neurowriter.models import tensorslice\nfrom neurowriter.models import CNNLSTMModel\n\n\ndef test_tensorslice_normal():\n \"\"\"Tensor slicing is performed correctly for data > slices\"\"\"\n data = np.array([\n [[1,1,1], [1,1,1], [1,1,1]],\n [[2,2,2], [2,2,2], [2,2,2]],\n [[3,3,3], [3,3,3], [3,3,3]],\n [[4,4,4], [4,4,4], [4,4,4]],\n [[5,5,5], [5,5,5], [5,5,5]],\n [[6,6,6], [6,6,6], [6,6,6]]\n ])\n datatensor = tf.constant(data)\n \n tests = [\n (1, [data]),\n (2, [data[0:3], data[3:6]]),\n (3, [data[0:2], data[2:4], data[4:6]]),\n (6, [data[0:1], data[1:2], data[2:3],\n data[3:4], data[4:5], data[5:6]])\n ]\n \n # We need a tensor flow session to run the graph\n sess = tf.Session()\n \n for nslices, expected in tests:\n obtained = [sess.run(tensorslice(datatensor, i, nslices)) for i in range(nslices)]\n print(\"nslices:\", nslices)\n print(\"Expected:\", expected)\n print(\"Obtained:\", obtained)\n assert(np.allclose(obtained, expected))\n\n\ndef test_tensorslice_small():\n \"\"\"Tensor slicing is performed correctly for data < slices\"\"\"\n data = np.array([\n [[1,1,1], [1,1,1], [1,1,1]],\n [[2,2,2], [2,2,2], [2,2,2]],\n [[3,3,3], [3,3,3], [3,3,3]]\n ])\n datatensor = tf.constant(data)\n\n nulldata = np.zeros([0,3,3])\n tests = [\n (4, [nulldata, data[0:1], data[1:2], data[2:3]]),\n (5, [nulldata, 
data[0:1], nulldata, data[1:2], data[2:3]]),\n (10, [nulldata, nulldata, nulldata, data[0:1], nulldata,\n nulldata, data[1:2], nulldata, nulldata,data[2:3]])\n ]\n \n # We need a tensor flow session to run the graph\n sess = tf.Session()\n \n for nslices, expected in tests:\n obtained = [sess.run(tensorslice(datatensor, i, nslices)) for i in range(nslices)]\n print(\"nslices:\", nslices)\n print(\"Expected:\", expected)\n print(\"Obtained:\", obtained)\n for x, y in zip(obtained, expected):\n assert(x.shape == y.shape)\n assert(np.allclose(x, y))\n\n\ndef test_train_cnnlstm():\n \"\"\"CNN-LSTM models can built correctly\"\"\"\n paramsets = [\n {\"inputtokens\": 128, \"vocabsize\": 1000},\n {\"inputtokens\": 128, \"vocabsize\": 1000, \"convlayers\": 0, \"lstmunits\": 64, \"lstmdropout\": 0, \"embedding\": 256,\n \"embdropout\": 0},\n {\"inputtokens\": 128, \"vocabsize\": 1000, \"convlayers\": 1, \"kernels\": 64, \"kernelsize\": 3,\n \"convdropout\": 0, \"lstmunits\": 64, \"lstmdropout\": 0, \"embedding\": 256, \"embdropout\": 0},\n {\"inputtokens\": 128, \"vocabsize\": 1000, \"convlayers\": 2, \"kernels\": 256, \"kernelsize\": 5,\n \"convdropout\": 0.5, \"lstmunits\": 128, \"lstmdropout\": 0.1, \"embedding\": 512, \"embdropout\": 0.5},\n {\"inputtokens\": 128, \"vocabsize\": 1000, \"convlayers\": 3, \"kernels\": 1024, \"kernelsize\": 15,\n \"convdropout\": 0.9, \"lstmunits\": 512, \"lstmdropout\": 0.9, \"embedding\": 1024, \"embdropout\": 0.5},\n ]\n\n for paramset in paramsets:\n model = CNNLSTMModel.create(**paramset)\n assert hasattr(model, \"compile\")\n model.compile(optimizer='sgd', loss='categorical_crossentropy')\n assert hasattr(model, \"fit_generator\")\n assert hasattr(model, \"summary\")\n model.summary()\n","sub_path":"tests/test_models.py","file_name":"test_models.py","file_ext":"py","file_size_in_byte":3505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"161416411","text":"from flask import Flask, Response\nfrom os.path import join, dirname, abspath\nfrom babeljs import transformer\n\nDIR_ROOT = abspath(dirname(__file__))\n\napp = Flask(__name__)\n\n\n@app.route(\"/\")\ndef home():\n jsx_path = join(DIR_ROOT, 'getComponent.jsx')\n javascript = transformer.transform(jsx_path)\n return Response(javascript, mimetype='application/javascript')\n\nif __name__ == \"__main__\":\n app.run()\n","sub_path":"rnpy.py","file_name":"rnpy.py","file_ext":"py","file_size_in_byte":409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"156394513","text":"\n\n#calss header\nclass _WORSHIPPER():\n\tdef __init__(self,): \n\t\tself.name = \"WORSHIPPER\"\n\t\tself.definitions = [u'someone who goes to a religious ceremony to worship God: ', u'someone who worships and performs religious ceremonies to a particular god or object: ', u'someone who enjoys or values a particular thing very much or too much: ']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'nouns'\n\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/nouns/_worshipper.py","file_name":"_worshipper.py","file_ext":"py","file_size_in_byte":512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"233234567","text":"import csv\r\nimport pandas as pd\r\nfrom datetime import date\r\nchunksize = 10 ** 6\r\ntrain = 
pd.read_csv(\"train.csv\",chunksize=chunksize)\r\nsumdist=0\r\ncheckin=[]\r\ncheckout=[]\r\ndef avg(l):\r\n\treturn sum(l)/len(l)\r\n\r\ndate_diffs=[]\r\nmin_day=10 ** 6\r\nmax_day=0\r\n\r\ndistances = []\r\nmin_dist=10 ** 6\r\nmax_dist=0\r\n\r\nuser_location_countries = []\r\nmin_user_loc_ctry=10 ** 6\r\nmax_user_loc_ctry=0\r\n\r\nuser_location_regions = []\r\nmin_user_loc_rgn =10 ** 6\r\nmax_user_loc_rgn = 0\r\n\r\nuser_location_cities = []\r\nmin_user_loc_cty =10 ** 6\r\nmax_user_loc_cty = 0\r\n\r\nsrch_destination_type_ids = []\r\nmin_desttype_id =10 ** 6\r\nmax_desttype_id = 0\r\n\r\nhotel_continents = []\r\nmin_hotel_cnts = 10 ** 6\r\nmax_hotel_cnts = 0\r\n\r\nhotel_countries = []\r\nmin_hotel_cntr = 10 ** 6\r\nmax_hotel_cntr = 0\r\n\r\nhotel_markets = []\r\nmin_hotel_mrkt = 10 ** 6\r\nmax_hotel_mrkt = 0\r\n\r\nsrch_adults_cnts = []\r\nmin_srch_adults = 10 ** 6\r\nmax_srch_adults = 0\r\n\r\nsrch_children_cnts = []\r\nmin_srch_chdrn = 10 ** 6\r\nmax_srch_chdrn = 0\r\n\r\nsrch_rm_cnts = []\r\nmin_srch_rm = 10 ** 6\r\nmax_srch_rm = 0\r\n\r\nis_bookings = []\r\nprint(train[\"orig_destination_distance\"].mean())\r\nexit(0)\r\nfor chunk in train:\r\n\tcount = 0\r\n\tfor c in chunk['is_booking']:\r\n\t\tis_bookings.append(c)\r\n\t\tcount+=1\r\n\t\tif count==500:\r\n\t\t\tcount=0\r\n\t\t\tbreak\r\n\tfor c in chunk['orig_destination_distance']:\r\n\t\tprint(c)\r\n\t\tdistances.append(c)\r\n\t\tif c>max_dist:\r\n\t\t\tmax_dist = c\r\n\t\tif cmax_user_loc_ctry:\r\n\t\t\tmax_user_loc_ctry = c\r\n\t\tif cmax_user_loc_rgn:\r\n\t\t\tmax_user_loc_rgn = c\r\n\t\tif cmax_user_loc_cty:\r\n\t\t\tmax_user_loc_cty = c\r\n\t\tif cmax_desttype_id:\r\n\t\t\tmax_desttype_id = c\r\n\t\tif cmax_hotel_cnts:\r\n\t\t\tmax_hotel_cnts = c\r\n\t\tif cmax_hotel_cntr:\r\n\t\t\tmax_hotel_cntr = c\r\n\t\tif cmax_hotel_mrkt:\r\n\t\t\tmax_hotel_mrkt = c\r\n\t\tif cmax_srch_adults:\r\n\t\t\tmax_srch_adults = c\r\n\t\tif cmax_srch_chdrn:\r\n\t\t\tmax_srch_chdrn = c\r\n\t\tif cmax_srch_rm:\r\n\t\t\tmax_srch_rm = c\r\n\t\tif cmax_day:\r\n\t\tmax_day = day\r\n\tif day project cam coords; https://i.imgur.com/n8cpHe7.png\n self.H, self.W = cam_dict['height'], cam_dict['width'] \n self.bbox = bbox\n \n self.xyz_min = np.zeros(3,) + np.inf\n self.xyz_max = np.zeros(3,) - np.inf\n self.bbox_c = self.transform(self.bbox) # in camera coords\n self.bbox_c_T = (self.T @ (self.bbox_c.T)).T # in projection coords\n\n self.edge_list = [[0, 1], [1, 2], [2, 3], [3, 0], [4, 5], [5, 6], [6, 7], [7, 4], [0, 4], [1, 5], [2, 6], [3, 7]]\n self.face_list = [[1, 0, 2, 3], [4, 5, 7, 6], [0, 1, 4, 5], [1, 5, 2, 6], [3, 2, 7, 6], [4, 0, 7, 3]]\n self.edge_face_list = []\n for edge_vertices in self.edge_list:\n edge_tuple = (edge_vertices, [])\n for face_idx, face_vertices in enumerate(self.face_list):\n if edge_vertices[0] in face_vertices and edge_vertices[1] in face_vertices:\n edge_tuple[1].append(face_idx)\n self.edge_face_list.append(edge_tuple)\n \n def form_camera(self, cam_dict):\n '''\n axes: np.array([[-1., 0., -1.], [0., 1., 0.], [1., 0., -1]])\n fov_x, fov_y: in degrees\n\n '''\n origin = cam_dict['origin'].reshape((3,1))\n if cam_dict['cam_axes'] is None:\n lookat_pnt = cam_dict['lookat'].reshape((3,1))\n toward = cam_dict['toward'].reshape((3,1)) # x-axis\n toward /= torch.linalg.norm(toward)\n up = cam_dict[6:9] # y-axis\n up /= torch.linalg.norm(up)\n right = torch.cross(toward, up) # z-axis\n right /= torch.linalg.norm(right)\n else:\n (toward, up, right) = torch.split(cam_dict['cam_axes'].T, 1, dim=1) # x_cam, y_cam, z_cam\n toward = 
toward / torch.linalg.norm(toward)\n up = up / torch.linalg.norm(up)\n right = right / torch.linalg.norm(right)\n assert abs(torch.dot(toward.flatten(), up.flatten())) < 1e-5\n assert abs(torch.dot(toward.flatten(), right.flatten())) < 1e-5\n assert abs(torch.dot(right.flatten(), up.flatten())) < 1e-5\n cam_axes = torch.hstack([toward, up, right]).T\n R = cam_axes.T # columns respectively corresponds to toward, up, right vectors.\n t = origin\n\n width = cam_dict['width']\n height = cam_dict['height']\n if 'fov_x' in cam_dict and 'fov_y' in cam_dict:\n fov_x = cam_dict['fov_x'] / 180. * np.pi\n fov_y = cam_dict['fov_y'] / 180. * np.pi\n f_x = width / (2 * torch.tan(fov_x/2.))\n f_y = height / (2 * torch.tan(fov_y/2.))\n else:\n assert 'f_x' in cam_dict and 'f_y' in cam_dict\n f_x, f_y = cam_dict['f_x'], cam_dict['f_y']\n\n K = torch.tensor([[f_x, 0., (width-1)/2.], [0., f_y, (height-1)/2.], [0., 0., 1.]]).to(self.device)\n\n cam_params = {'K': K, 'R': R, 'origin': origin, 'cam_axes': cam_axes, 'toward': toward, 'up': up, 'right': right}\n cam_params.update({'f_x': f_x, 'f_y': f_y, 'u0': (width-1)/2., 'v0': (height-1)/2.})\n return cam_params\n\n def transform(self, x):\n assert len(x.shape)==2 and x.shape[1]==3\n x = x.reshape((-1, 3))\n return (self.cam_params['cam_axes'] @ (x.T - self.cam_params['origin'])).T\n\n def transform_and_proj(self, x):\n x_c = self.transform(x)\n x_c_T = (self.T @ (x_c.T)).T\n x_c_proj = (self.K @ x_c_T.T).T\n x_c_proj = x_c_proj[:, :2] / (x_c_proj[:, 2:3]+1e-6)\n front_flags = (x_c_T[:, 2]>0).tolist()\n return x_c_proj, front_flags\n\n def param_planes(self):\n plane_params = [[] for i in range(6)]\n vv, uu = torch.meshgrid(torch.arange(self.H), torch.arange(self.W))\n uu, vv = uu.to(self.device), vv.to(self.device)\n invd_list = []\n\n for face_idx in range(6):\n # face_vertices = bbox[self.face_list[face_idx]]\n\n # https://kitchingroup.cheme.cmu.edu/blog/2015/01/18/Equation-of-a-plane-through-three-points/\n p1 = self.bbox_c_T[self.face_list[face_idx][0]]\n p2 = self.bbox_c_T[self.face_list[face_idx][1]]\n p3 = self.bbox_c_T[self.face_list[face_idx][2]]\n # These two vectors are in the plane\n v1 = p3 - p1\n v2 = p2 - p1\n # the cross product is a vector normal to the plane\n cp = torch.cross(v1, v2)\n a, b, c = cp\n # This evaluates a * x3 + b * y3 + c * z3 which equals d\n d = torch.dot(cp, p3)\n\n # print(face_idx, self.face_list[face_idx][:3],p1, p2, p3)\n # print('The equation is {0}x + {1}y + {2}z = {3}'.format(a, b, c, d))\n plane_params[face_idx] = [a, b, c, d]\n\n # Zhang et al. - 2020 - GeoLayout Geometry Driven Room Layout Estimation, Sec. 
3.1\n p = -a / (self.cam_params['f_x'] * d)\n q = -b / (self.cam_params['f_y'] * d)\n r = 1/d * (a/self.cam_params['f_x']*self.cam_params['u0'] + b/self.cam_params['f_y']*self.cam_params['v0'] - c)\n invd = - (p * uu + q * vv + r)\n # print('>>>>>>>>', torch.sum(torch.isnan(invd)))\n invd_list.append(invd)\n return invd_list\n \n def vis_3d(self, bbox):\n fig = plt.figure(figsize=(5, 5))\n ax_3d = fig.add_subplot(111, projection='3d')\n ax_3d = fig.gca(projection='3d')\n ax_3d.set_proj_type('ortho')\n ax_3d.set_aspect(\"auto\")\n\n [cam_xaxis, cam_yaxis, cam_zaxis] = np.split(self.cam_params['cam_axes'].T, 3, 1)\n vis_cube_plt(ax_3d, bbox, linewidth=2, if_vertex_idx_text=True)\n vis_axis(ax_3d, make_bold=[1])\n vis_axis_xyz(ax_3d, cam_xaxis.flatten(), cam_yaxis.flatten(), cam_zaxis.flatten(), self.cam_params['origin'].flatten(), suffix='_c', make_bold=[0])\n\n self.xyz_min = np.minimum(self.xyz_min, np.amin(bbox, 0))\n self.xyz_max = np.maximum(self.xyz_max, np.amax(bbox, 0))\n self.xyz_min = np.minimum(self.xyz_min, self.cam_params['origin'].reshape((3,))-1.)\n self.xyz_max = np.maximum(self.xyz_max, self.cam_params['origin'].reshape((3,))+1.)\n ax_3d.view_init(elev=121, azim=-111)\n ax_3d.set_box_aspect([1,1,1])\n new_limits = np.hstack([self.xyz_min.reshape((3, 1)), self.xyz_max.reshape((3, 1))])\n set_axes_equal(ax_3d, limits=new_limits) # IMPORTANT - this is also required\n\n return ax_3d\n\n def vis_2d_bbox_proj(self, bbox, if_show=True, if_vertex_idx_text=True, edge_list=None):\n '''\n Projecting all vertices including those behind the camera; will cause artifacts: wrong locations of projected edges and edges behind the camera\n '''\n if edge_list is None:\n edge_idxes_list = self.edge_list\n verts = bbox\n else:\n verts = torch.vstack(edge_list)\n num_edges = len(edge_list)\n edge_idxes_list = [x.tolist() for x in np.split(np.arange(num_edges*2), num_edges)]\n if_vertex_idx_text = False\n\n verts_proj, front_flags = self.transform_and_proj(verts)\n # print(verts)\n # print(verts_proj)\n fig = plt.figure()\n for edge_idx, edge in enumerate(edge_idxes_list):\n x1 = verts_proj[edge[0]]\n x2 = verts_proj[edge[1]]\n # print(edge, x1, x2)\n plt.plot([x1[0], x2[0]], [x1[1], x2[1]], color='k', linewidth=2, linestyle='--')\n if if_vertex_idx_text:\n for idx, x2d in enumerate(verts_proj):\n plt.text(x2d[0]+10, x2d[1]+10, str(idx))\n plt.axis('equal')\n plt.xlim([0., self.W-1])\n plt.ylim([self.H-1, 0])\n if if_show:\n plt.show()\n return plt.gca()\n\n def poly_to_masks(self, face_verts_list):\n mask_list = []\n mask_combined = torch.zeros(self.H, self.W, dtype=torch.long) + 6 # 6 for no faces, 0..5 for faces 0..5\n mask_conflict = np.zeros((self.H, self.W), np.bool)\n\n for face_idx, face_verts in enumerate(face_verts_list):\n if len(face_verts)==0:\n continue\n face_verts_proj = self.transform_and_proj(face_verts)\n face_verts_proj_reindex = ConvexHull(face_verts_proj[0].cpu().numpy()).vertices\n face_verts_proj_convex = face_verts_proj[0][face_verts_proj_reindex]\n\n # reduce poly to screen space to speed up rasterization\n p1 = Polygon([x.tolist() for x in face_verts_proj_convex])\n p2 = Polygon([(0, 0), (self.W-1, 0.), (self.W-1,self.H-1), (0, self.H-1)])\n face_poly = p1.intersection(p2)\n \n if face_poly.is_empty:\n continue\n mask = mask_for_polygons([face_poly], (self.H, self.W))\n mask = mask == 1\n mask_conflict = np.logical_or(mask_conflict, np.logical_and((mask_combined!=6).cpu().numpy(), mask))\n mask = np.logical_and(mask, np.logical_not(mask_conflict))\n mask_combined[mask] = 
face_idx\n mask_list.append((face_idx, mask))\n\n return mask_combined, [[x[0], np.logical_and(np.logical_not(mask_conflict), x[1])] for x in mask_list], mask_conflict\n \n def vis_mask_combined(self, mask_combined, ax_2d=None):\n # print(ax_2d)\n assert ax_2d is not None\n index_map_vis = vis_index_map(mask_combined)\n ax_2d.imshow(index_map_vis)\n return ax_2d\n\n\n def get_edges_front(self, ax_3d=None, if_vis=False):\n edges_front_list = []\n face_edges_list = [[] for i in range(6)]\n face_verts_list = [[] for i in range(6)]\n for edge_idx, (edge, edge_face) in enumerate(zip(self.edge_list, self.edge_face_list)):\n x1x2 = self.bbox[edge]\n _, front_flags = self.transform_and_proj(x1x2)\n x1x2_front, if_new_tuple = get_front_3d_line(x1x2, front_flags, self.cam_params['origin'], self.cam_params['toward']*0.01, if_torch=True)\n if x1x2_front is not None:\n # print('----', edge, x1x2, x1x2_front)\n edges_front_list.append((x1x2_front, if_new_tuple))\n edge_face_face_idxes = edge_face[1]\n for face_idx in edge_face_face_idxes:\n face_edges_list[face_idx].append((x1x2_front, if_new_tuple))\n edges_front = torch.stack([x[0] for x in edges_front_list])\n\n if if_vis:\n for edge_front in edges_front:\n ax_3d.plot3D(edge_front[:, 0], edge_front[:, 1], edge_front[:, 2], color='r', linestyle='-', linewidth=3)\n\n new_edge_list = []\n for face_idx, face_edges in enumerate(face_edges_list):\n if len(face_edges)==0:\n continue\n all_verts = torch.vstack([x[0] for x in face_edges])\n face_verts_list[face_idx] = torch.unique(all_verts.detach(), dim=0)\n\n if if_vis:\n all_verts_if_new = torch.stack([x[1] for x in face_edges]).flatten()\n new_edge = all_verts[all_verts_if_new]\n if new_edge.shape[0]!=0:\n new_edge = torch.unique(new_edge.detach(), dim=0)\n assert new_edge.shape==(2, 3)\n new_edge_list.append(new_edge)\n ax_3d.plot3D(new_edge[:, 0], new_edge[:, 1], new_edge[:, 2], color='m', linestyle='--', linewidth=3)\n\n return edges_front_list, face_edges_list, face_verts_list\n\n\n\n","sub_path":"utils_SL_torch.py","file_name":"utils_SL_torch.py","file_ext":"py","file_size_in_byte":12051,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"620407672","text":"\ndef last_ancestor(folders,X,Y):\n def previous(K):\n above_K, current = [K], K\n while True:\n previous = [Z for Z in folders if current in folders[Z]]\n if previous:\n above_K+= [previous[0]]\n current = previous[0]\n else: return above_K\n \n return [Z for Z in previous(X) if Z in previous(Y)][0]\n\n","sub_path":"cvA35yPFAggr7rtve_5.py","file_name":"cvA35yPFAggr7rtve_5.py","file_ext":"py","file_size_in_byte":330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"628000993","text":"from app.database.engine import SiteSettings, sess\nfrom app.database.schema import SiteSettings as Schema\nfrom app.database.model import TableModel\nfrom app.services.Service import Service\n\nSession = sess()\n# return Session.query(self._table).get(record_id)\n\nclass SiteSettingsModel(TableModel):\n def __init__(self):\n super().__init__()\n self._table = SiteSettings\n \n def get_record(self, site_id):\n\n return Session.query(self._table).filter(\n self._table.site_uuid == site_id, self._table.is_test == False\n ).order_by(\n self._table.created_at.desc()\n ).first()\n\ntable = SiteSettingsModel()\n\nSiteServiceModel = Service(table, 
Schema)\n","sub_path":"app/services/SiteService.py","file_name":"SiteService.py","file_ext":"py","file_size_in_byte":666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"521425415","text":"\"\"\"Monte Carlo Simulator\"\"\"\nimport random\nfrom statistics import mean, median\n\ntry:\n from matplotlib import pyplot\n USING_MATPLOTLIB = True\nexcept ImportError:\n USING_MATPLOTLIB = False\n\n\nclass MonteCarloSimulaterController():\n def __init__(self, actions=[], results=[]):\n assert len(actions) >= 1 and isinstance(actions, list)\n assert len(results) >= 1 and isinstance(results, list)\n\n self._actions = actions\n self._results = results\n self._results_count = [0 for i in range(len(results))]\n self._iterations = 0\n\n\n @staticmethod\n def flip_a_coin(outputs=[0, 1]):\n \"\"\"Flips A Coin\n params: output - What should it output - default: [0, 1]\n \"\"\"\n assert len(output) == 2\n return random.choice(outputs)\n\n @staticmethod\n def roll_a_dice(outputs=[1, 2, 3, 4, 5, 6]):\n \"\"\"Rolls A Dice\n params: output - What should it output - default: [ 1, 2, 3, 4, 5, 6]\n \"\"\"\n assert len(outputs) == 6\n return random.choice(outputs)\n\n def _strength(self, result):\n \"\"\"Gets The Strength of a Result\"\"\"\n for x, y in zip(self._results, self._results_count):\n if x == result:\n return y / self._iterations\n\n def take_action(self, available_actions=None):\n \"\"\"Takes An Action\"\"\"\n available_actions = self._actions if available_actions is None else available_actions\n return random.choice(available_actions)\n\n\n def add_result(self, results):\n \"\"\"Processes The Results\n params: resuts - The Result Which Occured\"\"\"\n for result_index in range(len(self._results)):\n if results == self._results[result_index]:\n self._results_count[result_index] += 1\n self._iterations += 1\n return\n raise Exception(\"Result Did'nt Match PreDefined Results\")\n\n def max_result(self, strength=False):\n \"\"\"Returns Result With Most Occurrence\n params: strength - if strength is true, the function will return strength of that result in decimal (Which can be converted in percentage by multiplying with 100\"\"\"\n maximum = self._results[self._results_count.index(max(self._results_count))]\n if not strength:\n return maximum\n else:\n return maximum, self._strength(maximum)\n\n def avg_result(self, strength=False):\n \"\"\"Returns The Average Output\"\"\"\n avg_occur = mean(self._results_count)\n avg = self._results[self._results_count.index(min(self._results_count, key=lambda x:abs(x-avg_occur)))]\n if not strength:\n return avg\n else:\n return avg, self._strength(avg)\n\n def median_result(self, strength=False):\n \"\"\"Returns The Average Output\"\"\"\n median_occur = median(self._results_count)\n med = self._results[self._results_count.index(min(self._results_count, key=lambda x:abs(x-median_occur)))]\n if not strength:\n return med\n else:\n return med, self._strength(med)\n\n def results_count(self):\n \"\"\"Returns Each Result Count\"\"\"\n counts = {}\n for x, y in zip(self._results, self._results_count):\n counts[x] = y\n return counts\n\n\n","sub_path":"PyMonteCarlo/mcs.py","file_name":"mcs.py","file_ext":"py","file_size_in_byte":3274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"595689074","text":"from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter\nfrom pdfminer.converter import TextConverter\nfrom pdfminer.layout import LAParams\nfrom 
pdfminer.pdfpage import PDFPage\nfrom cStringIO import StringIO\nimport re\n\ndef convert_pdf_to_txt(path):\n rsrcmgr = PDFResourceManager() \n retstr = StringIO()\n device = TextConverter(rsrcmgr, retstr, laparams=LAParams())\n fp = file('Leinwand CV.pdf', 'rb') # Open the file for reading ('rb')\n interpreter = PDFPageInterpreter(rsrcmgr, device)\n for page in PDFPage.get_pages(fp):\n interpreter.process_page(page)\n text = retstr.getvalue()\n fp.close() # Close the file\n device.close()\n retstr.close()\n return text\n\n\nplain_text = convert_pdf_to_txt(\"Leinwand CV.pdf\")\n\n### set up list to capture lines in pdf\nlines = []\nclean = []\n\n### select pattern you want to search for\npattern = 'Ph.D.' ##set this upto be the list of degrees\nspattern = 'Yale'\n\n\n### separate each item in the list by a linebreak\nfor text in plain_text.split(\"\\n\"):\n lines.append(text)\n\n## remove any items in the list that are merely spaces or commas\nfor line in lines:\n if line != '' and line != ' ':\n clean.append(line)\n\n\n## find the indicies in clean that contain info found in pattern\nindices = [i for i, s in enumerate(clean) if pattern in s]\nsindices = [i for i, s in enumerate(clean) if spattern in s]\n\n### find the line previous and the line after the idicies that contain the info i want\n## find the items in clean with the indicies at those three spots\neducation = []\ned_list = []\n\nfor i in indices:\n for s in sindices:\n if i == s + 1 or i == s - 1:\n education.append(i)\n education.append(s)\n if i == s:\n education.append(i)\n ed_list.append(i)\n\nif not ed_list:\n m1 = min(education)\n m2 = min(education) + 1\n ed_final = [m1, m2]\nelse:\n ed_final = [min(education)]\n\n\ned_print = []\n\nfor ed in ed_final:\n ed_print.append(clean[ed])\n\n\n\n","sub_path":"testing_other_cvs.py","file_name":"testing_other_cvs.py","file_ext":"py","file_size_in_byte":1996,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"329343364","text":"from django.conf import settings\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.contrib import messages\nfrom django.contrib.messages.views import SuccessMessageMixin\nfrom django.contrib.sites.shortcuts import get_current_site\nfrom django.core.mail import send_mail\nfrom django.http import (HttpResponse, HttpResponseRedirect,\n HttpResponseNotAllowed)\nfrom django.template.loader import render_to_string\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.views.generic import (CreateView, DetailView, ListView, UpdateView,\n RedirectView, DeleteView)\nfrom django.views.generic.edit import FormMixin\nfrom django.views.generic.detail import SingleObjectMixin\nfrom django.urls import reverse\n\nfrom accounts.mixins import ContributorRequiredMixin\nfrom bundles.models import Bundle\nfrom bundles.forms import BookmarkForm\nfrom aids.forms import AidEditForm, AidSearchForm\nfrom aids.models import Aid, AidWorkflow\n\n\nclass SearchView(FormMixin, ListView):\n \"\"\"Search and display aids.\"\"\"\n\n template_name = 'aids/search.html'\n context_object_name = 'aids'\n paginate_by = 20\n form_class = AidSearchForm\n\n def get_form_kwargs(self):\n \"\"\"Take input data from the GET values.\"\"\"\n\n kwargs = super().get_form_kwargs()\n kwargs.update({\n 'data': self.request.GET,\n })\n\n return kwargs\n\n def get_queryset(self):\n \"\"\"Return the list of results to display.\"\"\"\n\n qs = Aid.objects \\\n .published() \\\n .open() \\\n .select_related('perimeter', 'author') 
\\\n .prefetch_related('backers')\n\n filter_form = self.get_form()\n results = filter_form.filter_queryset(qs)\n ordered_results = filter_form.order_queryset(results)\n return ordered_results\n\n\nclass ResultsView(SearchView):\n \"\"\"Only display search results.\n\n This view is designed to be called via ajax, and only renders html\n fragment of search engine results.\n \"\"\"\n template_name = 'aids/_results.html'\n\n def get_context_data(self, **kwargs):\n kwargs['search_actions'] = True\n return super().get_context_data(**kwargs)\n\n\nclass ResultsReceiveView(LoginRequiredMixin, SearchView):\n \"\"\"Send the search results by email.\"\"\"\n\n http_method_names = ['post']\n EMAIL_SUBJECT = 'Vos résultats de recherche'\n\n def get_form_data(self):\n querydict = self.request.POST.copy()\n for key in ('csrfmiddlewaretoken', 'integration'):\n try:\n querydict.pop(key)\n except KeyError:\n pass\n return querydict\n\n def get_form_kwargs(self):\n kwargs = super().get_form_kwargs()\n kwargs['data'] = self.get_form_data()\n return kwargs\n\n def post(self, request, *args, **kwargs):\n \"\"\"Send those search results by email to the user.\n\n We do it synchronously, but this view is meant to be called from an\n ajax query, so it should not be a problem.\n \"\"\"\n\n results = self.get_queryset()\n nb_results = results.count()\n first_results = results[:10]\n site = get_current_site(self.request)\n querystring = self.get_form_data().urlencode()\n scheme = 'https' if self.request.is_secure() else 'http'\n search_url = reverse('search_view')\n full_url = '{scheme}://{domain}{search_url}?{querystring}'.format(\n scheme=scheme,\n domain=site.domain,\n search_url=search_url,\n querystring=querystring)\n results_body = render_to_string('emails/search_results.txt', {\n 'user_name': self.request.user.full_name,\n 'aids': first_results,\n 'nb_results': nb_results,\n 'full_url': full_url,\n 'scheme': scheme,\n 'domain': site.domain,\n })\n send_mail(\n self.EMAIL_SUBJECT,\n results_body,\n settings.DEFAULT_FROM_EMAIL,\n [self.request.user.email],\n fail_silently=False)\n return HttpResponse('')\n\n\nclass AidDetailView(DetailView):\n \"\"\"Display an aid detail.\"\"\"\n\n template_name = 'aids/detail.html'\n\n def get_queryset(self):\n qs = Aid.objects \\\n .published() \\\n .open() \\\n .select_related('perimeter') \\\n .prefetch_related('backers')\n return qs\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n\n # Registered users see a \"bookmark this aid\" form.\n if self.request.user.is_authenticated:\n user_bundles = Bundle.objects \\\n .filter(owner=self.request.user) \\\n .order_by('name')\n context['user_bundles'] = user_bundles\n aid_bundles = user_bundles \\\n .filter(aids=self.object)\n context['bookmark_form'] = BookmarkForm(\n user=self.request.user,\n bundles=user_bundles,\n initial={'bundles': aid_bundles})\n\n context['similar_aids'] = self.find_similar_aids()\n return context\n\n def find_similar_aids(self):\n from django.db.models import Count\n tags = self.object.tags\n aids = Aid.objects \\\n .published() \\\n .open() \\\n .filter(_tags_m2m__name__in=tags) \\\n .annotate(nb_tags=Count('_tags_m2m')) \\\n .filter(nb_tags__gte=2) \\\n .exclude(id=self.object.id)\n return aids\n\n def post(self, request, *args, **kwargs):\n if not request.user.is_authenticated:\n return HttpResponseNotAllowed(permitted_methods=['get'])\n\n self.object = self.get_object()\n\n form = BookmarkForm(\n user=request.user,\n bundles=request.user.bundles,\n 
data=request.POST)\n\n if form.is_valid():\n AidBookmark = Bundle._meta.get_field('aids').remote_field.through\n AidBookmark.objects \\\n .filter(bundle__owner=request.user) \\\n .filter(aid=self.object) \\\n .delete()\n\n bookmarks = []\n bundles = form.cleaned_data['bundles']\n for bundle in bundles:\n bookmarks.append(AidBookmark(\n bundle=bundle,\n aid=self.object\n ))\n AidBookmark.objects.bulk_create(bookmarks)\n\n if not self.request.is_ajax():\n msg = _('This aid was added to the selected bundles.')\n messages.success(self.request, msg)\n\n if self.request.is_ajax():\n response = HttpResponse('')\n else:\n response = HttpResponseRedirect(self.object.get_absolute_url())\n return response\n\n\nclass AidEditMixin:\n \"\"\"Common code to aid editing views.\"\"\"\n\n def get_queryset(self):\n qs = Aid.objects \\\n .filter(author=self.request.user) \\\n .order_by('name')\n self.queryset = qs\n return super().get_queryset()\n\n\nclass AidDraftListView(ContributorRequiredMixin, AidEditMixin, ListView):\n \"\"\"Display the list of aids published by the user.\"\"\"\n\n template_name = 'aids/draft_list.html'\n context_object_name = 'aids'\n paginate_by = 30\n sortable_columns = ['name', 'date_created', 'date_updated']\n default_ordering = 'date_created'\n\n def get_queryset(self):\n qs = super().get_queryset()\n qs = qs.prefetch_related('backers')\n return qs\n\n def get_ordering(self):\n order = self.request.GET.get('order', '')\n order_field = order.lstrip('-')\n if order_field not in self.sortable_columns:\n order = self.default_ordering\n return order\n\n def get_context_data(self, **kwargs):\n kwargs['ordering'] = self.get_ordering()\n return super().get_context_data(**kwargs)\n\n\nclass AidCreateView(ContributorRequiredMixin, CreateView):\n \"\"\"Allows publishers to submit their own aids.\"\"\"\n\n template_name = 'aids/create.html'\n form_class = AidEditForm\n\n def form_valid(self, form):\n self.object = aid = form.save(commit=False)\n aid.author = self.request.user\n aid.save()\n form.save_m2m()\n\n msg = _('Your aid was sucessfully created. 
You can keep editing it.')\n messages.success(self.request, msg)\n return HttpResponseRedirect(self.get_success_url())\n\n def get_success_url(self):\n edit_url = reverse('aid_edit_view', args=[self.object.slug])\n return edit_url\n\n\nclass AidEditView(ContributorRequiredMixin, SuccessMessageMixin, AidEditMixin,\n UpdateView):\n \"\"\"Edit an existing aid.\"\"\"\n\n template_name = 'aids/edit.html'\n context_object_name = 'aid'\n form_class = AidEditForm\n success_message = _('Your aid was sucessfully updated.')\n\n def get_success_url(self):\n edit_url = reverse('aid_edit_view', args=[self.object.slug])\n return edit_url\n\n\nclass AidStatusUpdate(ContributorRequiredMixin, AidEditMixin,\n SingleObjectMixin, RedirectView):\n \"\"\"Update an aid status.\"\"\"\n\n http_method_names = ['post']\n\n def post(self, request, *args, **kwargs):\n self.object = self.get_object()\n self.update_aid_status()\n return super().post(request, *args, **kwargs)\n\n def update_aid_status(self):\n \"\"\"Move the aid to the next step in the workflow.\n\n None of these transitions require any special permission, hence we\n don't run any additional checks.\n \"\"\"\n aid = self.object\n\n # Check that submitted form data is still consistent\n current_status = self.request.POST.get('current_status', None)\n if aid.status != current_status:\n return\n\n STATES = AidWorkflow.states\n if aid.status == STATES.draft:\n aid.submit()\n elif aid.status == STATES.reviewable:\n aid.unpublish()\n elif aid.status == STATES.published:\n aid.unpublish()\n\n msg = _('We updated your aid status.')\n messages.success(self.request, msg)\n\n def get_redirect_url(self, *args, **kwargs):\n return reverse('aid_edit_view', args=[self.object.slug])\n\n\nclass AidDeleteView(ContributorRequiredMixin, AidEditMixin, DeleteView):\n \"\"\"Soft deletes an existing aid.\"\"\"\n\n def delete(self, request, *args, **kwargs):\n self.object = self.get_object()\n confirmed = self.request.POST.get('confirm', False)\n if confirmed:\n self.object.soft_delete()\n msg = _('Your aid was deleted.')\n messages.success(self.request, msg)\n\n success_url = reverse('aid_draft_list_view')\n redirect = HttpResponseRedirect(success_url)\n return redirect\n","sub_path":"src/aids/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":10807,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"535064176","text":"#! 
/usr/bin/env python2\n\n# author: tmv\n\"\"\"\nAct as lirc client to control lights/screen, send IR signals to control equipments\nand use wakeonlan to wakeup a media computer.\nRequires python-lirc package (sudo apt-get install python-lirc)\nFor wakeonlan command install wakeonlan from pip (sudo pip install wakeonlan)\nlircd daemon must be running\n\"\"\"\n\nimport lirc\nimport ConfigParser\nimport sys, traceback\nimport CommandProcessor\n\n_lirc_section = \"lirc\"\n\nclass PiLircControl:\n \n def __init__(self, configFile=\"conf/HtRoomControl.conf\"):\n \n # read the configuration file\n self.config = ConfigParser.ConfigParser()\n self.config.read(configFile)\n \n self.program_name = self.config.get(_lirc_section, \"program\")\n self.lirc_config = self.config.get(_lirc_section, \"config\")\n \n self.command_processor = CommandProcessor.CommandProcessor(self.config)\n\n # initialize this lirc Client\n self.sock_id = lirc.init(self.program_name, self.lirc_config)\n \n def __del__(self):\n lirc.deinit()\n \n # start listening to lirc events \n def run(self):\n \n # initially turn on the lights\n self.command_processor.process_command(\"gpio lights-on\")\n while True:\n codes = lirc.nextcode()\n if codes:\n for code in codes:\n try:\n self.command_processor.process_command(code)\n except:\n e = sys.exc_info()[0]\n traceback.print_exc()\n \n\nif __name__ == \"__main__\":\n import os\n \n # change the working directory to where the script was located\n abspath = os.path.abspath(__file__)\n dname = os.path.dirname(abspath)\n os.chdir(dname)\n \n lirc_control = PiLircControl()\n lirc_control.run()\n","sub_path":"control/PiLircControl.py","file_name":"PiLircControl.py","file_ext":"py","file_size_in_byte":1876,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"453285462","text":"import cv2,os,time,datetime\n\ndef moviecut(url,outurl,formats):\n video_path=url\n times=0\n #提取视频的频率,每1帧提取一个\n frameFrequency=5\n #输出图片到当前目录vedio文件夹下\n outPutDirName=outurl\n if not os.path.exists(outPutDirName):\n #如果文件目录不存在则创建目录\n os.makedirs(outPutDirName)\n camera = cv2.VideoCapture(video_path)\n while True:\n times+=1\n res, image = camera.read()\n if not res:\n print('not res , not image')\n break\n if (times%frameFrequency==0):\n # 以每一帧的时间命名\n filename=str(datetime.timedelta(milliseconds=camera.get(0))).replace(\":\",\"-\").replace(\".\",\"-\")\n # print(filename)\n cv2.imwrite(outPutDirName + '/'+filename+formats, image) # 存储为图像\n print(outPutDirName+\"/\"+filename+formats)\n cv2.waitKey(1)\n print('图片提取结束')\n camera.release()\n\n\ndef save_img(url,saveurl):\n vc = cv2.VideoCapture(url) # 读入视频文件,命名cv\n n = 1 # 计数\n if vc.isOpened(): # 判断是否正常打开\n rval, frame = vc.read()\n else:\n rval = False\n timeF = 1 # 视频帧计数间隔频率\n i = 0\n while rval: # 循环读取视频帧\n rval, frame = vc.read()\n if (n % timeF == 0): # 每隔timeF帧进行存储操作\n i += 1\n cv2.imwrite(saveurl+'/{}.jpg'.format(i), frame) # 存储为图像\n print(saveurl+'/{}.jpg'.format(i))\n n = n + 1\n cv2.waitKey(1)\n vc.release()\n\n\nif __name__ == '__main__':\n # 需要cut的视频路径 保存路径(不能有中文) 保存格式\n moviecut(r\"E:\\电影\\为了一张图,我让程序看了20小时的超炮.flv\",\"E:/outputss\",'.jpg')\n # save_img(\"E:/电影/为了一张图,我让程序看了20小时的超炮.flv\",\"E:/output\")","sub_path":"spyder/spyder/moviecut.py","file_name":"moviecut.py","file_ext":"py","file_size_in_byte":1927,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"545851280","text":"\"\"\"\ncalculator 0\nSkeleton for newtonCalculator\n\nImplements inverse using 
numpy \n\"\"\"\nimport sys\nfrom PyQt4.QtCore import *\nfrom PyQt4.QtGui import *\nimport numpy as np\n\n\nclass AppForm(QMainWindow):\n def __init__(self, parent=None):\n QMainWindow.__init__(self, parent)\n self.setWindowTitle('Calculator')\n self.create_main_frame()\n self.textbox.setText('Give a number')\n\n \n def create_main_frame(self):\n self.main_frame = QWidget()\n \n # GUI controls\n self.textbox = QLineEdit()\n self.textbox.setMinimumWidth(200)\n \n self.inverse_button = QPushButton('Inverse')\n self.reset_button = QPushButton('Reset') \n\n # Buttons actions\n @pyqtSlot()\n def on_click_inverse():\n text = QLineEdit.text(self.textbox)\n y = 1. / float(text)\n self.textbox.setText(str(y)) \n def on_click_reset():\n self.textbox.setText('') \n \n self.inverse_button.clicked.connect(on_click_inverse) \n self.reset_button.clicked.connect(on_click_reset) \n \n # Geometry\n \n hbox1 = QHBoxLayout()\n hbox1.addWidget(self.textbox)\n hbox1.addWidget(self.reset_button)\n\n hbox2 = QHBoxLayout() \n for w in [ self.inverse_button ]:\n hbox2.addWidget(w)\n hbox2.setAlignment(w, Qt.AlignVCenter)\n \n vbox = QVBoxLayout()\n vbox.addLayout(hbox1)\n vbox.addLayout(hbox2) \n \n self.main_frame.setLayout(vbox)\n self.setCentralWidget(self.main_frame)\n\n\ndef main():\n app = QApplication(sys.argv)\n form = AppForm()\n form.show()\n app.exec_()\n\nif __name__ == \"__main__\":\n main()","sub_path":"CN/lab6/calculator0.py","file_name":"calculator0.py","file_ext":"py","file_size_in_byte":1768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"340252515","text":"#Andreas Nugroho\n#71200646\n#Deni adalah seorang pegawai di salah satu agency. Karena untuk mencegah covid, manager dari perusaan tersebut\n#meminta ke deni untuk membuatkan aplikasi absen yang tidak perlu menggunakan sidik jari. karena deni tidak\n#mampu membuatnya lalu deni meminta tolong kepada kamu untuk membuatkan aplikasi yang di minta deni.\n#Bantulah deni agar bisa menyelesaikan tugasnya.\n\n#input\n#Nama\n#Kehadiran\n\n#proses\n#if dan perulangan\n\n#output\n#data absensi dalam bentuk file txt\n\nwhile True:\n print(\"=== Selamat Datang ===\")\n print(\"1. Absen Pegawai\\n2. Tampilkan Data (Khusus Admin)\\n3. 
keluar\")\n pilih = int(input(\"Masukkan Pilihan Anda: \"))\n\n if pilih == 1:\n buka = open(\"absen.txt\",\"a\")\n nama = input(\"Masukkan Nama :\")\n print(\"=== Absen Hari Ini ===\")\n print(\"1.Hadir \\n2.Tidak Hadir\")\n absen = int(input(\"Masukkan Pilihan Anda: \"))\n if absen == 1:\n absen = \"Hadir\"\n print(nama,\"Berhasil Absen!\")\n elif absen == 2:\n absen = \"Tidak Hadir\"\n print(nama,\"Berhasil Absen!\")\n else:\n print(\"Inputan tidak ada!\")\n hasil = \"\\nNama: {}\\nAbsen Hari Ini: {}\\n----\".format(nama,absen)\n buka.write(hasil)\n buka.close()\n\n elif pilih == 2:\n nama = input(\"Username :\")\n kode = input(\"Password :\")\n if nama == \"admin\" and kode == \"admin\":\n buka = open(\"absen.txt\",\"r\")\n baca = buka.read()\n print(baca)\n buka.close()\n\n elif pilih == 3:\n print(\"Berhasil Keluar!\")\n break\n\n else:\n print(\"Inputan tidak ada!\")\n","sub_path":"71200646_Video Mingggu 7.py","file_name":"71200646_Video Mingggu 7.py","file_ext":"py","file_size_in_byte":1647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"65630641","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\nimport sys\nreload(sys)\nsys.setdefaultencoding('utf-8')\nfrom allpairspy import AllPairs\n\n\nif __name__ == '__main__':\n # 带宽,丢包,延迟\n parameters = [[\"有C01\",\"无C01\"],[\"有C02\",\"无C02\"],[\"有C03\",\"无C03\"],[\"有C07\",\"无C07\"]]\n # 设置组合因子n的数目,默认为2\n pairwise = AllPairs(parameters, n=2)\n for i, v in enumerate(pairwise):\n print(\"%i:\\t%s\" % (i, str(v).decode('utf-8')))\n # parameters2 = [[\"有C03\",\"无C03\"],[\"有C02\",\"无C02\"],[\"有C01\",\"无C01\"]]\n # pairwise2 = AllPairs(parameters2, n=2)\n # for i, v in enumerate(pairwise2):\n # print(\"%i:\\t%s\" % (i, str(v).decode('utf-8')))\n # combine = set(pairwise)&set(pairwise2)\n # print combine,111","sub_path":"testcase_generate/pair.py","file_name":"pair.py","file_ext":"py","file_size_in_byte":783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"77914048","text":"\"\"\"\nCreating PIPY package instruction:\n\npython3 -m pip install --user --upgrade setuptools wheel\npython3 setup.py sdist\npython3 -m pip install --user --upgrade twine\ntwine check dist/*\ntwine upload dist/*\n\"\"\"\n\nfrom setuptools import setup, find_packages\nfrom numpy.distutils.core import setup, Extension\nfrom os import path\nimport io\n\nthis_directory = path.abspath(path.dirname(__file__))\nwith io.open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:\n long_description = f.read()\n\nif __name__ == \"__main__\":\n setup(name = 'pybeach',\n author = \"Tomas Beuzen\",\n author_email = \"https://tomasbeuzen.github.io/\",\n url = \"https://github.com/TomasBeuzen/pybeach\",\n version = \"0.1.1\",\n description = \"Coastal Processes, Environments & Systems.\",\n long_description = long_description,\n long_description_content_type='text/markdown',\n packages = ['pybeach','pybeach.support','pybeach.classifiers'],\n install_requires = [\n 'numpy>=1.16.3',\n 'scikit-learn>=0.20.3',\n 'pandas>=0.25',\n 'pytz==2019.1',\n 'scipy>=1.2.1',\n 'joblib==0.13.2',\n ],\n python_requires = '>=3.7',\n # package_data = {'pybeach': ['Notebooks/notebooks/*ipynb',\n # 'Notebooks/notebooks/*py'] },\n include_package_data = True,\n classifiers = ['Programming Language :: Python :: 3.7']\n 
)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"579490337","text":"import arcade\nimport random\nimport math\n\nSCREEN_WIDTH = 800\nSCREEN_HEIGHT = 800\nSCREEN_TITLE = \"Predator Prey Behaviour\"\n\nrandom.seed()\n\ncreatures = [[\"predator\", 20, (255, 0, 0)],\n [\"prey\", 8, (0, 128, 0)],\n [\"prey\", 8, (0, 128, 0)],\n [\"prey\", 8, (0, 128, 0)],\n [\"prey\", 8, (0, 128, 0)],\n [\"prey\", 8, (0, 128, 0)],\n [\"prey\", 8, (0, 128, 0)],\n [\"prey\", 8, (0, 128, 0)],\n [\"prey\", 8, (0, 128, 0)],\n [\"prey\", 8, (0, 128, 0)],\n [\"prey\", 8, (0, 128, 0)],\n [\"preyleader\", 12, (34, 139, 34)]]\n\n\nSPEED = [-2, -1.9, -1.8, -1.7, -1.6, -1.5, -1.4, -1.3, -1.2, -1.1, -1, 0, 1, 1.1, 1.2, 1.2, 1.3, 1.4, 1.5, 1.6, 1.7, 1.8, 1.9, 2]\n\nclass Ball:\n def __init__(self, role, size, color):\n self.x = 0\n self.y = 0\n self.change_x = 0\n self.change_y = 0\n self.steer_x = 0\n self.steer_y = 0\n self.role = role\n self.size = size\n self.color = color\n self.state = \"randomlywandering\"\n\n def wander(self):\n\n if self.steer_x != self.change_x:\n if self.steer_x > self.change_x:\n self.change_x += 0.05\n else:\n self.change_x -= 0.05\n\n if self.steer_y != self.change_y:\n if self.steer_y > self.change_y:\n self.change_y += 0.05\n else:\n self.change_y -= 0.05\n\n self.x += self.change_x\n self.y += self.change_y\n\n if random.random() < 0.01:\n self.steer_x = SPEED[random.randint(0,len(SPEED)-1)]\n self.steer_y = SPEED[random.randint(0,len(SPEED)-1)]\n\n\n def seek(self, preyX, preyY):\n \n if preyX > self.x:\n self.change_x = 2\n elif preyX < self.x:\n self.change_x = -2\n else: \n self.change_x = 0\n\n if preyY > self.y:\n self.change_y = 2\n elif preyY < self.y:\n self.change_y = -2\n else: \n self.change_y = 0\n \n self.x += self.change_x\n self.y += self.change_y\n\n def flee(self, predatorX, predatorY):\n if predatorX > self.x:\n self.change_x = -2.5\n elif predatorX < self.x:\n self.change_x = 2.5\n else: \n self.change_x = 0\n\n if predatorY > self.y:\n self.change_y = -2.5\n elif predatorY < self.y:\n self.change_y = 2.5\n else: \n self.change_y = 0\n \n self.x += self.change_x\n self.y += self.change_y\n\n def avoid_obstacles(self):\n\n self.x += self.change_x\n self.y += self.change_y\n\n if self.x < self.size + 50:\n self.change_x += 0.1\n\n if self.y < self.size + 50:\n self.change_y += 0.1\n\n if self.x > SCREEN_WIDTH - self.size - 50:\n self.change_x -= 0.1\n\n if self.y > SCREEN_HEIGHT - self.size - 50:\n self.change_y -= 0.1\n\n def follow_leader(self, leaderX, leaderY, leaderSize, leaderDistance):\n\n if leaderDistance >30:\n if leaderX > self.x:\n self.change_x = 2\n elif leaderX < self.x:\n self.change_x = -2\n else: \n self.change_x = 0\n\n if leaderY > self.y:\n self.change_y = 2\n elif leaderY < self.y:\n self.change_y = -2\n else: \n self.change_y = 0\n elif leaderDistance < leaderSize*2:\n self.change_x = 0\n self.change_y = 0\n else:\n if leaderX > self.x:\n self.change_x = 1\n elif leaderX < self.x:\n self.change_x = -1\n else: \n self.change_x = 0\n\n if leaderY > self.y:\n self.change_y = 1\n elif leaderY < self.y:\n self.change_y = -1\n else: \n self.change_y = 0\n\n self.x += self.change_x\n self.y += self.change_y\n\ndef make_ball(role, size, color):\n\n ball = Ball(role, size, color)\n\n ball.x = random.randrange(ball.size+50, SCREEN_WIDTH - ball.size-50)\n ball.y = random.randrange(ball.size+50, SCREEN_HEIGHT - 
ball.size-50)\n\n ball.change_x = SPEED[random.randint(0,len(SPEED)-1)]\n ball.change_y = SPEED[random.randint(0,len(SPEED)-1)]\n\n return ball\n\nclass MyGame(arcade.Window):\n\n def __init__(self):\n super().__init__(SCREEN_WIDTH, SCREEN_HEIGHT, SCREEN_TITLE)\n self.ball_list = []\n\n for creature in creatures:\n ball = make_ball(creature[0], creature[1], creature[2])\n self.ball_list.append(ball)\n\n def on_draw(self):\n\n arcade.start_render()\n\n for ball in self.ball_list:\n arcade.draw_circle_filled(ball.x, ball.y, ball.size, ball.color)\n\n def on_update(self, delta_time):\n\n for ball in self.ball_list:\n leaderDistance = SCREEN_WIDTH + SCREEN_HEIGHT\n predatorDistance = SCREEN_WIDTH + SCREEN_HEIGHT\n closestPrey = len(creatures)\n\n if ball.role == \"prey\":\n leaderDistance = math.sqrt((ball.x - self.ball_list[len(creatures)-1].x)**2 + (ball.y - self.ball_list[len(creatures)-1].y)**2)\n predatorDistance = math.sqrt((ball.x - self.ball_list[0].x)**2 + (ball.y - self.ball_list[0].y)**2)\n elif ball.role == \"predator\": \n for i in self.ball_list:\n if (predatorDistance > math.sqrt((ball.x - i.x)**2 + (ball.y - i.y)**2)) and (math.sqrt((ball.x - i.x)**2 + (ball.y - i.y)**2) != 0):\n predatorDistance = math.sqrt((ball.x - i.x)**2 + (ball.y - i.y)**2)\n closestPrey = self.ball_list.index(i)\n else:\n predatorDistance = math.sqrt((ball.x - self.ball_list[0].x)**2 + (ball.y - self.ball_list[0].y)**2)\n\n if ball.role == \"predator\" and predatorDistance < 150:\n ball.state = \"preyclose\"\n elif ball.x < ball.size + 50 or ball.y < ball.size + 50 or ball.x > SCREEN_WIDTH - ball.size - 50 or ball.y > SCREEN_HEIGHT - ball.size - 50:\n ball.state = \"obstacledetected\"\n elif (ball.role == \"prey\" or ball.role == \"preyleader\") and predatorDistance < 150:\n ball.state = \"predatorclose\"\n elif ball.role == \"prey\" and leaderDistance < 100:\n ball.state = \"leaderclose\"\n else:\n ball.state = \"randomlywandering\"\n\n if ball.state == \"preyclose\":\n ball.seek(self.ball_list[closestPrey].x, self.ball_list[closestPrey].y)\n elif ball.state == \"predatorclose\":\n ball.flee(self.ball_list[0].x, self.ball_list[0].y)\n elif ball.state == \"leaderclose\":\n ball.follow_leader(self.ball_list[len(creatures)-1].x, self.ball_list[len(creatures)-1].y, self.ball_list[len(creatures)-1].size, leaderDistance)\n elif ball.state == \"obstacledetected\":\n ball.avoid_obstacles()\n else:\n ball.wander()\n\n\ndef main():\n MyGame()\n arcade.run()\n\n\nif __name__ == \"__main__\":\n main()","sub_path":"predatorprey.py","file_name":"predatorprey.py","file_ext":"py","file_size_in_byte":7213,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"342225937","text":"badget = float(input())\nstatist = int(input())\ndress = float(input())\n\ndekor = badget * 0.1\ndress_statist = statist * dress\nmoney_needet = dekor + dress_statist\n\n\nif statist >= 150:\n diskaunt = dress_statist * 0.10\n money_needet = (dekor + dress_statist) - diskaunt\n\nif money_needet <= badget:\n money_left = badget - money_needet\n print('Action!')\n print(f'Wingard starts filming with {money_left:.2f} leva left.')\nelif money_needet >= badget:\n money_left = money_needet - badget\n print('Not enough money!')\n print(f'Wingard needs {money_left:.2f} leva more.')","sub_path":"Lectures 11.07.2020/Yprajneniq/demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} 
+{"seq_id":"278683178","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Mar 23 14:40:03 2020\n\n@author: tony\n\"\"\"\n\n\nfrom operators import *\n\ndef init_state(circuit):\n dim = prod(circuit)\n state = zeros(dim)\n state[0] = 1\n return state\n\ndef nim_move(circuit):\n # for now, I'll assume (3,3) (x) (3) format and generalize later\n # index: 0 1 2\n \n # board controls history's first move\n z = ones([3,3])\n instruct = {'2': -z, #diffuse(3, [1,2]), \n '1': 2*z }\n \n control_codes = encode_state([3], Print=True)\n board_c_hist = create_control(circuit, 1, 0, instruct)\n \n return board_c_hist\n\nclass display_object(): # used to print out states with amplitude\n def __init__(self, amp, code):\n self.amp = amp\n self.code = code\n \ndef output_state(circuit, state, amplitude='no'):\n # this function prints out states formatted as xxx|yyy> + ...\n # xxx is the amplitude, yyy is the basis vector\n \n encoding = encode_state(circuit)\n objs = []\n size = prod(circuit)\n \n if amplitude is 'no':\n for i in range(size):\n if state[i] != 0: # amp state\n objs.append( display_object('', encoding[i]) )\n else:\n for i in range(size):\n if state[i] != 0: # amp state\n objs.append( display_object(str(state[i].round(3)), encoding[i]) )\n \n strings = []\n for i in objs:\n strings.append( i.amp + '|' + i.code +'> ' )\n\n state_string = strings[0]\n \n for i in range(1,len(strings)):\n state_string += '+ ' + strings[i]\n \n print(state_string)","sub_path":"q_program.py","file_name":"q_program.py","file_ext":"py","file_size_in_byte":1695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"507325896","text":"#%%\nimport pandas as pd\nimport tqdm\nimport numpy as np\nimport prot.size\n\n# Load the necessary datasets.\ndata = pd.read_csv('../../../data/compiled_annotated_complexes.csv', comment='#')\ndata.dropna(subset=['n_units'], inplace=True)\n#%%\n# define necessary complexes.\ncomplexes = {'dnap': {'name': 'DNA polymerase III (holo enzyme)',\n 'complexes': ['CPLX0-3803'],\n 'rate_per_sec': 600,\n 'units': 'bp/s',\n 'method': 'sum',\n 'category':'synthesis'},\n 'rnap': {'name':'RNA polymerase (core enzyme)',\n 'complexes': ['APORNAP-CPLX'],\n 'rate_per_sec': 40,\n 'units':'nt/s',\n 'method':'sum',\n 'category':'synthesis'},\n 'dntp': {'name': 'Ribonucleoside-diphosphate reductase (I)',\n 'complexes': ['RIBONUCLEOSIDE-DIP-REDUCTI-CPLX',\n 'RIBONUCLEOSIDE-DIP-REDUCTII-CPLX',\n 'NRDACTMULTI-CPLX'],\n 'rate_per_sec': 10,\n 'units': 'dNTP/s' ,\n 'method': 'sum',\n 'category':'synthesis'},\n 'sigma70': {'name':'σ-70 (RpoD)',\n 'gene_name': ['rpoD'],\n 'rate_per_sec': np.nan,\n 'units': 'none',\n 'method': 'sum',\n 'category':'synthesis'},\n 'all_sigma': {'name': 'all σ-factors',\n 'gene_name': ['rpoE', 'fecI', 'rpoF', 'rpoH', 'rpoN', 'rpoD', 'rpoS'],\n 'rate_per_sec': np.nan,\n 'units':'none',\n 'method':'sum total',\n 'category':'synthesis'},\n 'trna': {'name': 'tRNA ligases',\n 'go_terms': ['GO:0006419', 'GO:0006420', 'GO:0006421',\n 'GO:0006422', 'GO:0006423', 'GO:0006424',\n 'GO:0006425', 'GO:0006426', 'GO:0006427',\n 'GO:0006428', 'GO:0006429', 'GO:0006430',\n 'GO:0006431', 'GO:0006432', 'GO:0006433',\n 'GO:0006434', 'GO:0006435', 'GO:0006436',\n 'GO:0006437', 'GO:0006438'],\n 'rate_per_sec': 20,\n 'units': 'AA/s',\n 'method':'sum',\n 'category':'synthesis'},\n 'carbon_tport': {'name': 'Carbon Importers (total)',\n 'go_terms': ['GO:0009401'],\n 'rate_per_sec': 200,\n 'units': 'carbs/s',\n 'method': 'sum',\n 
'category':'transport'},\n 'carbohydrate_tport_tot': {'name': 'Carbohydrate Transporters (total)',\n 'go_terms': ['GO:0009401'],\n 'rate_per_sec': 200,\n 'units': 'carbs/s',\n 'method': 'sum',\n 'category':'transport'},\n 'glucose_tport': {'name': 'Glucose/Mannose Transporters',\n 'complexes': ['CPLX-157', 'CPLX-165'],\n 'rate_per_sec': 200,\n 'units': 'gluc/sec',\n 'method':'sum',\n 'category':'transport'},\n 'glycerol_tport': {'name': 'Glycerol Transporters',\n 'complexes': ['CPLX0-7654'],\n 'rate_per_sec': 2000,\n 'units': 'glyc/sec',\n 'method':'sum',\n 'category':'transport'},\n 'xylose_tport': {'name': 'Xylose Transporters',\n 'gene_name': ['xylG', 'xylH', 'xylF', 'xylE'],\n 'rate_per_sec': 50,\n 'units': 'xyl/sec',\n 'method': 'sum',\n 'category':'transport'},\n 'fructose_tport': {'name': 'Fructose Transporter FruBA',\n 'complexes': ['CPLX-158'],\n 'rate_per_sec': 200,\n 'units': 'frc/s',\n 'method':'sum',\n 'category':'transport'},\n 'nitrogen_tport': {'name': 'Ammonium Transporter (AmtB)',\n 'gene_name': ['amtB'],\n 'rate_per_sec': 300,\n 'units': 'NH4+/s',\n 'method':'sum',\n 'category':'transport'},\n 'sulfur_tport': {'name': 'Sulfate Transporter (CysUWA)',\n 'complexes': ['ABC-70-CPLX', 'ABC-7-CPLX'],\n 'rate_per_sec':10,\n 'units': 'SO4/s',\n 'method':'avg',\n 'category':'transport'},\n 'phosphate_tport': {'name': 'Phosphate Transport System',\n 'gene_name': ['pitA', 'pitB'],\n 'rate_per_sec': 300,\n 'units': 'Pi/s',\n 'method':'sum',\n 'category':'transport'},\n 'ribosome': {'name': 'Ribosome (50S + 30S)',\n 'complexes': ['CPLX0-3964'],\n 'rate_per_sec': 15,\n 'units': 'AA/s',\n 'method':'sum',\n 'category':'synthesis'},\n 'eftu': {'name': 'Elongation Factor EF-Tu',\n 'gene_name': ['tufA', 'tufB'],\n 'method': 'sum',\n 'rate_per_sec': np.nan,\n 'units':'none',\n 'category':'sum'},\n 'atp_synthase': {'name': 'F1-F0 ATP Synthase',\n 'complexes': ['ATPSYN-CPLX'],\n 'method': 'sum',\n 'rate_per_sec': 300,\n 'units':'atp/s',\n 'category': 'energy production'},\n 'proton_gradient': {'name': 'respiratory complex',\n 'go_terms': ['GO:0019646', 'GO:0006136', 'GO:0006137', 'GO:0006138'],\n 'method':'sum',\n 'rate_per_sec': 1500,\n 'units': 'protons/s',\n 'category': 'energy production'},\n 'fas': {'name': 'Fatty Acid Synthetases (FabA + FabZ)',\n 'gene_name': ['fabZ', 'fabA'],\n 'method': 'sum',\n 'rate_per_sec': 1,\n 'units':'lipid/s',\n 'category': 'synthesis'},\n 'transpeptidases': {\n 'name':'transpeptidases',\n 'gene_name': ['mrdA', 'mrdB', 'mrcA', 'mrcB'],\n 'method': 'sum',\n 'units': 'crosslinks/s^-1',\n 'rate_per_sec':2,\n 'category':'synthesis'}}\n\n# %%\ncomplex_df = pd.DataFrame([])\nfor g, d in tqdm.tqdm(data.groupby(['dataset', 'dataset_name', 'condition', 'growth_rate_hr'])):\n for k, v in complexes.items():\n if 'complexes' in list(v.keys()):\n _d = d[d['complex'].isin(v['complexes'])]\n if 'go_terms' in list(v.keys()):\n cplxs = []\n for t in v['go_terms']:\n __d = d[d['go_terms'].str.contains(t)]\n for kplx in __d['complex'].unique():\n cplxs.append(kplx)\n _d = d[d['complex'].isin(cplxs)]\n if 'gene_name' in list(v.keys()):\n _d = d[d['gene_name'].isin(v['gene_name'])]\n if len(_d) > 0:\n _d = _d.drop_duplicates(subset=['gene_name'])\n _d = _d.groupby(['complex'])['n_units'].mean().reset_index()\n if v['method'] == 'sum':\n units = _d['n_units'].sum()\n _method = 'sum total'\n if v['method'] == 'avg':\n units = _d['n_units'].mean()\n _method = 'average'\n\n # assemble a dictionary\n volume = np.round(prot.size.lambda2size(g[3]), 2)\n _data = {'dataset':g[0], 
'dataset_name':g[1],\n 'condition':g[2], 'growth_rate_hr':g[3],\n 'volume': volume,\n 'n_complex':units,\n 'rate': v['rate_per_sec'],\n 'rate_units': v['units'],\n 'shorthand': k,\n 'name': v['name'],\n 'aggregation_method': _method,\n 'category': v['category'],\n 'concentration_uM': 1E6 * (units / 6.022E23) / (volume * 1E-15)}\n\n complex_df = complex_df.append(_data, ignore_index=True)\n\ncomplex_df.to_csv('../../../data/compiled_estimate_categories.csv', index=False)\n\n\n\n","sub_path":"code/processing/collation/collate_estimate_categories.py","file_name":"collate_estimate_categories.py","file_ext":"py","file_size_in_byte":9275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"29694065","text":"from azureml.core import Run\nimport argparse\nimport pandas as pd\nimport numpy as np\nimport os\nimport joblib\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.linear_model import LogisticRegression\n\n# get the experiment run context\nrun = Run.get_context()\n\n# add args\nparser = argparse.ArgumentParser()\nparser.add_argument(\n \"--reg-rate\",\n type=float,\n dest=\"reg_rate\",\n default=0.01,\n )\nargs = parser.parse_args()\nreg = args.reg_rate\n\n# load and prep data\ndiabetes = pd.read_csv(\"diabetes.csv\")\n\nX, y = diabetes.values, diabetes[\"Diabetic\"].values\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.30)\n\n# train model\nmodel = LogisticRegression(C=1/reg, solver=\"liblinear\").fit(X_train, y_train)\n\n# generate predictions and calculate accuracy\ny_hat = model.predict(X_test)\nacc = np.average(y_hat == y_test)\nrun.log(\"Accuracy\", np.float(acc))\n\n# save trained model\nos.makedirs(\"outputs\", exist_ok=True)\njoblib.dump(value=model, filename=\"outputs/model.pkl\")\n\nrun.complete()","sub_path":"custom/build-and-operate-machine-learning-solutions-with-azure-machine-learning/training_folder/training_script.py","file_name":"training_script.py","file_ext":"py","file_size_in_byte":1038,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"357781583","text":"# -*- coding: utf-8 -*-\n\nimport grok\nfrom interfaces import IMeasurementService, IMeasurementSource\nfrom zope.component import getAllUtilitiesRegisteredFor\n\n\nclass MeasurementService(object):\n \"\"\"Defines an object providing proxy access to a measurement\n information service.\"\"\"\n\n grok.implements(IMeasurementService)\n\n def getSources(self):\n return getAllUtilitiesRegisteredFor(IMeasurementSource)\n\n def isValidUnitsString(self,units):\n u\"\"\" True if the units string 'units' is a valid string\n according to the HL7 UCUM specification. units is not None\n \"\"\"\n sources = self.getSources()\n for source in sources:\n if source.isValidUnitsString(units):\n return True\n return False\n\n\n def unitsEquivalent(self, units1, units2):\n u\"\"\" True if two units strings correspond to the same measured\n property. 
isValidUnitsString(units1) and\n isValidUnitsString(units2)\n \"\"\"\n\n sources = self.getSources()\n for source in sources:\n units1_valid = source.isValidUnitsString(units1)\n units2_valid = source.isValidUnitsString(units2)\n if units1_valid and units2_valid:\n return source.unitsEquivalent(units1, units2)\n return False\n\n\n","sub_path":"src/oship.openehr.rm/src/oship/openehr/rm/support/measurement/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"555075993","text":"import matplotlib.dates as mdate\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\norgData = pd.read_excel('F:\\\\Z-Main Affairs(USA)\\\\US Stusy\\\\2019_Fall_DataScience\\\\HW3\\\\AirQualityUCI.xlsx', sep=';', header=0)\ndata1 = orgData[['Date','CO(GT)']]\ndata2 = data1.drop(data1.loc[data1['CO(GT)']==-200].index, inplace=False)\ndata3 = data2.groupby(data2[\"Date\"]).mean().reset_index()\n\nprint(data3.iloc[:50,0].shape)\nprint(type(data3.iloc[:50,0]))\nlistfy = list(data3.iloc[:50,0])\n# 这里发现直接调用python的内置函数就可以把series转化为list类型\n# 因为list类型接受的是iterable的接口,只要你这个类实现了__iterable__函数,就可以用list调用\n# 这个和 print函数调用 tostring 是一样的\nprint(listfy)\nprint(type(listfy))\nexit()\nfig1 = plt.figure(figsize=(15, 5))\nax1 = fig1.add_subplot(1, 1, 1)\nax1.xaxis.set_major_formatter(mdate.DateFormatter('%Y-%m-%d')) # 设置时间标签显示格式\nplt.xticks(pd.date_range('2004-03-10', '2005-04-04'), rotation=90)\n# plt.title(title)\nplt.plot(data3.iloc[:50,0],data3.iloc[:50,1],color='red',linestyle='-',linewidth=1,marker='D')\nplt.show()\n","sub_path":"Python_Study/HW3_数据画图__iter__Study.py","file_name":"HW3_数据画图__iter__Study.py","file_ext":"py","file_size_in_byte":1134,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"461588834","text":"# coding:utf-8\r\n\"\"\"PySide port of the opengl/samplebuffers example from Qt v4.x\"\"\"\r\n\r\nimport sys\r\n\r\nfrom PySide import QtGui, QtOpenGL\r\nfrom PySide.QtCore import QTimer\r\nfrom PySide.QtGui import QWidget, QMessageBox\r\n\r\nfrom fisig2 import SignalData\r\nfrom gui.GLCangvas.GLCanvasSignal import GLCanvasSignal\r\nfrom gui.GLCangvas.GLCanvasSpectrogram import GLCanvasSpectrogram\r\nfrom gui.googleColorCode import gcolor\r\n\r\ntry:\r\n from OpenGL import GL\r\nexcept ImportError:\r\n app = QtGui.QApplication(sys.argv)\r\n QtGui.QMessageBox.critical(None, \"OpenGL samplebuffers\",\r\n \"PyOpenGL must be installed to run this example.\",\r\n QtGui.QMessageBox.Ok | QtGui.QMessageBox.Default,\r\n QtGui.QMessageBox.NoButton)\r\n sys.exit(1)\r\n\r\n\r\nclass MainWidget(QWidget):\r\n def __init__(self, parent=None):\r\n QWidget.__init__(self, parent)\r\n self.resize(480, 360)\r\n\r\n # プロットオブジェクト\r\n self.glwidget = GLCanvasSignal()\r\n self.glwidget2 = GLCanvasSignal()\r\n self.glwidget3 = GLCanvasSpectrogram()\r\n self.glwidget4 = GLCanvasSignal()\r\n\r\n # UIの生成\r\n self.setupUI()\r\n self.setPlotData()\r\n self.connectSingla()\r\n\r\n # デバッグ用) 自動終了\r\n self.timer = QTimer()\r\n self.timer.singleShot(20 * 1000, self.close)\r\n\r\n def connectSingla(self):\r\n self.glwidget.hairlineMoved.connect(self.glwidget.set_hairline)\r\n self.glwidget.hairlineMoved.connect(self.glwidget3.set_hairline)\r\n self.glwidget.hairlineMoved.connect(self.setSpectruDataPlot)\r\n\r\n pass\r\n\r\n def setSpectruDataPlot(self, qpoint):\r\n # スペクトルのプロット\r\n duration = 70\r\n st = qpoint.x() * duration\r\n et = st+1\r\n self.spec = 
self.spec_gwt.slc().slice_time_ms(st,et).time_average()\r\n self.glwidget2.set_plotting_data(ydata=self.spec.get_logpow(), xdata=self.spec.get_xdata())\r\n # self.glwidget2.set_grid(xticks=26, yticks=2)\r\n # self.glwidget2.set_line_color(color=gcolor(color=\"LightBlue\", level=\"A700\"))\r\n\r\n gwt_timeline = self.spec_gwt.slc().get_logpow()\r\n freq = int(512 * qpoint.y())\r\n self.glwidget4.set_plotting_data(\r\n ydata = gwt_timeline[:,freq],\r\n xdata = self.spec_gwt.get_xdata()\r\n )\r\n\r\n\r\n def setupUI(self):\r\n # レイアウトの生成\r\n layout = QtGui.QVBoxLayout(self)\r\n # レイアウトにオブジェクトをセット\r\n layout.addWidget(self.glwidget)\r\n layout.addWidget(self.glwidget2)\r\n layout.addWidget(self.glwidget3)\r\n layout.addWidget(self.glwidget4)\r\n\r\n self.setLayout(layout)\r\n\r\n def setPlotData(self):\r\n self.signaldata = SignalData()\r\n self.signaldata.load_wav(\"audio.wav\", ch=\"M\") # .slice_time_ms(80,150)\r\n self.spec_gwt = self.signaldata.slice_time_ms(80, 150).gwt()\r\n self.spec = self.spec_gwt.time_average()\r\n self.spec_gwt.slc().slice_time_ms(10, 50)\r\n\r\n # 信号データのプロット\r\n self.glwidget.set_plotting_data(ydata=self.signaldata.get_data(), xdata=None)\r\n self.glwidget.set_ylim(-0.5, 0.5)\r\n self.glwidget.set_grid(xticks=6, yticks=5)\r\n self.glwidget.set_line_color(color=gcolor(color=\"Green\", level=\"A700\"))\r\n\r\n # スペクトルのプロット\r\n self.glwidget2.set_plotting_data(ydata=self.spec.get_logpow(), xdata=self.spec.get_xdata())\r\n self.glwidget2.set_grid(xticks=26, yticks=2)\r\n self.glwidget2.set_line_color(color=gcolor(color=\"LightBlue\", level=\"A700\"))\r\n\r\n # スペクトログラムのプロット\r\n self.glwidget3.set_plotting_data(\r\n xdata=self.spec_gwt.get_xdata(),\r\n ydata=self.spec_gwt.get_ydata(),\r\n zdata=self.spec_gwt.get_logpow()\r\n )\r\n\r\n # タイムライン\r\n self.glwidget4.set_plotting_data(\r\n ydata = self.spec_gwt.get_logpow()[:,128],\r\n xdata = self.spec_gwt.get_xdata()\r\n )\r\n\r\nif __name__ == '__main__':\r\n app = QtGui.QApplication(sys.argv)\r\n\r\n if not QtOpenGL.QGLFormat.hasOpenGL():\r\n QMessageBox.information(0, \"OpenGL pbuffers\",\r\n \"This system does not support OpenGL.\",\r\n QMessageBox.Ok)\r\n sys.exit(1)\r\n\r\n widget = MainWidget()\r\n # widget.resize(640, 480)\r\n widget.move(0, 0)\r\n widget.show()\r\n\r\n sys.exit(app.exec_())\r\n","sub_path":"gldemo.py","file_name":"gldemo.py","file_ext":"py","file_size_in_byte":4563,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"558248743","text":"# 9分\n# 配列として持たず、個別に判定してNoの時点でbreakさせるのがポイント\n# -*- coding: utf-8 -*-\ns = list(input())\nresult = 'Yes'\nfor i in range(len(s)):\n if i % 2 != 0 and s[i].islower():\n result = \"No\"\n break\n elif i % 2 == 0 and not s[i].islower():\n result = \"No\"\n break\n\nprint(result)\n","sub_path":"192/B/192B.py","file_name":"192B.py","file_ext":"py","file_size_in_byte":354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"429436711","text":"start = 50\ndef setup():\n # size(640, 480)\n fullScreen()\n colorMode(HSB, 360, 100, 100)\n background(0, 0,100)\n # noiseSeed(1400)\ndef draw():\n # PerlinIn1D()\n # PerlinIn1D_2()\n PerlinIn2D()\n\n\n\n\ndef PerlinIn2D():\n # translate(width/2, height/2)\n scale(0.25)\n n = 160\n inc = width/n\n global off1, off2, start\n # off1 = start\n off1 = map(mouseX, 0, width, 0, -10)\n\n # background(0, 0,100)\n # noFill()\n noStroke()\n # fill(0,0,0)\n for x in range(0, width*4, inc):\n off2 = map(mouseY, 0, height, 0, -10)\n for y in 
range(0, height*4, inc):\n f = map(noise(off1,off2, start), 0,1, 0, 100)\n # f = map(noise(cos(off1), cos(off2), (start)), 0,1, 0, 100)\n # f = int(map(f, 0,100,0,2))*100\n f = int(map(f, 0,100,0,8))\n f = map(f, 0,8, -20, 300)\n # fill(0,0,f)\n fill(f, 100, 100)\n rect(x, y, inc, inc)\n off2 +=0.005\n off1+=0.005\n start+=0.005\n\ndef PerlinIn1D():\n global off1, start\n off1 = start\n background(0, 0,100)\n noFill()\n # noStroke()\n fill(0,0,0)\n beginShape()\n vertex(0, height/2)\n # vertex(0, height)\n for x in range(width):\n y = map(noise(off1), 0, 1, 0, height/2)\n sin_y = map(sin(off1+random(0.1)), -1, 1, 0, height/2)\n y = sin_y+y\n # unConstrain...\n y =constrain(y, 0, height/2)\n vertex(x, y)\n off1 += 0.005\n # vertex(width, height)\n vertex(width, height/2)\n start+=0.005\n endShape()\n\ndef PerlinIn1D_2():\n global off1, start\n off1 = start\n off2 = 0\n background(190, 90,100)\n noFill()\n noStroke()\n for y in range(height/20):\n fill(140,90, y*5)\n beginShape()\n vertex(0, height)\n for x in range(width):\n hpt = map(noise(off1, off2), 0, 1, 0, height/2)\n hpt =constrain(hpt, 0, height/2)\n vertex(x, hpt+y*20)\n off1 += 0.005\n off2 += 0.005\n vertex(width, height)\n endShape()\n # vertex(width, height)\n # vertex(width, height/2)\n start+=0.005\n # endShape()\n","sub_path":"Pycessing/Perlin/PerlinNoise_Basics/PerlinNoise_Basics.pyde","file_name":"PerlinNoise_Basics.pyde","file_ext":"pyde","file_size_in_byte":2147,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"497068054","text":"from django.views import generic\nfrom users.forms import *\nfrom django.contrib.auth import login, authenticate\nfrom users.models import *\nfrom django.shortcuts import redirect, render\nfrom django.urls import reverse_lazy\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.contrib.auth.decorators import login_required\nfrom social_django.models import UserSocialAuth\nfrom django.db import transaction\nfrom django.contrib.messages.views import SuccessMessageMixin\nfrom django.contrib import messages\nfrom django.utils.translation import ugettext_lazy as _\n\n\n\n\nclass SignUp(SuccessMessageMixin,generic.CreateView):\n model = User\n template_name = 'registration/signup.html'\n form_class = UserCreationForm\n success_message = \"Your account was created successfully\"\n\n\n def get_context_data(self, **kwargs):\n kwargs['user_type'] = 'seeker'\n return super(SignUp, self).get_context_data(**kwargs)\n\n def form_valid(self, form):\n # save the new user first\n form.save()\n # get the email and password\n email = self.request.POST['email']\n password = self.request.POST['password1']\n # authenticate user then login\n user = authenticate(email=email, password=password)\n login(self.request, user)\n return redirect('home')\n\n\n@login_required\ndef view_profile(request, pk):\n user = User.objects.get(pk=pk)\n return render(request,'accounts/profile.html',{'user':user})\n\n\n\n\n\n@login_required\n@transaction.atomic\ndef edit_profile(request, pk):\n user = User.objects.get(pk=pk)\n if request.method == 'POST':\n user_form = BaseProfileForm(request.POST, instance=request.user)\n profile_form = ProfileForm(request.POST, instance=request.user.profile)\n if user_form.is_valid() and profile_form.is_valid():\n user_form.save()\n profile_form.save()\n messages.success(request, _('Your profile was successfully updated!'))\n return render(request, 'accounts/profile.html', {'user':user})\n else:\n messages.error(request, _('Please 
correct the error below.'))\n else:\n user_form = BaseProfileForm(instance=request.user)\n profile_form = ProfileForm(instance=request.user.profile)\n return render(request, 'accounts/edit_profile.html', {\n 'user_form': user_form,\n 'profile_form': profile_form,\n 'user':user\n\n })\n\n\n\n\n\n\n\n\n\n\n\n# settings for social login\n\n@login_required\ndef settings(request):\n user = request.user\n\n try:\n twitter_login = user.social_auth.get(provider='twitter')\n except UserSocialAuth.DoesNotExist:\n twitter_login = None\n\n try:\n facebook_login = user.social_auth.get(provider='facebook')\n except UserSocialAuth.DoesNotExist:\n facebook_login = None\n\n can_disconnect = (user.social_auth.count() > 1 or user.has_usable_password())\n\n return render(request, 'registration/settings.html', {\n 'twitter_login': twitter_login,\n 'facebook_login': facebook_login,\n 'can_disconnect': can_disconnect\n })\n\n","sub_path":"users/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3214,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"639270716","text":"import gym\nfrom main1_Agent import Agent_DQN\n\nenv = gym.make('CartPole-v0')\nenv = env.unwrapped\n\nprint(\"action_size:\", env.action_space.n)\nprint(\"state_size:\", env.observation_space.shape[0])\n\nagent = Agent_DQN(action_size=env.action_space.n,\n state_size=env.observation_space.shape[0])\n\ntotal_steps = 0\ntotal_episode = 190\nreward_history = []\nrender = False\n\nfor e in range(total_episode):\n s = env.reset()\n episode_reward_sum = 0\n while True:\n if render:\n env.render()\n action = agent.get_action(s)\n s_, _ , done, info = env.step(action)\n\n x, _, theta, _ = s_\n r1 = (env.x_threshold - abs(x)) / env.x_threshold\n r2 = (env.theta_threshold_radians - abs(theta)) / env.theta_threshold_radians\n reward = r1 + r2\n agent.store_transition(s, action, reward, s_)\n episode_reward_sum += reward\n agent.learn()\n\n if done:\n print(e,\"-th episode, reward sum: \", episode_reward_sum,\n 'learning rate:', agent.learning_rate_history[len(agent.learning_rate_history)-1],\n ' epsilon: ', agent.epsilon)\n reward_history.append(episode_reward_sum)\n if episode_reward_sum > 3000:\n render = True\n break\n\n s = s_\n total_steps += 1\n\nagent.plot_loss()\nagent.plot_reward(reward_history)\n","sub_path":"main1_DQN.py","file_name":"main1_DQN.py","file_ext":"py","file_size_in_byte":1386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"50159943","text":"from flask import Flask,render_template\n\napp = Flask(__name__)\n\n@app.route('/result')\ndef result():\n dic = {'phy':50,'che':60,'maths':70}\n return render_template('table.html',result=dic)\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0',port=30000,debug=True)\n","sub_path":"flask/Application/template2.py","file_name":"template2.py","file_ext":"py","file_size_in_byte":271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"278624827","text":"from sklearn.externals import joblib\r\nmodel= joblib.load(\"digits_cls.pkl\")\r\nimport cv2\r\nimport time\r\nimport os\r\nimport sklearn.datasets\r\nimport numpy as np\r\nimport time\r\nimport mahotas\r\nimport exp \r\n# hog= HOG(orientations= 9, pixelsPerCell=(14,14), cellsPerBlock=(1,1))\r\ndef donne():\r\n # indir= \"This PC/Downloads/digit_images_p1\"\r\n indir= \"C:\\\\Users\\\\clinic18\\\\Desktop\\\\tdata\"\r\n contours=[]\r\n data=[]\r\n List=[]\r\n e=0\r\n 
target=[]\r\n for root, dirs, filenames in os.walk(indir):\r\n for f in filenames:\r\n \r\n full_filename = indir + \"\\\\\" + f\r\n # print(full_filename)\r\n \r\n image= cv2.imread(full_filename)\r\n # cv2.imshow(\"targetImage\",image)\r\n # cv2.waitKey(100)\r\n # # time.sleep(0.4)\r\n \r\n # print(image.shape[0], image.shape[1])\r\n \r\n #image= cv2.resize(image,(28,28),interpolation=cv2.INTER_AREA)\r\n img= cv2.cvtColor(image,cv2.COLOR_BGR2GRAY)\r\n blurred= cv2.GaussianBlur(img, (5, 5), 0)\r\n \r\n thresh=img.copy()\r\n T = mahotas.thresholding.otsu(blurred)\r\n thresh[thresh > T] = 255\r\n thresh = cv2.bitwise_not(thresh)\r\n e=e+1\r\n # print(roi.shape)\r\n # print(thresh.shape[0],thresh.shape[1],e, f)\r\n cv2.imshow(\"test\",thresh)\r\n cv2.waitKey(100)\r\n # time.sleep(0.5)\r\n thresh= exp.center_extent(thresh, (28,28))\r\n # print(thresh.shape)\r\n # print(thresh)\r\n for i in range(thresh.shape[0]):\r\n for e in range(thresh.shape[1]):\r\n pixel=thresh[i, e]\r\n pixel= int(pixel)\r\n List+=[pixel]\r\n\r\n # print(len(List))\r\n # time.sleep(0.5)\r\n data+=[List]\r\n # print(data)\r\n # time.sleep(1.5)\r\n List=[]\r\n\r\n\r\n target= [0, 5, 0, 0, 6, 0, 6, 9, 7, 7, 6, 2, 1, 3, 4, 6, 2, 2, 1, 3, 3, 0, 8, 8, 2, 9, 8, 9, 8, 9, 3, 8, 5, 2, 6, 9, 6, 9, 2, 6, 5, 4, 9, 9, 5, 5, 7, 7, 6, 9, 7, 6, 5, 7, 6, 4, 4, 3, 4, 4, 5, 3, 4, 3, 2, 6, 5, 4, 4, 7, 4, 3, 7, 7, 5, 3, 6, 4, 6, 3, 8, 3, 3, 7, 0, 4, 7, 5, 5, 4, 3, 3, 8, 2, 0, 6, 8, 3, 9, 3, 6, 2, 8, 8, 1, 2, 8, 8, 9, 1, 7, 8, 0, 3, 3, 8, 7, 8, 3, 1, 8, 0, 0, 3, 0, 0, 1, 1, 1, 9, 3, 5, 2, 9, 2, 0, 7, 5, 1, 8, 8, 8, 1, 9, 9, 9, 1, 1, 9, 7, 8, 4, 9, 8, 1, 2]\r\n data= np.array(data, dtype=\"uint8\")\r\n target=np.array(target)\r\n dataset= sklearn.datasets.base.Bunch(data=data, target=target)\r\n print(type(dataset))\r\n return dataset \r\n ","sub_path":"mnist.py","file_name":"mnist.py","file_ext":"py","file_size_in_byte":2630,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"583931626","text":"import os\nprint(\"\\n\")\nprint(\"Informe 1 para realizar um saque\")\nprint(\"Informe 2 para realizar um deposito\")\nprint(\"Informe 3 para retornar o extrato da conta\")\nprint(\"informe 4 para sair do programa\")\nescolha = int(input(\"Qual operação você deseja realizar? \"))\ndinheiro = 0\nwhile escolha != 4:\n\tif escolha == 1:\n\t\tvalorsaque = float(input(\"Informe a quantidade desejada para o saque: \"))\n\t\tif valorsaque <= dinheiro:\n\t\t\tdinheiro = dinheiro - valorsaque\n\t\t\tprint(f\"Foi sacado R$ {valordaque} da sua conta\")\n\t\t\tarquivo = open('extrato.txt', 'r')\n\t\t\tconteudo = arquivo.readlines()\n\t\t\tconteudo.append(f'Foi realizado saque de {valorsaque}\\n')\n\t\t\tarquivo = open('extrato.txt', 'w')\n\t\t\tarquivo.writelines(conteudo)\n\t\t\tescolha = int(input(\"Qual operação você deseja realizar? \"))\n\t\telse:\n\t\t\tprint(\"Dinheiro insuficiente na sua conta para realizar este saque\")\n\t\t\tescolha = int(input(\"Qual operação você deseja realizar? 
\"))\n\telif escolha == 2:\n\t\tvalordeposito = float(input(\"Informe a quantidade que deseja depositar: \"))\n\t\tdinheiro = dinheiro + valordeposito\n\t\tprint(f\"Foi depositado {valordeposito} na sua conta\")\n\t\tarquivo = open('extrato.txt', 'a+')\n\t\t# conteudo = arquivo.readlines()\n\t\t# conteudo.append(f'Foi realizado um deposito de {valordeposito}\\n')\n\t\t# arquivo = open('extrato.txt', 'w+')\n\t\tarquivo.write(f'Foi realizado um deposito de {valordeposito}\\n')\n\t\tarquivo.close()\n\t\tescolha = int(input(\"Qual operação você deseja realizar? \"))\n\n\telif escolha == 3:\n\t\tarquivo = open('extrato.txt', 'r')\n\t\textrato = arquivo.read()\n\t\tarquivo.close()\n\t\tprint(extrato)\n\t\tprint(\"Saldo: %.4f\"%dinheiro)\n\t\tescolha = int(input(\"Qual operação você deseja realizar? \"))\n\telif escolha == 4:\n\t\tarquivo = open('extrato.txt', 'w')\n\t\tarquivo.close()\n","sub_path":"HeeloWorld/trabalho.py","file_name":"trabalho.py","file_ext":"py","file_size_in_byte":1748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"122138220","text":"\"\"\"\nGiven an object/dictionary with keys and values that consist of both strings and integers, design an algorithm to calculate and return the sum of all of the numeric values.\nFor example, given the following object/dictionary as input:\n{\n \"cat\": \"bob\",\n \"dog\": 23,\n 19: 18,\n 90: \"fish\"\n}\nYour algorithm should return 41, the sum of the values 23 and 18.\nYou may use whatever programming language you'd like.\nVerbalize your thought process as much as possible before writing any code. Run through the UPER problem solving framework while going through your thought process.\n\"\"\"\n\ndef return_sum(input):\n # initialize sum variable\n sum = 0\n\n # loop through dict\n for i in input.values():\n # check type of value in key/value pair\n if isinstance(i, int):\n # if its an integer, add it to the sum\n sum += i\n\n # return sum\n return sum\n\nprint(return_sum({\n \"cat\": \"bob\",\n \"dog\": 23,\n 19: 18,\n 90: \"fish\"\n}))","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":958,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"86339645","text":"# -*- coding: utf-8 -*-\n# Copyright © 2008-2011 Kozea\n# This file is part of Multicorn, licensed under a 3-clause BSD license.\n\nfrom bson.code import Code\nfrom .where import Where\n\n\nclass MapReduce(object):\n\n def __init__(self, map, reduce, where=None):\n self.map = map\n self.reduce = reduce\n self.where = where if where else Where()\n\n def execute(self, collection, in_value=False):\n mapjs = Code(self.map.replace(\"this.\", \"this.value.\")) \\\n if in_value else Code(self.map)\n reducejs = Code(self.reduce.replace(\"this.\", \"this.value.\")) \\\n if in_value else Code(self.reduce)\n results = collection.map_reduce(\n mapjs,\n reducejs,\n \"mr\",\n query=self.where())\n return results\n\n def __repr__(self):\n return (\"MapReduce(map=%r, reduce=%r, where=%r)\") % (\n self.map,\n self.reduce,\n self.where)\n\n\ndef make_mr_map(fields, where=None):\n with_all = False\n if 'this' in fields:\n with_all = True\n del fields[\"this\"]\n fields_str = \"fields = {\"\n for field, origin in fields.items():\n fields_str = \"%s%s: %s, \" % (fields_str, field, origin)\n fields_str += \"};\"\n if with_all:\n fields_str += (\n \"for (attr in this) {\"\n \"if (attr != '_id') {\"\n \" fields[attr] = this[attr];\"\n \"}};\")\n map = (\"function () {\"\n 
\"%s\"\n \"emit(this._id, fields);\"\n \"}\") % fields_str\n reduce = (\"function (k, v) {\"\n \"return v[0];\"\n \"}\")\n return MapReduce(map, reduce, where)\n","sub_path":"multicorn/corns/mongo/mapreduce.py","file_name":"mapreduce.py","file_ext":"py","file_size_in_byte":1668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"394522506","text":"#!/usr/bin/python\n\nimport os,sys,datetime\n\n#database info\ndatabase = \"dg\"\nyesterdayDBTime = (datetime.date.today() + datetime.timedelta(days=-1)).strftime('%Y-%m-%d')\ntoday = datetime.date.today().strftime('%Y-%m-%d %H:%M:%S')\n#format date file\nyesterday = (datetime.date.today() + datetime.timedelta(days=-1)).strftime('%Y%m%d')\n\n#bak dir\nbakDir = \"/home/data/\"\n\n#dict for tables\ndictTables = {\n \"xslsb\":\"xslsb_bak\",\n \"zffsb\":\"zffsb_bak\",\n \"yhqdqb\":\"yhqdqb_bak\",\n \"yhqfqb\":\"yhqfqb_bak\"\n}\ndictLs = {\n \"xslsb\":\"xslsb_bak\",\n \"zffsb\":\"zffsb_bak\"\n}\ndictYhq = {\n \"yhqdqb\":\"yhqdqb_bak\",\n \"yhqfqb\":\"yhqfqb_bak\"\n}\n\n#create /home/data/yesterday dir\ndef createDateDir():\n createCmd = \"mkdir \"+bakDir+yesterday\n os.system(createCmd)\n\n#export mysql data to txt\ndef exportSqlFunc():\n for key in dictTables.keys():\n try:\n exportSql = \"select * from %s\"%(database)+\".\"+key+\" where scsj like '%s'\"%(yesterdayDBTime+\"%\")+\" order by scsj asc\"\n execSql = \"mysql -e \"+\"\\\"\"+exportSql+\"\\\" > \"+bakDir+yesterday+\"/\"+key+\".txt\"\n os.system(execSql)\n except:\n print(\"error...\")\n sys.exit()\n\n#insert into from xslsb and zffsb to xslsb_bak and zffsb_bak\ndef insertSZSqlFunc():\n for key,value in dictLs.items():\n try:\n insertSql = \"insert into %s\"%(database)+\".\"+value+\" select * from %s\"%(database)+\".\"+key+\" where scsj like '%s'\"%(yesterdayDBTime+\"%\")\n execSql = \"mysql -e \"+\"\\\"\"+insertSql+\"\\\"\"\n os.system(execSql)\n except:\n print(\"error ...\")\n sys.exit()\n\n#delete from xslsb and zffsb yesterday data\ndef delSZFunc():\n for key in dictLs.keys():\n try:\n delSql = \"delete from %s\"%(database)+\".\"+key+\" where scsj like '%s'\"%(yesterdayDBTime+\"%\")\n execSql = \"mysql -e \"+\"\\\"\"+delSql+\"\\\"\"\n os.system(execSql)\n except:\n print(\"error ...\")\n sys.exit()\n\n#insert into from yhqdqb yhqfqb to yhqdqb_bak and yhqfqb_bak\ndef insertYhqFunc():\n for key,value in dictYhq.items():\n try:\n insertSql = \"insert into %s\"%(database)+\".\"+value+\" select * from %s\"%(database)+\".\"+key+\" where jssj < '%s'\"%(today)\n execSql = \"mysql -e \"+\"\\\"\"+insertSql+\"\\\"\"\n os.system(execSql)\n except:\n print(\"error ...\")\n sys.exit()\n\n#delete from yhqdqb and yhqfqb jssj today data\ndef delYhqFunc():\n for key in dictYhq.keys():\n try:\n delSql = \"delete from %s\"%(database)+\".\"+key+\" where jssj < '%s'\"%(today)\n execSql = \"mysql -e \"+\"\\\"\"+delSql+\"\\\"\"\n os.system(execSql)\n except:\n print(\"error ...\")\n sys.exit()\n\n#change data to dos format\ndef changeDos():\n for key in dictTables.keys():\n try:\n changeCmd = \"unix2dos \"+bakDir+yesterday+\"/\"+key+\".txt\"\n os.system(changeCmd)\n except:\n print(\"error ...\")\n sys.exit()\n\n#delete file 1st line\ndef deleteOneLineFunc():\n for key in dictTables.keys():\n try:\n delCmd = \"sed -i \\'1d\\' \"+bakDir+yesterday+\"/\"+key+\".txt\"\n os.system(delCmd)\n except:\n print(\"error ...\")\n sys.exit()\n\n#make file to package\ndef makePackageFunc():\n packageCmd = \"cd \"+bakDir+\" && tar -zcvf \"+yesterday+\".sql.tar.gz 
\"+yesterday+\" && rm -rf \"+yesterday\n os.system(packageCmd)\n\n#main func\nif __name__ == \"__main__\":\n createDateDir()\n exportSqlFunc()\n insertSZSqlFunc()\n insertYhqFunc()\n delSZFunc()\n delYhqFunc()\n changeDos()\n deleteOneLineFunc()\n makePackageFunc()","sub_path":"project_scripts/mysql_bak.py","file_name":"mysql_bak.py","file_ext":"py","file_size_in_byte":3651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"401761764","text":"import pygame\nimport sys\nimport random\n\n# esse arquivo responsável por definir as variaveis globais\n# que serão utilizadas entre arquivos\n\n# é utilizado para impedir lag de audio\npygame.mixer.pre_init(44100, -16, 2, 512)\n# inicializa o pygame\npygame.init()\nclock = pygame.time.Clock()\n\n# define algumas cores que serão utilizadas\nbg_color = pygame.Color(\"#0D0A0B\")\naccent_color = pygame.Color(\"#E2FCEF\")\n\n# cria fonte para score\nfont = pygame.font.Font(\"fonts/RetroGaming.ttf\", 32)\n\n# define a altura e largura da janela\nscreen_width, screen_height = 1024, 600\nscreen = pygame.display.set_mode((screen_width, screen_height))\n\n# define o titulo da janela\npygame.display.set_caption(\"Pong\")\n\n\n\nbg_color_mult = pygame.Color(\"#8b1851\") # bg color\nbg_color = pygame.Color(\"#29398e\") # bg color\naccent_color = (253, 255, 252) # cor das letras e linha no meio\nbasic_font = pygame.font.Font(\"fonts/RetroGaming.ttf\", 32) # carrega a fonte\n\nhit_sound = pygame.mixer.Sound(\"sounds/pong.wav\") \nscore_sound = pygame.mixer.Sound(\"sounds/score.wav\") \ndestroy_sound = pygame.mixer.Sound(\"sounds/destroy.wav\") \nbutton_sound = pygame.mixer.Sound(\"sounds/button.wav\") \n\n\nmenu_sound = pygame.mixer.Sound(\"sounds/flight.mp3\") \nsingleP_sound = pygame.mixer.Sound(\"sounds/make_a_run.mp3\") \nmultiP_sound = pygame.mixer.Sound(\"sounds/as_midnight.mp3\")\n\n\n# cria uma linha para ser desenhada no meio da tela\nmiddle_strip = pygame.Rect(screen_width/2 - 2, 0, 4, screen_height) ","sub_path":"settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":1460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"370935970","text":"\nimport os,time,cv2\nimport tensorflow as tf\nimport tensorflow.contrib.slim as slim\nimport numpy as np\nimport os\nimport functions as F\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"1\"\n\ndef ConvBlock(inputs, n_filters, kernel_size=[3, 3], stride=[1, 1]):\n\t\"\"\"\n\tBuilds the conv block for MobileNets\n\tApply successivly a 2D convolution, BatchNormalization relu\n\t\"\"\"\n\t# Skip pointwise by setting num_outputs=Non\n\n\tnet = slim.conv2d(inputs, n_filters, kernel_size=kernel_size, stride=stride, activation_fn=None)\n\t#net = slim.batch_norm(net, fused=True)\n\tnet = tf.nn.relu(net)\n\treturn net\n\n\ndef conv_transpose_block(inputs, n_filters, kernel_size=[2, 2]):\n\t\"\"\"\n\tBasic conv transpose block for Encoder-Decoder upsampling\n\tApply successivly Transposed Convolution, BatchNormalization, ReLU nonlinearity\n\t\"\"\"\n\tnet = slim.conv2d_transpose(inputs, n_filters, kernel_size=[2, 2], stride=[2, 2], activation_fn=None)\n\tnet = tf.nn.relu(net)#slim.batch_norm(net))\n\treturn net\n\ndef blockConv(net, n_filters, kernel_size=[3,3], strides=[1, 1]):\n net = slim.batch_norm(net, fused=True)\n net = tf.nn.relu(net)\n net = slim.conv2d(net, n_filters, kernel_size=kernel_size, stride=strides,activation_fn=None)\n return net\n\ndef build_deepUnet(inputs, num_classes):\n 
##ENCODING\n\tnet = slim.conv2d(inputs, 64, kernel_size=[3, 3], activation_fn=None)\n\tnet = blockConv(net, 64)\n\tshortpath = slim.conv2d(inputs, 64, kernel_size=[1, 1], stride=[1, 1])\n\tnet = tf.add(net, shortpath)\n\tskip1= net\n\n\tnet = blockConv(net, 128, strides=[2,2])\n\tnet = blockConv(net, 128)\n\tshortpath = slim.conv2d(skip1, 128, kernel_size=[1, 1], stride=[2, 2])\n\tnet = tf.add(net, shortpath)\n\tskip2 = net\n\n\tnet = blockConv(net, 256, strides=[2, 2])\n\tnet = blockConv(net, 256)\n\tshortpath = slim.conv2d(skip2, 256, kernel_size=[1, 1], stride=[2, 2])\n\tnet = tf.add(net, shortpath)\n\tskip3 = net\n\n\tnet = blockConv(net, 512, strides=[2, 2])\n\tnet = blockConv(net, 512)\n\tshortpath = slim.conv2d(skip3, 512, kernel_size=[1, 1], stride=[2, 2])\n\tnet = tf.add(net, shortpath)\n\tskip4 = net\n\n\t##BRIDGE\n\n\n\tnet = F.FPABlock(net, n_filters=512, rate=32)\n\n\t##DECODING\n\n\tnet = conv_transpose_block(net, 256)\n\tnet = tf.concat([net, skip3], axis=3)\n\n\tskip4 = slim.conv2d(net, 256, kernel_size=[1, 1])\n\tnet = blockConv(net, 256)\n\tnet = blockConv(net, 256)\n\tnet = tf.add(net, skip4)\n\n\tnet = conv_transpose_block(net, 128)\n\tnet = tf.concat([net, skip2], axis=3)\n\tskip5 = slim.conv2d(net, 128, kernel_size=[1, 1])\n\tnet = blockConv(net, 128)\n\tnet = blockConv(net, 128)\n\tnet = tf.add(net, skip5)\n\n\tnet = conv_transpose_block(net, 64)\n\tnet = tf.concat([net, skip1], axis=3)\n\tskip6 = slim.conv2d(net, 64, kernel_size=[1, 1])\n\tnet = blockConv(net, 64)\n\tnet = blockConv(net, 64)\n\tnet = tf.add(net, skip6)\n\n\tnet = slim.conv2d(net, num_classes, [1, 1], activation_fn=None)\n\treturn net\n","sub_path":"models/deepUnet.py","file_name":"deepUnet.py","file_ext":"py","file_size_in_byte":2800,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"185386997","text":"\n\"\"\"\n# ChooseAllWisely: Unified python function to evaluate Choosing Wisely Metrics with pandas dataframes.\n# ****************************************************************************************************\n#\n# Developed by: Colin Walsh, Trent Rosenbloom, John Angiolillo \n# Vanderbilt University Medical Center - DBMI \n#\n\"\"\"\nfrom keepindications_removeexclusions_preppingfor10to10 import keepindications_removeexclusions\n#from keepindications_removeexclusions import keepindications_removeexclusions\n#from preprocessingclaims import preprocessingclaims\nfrom preprocessingclaims_preppingfor10to10 import preprocessingclaims\nfrom cw_utilityfunx import add_month, timeBtwnDex, subsetByDemographics, df_claimtrimmer\nfrom datetime import datetime, timedelta\nimport numpy as np\nimport pandas as pd\n\ndef testallwisely(metric, #0\n servdata, #1\n claimdata, #2\n demodata, #3\n demoref, #4\n indicref, #5\n redflagref, #6\n servref, #7\n dexadata=None, #8\n preopreftab=None, #9\n admsdata=None, #10\n tempsort_day=True, #11\n lookback=365, #12\n rootICD_gx=10, #13\n sourcegemdict=None, #14\n procgem_dict=None, #15\n uniqueflagx=None, #16\n second_data=None, #17\n window_date = None #18\n ):\n\n #################################################################################################\n output={}\n output_denominator=None\n output_numerator=None\n sizeflag = 0\n \n ######################################################\n debug1='orig'\n debug3='orig'\n debug5='orig' \n debug7='orig'\n debug9='orig'\n debug11='orig'\n debug13='orig'\n debug15='orig'\n debug17='orig'\n debug19='orig'\n debug21='orig'\n debug23='orig'\n debug25='orig'\n 
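build_deepUnet above repeats one pattern through its encoder: two pre-activation blockConv calls plus a strided 1x1 projection of the previous skip tensor, joined with tf.add. The record targets TF1/slim; purely as an equivalent sketch in tf.keras (the function name and shapes below are mine, not the author's), the same residual step can be written as:

import tensorflow as tf

def residual_step(x, n_filters, stride=1):
    # Pre-activation residual block: (BN -> ReLU -> Conv) twice, plus a
    # 1x1 projection shortcut, mirroring blockConv + tf.add in the record.
    shortcut = tf.keras.layers.Conv2D(n_filters, 1, strides=stride, padding="same")(x)
    y = tf.keras.layers.BatchNormalization()(x)
    y = tf.keras.layers.ReLU()(y)
    y = tf.keras.layers.Conv2D(n_filters, 3, strides=stride, padding="same")(y)
    y = tf.keras.layers.BatchNormalization()(y)
    y = tf.keras.layers.ReLU()(y)
    y = tf.keras.layers.Conv2D(n_filters, 3, padding="same")(y)
    return tf.keras.layers.Add()([y, shortcut])

# Usage sketch: inputs = tf.keras.Input((256, 256, 3)); skip1 = residual_step(inputs, 64)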
debug27='orig'\n debug29='orig'\n debug31='orig'\n debug33='orig'\n debug35='orig'\n debug37='orig'\n debug39='orig'\n debug41='orig'\n debug43='orig'\n debug45='orig'\n debug47='orig'\n output='orig'\n ######################################################\n \n \n \n if uniqueflagx==False:\n a=10 if rootICD_gx==9 else 9\n elif (uniqueflagx==True and rootICD_gx==10):\n a=10 \n else:\n raise KeyError(\"The dictionary flags are inconsistent \\n (review rootICD_gx, sourcegemdict, and uniqueflagx) \\n [Working is ={}]\".format(uniqueflagx))\n \n if len(servref[servref.startWith==1])>0:\n raise KeyError(\"servref dataframe codes include stemmed codes (ie those with startWith==1). \\nThis is problematic b/c of 'services['CODE'].isin(services.code)' doesn't account for stemmed codes\")\n elif len(indicref[indicref.startWith==1].groupby('class').count())>1:\n raise KeyError(\"indicref dataframe codes include non-ICD stemmed codes (ie those with startWith==1). \\n This is a problem b/c df.isin() doesn't account for this\")\n elif len(redflagref[redflagref.startWith==1].groupby('class').count())>1:\n raise KeyError(\"redflagref dataframe codes include non-ICD stemmed codes (ie those with startWith==1). \\n This is a problem b/c df.isin() doesn't account for this\")\n else:\n print(\"restrictions on reference startWith code-stems verified.\")\n metrics_needing_drugs_as_primary_code_but_still_use_cpts = ['narc','psyc']\n print('FYI the metrics included in drugs with cpts: {}'.format(metrics_needing_drugs_as_primary_code_but_still_use_cpts))\n \n if metric in metrics_needing_drugs_as_primary_code_but_still_use_cpts:\n if second_data is None:\n raise KeyError('The second_data parameter is None, which is incompatible with algorithm.')\n else:\n print('second_data.head(3) is:')\n second_data=second_data[['MRN','CODE','EVT_DATE']]\n second_data.rename(columns={'MRN':'rMRN','CODE':'rCODE','EVT_DATE':'rEVT_DATE'}, inplace=True)\n print(second_data.head(3)) \n else:\n pass\n #Subset by Demographics\n minage=demoref['agemin'].loc[demoref['key']==metric].tolist()\n maxage=demoref['agemax'].loc[demoref['key']==metric].tolist()\n gend=demoref['gendercrit'].loc[demoref['key']==metric].tolist()\n \n print('minage: {}'.format(minage))\n print('maxage: {}'.format(maxage))\n print('gend: {}'.format(gend))\n #to limit memory use\n del demoref\n \n print('subsetting 1')\n claimdata=subsetByDemographics(claimdata,minage,maxage,gend)\n\n servdata=subsetByDemographics(servdata,minage,maxage,gend)\n print('subsetting 2')\n ##### debugging\n debug1 = claimdata\n debug3 = servdata\n if dexadata is not None:\n dexadata=subsetByDemographics(dexadata,minage,maxage,gend)\n\n # limit the reference df's to just this metric\n servref=servref[servref['key']==metric]\n indicref=indicref[indicref['key']==metric]\n redflagref=redflagref[redflagref['key']==metric]\n \n \n \n \n#================================================================================================================= \n#================================================================================================================= \n#======== INITIATE MAPPING FORWARD AND BACKWARD OF RELEVANT DATAFRAMES ========================================= \n#================================================================================================================= \n#================================================================================================================= \n if uniqueflagx == False:\n ## NORMAL block of code for running intergenerational 
assessments (9->10 and 10->9)\n ## First part of block is for updating ref-9 to ICD10 basis\n if (rootICD_gx == 9 and sourcegemdict is not None):\n # update all reference tables with appropriate dictionary (both 1:1 and 1:many dictionaries should work)\n \"\"\"preopreftab doesn't have ICD codes (?initial impression)\"\"\"\n #######################################################################\n \n indHCC= indicref[indicref['class']=='HCC']\n indCCS= indicref[indicref['class']=='CCS']\n indICD= indicref[indicref['class']=='ICD']\n s_indICD= indICD[indICD.startWith==1]\n f_indICD= indICD[indICD.startWith!=1]\n indother= indicref[~indicref['class'].isin(['HCC','CCS','ICD'])]\n \n redHCC= redflagref[redflagref['class']=='HCC']\n redCCS= redflagref[redflagref['class']=='CCS']\n redICD= redflagref[redflagref['class']=='ICD']\n s_redICD= redICD[redICD.startWith==1]\n f_redICD= redICD[redICD.startWith!=1]\n redother= redflagref[~redflagref['class'].isin(['HCC','CCS','ICD'])]\n \n ######\n # servref broken into [servICD, servother] --> then, servICD broken into [s_servICDnf, f_servICDnf, servICD_f]\n servICD = servref[servref['class']=='ICD']\n servICDnf = servICD[servICD.key!='feed']\n servICD_f = servICD[servICD.key=='feed']\n s_servICDnf = servICDnf[servICDnf.startWith==1]\n f_servICDnf = servICDnf[servICDnf.startWith!=1]\n servother= servref[servref['class']!='ICD']\n \n # need to map the serv ICDnf forward\n print('references unpacked')\n print('variable a, (target ICD generation), is {}'.format(a))\n print('columns for dictionary')\n print(sourcegemdict.columns.values)\n print('______________________')\n \n \n ##### BEGINNING TO STEM GEM #####\n ishortest=0\n ilongest=0\n rshortest=0\n rlongest=0\n sshortest=0\n slongest=0\n \n try:\n ishortest= min(s_indICD['subcode'].str.len())\n ilongest= max(s_indICD['subcode'].str.len())\n except:\n pass\n try: \n rshortest= min(s_redICD['subcode'].str.len())\n rlongest= max(s_redICD['subcode'].str.len())\n except:\n pass\n try: \n sshortest= min(s_servICDnf['code'].str.len())\n slongest= max(s_servICDnf['code'].str.len())\n except:\n pass\n \n # indications reference mapping\n if ilongest!=0:\n iholder=[]\n for y in range(ishortest, ilongest+1):\n print(y)\n gem_short=sourcegemdict.loc[sourcegemdict['ICD{}_subcode'.format(rootICD_gx)].apply(lambda x: x[:y]).isin(s_indICD.subcode),:]\n gem_short.loc[:,'first']=gem_short.loc[:,'ICD{}_subcode'.format(rootICD_gx)].str[:y]\n gem_short.rename(columns={'ICD{}_subcode'.format(rootICD_gx):'subcode_todrop',\n 'first':'ICD{}_subcode'.format(rootICD_gx)}, inplace=True)\n gem_short=gem_short.drop(['subcode_todrop'], axis=1, inplace=False)\n iholder.append(gem_short) \n igem_stemmed= pd.concat(iholder)\n \n s_indICD = s_indICD.merge(igem_stemmed[['ICD{}_subcode'.format(a),\n 'ICD{}_subcode'.format(rootICD_gx)]],\n left_on='subcode',\n right_on='ICD{}_subcode'.format(rootICD_gx),\n how='left')\n s_indICD.rename(columns={'subcode':'subcode_defunct',\n 'ICD{}_subcode'.format(a):'subcode'},inplace=True)\n \n # redflags reference mapping \n if rlongest!=0:\n rholder=[]\n for y in range(rshortest, rlongest+1):\n print(y)\n gem_short=sourcegemdict.loc[sourcegemdict['ICD{}_subcode'.format(rootICD_gx)].apply(lambda x: x[:y]).isin(s_redICD.subcode),:]\n gem_short.loc[:,'first']=gem_short.loc[:,'ICD{}_subcode'.format(rootICD_gx)].str[:y]\n gem_short.rename(columns={'ICD{}_subcode'.format(rootICD_gx):'subcode_todrop',\n 'first':'ICD{}_subcode'.format(rootICD_gx)}, inplace=True)\n gem_short=gem_short.drop(['subcode_todrop'], 
axis=1, inplace=False)\n rholder.append(gem_short) \n rgem_stemmed= pd.concat(rholder)\n \n s_redICD = s_redICD.merge(rgem_stemmed[['ICD{}_subcode'.format(a),\n 'ICD{}_subcode'.format(rootICD_gx)]],\n left_on='subcode',\n right_on='ICD{}_subcode'.format(rootICD_gx),\n how='left')\n s_redICD.rename(columns={'subcode':'subcode_defunct',\n 'ICD{}_subcode'.format(a):'subcode'},inplace=True)\n \n # services reference mapping\n if slongest!=0:\n sholder=[]\n for y in range(sshortest,slongest+1):\n print(y)\n #collect the root ICD stems that are included in the s_servICDnf dataframe\n gem_short=sourcegemdict.loc[sourcegemdict['ICD{}_subcode'.format(rootICD_gx)].apply(lambda x: x[:y]).isin(s_servICDnf.code),:]\n # truncate those codes to the appropriate stem length\n gem_short.loc[:,'first']=gem_short.loc[:,'ICD{}_subcode'.format(rootICD_gx)].str[:y]\n # match the column names\n gem_short.rename(columns={'ICD{}_subcode'.format(rootICD_gx):'subcode_todrop',\n 'first':'ICD{}_subcode'.format(rootICD_gx)}, inplace=True)\n gem_short=gem_short.drop(['subcode_todrop'], axis=1, inplace=False)\n # build the codes into a local ref dataframe\n sholder.append(gem_short) \n sgem_stemmed= pd.concat(sholder) \n \n # notice the abnormal column name for servref 'code' instead of 'subcode'\n s_servICDnf = s_servICDnf.merge(sgem_stemmed[['ICD{}_subcode'.format(a),\n 'ICD{}_subcode'.format(rootICD_gx)]],\n left_on='code',\n right_on='ICD{}_subcode'.format(rootICD_gx),\n how='left')\n s_servICDnf.rename(columns={'code':'code_defunct',\n 'ICD{}_subcode'.format(a):'code'},inplace=True)\n \n ######################################################################\n ### Map the none stemmed (aka startWith==0) to other generation\n \n f_indICD = f_indICD.merge(sourcegemdict[['ICD{}_subcode'.format(a),\n 'ICD{}_subcode'.format(rootICD_gx)]],\n left_on='subcode',\n right_on='ICD{}_subcode'.format(rootICD_gx),\n how='left')\n \n \n f_redICD = f_redICD.merge(sourcegemdict[['ICD{}_subcode'.format(a),\n 'ICD{}_subcode'.format(rootICD_gx)]],\n left_on='subcode',\n right_on='ICD{}_subcode'.format(rootICD_gx),\n how='left')\n \n f_servICDnf = f_servICDnf.merge(sourcegemdict[['ICD{}_subcode'.format(a),\n 'ICD{}_subcode'.format(rootICD_gx)]],\n left_on='code',\n right_on='ICD{}_subcode'.format(rootICD_gx),\n how='left')\n \n for x in [f_indICD, f_redICD]:\n print('x.merge columns:')\n x.rename(columns={'subcode':'subcode_defunct',\n 'ICD{}_subcode'.format(a):'subcode'},inplace=True)\n x.dropna(subset=['subcode'],inplace=True) \n\n \n f_servICDnf.rename(columns={'code':'code_defunct',\n 'ICD{}_subcode'.format(a):'code'}, inplace=True)\n f_servICDnf.dropna(subset=['code'],inplace=True)\n\n print('post renaming') \n \n indHCC = indHCC.merge(sourcegemdict[['hcclev{}'.format(a),\n 'hcclev{}'.format(rootICD_gx)]],\n left_on='subcode', \n right_on='hcclev{}'.format(rootICD_gx),\n how='left') \n redHCC = redHCC.merge(sourcegemdict[['hcclev{}'.format(a),\n 'hcclev{}'.format(rootICD_gx)]],\n left_on='subcode', \n right_on='hcclev{}'.format(rootICD_gx),\n how='left')\n for x in [indHCC, redHCC]:\n print('afterHCC merges:')\n print(x.columns.values) \n \n indCCS = indCCS.merge(sourcegemdict[['ccslev{}'.format(a),\n 'ccslev{}'.format(rootICD_gx)]],\n left_on='subcode',\n right_on='ccslev{}'.format(rootICD_gx),\n how='left')\n redCCS = redCCS.merge(sourcegemdict[['ccslev{}'.format(a),\n 'ccslev{}'.format(rootICD_gx)]],\n left_on='subcode',\n right_on='ccslev{}'.format(rootICD_gx),\n how='left')\n for x in [indCCS, redCCS]:\n print('afterCCS 
merges:')\n print(x.columns.values) \n \n \n indCCS.rename(columns={'subcode':'subcode_defunct',\n 'ccslev{}'.format(a):'subcode'},inplace=True)\n indHCC.rename(columns={'subcode':'subcode_defunct',\n 'hcclev{}'.format(a):'subcode'},inplace=True)\n redCCS.rename(columns={'subcode':'subcode_defunct',\n 'ccslev{}'.format(a):'subcode'},inplace=True)\n redHCC.rename(columns={'subcode':'subcode_defunct',\n 'hcclev{}'.format(a):'subcode'},inplace=True)\n # ===================================================\n #######################################################################\n ############ New structure July 12 evening\n if metric == 'feed':\n if procgem_dict is not None:\n servICD_p = servICD_f[servICD_f.code.str[-2:]=='_p']\n print('procedure only ICDs')\n print(servICD_p)\n servICD_p['code']=servICD_p.code.str[:-2]\n print(servICD_p)\n servICD_p = servICD_p.merge(procgem_dict[['ICD{}_subcode'.format(a),'ICD{}_subcode'.format(rootICD_gx)]],\n left_on='code',\n right_on='ICD{}_subcode'.format(rootICD_gx),\n how='left')\n \n servICD_p['code'].update(servICD_p['ICD{}_subcode'.format(a)]) \n servICD_p = servICD_p[[ 'class' ,'code' ,'key' ,'label' ,'startWith']]\n else:\n pass\n servICD_d = servICD_f[servICD_f.code.str[-2:]!='_p']\n s_servICD_d = servICD_d[servICD_d.startWith==1]\n f_servICD_d = servICD_d[servICD_d.startWith!=1]\n \n \n \n f_servICD_d = f_servICD_d.merge(sourcegemdict[['ICD{}_subcode'.format(a),\n 'ICD{}_subcode'.format(rootICD_gx)]],\n left_on='code',\n right_on='ICD{}_subcode'.format(rootICD_gx),\n how='left')\n \n f_servICD_d.rename(columns={'code':'code_defunct',\n 'ICD{}_subcode'.format(a):'code'}, inplace=True)\n f_servICD_d.dropna(subset=['code'],inplace=True)\n # this should only update those columns where ICD{}_subcode is not none, thus CPTs should be safe\n \n sshortest_d = 0\n slongest_d = 0\n try: \n sshortest_d= min(s_servICD_d['code'].str.len())\n slongest_d= max(s_servICD_d['code'].str.len())\n except:\n pass\n if slongest_d!=0:\n sholder_d=[]\n for y in range(sshortest_d,slongest_d+1):\n print(y)\n #collect the root ICD stems that are included in the s_servICDnf dataframe\n gem_short=sourcegemdict.loc[sourcegemdict['ICD{}_subcode'.format(rootICD_gx)].apply(lambda x: x[:y]).isin(s_servICD_d.code),:]\n # truncate those codes to the appropriate stem length\n gem_short.loc[:,'first']=gem_short.loc[:,'ICD{}_subcode'.format(rootICD_gx)].str[:y]\n # match the column names\n gem_short.rename(columns={'ICD{}_subcode'.format(rootICD_gx):'subcode_todrop',\n 'first':'ICD{}_subcode'.format(rootICD_gx)}, inplace=True)\n gem_short=gem_short.drop(['subcode_todrop'], axis=1, inplace=False)\n # build the codes into a local ref dataframe\n sholder_d.append(gem_short) \n sgem_stemmed_d= pd.concat(sholder_d) \n \n # notice the abnormal column name for servref 'code' instead of 'subcode'\n s_servICD_d = s_servICD_d.merge(sgem_stemmed_d[['ICD{}_subcode'.format(a),\n 'ICD{}_subcode'.format(rootICD_gx)]],\n left_on='code',\n right_on='ICD{}_subcode'.format(rootICD_gx),\n how='left')\n s_servICD_d.rename(columns={'code':'code_defunct',\n 'ICD{}_subcode'.format(a):'code'},inplace=True)\n \n \n #####\n s_servICD_d = s_servICD_d[['class' ,'code' ,'key' ,'label' ,'startWith']]\n f_servICD_d = f_servICD_d[['class' ,'code' ,'key' ,'label' ,'startWith']]\n if procgem_dict is not None: \n #Remember, from servref[key=='feed'] --> [servref_d, servICD_p] from servref_d --> [s_servICD_d, f_servICD_d, servother_d]\n servICD_f= pd.concat([servICD_p, s_servICD_d, f_servICD_d])\n else:\n print(\"New 
approach, ignoring that certain GEMs (bestMap9) lack ICD9 Procedure codes for feeding tube placement to ICD10, due to absent equiv-map.\")\n servICD_f= pd.concat([s_servICD_d, f_servICD_d])\n #========================================== \n \n # Reconstitute the DataFrames\n indICD=pd.concat([s_indICD, f_indICD])\n redICD=pd.concat([s_redICD, f_redICD])\n \n i_con = pd.concat([ indHCC,\n indCCS,\n indICD,\n indother])\n r_con = pd.concat([ redHCC,\n redCCS,\n redICD,\n redother])\n \n s_con = pd.concat([servother,\n servICD_f, \n s_servICDnf, \n f_servICDnf]) \n \n \n print('indicref repacked columns: {}'.format(len(i_con)))\n print(i_con.columns.values)\n print('redflagref repacked columns {}:'.format(len(r_con)))\n print(r_con.columns.values)\n print('servref repacked columns: {}'.format(len(s_con)))\n print(s_con.columns.values)\n \n indicref=i_con\n redflagref=r_con\n servref = s_con\n \n \n #to limit memory use\n del indHCC\n del indCCS\n del indICD\n del indother\n del redHCC\n del redCCS\n del redICD\n del redother\n del s_servICDnf\n del f_servICDnf\n del servICD_f\n del servother\n del i_con\n del r_con\n del s_con\n #######################################################################\n ## End of ICD9->10 conversions\n #to limit memory use\n del sourcegemdict\n \n print(indicref.head(3))\n print('now reds')\n print(redflagref.head(3))\n ####################################################################################################################\n #################################################################################################################### \n elif (rootICD_gx == 10 and sourcegemdict is not None):\n # #\n ####################################################################################################################\n # Second part of 'if' branch (this part is converting 10-claims to 9 for comparison with ref-9 codes\n ### With new ICD codes, merge on ICD codes to map to ICD{a}, HCC{a}, CCS{a} with dictionaries to get new mappings\n if metric == 'feed':\n print(\"New implementation ignores that certain GEMs lack feeding tube placement mappings\")\n else:\n pass\n \n claimdata=claimdata.merge(sourcegemdict[['ICD{}_subcode'.format(a),\n 'ICD{}_subcode'.format(rootICD_gx),\n 'hcclev{}'.format(a),\n 'ccslev{}'.format(a)]],\n left_on='ICD{}_subcode'.format(rootICD_gx),\n right_on='ICD{}_subcode'.format(rootICD_gx),\n how='left') \n claimdata.rename(columns={'ICD{}_subcode_x'.format(a):'ICD{}_subcode_ignored'.format(a),\n 'ICD{}_subcode_y'.format(a):'ICD{}_subcode'.format(a),\n 'hcclev':'hcclev_ignored',\n 'ccslev':'ccslev_ignored',\n 'hcclev{}'.format(a):'hcclev',\n 'ccslev{}'.format(a):'ccslev'},inplace=True)\n #######################################################################\n claimdata.dropna(subset=['ICD{}_subcode'.format(a)],inplace=True)\n \n #to limit memory use\n del sourcegemdict\n elif sourcegemdict is None:\n pass\n else:\n raise KeyError(\" argument, {}: is inappropriate for current design of code.\".format(rootICD_gx))\n\n elif uniqueflagx==True:\n ##################################################\n ## to perform ICD10 : ICD10 assessments\n # \n if indicref['class'].str.contains('HCC').any():\n raise ValueError('The algorithm for 10 to 10 was not built for HCC codes in the reference tables')\n elif redflagref['class'].str.contains('HCC').any():\n raise ValueError('The algorithm for 10 to 10 was not built for HCC codes in the reference tables')\n elif indicref['class'].str.contains('CCS').any():\n raise ValueError('The algorithm for 
10 to 10 was not built for CCS codes in the reference tables')\n elif redflagref['class'].str.contains('CCS').any():\n raise ValueError('The algorithm for 10 to 10 was not built for CCS codes in the reference tables')\n else:\n pass\n \n claimdata.loc[:,'hcclev']=0\n claimdata.loc[:, 'ccslev']=0\n ##################################################\n \n#=================================================================================================================\n#=================================================================================================================\n#=================================================================================================================\n#== SECOND PHASE OF THE ALGORITHM ================================================================================\n#=================================================================================================================\n#=================================================================================================================\n#=================================================================================================================\n#=================================================================================================================\n print('dumping null mapped (sub)codes')\n indicref = indicref[~indicref.subcode.isnull()]\n redflagref = redflagref[~redflagref.subcode.isnull()]\n servref = servref[~servref.code.isnull()]\n print('dumping zero length codes 0')\n indicref['length']=indicref['subcode'].apply(lambda x: len(x))\n print('dumping zero length codes 1')\n redflagref['length']=redflagref['subcode'].apply(lambda x: len(x))\n print('dumping zero length codes 2')\n servref['length']=servref['code'].apply(lambda x: len(x))\n print('dumping zero length codes 3')\n indicref = indicref[indicref['length']>0]\n print('dumping zero length codes 4')\n redflagref = redflagref[redflagref['length']>0]\n print('dumping zero length codes 5')\n servref = servref[servref['length']>0]\n print('i{},r{},s{}'.format(len(indicref),len(redflagref),len(servref)))\n \n \n # DUMP unneeded records for those records where denominator is dependent on ICD code (migraine/dementia/lbp/bph dx)\n # Keep in mind that the demented pts / migraine pts use stemmed ICD definitions, which need mapping done above\n debug45=claimdata\n debug47=indicref\n if metric in ['psyc','feed','narc','lbp','bph']: \n print('entered dx-tied loop')\n # Need to shed the cases of CPT/drug admin that are not used for diagnosis specific pts (demented/migraine)\n t_claim = claimdata[['MRN','ICD{}_subcode'.format(a), 'EVT_DATE']]\n print(t_claim.head(2))\n # so, create list of MRNs with relevant diagnoses (demented/migraine/lowbackpain/bph)\n ### REALLY, this likely should just be the original indications table, with later keepindications/removeexclusions removed/silenced\n xt_claim = list(t_claim.loc[t_claim['ICD{}_subcode'.format(a)].isin(indicref.subcode[indicref['startWith']==0]),'MRN'])\n ##### debug\n debug5 = xt_claim\n if metric=='feed':\n pass\n else:\n df_t_claim = t_claim.loc[t_claim['ICD{}_subcode'.format(a)].isin(indicref.subcode[indicref['startWith']==0]),['MRN','EVT_DATE']]\n print('df_t_claim has been created')\n debug35=df_t_claim\n # don't forget to accomodate the startWith diagnoses\n refCodes=list(indicref.loc[indicref.startWith==1,'subcode'])\n print('refCodes')\n print(refCodes[:5])\n beany=0\n for i in refCodes: \n print('going through startWith refCodes')\n print(beany)\n beany+=1\n 
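The denominator cohort here is assembled in two passes: exact matches against non-stemmed indication codes, then the per-stem str.startswith pass in the loop that continues just below. A compact, hedged equivalent in pandas (qualifying_mrns and the column names are illustrative, not the project's helpers):

import pandas as pd

def qualifying_mrns(claims, indicref, code_col="ICD10_subcode"):
    # claims: one row per claim with columns [MRN, code_col]
    # indicref: indication reference with columns [subcode, startWith]
    exact = set(indicref.loc[indicref.startWith == 0, "subcode"])
    stems = tuple(indicref.loc[indicref.startWith == 1, "subcode"])
    hit = claims[code_col].isin(exact)
    if stems:
        # Series.str.startswith accepts a tuple of prefixes, so the per-stem
        # Python loop can be collapsed into one vectorised test.
        hit |= claims[code_col].str.startswith(stems, na=False)
    return claims.loc[hit, "MRN"].unique()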
xt_claim=np.append(xt_claim, list(t_claim.loc[t_claim['ICD{}_subcode'.format(a)].str.startswith(i), 'MRN']))\n debug37=xt_claim\n if metric =='feed':\n pass\n else:\n # capture the MRNs and associated dates for the rows with claim codes that are in indications list\n df_t_claim = pd.concat([df_t_claim, \n t_claim.loc[t_claim['ICD{}_subcode'.format(a)].str.startswith(i), ['MRN','EVT_DATE']]])\n debug39=df_t_claim\n\n #to limit memory use\n del t_claim \n print(len(servdata))\n # final list of all MRNs with relevant dx codes for the denominator:\n xt_claim= list(np.unique(xt_claim))\n ##### debug\n debug7 = xt_claim \n print('list of MRNs with needed dx')\n print(xt_claim[-10:])\n ##### Restrict the datasets to MRNs in question\n if metric =='feed':\n servdata=servdata[servdata.MRN.isin(xt_claim)]\n debug39 = claimdata\n debug41= indicref\n debug43= servref\n else:\n df_t_claim.rename(columns={'EVT_DATE':'tEVT_DATE'}, inplace=True)\n df_t_claim.sort_values(['MRN','tEVT_DATE'],ascending=True, inplace=True, na_position='last') \n # for each individual MRN, keep only the index/first date of a relevant code (ie, date of first record of migraine)\n df_t_claim.drop_duplicates(['MRN','tEVT_DATE'], keep='first', inplace='True')\n debug41=df_t_claim\n print('df_t_claim columns')\n print(df_t_claim.columns.values)\n print('servdata columns')\n print(servdata.columns.values)\n # use inner merge to keep only servdata records of MRNs that are in tailored claims df\n servdata=servdata.merge(df_t_claim, on='MRN', how='inner')\n del df_t_claim\n print('empty cells in tEVT_DATE:')\n debug43=servdata\n print(servdata[servdata.tEVT_DATE.isnull()])\n print('-------------------------')\n #\n # use date criteria to only keep records of services provided after the first relevant claim (claimdate=tEVT_DATE)\n print('implementing time restriction')\n servdata = servdata[servdata.EVT_DATE>=servdata.tEVT_DATE]\n # 123456 89\n claimdata= claimdata[claimdata.MRN.isin(xt_claim)]\n ##### debug\n debug9 = claimdata\n debug11 = servdata\n print('after xt_claim')\n print('length serv {}'.format(len(servdata)))\n print('length claim {}'.format(len(claimdata)))\n try:\n second_data = second_data[second_data.MRN.isin(xt_claim)]\n except:\n pass\n else:\n pass\n #####################################################################################\n #####################################################################################\n refkey = pd.concat([redflagref,indicref])\n servref['subcode']=servref.code\n allcodes = pd.concat([servref,refkey])\n ############################################### July 8, 2017\n allcodes_nos = allcodes[allcodes.startWith!=1]\n allcodes_s = allcodes[allcodes.startWith==1]\n print('allcodes_s:')\n print(allcodes_s.groupby('class').count())\n allcodes_nos = list(allcodes_nos['subcode'])\n allcodes_s = list(allcodes_s['subcode'])\n ################################################ July 8\n print('length before extracting special tables {}'.format(len(servdata)))\n print(servdata.head(1))\n \n # PREPPING INDIVIDUAL METRICS FOR DENOMINATOR and PREPROCESSING\n if metric in ['nonpreop','catpreop']:\n print('creating rCODE column in preop metrics')\n # these metrics need indirect CPTs carried into the preprocessingclaims method\n servdata['rCODE']=0\n \n elif metric in metrics_needing_drugs_as_primary_code_but_still_use_cpts: #narcs and antipsychotics\n # second_data has the relevant cpts\n print('metrics_needing_drugs ...')\n second_data= 
second_data[second_data.rCODE.isin(list(refkey.loc[refkey['class'].isin(['CPT','DRUG']),'subcode']))]\n \n earlydenom = servdata.rename(columns={'EVT_DATE':'TEST_DATE','CODE':'TEST_CODE'},inplace=False)\n earlydenom['TEST_DATE_a']=earlydenom['TEST_DATE'].values.astype('datetime64[D]')\n reportStartDatex=earlydenom.sort_values('TEST_DATE',ascending=True)['TEST_DATE'].iloc[0]\n earlydenom = earlydenom[earlydenom.TEST_DATE_a>=window_date]\n # Set the denominator\n #### 123456\n earlydenom = add_month(earlydenom,'TEST_DATE') \n output_denominator=earlydenom.drop_duplicates(subset=['MRN','TEST_DATE_month']).sort_values(['MRN','TEST_DATE_a'])\n # cut down the remaining records, to only keep those who actually received the medicine in question. \n servdata=servdata[servdata['CODE'].isin(servref.code)] \n\n # for memory needs, drop all the ICD codes that are irrelevant \n # - this MESSES UP the inner merges that come later in preprocessingclaims()\n # - hence these metrics need , but only for those metrics that don't demand a specific diagnosis/ICD in the chart (ie, not demented/migraines)\n print('claimdata columns:')\n print(claimdata.columns.values)\n ##################### July 8\n claimdata = df_claimtrimmer(claimdata, a, allcodes_nos, allcodes_s)\n sizeflag = 1\n #######################\n print('claimdata after code restriction')\n debug13=(servdata,claimdata)\n print(claimdata.head(5))\n # meds metrics are now ready for preprocessingclaims()\n \n elif metric in ['bph','lbp']:\n earlydenom = servdata.rename(columns={'EVT_DATE':'TEST_DATE','CODE':'TEST_CODE'},inplace=False)\n earlydenom['TEST_DATE_a']=earlydenom['TEST_DATE'].values.astype('datetime64[D]')\n\n reportStartDatex=earlydenom.sort_values('TEST_DATE',ascending=True)['TEST_DATE'].iloc[0]\n earlydenom = earlydenom[earlydenom.TEST_DATE_a>=window_date]\n # Set the denominator\n #### 123456\n # should we get rid of TEST_CODE/TEST_DATE_a? \n# output_denominator=earlydenom.drop_duplicates(subset=['MRN','TEST_DATE_a','TEST_CODE']).sort_values(['MRN','TEST_DATE_a'])\n earlydenom = add_month(earlydenom,'TEST_DATE')\n debug13=earlydenom\n output_denominator=earlydenom.drop_duplicates(subset=['MRN','TEST_DATE_month']).sort_values(['MRN','TEST_DATE_a'])\n \n # cut down the remaining records, to only keep those who actually received the imaging in question. 
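Both denominator branches above reduce services to one row per patient per calendar month through the imported add_month helper followed by drop_duplicates on ['MRN','TEST_DATE_month']. add_month is defined elsewhere, so the following is only an approximation of that step; it assumes the helper simply derives a month key from TEST_DATE:

import pandas as pd

def monthly_denominator(services, window_date):
    # services: rows with columns MRN and TEST_DATE (datetime-like)
    # window_date: anything comparable to a pandas datetime column
    out = services.copy()
    out["TEST_DATE_a"] = pd.to_datetime(out["TEST_DATE"]).values.astype("datetime64[D]")
    out = out[out["TEST_DATE_a"] >= window_date]
    # Stand-in for add_month(): a year-month key for every record.
    out["TEST_DATE_month"] = out["TEST_DATE_a"].values.astype("datetime64[M]")
    return (out.drop_duplicates(subset=["MRN", "TEST_DATE_month"])
               .sort_values(["MRN", "TEST_DATE_a"]))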
\n\n servdata=servdata[servdata['CODE'].isin(servref.code)].sort_values('EVT_DATE',ascending=True) \n servdata['rCODE']=0 \n print('servdata after code restriction')\n print(servdata.head(5))\n debug15=servdata\n # bph and lbp are prepped for preprocessingclaims()\n \n elif metric in ['dexa','vitd']:\n # keep track of CPT codes that are used in redflags/indications\n # copy out only relevant CPT codes (redflag/indications) from main servdata dataframe\n print('in the dexa/vitd servdat_x chunk')\n debug5 = servdata\n servdat_x = servdata[['EVT_DATE','CODE','MRN']]\n servdat_x.rename(columns={'EVT_DATE':'rEVT_DATE','CODE':'rCODE', 'MRN':'rMRN'}, inplace=True)\n # this may be wrong\n debug7 = servdat_x\n servdat_x = servdat_x[servdat_x.rCODE.isin(list(refkey.loc[refkey['class'].isin(['CPT','DRUG']),'subcode']))]\n servdata=servdata[servdata['CODE'].isin(servref.code)] \n debug13 = servdat_x\n debug15 = servdata \n\n elif metric == 'feed':\n # feed metric needs this because it relies on procedural ICDs: \n # first, get MRNs with relevant ICD-procedure codes\n \n claimfeedproc = claimdata[claimdata['ICD{}_subcode'.format(a)].isin(servref.code)]\n ##### debug\n debug13=claimfeedproc\n sepMRNclaim = list(claimdata.loc[claimdata['ICD{}_subcode'.format(a)].isin(servref.code),'MRN'])\n sepMRNclaim = np.unique(sepMRNclaim)\n ##### debug\n debug15=sepMRNclaim\n print('number MRNs w/ feeding ICDs: {}'.format(len(sepMRNclaim)))\n # at this point, servdata has all service records of MRNs with dementia, without regard to feeding tube placement\n print('post preprocessing')\n print(len(servdata))\n \n servdata.rename(columns={'EVT_DATE':'TEST_DATE','CODE':'TEST_CODE'},inplace=True)\n servdata['TEST_DATE_a']=servdata['TEST_DATE'].values.astype('datetime64[D]')\n reportStartDatex=servdata.sort_values('TEST_DATE',ascending=True)['TEST_DATE'].iloc[0]\n earlydenom = servdata\n earlydenom = earlydenom[earlydenom.TEST_DATE_a>=window_date]\n # Set the denominator\n # ?? 
premature \n #### 123456\n earlydenom = add_month(earlydenom,'TEST_DATE')\n output_denominator=earlydenom.drop_duplicates(subset=['MRN','TEST_DATE_month']).sort_values(['MRN','TEST_DATE_a'])\n\n # now, select the MRNs w/ dementia that have feeding tube by ICD records\n in_serv = servdata[servdata.MRN.isin(sepMRNclaim)]\n claimfeedproc = claimfeedproc[['MRN','EVT_DATE','ICD{}_subcode'.format(a),'hcclev','ccslev']]\n claimfeedproc.rename(columns={'EVT_DATE':'CLAIM_DATE',\n 'ICD{}_subcode'.format(a):'ICD{}_proccode'.format(a)}, inplace=True)\n claimfeedproc = claimfeedproc[claimfeedproc.CLAIM_DATE>=window_date] \n in_serv = in_serv.merge(claimfeedproc, on='MRN', how='inner')\n ##### debug\n debug17=in_serv\n # limit the merge to single row per ICD{}_proccode\n in_serv = in_serv[in_serv['TEST_DATE_a']>=window_date]\n in_serv = in_serv.drop_duplicates(['MRN', 'CLAIM_DATE', 'ICD{}_proccode'.format(a)])\n \n # of those not included based on ICD-procedure coding, which ones have CPT for feeding tube placement?\n out_serv = servdata[~servdata.MRN.isin(sepMRNclaim)]\n out_serv = out_serv[out_serv['TEST_CODE'].isin(servref.code)] \n out_serv = out_serv[out_serv['TEST_DATE_a']>=window_date]\n ##### debug\n debug19=out_serv\n out_serv = out_serv.drop_duplicates(['MRN','TEST_DATE_a','TEST_CODE']).sort_values(['MRN','TEST_DATE_a'], ascending=True)\n out_serv['based_on'] = 'CPT'\n in_serv['based_on'] = 'ICD'\n # this is a list of all MRNs with dementia who've received a feeding tube:\n servdata = pd.concat([in_serv,out_serv])\n ##### debug\n debug21=servdata\n print('early completion')\n #### 123456\n # caution with this line of code, already performed with in_serv and out_serv\n servdata= add_month(servdata,'TEST_DATE')\n servdata=servdata.drop_duplicates(['MRN','TEST_DATE_a','TEST_CODE','CLAIM_DATE','ICD{}_proccode'.format(a)]).sort_values(['MRN','TEST_DATE_a'], ascending=True)\n output_numerator=servdata\n print('numerator exists, 1 row:')\n print(output_numerator.head(1))\n output = dict(numerator=output_numerator, denominator =output_denominator)\n print('reached end of cycle.')\n print('feed.keys() {}'.format(output.keys()))\n return ('one', debug1,\n 'three', debug3,\n 'five', debug5,\n 'seven', debug7,\n 'nine', debug9,\n 'eleven', debug11,\n 'thirteen', debug13,\n 'fifteen', debug15,\n 'seventeen', debug17,\n 'nineteen', debug19,\n 'twenty1', debug21,\n 'twenty3',debug23,\n 'twenty5',debug25,\n 'twenty7', debug27,\n 'twenty9', debug29,\n 'thirty1', debug31,\n 'thirty3', debug33,\n 'thirty5', debug35,\n 'thirty7', debug37,\n 'thirty9', debug39,\n 'forty1', debug41,\n 'forty3', debug43,\n 'forty5', debug45,\n 'forty7', debug47,\n 'forty9_d', output)\n \n elif metric =='card':\n print('card chunk')\n sizeflag=1\n debug35=servdata\n servdata=servdata[servdata['CODE'].isin(servref.code)].sort_values('EVT_DATE',ascending=True) \n servdata['rCODE']=0\n debug37=servdata\n \n\n elif tempsort_day is False:\n # drop the CPTs that aren't explicitly tied to the metric, for metrics that don't use CPTs in redflags/indications\n print('in tempsort_day False chunk')\n servdata=servdata[servdata['CODE'].isin(servref.code)]\n servdata['rCODE']=0\n \n else: \n print('length: {}'.format(len(servdata)))\n print(servdata.head(1))\n print('last else chunk entered')\n \n # drop the CPTs that aren't explicitly tied to the metric, for metrics that don't use CPTs in redflags/indications\n servdata=servdata[servdata['CODE'].isin(servref.code)].sort_values('EVT_DATE',ascending=True) \n servdata['rCODE']=0\n \n print('length 
of dataframe entering preprocessing:')\n print(len(servdata))\n try:\n print('preview denominator')\n print(output_denominator.head(2))\n except:\n pass\n # =====================================================================================================\n # =====================================================================================================\n # Run the main preprocessing block\n # =====================================================================================================\n # =====================================================================================================\n claimdata = claimdata[['MRN','EVT_DATE','ICD{}_subcode'.format(a),'hcclev','ccslev']]\n if sizeflag == 0:\n try:\n print('attempting sizeflag=0')\n servdata=preprocessingclaims(servdata,claimdata, servref, preopreftab, admsdata, metric, a, tempsort_day, metrics_needing_drugs_as_primary_code_but_still_use_cpts, sizeflag)\n except:\n print('sizeflag exception raised, trying sizeflag =1')\n sizeflag =1 if tempsort_day is False else 0\n ##################################### July 8 17\n claimdata = df_claimtrimmer(claimdata, a, allcodes_nos, allcodes_s)\n #######################\n servdata=preprocessingclaims(servdata,claimdata, servref, preopreftab, admsdata, metric, a, tempsort_day, metrics_needing_drugs_as_primary_code_but_still_use_cpts, sizeflag)\n else:\n print('using sizeflag=1')\n ##################################### July 8 17\n tempx = claimdata\n claimdata = df_claimtrimmer(claimdata, a, allcodes_nos, allcodes_s)\n debug39 = (tempx,claimdata)\n #######################\n servdata=preprocessingclaims(servdata,claimdata, servref, preopreftab, admsdata, metric, a, tempsort_day, metrics_needing_drugs_as_primary_code_but_still_use_cpts, sizeflag)\n\n print('the metrics included in drugs with cpts: {}'.format(metrics_needing_drugs_as_primary_code_but_still_use_cpts))\n \n if metric=='narc':\n ### most recent version:\n print('narc before adms')\n print(servdata.head(2))\n # it is ok to drop these at this point, because denominator set above (unlike preop metrics)\n servdata=servdata[servdata.adms_flag==False] \n print('admission flag just implemented')\n try:\n print(output_denominator.head(2))\n except:\n pass\n else:\n pass\n if metric in metrics_needing_drugs_as_primary_code_but_still_use_cpts:\n print('in second_data chunk')\n debug19=servdata\n servdata=servdata.merge(second_data, left_on='MRN', right_on='rMRN', how='left')\n del second_data\n print('length after second_data {}'.format(len(servdata)))\n servdata.rEVT_DATE.fillna(np.datetime64('1900-01-01'), inplace=True)\n servdata.rCODE.fillna(0, inplace=True)\n print('before TEST_DATE>=rEVT_DATE {}'.format(servdata))\n print(servdata.head(2))\n servdata=servdata[servdata.TEST_DATE>=servdata.rEVT_DATE]\n debug21=servdata\n print('after TEST_DATE>= {}'.format(len(servdata)))\n servdata['dys_difffrom_rCODE']=servdata.TEST_DATE.sub(servdata.rEVT_DATE)/np.timedelta64(1,'D')\n servdata.drop(['rEVT_DATE'], axis=1, inplace=True)\n print('leaving second_data chunk {}'.format(len(servdata)))\n \n elif metric in ['dexa','vitd']:\n debug17=servdata\n servdata=servdata.merge(servdat_x[['rEVT_DATE','rMRN','rCODE']], left_on='MRN', right_on='rMRN', how='left')\n #to limit memory use\n debug19=servdata\n del servdat_x\n servdata.rEVT_DATE.fillna(np.datetime64('1900-01-01'), inplace=True)\n servdata.rCODE.fillna(0, inplace=True)\n # this assumes that only services before service in question should be considered\n 
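For the drug-based metrics, the block above attaches reference CPT/drug events by a plain merge on MRN, then keeps rows where TEST_DATE >= rEVT_DATE and derives a day difference with np.timedelta64(1,'D'); the dexa/vitd branch just below does the same. As an aside rather than the record's own code, pandas.merge_asof expresses the "most recent prior event per patient" lookup directly:

import pandas as pd

def attach_prior_event(services, events):
    # services: [MRN, TEST_DATE]; events: [rMRN, rEVT_DATE, rCODE]; the date
    # columns must already be datetime dtype, and merge_asof needs both
    # frames sorted on their date keys.
    services = services.sort_values("TEST_DATE")
    events = events.sort_values("rEVT_DATE")
    merged = pd.merge_asof(services, events,
                           left_on="TEST_DATE", right_on="rEVT_DATE",
                           left_by="MRN", right_by="rMRN",
                           direction="backward")
    merged["dys_difffrom_rCODE"] = (
        merged["TEST_DATE"] - merged["rEVT_DATE"]) / pd.Timedelta(days=1)
    return merged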
servdata=servdata[servdata.TEST_DATE>=servdata.rEVT_DATE]\n servdata['dys_difffrom_rCODE']=servdata.TEST_DATE.sub(servdata.rEVT_DATE)/np.timedelta64(1,'D')\n print('length of servdata with cpts merged')\n print(len(servdata))\n servdata.drop(['rEVT_DATE'], axis=1, inplace=True)\n debug21 = servdata\n else:\n pass\n\n ##################################################################################################\n ### Post - preprocessing ##\n ##################################################################################################\n if metric in ['nonpreop','catpreop']:\n #only keep the records of testing done w/in tempsort_day(s) of surgery.\n servdata['daysBeforeSurg']=servdata.SURG_DATE.sub(servdata.TEST_DATE) \n debug13=servdata\n servdata=servdata[(servdata.daysBeforeSurg<=timedelta(tempsort_day))]#\n debug15=servdata\n else:\n pass\n \n if metric in metrics_needing_drugs_as_primary_code_but_still_use_cpts: #narcs and antipsychotics\n pass\n elif metric in ['lbp','bph']:\n pass\n elif metric =='feed':\n raise ValueError('feed metric skipped the main processing chunk incorrectly')\n else:\n print('post preprocessing')\n print(len(servdata))\n reportStartDatex=servdata.sort_values('TEST_DATE',ascending=True)['TEST_DATE'].iloc[0]\n # Set the denominator\n servdata['TEST_DATE_a']=servdata['TEST_DATE'].values.astype('datetime64[D]')\n print('before denominator grouping after full preprocessclaims()')\n print(servdata.columns.values)\n output_denominator= add_month(servdata,'TEST_DATE')\n output_denominator= output_denominator.drop_duplicates(subset=['MRN','TEST_DATE_a','TEST_CODE']).sort_values(['MRN','TEST_DATE_a'])\n try:\n print('again, the output_denom:')\n print(output_denominator.head(2))\n except:\n pass \n# ================================================================================================================= \n# Now, determine the numerator (for non-feeding tube metrics)\n# =================================================================================================================\n if dexadata is not None:\n print('dexadata is not None')\n try:\n print(output_denominator.head(3))\n except:\n pass\n print(dexadata.head(1))\n #restrict df to only those MRNs with more than one record:\n debug5=dexadata\n dexadata=dexadata[dexadata.MRN.duplicated(keep=False)]\n # but get rid of duplicate records on same day\n dexadata=dexadata.sort_values(['MRN','EVT_DATE'],ascending=True).drop_duplicates(['MRN','EVT_DATE'])\n debug7=dexadata\n grdex=dexadata.groupby('MRN').apply(lambda x:timeBtwnDex(x,'EVT_DATE'))\n print('grdex: {}'.format(grdex.head(2)))\n grdex=grdex[grdex['since_last']=reportStartDatex])]\n print('servdata.isin(grdex) {}'.format(servdata.head(2)))\n grdex_enc= grdex.dropna(subset=['since_last'], inplace=False)\n debug15=grdex_enc\n print('grdex_enc : {}'.format(grdex_enc.head(2)))\n print('end of dexadata')\n try:\n print(output_denominator.head(3))\n except:\n pass\n #to limit memory use\n del grdex\n servdata=servdata[servdata.ENC_ID.isin(grdex_enc['ENC_ID'])]\n print('servdata after grdex_enc {}'.format(servdata.head(2)))\n #to limit memory use\n del grdex_enc\n else:\n pass\n #REMOVE MRNs Meeting Red Flags, KEEP MRNs using inappropriate indications\n try:\n print('just before exclusions')\n print(servdata.head(2))\n print('...BAR...')\n print(output_denominator.head(3))\n except:\n pass\n ####### new: July 10 2017\n servdata= servdata[servdata.TEST_DATE>=window_date] \n ######################### \n debug31=servdata\n \n 
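timeBtwnDex above is an imported helper; from the way its since_last column is used, it appears to compute the gap between consecutive scans within each patient. A minimal stand-in built from groupby/diff (this is an assumption about the helper, not its actual implementation):

import pandas as pd

def with_days_since_last(dexa, date_col="EVT_DATE"):
    # dexa: one row per scan with columns [MRN, EVT_DATE]
    dexa = dexa.sort_values(["MRN", date_col]).drop_duplicates(["MRN", date_col])
    dexa = dexa[dexa["MRN"].duplicated(keep=False)]   # keep repeat scans only
    dexa["since_last"] = dexa.groupby("MRN")[date_col].diff() / pd.Timedelta(days=1)
    return dexa  # the first scan of each patient keeps since_last == NaN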
servdata=keepindications_removeexclusions(metric,servdata,redflagref,'exclude',lookback, tempsort_day, a, uniqueflagx)\n\n debug33 = servdata\n try:\n print('check a')\n print(servdata.head(2))\n print('....BAR....')\n print(output_denominator.head(3))\n except:\n pass\n \"\"\"\n if len(indicref[indicref['key']==metric])>0:\n servdata=keepindications_removeexclusions(metric,servdata,indicref,'include',lookback, tempsort_day, a, uniqueflagx)\n \"\"\" \n print('check b')\n try:\n print(servdata.head(2))\n print('....BAR....')\n print(output_denominator.head(3))\n \n except:\n pass\n if metric in ['lbp','bph']: # this block of code should come earlier so that rows aren't lost ??\n print('lbp/bph loop started')\n if metric not in ['lbp','bph']:\n print(\"Caution, parsing of numerator for long-established diagnoses is currently applied, but was only developed for lbp and bph metrics.\\n\")\n servdata=servdata[(servdata['dys_difftime_test_code']>=0)]\n print('first cut of difftime applied')\n # Of the remaining in the numerator group, get rid of those entries dependent on longstanding dx that precede the tempsort_day period\n #####################################################33\n # START START START START\n \"\"\"\n serv_culled=servdata[servdata['dys_difftime_test_code']>=tempsort_day][['MRN','TEST_DATE_a','TEST_CODE','CLAIM_DATE',\n 'ICD{}_subcode'.format(a), 'hcclev','ccslev', ]]\n serv_culled.rename(columns={'MRN':'MRN_culled','TEST_DATE_a':'TEST_DATE_culled', 'TEST_CODE':'TEST_CODE_culled',\n 'CLAIM_DATE':'CLAIM_DATE_culled', 'ICD{}_subcode'.format(a):'ICD{}sub_culled'.format(a),\n 'hcclev':'hcclev_culled', 'ccslev':'ccslev_culled'},inplace=True)\n serv_culled['Marker']=0\n #### 123456 --- will need a month column for Colin's output\n #### This is NOT outdated: it identifies the claims data for simple lbp that is long lasting\n indicref_x = indicref\n# serv_culled.loc[serv_culled['TEST_CODE_culled'].isin(indicref_x['subcode'][(indicref_x['class']=='CPT')]), 'Marker'] =1\n serv_culled.loc[serv_culled.ccslev_culled.astype(str).isin(indicref_x['subcode'][indicref_x['class']=='CCS']), 'Marker'] =1\n serv_culled.loc[serv_culled.hcclev_culled.astype(str).isin(indicref_x['subcode'][indicref_x['class']=='HCC']), 'Marker'] =1\n serv_culled.loc[serv_culled['ICD{}sub_culled'.format(a)].isin(indicref_x['subcode'][indicref_x['class']=='ICD']),'Marker'] =1\n serv_culled=serv_culled[serv_culled['Marker']==1] \n servdata=servdata.merge(serv_culled[['MRN_culled','TEST_DATE_culled','TEST_CODE_culled', 'CLAIM_DATE_culled',\n 'hcclev_culled','ccslev_culled','ICD{}sub_culled'.format(a)]], \n left_on = ['MRN','TEST_DATE_a','TEST_CODE'],\n right_on = ['MRN_culled','TEST_DATE_culled','TEST_CODE_culled'],\n how = 'left')\n #to limit memory use\n del serv_culled\n servdata=servdata[servdata['TEST_DATE_culled'].isnull()]\n # ORIGINAL END END END END\n \"\"\"\n # the marker \"2\" is used to ensure no confusion with the marker \"1\"\n print('creating cullmarker')\n servdata['cullmarker']=0\n servdata.loc[servdata['dys_difftime_test_code']>=tempsort_day,'cullmarker']= 2\n servdata_c1 = servdata[servdata.cullmarker==2]\n servdata_c0 = servdata[servdata.cullmarker!=2]\n servdata_c0['cullmarker']=0\n servdata_c1['cullmarker']=0 # reset the longstanding group\n print('culled by difftime')\n # Mark the rows, which have preceding indication (lbp or LUTS complaint) that has been longstanding, with \"1\"\n # . These rows are therefore \"valid\" and not wasteful ---> drop these rows\n # . 
those rows that remain \"0\"'s remain wasteful\n servdata_c1.loc[servdata_c1.ccslev.astype(str).isin(indicref['subcode'][indicref['class']=='CCS']), 'cullmarker'] =1\n servdata_c1.loc[servdata_c1.hcclev.astype(str).isin(indicref['subcode'][indicref['class']=='HCC']), 'cullmarker'] =1\n servdata_c1.loc[servdata_c1['ICD{}_subcode'.format(a)].isin(indicref['subcode'][indicref['class']=='ICD']),'cullmarker'] =1\n debug21=servdata_c1\n servdata_c1 = servdata_c1[servdata_c1.cullmarker!=1]\n print('dropped longstanding cases')\n servdata= pd.concat([servdata_c0,\n servdata_c1])\n ##### NEW END END END END\n \n elif ((metric=='catpreop') or (metric=='nonpreop')):\n ### most recent version:\n debug5=servdata\n servdata=servdata[servdata.adms_flag==False]\n debug7=servdata\n elif metric=='narc':\n ### most recent version:\n print('narc before adms')\n print(servdata.head(2))\n debug15=servdata\n servdata=servdata[servdata.adms_flag==False]\n debug17=servdata\n print('admission flag just implemented')\n try:\n print(output_denominator.head(3))\n except:\n pass\n else:\n pass\n\n # Cut down to unique services and Encounters, don't double count after inner join\n print('check d')\n \n debug29=servdata\n servdata=servdata.drop_duplicates(['MRN','TEST_DATE_a','TEST_CODE']).sort_values(['MRN','TEST_DATE_a'], ascending=True)\n output_numerator = servdata\n output_numerator = add_month(output_numerator,'TEST_DATE')\n print('check e')\n try:\n print(output_denominator.head(3))\n except:\n pass\n try:\n print(output_denominator.head(3))\n except:\n pass\n output = dict(numerator=output_numerator, denominator=output_denominator)\n print(output['numerator'].head(4))\n print('...BAR....')\n print(output['denominator'].head(6))\n\n print('reached end of cycle.')\n print(output.keys())\n return ('one', debug1,\n 'three', debug3,\n 'five', debug5,\n 'seven', debug7,\n 'nine', debug9,\n 'eleven', debug11,\n 'thirteen', debug13,\n 'fifteen', debug15,\n 'seventeen', debug17,\n 'nineteen', debug19,\n 'twenty1', debug21,\n 'twenty3',debug23,\n 'twenty5',debug25,\n 'twenty7', debug27,\n 'twenty9', debug29,\n 'thirty1', debug31,\n 'thirty3', debug33,\n 'thirty5', debug35,\n 'thirty7', debug37,\n 'thirty9', debug39,\n 'forty1', debug41,\n 'forty3', debug43,\n 'forty5', debug45,\n 'forty7', debug47,\n 'forty9_d', output)\n\n###############################################################################################\n\n","sub_path":"Scripts/chooseallwisely_mod_10to10rep.py","file_name":"chooseallwisely_mod_10to10rep.py","file_ext":"py","file_size_in_byte":60875,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"471038039","text":"import cv2 as cv\nimport os\nimport numpy as np\n\ndef bicub(img):\n width = int(img.shape[1] * 2)\n height = int(img.shape[0] * 2)\n dim = (width, height)\n return cv.resize(img, dim, interpolation=cv.INTER_CUBIC)\n\n\ndef sharp(image, is_strong=False):\n if is_strong:\n kernel = np.array([[-1, -1, 0], [-2, 5, -2], [0, 1, 1]], np.float32)\n else:\n kernel = np.array([[0, -1, 0], [-1, 5, -1], [0, -1, 0]], np.float32)\n dst = cv.filter2D(image, -1, kernel=kernel)\n return dst","sub_path":"application/helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"78493509","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\nThis script loads information about semantic relations defined in the AI2D\ndataset from a pandas DataFrame, in order 
to annotate them using Rhetorical\nStructure Theory.\n\nTo continue annotation from a previous session, give the path to the existing\nDataFrame to the -o/--output argument.\n\nUsage:\n python annotate_semantic_relations.py -a annotation.pkl -o output.pkl\n\nArguments:\n -a/--annotation: Path to the pandas DataFrame containing the annotation\n extracted from the AI2D dataset.\n -i/--images: Path to the directory containing the AI2D diagram images.\n -o/--output: Path to the output file, in which the resulting annotation is\n stored.\n\nReturns:\n A pandas DataFrame containing the annotation stored in the input DataFrame\n and the annotation created using this script.\n\"\"\"\n\n# Import packages\nimport argparse\nimport numpy as np\nimport os\nimport pandas as pd\nimport cv2\n\n\n# Start by defining a convenience function for drawing annotation on images.\ndef draw(image, element_type, coords, role):\n \"\"\"\n A function for visualizing the AI2D annotation on the diagram image.\n\n Parameters:\n image: The diagram image to draw on.\n element_type: A string indicating the type of element being drawn.\n Either 'blob', 'arrow' or 'text'.\n coords: A list of coordinates indicating the location of the element.\n role: A string indicating the function of the element, either 'origin'\n or 'destination', depending on whether the relation originates or\n terminates at the element.\n\n Returns:\n Draws the annotation on the diagram image.\n \"\"\"\n # Begin by checking for whether the element to be drawn stands for the\n # origin or the destination. This defines the colour used for drawing.\n if role == 'origin':\n color = (0, 0, 255) # red\n if role == 'destination':\n color = (0, 255, 0) # green\n\n # Next, check the type of the element that is being drawn. Begin with blobs\n # and arrows, which require drawing polygons.\n if element_type in ['blob', 'arrow']:\n\n # Convert the list into a NumPy array and reshape for drawing in OpenCV.\n origin_points = np.array(coords, np.int32)\n origin_points = origin_points.reshape(-1, 1, 2)\n\n # Draw the polygon\n cv2.polylines(image, [origin_points], isClosed=True, color=color,\n thickness=2, lineType=cv2.LINE_AA)\n\n # Then check for rectangles, which are typically used for text boxes.\n if element_type == 'text':\n\n # Get the rectangle for the text block and extract the coordinates for\n # the start and end points.\n rectangle = coords\n start, end = tuple(rectangle[0]), tuple(rectangle[1])\n\n # Draw the rectangle\n cv2.rectangle(image, start, end, color=color, thickness=2,\n lineType=cv2.LINE_AA)\n\n # Finally, check for the so-called image constants in the AI2D annotation,\n # which refer to the entire image.\n if element_type == 'entire_image':\n\n # Get the input image shape\n (height, width) = image.shape[:2]\n\n # Define a rectangle around the entire image. Note that width and height\n # need to be shifted around, because cv2.rectangle requires this order.\n # Use a 5 pixel buffer throughout.\n start, end = (5, 5), (width-5, height-5)\n\n # Draw the rectangle\n cv2.rectangle(image, start, end, color=color, thickness=2,\n lineType=cv2.LINE_AA)\n\n\ndef print_info(question, request):\n \"\"\"\n A function that prints provides additional commands and information during\n the annotation.\n\n Parameters:\n question: A string containing a standard question defined in the\n annotation. 
The question must be one of the following:\n rel_q (RST relation), nuclearity of origin (origin_q)\n or nuclearity of destination (dest_q).\n request: A string indicating the requested information. Valid values\n include 'info' for information and 'rels' for a list of RST\n relations.\n\n Returns:\n A string containing the user's answer to the standard question.\n \"\"\"\n # Print the requested information to the user\n if request == 'info':\n print(info)\n if request == 'rels':\n print(rels)\n # Use input to wait until the user is ready to continue\n input(\"Press Enter to continue ...\")\n # Clear screen\n os.system('cls' if os.name == 'nt' else 'clear')\n # Present the requested standard question\n answer = input(question)\n # Return the answer\n return answer\n\n\n# Define a string providing additional\ninfo = \"\"\"\n Available commands include:\n\n rels: Print a list of RST relations and their descriptions.\n quit: Exit without saving.\n \"\"\"\n\n# Define a string containing information on relations defined by Rhetorical\n# Structure Theory.\nrels = \"\"\"Common RST relations for describing diagrams include:\n \n identification: A short text segment, such as a single noun or a noun \n group, which identifies an entity or its part(s). A \n common example would be a label for a part of an entity.\n elaboration: A more extensive verbal description, such as a clause, \n which provides more specific information about some \n entity or its part(s).\n effect: A generic mononuclear relation for describing processes \n that take place between entities, which are often \n reinforced using lines or arrows. The affected entity \n acts as the nucleus, while the origin of the effect acts\n as the satellite.\n restatement: A multinuclear relation holding between two entities \n that could act as a substitute for each other, such as \n the name of an entity and its visualisation.\n sequence: A multinuclear relation indicating a temporal or spatial \n sequence holding between entities.\n title: A text segment acting as the title for the entire \n diagram or its parts.\n \n \"\"\"\n\n# Define a dictionary of RST relations.\nrel_dict = {'antithesis', 'background', 'circumstance', 'concession',\n 'condition', 'elaboration', 'enablement', 'evaluation',\n 'evidence', 'interpretation', 'justify', 'means', 'motivation',\n 'nonvolitional-cause', 'nonvolitional-result', 'otherwise',\n 'preparation', 'purpose', 'restatement', 'solutionhood',\n 'summary', 'unless', 'volitional-cause', 'volitional-result',\n 'contrast', 'joint', 'list', 'restatement', 'sequence',\n 'identification', 'class-ascription', 'property-ascription',\n 'possession', 'projection', 'effect', 'title', 'none'}\n\n# Define a dictionary of roles for diagram elements\nnuc_dict = {'n', 'nuc', 'nucleus', 's', 'sat', 'satellite'}\n\n# Define standard questions presented to the user\nrel_q = \"Which rhetorical relation best describes the relation between \" \\\n \"the source element (red) and the target element (green)? \"\norigin_q = \"Does the origin element (red) act as a nucleus or a satellite? \"\ndest_q = \"Does the destination element (green) act as a nucleus or a \" \\\n \"satellite? 
\"\n\n# Set up the argument parser\nap = argparse.ArgumentParser()\n\n# Define arguments\nap.add_argument(\"-a\", \"--annotation\", required=True)\nap.add_argument(\"-i\", \"--images\", required=True)\nap.add_argument(\"-o\", \"--output\", required=True)\n\n# Parse arguments\nargs = vars(ap.parse_args())\n\n# Assign arguments to variables\nann_path = args['annotation']\nimages_path = args['images']\noutput_path = args['output']\n\n# Check if the output file exists already, or whether to continue with previous\n# annotation.\nif os.path.isfile(output_path):\n annotation_df = pd.read_pickle(output_path)\n\n# Otherwise, read the annotation from the input DataFrame and create new columns\nif not os.path.isfile(output_path):\n annotation_df = pd.read_pickle(ann_path)\n\n # Create a new column to hold the RST annotation and nuclearity information\n annotation_df['rst_relation'] = None\n annotation_df['origin_role'] = None\n annotation_df['destination_role'] = None\n\n# Begin looping over the rows of the input DataFrame. Enumerate the result to\n# show annotation progress to the user.\nfor i, (ix, row) in enumerate(annotation_df.iterrows()):\n\n # Check that no annotation exists for the current row\n if row['rst_relation'] is None:\n\n # Extract the filename of the diagram image by splitting the string at\n # .json and taking the first item in the list, which is the image name.\n image_filename = row['filename'].split('.json')[0]\n\n # Define path to diagram image\n image_path = os.path.join(images_path, image_filename)\n\n # Read the image using OpenCV\n image = cv2.imread(image_path)\n\n # Begin extracting information about the semantic relation, starting\n # with the origin and destination.\n origin = row['origin']\n destination = row['destination']\n\n # Extract information on the position of the diagram elements that take\n # part in the relation. This information is contained in a dictionary\n # in the 'polygons' column.\n polygons = row['polygons']\n\n # Use the 'draw' function to draw the origin.\n draw(image, polygons[origin]['type'], polygons[origin]['coords'],\n 'origin')\n\n # Use the 'draw' function to draw the destination.\n draw(image, polygons[destination]['type'],\n polygons[destination]['coords'], 'destination')\n\n # Resize the image for better visualization. 
Begin by calculating aspect\n # ratio (target width / current width) and new width of the image.\n ratio = 800.0 / image.shape[1]\n size = (800, int(image.shape[0] * ratio))\n\n # Resize the preview image\n preview = cv2.resize(image, size, interpolation=cv2.INTER_AREA)\n\n # Show the image\n cv2.imshow(\"{} - {}/{}\".format(image_filename, i+1, len(annotation_df)),\n preview)\n\n # Print status to user.\n print(\"Press any key in the window displaying the image to continue.\")\n\n # Wait for user input\n cv2.waitKey()\n\n # Begin the annotation by clearing the screen\n os.system('cls' if os.name == 'nt' else 'clear')\n\n # Request the user to determine the relationship between the elements by\n # presenting the standard question about the relationship (rel_q)\n rel = input(rel_q)\n\n # Check that the input entered by the user is a relation or a request\n # for more information.\n while rel not in rel_dict:\n # If the user requests additional information on the available\n # commands, print the information.\n if rel == 'info':\n # Print available commands and present the question again\n rel = print_info(rel_q, 'info')\n if rel == 'rels':\n # Print information on RST relations and present the question\n rel = print_info(rel_q, 'rels')\n # If requested, exit the program\n if rel == 'quit':\n print(\"Quitting.\")\n exit()\n # If the input is not a valid relation or a request for more info,\n # then ignore the input and present the standard question again\n else:\n print(\"Sorry, that is not a valid relation.\")\n rel = input(rel_q)\n\n # Append the RST annotation to the DataFrame at the current index\n annotation_df.at[ix, 'rst_relation'] = rel\n\n # If the RST relation was annotated as 'none', set both values for\n # nuclearity also to 'none' and continue\n if rel == 'none':\n annotation_df.at[ix, 'origin_role'] = 'none'\n annotation_df.at[ix, 'destination_role'] = 'none'\n # Close the window\n cv2.destroyAllWindows()\n continue\n\n # Then ask the standard question about nuclei and satellites. Begin by\n # clearing the screen, then pose the question for the origin.\n os.system('cls' if os.name == 'nt' else 'clear')\n origin_role = input(origin_q)\n\n # Again, check that the input is valid, either 'nucleus', 'satellite' or\n # their valid abbreviation ('nuc' or 'sat').\n while origin_role not in nuc_dict:\n # If the user requests additional information, print the information\n # and present the question again.\n if origin_role == 'info':\n origin_role = print_info(origin_q, 'info')\n # If the input is not valid, print a message and repeat question\n else:\n print(\"Sorry, that is not a valid role for nuclearity.\")\n origin_role = input(origin_q)\n\n # When a valid entry is entered, enter the value to the DataFrame\n if origin_role in ['n', 'nuc', 'nucleus']:\n annotation_df.at[ix, 'origin_role'] = 'nucleus'\n if origin_role in ['s', 'sat', 'satellite']:\n annotation_df.at[ix, 'origin_role'] = 'satellite'\n\n # Next, do the same for the destination element. 
Clear screen first.\n os.system('cls' if os.name == 'nt' else 'clear')\n dest_role = input(dest_q)\n\n # Check that the input is valid\n while dest_role not in nuc_dict:\n # If additional information is requested, print and repeat question\n if dest_role == 'info':\n dest_role = print_info(dest_q, 'info')\n # If the input is not valid, print a message and repeat question\n else:\n print(\"Sorry, that is not a valid role for nuclearity.\")\n dest_role = input(dest_q)\n\n # When a valid entries have been entered, enter their values into to the\n # DataFrame.\n if origin_role in ['n', 'nuc', 'nucleus']:\n annotation_df.at[ix, 'origin_role'] = 'nucleus'\n if origin_role in ['s', 'sat', 'satellite']:\n annotation_df.at[ix, 'origin_role'] = 'satellite'\n if dest_role in ['n', 'nuc', 'nucleus']:\n annotation_df.at[ix, 'destination_role'] = 'nucleus'\n if dest_role in ['s', 'sat', 'satellite']:\n annotation_df.at[ix, 'destination_role'] = 'satellite'\n\n # Close the window\n cv2.destroyAllWindows()\n\n # Save the DataFrame to disk to save the annotation\n annotation_df.to_pickle(output_path)\n","sub_path":"utils/annotate_semantic_relations.py","file_name":"annotate_semantic_relations.py","file_ext":"py","file_size_in_byte":14864,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"505300571","text":"import os\nimport sys\nsys.path.append(os.path.dirname(os.path.dirname(__file__)))\nfrom page_objects.page_ovirt_dashboard import OvirtDashboardPage\n\n\nclass TestOvirtDashboard(OvirtDashboardPage):\n \"\"\"\n :avocado: enable\n :avocado: tags=ovirt_dashboard\n \"\"\"\n\n def test_health_status(self):\n nodectl_check = self.nodectl_check_on_host()\n expected_status = nodectl_check['status']\n expected_icon = self.gen_icon_from_status(expected_status)\n status_on_ui = self.get_health_text()\n icon_on_ui = self.get_health_icon()\n self.assertEqual(status_on_ui, expected_status)\n self.assertIn(status_on_ui, icon_on_ui)\n\n def test_node_health(self):\n def check_icons(dict_a):\n for key, value in dict_a.items():\n expected_name = self.gen_expected_name_from_nodectl_check(key)\n expected_icon = self.gen_expected_icon_from_nodectl_check(\n value)\n icon_on_ui = self.get_item_icon_on_node_health(\n expected_name)\n self.assertIn(expected_icon, icon_on_ui)\n if not isinstance(value, dict) or len(value) <= 1:\n continue\n self.open_item_on_node_health(expected_name)\n value.pop('status')\n check_icons(value)\n\n nodectl_check = self.nodectl_check_on_host()\n self.open_node_health_window()\n check_icons(nodectl_check)\n\n def test_current_layer(self):\n nodectl_info = self.nodectl_info_on_host()\n expected_current_layer = nodectl_info['current_layer']\n current_layer_on_ui = self.get_current_layer_text()\n self.assertEqual(expected_current_layer, current_layer_on_ui)\n\n def test_node_information(self):\n global mem\n mem = None\n\n def check_contents(dict_a):\n global mem\n for key, value in dict_a.items():\n if not isinstance(value, dict):\n text = self.get_arg_value_on_node_info(key)\n self.assertEqual(value, text)\n continue\n if 'rhvh' in key:\n if mem:\n self.toggle_item_on_node_info(mem)\n mem = key\n self.toggle_item_on_node_info(key)\n if key == \"layers\":\n for k, v in value.items():\n text = self.get_layer_on_node_info(k)\n self.assertIn(v[0], text)\n continue\n check_contents(value)\n\n nodectl_info = self.nodectl_info_on_host()\n self.open_node_information_window()\n check_contents(nodectl_info)\n\n def test_rollback(self):\n nodectl_info = 
self.nodectl_info_on_host()\n current_layer = nodectl_info['current_layer']\n layers = nodectl_info['layers'].values()\n available_layer = None\n self.open_rollback_window()\n for layer in layers:\n rollback_attr = self.get_rollback_attr_on_layer(layer[0])\n if layer[0] != current_layer:\n self.assertNotIn('disabled', rollback_attr)\n available_layer = layer[0]\n else:\n self.assertIn('disabled', rollback_attr)\n if available_layer:\n self.execute_rollback_on_layer(available_layer)\n self.assert_element_visible(self.ROLLBACK_ALERT % available_layer)\n\n def test_network_info_link(self):\n self.open_network_info_link()\n self.assertIn('Networking', self.get_title())\n\n def test_system_log_link(self):\n self.open_system_logs_link()\n self.assertIn('Logs', self.get_title())\n\n def test_storage_link(self):\n self.open_storage_link()\n self.assertIn('Storage', self.get_title())\n\n def test_ssh_host_key_link(self):\n ssh_key_on_host = self.get_ssh_key_on_host()\n ssh_key_on_ui = self.get_ssh_key_on_page()\n self.assertEqual(ssh_key_on_host, ssh_key_on_ui)\n","sub_path":"test_suites/test_ovirt_dashboard.py","file_name":"test_ovirt_dashboard.py","file_ext":"py","file_size_in_byte":4027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"544112951","text":"from argparse import ArgumentParser\nfrom random import gauss\nfrom time import sleep\n\nfrom requests import Session\n\n\ndef main(classification):\n data = (\n [0.03, 0.0506801187398187, -0.002, -0.01, 0.04, 0.01, 0.08, -0.04, 0.005, -0.1]\n if not classification\n else [6.1, 2.8, 4.7, 1.2]\n )\n with Session() as s:\n for i in range(60):\n data[0] = gauss(6, 2) if classification else gauss(0, 0.05)\n resp = s.post(\"http://localhost:5000\", json=data)\n resp.raise_for_status()\n if i % 10 == 0:\n print(f\"response[{i}]: {resp.json()}\")\n sleep(0.1)\n\n\nif __name__ == \"__main__\":\n parser = ArgumentParser(description=\"Generate inference workload.\")\n parser.add_argument(\"-c\", \"--classification\", action=\"store_true\")\n args = parser.parse_args()\n main(args.classification)\n","sub_path":"examples/grafana-prometheus/metrics/load.py","file_name":"load.py","file_ext":"py","file_size_in_byte":875,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"557321237","text":"# Copyright (c) 2018, IBM.\n#\n# This source code is licensed under the Apache License, Version 2.0 found in\n# the LICENSE.txt file in the root directory of this source tree.\n\nfrom qiskit import register, available_backends, get_backend\nfrom IBMQuantumExperience import IBMQuantumExperience\nimport argparse\nimport json\nimport warnings\nfrom multiprocessing import Pool\n\nPUBLIC_NAMES = {\n 'ibmq_20_tokyo': 'IBM Q 20 Tokyo',\n 'QS1_1': 'IBM Q 20 Austin',\n 'ibmqx5': 'IBM Q 16 Rueschlikon',\n 'ibmq_16_rueschlikon': 'IBM Q 16 Rueschlikon',\n 'ibmqx4': 'IBM Q 5 Tenerife',\n 'ibmq_5_tenerife': 'IBM Q 5 Tenerife',\n 'ibmqx2': 'IBM Q 5 Yorktown',\n 'ibmq_5_yorktown': 'IBM Q 5 Yorktown',\n 'ibmq_qasm_simulator': 'IBM Q QASM Simulator'\n}\n\n\ndef main():\n warnings.simplefilter('ignore')\n\n parser = argparse.ArgumentParser()\n\n parser.add_argument('--apiToken')\n parser.add_argument('--url', nargs='?',\n default='https://quantumexperience.ng.bluemix.net/api')\n parser.add_argument('--hub', nargs='?', default=None)\n parser.add_argument('--group', nargs='?', default=None)\n parser.add_argument('--project', nargs='?', default=None)\n parser.add_argument('--status', 
default=False)\n\n args = vars(parser.parse_args())\n\n if (args['url'] is None):\n args['url'] = 'https://quantumexperience.ng.bluemix.net/api'\n\n if (args['hub'] is None) or (args['group'] is None) or (args['project'] is None):\n register(args['apiToken'], args['url'])\n else:\n register(args['apiToken'], args['url'], args['hub'],\n args['group'], args['project'])\n\n backs = available_backends({'local': False})\n\n if str(args['status']) == \"True\":\n statusDevices = []\n for back in backs:\n fullInfoBack = createDeviceStatus(back)\n statusDevices.append(fullInfoBack)\n print(json.dumps(statusDevices, indent=2, sort_keys=True))\n else:\n print(json.dumps(backs, indent=2, sort_keys=True))\n\n\ndef createDeviceStatus(back):\n return {\n 'name': PUBLIC_NAMES[back],\n 'status': parseBackendStatus(get_backend(back).status)\n }\n\n\ndef parseBackendStatus(backendStatus):\n return {\n 'name': backendStatus['name'],\n 'pending_jobs': backendStatus['pending_jobs'],\n 'available': parseAvailability(backendStatus)\n }\n\n\ndef parseAvailability(backendStatus):\n try:\n return backendStatus['available']\n except KeyError:\n return backendStatus['operational']\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"client/resources/qiskitScripts/listRemoteBackends.py","file_name":"listRemoteBackends.py","file_ext":"py","file_size_in_byte":2559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"18432357","text":"#Option Num = 2\n#Parse from XML to JSON\n#Понедельник\n\n#Выполнил студент группы P3115 Бусыгин Дмитрий\n\nimport time\n\nstart_time = time.time()\n\n\ndef has_only_open_tag(str):\n return (str.find('<') != -1 and str.find('') + 1:str.find('')]\n\n\ndef find_repitable_tags(str):\n repitable_tags = {}\n for i in range(len(str)):\n tag_name = get_tag(str[i]).strip().strip()\n if str.count(str[i]) != 1 and '')]\n tabs -= 1\n if tag_name in used_repitable_tags and repitable_tags[tag_name] == 0:\n json_line = ' ' * tabs + '} \\n'\n tabs -= 1\n json_line += ' ' * tabs + ']'\n else:\n if tag_name in repitable_tags or (i != len(xml_lines) - 1 and (has_only_open_tag(xml_lines[i+1]) or is_content_in_line(xml_lines[i+1]))):\n json_line = ' ' * tabs + '},'\n else:\n json_line = ' ' * tabs + '}'\n json_lines.append(json_line)\n\n#собираем в единое целое строки json\njson_lines.append('}')\nfor elem in json_lines:\n json_file.write(elem + '\\n')\n\n#закрываем открытые файлы\njson_file.close()\nxml_file.close()\n\ntime_file = open('docs/time.txt', 'a')\nprint('Десятикратное время выполнения программы, в которой не использовались библиотеки', file=time_file)\nprint(\"--- %s seconds ---\" % ((time.time() - start_time)*10), file=time_file)\ntime_file.close()","sub_path":"Main_task/ParserWithoutLibraries.py","file_name":"ParserWithoutLibraries.py","file_ext":"py","file_size_in_byte":5048,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"285794908","text":"#\n# For licensing see accompanying LICENSE file.\n# Copyright (C) 2020 Apple Inc. 
All rights reserved.\n#\n'''Train CIFAR10 with PyTorch.'''\n# import os\n# os.environ['CUDA_LAUNCH_BLOCKING'] = \"1\"\n\n# Num epochs=600, lr scheduler after every 100 epochs\n\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.nn.functional as F\nimport torch.backends.cudnn as cudnn\n\nimport torchvision\nimport torchvision.transforms as transforms\n\nimport os\nimport argparse\n\nfrom src import capsule_model_amp as capsule_model\nfrom utils import progress_bar\nimport pickle\nimport json\n\nfrom datetime import datetime\n\nfrom utils import seed_torch\ntry:\n from apex import amp\n APEX_AVAILABLE = True\nexcept ModuleNotFoundError:\n APEX_AVAILABLE = False\n\n\n# +\nparser = argparse.ArgumentParser(description='Training Capsules using Inverted Dot-Product Attention Routing')\n\nparser.add_argument('--resume_dir', '-r', default='', type=str, help='dir where we resume from checkpoint')\nparser.add_argument('--num_routing', default=2, type=int, help='number of routing. Recommended: 0,1,2,3.')\nparser.add_argument('--dataset', default='CIFAR100', type=str, help='dataset. CIFAR10 or CIFAR100.')\nparser.add_argument('--backbone', default='resnet', type=str, help='type of backbone. simple or resnet')\nparser.add_argument('--num_workers', default=2, type=int, help='number of workers. 0 or 2')\nparser.add_argument('--config_path', default='./configs/resnet_backbone_CIFAR100_capsdim1024.json', type=str, help='path of the config')\nparser.add_argument('--debug', action='store_true',\n help='use debug mode (without saving to a directory)')\nparser.add_argument('--sequential_routing', action='store_true', help='not using concurrent_routing')\n\nparser.add_argument('--train_bs', default=64, type=int, help='Batch Size for train')\nparser.add_argument('--test_bs', default=100, type=int, help='Batch Size for test')\nparser.add_argument('--seed', default=12345, type=int, help='Random seed value')\n\nparser.add_argument('--accumulation_steps', default=2, type=float, help='Number of gradeitn accumulation steps')\nparser.add_argument('--lr', default=0.1, type=float, help='learning rate: 0.1 for SGD')\nparser.add_argument('--dp', default=0.0, type=float, help='dropout rate')\nparser.add_argument('--weight_decay', default=5e-4, type=float, help='weight decay')\nparser.add_argument('--total_epochs', default=400, type=int, help='Total epochs for training')\nparser.add_argument('--model', default='sinkhorn', type=str, help='default or sinkhorn')\nparser.add_argument('--use_amp', default=False, type=bool, help='True or False')\nparser.add_argument('--opt-level', default='O1', type=str, help='Opt level of AMP')\n\n\n\n# parser.add_argument('--save_dir', default='CIFAR10', type=str, help='dir to save results')\n\n# -\n\n\nargs = parser.parse_args()\nassert args.num_routing > 0\naccumulation_steps=args.accumulation_steps\nseed_torch(args.seed)\n\ndevice = 'cuda' if torch.cuda.is_available() else 'cpu'\n\nbest_acc = 0 # best test accuracy\nstart_epoch = 0 # start from epoch 0 or last checkpoint epoch\nuse_amp = args.use_amp\n# Data\nprint('==> Preparing data..')\nassert args.dataset == 'CIFAR10' or args.dataset == 'CIFAR100'\ntransform_train = transforms.Compose([\n transforms.RandomCrop(32, padding=4),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),\n ])\ntransform_test = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),\n 
])\n\ntrainset = getattr(torchvision.datasets, args.dataset)(root='../data', train=True, download=True, transform=transform_train)\ntestset = getattr(torchvision.datasets, args.dataset)(root='../data', train=False, download=True, transform=transform_test)\nnum_class = int(args.dataset.split('CIFAR')[1])\n\ntrainloader = torch.utils.data.DataLoader(trainset, batch_size=args.train_bs, shuffle=True, num_workers=args.num_workers)\ntestloader = torch.utils.data.DataLoader(testset, batch_size=args.test_bs, shuffle=False, num_workers=args.num_workers)\n\nprint('==> Building model..')\n\n# Model parameters\n# CIFAR Image size\nimage_dim_size = 32 \n\nwith open(args.config_path, 'rb') as file:\n params = json.load(file)\n\nprint(params)\nif args.model=='default':\n net = capsule_model.CapsModel(image_dim_size,\n params,\n args.dataset,\n args.backbone,\n args.dp,\n args.num_routing,\n sequential_routing=args.sequential_routing,\n seed = args.seed)\nelif args.model=='sinkhorn':\n net = capsule_model.CapsSAModel(image_dim_size,\n params,\n args.dataset,\n args.backbone,\n args.dp,\n args.num_routing,\n sequential_routing=args.sequential_routing,\n seed = args.seed)\n\nelif args.model=='BilinearRandomInit':\n net = capsule_model.CapsRandomInitBAModel(image_dim_size,\n params,\n args.dataset,\n args.backbone,\n args.dp,\n args.num_routing,\n sequential_routing=args.sequential_routing,\n seed = args.seed)\n\n\nelif args.model=='bilinear':\n net = capsule_model.CapsBAModel(image_dim_size,\n params,\n args.dataset,\n args.backbone,\n args.dp,\n args.num_routing,\n sequential_routing=args.sequential_routing,\n seed = args.seed)\n\nelif args.model=='HintonDynamic':\n print(\"Using Sara Sabour's Dynamic Routing\")\n assert args.sequential_routing == True\n net = capsule_model.CapsDRModel(image_dim_size,\n params,\n args.dataset,\n args.backbone,\n args.dp,\n args.num_routing,\n sequential_routing=args.sequential_routing,\n seed = args.seed)\n\nelif args.model=='DynamicBilinear':\n assert args.sequential_routing == True\n net = capsule_model.CapsDBAModel(image_dim_size,\n params,\n args.dataset,\n args.backbone,\n args.dp,\n args.num_routing,\n sequential_routing=args.sequential_routing,\n seed = args.seed)\n \nelif args.model=='MultiHeadBilinear':\n net = capsule_model.CapsMultiHeadBAModel(image_dim_size,\n params,\n args.dataset,\n args.backbone,\n args.dp,\n args.num_routing,\n multi_transforms = args.multi_transforms,\n sequential_routing=args.sequential_routing,\n seed = args.seed)\n\n\nif args.model=='LocalLinformer':\n net = capsule_model.CapsBilinearLocalLinformer(image_dim_size,\n params,\n args.dataset,\n args.backbone,\n args.dp,\n args.num_routing,\n multi_transforms = args.multi_transforms,\n kernel_transformation = args.kernel_transformation,\n sequential_routing=args.sequential_routing,\n seed = args.seed)\n\n\nif args.model=='MultiHeadLocalLinformer':\n net = capsule_model.CapsMultiHeadBilinearLocalLinformer(image_dim_size,\n params,\n args.dataset,\n args.backbone,\n args.dp,\n args.num_routing,\n kernel_transformation = args.kernel_transformation,\n sequential_routing=args.sequential_routing,\n seed = args.seed)\n\n\nif args.model=='GlobalLinformer':\n net = capsule_model.CapsBilinearGlobalLinformerModel(image_dim_size,\n params,\n args.dataset,\n args.backbone,\n args.dp,\n args.num_routing,\n sequential_routing=args.sequential_routing,\n seed = args.seed)\n\nelif args.model=='resnet18':\n net = torchvision.models.resnet18(pretrained=True) \n num_ftrs = net.fc.in_features\n net.fc = 
nn.Linear(num_ftrs, num_class)\n\n# +\noptimizer = optim.SGD(net.parameters(), lr=args.lr, momentum=0.9, weight_decay=args.weight_decay)\nlr_decay = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[150, 250, 350], gamma=0.1)\n# lr_decay = torch.optim.lr_scheduler.StepLR(optimizer, step_size=100, last_epoch=-1)\n\n\n\n\n# -\ndef count_parameters(model):\n for name, param in model.named_parameters():\n if param.requires_grad:\n # .numel() returns total number of elements\n print(name, param.numel())\n return sum(p.numel() for p in model.parameters() if p.requires_grad)\n\n\nprint(net)\ntotal_params = count_parameters(net)\n# print(\"Total model paramters: \",total_params)\n\n\n# Get configuration info\ncapsdim = args.config_path.split('capsdim')[1].split(\".\")[0] if 'capsdim' in args.config_path else 'normal'\nprint(capsdim)\n\n\nsave_dir_name = 'model_' + str(args.model)+ '_dataset_' + str(args.dataset) + '_batch_' +str(args.train_bs)+'_acc_'+str(args.accumulation_steps) + '_epochs_'+ str(args.total_epochs)+'_num_routing_' + str(args.num_routing) + '_backbone_' + args.backbone + '_config_'+capsdim + '_amp_'+str(use_amp)+'_opt_'+str(args.opt_level)\nif not os.path.isdir('results') and not args.debug:\n os.mkdir('results')\nif not args.debug:\n # store_dir = os.path.join('results', args.save_dir+'_'+datetime.today().strftime('%Y-%m-%d-%H-%M-%S'))\n store_dir = os.path.join('results', save_dir_name) \nif not os.path.isdir(store_dir) : \n os.mkdir(store_dir)\n\nnet = net.to(device)\n\n# Use AMP Library\nif(use_amp):\n assert APEX_AVAILABLE==True\n print(\"Initialising Apex Mixed Precision Model and Optimizer\")\n net, optimizer = amp.initialize(\n net, optimizer, opt_level=args.opt_level, \n keep_batchnorm_fp32=None, loss_scale=\"dynamic\"\n )\n\nif device == 'cuda' and use_amp==False:\n # Multi GPU Data Parallelization\n net = torch.nn.DataParallel(net)\n cudnn.benchmark = True\n# else:\n\n\n\nloss_func = nn.CrossEntropyLoss()\n\n\n\n\nif args.resume_dir and not args.debug:\n # Load checkpoint.\n print('==> Resuming from checkpoint..')\n checkpoint = torch.load(os.path.join(args.resume_dir, 'ckpt_replica2.pth'))\n net.load_state_dict(checkpoint['net'])\n best_acc = checkpoint['acc']\n start_epoch = checkpoint['epoch']\n\n\ndef train(epoch):\n print(\"TRAINING WITH GRADIENT ACCUMULATION\")\n global accumulation_steps\n print('\\nEpoch: %d' % epoch)\n net.train()\n train_loss = 0\n correct = 0\n total = 0\n optimizer.zero_grad()\n for batch_idx, (inputs, targets) in enumerate(trainloader):\n inputs = inputs.to(device)\n targets = targets.to(device)\n \n v = net(inputs)\n loss = loss_func(v, targets)\n loss = loss / accumulation_steps\n\n if use_amp:\n assert APEX_AVAILABLE==True\n with amp.scale_loss(loss, optimizer) as scaled_loss:\n scaled_loss.backward()\n else:\n loss.backward()\n\n if (batch_idx+1) % accumulation_steps == 0: \n # print(\"Performed Gradient update\") \n optimizer.step()\n optimizer.zero_grad()\n\n # optimizer.step()\n\n train_loss += loss.item()\n _, predicted = v.max(dim=1) \n total += targets.size(0)\n correct += predicted.eq(targets).sum().item()\n progress_bar(batch_idx, len(trainloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'\n % (train_loss/(batch_idx+1), 100.*correct/total, correct, total))\n return 100.*correct/total\n\n\n# Training\ndef train_justgradacc(epoch):\n print(\"TRAINING WITH GRADIENT ACCUMULATION\")\n global accumulation_steps\n print('\\nEpoch: %d' % epoch)\n net.train()\n train_loss = 0\n correct = 0\n total = 0\n optimizer.zero_grad()\n for 
batch_idx, (inputs, targets) in enumerate(trainloader):\n inputs = inputs.to(device)\n targets = targets.to(device)\n \n v = net(inputs)\n loss = loss_func(v, targets)\n loss = loss / accumulation_steps\n\n loss.backward()\n\n if (batch_idx+1) % accumulation_steps == 0: \n # print(\"Performed Gradient update\") \n optimizer.step()\n optimizer.zero_grad()\n\n # optimizer.step()\n\n train_loss += loss.item()\n _, predicted = v.max(dim=1) \n total += targets.size(0)\n correct += predicted.eq(targets).sum().item()\n progress_bar(batch_idx, len(trainloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'\n % (train_loss/(batch_idx+1), 100.*correct/total, correct, total))\n return 100.*correct/total\n\ndef train_withoutgradacc(epoch):\n print('\\nEpoch: %d' % epoch)\n net.train()\n train_loss = 0\n correct = 0\n total = 0\n for batch_idx, (inputs, targets) in enumerate(trainloader):\n inputs = inputs.to(device)\n targets = targets.to(device)\n optimizer.zero_grad()\n v = net(inputs)\n loss = loss_func(v, targets)\n loss.backward()\n optimizer.step()\n\n train_loss += loss.item()\n _, predicted = v.max(dim=1) \n total += targets.size(0)\n correct += predicted.eq(targets).sum().item()\n progress_bar(batch_idx, len(trainloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'\n % (train_loss/(batch_idx+1), 100.*correct/total, correct, total))\n return 100.*correct/total\n\ndef test(epoch):\n global best_acc\n net.eval()\n test_loss = 0\n correct = 0\n total = 0\n with torch.no_grad():\n for batch_idx, (inputs, targets) in enumerate(testloader):\n inputs = inputs.to(device)\n targets = targets.to(device)\n v = net(inputs)\n loss = loss_func(v, targets)\n test_loss += loss.item()\n _, predicted = v.max(dim=1)\n total += targets.size(0)\n correct += predicted.eq(targets).sum().item()\n progress_bar(batch_idx, len(testloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'\n % (test_loss/(batch_idx+1), 100.*correct/total, correct, total))\n\n # Save checkpoint.\n acc = 100.*correct/total\n if acc > best_acc:\n print('Saving..')\n if(use_amp):\n state = {\n 'net': net.state_dict(),\n 'optimizer' : optimizer.state_dict(),\n 'acc': acc,\n 'epoch': epoch,\n 'amp': amp.state_dict()\n }\n torch.save(state, os.path.join(store_dir, 'amp_ckpt_replica2.pth'))\n else: \n state = {\n 'net': net.state_dict(),\n 'optimizer' : optimizer.state_dict(),\n 'acc': acc,\n 'epoch': epoch,\n }\n torch.save(state, os.path.join(store_dir, 'ckpt_replica2.pth'))\n best_acc = acc\n return 100.*correct/total\n\n# +\nresults = {\n 'total_params': total_params,\n 'args': args,\n 'params': params,\n 'train_acc': [],\n 'test_acc': [],\n}\n\ntotal_epochs = args.total_epochs\nif not args.debug: \n store_file = os.path.join(store_dir, 'debug_replica2.dct')\n\nfor epoch in range(start_epoch, start_epoch+total_epochs):\n results['train_acc'].append(train(epoch))\n lr_decay.step()\n results['test_acc'].append(test(epoch))\n pickle.dump(results, open(store_file, 'wb'))\n# -\n\n\n\n \n","sub_path":"main_MixedPrecision.py","file_name":"main_MixedPrecision.py","file_ext":"py","file_size_in_byte":16116,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"549626707","text":"#!/usr/bin/env python\nimport cv2 as cv\n#from cv2 import aruco as aruco\nimport cv2.aruco as aruco\n#import rospy\n#from std_msgs.msg import String\n#from PIL import Image\n#import matplotlib.pyplot as plt\nimport numpy as np\n#from sensor_msgs.msg import Image as sensImg\n#from sensor_msgs.msg import CompressedImage as CsensImg\n#frofm sensor_msgs.msg import 
PointCloud2 as sensPCld\n#from std_msgs.msg import Float64MultiArray\n\n\n\n#################### COSMETHIC FUNCTIONS ############################\n\ndef IdOverAruco(id, corners, QueryImg):\n font = cv.FONT_HERSHEY_SIMPLEX\n\n cv.fillPoly(QueryImg, corners.astype(int), (230, 230, 230))\n\n (textX, textY )= np.abs(corners[0][0] + corners[0][2]) / 2\n textsizeX, textsizeY = cv.getTextSize(str(id[0]), font, 1, 3)[0]\n textX = (textX - textsizeX / 2).astype(int)\n textY = (textY + textsizeY / 2).astype(int)\n cv.putText(QueryImg, str(id[0]), (textX, textY), font, 1, (0, 0, 0), 2)\n\n return QueryImg\n\ndef distOverAruco(distanceMarker, corners, queryImg):\n font = cv.FONT_HERSHEY_SIMPLEX\n\n cv.fillPoly(queryImg, corners.astype(int), (230, 230, 230)) \n (textX, textY) = np.abs(corners[0][0] + corners[0][2]) / 2 \n textsizeX, textsizeY = cv.getTextSize(str(distanceMarker), font, 1, 3)[0] \n textX = (textX - textsizeX / 2).astype(int)\n textY = (textY + textsizeY / 2).astype(int) \n cv.putText(queryImg, str(distanceMarker), (textX, textY), font, 1, (255, 0,255), 2)\n return queryImg\n\n\n#def findRectangles(imgRect, idsR, cornersR):\n# debug = 0\n# # Grayscale image is requested for contour recognition\n# imgRectGray = cv.cvtColor(imgRect, cv.COLOR_BGR2GRAY)\n#\n# # Check if at least one marker has been found\n# if idsR is None or len(idsR) == 0:\n# # If no marker detected, exit\n# print(\"No marker detected!\")\n# return None\n#\n# # Print found arucos\n# if debug:\n# for i, corner in zip(idsR, cornersR):\n# print('Detected aruco with ID: {}.'.format(i[0]))\n#\n# #======== Find contours in image ========\n# \n# # The \"findContours\" function nedd a binary image, so need to threeshold before\n# #ret, imgThresh = cv2.threshold(imgRectGray, 127, 255, 0)\n# imgThresh = cv.adaptiveThreshold(imgRectGray, 255, cv.ADAPTIVE_THRESH_GAUSSIAN_C, cv.THRESH_BINARY, 51, 2)\n# contours, hierarchy = cv.findContours(imgThresh, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)[-2:]\n#\n# # Identify rectangular contours\n# rect_cnts = []\n# areas = []\n# for cnt in contours:\n# peri = cv.arcLength(cnt, True)\n# approx = cv.approxPolyDP(cnt, 0.04 * peri, True)\n# #(x, y, w, h) = cv2.boundingRect(cnt)\n# #ar = w / float(h)\n# if len(approx) == 4: # shape filtering condition\n# # Get the area of the rectangle, need to exclude rectangles with area less than the \n# # one of the smallest aruco\n# area = cv.contourArea(cnt)\n#\n# # Exclude rectangles with pixel area, due to some threesholding error perhaps\n# if area >= 5.0:\n# areas.append(area)\n# rect_cnts.append(cnt) # Shape is rectangle, add to the valid list\n# # Now in rect_cnts[] we have only rectangular contours\n#\n# #======== Discard the contours that do not contain any aruco (multiple markers can be present in the image)\n#\n# # Make a copy to preserve the original image, draw functions are destructive\n# imgRectDraw = np.copy(imgRect)\n#\n# j = 0\n# in_cnt = [] # dim (2,2) array of markers containing array of contours for each marker\n# for aruco_n, corner_n in zip(idsR, cornersR): # for every aruco marker in image...\n# cnt_father = [] # collect contours, for each marker\n# corner_n = corner_n[0].astype(int) # adjust array dimensionality\n# if debug:\n# imgRectDraw = cv.circle(imgRectDraw, (corner_n[0][0], corner_n[0][1]), 50, (0,0,255), 3)\n# i=0\n# for cnt in rect_cnts: # for every rectangular contour...\n# dist = cv.pointPolygonTest(cnt, (corner_n[0][0], corner_n[0][1]), True) # Check if top left corner of the aruco\n# # dist is:\n# # - dist<0 if point is outside 
contour\n# # - dist=0 if point is in the contour itself\n# # - dist>0 if point is inside\n# # Note that the ==0 is not exactly zero, can be 0.5, so a threshold is needed\n# # Check difference in area: must be 10% greater than the one of the aruco\n# if (dist > 1.) and (areas[i] > cv.contourArea(corner_n)*1.20): # if the aruco is inside the contour...\n# cnt_father.append(cnt) # add the contour in list\n# if debug:\n# print(\"Contour distance:\", dist)\n# cv.drawContours(imgRectDraw, [cnt], -1, (0,255,0), 2) # for debug draw the contour found\n# i+=1\n# if len(cnt_father) != 0:\n# in_cnt.append(cnt_father) # check next aruco\n# \n# return in_cnt\n \n\n#def draw_axis_on_marker(queryImg,corners, ids):\n# \n# # Create a square on flat plane with dimension of 4,6 cm\n# marker_square = np.float32([[0, 0, 0], [0, 4.6, 0], [4.6, 4.6, 0], [4.6, 0, 0]])\n# # Array for drawing the 3 cartesian axes\n# axis = np.float32([[3,0,0], [0,3,0], [0,0,3]]).reshape(-1,3)\n# # assuming one marker\n# the_marker = corners#[0]\n# # Find the rotation and translation vectors\n# ret, rvecs, tvecs = cv.solvePnP(marker_square, the_marker[0], camera_matrix, camera_dist_coefs)\n#\n# # Project axes points according to camera matrix and distortion coeff\n# imgpts, jac = cv.projectPoints(axis, rvecs, tvecs, camera_matrix, camera_dist_coefs)\n# # Draw axes in the corner of the marker\n# for marker in the_marker:\n# drawnImg = draw(queryImg, marker, imgpts)\n#\n# # From rvecs compute the rotation matrix\n# rotation_matrix = cv.Rodrigues(rvecs)[0]\n# # Get the Projection matrix\n# P = np.hstack((rotation_matrix, tvecs))\n\n # Compute eulero angles in degree\n# euler_angles_degrees = - cv.decomposeProjectionMatrix(P)[6]\n# euler_angles_radians = euler_angles_degrees * np.pi / 180\n# print(euler_angles_degrees)\n#\n# return drawnImg\n \n \ndef drawSingleAru(queryImg, corners, imgpts):\n corner = tuple(corners.ravel())\n queryImg = cv.line(queryImg, corner, tuple(imgpts[0].ravel()), (255,0,0), 3)\n queryImg = cv.line(queryImg, corner, tuple(imgpts[1].ravel()), (0,255,0), 3)\n queryImg = cv.line(queryImg, corner, tuple(imgpts[2].ravel()), (0,0,255), 3)\n return queryImg\n\n\n\n\n# def cut_markers_area(queryImg,corners,rotation_matrix):\n \n# rotatedImg = cv2.warpAffine(queryImg, rotation_matrix, _img_rot.shape[1::-1], flags=cv2.INTER_LINEAR)\n# _extrema = cv2.perspectiveTransform(np.array([maxx,maxy,minx,miny]), _rot_mat)\n \n# img_mrk.append(_img_rot[_extrema[0][1]:_extrema[1][1], _extrema[0][0]:_extrema[1][0], :])\n \n# i+=1\n# return img_mrk\n \n\n########### GEOMETRIC FUNCTIONS ####################\n\n#def computeDistance(imgShape,corners,marker_real_world_mm, debug=0):\n# # ====== Camera parameters ==========\n# # Sensor is 5.64mm wide\n# # Original resolution is 4032x1960\n# focal_lenght = 3558.572811\n# # ====== End camera parameters ======\n#\n# # Size of the square marker\n## marker_real_world_mm = 46\n#\n## imgShape = queryImg.shape\n# \n# marker_dim_px = np.sqrt((corners[0][0][0][0] - corners[0][0][3][0])**2 + (corners[0][0][0][1] - corners[0][0][3][1])**2)\n# \n# distance_mm = marker_real_world_mm * (np.max(imgShape) / 4032) * focal_lenght / marker_dim_px\n# if debug: print(\"Distance: {}cm\".format(round(distance_mm / 10, 1)))\n# return distance_mm\n \n\ndef computeDistanceSingle(imgShape,corners,marker_real_world_mm, debug=0):\n # ====== Camera parameters ==========\n # Sensor is 5.64mm wide\n # Original resolution is 4032x1960\n focal_lenght = 426\n # ====== End camera parameters ======\n\n # Size of the square 
marker\n# marker_real_world_mm = 46\n\n# imgShape = queryImg.shape\n \n marker_dim_px = np.sqrt((corners[0][0][0] - corners[0][3][0])**2 + (corners[0][0][1] - corners[0][3][1])**2)\n \n \n distance_mm = marker_real_world_mm * (np.max(imgShape) / 640) * focal_lenght / marker_dim_px\n if debug: print(\"Distance: {}cm\".format(round(distance_mm / 10, 1)))\n return distance_mm\n \n\ndef oldsingleAruRelPos(queryImg,corners,Id,markerSize_mm,camera_matrix, camera_dist_coefs,\n focal_length,superimpAru='none'):\n \n imgShape = queryImg.shape\n \n markerDim_px = np.sqrt((corners[0][0][0] - corners[0][3][0])**2 + (corners[0][0][1] - corners[0][3][1])**2)\n distnc_mm = markerSize_mm * (np.max(imgShape) / 640) * focal_length / markerDim_px\n \n mrkSiz_cm= round(markerSize_mm/10,1)\n markerSquare_cm = np.float32([[0, 0, 0], [0, mrkSiz_cm, 0], [mrkSiz_cm, mrkSiz_cm, 0], [mrkSiz_cm, 0, 0]])\n _, rvecs, tvecs = cv.solvePnP(markerSquare_cm, corners, camera_matrix, camera_dist_coefs)\n# rvecs,tvecs,_= aruco.estimatePoseSingleMarkers(corners,markerSize_mm,camera_matrix,camera_dist_coefs)\n# r & tvects are different from the ones with previous code\n \n rotation_matrix = cv.Rodrigues(rvecs)[0]# From rvecs compute the rotation matrix\n P = np.hstack((rotation_matrix, 10*tvecs))# Get the Projection matrix\n \n # Project axes points according to camera matrix and distortion coeff\n axis = np.float32([[3,0,0], [0,3,0], [0,0,3]]).reshape(-1,3)# Array for drawing the 3 cartesian axes\n imgpts, jac = cv.projectPoints(axis, rvecs, tvecs, camera_matrix, camera_dist_coefs)\n \n# cv.drawContours(queryImg, [corners], 0, (0,255,0), 3)#gotta work on this\n if superimpAru=='distance': queryImg=distOverAruco(round(distnc_mm, 1),corners,queryImg)\n elif superimpAru=='marker': queryImg=IdOverAruco(Id,corners,queryImg)\n \n queryImg = drawSingleAru(queryImg, corners[0][0], imgpts)#this solution works better than the following\n# queryImg = aruco.drawAxis(queryImg, camera_matrix, camera_dist_coefs, rvecs, tvecs, 2)\n \n centerx,centery=np.abs(corners[0][0] + corners[0][2])/2\n #\n# (mrkSiz_cm/2,mrkSiz_cm/2)\n# _, rvecsCent, tvecsCent = cv.solvePnP([[[mrkSiz_cm/2, mrkSiz_cm/2]]], [[[centerx, centery]]], camera_matrix, camera_dist_coefs)\n# print('center t vecs',tvecsCent)\n# euler_angles_degrees = - cv.decomposeProjectionMatrix(P)[6]\n# euler_angles_radians = euler_angles_degrees * np.pi / 180\n \n queryImg=cv.circle(queryImg, (int(centerx),int(centery)),5,(255,255,0),-1) \n return queryImg,distnc_mm,P\n\n\n\ndef nsingleAruRelPos(queryImg,corners,Id,markerSize_mm,camera_matrix,camera_dist_coefs, \n superimpAru='none',tglDrawMark=0,tglDrawCenter=0):\n# positiion estimation\n rvecs,tvecs= aruco.estimatePoseSingleMarkers(corners,markerSize_mm,camera_matrix,camera_dist_coefs)\n (rvecs - tvecs).any() # get rid of that nasty numpy value array error\n \n# distance [mm]\n distnc_mm=np.sqrt((tvecs**2).sum())\n# rotation and projection matrix\n rotation_matrix = cv.Rodrigues(rvecs)[0]\n P = np.hstack((rotation_matrix, np.reshape(tvecs,[3,1])))\n# euler_angles_degrees = - cv.decomposeProjectionMatrix(P)[6]\n# euler_angles_radians = euler_angles_degrees * np.pi / 180\n \n# substitute marker with distance of Id\n if superimpAru=='distance': queryImg=distOverAruco(round(distnc_mm, 1),corners,queryImg)\n elif superimpAru=='id': queryImg=IdOverAruco(Id,corners,queryImg)\n# draws axis half of the size of the marker\n if tglDrawMark:\n markerDim_px = np.sqrt((corners[0][0][0] - corners[0][3][0])**2 + (corners[0][0][1] - corners[0][3][1])**2) \n 
aruco.drawAxis(queryImg, camera_matrix, camera_dist_coefs, rvecs, tvecs, int(markerDim_px//4))\n\n if tglDrawCenter:\n centerx,centery=np.abs(corners[0][0] + corners[0][2])/2\n markerDim_px = np.sqrt((corners[0][0][0] - corners[0][3][0])**2 + (corners[0][0][1] - corners[0][3][1])**2)\n queryImg=cv.circle(queryImg, (int(centerx),int(centery)),int(markerDim_px/16),(255,255,0),-1)\n \n return queryImg,distnc_mm,P\n\n##################################\n# bibliography\n# https://docs.opencv.org/4.2.0/d5/dae/tutorial_aruco_detection.html\n# for the parameters list\n","sub_path":"scripts/roscamLibrary.py","file_name":"roscamLibrary.py","file_ext":"py","file_size_in_byte":12357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"155679219","text":"#!/bin/python3\n\nimport sys\n\ndef jump(c):\n res = 0\n ind = 0\n \n while ind != len(c)-1:\n if ind != len(c)-2 and c[ind+2] == 0:\n ind += 2\n else:\n ind += 1\n res += 1\n \n return res\n \n\nif __name__ == \"__main__\":\n n = int(input().strip())\n c = list(map(int, input().strip().split(' ')))\n result = jump(c)\n print(result)\n","sub_path":"interview-preparation-kit/jumping-on-the-clouds.py","file_name":"jumping-on-the-clouds.py","file_ext":"py","file_size_in_byte":392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"109650509","text":"\"\"\"\r\n DUPIN Léa - Aéro 2 classe F2\r\n Ma 223 - Tp 2 : Méthode de Cholesky pour la résolution de systèmes linéaires.\r\n Programme permettant d'obtenir les graphiques de temps de calcul.\r\n Institut Polytechnique des Sciences Avancées - IPSA Paris\r\n\"\"\"\r\nimport numpy as np\r\nimport math\r\nimport matplotlib as mp\r\nimport time\r\nimport matplotlib.pyplot as plt\r\nfrom Cholesky import *\r\nfrom time import process_time\r\n\r\n\r\ndef ResolCholesky(A, B):\r\n # Décomposition de Cholesky:\r\n L = Cholesky(A)\r\n\r\n # Résolution de L * Y = B:\r\n Aaug = np.hstack([L, B])\r\n Y = ResolutionSystTriInf(Aaug)\r\n\r\n # Résolution de L(transposée) * X = Y:\r\n Y = Y.reshape(-1, 1)\r\n LT = np.transpose(L)\r\n Taug = np.hstack([LT, Y])\r\n X = ResolutionSystTriSup(Taug)\r\n return(X)\r\n\r\n\r\ndef graph():\r\n # Mise en page pour mettre les 3 graphiques\r\n plt.gcf().subplots_adjust(wspace=0.5, hspace=0.5)\r\n plt.subplot(2, 1, 1)\r\n\r\n # Demande de la taille de la matrice maximale à calculer\r\n nb = int(input(\"Taille max de la matrice souhaitée ? 
\\n\"))\r\n\r\n time_list_Cholesky = []\r\n\r\n for taille in range(0, nb + 1):\r\n A, B = init(taille)\r\n ResolCholesky(A, B)\r\n t = process_time()\r\n time_list_Cholesky.append(t)\r\n\r\n # ------ Temps de calcul\r\n T_list_Cholesky = []\r\n for i in range(len(time_list_Cholesky)):\r\n if i == len(time_list_Cholesky)-1:\r\n T = time_list_Cholesky[-1] - time_list_Cholesky[0]\r\n elif i == len(time_list_Cholesky):\r\n None\r\n else:\r\n T = time_list_Cholesky[i + 1] - time_list_Cholesky[i]\r\n T_list_Cholesky.append(T)\r\n\r\n # Affichage des temps en console\r\n if taille > 50:\r\n for i in range(0, len(T_list_Cholesky) - 1, 50):\r\n print(\"Le temps de calcul pour une matrice de taille \", i, \"est de :\", T_list_Cholesky[i], \"secondes.\")\r\n print(\"Le temps de calcul pour une matrice de taille \", taille, \"est de :\", T_list_Cholesky[-2], \"secondes.\")\r\n\r\n print(\"\\nLe temps de calcul total est de\", T_list_Cholesky[-1], \"secondes\")\r\n minutes = int(T_list_Cholesky[-1]//60)\r\n secondes = int(T_list_Cholesky[-1] % 60)\r\n if minutes == 1:\r\n print(\"Soit environ\", minutes, \"minute et\", secondes, \"secondes.\")\r\n elif minutes > 1:\r\n print(\"Soit environ\", minutes, \"minutes et\", secondes, \"secondes.\")\r\n\r\n # On supprime le temps total afin de pouvoir afficher les temps de calcul\r\n del(T_list_Cholesky[- 1])\r\n\r\n abscisse = []\r\n for i in range(0, taille):\r\n abscisse.append(i)\r\n\r\n # -- Création de la courbe\r\n plt.plot(abscisse, T_list_Cholesky, color='r', label='Méthode de Cholesky')\r\n\r\n # -- Affichage de la courbe\r\n # Graphique 1 : Temps / taille\r\n plt.title(\"Temps de calcul en fonction de la taille de la matrice\")\r\n plt.ylabel('Temps de calcul en secondes')\r\n plt.xlabel('Taille de la matrice')\r\n plt.grid(True)\r\n plt.legend(loc='best')\r\n # Graphique 2 : Temps en échelle logarithmique / taille\r\n plt.subplot(2, 1, 2)\r\n plt.plot(abscisse, T_list_Cholesky, color='r', label='Méthode de Cholesky')\r\n plt.title(\"Temps de calcul en fonction de la taille de la matrice\\n Echelle logarithmique \")\r\n plt.ylabel('Temps de calcul en secondes (log)')\r\n plt.xlabel('Taille de la matrice')\r\n plt.yscale('log')\r\n plt.grid(True)\r\n plt.legend(loc='best')\r\n\r\n plt.show()\r\n\r\ngraph()\r\n","sub_path":"Cholesky_time.py","file_name":"Cholesky_time.py","file_ext":"py","file_size_in_byte":3432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"150565043","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# # 1.爬取單月資料\n\n# ## 1-a. 爬取鴻海公司單月「個股日本益比、殖利率及股價淨值比」資訊\n\n# In[76]:\n\n\nimport requests\nimport json\nimport pandas as pd\n\n\n# In[77]:\n\n\ndata = {\n 'response': 'json',\n 'date': '20181001',\n 'stockNo':'2317',\n '_':'1540911963420'\n}\nres = requests.get('http://www.twse.com.tw/exchangeReport/BWIBBU?response=json&date=20181001&stockNo=2317&_=1540911963420')\nprint(res.text)\n\n\n# In[78]:\n\n\njres = json.loads(res.text)\njres\n\n\n# In[79]:\n\n\njres['stat']\n\n\n# In[80]:\n\n\njres['data']\n\n\n# In[81]:\n\n\ndf_temp = pd.DataFrame(jres['data'],columns=jres['fields'])\ndf_temp\n\n\n# ## 1-b. 
爬取鴻海公司單月「各日成交資訊」資訊\n\n# In[82]:\n\n\ndata = {\n 'response': 'json',\n 'date': '20181001',\n 'stockNo':'2317',\n '_':'1540912291297'\n}\nres2 = requests.get('http://www.twse.com.tw/exchangeReport/STOCK_DAY?response=json&date=20181001&stockNo=2317&_=1540912291297')\nprint(res2.text)\n\n\n# In[83]:\n\n\njres2 = json.loads(res2.text)\njres2\n\n\n# In[84]:\n\n\njres2['stat']\n\n\n# In[85]:\n\n\njres2['data']\n\n\n# In[86]:\n\n\ndf_temp2 = pd.DataFrame(jres2['data'],columns=jres2['fields'])\ndf_temp2\n\n\n# ## 1-c. 合併兩張表:單月「個股日本益比、殖利率及股價淨值比」資訊 + 單月「各日成交資訊」資訊\n\n# In[87]:\n\n\n#先預處理df_temp的資料格式\ndf_temp['日期'] = df_temp['日期'].str.split(\"年\").str.get(0)+\"/\"+ df_temp['日期'].str.split(\"年\").str.get(1).str.split(\"��\").str.get(0)+\"/\"+ df_temp['日期'].str.split(\"年\").str.get(1).str.split(\"月\").str.get(1).str.split(\"日\").str.get(0)\n\n\n# In[88]:\n\n\n#格式已改變\ndf_temp['日期']\n\n\n# In[89]:\n\n\n#Left join兩筆資料\ndf_temp_final = pd.merge(df_temp,df_temp2, left_on='日期', right_on='日期', how='left')\ndf_temp_final\n\n#最終大表呈現的模樣\n\n\n# # 2.爬取多月資訊\n\n# In[90]:\n\n\nimport datetime\nimport calendar\nimport time\n\ntime = datetime.date(2018, 10, 1) \n\n#求前一個月的第一天\nfirst_day = datetime.date(time.year, time.month, 1)\npre_month = first_day - datetime.timedelta(days = 1) \nfirst_day_of_pre_month = datetime.date(pre_month.year, pre_month.month, 1)\nfirst_day_of_pre_month\n\n\n# In[91]:\n\n\n#製作爬蟲回傳存取目標Dataframe\ncolumn_list = list(df_temp.columns)\ndf = pd.DataFrame(columns=column_list)\ndf\n\n\n# In[92]:\n\n\nfrom datetime import datetime\nfrom datetime import timedelta\nimport time\n\n#開始爬蟲\ncrawl_date = datetime(2018,11,30) # start_date\ndf = df_temp\n\n#第一份資料:單月「個股日本益比、殖利率及股價淨值比」資訊\nfor i in range(18):\n crawl_date -= timedelta(29)\n first_day = datetime(crawl_date.year, crawl_date.month, 1)\n pre_month = first_day - timedelta(days = 1) \n first_day_of_pre_month = datetime(pre_month.year, pre_month.month, 1)\n crawl_date_str = datetime.strftime(first_day_of_pre_month, '%Y%m%d')\n \n res = requests.get('http://www.twse.com.tw/exchangeReport/BWIBBU?response=json&date=' + crawl_date_str + '&stockNo=2317&_=1540911963420')\n jres = json.loads(res.text)\n \n # 證交所回覆有資料\n if(jres['stat']=='OK'):\n print(crawl_date_str, ': crawling data...')\n \n # 將讀取回的json轉成的DataFrame(df_temp)\n df_temp = pd.DataFrame(jres['data'],columns=jres['fields'])\n \n # 更改df_temp的日期資料格式\n df_temp['日期'] = df_temp['日期'].str.split(\"年\").str.get(0)+\"/\"+ df_temp['日期'].str.split(\"年\").str.get(1).str.split(\"月\").str.get(0)+\"/\"+ df_temp['日期'].str.split(\"年\").str.get(1).str.split(\"月\").str.get(1).str.split(\"日\").str.get(0)\n \n # 欄位合併\n df = df.append(df_temp)\n \n else:\n print(crawl_date_str, ': no data')\n \n # 讓程式睡個10秒再繼續爬取下一天資料,避免頻繁抓取被台灣證券交易所封鎖IP拒絕存取\n time.sleep(10) \n\n\n# In[93]:\n\n\n#製作爬蟲回傳存取目標Dataframe\ncolumn_list = list(df_temp2.columns)\ndf2 = pd.DataFrame(columns=column_list)\ndf2\n\n\n# In[97]:\n\n\nfrom datetime import datetime\nimport time \n\n#開始爬蟲\ncrawl_date = datetime(2018,11,30) # start_date\ndf2 = df_temp2\n\n#第二份資料:單月「各日成交資訊」資訊\nfor i in range(18):\n crawl_date -= timedelta(29)\n first_day = datetime(crawl_date.year, crawl_date.month, 1)\n pre_month = first_day - timedelta(days = 1) \n first_day_of_pre_month = datetime(pre_month.year, pre_month.month, 1)\n crawl_date_str = datetime.strftime(first_day_of_pre_month, '%Y%m%d')\n \n res2 = requests.get('http://www.twse.com.tw/exchangeReport/STOCK_DAY?response=json&date=' + crawl_date_str + '&stockNo=2317&_=1540912291297')\n jres2 = json.loads(res2.text)\n\n # 
證交所回覆有資料\n if(jres2['stat']=='OK'):\n print(crawl_date_str, ': crawling data...')\n \n # 將讀取回的json轉成的DataFrame(df_temp)\n df_temp2 = pd.DataFrame(jres2['data'],columns=jres2['fields'])\n \n # 欄位合併\n df2 = df2.append(df_temp2)\n \n else:\n print(crawl_date_str, ': no data')\n \n # 讓程式睡個15秒再繼續爬取下一天資料,避免頻繁抓取被台灣證券交易所封鎖IP拒絕存取\n time.sleep(15)\n\n\n# In[98]:\n\n\n# 整合這兩張表\ndf_all = pd.merge(df,df2, left_on='日期', right_on='日期', how='left')\npd.set_option('display.max.columns',30)\npd.set_option('display.max.rows',300)\ndf_all\n\n\n# # 3.資料預處理\n\n# In[99]:\n\n\n# X0.00的部分應該直接換成0\ndf_all = df_all.replace('X0.00',0)\ndf_all\n\n\n# In[100]:\n\n\ndf_all[\"成交股數\"] = df_all[\"成交股數\"].str.split(\",\").str.get(0)+df_all[\"成交股數\"].str.split(\",\").str.get(1)+ df_all[\"成交股數\"].str.split(\",\").str.get(2)\ndf_all\n\n\n# In[101]:\n\n\ndf_all[\"成交金額\"] = df_all[\"成交金額\"].str.split(\",\").str.get(0)+df_all[\"成交金額\"].str.split(\",\").str.get(1)+ df_all[\"成交金額\"].str.split(\",\").str.get(2)+df_all[\"成交金額\"].str.split(\",\").str.get(3)\ndf_all\n\n\n# In[102]:\n\n\ndf_all[\"成交筆數\"] = df_all[\"成交筆數\"].str.split(\",\").str.get(0)+df_all[\"成交筆數\"].str.split(\",\").str.get(1)\ndf_all\n\n\n# In[103]:\n\n\n# 重複的日期刪除\ndf_all = df_all.drop_duplicates(subset=None, keep='first', inplace=False)\ndf_all\n\n\n# In[104]:\n\n\ndf_all['日期'].size\n\n\n# In[105]:\n\n\n#依��日期時間升冪排列\ndf_all = df_all.sort_values(\"日期\")\ndf_all\n\n\n# In[106]:\n\n\n#重設index\ndf_all = df_all.reset_index(drop=True)\npd.set_option('display.max.columns',30)\npd.set_option('display.max.rows',400)\ndf_all\n\n\n# In[107]:\n\n\nfor i in range(len(df_all)):\n df_all.loc[i,'日期'] = str(int(df_all.loc[i,'日期'][:3])+1911) + df_all.loc[i,'日期'][3:]\n\n\n# In[109]:\n\n\n#檢查日期格式已改為西元年\ndf_all\n\n\n# In[110]:\n\n\ndel df_all[\"股利年度\"]\n\n\n# In[111]:\n\n\ndel df_all[\"財報年/季\"]\n\n\n# In[116]:\n\n\ndf_all[\"日期\"] = df_all[\"日期\"].str.split(\"/\").str.get(0)+df_all[\"日期\"].str.split(\"/\").str.get(1)+ df_all[\"日期\"].str.split(\"/\").str.get(2)\ndf_all\n\n\n# In[117]:\n\n\ndf_all = df_all.set_index(df_all['日期'], drop=True)\ndf_all.head()\n\n\n# In[118]:\n\n\ntype(df.index)\n\n\n# In[119]:\n\n\n#轉成DatetimeTndex\ndf_all.index = pd.to_datetime(df_all.index,format='%Y%m%d')\ntype(df_all.index)\n\n\n# In[121]:\n\n\ndel df_all[\"日期\"]\n\n\n# In[122]:\n\n\ndf_all.dtypes\n\n\n# In[123]:\n\n\ndf_all[\"殖利率(%)\"] = df_all[\"殖利率(%)\"].astype(float)\ndf_all[\"本益比\"] = df_all[\"本益比\"].astype(float)\ndf_all[\"股價淨值比\"] = df_all[\"股價淨值比\"].astype(float)\ndf_all[\"成交股數\"] = df_all[\"成交股數\"].astype(int)\ndf_all[\"成交金額\"] = df_all[\"成交金額\"].astype(float)\ndf_all[\"開盤價\"] = df_all[\"開盤價\"].astype(float)\ndf_all[\"最高價\"] = df_all[\"最高價\"].astype(float)\ndf_all[\"最低價\"] = df_all[\"最低價\"].astype(float)\ndf_all[\"收盤價\"] = df_all[\"收盤價\"].astype(float)\ndf_all[\"漲跌價差\"] = df_all[\"漲跌價差\"].astype(float)\ndf_all[\"成交筆數\"] = df_all[\"成交筆數\"].astype(float)\n\ndf_all.dtypes\n\n\n# # 4.視覺化\n\n# In[124]:\n\n\nimport matplotlib.pyplot as plt\nplt.rcParams['font.family']='SimHei' #顯示中文('SimHei' for MacOS)\nplt.rcParams['axes.unicode_minus'] = False #正常顯示負號\nplt.style.use('ggplot')\n#圖片顯示於Jupyter Notebook上\nget_ipython().run_line_magic('matplotlib', 'inline')\n\n\n# In[211]:\n\n\ndf_all['收盤價'].plot(figsize=(10,8))\n\n\n# In[126]:\n\n\ndf_all.loc[:,\"開盤價\":\"收盤價\"].plot(figsize=(20,16))\n\n\n# In[158]:\n\n\ndf_all.loc[:,\"成交股數\":\"成交金額\"].plot(figsize=(20,16))\n\n\n# In[210]:\n\n\ndf_all['本益比'].plot(figsize=(10,8))\n\n\n# In[212]:\n\n\ndf_all['殖利率(%)'].plot(figsize=(10,8))\n\n\n# 
In[213]:\n\n\ndf_all['股價淨值比'].plot(figsize=(10,8))\n\n\n# In[128]:\n\n\ndf_all.plot(kind='scatter',x='殖利率(%)', y='本益比',figsize=(10,8))\n\n\n# # 5.分析\n\n# ## 相關分析\n\n# In[129]:\n\n\ndf_all.loc[:,\"殖利率(%)\":\"成交筆數\"].corr()\n\n\n# In[130]:\n\n\nimport seaborn as sns\ncorr = df_all.loc[:,\"殖利率(%)\":\"成交筆數\"].corr()\n\nplt.figure(figsize=(20,20))\nsns.heatmap(corr, square=True, annot=True)\nplt.show()\n\n\n# ## 統計分析\n\n# In[131]:\n\n\n#統計分析\n\ndf_all_stock = df_all.loc[:,\"殖利率(%)\":\"成交筆數\"]\ndf_all_stock.describe()\n\n\n# In[132]:\n\n\ndf_all_stock.mean()\n\n\n# # 6.機器學習模型預估\n\n# ### 股票殖利率=現金股利 / 股價 (越高越好)\n# ### 本益比(PER) = 每股市價 / 每股盈餘(EPS) (越低越好)\n# ### 股價淨值比(PBR) = 股票市值 / 每股淨值 (股價淨值比小於1時,代表現在比較便宜;股價淨值比大於1時,代表現在比較昂貴)\n\n# In[133]:\n\n\nX = df_all[['殖利率(%)','本益比','股價淨值比']]\n\n\n# ## Kmeans分群\n\n# In[134]:\n\n\nfrom sklearn.preprocessing import StandardScaler\nsc = StandardScaler()\nsc.fit(X)\nX_std = sc.transform(X)\n\n\n# In[135]:\n\n\nfrom sklearn.cluster import KMeans\nkm = KMeans(n_clusters=5)\ny_pred = km.fit_predict(X_std)\n\n\n# In[136]:\n\n\nimport matplotlib.pyplot as plt\nget_ipython().run_line_magic('matplotlib', 'inline')\nplt.rcParams['font.family']='SimHei' #顯示中文('SimHei' for MacOS)\n\nplt.figure(figsize=(10,8))\nplt.scatter(X['殖利率(%)'],X['本益比'],c=y_pred)\nplt.xlabel('殖利率(%)', fontsize=20)\nplt.ylabel('本益比', fontsize=20)\n\n\n# In[137]:\n\n\nfrom mpl_toolkits.mplot3d import Axes3D\nfig = plt.figure(figsize=(10,8))\nax = Axes3D(fig)\nax.scatter(xs=X['殖利率(%)'],ys=X['股價淨值比'],zs=X['本益比'],c=y_pred)\nax.set_xlabel('殖利率(%)',fontsize=18)\nax.set_ylabel('股價淨值比',fontsize=18)\nax.set_zlabel('本益比',fontsize=18)\n\n\n# ## 製作明日收盤價\n\n# In[138]:\n\n\n#製作明日收盤價\ndate_list = list(df_all.index)\nfor i in range(len(df_all)-1):\n df_all.loc[date_list[i],'明日收盤價'] = df_all.loc[date_list[i+1], '收盤價']\ndf_all\n\n\n# In[139]:\n\n\ndf_all = df_all.dropna()\nX = df_all[['開盤價','最高價','最低價','收盤價','漲跌價差','殖利率(%)','本益比','股價淨值比']]\ny = df_all[['明日收盤價']]\n\n\n# ### 切分資料\n\n# In[141]:\n\n\n#切分資料\nX_train = X[:-1]\nX_test = X[-1:]\ny_train = y[:-1]\ny_test = y[-1:]\n\n\n# In[142]:\n\n\nX_test\n\n\n# In[143]:\n\n\ny_test\n\n\n# ### 標準化\n\n# In[144]:\n\n\n#標準化\nfrom sklearn.preprocessing import StandardScaler\nsc = StandardScaler()\nsc.fit(X_train)\nX_train_std = sc.transform(X_train)\nX_test_std = sc.transform(X_test)\n\n\n# ### 訓練\n\n# In[145]:\n\n\n#訓練資料\nfrom sklearn import linear_model\n\n# linear regression 物件\nregr = linear_model.LinearRegression()\n\n# 訓練模型\nregr.fit(X_train_std, y_train)\n\n\n# In[146]:\n\n\nregr.score(X_train_std, y_train)\n\n\n# In[147]:\n\n\nregr.coef_\n\n\n# In[148]:\n\n\nplt.figure(figsize=(16,6))\nplt.plot(X_train.index, y_train.values, label='real')\nplt.plot(X_train.index, regr.predict(X_train_std), label='predict')\nplt.grid()\nplt.legend()\n\n\n# ### 預測單日收盤價\n\n# In[149]:\n\n\nprint('2018/2/26 收盤價')\nprint('實際值', y_test.values)\nprint('預測值', regr.predict(X_test_std))\nprint('誤差百分比 =', (regr.predict(X_test_std)[0][0] - y_test.values[0][0])/y_test.values[0][0] * 100, '%')\n\n\n# ## 製作明日漲跌價差\n\n# In[150]:\n\n\n#製作明日漲跌價差\ndf_all2 = df_all.copy()\ndate_list = list(df_all2.index)\nfor i in range(len(df_all2)-1):\n df_all2.loc[date_list[i],'明日漲跌價差'] = df_all2.loc[date_list[i+1], '漲跌價差']\ndf_all2\n\n\n# In[151]:\n\n\ndf_all2 = df_all2.dropna()\nX = df_all2[['開盤價','最高價','最低價','收盤價','漲跌價差','殖利率(%)','本益比','股價淨值比']]\ny = df_all2[['明日漲跌價差']]\n\n\n# ### 切分資料\n\n# In[152]:\n\n\n#切分資料\nX_train = X[:-1]\nX_test = X[-1:]\ny_train = y[:-1]\ny_test = y[-1:]\n\n\n# ### 標準化\n\n# In[153]:\n\n\n#標準化\nsc = 
StandardScaler()\nsc.fit(X_train)\nX_train_std = sc.transform(X_train)\nX_test_std = sc.transform(X_test)\n\n\n# ### 訓練\n\n# In[154]:\n\n\n#訓練資料\n# linear regression 物件\nregr = linear_model.LinearRegression()\n\n# 訓練模型\nregr.fit(X_train_std, y_train)\n\n\n# In[155]:\n\n\nregr.score(X_train_std, y_train)\n\n\n# In[156]:\n\n\nplt.rcParams['axes.unicode_minus'] = False #正常顯示負號\nplt.figure(figsize=(16,6))\nplt.plot(X_train.index, y_train.values,label='real')\nplt.plot(X_train.index, regr.predict(X_train_std), label='predict')\nplt.grid()\nplt.legend()\n\n\n# ### 預測單日漲跌價差\n\n# In[157]:\n\n\nprint('2018/2/26 漲跌價差')\nprint('實際值', y_test.values)\nprint('預測值', regr.predict(X_test_std))\nprint('誤差百分比 =', (regr.predict(X_test_std)[0][0] - y_test.values[0][0])/y_test.values[0][0] * 100, '%')\n\n","sub_path":"Stock_Analysis_Result.py","file_name":"Stock_Analysis_Result.py","file_ext":"py","file_size_in_byte":13520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"316642643","text":"import discord\nfrom discord.ext import commands\n\nclass owner(commands.Cog, name=\"Utility\"):\n def __init__(self, bot):\n self.bot = bot\n\n #Reload\n\n @commands.command(hidden=True)\n @commands.is_owner()\n async def reload(self,ctx,name:str):\n try:\n self.bot.reload_extension(f\"cogs.{name}\")\n except Exception as e:\n return await ctx.send(ethrow.traceback_throw(e))\n await ctx.send(f\"✅ | Reloaded extension `{name}`\")\n\ndef setup(bot):\n bot.add_cog(owner(bot))\n print(\"owner file is loaded!\")\n","sub_path":"cogs/owner.py","file_name":"owner.py","file_ext":"py","file_size_in_byte":559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"75420539","text":"#Import packages and librarie\nimport pandas as pd\nimport numpy as np\nimport dash_daq as daq\nimport dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nimport dash_daq as daq\nimport plotly.graph_objects as go\nimport plotly.express as px\nimport dash_bootstrap_components as dbc\nfrom dash.dependencies import Input, Output\nimport base64\nimport os\nimport statsmodels\n\nos.chdir('/Users/shaanaucharagram/Documents/repos/plotly_dash_anime')\n\n# external JavaScript files\nexternal_scripts = [\n 'https://www.google-analytics.com/analytics.js',\n {'src': 'https://cdn.polyfill.io/v2/polyfill.min.js'},\n {\n 'src': 'https://cdnjs.cloudflare.com/ajax/libs/lodash.js/4.17.10/lodash.core.js',\n 'integrity': 'sha256-Qqd/EfdABZUcAxjOkMi8eGEivtdTkh3b65xCZL4qAQA=',\n 'crossorigin': 'anonymous'\n }\n]\n\n# external CSS stylesheets\nexternal_stylesheets = [\n 'https://codepen.io/chriddyp/pen/bWLwgP.css',\n {\n 'href': 'https://stackpath.bootstrapcdn.com/bootstrap/4.1.3/css/bootstrap.min.css',\n 'rel': 'stylesheet',\n 'integrity': 'sha384-MCw98/SFnGE8fJT3GXwEOngsV7Zt27NXFoaoApmYm81iuXoPkFOJwJ8ERdknLPMO',\n 'crossorigin': 'anonymous'\n }\n]\n\nemail_input = html.Div(\n [\n dbc.FormGroup(\n [\n dbc.Label(\"Email\"),\n dbc.Input(id=\"email-input\", type=\"email\", value=\"\"),\n dbc.FormText(\"We only accept gmail...\"),\n dbc.FormFeedback(\n \"That looks like a gmail address :-)\", valid=True\n ),\n dbc.FormFeedback(\n \"Sorry, we only accept gmail for some reason...\",\n valid=False,\n ),\n ]\n )\n ]\n)\n\nform = dbc.Form(\n [\n dbc.FormGroup(\n [\n dbc.Label(\"Email\", className=\"mr-2\"),\n dbc.Input(type=\"email\", placeholder=\"Enter email\"),\n ],\n className=\"mr-3\",\n ),\n dbc.FormGroup(\n [\n dbc.Label(\"Password\", 
className=\"mr-2\"),\n dbc.Input(type=\"password\", placeholder=\"Enter password\"),\n ],\n className=\"mr-3\",\n ),\n dbc.Button(\"Submit\", color=\"primary\"),\n ],\n inline=True,\n)\n\n\npath = '/Users/shaanaucharagram/Documents/repos'\nanime_df = pd.read_csv(path + \"/big_data/anime.csv\")\nrating_df = pd.read_csv(path + \"/big_data/rating.csv\")\ndf = pd.get_dummies(anime_df, columns=['type'])\ndf_anime_new = pd.merge(anime_df, df)\ndf_anime_new_corr = df_anime_new.corr()\n\nrating_df['did_rate'] = np.where(rating_df['rating']!=-1, 1, 0)\ntest_df = anime_df.merge(rating_df, on='anime_id', how='left')\ntest_df['count'] = test_df['anime_id'].map(test_df['anime_id'].value_counts())\n\ngenres = pd.DataFrame(anime_df.genre.str.split(',', expand=True).stack(), columns=['genre'])\ngenres = genres.reset_index(drop=True)\ngenre_count = pd.DataFrame(genres.groupby(by=['genre']).size(), columns=['count'])\ngenre_count = genre_count.reset_index()\n\ntop_20 = genre_count.nlargest(20, 'count')\ntop_10 = genre_count.nlargest(10, 'count')\ntop_5 = genre_count.nlargest(5, 'count')\n\n\nfigure_bar = px.bar(top_20, x='genre', y='count')\n\nfig_box = px.box(anime_df, x=\"type\", y=\"rating\")\n\n\ndf_anime_new['episodes'] = df_anime_new.episodes.fillna(0)\ndf_anime_new.episodes.replace(('Unknown'), (0), inplace=True)\ndf_anime_new['episodes'] = df_anime_new.episodes.astype(int)\ndf_anime_new['episodes']=df_anime_new['episodes'].replace(0,df_anime_new['episodes'].mean())\n\n\nfor column in ['type','rating']:\n df_anime_new[column].fillna(df_anime_new[column].mode()[0], inplace=True)\n\n\n\n\nfigure_trendlines = px.scatter(df_anime_new, x=\"episodes\", y=\"rating\",trendline=\"ols\")\n\n\nfigure_heatmap = go.Figure(data=go.Heatmap(\n z=df_anime_new_corr,\n x=df_anime_new_corr.columns,\n y=df_anime_new_corr.columns,\n colorscale='Viridis'))\n\n\nsound_filename = path + '/plotly_dash_anime/anime.mp3' # replace with your own .mp3 file\nencoded_sound = base64.b64encode(open(sound_filename, 'rb').read())\n\n\n\n\n# image_filename = path + '/data/marker.png' # replace with your own image\n# encoded_image = base64.b64encode(open(image_filename, 'rb').read())\n\napp = dash.Dash(__name__,\n external_scripts=external_scripts,\n external_stylesheets=external_stylesheets\n )\n\n#############################################div for main bar#####################################################\napp.layout = html.Div(\n className=\"content\",\n children=[\n\nhtml.Div(\n className=\"left_menu\",\n children=[\n html.Div(\n daq.Gauge(\n color={\"gradient\":True,\"ranges\":{\"yellow\":[0,5],\"orange\":[5,7],\"red\":[7,10]}},\n label=\"Default\",\n value=5\n ),\n ),\n ]\n),\n\n\nhtml.Div(\n className=\"left_menu_2\",\n children=[\n html.Div(\n daq.Gauge(\n id='my-gauge',\n color={\"gradient\":True,\"ranges\":{\"yellow\":[0,20],\"orange\":[20,40],\"red\":[40,50]}},\n label=\"Default\",\n value=15,\n max=50\n ),\n ),\n dcc.Slider(\n id='my-gauge-slider',\n min=1,\n max=50,\n step=1,\n value=15\n ),\n ]\n),\n\nhtml.Div(\n className=\"left_menu_3\",\n children=[\n html.Div(\n\n html.Button(id=\"button1\", children=\"Click me for sound\")\n\n\n\n ),\n html.Div(id=\"placeholder\", style={\"display\": \"none\"})\n ]\n),\n\n\nhtml.Div(\n className=\"right_content\",\n children=[\n html.Div(\n className=\"header\",\n children=[\n html.Div([\n html.H1(\"Anime Dashboard\",\n style={ \"font-family\": \"Helvetica\",\n \"fontSize\":90,\n \"color\":\"orange\",\n 'background-image': 'url(/assets/test1.jpeg)',\n 'justify-content': 'center',\n 
'align-items': 'center',\n 'display': 'flex'})\n ]),\n ]\n ),\n\n ]\n),\n\n\n\nhtml.Div(\n className=\"top_metrics_50\",\n children=[\n html.Div(\n\n dcc.Graph(figure=figure_trendlines)\n\n ),\n ]\n),\n\nhtml.Div(\n className=\"left_side\",\n children=[\n html.Div(\n dcc.Graph(id='bar')\n ),\n ]\n),\n\n\nhtml.Div(\n className=\"left_side_2\",\n children=[\n html.Div(\n dcc.Graph(figure=figure_heatmap)\n ),\n ]\n),\n\n\n\n ])\n\n@app.callback(\n Output('bar', 'figure'),\n [Input('my-gauge-slider','value')])\ndef update_graph(slider):\n top_n_bar = genre_count.nlargest(slider, 'count')\n figure_bar = px.bar(top_n_bar, x='genre', y='count')\n\n\n return figure_bar\n# #\n@app.callback(\n dash.dependencies.Output('my-gauge', 'value'),\n [dash.dependencies.Input('my-gauge-slider', 'value')]\n)\ndef update_output(value):\n return value\n\n@app.callback(Output(\"placeholder\", \"children\"),\n [Input(\"button1\", \"n_clicks\")],\n)\ndef play(n_clicks):\n if n_clicks is None:\n n_clicks = 0\n if n_clicks != 0:\n return html.Audio(src='data:audio/mpeg;base64,{}'.format(encoded_sound.decode()),\n controls=False,\n autoPlay=True,\n )\n n_clicks = 0\n\n# @app.callback(\n# [Output(\"email-input\", \"valid\"), Output(\"email-input\", \"invalid\")],\n# [Input(\"email-input\", \"value\")],\n# )\n# def check_validity(text):\n# if text:\n# is_gmail = text.endswith(\"@gmail.com\")\n# return is_gmail, not is_gmail\n# return False, False\n\nif __name__ == '__main__':\n app.run_server(debug=True)\n\n\n\n\n\n\n# app.layout = html.Div([\n# ###############################Div for header####################################################################\n# html.Div([\n# html.Div([\n# html.H1(\"Anime Dashboard\",\n# style={ \"font-family\": \"Helvetica\",\n# \"fontSize\":90,\n# \"color\":\"orange\",})\n#\n#\n# ],\n# className='six columns',\n# style={\"display\":\"inline-block\"})\n#\n# ],\n# className=\"row header\",\n# style={'background-image': 'url(/assets/test1.jpeg)','justify-content': 'center', 'align-items': 'center','display': 'flex'}),\n#\n#\n# html.Div([\n# html.Div([\n# daq.Gauge(\n# id='my-gauge',\n# color={\"gradient\":True,\"ranges\":{\"yellow\":[0,5],\"orange\":[5,7],\"red\":[7,10]}},\n# label=\"Default\",\n# value=5\n# ),\n# dcc.Slider(\n# id = 'slider',\n# min=5,\n# max=15,\n# step=None,\n# verticalHeight=200,\n# vertical=True,\n# marks={\n# 5: 'Top 5',\n# 10: 'Top 10',\n# 15: 'Top 20',\n# },\n# value=5,\n# )\n# ], className= \"left_menu\"),\n#\n# html.Div([\n# dcc.Graph(id='bar')\n# ], className= \"left_menu\"),\n#\n# html.Div([\n# dcc.Graph(figure=fig_box)\n# ], className= \"five columns\")\n#\n# ],className='row'),\n#\n#\n# html.Div([\n# html.Div([\n# dcc.Graph(figure=figure_heatmap)\n#\n# ], className='four columns')\n#\n#\n# ], className='row')\n#\n# ])\n#\n#\n@app.callback(\n Output('bar', 'figure'),\n [Input('my-gauge-slider','value')])\ndef update_graph(slider):\n top_n_bar = genre_count.nlargest(slider, 'count')\n figure_bar = px.bar(top_n_bar, x='genre', y='count')\n\n\n return figure_bar\n# #\n@app.callback(\n dash.dependencies.Output('my-gauge', 'value'),\n [dash.dependencies.Input('my-gauge-slider', 'value')]\n)\ndef update_output(value):\n return value\n\n\n# @app.callback(\n# Output('pie', 'figure'),\n# [Input('slider','value')])\n# def update_graph(slider):\n# if slider == 5:\n# figure_bar = px.bar(top_5, x='genre', y='count')\n# elif slider == 10:\n# figure_bar = px.bar(top_10, x='genre', y='count')\n# elif slider == 15:\n# figure_bar = px.bar(top_20, x='genre', y='count')\n#\n# 
return figure_bar\n\n\n\n\n\n# if __name__ == '__main__':\n# app.run_server(debug=True)\n","sub_path":"anime_rec.py","file_name":"anime_rec.py","file_ext":"py","file_size_in_byte":10439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"485059049","text":"#!/usr/bin/env python3\n\n\nfile_list = ['james.txt', 'julie.txt', 'mikey.txt', 'sarah.txt']\n\ndef sanitize(time_string):\n \"\"\" Funcao que padroniza os separadores\"\"\"\n if '-' in time_string:\n splitter = '-'\n elif ':' in time_string:\n splitter = ':'\n else:\n return(time_string)\n\n (mins, secs) = time_string.split(splitter)\n return (mins + '.' + secs)\n\n\n\ndef get_coach_data(filename):\n try:\n with open(filename) as namef:\n data = namef.readline()\n result = data.strip().split(',')\n clean_name = [sanitize(each_t) for each_t in result]\n return(sorted(set(clean_name))[0:3])\n\n except IOError as ioerr:\n print('File error: ' + str(ioerr))\n return(None)\n\nfor players in file_list:\n print(get_coach_data(players))\n\n","sub_path":"chapter5/meu_jeito/couch_code_review.py","file_name":"couch_code_review.py","file_ext":"py","file_size_in_byte":804,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"172790460","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Club',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(default=b'', max_length=200)),\n ('description', models.TextField()),\n ('location', models.CharField(max_length=400)),\n ('additional_info', models.TextField()),\n ('timestamp', models.DateTimeField(auto_now_add=True)),\n ('updated', models.DateTimeField(auto_now=True)),\n ],\n ),\n migrations.CreateModel(\n name='Member',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('first_name', models.CharField(max_length=100)),\n ('last_name', models.CharField(max_length=100)),\n ],\n ),\n migrations.AddField(\n model_name='club',\n name='members',\n field=models.ManyToManyField(to='Minutes.Member'),\n ),\n migrations.AddField(\n model_name='club',\n name='owners',\n field=models.ManyToManyField(to=settings.AUTH_USER_MODEL),\n ),\n ]\n","sub_path":"src/Minutes/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":1588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"364394452","text":"# -*- coding: utf-8 -*-\n\n\n# 求 base^index%mod\n\n# (ab)%c == [(a%c)(b%c)]%c\ndef counting_mod(base, index, mod):\n # 代数进去发现成立 ,直接用,不用去推导\n base %= mod\n result = 1\n while index != 0:\n # index & 1 == index % 1\n if index & 1:\n result = result * base % mod\n index >>= 1\n base = base * base % mod\n return result\n\n\nif __name__ == '__main__':\n a, b, c = map(int, input().split())\n print(counting_mod(a, b, c))","sub_path":"recursion/counting_mod.py","file_name":"counting_mod.py","file_ext":"py","file_size_in_byte":494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"502954679","text":"import instances\nimport game\nimport helpers\nimport palette\nimport 
utils\n\nfrom ai import action\nfrom ai import brain\n\nfrom ai.actions import throwaction\n\nfrom entities import creature\nfrom entities.items.weapons import fist\n\n\nclass Player(creature.Creature):\n def __init__(self, char='@', position=(0, 0), fg=palette.BRIGHT_RED, bg=palette.BLACK):\n super().__init__(char, position, fg, bg)\n self.name = 'Player'\n self.brain = PlayerBrain(self)\n self._last_action_tick = 0\n self._current_tick = 999999\n self._has_taken_action = False\n self.cheer_counter = 0\n\n def update(self, time):\n super().update(time)\n\n def tick(self, tick):\n super().tick(tick)\n\n self._current_tick = tick\n\n if self._has_taken_action or self.state == 'EXITED':\n self._last_action_tick = tick\n self._has_taken_action = False\n\n def half_tick(self):\n if self.cheer_counter <= 0:\n return\n\n super().tick(self._current_tick)\n\n def on_perform_action(self, action):\n self._has_taken_action = True\n if self.cheer_counter > 0:\n self.cheer_counter -= 1\n\n def draw(self, console):\n if self.state != 'EXITED':\n super().draw(console)\n\n @property\n def idle(self):\n return self._current_tick - self._last_action_tick > 30\n\n def move(self, x, y):\n super().move(x, y)\n\n def queue_batched_move(self, moves):\n moves = [m for m in moves if m in helpers.DirectionHelper.valid_moves]\n if not moves:\n return\n\n batched_move = action.BatchedAction(self)\n\n for command in moves:\n move_action = helpers.MoveHelper.move_to_action(self, command)\n\n if move_action:\n move_action.parent = batched_move\n self.brain.add_action(move_action)\n self._has_taken_action = True\n\n self.brain.add_action(batched_move)\n\n def handle_events(self, event):\n super().handle_events(event)\n\n if self.state == \"EXITED\":\n return\n\n if event.type == 'TWITCHCHATMESSAGE':\n if event.nickname == self.name:\n commands = event.message.split(' ')\n\n if commands[0].upper() == '!MOVE' or commands[0].upper() == '!MV':\n # Moves can be either a series of moves (eg. 
ULDR) or a\n # players name\n moves = ''.join(commands[1:])\n if moves and moves[0] == '@':\n moves = moves[1:]\n\n players = instances.scene_root.players\n target = [p for p in players if p.name == moves.lower()]\n target = target[0] if target else None\n\n if target and target is not self:\n path = instances.scene_root.level.player_pathfinder.get_path(*self.position, *target.position)[:-1]\n moves = helpers.MoveHelper.path_to_moves(self.position, path)\n\n self.queue_batched_move(moves)\n\n elif commands[0].upper() == '!ATTACK' or commands[0].upper() == '!AT':\n moves = ''.join(commands[1:])\n moves = [helpers.DirectionHelper.get_direction(m) for m in moves if m in helpers.DirectionHelper.valid_moves]\n\n if moves:\n batched_attack = action.BatchedAction(self)\n\n for attack_dir in moves:\n #act = attackaction.AttackAction(self, direction=attack_dir)\n act = self.weapon.Action(self, direction=attack_dir)\n act.parent = batched_attack\n self.brain.add_action(act)\n\n self.brain.add_action(batched_attack)\n\n self._has_taken_action = True\n\n elif commands[0].upper() == '!DROP':\n self.drop_weapon()\n self._has_taken_action = True\n\n elif commands[0].upper() == '!THROW':\n direction = commands[1] if len(commands) > 1 else None\n direction = helpers.DirectionHelper.get_direction(direction[0])\n\n if not direction:\n return\n\n dest = utils.math.add(self.position, direction)\n\n for target_entity in instances.scene_root.get_entity_at(*dest):\n act = throwaction.ThrowAction(self, target_entity)\n if act.prerequisite():\n self.brain.add_action(act)\n break\n\n if not self.weapon.isinstance('Fist'):\n w = self.weapon\n w.remove()\n w.position = dest\n instances.scene_root.append(w)\n self.equip_weapon(fist.Fist())\n act = throwaction.ThrowAction(self, w)\n self.brain.add_action(act)\n\n elif commands[0].upper() == '!STAIRS' and game.Game.args.debug:\n stair = instances.scene_root.downward_stair\n path = instances.scene_root.level.player_pathfinder.get_path(self.x, self.y, stair.x, stair.y)\n moves = helpers.MoveHelper.path_to_moves(self.position, path)\n self.queue_batched_move(moves)\n\n elif commands[0].upper() == '!STOP':\n next_action = self.brain.actions[0] if self.brain.actions else None\n\n if next_action and next_action.isinstance('MoveAction'):\n next_action.fail()\n\n elif event.type == 'HALF-TICK':\n self.half_tick()\n\n\nclass PlayerBrain(brain.Brain):\n def perform_action(self):\n if self.actions:\n current_action = self.actions.pop(0)\n\n # Make sure our owner is the entity actually doing the action\n if current_action.performer is not self.owner:\n raise RuntimeError('Performing action not as owner!')\n\n if current_action.prerequisite():\n current_action.perform()\n self.owner.on_perform_action(current_action)\n\n else:\n current_action.fail()\n","sub_path":"entities/creatures/player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":6369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"448813341","text":"import math\nimport numpy as np\n\n\nclass Math:\n @staticmethod\n def __create_point(orientation, center_x, center_y, angle, direction, is_width):\n if direction:\n x = orientation / 2 * math.cos(math.radians(angle - 90 if is_width else angle)) + center_x\n y = orientation / 2 * math.sin(math.radians(angle - 90 if is_width else angle)) + center_y\n else:\n x = center_x - orientation / 2 * math.cos(math.radians(angle - 90 if is_width else angle))\n y = center_y - orientation / 2 * math.sin(math.radians(angle 
- 90 if is_width else angle))\n\n return x, y\n\n @staticmethod\n def create_rectangle(center_x, center_y, width, height, angle):\n x1, y1 = Math.__create_point(width, center_x, center_y, angle, True, True)\n x2, y2 = Math.__create_point(width, center_x, center_y, angle, False, True)\n x3, y3 = Math.__create_point(height, center_x, center_y, angle, True, False)\n x4, y4 = Math.__create_point(height, center_x, center_y, angle, False, False)\n\n tlc = (x2 - center_x + x4, y2 - center_y + y4)\n trc = (x1 - center_x + x4, y1 - center_y + y4)\n brc = (x1 - center_x + x3, y1 - center_y + y3)\n blc = (x2 - center_x + x3, y2 - center_y + y3)\n\n return tlc, blc, brc, trc\n\n @staticmethod\n def aabb_in_aabb(aabb1, aabb2):\n return aabb1[0][0] < aabb2[0][0] + aabb2[3][0] - aabb2[0][0] and \\\n aabb1[0][0] + aabb1[3][0] - aabb1[0][0] > aabb2[0][0] and \\\n aabb1[0][1] < aabb2[0][1] + aabb2[1][1] - aabb2[0][1] and \\\n aabb1[0][1] + aabb1[1][1] - aabb1[0][1] > aabb2[0][1]\n\n @staticmethod\n def center_of_polygon(polygon):\n if len(polygon) > 0:\n x_values = [vertex[0] for vertex in polygon]\n y_values = [vertex[1] for vertex in polygon]\n\n return sum(x_values) / len(polygon), sum(y_values) / len(polygon)\n\n return -1, -1\n\n @staticmethod\n def stretch_polygon_by_percent(a, b, percent):\n x = (b[0] - a[0]) * percent + b[0]\n y = (b[1] - a[1]) * percent + b[1]\n\n return x, y\n\n @staticmethod\n def get_perpendicular_vector(v):\n return -v[1], v[0]\n\n @staticmethod\n def vector_norm(v):\n return math.sqrt(Math.dot(v, v))\n\n @staticmethod\n def normalize_vector(v):\n n = float(Math.vector_norm(v))\n\n if n != 0:\n return [float(v[i]) / n for i in range(len(v))]\n else:\n return [-1 for i in range(len(v))]\n\n @staticmethod\n def cross(u, v):\n return u[0] * v[1] - v[0] * u[1]\n\n @staticmethod\n def intersection(p, q, r, s):\n x = (r[0] - p[0], r[1] - p[1])\n d1 = (q[0] - p[0], q[1] - p[1])\n d2 = (s[0] - r[0], s[1] - r[1])\n\n cross1 = Math.cross(x, d2)\n cross2 = Math.cross(d1, d2)\n\n if cross2 == 0:\n return False, None\n else:\n t1 = cross1 / cross2\n\n if t1 == 0:\n return False, None\n\n return True, (p[0] + d1[0] * t1, p[1] + d1[1] * t1)\n\n @staticmethod\n def shapify(line):\n shape, shape_part_one, shape_part_two = [], [], []\n\n p = line[0]\n\n for i in range(1, len(line), 2):\n q = line[i]\n\n pq = Math.get_perpendicular_vector((q[0] - p[0], q[1] - p[1]))\n pq = Math.normalize_vector(pq)\n\n shape_part_one.append((p[0] - pq[0] * 35, p[1] - pq[1] * 35))\n shape_part_two.append((p[0] + pq[0] * 35, p[1] + pq[1] * 35))\n\n p = q\n\n for s in shape_part_one:\n shape.append(s)\n\n for s in reversed(shape_part_two):\n shape.append(s)\n\n if len(shape_part_one) > 0:\n shape.append(shape_part_one[0])\n\n if len(shape_part_two) > 0:\n shape.append(shape_part_two[0])\n\n shape_part_one.append(shape_part_one[0])\n shape_part_two.append(shape_part_two[0])\n\n return shape, shape_part_one, shape_part_two\n\n @staticmethod\n def resample_points(points, num_desired_points=64):\n I = Math.path_length(points) / (num_desired_points - 1)\n D = 0.0\n\n new_points = [points[0]]\n\n i = 1\n\n while i < len(points):\n d = Math.vector_norm((points[i - 1][0] - points[i][0], points[i - 1][1] - points[i][1]))\n\n if (D + d) >= I:\n qx = points[i - 1][0] + ((I - D) / d) * (points[i][0] - points[i - 1][0])\n qy = points[i - 1][1] + ((I - D) / d) * (points[i][1] - points[i - 1][1])\n new_points.append((qx, qy))\n points.insert(i, (qx, qy))\n\n D = 0.0\n else:\n D += d\n\n i += 1\n\n if len(new_points) == 
num_desired_points - 1:\n new_points.append(points[-1])\n\n return new_points\n\n @staticmethod\n def path_length(points):\n d = 0.0\n\n for i in range(1, len(points)):\n d += Math.vector_norm((points[i - 1][0] - points[i][0], points[i - 1][1] - points[i][1]))\n\n return d\n\n @staticmethod\n def rotate_palette(p, q, angle):\n ox, oy = p[0], p[1]\n px, py = q[0], q[1]\n\n math.radians(angle)\n\n qx = ox + math.cos(math.radians(angle)) * (px - ox) - math.sin(\n math.radians(angle)) * (py - oy)\n qy = oy + math.sin(math.radians(angle)) * (px - ox) + math.cos(\n math.radians(angle)) * (py - oy)\n\n return qx, qy\n\n @staticmethod\n def rotate(p, q, angle):\n ox, oy = p[0], p[1]\n px, py = q[0], q[1]\n\n math.radians(angle + 90) # ?\n\n qx = ox + math.cos(math.radians(angle)) * (px - ox) - math.sin(math.radians(angle)) * (py - oy)\n qy = oy + math.sin(math.radians(angle)) * (px - ox) + math.cos(math.radians(angle)) * (py - oy)\n\n return qx, qy\n\n @staticmethod\n def sufficient_shape_area(shape, threshold=15000):\n area = 0.0\n\n for i in range(len(shape)):\n j = (i + 1) % len(shape)\n area += shape[i][0] * shape[j][1]\n area -= shape[j][0] * shape[i][1]\n\n area = abs(area) / 2.0\n\n return threshold < area\n\n @staticmethod\n def polygon_aabb(polygon):\n if len(polygon) > 0:\n minx, miny = float(\"inf\"), float(\"inf\")\n maxx, maxy = float(\"-inf\"), float(\"-inf\")\n\n for x, y in polygon:\n if x < minx:\n minx = x\n if y < miny:\n miny = y\n if x > maxx:\n maxx = x\n elif y > maxy:\n maxy = y\n\n return [(minx, miny), (minx, maxy), (maxx, maxy), (maxx, miny)]\n\n return None\n\n @staticmethod\n def get_arrow_tip_part(p, q, angle, part_stretch_factor):\n pr = Math.rotate(q, p, angle)\n qr = (p[0] - pr[0], p[1] - pr[1])\n\n Math.normalize_vector(qr)\n\n qr = (qr[0] * -part_stretch_factor + p[0], qr[1] * -part_stretch_factor + p[1])\n\n return [qr, q]\n\n @staticmethod\n def get_arrow_tip(p, q, angle, part_stretch_factor):\n return Math.get_arrow_tip_part(p, q, angle, part_stretch_factor), Math.get_arrow_tip_part(p, q, 360 - angle, part_stretch_factor)\n\n @staticmethod\n def to_movement_arrow(p, is_change_in_x, is_change_positive):\n A = 10\n B = 20\n C = 55\n\n p11 = (p[0] + (0 if is_change_in_x else -A), p[1] + (-A if is_change_in_x else 0))\n p21 = (p[0] + (0 if is_change_in_x else A), p[1] + (A if is_change_in_x else 0))\n p12 = (p11[0] + ((B if is_change_positive else -B) if is_change_in_x else 0), p11[1] + ((B if is_change_positive else -B) if not is_change_in_x else 0))\n p22 = (p21[0] + ((B if is_change_positive else -B) if is_change_in_x else 0), p21[1] + ((B if is_change_positive else -B) if not is_change_in_x else 0))\n p3 = (p[0] + ((C if is_change_positive else -C) if is_change_in_x else 0), p[1] + ((C if is_change_positive else -C) if not is_change_in_x else 0))\n\n arrow_tip1, arrow_tip2 = Math.get_arrow_tip(p, p3, 30, 0.75)\n\n return {'upper': [p11, p12], 'lower': [p21, p22], 'angle_one': arrow_tip1, 'angle_two': arrow_tip2}\n\n @staticmethod\n def movement_indication(aabb):\n p, q, r, s = aabb\n\n return [\n Math.to_movement_arrow((p[0] - 10, p[1] + ((q[1] - p[1]) / 2)), True, False),\n Math.to_movement_arrow((q[0] + ((r[0] - q[0]) / 2), q[1] + 10), False, True),\n Math.to_movement_arrow((r[0] + 10, s[1] + ((r[1] - s[1]) / 2)), True, True),\n Math.to_movement_arrow((p[0] + ((s[0] - p[0]) / 2), p[1] - 10), False, False)\n ]\n\n @staticmethod\n def dot(u, v):\n return sum((a * b) for a, b in zip(u, v))\n\n @staticmethod\n def angle_degrees(u, v):\n u_u = 
Math.normalize_vector(u)\n v_u = Math.normalize_vector(v)\n\n return np.arccos(np.clip(np.dot(u_u, v_u), -1.0, 1.0))\n\n @staticmethod\n def compute_circle(x, y, r):\n temp1 = []\n temp2 = []\n\n for x_ in range(-r, r):\n y_ = int(math.sqrt(int(r * r) - x_ * x_) + 0.5)\n\n temp1.append((x + x_, y + y_))\n temp2.append((x + x_, y - y_))\n\n return temp1 + list(reversed(temp2))\n\n @staticmethod\n def curve_median_point(curve):\n if len(curve) % 2 == 1:\n return curve[len(curve) // 2]\n else:\n a = (curve[len(curve) // 2 - 1][0], curve[len(curve) // 2 - 1][1])\n b = (curve[len(curve) // 2 + 1][0], curve[len(curve) // 2 + 1][1])\n\n return ((a[0] + b[0]) / 2.0, (a[1] + b[1]) / 2.0)\n\n @staticmethod\n def palette_circle_extension(curve, parent):\n circle_center = parent.center\n m = Math.curve_median_point(curve)\n v = (m[0] - circle_center[0], m[1] - circle_center[1])\n\n d = Math.vector_norm(v) * 1.5\n vn = Math.normalize_vector(v)\n\n p = (vn[0] * d + circle_center[0], vn[1] * d + circle_center[1])\n\n temp = []\n\n for r in parent.roi:\n if r == m:\n temp.append(p)\n else:\n v = (r[0] - circle_center[0], r[1] - circle_center[1])\n\n d = Math.vector_norm(v) * 1.5\n vn = Math.normalize_vector(v)\n q = (vn[0] * d + circle_center[0], vn[1] * d + circle_center[1])\n\n temp.append(q)\n\n return m, temp\n","sub_path":"python/smath/smath.py","file_name":"smath.py","file_ext":"py","file_size_in_byte":10415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"546787193","text":"\n#============================================================================\n# This bot periodically fetches tweets in which you are mentioned. If the \n# tweet is not a reply to another tweet, and it contains the words \"help\" \n# or \"support\", then the tweet author will be followed, and the tweet will \n# be replied to with another tweet saying \"Please reach us via DM\". 
\n# 04/01/2021\n#============================================================================\n\nimport tweepy\nimport logging\nfrom config import create_api\nimport time\n\nlogging.basicConfig(level=logging.INFO)\nlogger = logging.getLogger()\n\ndef check_mentions(api, keywords, since_id):\n logger.info(\"Retrieving mentions\")\n new_since_id = since_id\n for tweet in tweepy.Cursor(api.mentions_timeline,\n since_id=since_id).items():\n new_since_id = max(tweet.id, new_since_id)\n if tweet.in_reply_to_status_id is not None:\n continue\n if any(keyword in tweet.text.lower() for keyword in keywords):\n logger.info(f\"Answering to {tweet.user.name}\")\n\n if not tweet.user.following:\n tweet.user.follow()\n\n api.update_status(\n status=\"Please reach us via DM\",\n in_reply_to_status_id=tweet.id,\n )\n return new_since_id\n\ndef main():\n api = create_api()\n since_id = 1\n while True:\n since_id = check_mentions(api, [\"help\", \"support\"], since_id)\n logger.info(\"Waiting...\")\n time.sleep(60)\n\nif __name__ == \"__main__\":\n main()\n\n#______________________________________________________________________________\n# To run a bot.\n\n# In your directory of the bot, activate python and this file.\n# You can stop the bot using Ctrl-C.\n","sub_path":"bots/autoreply.py","file_name":"autoreply.py","file_ext":"py","file_size_in_byte":1830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"520418421","text":"# assuming that these are passed in\nwords_and_counts = sorted([('hello', 1), ('my', 2), ('name', 1), ('is', 3), ('coleman', 1)], key=lambda t: t[0])\nonly_words = list(map(lambda t: t[0], words_and_counts))\n\n\n# takes the dictionary, the word counts, and a vector \n# which is built up in the function calls\ndef create_vector(content, dictionary, vect):\n # base case\n if len(dictionary) == 0:\n return vect\n\n # need to fill in zeros\n if len(content) == 0:\n return vect + list(map(lambda t: (t[0], 0), dictionary))\n\n # get the first two tuples and check equality\n curr = content[0]\n curr_dict = dictionary[0]\n if curr[0] == curr_dict[0]:\n return create_vector(content[1:], dictionary[1:], [curr] + vect)\n\n # not equal. 
fill in a zero and continue on\n return create_vector(content, dictionary[1:], [(curr_dict[0], 0)] + vect)\n\n\ndef vectorize(uid, word_counts):\n # needed for the create vector function to work\n word_counts.sort(key=lambda a: a[0])\n\n # get rid of anything not in the dictionary\n only_dicts = filter(lambda word_tup: \\\n word_tup[0] in only_words,\n word_counts)\n\n # create and return the vector\n vect = create_vector(list(only_dicts), words_and_counts, [])\n return sorted(vect, key=lambda t: t[0])\n\n\nif __name__ == \"__main__\":\n print(vectorize(\"test\", [('hello', 10), ('james', 5), ('gibson', 3), ('my', 5)]))\n","sub_path":"hadoop-project/vectorize.py","file_name":"vectorize.py","file_ext":"py","file_size_in_byte":1443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"499067854","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2020/1/14 14:46\n# @Author : 马飞\n# @File : datax_sync.py.py\n# @Software: PyCharm\n\nimport json\nimport urllib.parse\nimport urllib.request\nimport ssl\nimport os,happybase\nimport warnings\nimport sys\nimport datetime\nimport smtplib\nfrom email.mime.text import MIMEText\nimport traceback\nimport pymysql\n\ndef get_ds_hbase(ip,port):\n conn = happybase.Connection(host=ip,\n port=int(port),\n timeout=3600000,\n autoconnect=True,\n table_prefix=None,\n table_prefix_separator=b'_',\n compat='0.98',\n transport='buffered',\n protocol='binary')\n conn.open()\n return conn\n\ndef get_hbase_tab_rows(db,tab):\n table = db.table(tab)\n i_counter =0\n for key, data in table.scan():\n i_counter=i_counter+1\n if i_counter>=1:\n break\n return i_counter\n\ndef send_mail25(p_from_user,p_from_pass,p_to_user,p_title,p_content):\n to_user=p_to_user.split(\",\")\n try:\n msg = MIMEText(p_content,'html','utf-8')\n msg[\"Subject\"] = p_title\n msg[\"From\"] = p_from_user\n msg[\"To\"] = \",\".join(to_user)\n server = smtplib.SMTP(\"smtp.exmail.qq.com\", 25)\n server.set_debuglevel(0)\n server.login(p_from_user, p_from_pass)\n server.sendmail(p_from_user, to_user, msg.as_string())\n server.quit()\n except smtplib.SMTPException as e:\n print(e)\n\ndef exception_interface(v_title,v_content):\n v_templete = '''\n \n \n \n \n \n $$TABLE$$ \n \n \n '''\n v_templete = v_templete.replace('$$TABLE$$',v_content)\n send_mail25('190343@lifeat.cn','Hhc5HBtAuYTPGHQ8','190343@lifeat.cn', v_title,v_templete)\n\ndef aes_decrypt(p_password,p_key):\n values = {\n 'password': p_password,\n 'key':p_key\n }\n url = 'http://$$API_SERVER$$/read_db_decrypt'\n context = ssl._create_unverified_context()\n data = urllib.parse.urlencode(values).encode(encoding='UTF-8')\n req = urllib.request.Request(url, data=data)\n res = urllib.request.urlopen(req, context=context)\n res = json.loads(res.read())\n if res['code'] == 200:\n print('接口read_db_decrypt 调用成功!')\n config = res['msg']\n return config\n else:\n print('接口read_db_decrypt 调用失败!,{0}'.format(res['msg']))\n sys.exit(0)\n\ndef get_ds_mysql(ip,port,service ,user,password):\n conn = pymysql.connect(host=ip, port=int(port), user=user, passwd=password, db=service, charset='utf8')\n return conn\n\ndef get_config(tag):\n try:\n values = {\n 'tag': tag\n }\n print('values=', values)\n url = 'http://$$API_SERVER$$/read_datax_config_sync'\n context = ssl._create_unverified_context()\n data = urllib.parse.urlencode(values).encode(encoding='UTF-8')\n print('data=', data)\n req = urllib.request.Request(url, data=data)\n res = urllib.request.urlopen(req, context=context)\n res = 
json.loads(res.read())\n print(res, res['code'])\n if res['code'] == 200:\n print('read_datax_config_sync:接口调用成功!')\n print(res['msg'])\n config = res['msg']\n config['db_mysql_sour_ip'] = config['sync_db_sour'].split(':')[0]\n config['db_mysql_sour_port'] = config['sync_db_sour'].split(':')[1]\n config['db_mysql_sour_service'] = config['sync_db_sour'].split(':')[2]\n config['db_mysql_sour_user'] = config['sync_db_sour'].split(':')[3]\n config['db_mysql_sour_pass'] = aes_decrypt(config['sync_db_sour'].split(':')[4], config['db_mysql_sour_user'])\n config['db_mysql_sour_string'] = config['db_mysql_sour_ip'] + ':' + config['db_mysql_sour_port'] + '/' + config['db_mysql_sour_service']\n config['db_mysql_sour'] = get_ds_mysql(config['db_mysql_sour_ip'],\n config['db_mysql_sour_port'],\n config['db_mysql_sour_service'],\n config['db_mysql_sour_user'],\n config['db_mysql_sour_pass'])\n\n return config\n else:\n print('dataX接口调用失败!,{0}'.format(res['msg'])) # 发异常邮件\n v_title = 'dataX数据同步接口异常[★]'\n v_content = '''\n \n \n \n
<tr><td>接口地址</td><td>$$interface$$</td></tr>\n                    <tr><td>接口参数</td><td>$$parameter$$</td></tr>\n                    <tr><td>错误信息</td><td>$$error$$</td></tr>
'''\n v_content = v_content.replace('$$interface$$', url)\n v_content = v_content.replace('$$parameter$$', json.dumps(values))\n v_content = v_content.replace('$$error$$', res['msg'])\n if res['code'] != -3:\n exception_interface(v_title, v_content)\n sys.exit(0)\n else:\n print(res['msg'])\n sys.exit(0)\n except Exception as e :\n v_title = 'dataX数据同步接口异常[★★]'\n v_content = '''\n \n \n \n
<tr><td>接口地址</td><td>$$interface$$</td></tr>\n                    <tr><td>接口参数</td><td>$$parameter$$</td></tr>\n                    <tr><td>错误信息</td><td>$$error$$</td></tr>
'''\n v_content = v_content.replace('$$interface$$', url)\n v_content = v_content.replace('$$parameter$$', json.dumps(values))\n v_content = v_content.replace('$$error$$', traceback.format_exc())\n exception_interface(v_title, v_content)\n print(traceback.format_exc())\n sys.exit(0)\n\ndef get_templete(id):\n try:\n values = {\n 'id': id\n }\n print('values=', values)\n url = 'http://$$API_SERVER$$/read_datax_templete'\n context = ssl._create_unverified_context()\n data = urllib.parse.urlencode(values).encode(encoding='UTF-8')\n print('data=', data)\n req = urllib.request.Request(url, data=data)\n res = urllib.request.urlopen(req, context=context)\n res = json.loads(res.read())\n print(res, res['code'])\n if res['code'] == 200:\n print('read_datax_templete:接口调用成功!')\n print(res['msg'])\n config = res['msg']\n return config\n else:\n print('read_datax_templete:接口调用失败:'+res['msg'])\n v_title = 'dataX数据同步接口异常[★]'\n v_content = '''\n \n \n \n
<tr><td>接口地址</td><td>$$interface$$</td></tr>\n                    <tr><td>接口参数</td><td>$$parameter$$</td></tr>\n                    <tr><td>错误信息</td><td>$$error$$</td></tr>
'''\n v_content = v_content.replace('$$interface$$', url)\n v_content = v_content.replace('$$parameter$$', json.dumps(values))\n v_content = v_content.replace('$$error$$', res['msg'])\n if res['code'] != -3:\n exception_interface(v_title, v_content)\n sys.exit(0)\n else:\n print(res['msg'])\n sys.exit(0)\n\n except Exception as e :\n v_title = 'dataX数据同步接口异常[★★]'\n v_content = '''\n \n \n \n
<tr><td>接口地址</td><td>$$interface$$</td></tr>\n                    <tr><td>接口参数</td><td>$$parameter$$</td></tr>\n                    <tr><td>错误信息</td><td>$$error$$</td></tr>
'''\n v_content = v_content.replace('$$interface$$', url)\n v_content = v_content.replace('$$parameter$$', json.dumps(values))\n v_content = v_content.replace('$$error$$', traceback.format_exc())\n exception_interface(v_title, v_content)\n print(traceback.format_exc())\n sys.exit(0)\n\ndef get_time():\n return datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n\ndef get_file_contents(filename):\n file_handle = open(filename, 'r')\n line = file_handle.readline()\n lines = ''\n while line:\n lines = lines + line\n line = file_handle.readline()\n lines = lines + line\n file_handle.close()\n return lines\n\ndef write_datax_sync_log(config):\n v_tag = {\n 'sync_tag' : config['sync_tag'],\n 'create_date' : get_time(),\n 'table_name' : config['table_name'],\n 'duration' : config['sync_duration'],\n 'amount' : config['sync_amount']\n }\n v_msg = json.dumps(v_tag)\n values = {\n 'tag': v_msg\n }\n url = 'http://$$API_SERVER$$/write_datax_sync_log'\n context = ssl._create_unverified_context()\n data = urllib.parse.urlencode(values).encode(encoding='UTF-8')\n req = urllib.request.Request(url, data=data)\n res = urllib.request.urlopen(req, context=context)\n res = json.loads(res.read())\n print(res)\n print(res['code'])\n if res['code'] == 200:\n print('Interface write_datax_sync_log call successful!')\n else:\n print('Interface write_datax_sync_log call failed!')\n\ndef print_dict(config):\n print('-'.ljust(85,'-'))\n print(' '.ljust(3,' ')+\"name\".ljust(20,' ')+'value')\n print('-'.ljust(85,'-'))\n for key in config:\n print(' '.ljust(3,' ')+key.ljust(20,' ')+'=',config[key])\n print('-'.ljust(85,'-'))\n\ndef get_seconds(b):\n a=datetime.datetime.now()\n return int((a-b).total_seconds())\n\ndef get_sync_table_rows(config,hbase_rows):\n db = config['db_mysql_sour']\n cr = db.cursor()\n tab = config['sync_table']\n where = config['sync_incr_where']\n sql = ''\n if where is None or where =='':\n sql = \"select count(0) from {0}\".format(tab)\n else:\n if hbase_rows== 0 :\n sql = \"select count(0) from {0}\".format(tab)\n else:\n sql = \"select count(0) from {0} where {1}\".format(tab, where)\n cr.execute(sql)\n rs=cr.fetchone()\n cr.close()\n print('get_sync_table_rows=', sql,rs[0])\n return rs[0]\n\ndef main():\n sync_tag=''\n warnings.filterwarnings(\"ignore\")\n for p in range(len(sys.argv)):\n if sys.argv[p] == \"-tag\":\n sync_tag = sys.argv[p + 1]\n\n config = get_config(sync_tag)\n print_dict(config)\n\n print('config=',config)\n thrift_host = config['hbase_thrift'].split(':')[0]\n thrift_port = int(config['hbase_thrift'].split(':')[1])\n hbase_table = config['sync_hbase_table']\n datax_home = config['datax_home']\n datax_script = config['script_path']\n datax_incr = config['sync_incr_col']\n sync_id = config['id']\n db = get_ds_hbase(thrift_host,thrift_port)\n hbase_rows = get_hbase_tab_rows(db,hbase_table)\n\n v_full_json = '{0}/{1}_full.json'.format(datax_script,sync_tag)\n v_incr_json = '{0}/{1}_incr.json'.format(datax_script,sync_tag)\n\n v_full_scp = '{0}/bin/datax.py {1}/{2}'.format(datax_home, datax_script, sync_tag + '_full.json')\n v_incr_scp = '{0}/bin/datax.py {1}/{2}'.format(datax_home, datax_script, sync_tag + '_incr.json')\n\n v_templete = get_templete(sync_id)\n start_time = datetime.datetime.now()\n\n print('full_templete=',v_templete['full'])\n print('incr_templete=',v_templete['incr'])\n\n #替换模板操作\n with open(v_full_json, 'w') as obj_file:\n obj_file.write(v_templete['full'])\n\n with open(v_incr_json, 'w') as obj_file:\n obj_file.write(v_templete['incr'])\n\n #替换^M字符\n 
os.system('{0}/repstr.sh {1}'.format(datax_script,v_full_json))\n os.system('{0}/repstr.sh {1}'.format(datax_script,v_incr_json))\n\n if hbase_rows == 0:\n print(v_full_scp)\n os.system(v_full_scp)\n else:\n if datax_incr is not None or datax_incr != '':\n print(v_incr_scp)\n os.system(v_incr_scp)\n else:\n print(v_full_scp)\n os.system(v_full_scp)\n\n config['table_name'] = config['sync_table']\n config['sync_duration'] = str(get_seconds(start_time))\n config['sync_amount'] = str(get_sync_table_rows(config,hbase_rows))\n write_datax_sync_log(config)\n print('hbase_table=', hbase_table)\n print('hbase_rows=', hbase_rows)\n\n\nif __name__ == \"__main__\":\n main()\n\n","sub_path":"templete/datax/datax_sync.py","file_name":"datax_sync.py","file_ext":"py","file_size_in_byte":14256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"65710867","text":"#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n# Author: xq17\n\nimport requests, pysnooper, re, time, json, sys\nfrom lxml import etree\nfrom urllib.parse import quote, unquote\nfrom argparse import ArgumentParser, RawTextHelpFormatter\n\n#### 采用类的写法方便后期导入调用\n\nconfig = {\n \"timeout\": 5\n}\n\nheaders = {\n \"User-Agent\": \"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.67 Safari/537.36\",\n \"Orgin\": \"https://dis.tianyancha.com\",\n \"Referer\": \"https://www.tianyancha.com/login?from=https%3A%2F%2Fwww.tianyancha.com%2Fsearch%3Fkey%3Dbaidu\"\n}\n\n# 创建一个工具类\nclass Utils:\n def percent_to_int(self, string):\n try:\n if \"%\" in string:\n newint = float(string.strip(\"%\"))\n return newint\n except:\n pass\n return 0.0\n\n def parse_cid(self, cid):\n if type(cid) == int:\n return cid\n if cid.startswith(\"http\"):\n return int(re.search(\"company/(\\d+)\", cid).group(1))\n\n#创建个公司类,默认都是public可以直接调用,orz,快忘了\nclass Company:\n def __init__(self, cid):\n # 公司名字\n self.name = \"\"\n # 公司手机\n self.tel = \"\"\n # 公司域名\n self.website = \"\"\n # 公司邮箱\n self.email = \"\"\n # 公司标识\n self.cid = self.parse_cid(cid)\n # 公司股权结构\n self.equity = \"\"\n\n def __str__(self):\n return f\"{self.name}, {self.tel}, {self.website}, {self.email}, {self.cid}, {self.equity}\"\n\n\n def toArray(self):\n return [self.name, self.tel, self.website, self.email, self.cid, self.equity]\n\n def toDict(self):\n return {\n \"cid\": self.cid,\n \"name\": self.name,\n \"tel\": self.tel,\n \"website\": self.website,\n \"email\": self.email,\n \"equity\": self.equity\n }\n\n def get_equity(self):\n return self.equity\n\n def set_equity(self, data):\n self.equity = data\n\n def parse_cid(self, cid):\n if type(cid) == int:\n return cid\n if cid.startswith(\"http\"):\n return int(re.search(\"company/(\\d+)\", cid).group(1))\n\n\n# 创建一个核心Hacker操作类\nclass Hacker:\n def __init__(self, query=\"\", pages=\"\", offset=0, cookies=\"\"):\n self.query = quote(unquote(query))\n self.offset = offset\n self.pages = pages\n self.cookies = self.parse_cookie(cookies)\n self.preHTML = self.prepare()\n self.total = self.get_total()\n self.utils = Utils()\n\n def __str__(self):\n return f\"{self.query}, {self.offset}, {self.pages}, {self.cookie}, {self.self.total}\"\n\n def toDict(self):\n hacker_dict = {\n \"query\": self.query,\n \"pages\": self.pages,\n \"cookie\":self.cookie,\n \"preHTML\": self.preHTML,\n \"total\": self.total\n }\n return hacker_dict\n\n def prepare(self):\n if self.query:\n url = f\"https://www.tianyancha.com/search/p1?key={self.query}\"\n try:\n r = requests.get(url, 
headers=headers, cookies=self.cookies, timeout=config[\"timeout\"], allow_redirects=False)\n # print(r.text)\n html = etree.HTML(r.text)\n return html\n except Exception as e:\n print(\"[-] Hacker>prepare Failed!\")\n return \"\"\n\n def parse_cookie(self, cookies):\n if type(cookies) == str and cookies != \"\":\n cookies = { item.split('=')[0]:item.split('=')[1] for item in cookies.split(\"; \")}\n return cookies\n else:\n return {}\n\n def get_total(self):\n if self.query:\n page = self.preHTML.xpath('//*[@id=\"customize\"]/div/@onclick')\n if page:\n try:\n total = re.search(\", (\\d+?)\\)\", str(page[0])).group(1)\n # print(total)\n return int(total)\n except Exception as e:\n print(\"[-] get_total Exception!\")\n else:\n print(\"[-] get_total Failed!\")\n return 0\n\n\n def search(self):\n if self.pages == None:\n if self.cookies:\n self.pages = self.total\n else:\n if self.total < 5:\n self.pages = self.total\n else:\n self.pages = 5\n if self.offset > self.pages:\n print(\"[-] search Failed! offset > page Error\")\n exit(0)\n\n print(f\"[+] total page:{self.total}\")\n print(f\"[+] Now searching {unquote(self.query)} keyword from {self.offset} to {self.pages}\")\n company_ids = []\n for page in range(self.offset, self.pages+1):\n company_ids += self.get_id(page)\n\n print(f\"[+] Success explored {len(company_ids)} relevant companies!\")\n company_infos = [self.get_info(cid) for cid in company_ids]\n # 打印基础信息\n for obj in company_infos:\n # print(obj.name)\n print(obj.toArray())\n print(f\"now retrying {obj.name}'s equity...\")\n obj.set_equity(self.get_equity(obj.cid))\n return company_infos\n\n def get_id(self,page):\n url = f\"https://www.tianyancha.com/search/p{page}?key={self.query}\"\n try:\n res = requests.get(url, headers=headers, cookies=self.cookies, timeout=config[\"timeout\"], allow_redirects=False)\n html = etree.HTML(res.text)\n hrefs = html.xpath('//div[@class=\"search-result-single \"]//a[contains(@class,\"name\")]/@href')\n # href_id = [re.search(\"company/(\\d+)\", url).group(1) for url in href]\n return list(set(hrefs))\n except Exception as e:\n print(\"[-] get_id Failed!\")\n # exit(0)\n return []\n\n # @pysnooper.snoop()\n def get_info(self, cid):\n cid = self.utils.parse_cid(cid)\n url = f\"https://www.tianyancha.com/company/{cid}\"\n company = Company(cid)\n try:\n res = requests.get(url, headers=headers, cookies=self.cookies, timeout=config[\"timeout\"])\n # print(res.text)\n html = etree.HTML(res.text)\n name = html.xpath('//div[@class=\"content\"]//h1[@class=\"name\"]/text()')\n tel = html.xpath('//div[@class=\"detail \"]//div[@class=\"f0\"]/div[1]/span[2]/text()')\n email = html.xpath('//div[@class=\"detail \"]//div[@class=\"f0\"]/div[2]/span[2]/text()')\n website = html.xpath('//*[@id=\"company_web_top\"]/div[2]/div[3]/div[3]/div[2]/div[1]/a/@href')\n company.website = \"\" if len(website) == 0 else website[0]\n company.email = \"\" if len(email) == 0 else email[0]\n company.tel = \"\" if len(tel) == 0 else tel[0]\n company.name = \"\" if len(name) == 0 else name[0]\n # print(company.name)\n except Exception as e:\n print(f\"[-] get_info Fail! 
\\n{e}\")\n return company\n\n\n # @pysnooper.snoop()\n def get_equity(self, cid=\"\", ratio=0):\n # 获取cloud_token\n url = f\"https://capi.tianyancha.com/cloud-equity-provider/v4/qq/name.json?id={cid}?random={int(time.time())}\"\n self.cookies[\"CT_TYCID\"] = str(cid)\n cloud_token = \"\"\n try:\n res = requests.get(url, headers=headers, cookies=self.cookies, timeout=config[\"timeout\"], allow_redirects=False)\n # print(res.status_code)\n res_data = json.loads(res.text)\n # print(res_data)\n chars = res_data['data']['v']\n fnStr = \"\".join([chr(int(x)) for x in chars.split(',')])\n cloud_token = re.search(\"cloud_token=([a-z0-9A-Z]+)?;\", fnStr).group(1)\n except Exception as e:\n print(f\"[-] get_equity Failed! \\n{e}\")\n\n _url = f\"https://capi.tianyancha.com/cloud-equity-provider/v4/equity/indexnode.json?id={cid}\"\n self.cookies[\"cloud_token\"] = cloud_token\n try:\n res_data = requests.get(_url, headers=headers, cookies=self.cookies, timeout=config[\"timeout\"]).text\n json_data = json.loads(res_data)[\"data\"][\"investorList\"]\n # print(type(json_data))\n json_res = []\n for k in json_data:\n # print(k[\"percent\"])\n if self.utils.percent_to_int(k[\"percent\"]) >= ratio:\n print(\"name:\" + k[\"name\"] + \"-equity:\" + str(k[\"percent\"]) + \"-id:\" + str(k[\"id\"]))\n json_res.append(k)\n # 分隔目标\n print(\"\\n\")\n return json_res\n except Exception as e:\n print(f\"[-] get_equity Failed! \\n{e}\")\n\n\ndef check_args(args):\n if not args.cookies:\n print(\"[-] Missing your cookies, the frequent search may be detected!\")\n\ndef parse_args():\n parser = ArgumentParser()\n parser.add_argument(\"-key\", type=str, help=\"search query\")\n parser.add_argument(\"-t\", dest=\"target\" , type=int, help=\"company's identity,like 22822(baidu.com)\")\n parser.add_argument(\"--percent\",dest=\"percent\", type=int, default=0, help='specify percent equity,degfault 0')\n parser.add_argument(\"--json\", dest='json', type=str, help='output reult to json type')\n parser.add_argument(\"-m\", \"--mode\", dest=\"mode\", type=int, default=1, help='1: info 2.equity 3.all')\n parser.add_argument(\"-o\", \"--offset\", dest='offset', type=int, default=0, help='page offset to start from')\n parser.add_argument(\"-p\", \"--pages\", dest='pages', type=int, help='specify multiple pages')\n parser.add_argument(\"-c\", \"--cookies\", dest='cookies', type=str, default={}, help='specify your cookies')\n if len(sys.argv) == 1:\n sys.argv.append(\"-h\")\n args = parser.parse_args()\n check_args(args)\n return args\n\ndef start_work(args):\n print(args)\n ratio = args.percent\n cookies = args.cookies\n offset = args.offset\n pages = args.pages\n keyword = args.key\n json_file = args.json\n\n if not args.target and args.key:\n # 简单搜索查询\n hacker = Hacker(keyword, offset=offset, pages=pages, cookies=cookies)\n # 这个是返回的数据,如果想保存成其他格式可以在这里开始修改\n data = [ obj.toDict() for obj in hacker.search()]\n if json_file:\n json_data = json.dumps(data)\n with open(json_file, \"w\") as f:\n f.write(json_data)\n\n elif args.target:\n cid = args.target\n # 针对特定目标查询\n if args.mode == 1:\n # 只查询基本信息\n print(Hacker(cookies=cookies).get_info(cid=cid))\n elif args.mode == 2:\n Hacker(cookies=cookies).get_equity(cid=cid, ratio=ratio)\n elif args.mode == 3:\n print(Hacker(cookies=cookies).get_info(cid=cid))\n Hacker(cookies=cookies).get_equity(cid=cid, ratio=ratio)\n\ndef main():\n args = parse_args()\n start_work(args)\n # hacker = Hacker(\"百度\", offset=0, pages=0, cookies=cookies)\n # hacker.search()\n # 
print(Hacker(cookies=cookies).get_equity(cid=22822, ratio=50))\n # print(Hacker(cookies=cookies).get_info(cid=22822))\n\nif __name__ == '__main__':\n main()","sub_path":"EquityInfoer.py","file_name":"EquityInfoer.py","file_ext":"py","file_size_in_byte":11210,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"16581678","text":"'''\nBinary Search Tree\n'''\nclass BST_Node:\n def __init__(self, data):\n\n self.val = data\n self.left = None\n self.right = None\n\n\nclass BST_Ops:\n\n def construct_tree(self, data, root):\n if root is None:\n root = BST_Node(data)\n elif root.val > data:\n if root.left is not None:\n self.construct_tree(data, root.left)\n else:\n root.left = BST_Node(data)\n else:\n if root.right is not None:\n self.construct_tree(data, root.right)\n else:\n root.right = BST_Node(data)\n return root\n\n def print_bst_inorder(self, root):\n if root is None:\n return\n\n self.print_bst_inorder(root.left)\n print(root.val)\n self.print_bst_inorder(root.right)\n\n def print_bst_preorder(self, root):\n if root is None:\n return\n print(root.val)\n self.print_bst_inorder(root.left)\n self.print_bst_inorder(root.right)\n\n def print_bst_postorder(self, root):\n if root == None:\n return\n print(root.val)\n self.print_bst_inorder(root.right)\n self.print_bst_inorder(root.left)\n\n kthelement = 0\n\n def kth_smallest(self, root, k, nCount):\n if root is None:\n return\n\n self.kth_smallest(root.left, k, nCount)\n self.kthelement += 1\n nCount[0] += 1\n if self.kthelement == k:\n print(\"kth::\"+str(root.val)+\" \"+str(nCount[0]))\n return\n self.kth_smallest(root.right, k, nCount)\nbs = BST_Ops()\nroot = None\nfor each in [8, 4, 7, 3, 2, 5, 1]:\n root = bs.construct_tree(each, root)\n\nprint(\"inorder\")\nbs.print_bst_inorder(root)\nprint(\"preorder\")\nbs.print_bst_preorder(root)\nprint(\"postorder\")\nbs.print_bst_postorder(root)\n\nprint(\"kth smallest\")\nnCount = [0]*1\nbs.kth_smallest(root, 1, nCount)\nprint(nCount[0])\nbs.kthelement=0\nbs.kth_smallest(root, 4, nCount)\n","sub_path":"randomprobs/BST.py","file_name":"BST.py","file_ext":"py","file_size_in_byte":1980,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"359409465","text":"\"\"\"\nUser Story 1\n\nThe Jim Morton business is a super-express coffee shop that only sells one size of coffee, for a single base price of $1.25 per cup. They've asked you to write a program to help them streamline their ordering system, to provide a more efficient customer experience.\n\nThey've requested a simple, user-friendly program that will ask customers how many cups of coffee they wish to order. Once the customer has entered a number, the program will display a confirmation of their number, then calculate and display the following:\n\nThe pre-tax cost of the order, based on how many cups the user ordered.\nThe amount of tax that will be applied to the order\nThe total amount the customer must pay.\nThe calculated cost should display as a standard dollar amount, ie. with proper currency formatting.\n\"\"\"\n\n__AUTHOR__ = \"Cristina Ponay \"\n\ndef main():\n while True:\n coffeeCup = input(\"How many cups of coffee would you like to order? \")\n if not coffeeCup.isdigit() or not coffeeCup.isnumeric():\n print(\"***Please enter a number!***\")\n else:\n break\n \n # computation\n netTotal = float(coffeeCup) * 1.25 #price/cup: $1.25\n hst = netTotal * 0.15 # 15% hst\n total = netTotal + hst\n\n ## display to user\n print(\"\\nHello! 
You ordered \" + coffeeCup + \" cups of coffee.\")\n print(\"Net total:\\t $\" + str(netTotal))\n print(\"HST 15%:\\t \" + str(hst))\n print(\"TOTAL:\\t\\t $\" + str(total))\n\nif __name__ == \"__main__\":\n main()","sub_path":"Notes/Exercises/UserStory1/JimMortons.py","file_name":"JimMortons.py","file_ext":"py","file_size_in_byte":1524,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"16509496","text":"import sys\n\nsys.setrecursionlimit(100)\n\n\ndef regex_engine(regex_p, string_p) -> bool:\n result = None\n\n if len(regex_p) == 0:\n result = True\n else:\n if len(string_p) == 0:\n result = False\n else:\n if regex_p[0] == '.' or (regex_p[0] == string_p[0]):\n result = regex_engine(regex_p[1:], string_p[1:])\n else:\n result = False\n return result\n\n\ndef entry_point(input_string):\n slist = input_string.split(\"|\")\n regex = slist[0]\n string_ = slist[1]\n result_e = False\n if regex == '':\n result_e = True\n elif string_ == '':\n result_e = False\n else:\n i = 0\n while (result_e == False) and (len(string_[i:i + len(regex)]) != 0):\n result_e = regex_engine(regex, string_[i:i + len(regex)])\n i += 1\n return result_e\n\n\n# in_string = input()\n# print(entry_point(in_string))\n\nprint(f'Expected: True, returned: {entry_point(\"apple|apple\")}')\nprint(f'Expected: True, returned: {entry_point(\"ap|apple\")}')\nprint(f'Expected: True, returned: {entry_point(\"le|apple\")}')\nprint(f'Expected: True, returned: {entry_point(\"a|apple\")}')\nprint(f'Expected: True, returned: {entry_point(\".|apple\")}')\nprint(f'Expected: False, returned: {entry_point(\"apwle|apple\")}')\nprint(f'Expected: False, returned: {entry_point(\"peach|apple\")}')\n","sub_path":"RegexEngine/regexEngine_3.py","file_name":"regexEngine_3.py","file_ext":"py","file_size_in_byte":1369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"80371914","text":"import time\nimport np as np\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom pandas import DataFrame, Series\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import precision_recall_curve, average_precision_score, roc_curve, auc, accuracy_score, \\\n precision_score, recall_score\n\nCSV_FILE_PATH = \"train_WOE.csv\"\ntrain_data = pd.read_csv(CSV_FILE_PATH, index_col=0)\n\nCSV_FILE_PATH_1 = \"test_WOE.csv\"\ntest_data = pd.read_csv(CSV_FILE_PATH_1, index_col=0)\n\n# 训练集\n# 只选取IV大于0.1的特征\nx = train_data.loc[:, ['RevolvingUtilizationOfUnsecuredLines', 'age',\n 'NumberOfTime30-59DaysPastDueNotWorse',\n 'NumberOfTimes90DaysLate',\n 'NumberOfTime60-89DaysPastDueNotWorse']]\ny = 1 - train_data['SeriousDlqin2yrs']\n\n# 测试集\n# 只选取IV大于0.1的特征\nx_test = test_data.loc[:, ['RevolvingUtilizationOfUnsecuredLines', 'age',\n 'NumberOfTime30-59DaysPastDueNotWorse',\n 'NumberOfTimes90DaysLate',\n 'NumberOfTime60-89DaysPastDueNotWorse']]\ny_test = 1 - test_data['SeriousDlqin2yrs']\n# print(np.array(y_true))\n# print(x)\n# print(y)\n# 建立模型,并使用训练集进行模型训练\n# 逻辑回归\nclf = RandomForestClassifier()\nprint('RandomForestClassifier')\nstart = time.process_time()\nclf.fit(x, y)\nend = time.process_time()\nprint('Running time: %s Seconds' % (end - start))\n# y_score = clf.oob_score(x_test)\n# print('y score: ', y_score)\n# print(clf)\n# 获得变量权重\n# print('变量权重:', clf.coef_)\n\n# 使用测试集进行模型预测\ny_predict = clf.predict(x_test)\n# print('预测模型:', y_predict)\n\ny_score = 
clf.predict_proba(x_test)[:, 1]\n# probs = clf.decision_function(x_test)\n# y_score = (probs - probs.min()) / (probs.max() - probs.min())\n# print('y_score: ', y_score)\n# print('模型score: ', clf.score(x_test, y_test))\n\nprint('Accuracy: ', accuracy_score(y_test, y_predict))\n\nprint('Precision: ', precision_score(y_test, y_predict, average=None)[0])\n\nprint('Recall: ', recall_score(y_test, y_predict, average=None)[0])\n\nnew_pd = pd.DataFrame({'y_test': y_test, 'y_pre': y_predict})\n\n\n# new_pd.to_csv('pre_and_test.csv')\n# print(new_pd)\n\n\n# 绘制PR图\ndef draw_PR(Y_test, Y_score):\n plt.figure(1) # 创建图表1\n plt.title('Precision/Recall Curve') # give plot a title\n plt.xlabel('Recall') # make axis labels\n plt.ylabel('Precision')\n plt.plot([0, 1], [0, 1], '--', color=(0.6, 0.6, 0.6), label=\"Luck\") # 画对角线\n plt.xlim([-0.05, 1.05])\n plt.ylim([-0.05, 1.05])\n Y_test = np.array(Y_test)\n precision, recall, thresholds = precision_recall_curve(np.array(Y_test), Y_score)\n # print('precision:', precision)\n # print('recall:', recall)\n # print('y_test', Y_test)\n # print('y_predict', Y_score)\n plt.figure(1)\n plt.plot(recall, precision)\n plt.show()\n\n\n# draw_PR(y_test, y_score)\n\n\n# 绘制ROC图\n\ndef draw_ROC_curve(Y_test, Y_score):\n Y_test = np.array(Y_test)\n false_positive_rate, true_positive_rate, thresholds = roc_curve(Y_test, Y_score)\n print('y test:', Y_test)\n print('y_predict', Y_score)\n roc_auc = auc(false_positive_rate, true_positive_rate)\n plt.title('ROC')\n plt.plot(false_positive_rate, true_positive_rate, 'b', label='AUC = %0.2f' % roc_auc)\n plt.legend(loc='lower right')\n plt.plot([0, 1], [0, 1], 'r--')\n plt.ylabel('True Positive Rate')\n plt.xlabel('False Positive Rate')\n plt.show()\n\n\n# draw_ROC_curve(y_test, y_score)\n\n# 计算ks值\ndff = pd.DataFrame({'score': y_predict, 'label': y_test})\n\n\ndef ks(df, y_true, y_pre, num=10, good=0, bad=1):\n # 1.将数据从小到���平均分成num组\n df_ks = df.sort_values(y_pre).reset_index(drop=True)\n df_ks['rank'] = np.floor((df_ks.index / len(df_ks) * num) + 1)\n df_ks['set_1'] = 1\n # 2.统计结果\n result_ks = pd.DataFrame()\n result_ks['group_sum'] = df_ks.groupby('rank')['set_1'].sum()\n result_ks['group_min'] = df_ks.groupby('rank')[y_pre].min()\n result_ks['group_max'] = df_ks.groupby('rank')[y_pre].max()\n result_ks['group_mean'] = df_ks.groupby('rank')[y_pre].mean()\n # 3.最后一行添加total汇总数据\n result_ks.loc['total', 'group_sum'] = df_ks['set_1'].sum()\n result_ks.loc['total', 'group_min'] = df_ks[y_pre].min()\n result_ks.loc['total', 'group_max'] = df_ks[y_pre].max()\n result_ks.loc['total', 'group_mean'] = df_ks[y_pre].mean()\n # 4.好用户统计\n result_ks['good_sum'] = df_ks[df_ks[y_true] == good].groupby('rank')['set_1'].sum()\n result_ks.good_sum.replace(np.nan, 0, inplace=True)\n result_ks.loc['total', 'good_sum'] = result_ks['good_sum'].sum()\n result_ks['good_percent'] = result_ks['good_sum'] / result_ks.loc['total', 'good_sum']\n result_ks['good_percent_cum'] = result_ks['good_sum'].cumsum() / result_ks.loc['total', 'good_sum']\n # 5.坏用户统计\n result_ks['bad_sum'] = df_ks[df_ks[y_true] == bad].groupby('rank')['set_1'].sum()\n result_ks.bad_sum.replace(np.nan, 0, inplace=True)\n result_ks.loc['total', 'bad_sum'] = result_ks['bad_sum'].sum()\n result_ks['bad_percent'] = result_ks['bad_sum'] / result_ks.loc['total', 'bad_sum']\n result_ks['bad_percent_cum'] = result_ks['bad_sum'].cumsum() / result_ks.loc['total', 'bad_sum']\n # 6.计算ks值\n result_ks['diff'] = result_ks['bad_percent_cum'] - result_ks['good_percent_cum']\n # 7.更新最后一行total的数据\n 
result_ks.loc['total', 'bad_percent_cum'] = np.nan\n result_ks.loc['total', 'good_percent_cum'] = np.nan\n result_ks.loc['total', 'diff'] = result_ks['diff'].max()\n\n result_ks = result_ks.reset_index()\n\n return result_ks\n\n\ndef ks_curve(df, num=10):\n # 防止中文乱码\n plt.rcParams['font.sans-serif'] = ['SimHei']\n plt.rcParams['axes.unicode_minus'] = False\n\n ks_value = df['diff'].max()\n\n # 获取绘制曲线所需要的数据\n x_curve = range(num + 1)\n y_curve1 = [0] + list(df['bad_percent_cum'].values[:-1])\n y_curve2 = [0] + list(df['good_percent_cum'].values[:-1])\n y_curve3 = [0] + list(df['diff'].values[:-1])\n # 获取绘制ks点所需要的数据\n df_ks_max = df[df['diff'] == ks_value]\n x_point = [df_ks_max['rank'].values[0], df_ks_max['rank'].values[0]]\n y_point = [df_ks_max['bad_percent_cum'].values[0], df_ks_max['good_percent_cum'].values[0]]\n # 绘制曲线\n plt.title('KS Curve')\n plt.plot(x_curve, y_curve1, label='bad', linewidth=2)\n plt.plot(x_curve, y_curve2, label='good', linewidth=2)\n plt.plot(x_curve, y_curve3, label='diff', linewidth=2)\n # 标记ks\n plt.plot(x_point, y_point, label='ks - {:.2f}'.format(ks_value), color='r', marker='o', markerfacecolor='r',\n markersize=5)\n plt.scatter(x_point, y_point, color='r')\n plt.legend()\n plt.show()\n\n return ks_value\n\n\ndef ks_calc_auc(true, score):\n # 功能: 计算KS值,输出对应分割点和累计分布函数曲线图\n # score: 一维数组或series,代表模型得分(一般为预测正类的概率)\n # true: 一维数组或series,代表真实的标签({0,1}或{-1,1})\n # 输出值:\n # 'ks': KS值\n fpr, tpr, thresholds = roc_curve(true, score)\n ks_value = max(tpr - fpr)\n print('ks: ', ks_value)\n return ks_value\n\n\nks_calc_auc(y_test, y_score)\n","sub_path":"machine_learning_nex/eg1/Seventh_Step_Evaluate_Model(Random).py","file_name":"Seventh_Step_Evaluate_Model(Random).py","file_ext":"py","file_size_in_byte":7416,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"213096571","text":"#! 
/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n# Script Name: pomodoro-GUI.py\n# Author: Alejandro Druetta\n# Version: 0.1\n#\n# Description: Python 3 GUI pomodoro app.\n#\n# Usage:\n\nimport tkinter as tk\nfrom tkinter import Tk, Frame, Label, Button, StringVar\nfrom os import path\nfrom pygame import mixer\nimport sys\nfrom time import time, gmtime, strftime, sleep\n\nDEBUG = False\n\n# Constants\nif DEBUG:\n T_WORK = 0.5\nelse:\n T_WORK = 25\nT_BREAK = T_WORK * 0.2\nT_LONG = T_WORK * 0.6\n\nYELLOW = \"#fcf3cf\"\nBLUE = \"#2874a6\"\nRED = \"#e74c3c\"\nGREEN = \"#229954\"\nORANGE = \"#dc7633\"\n\nWORK = \"Work\"\nPAUSE = \"Pause\"\nCONTINUE = \"Continue\"\nBREAK = \"Break\"\n\n\nclass Pomodoro(Frame):\n\n def __init__(self, parent, abspath):\n super().__init__(parent)\n self.root = parent\n self.abspath = abspath\n\n self.work_count = 0\n\n # Tk variables\n self.tagVar = StringVar()\n self.tagVar.set(\"\")\n self.actionVar = StringVar()\n self.actionVar.set(WORK)\n self.displayVar = StringVar()\n self.displayVar.set(\"00:00\")\n\n self.initUI()\n\n def initUI(self):\n # toplevel\n self.root.title(\"PomodoroPy\")\n self.root.resizable(0, 0)\n\n # main frame\n self.pack()\n\n self.timeLabel = Label(self, textvariable=self.displayVar)\n self.timeLabel[\"background\"] = YELLOW\n self.timeLabel[\"padx\"] = \"10px\"\n self.timeLabel[\"font\"] = \"helvetica 48 bold\"\n self.timeLabel[\"fg\"] = \"gray\"\n self.timeLabel.pack(expand=True, fill=tk.X)\n\n self.actionButton = Button(self)\n self.actionButton[\"text\"] = WORK\n self.actionButton[\"font\"] = \"helvetica 16\"\n self.actionButton[\"command\"] = lambda: self.action(\n self.actionButton.cget(\"text\"))\n self.actionButton.pack(expand=True, fill=tk.X)\n\n def action(self, action):\n if action == WORK:\n self.work_count += 1\n self.timeLabel[\"fg\"] = BLUE\n self.actionButton[\"text\"] = PAUSE\n self.clock(T_WORK)\n self.actionButton[\"text\"] = BREAK\n elif action == PAUSE:\n self.timeLabel[\"fg\"] = RED\n self.actionButton[\"text\"] = CONTINUE\n elif action == CONTINUE:\n self.timeLabel[\"fg\"] = BLUE\n self.actionButton[\"text\"] = PAUSE\n elif action == BREAK:\n self.actionButton[\"state\"] = \"disable\"\n if self.work_count < 4:\n self.timeLabel[\"fg\"] = GREEN\n self.clock(T_BREAK)\n elif self.work_count >= 4:\n self.timeLabel[\"fg\"] = ORANGE\n self.clock(T_LONG)\n self.work_count = 0\n self.actionButton[\"state\"] = \"normal\"\n self.actionButton[\"text\"] = WORK\n\n def clock(self, minutes):\n finish = time() + minutes * 60\n while(time() < finish):\n self.actionButton.update()\n if self.actionButton.cget(\"text\") != CONTINUE:\n seconds = finish - time()\n remaining = gmtime(seconds)\n self.displayVar.set(strftime(\"%M:%S\", remaining))\n self.update_idletasks()\n sleep(1)\n else:\n finish = time() + seconds\n\n self.playSound()\n\n def playSound(self):\n mixer.init()\n soundPath = path.join(self.abspath, \"sounds/alert2.mp3\")\n mixer.music.load(soundPath)\n mixer.music.play()\n\n\ndef main():\n dirname = path.dirname(sys.argv[0])\n abspath = path.abspath(dirname)\n root = Tk()\n app = Pomodoro(root, abspath)\n root.mainloop()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"pomodoro.py","file_name":"pomodoro.py","file_ext":"py","file_size_in_byte":3690,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"418003182","text":"\"\"\"\n.. 
module:: encoder_html.py\n\n :copyright: @2013 Earth System Documentation (http://es-doc.org)\n :license: GPL / CeCILL\n :platform: Unix, Windows\n :synopsis: HTML encoder from document.\n\n.. moduleauthor:: Earth System Documentation (ES-DOC) \n\n\"\"\"\n# Module imports.\nimport datetime\nimport os\n\nimport tornado.template as template\n\nfrom .. utils import (\n convert,\n functional\n )\n\n\n\n# Template loader.\n_loader = template.Loader(os.path.join(os.path.dirname(os.path.abspath(__file__)), \"html_templates\"))\n\n\n# Main document set template.\n_template = _loader.load(\"core/document_set.html\")\n\n# Document templates keyed by document type.\n_document_templates = {\n \"cim.1.activity.ensemble\": _loader.load(\"cim_1/activity_ensemble.html\"),\n \"cim.1.activity.numericalexperiment\": _loader.load(\"cim_1/activity_numerical_experiment.html\"),\n \"cim.1.activity.simulationrun\": _loader.load(\"cim_1/activity_simulation_run.html\"),\n \"cim.1.data.dataobject\": _loader.load(\"cim_1/data_data_object.html\"),\n \"cim.1.grids.gridspec\": _loader.load(\"cim_1/grids_grid_spec.html\"),\n \"cim.1.shared.platform\": _loader.load(\"cim_1/shared_platform.html\"),\n \"cim.1.software.modelcomponent\": _loader.load(\"cim_1/software_model_component.html\"),\n \"cim.1.quality.cimquality\": _loader.load(\"cim_1/quality_cimquality.html\")\n}\n\n\ndef _format_value(v, formatter=None):\n \"\"\"Formats values for document output.\"\"\"\n def _format(s):\n if s is None:\n s = None\n # TODO add support for time formatting.\n elif isinstance(v, datetime.datetime):\n s = str(s)[:10]\n else:\n s = str(s)\n\n if s and len(s):\n s = unicode(s.decode('utf8').strip())\n\n if formatter:\n s = formatter(s)\n\n return s\n\n return \" \".join(map(_format, v)).strip() if isinstance(v, list) else _format(v)\n\n\ndef _get_value(data, path):\n \"\"\"Returns formatted value for document output.\"\"\"\n if data is None:\n return None\n\n def is_collection_reference(attr):\n try:\n int(attr)\n except ValueError:\n return False\n else:\n return True\n\n # Initialise return value.\n value = data\n\n # Walk attribute path.\n for attr in path.split(\".\"):\n # ... collection filter by index\n if is_collection_reference(attr):\n value = value[int(attr)]\n # ... collection filter by attribute\n elif \"=\" in attr:\n left, right = attr.split(\"=\")\n value = functional.first(value, left, right.lower(), lambda v: str(v).lower())\n # ... 
item attribute filter\n elif hasattr(value, attr):\n value = getattr(value, attr)\n # Otherwise escape.\n else:\n break\n\n # Escape at dead-end.\n if value is None:\n break\n\n return None if value == data else value\n\n\ndef _get_name(name):\n \"\"\"Returns formatted name for document output.\"\"\"\n # Initialise.\n name = \"\" if name is None else name.strip()\n\n # Convert to spaced case.\n if len(name) > 4:\n name = convert.str_to_spaced_case(name).strip()\n\n # Prefixes.\n n = name.lower()\n prefixes = { \"number of \": \"\" }\n for prefix in prefixes.keys():\n if n.startswith(prefix):\n name = prefixes[prefix] + name[len(prefix):]\n\n # Substrings.\n replacements = {\n \"_\": \" \",\n \"Second\": \"2nd\",\n \"First\": \"1st\"\n }\n for replacement in replacements.keys():\n name = name.replace(replacement, replacements[replacement])\n\n # Substitutions.\n swaps = {\n \"id\": \"ID\",\n }\n for swap in swaps.keys():\n if name == swap:\n name = swaps[swap]\n\n return name\n\n\nclass _TemplateInfo():\n \"\"\"Template processing information.\"\"\"\n def __init__(self,\n data,\n header=None,\n fieldset=[],\n fieldset_type=\"namevalue\",\n tag_id=None,\n template=None,\n previous=None,\n depth=0):\n if isinstance(data, _TemplateInfo):\n previous = data\n data = previous.data\n self.data = None\n self.depth = depth\n self.header = header\n self.field = None\n self.fieldset = fieldset\n self.fieldset_type = fieldset_type\n self.previous = previous\n self.template = template\n self._set_tag_id(tag_id)\n self._set_dataset(data)\n\n\n def _set_dataset(self, data):\n \"\"\"Sets the associated dataset.\"\"\"\n try:\n iter(data)\n except TypeError:\n self.data = data\n self.dataset = [data]\n else:\n self.dataset = data\n self.dataset = [i for i in self.dataset if i is not None]\n\n\n def _set_tag_id(self, id):\n \"\"\"Sets template tag id.\"\"\"\n if id is not None:\n self.tag_id = id\n elif self.depth == 0 and self.header:\n self.tag_id = self.header.lower()\n else:\n self.tag_id = None\n\n\n def reset_fieldset(self):\n self.fieldset = []\n\n\nclass _FieldInfo():\n \"\"\"Document field processing information.\"\"\"\n def __init__(self,\n name,\n email=None,\n email_path=None,\n formatter=None,\n link=None,\n link_path=None,\n path=None,\n tag_id=None,\n value=None):\n self.name = name\n self.email = email\n self.email_path = email_path\n self.formatter = formatter\n self.link = link\n self.link_path = link_path\n self.path = path\n self.tag_id = tag_id\n self.value = value\n\n\n def get_name(self):\n \"\"\"Returns formatted field name for html output.\"\"\"\n return _get_name(self.name)\n\n\n def get_value(self, data=None):\n \"\"\"Returns value of field for html output.\n\n :param object data: An object from which the field value is derived.\n\n :returns: The derived field value.\n :rtype str:\n\n \"\"\"\n v = _get_value(data, self.path) if self.path else self.value\n v = _format_value(v, self.formatter)\n\n return v\n\n\n def get_link(self, data):\n \"\"\"Returns value of associated hyperlink.\n\n :param object data: An object from which the hyperlink value is derived.\n\n :returns: The derived field hyperlink.\n :rtype str:\n\n \"\"\"\n v = _get_value(data, self.link_path)\n v = _format_value(v)\n\n return v\n\n\n def get_email(self, data):\n \"\"\"Returns value of associated email link.\n\n :param object data: An object from which the email link value is derived.\n\n :returns: The derived field email link.\n :rtype str:\n\n \"\"\"\n v = _get_value(data, self.email_path)\n v = 
_format_value(v)\n\n return v\n\n\ndef _generate(document):\n template_key = document.meta.type.lower()\n if template_key not in _document_templates:\n msg = u\"TODO - document html generator for {0} documents.\"\n return msg.format(template_key)\n\n template = _document_templates[template_key]\n return template.generate(doc=document,\n FieldInfo=_FieldInfo,\n TemplateInfo=_TemplateInfo)\n\n\ndef _get_group_set(document_set):\n def get_sort_key(document):\n \"\"\"Returns key used for document sorting.\"\"\"\n return document.ext.full_display_name.lower()\n\n def get_group_key(document):\n return \"{0}-{1}\".format(document.ext.type_sortkey,\n document.ext.type_display_name)\n\n group_set = {}\n for document in document_set:\n group_key = get_group_key(document)\n if group_key not in group_set:\n group_set[group_key] = []\n group_set[group_key].append(document)\n\n for group_key, document_set in group_set.iteritems():\n group_set[group_key] = sorted(document_set, key=get_sort_key)\n\n return group_set\n\n\ndef encode(doc):\n \"\"\"Encodes a document to HTML.\n\n :param object doc: Document being encoded.\n\n :returns: An HTML representation of a document.\n :rtype: str\n\n \"\"\"\n def get_sort_key(document):\n \"\"\"Returns key used for document sorting.\"\"\"\n return document.meta.type_sortkey\n\n # Convert to sorted iterable.\n try:\n iter(doc)\n except TypeError:\n document_set = [doc]\n else:\n document_set = sorted(doc, key=get_sort_key)\n\n # Return generated template.\n return _template.generate(document_set=document_set,\n document_group_set=_get_group_set(document_set),\n generate_document=_generate)\n","sub_path":"src/pyesdoc/serialization/encoder_html.py","file_name":"encoder_html.py","file_ext":"py","file_size_in_byte":8786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"434591878","text":"\"\"\"\ndate: 20190713\ncreated by: ishida\nUpdate a simple plot as rapidly as possible to measure speed.\n\"\"\"\n\nfrom pyqtgraph.Qt import QtGui, QtCore\nimport numpy as np\nimport pyqtgraph as pg\nfrom pyqtgraph.ptime import time\nimport serial as sr\n\n# create object serial port\nser = sr.Serial('COM3', 9600)\nlength = 8\nser.flushInput()\ncnt = 0\n\napp = QtGui.QApplication([])\n\nwindow_width = 5000\n\np = pg.plot()\np.setWindowTitle('Signal from cell')\np.setRange(QtCore.QRectF(0, 0, window_width, 10000))\np.setLabel('left', 'Intensity')\np.setLabel('bottom', 'time [ms]')\ncurve = p.plot()\n\nt = np.zeros(window_width)\ndata = np.linspace(0, 0, window_width)\n\nptr = 0\nlastTime = time()\nfps = None\n\ndef update():\n global curve, data, ptr, p, lastTime, fps\n data[:-1] = data[1:]\n value = ser.readline()\n array = value.decode(\"utf-8\")\n data[-1] = int(array)\n # print(int(array[0] * 10 ** (length - 1)))\n # data[-1] = np.random.rand() * 1000\n curve.setData(data)\n\n ptr += 1\n\n now = time()\n dt = now - lastTime\n lastTime = now\n if fps is None:\n fps = 1.0/dt\n else:\n s = np.clip(dt*3., 0, 1)\n fps = fps * (1-s) + (1.0/dt) * s\n p.setTitle('%0.2f fps' % fps)\n app.processEvents() ## force complete redraw for every plot\n\ntimer = QtCore.QTimer()\ntimer.timeout.connect(update)\ntimer.start(0)\n\n\n## Start Qt event loop unless running in interactive mode.\nif __name__ == '__main__':\n import sys\n if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):\n 
QtGui.QApplication.instance().exec_()\n","sub_path":"PIC/CounterToPC/real_time_pyqtgraph.py","file_name":"real_time_pyqtgraph.py","file_ext":"py","file_size_in_byte":1558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"322782021","text":"# -*- coding: utf-8 -*-\nimport scrapy\n\nclass ToScrapeSpiderXPath(scrapy.Spider):\n name = 'toscrape-goudengids-xpath'\n start_urls = [\n 'https://www.goudengids.be/sitemap_index_nl_be.xml'\n ]\n\n def parse(self, response):\n for quote in response.xpath('//div[@class=\"profile__inner\"]'):\n yield {\n 'name': quote.xpath('./span[@class=\"profile__title\"]/text()').extract_first(),\n 'address': quote.xpath('.//small[@class=\"card__info fl\"]/text()').extract_first(),\n 'categorie': quote.xpath('.//div[@class=\"categories detail mb20\"]/text()').extract()\n }\n\n # next_page_url = response.xpath('//li[@class=\"next\"]/a/@href').extract_first()\n # if next_page_url is not None:\n # yield scrapy.Request(response.urljoin(next_page_url))\n\n","sub_path":"quotesbot/spiders/toscrape-goudengids-xpath.py","file_name":"toscrape-goudengids-xpath.py","file_ext":"py","file_size_in_byte":835,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"97049691","text":"import pymysql\nfrom datetime import timezone, datetime\nfrom common import sql_host, sql_user, sql_db, sql_pwd\n\ndef get_db_cursor():\n\tdb = pymysql.connect(host=sql_host, user=sql_user, password= sql_pwd, db=sql_db)\n\tcur = db.cursor()\n\treturn db, cur\n\ndef get_one_record(sql_command, params):\n\tdb, cur = get_db_cursor()\n\tnum = cur.execute(sql_command, params)\n\trecord = cur.fetchone()\n\tcur.close()\n\tdb.close()\n\treturn num, record\n\ndef get_records(sql_command, params):\n\tdb, cur = get_db_cursor()\n\tnum = cur.execute(sql_command, params)\n\trv = cur.fetchall()\n\treturn rv\n\ndef get_one_data(sql_command, id):\n\tdb, cur = get_db_cursor()\n\tnum = cur.execute(sql_command, id)\n\trow_headers = [x[0] for x in cur.description] # this will extract row headers\n\trecord = cur.fetchone()\n\tjson_data = []\n\tjson_data.append(dict(zip(row_headers, record)))\n\tcur.close()\n\tdb.close()\n\treturn num, json_data\n\ndef update_record(sql_command, params):\n\tdb, cur = get_db_cursor()\n\tcur.execute(sql_command, params)\n\tdb.commit()\n\tcur.close()\n\tdb.close()\n\ndef get_full_data(sql_command, row_headers=None):\n\tdb, cur = get_db_cursor()\n\tnum = cur.execute(sql_command)\n\tif row_headers is None: row_headers = [x[0] for x in cur.description] # this will extract row headers\n\trv = cur.fetchall()\n\tjson_data = []\n\tfor row in rv:\n\t\tresult = []\n\t\tfor item in row:\n\t\t\tif type(item) is datetime: item = item.astimezone(timezone.utc)\n\t\t\tresult.append(item)\n\t\tjson_data.append(dict(zip(row_headers, result)))\n\tcur.close()\n\tdb.close()\n\treturn json_data\n\ndef add_administrator(admin_pwd):\n\tdb, cur = get_db_cursor()\n\tsql_command = 'select `name` from `users` where `admin` = 1'\n\tnum = cur.execute(sql_command)\n\tif num < 1:\n\t\tsql_command = 'insert into `users` (`name`, `pwd`, `admin`) values (%s, %s, %s)'\n\t\tcur.execute(sql_command, ('admin', admin_pwd, 1))\n\t\tdb.commit()\n\tcur.close()\n\tdb.close()\n","sub_path":"sql.py","file_name":"sql.py","file_ext":"py","file_size_in_byte":1851,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"391170624","text":"import json\n\nfrom django.core.serializers.json import 
DjangoJSONEncoder\nfrom django.http.response import HttpResponse\n\n\nclass JsonResponseUTF8(HttpResponse):\n \"\"\"Удобное создание JSON-ответа с выводом любого не-ASCII текста в UTF-8.\n\n Args:\n response (list|dict): Ответ для конвертации в JSON.\n \"\"\"\n def __init__(self, response, status=None, **kwargs):\n # HTTP-код ответа (по умолчанию 200)\n kwargs['status'] = status or 200\n\n kwargs.setdefault('content_type', 'application/json')\n\n json_dumps_params = {'ensure_ascii': False, }\n response = json.dumps(response, cls=DjangoJSONEncoder, **json_dumps_params)\n\n super(JsonResponseUTF8, self).__init__(content=response, **kwargs)\n","sub_path":"api/shortcuts.py","file_name":"shortcuts.py","file_ext":"py","file_size_in_byte":820,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"617481014","text":"\nclass Lexer(object):\n def __init__(self, text):\n self.text = text\n self.index = -1\n self.length = -1\n self.symbols = []\n\n self._parse()\n\n def next(self, i=1):\n self.index += i \n if self.index >= self.length:\n return (-2, '') \n\n elif self.index < 0:\n self.index = 0\n\n return self.symbols[self.index]\n\n def head(self):\n return self.index\n\n def seek(self, index):\n if -1 <= index < self.length:\n self.index = index\n return True\n\n return False\n\n def _parse(self):\n state = 0\n\n for c in self.text:\n if state == 0:\n if c == '\\\\':\n state = 1\n\n elif c in \"()[]{}$^*+?.|\":\n self.symbols.append((1, c))\n\n else:\n self.symbols.append((0, c))\n\n elif state == 1:\n self.symbols.append((0, c))\n state = 0\n\n if state != 0:\n self.symbols.append((-1, '\\\\'))\n\n self.length = len(self.symbols)\n\n","sub_path":"regex/lexer.py","file_name":"lexer.py","file_ext":"py","file_size_in_byte":1128,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"315858223","text":"# coding: utf-8\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport scipy.ndimage\nimport sys\n\n\nNPY_DIR = \"/Users/pycra/Desktop/NPY_DATA\"\n\nif len(sys.argv) < 2:\n print(\"buttai.py npyfilename\")\n exit(-1)\n\narray = np.load(NPY_DIR+\"/\"+sys.argv[1])\narray = 1 * (array == 3) # 3=池\n\nim_open = scipy.ndimage.binary_opening(array, np.ones((3, 3)), iterations=2)\nlabel3, num_features = scipy.ndimage.measurements.label(im_open)\nprint(\"label3=\", label3)\nprint(\"num_features=\", num_features)\n\nim_open = scipy.ndimage.binary_opening(array, np.ones((2, 2)), iterations=2)\nlabel2, num_features = scipy.ndimage.measurements.label(im_open)\nprint(\"label2=\", label2)\nprint(\"num_features=\", num_features)\n\nim_open = scipy.ndimage.binary_opening(array, np.ones((1, 1)), iterations=2)\nlabel1, num_features = scipy.ndimage.measurements.label(im_open)\nprint(\"label1=\", label1)\nprint(\"num_features=\", num_features)\n\nplt.subplot(221)\nplt.imshow(array, cmap='Greys_r')\nplt.colorbar()\nplt.subplot(222)\nplt.imshow(label3, cmap='Greys_r')\nplt.colorbar()\nplt.subplot(223)\nplt.imshow(label2, cmap='Greys_r')\nplt.colorbar()\nplt.subplot(224)\nplt.imshow(label1, cmap='Greys_r')\nplt.colorbar()\nplt.show()\n","sub_path":"geocraft/buttai.py","file_name":"buttai.py","file_ext":"py","file_size_in_byte":1187,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"466128505","text":"from rest_framework import serializers\nfrom .models import Question\nimport uuid\n\n\nclass QuestionSerializer(serializers.ModelSerializer):\n \"\"\"Serializer of a 
question\"\"\"\n user = serializers.EmailField(read_only=True)\n code = serializers.CharField(read_only=True)\n\n class Meta:\n model = Question\n fields = ('__all__')\n\n def save(self, user):\n question = Question.objects.create(title=self.validated_data['title'],\n content=self.validated_data['content'],\n code=self.generate_code(),\n user=user)\n for category in self.validated_data['categories']:\n question.categories.add(category)\n for tag in self.validated_data['tags']:\n question.tags.add(tag)\n question.save()\n \n def generate_code(self):\n code = f\"{uuid.uuid4().hex[:8]}\"\n try:\n question = Question.objects.get(code=code)\n except Question.DoesNotExist:\n return code\n else:\n return generate_code()\n\n def update(self, id):\n question = Question.objects.get(id=id)\n question.title = self.validated_data['title']\n question.content = self.validated_data['content']\n\n question.categories.set(self.validated_data['categories'])\n question.tags.set(self.validated_data['tags'])\n\n question.save()\n","sub_path":"questions/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":1438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"502705132","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Nov 11 18:15:55 2020\r\n\r\n@author: Administrator\r\n\"\"\"\r\n\r\n\r\n\r\n#### imports\r\n\r\nimport sys\r\nimport pandas as pd\r\nfrom io import StringIO\r\n\r\n####\r\n\r\nfile = open(str(sys.argv[1]),'r')\r\n#bests_hit_file = open(str(sys.argv[2]),'w')\r\n\r\ntxt = file.read()\r\nquerys = txt.split('# BLASTP 2.2.31+')[1:-1]\r\ndatabase_info = querys[0].split('\\n')[1:][1]\r\nstring =''\r\nstring += database_info + '\\n'\r\nstring += 'query id\\tsubject id\\t% identity\\talignment length\\tmismatches\\tgap opens\\tgaps\\tq. start\\tq. end\\ts. start\\ts. end\\tevalue\\tbit score\\tquery length\\tsubject length' + '\\n'\r\nnew_name = str(sys.argv[1]).split('/')[-1][:-4] + '_compare.csv'\r\n\r\nfor q in querys:\r\n lines = q.split('\\n')[1:]\r\n if lines[2] == '# 0 hits found':\r\n #best_hit = lines[0][9:]+ '\\t' +'no hits found'\r\n #querys_with_no_hits_found.append(lines[0][9:])\r\n continue\r\n else:\r\n best_hit = lines[4]\r\n string += best_hit + '\\n'\r\n \r\n### Critères de séléction\r\n\r\ndt = StringIO(string)\r\n\r\ndf = pd.read_csv(dt, header = 1,sep=\"\\t\")\r\ndf['% couverture'] = (df['alignment length'] / df[[\"query length\",\"subject length\"]].max(axis=1)) *100\r\n\r\ndf['evalue'] = df['evalue'].astype(float)\r\ndf['% identity'] = df['% identity'].astype(float)\r\ndf['% couverture'] = df['% couverture'].astype(float)\r\nndf = df[['query id','subject id','% identity','evalue','% couverture']]\r\nndf = ndf[(ndf['% identity'] > 70) & (df['evalue'] < 0.01) & (df['% couverture'] > 60)]\r\n\r\nndf.to_csv(new_name, index = False)\r\n","sub_path":"parser2_windows.py","file_name":"parser2_windows.py","file_ext":"py","file_size_in_byte":1537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"248777466","text":"# Copyright 2014 Cloudbase Solutions SRL\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. 
You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\n\"\"\"\nUnit tests for the Hyper-V utils factory.\n\"\"\"\n\nimport mock\nfrom oslo_config import cfg\n\nfrom nova import test\nfrom nova.virt.hyperv import utilsfactory\nfrom nova.virt.hyperv import volumeutils\nfrom nova.virt.hyperv import volumeutilsv2\n\nCONF = cfg.CONF\n\n\nclass TestHyperVUtilsFactory(test.NoDBTestCase):\n def test_get_volumeutils_v2(self):\n self._test_returned_class(expected_class=volumeutilsv2.VolumeUtilsV2,\n os_supports_v2=True)\n\n def test_get_volumeutils_v1(self):\n self._test_returned_class(expected_class=volumeutils.VolumeUtils)\n\n def test_get_volumeutils_force_v1_and_not_min_version(self):\n self._test_returned_class(expected_class=volumeutils.VolumeUtils,\n force_v1=True)\n\n @mock.patch.object(utilsfactory, 'CONF')\n def _test_returned_class(self, mock_CONF, expected_class, force_v1=False,\n os_supports_v2=False):\n # NOTE(claudiub): temporary change, in order for unit tests to pass.\n # force_hyperv_utils_v1 CONF flag does not exist anymore.\n # utilsfactory and its test cases will be removed next commit.\n mock_CONF.hyperv.force_volumeutils_v1 = force_v1\n with mock.patch.object(\n utilsfactory.utils,\n 'check_min_windows_version') as mock_check_min_windows_version:\n mock_check_min_windows_version.return_value = os_supports_v2\n\n actual_class = type(utilsfactory.get_volumeutils())\n self.assertEqual(actual_class, expected_class)\n","sub_path":"nova/tests/unit/virt/hyperv/test_utilsfactory.py","file_name":"test_utilsfactory.py","file_ext":"py","file_size_in_byte":2206,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"235516861","text":"from flask import render_template, url_for, flash, redirect,request\nfrom app import app,db, bcrypt\nfrom app.forms import RegistrationForm, LoginForm,AnnouncementForm\nfrom app.models import User,Post\nfrom flask_login import login_user,current_user,logout_user,login_required\n\n\n@app.route(\"/\")\n@app.route(\"/home\")\ndef home():\n page = request.args.get('page', 1, type=int)\n posts = Post.query.order_by(Post.date_posted.desc()).paginate(page=page, per_page=4)\n return render_template('home.html', posts=posts)\n\n\n@app.route(\"/signup\", methods=['GET', 'POST'])\ndef register():\n if current_user.is_authenticated:\n return redirect(url_for('home'))\n form = RegistrationForm()\n if form.validate_on_submit():\n hashed_password = bcrypt.generate_password_hash(form.password.data).decode('utf-8')\n user = User(username=form.username.data, email=form.email.data, password=hashed_password)\n db.session.add(user)\n db.session.commit()\n flash('Account successfully created,You are now able to log in', 'success')\n return redirect(url_for('login'))\n return render_template('signup.html', title='Signup', form=form)\n\n\n@app.route(\"/login\", methods=['GET', 'POST'])\ndef login():\n if current_user.is_authenticated:\n return redirect(url_for('home'))\n form = LoginForm()\n if form.validate_on_submit():\n user = User.query.filter_by(email=form.email.data).first()\n if user and bcrypt.check_password_hash(user.password, form.password.data):\n login_user(user, 
remember=form.remember.data)\n next_page = request.args.get('next')\n return redirect(next_page) if next_page else redirect(url_for('home'))\n else:\n flash('Unsuccessful Login.Please check email and password', 'danger')\n return render_template('login.html', title='Login', form=form)\n\n@app.route(\"/logout\")\ndef logout():\n logout_user()\n return redirect(url_for('home'))\n\n\n@app.route(\"/announcements/new\", methods=['GET', 'POST'])\n@login_required\ndef new_announcement():\n form = AnnouncementForm()\n if form.validate_on_submit():\n posts= Post(title=form.title.data, content=form.content.data, author=current_user)\n db.session.add(posts)\n db.session.commit()\n flash('Announcement successfully added!', 'success')\n return redirect(url_for('home'))\n return render_template('announcement.html', title='Announcements',form=form, legend=' Create An Announcement')\n\n\n@app.route(\"/announcements/\")\ndef announcement (post_id):\n post = Post.query.get_or_404(post_id)\n return render_template('modify.html',post=post)\n\n@app.route(\"/announcements//update\", methods=['GET', 'POST'])\n@login_required\ndef update_announcement(post_id):\n post = Post.query.get_or_404(post_id)\n if post.author != current_user:\n abort(403)\n form = AnnouncementForm()\n if form.validate_on_submit():\n post.title = form.title.data\n post.content = form.content.data\n db.session.commit()\n flash('Annoucement successfully updated!', 'success')\n return redirect(url_for('announcement', post_id=post.id))\n elif request.method == 'GET':\n form.title.data = post.title\n form.content.data = post.content\n return render_template('announcement.html',form=form,legend='Update Announcement')\n\n\n@app.route(\"/announcements//delete\", methods=['POST'])\n@login_required\ndef delete_announcement(post_id):\n post = Post.query.get_or_404(post_id)\n if post.author != current_user:\n abort(403)\n db.session.delete(post)\n db.session.commit()\n flash('Announcement deleted!', 'success')\n return redirect(url_for('home'))\n\n@app.route(\"/admin/\")\ndef admin_announcements(username):\n page = request.args.get('page', 1, type=int)\n user = User.query.filter_by(username=username).first_or_404()\n posts = Post.query.filter_by(author=user).order_by(Post.date_posted.desc()).paginate(page=page, per_page=5)\n return render_template('admin.html', posts=posts, user=user)","sub_path":"app/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":4092,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"634121755","text":"import os\nimport sys\n\nargc = len(sys.argv)\nargv = sys.argv\n\nif argc > 2:\n cmd = './'\n for i in range(1,argc):\n cmd = cmd + argv[i] + ' '\n if os.path.isfile(argv[1]):\n # print('Python script: ', cmd)\n os.system(cmd)\n else:\n print('Binary file does not exist')\n bin = 'g++ -o ' + argv[1] + ' '+ argv[1] + '.cpp'\n print(bin)\n os.system(bin)\n if os.path.isfile(argv[1]):\n os.system(cmd)\n else:\n print('Binary source does not exist')\n exit(0)\nelse:\n print('USAGE: python3.4', argv[0], \" BINARY_FILE INPUT_ARGS\");\n exit(0)\n","sub_path":"runScript.py","file_name":"runScript.py","file_ext":"py","file_size_in_byte":599,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"211311744","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# encoding=utf-8 vi:ts=4:sw=4:expandtab:ft=python\n\"\"\"\nCopyright (c) 2020 Baidu.com, Inc. 
All Rights Reserved\nFile: test_sqrt.py\nAuthors: liyang109\nDate: 2021/3/23 3:53\n\"\"\"\nfrom apibase import APIBase\nimport paddle\nimport numpy as np\n\n\nclass TestSqrt(APIBase):\n \"\"\"\n test sqrt\n \"\"\"\n def hook(self):\n \"\"\"\n implement\n \"\"\"\n self.types = [np.float16]\n # self.debug = True\n # enable check grad\n self.enable_backward = False\n\nobj = TestSqrt(paddle.fluid.layers.sqrt)\n\ndef test_sqrt_base():\n \"\"\"\n default\n \"\"\"\n x = np.random.random([3072, 768]).astype(np.float16)\n res = np.sqrt(x)\n obj.run(res=res, x=x)","sub_path":"npu/npu_benchmark/test_sqrt.py","file_name":"test_sqrt.py","file_ext":"py","file_size_in_byte":721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"320725416","text":"from tkinter import *\n\n\ndef enter_leave(event):\n if event.type == '7': event.widget['text'] = 'In'\n elif event.type == '8': event.widget['text'] = 'Out'\n\n\nroot = Tk()\n\nlab1 = Label(width=20, height=3, bg='white')\nlab1.pack()\nlab1.bind('', enter_leave)\nlab1.bind('', enter_leave)\n\nlab2 = Label(width=20, height=3, bg='black',\n fg='white')\nlab2.pack()\nlab2.bind('', enter_leave)\nlab2.bind('', enter_leave)\nroot.mainloop()\n","sub_path":"урок7фаил3.py","file_name":"урок7фаил3.py","file_ext":"py","file_size_in_byte":466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"428968549","text":"import uuid\n\n\nclass Task:\n \"\"\"\n A piece of work to be completed\n in a defined period of time\n \"\"\"\n def __init__(self, name, start=None, end=None, duration=None):\n self.name = name\n self.start = start\n self.end = end\n self.duration = duration\n self.cost = None\n self.id = None\n self.isComplete = False\n self.predecessors = []\n self.resources = []\n self.subtasks = []\n self.get_duration()\n self.get_uuid()\n\n def get_duration(self):\n if self.end and self.start:\n dur = self.end - self.start\n self.duration = dur\n return dur\n\n def add_resources(self, *args):\n for resource in args:\n self.resources.append(resource)\n return\n\n def add_predecessors(self, *args):\n for predecessor in args:\n self.predecessors.append(predecessor)\n return\n\n def get_uuid(self):\n if self.id:\n return\n else:\n self.id = uuid.uuid4()\n return\n\n def add_subtasks(self, *args):\n for task in args:\n self.subtasks.append(task)\n return\n\n\ndef main():\n mb = Task(\"Make Breakfast\", 3, 6)\n print(mb.duration)\n print(mb.id)\n mb.get_uuid()\n print(mb.id)\n mb.add_resources(\"tim\", \"bob\", \"joanne\")\n print(mb.resources)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"Organization/PlantPlanning/Tasks.py","file_name":"Tasks.py","file_ext":"py","file_size_in_byte":1415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"167597053","text":"from typing import List\n\nfrom fastapi import APIRouter, Depends, HTTPException\nfrom sqlalchemy.orm import Session\n\nfrom db.db import get_db\nfrom app.astronomy import schemas, services, models\n\nrouter = APIRouter()\n\n\n@router.get(\"/event/{id}\", response_model=schemas.EventGet)\ndef get_event(id: int, city: str, db: Session = Depends(get_db)):\n \"\"\"Get one astronomical event\"\"\"\n event = services.get_event(db=db, id=id)\n if not event:\n raise HTTPException(status_code=404, detail=\"Event not found\")\n\n event = services.add_field_cloud_percent(city=city, event_obj=[event])\n setattr(event[0], \"duration\", str(event[0].date_end - 
event[0].date_start))\n return event[0]\n\n\n@router.get(\"/event/\", response_model=List[schemas.EventGet])\ndef filter_events(*, day_from: str = \"2020-12-01\", day_to: str = \"2020-12-31\", city: str,\n db: Session = Depends(get_db)):\n \"\"\"Filter astronomical event, return list events\"\"\"\n events = services.filter_event_by_date(db=db, day_from=day_from, day_to=day_to)\n if not events:\n raise HTTPException(status_code=404, detail=\"Events not found\")\n events = services.add_field_cloud_percent(city=city, event_obj=events)\n return events\n\n\n@router.post(\"/event/\", response_model=schemas.EventCreate)\ndef create_event(*, db: Session = Depends(get_db), schema: schemas.EventCreate):\n \"\"\"Add to database astronomical events\"\"\"\n return services.create_event(db=db, schema=schema)\n\n\n@router.put(\"/event/{id}\", response_model=schemas.EventUpdate)\ndef update_event(*, id: int, db: Session = Depends(get_db), schema: schemas.EventUpdate):\n \"\"\"Update in database astronomical event\"\"\"\n event = services.get_event(db=db, id=id)\n if not event:\n raise HTTPException(status_code=404, detail=\"Event not found\")\n event = services.update(db=db, schema=schema, db_obj=event)\n return event\n\n\n@router.delete(\"/event/{id}\", response_model=schemas.EventGet)\ndef delete_event(id: int, db: Session = Depends(get_db)):\n \"\"\"Delete in n database astronomical event\"\"\"\n event = services.get_event(db=db, id=id)\n if not event:\n raise HTTPException(status_code=404, detail=\"Event not found\")\n return services.remove(db=db, id=id)\n","sub_path":"app/astronomy/endpoints/event.py","file_name":"event.py","file_ext":"py","file_size_in_byte":2227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"411972121","text":"\n\nfrom xai.brain.wordbase.nouns._founder import _FOUNDER\n\n#calss header\nclass _FOUNDERED(_FOUNDER, ):\n\tdef __init__(self,): \n\t\t_FOUNDER.__init__(self)\n\t\tself.name = \"FOUNDERED\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"founder\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_foundered.py","file_name":"_foundered.py","file_ext":"py","file_size_in_byte":247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"518569536","text":"from profiling.monitor_memory_usage import monitor_memory_usage\nfrom subprocess import Popen\nfrom subprocess import PIPE\nfrom subprocess import STDOUT\nimport sys\nimport argparse\n\n\ndef comp_degree(d):\n print(\"Beginning okay\")\n command = \"/Applications/MATLAB_R2018a.app/bin/matlab -nosplash -nodisplay -nodesktop -r \\\"addpath('segway'); segway_FRS_solver_okay({}); exit;\\\"\"\n\n proc = Popen(command.format(d), shell=True, stdin=PIPE, stdout=sys.stdout, stderr=sys.stdout, close_fds=True)\n\n monitor_memory_usage(proc.pid, 1, \"mem_okay_{}.log\".format(d));\n\n print(\"End okay\")\n print(\"Beginning shreyas\")\n\n command = \"/Applications/MATLAB_R2018a.app/bin/matlab -nosplash -nodisplay -nodesktop -r \\\"addpath('segway'); segway_FRS_solver_shrey({}); exit;\\\"\"\n\n proc = Popen(command.format(d), shell=True, stdin=PIPE, stdout=sys.stdout, stderr=sys.stdout, close_fds=True)\n \n monitor_memory_usage(proc.pid, 1, \"mem_shrey_{}.log\".format(d));\n\n print(\"End shreyas\")\n\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('d',\n help='Max degree of solution',\n type=int)\n args = parser.parse_args()\n\n 
comp_degree(args.d)","sub_path":"segway/frs_comp.py","file_name":"frs_comp.py","file_ext":"py","file_size_in_byte":1227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"151499568","text":"from test.utest import Test\nfrom test.geometry.conic.test_conic import TestConic\nfrom test.geometry.conic.tdata_hyperbola import hyperbola_data\nfrom pyppler.geometry.conic.hyperbola import _Hyperbola\n\n__author__ = 'Lai Tash (lai.tash@yandex.ru)'\n\n\nclass Test_Hyperbola(Test, TestConic):\n def cases(self):\n for case in hyperbola_data:\n H = _Hyperbola(case.e, case.a)\n yield case, H\n\n\n\n def test_at_x(self):\n for case, H in self.cases():\n for point in case.points:\n pts = H.at_x(point.x)\n if H.a > abs(point.x):\n self.assertIsNone(pts)\n return\n x1 = pts[0].x\n x2 = pts[1].x\n y1 = pts[0].y\n y2 = pts[1].y\n self.assertEqual(x1, x2)\n self.assertEqual(x1, point.x)\n self.assertTrue(y1 == -y2)\n self.assertNear(abs(y1), abs(point.y), mod=0.05)\n\n def test_at_y(self):\n for case, H in self.cases():\n for point in case.points:\n pts = H.at_y(point.y)\n y1 = pts[0].y\n y2 = pts[1].y\n x1 = pts[0].x\n x2 = pts[1].x\n self.assertEqual(y1, y2)\n self.assertEqual(y1, point.y)\n self.assertTrue(x1 == -x2)\n self.assertNear(abs(x1), abs(point.x), mod=0.05)\n\n def test_semimajor_axis(self):\n for case, H in self.cases():\n self.assertEqual(H.semimajor_axis, case.a)\n\n def test_eccentricity(self):\n for case, H in self.cases():\n self.assertEqual(H.eccentricity, case.e)\n\n def test_semiminor_axis(self):\n for case, H in self.cases():\n self.assertNear(H.semimajor_axis, case.a)\n\n def test_foci_distance(self):\n for case, H in self.cases():\n self.assertNear(H.foci_distance, case.f)\n\n def test_foci_parameter(self):\n for case, H in self.cases():\n self.assertNear(H.foci_parameter, case.p,0.1)\n\n def test_length(self):\n for case, H in self.cases():\n self.assertEqual(H.length, float('inf'))\n\n def test_Ra(self):\n for case, H in self.cases():\n self.assertEqual(H.Ra, float('inf'))\n\n def test_Rp(self):\n for case, H in self.cases():\n self.assertNear(H.Rp, case.Rp)\n\n def test_asymptotes(self):\n for case, H in self.cases():\n for point in case.asymptotes:\n x = point.x\n a1, a2 = H.asymptotes(x)\n self.assertEqual(a1.x, x)\n self.assertEqual(a2.x, x)\n self.assertEqual(a1.y, -a2.y)\n self.assertNear(abs(a1.y),point.y, 0.1)","sub_path":"src/test/geometry/conic/test__Hyperbola.py","file_name":"test__Hyperbola.py","file_ext":"py","file_size_in_byte":2764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"486547797","text":"# 34.\tOs números primos possuem várias aplicações dentro da Computação, por exemplo na Criptografia. Um número primo\n# é aquele que é divisível apenas por um e por ele mesmo. 
Faça um programa que peça um número inteiro e determine se ele\n# é ou não um número primo.\nnum = int(input('Verifica primo: '))\ni = 2\nwhile i < num:\n if num % i == 0:\n print(f'{num} não é primo.')\n break\n elif i == num-1:\n print(f'{num} é primo.')\n i += 1\n\n","sub_path":"Estrutura De Repeticao/034.py","file_name":"034.py","file_ext":"py","file_size_in_byte":481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"253957003","text":"#------------------------------------Modules $ Libraries------------------------------------#\r\n\r\n# Backend\r\nimport docker\r\nimport os\r\nimport math\r\nimport json\r\nimport collections\r\nimport time \r\n\r\n# Frontend\r\nimport tkinter as tk\r\nimport tkinter.messagebox\r\nfrom tkinter import filedialog\r\nimport matplotlib.pyplot as plt\r\nfrom matplotlib.backends.backend_tkagg import FigureCanvasTkAgg\r\nimport pandas as pd\r\n\r\n\r\n\r\n#---------------------------------------Initialization---------------------------------------#\r\n\r\n# Docker Configuration\r\n# Docker Image\r\nCONTAINER_NAME = 'analysis_test'\r\nContainer_count = 1\r\n# Volume Bind between Docker and Local OS\r\nDATA_DIRECTORY = 'c:/Users/pei-seng.tan/Desktop/Docker/docker-map-reduce-example/data'\r\nOUTPUT_DIRECTORY = 'c:/Users/pei-seng.tan/Desktop/Docker/docker-map-reduce-example/out'\r\n\r\n\r\n# Some targetd words of Sustainable Development Goal \r\nUN_word_list = [\"poverty\", \"hunger\", \"health\", \"education\", \"equality\", \"sanitation\", \"clean\", \r\n\"growth\", \"innovation\", \"sustainable\", \"production\", \"climate\", \"water\", \"life\", \"peace\", \"partnerships\", \r\n\"resources\"]\r\n\r\n# The input list containing file directory\r\nfilePathList=[]\r\n# Display the result in GUI purpose\r\nlistResultBox_count = 0\r\n# For time claculating purpose\r\nstart=0\r\n\r\n\r\n#---------------------------------------Functions---------------------------------------#\r\n\r\ndef docker_run (file_list, N): \r\n # Local Variables: \r\n # file_list -> The input list containing file directory\r\n # N-> Number of containers\r\n\r\n # Instantiate a client to communicate with Docker Daemon\r\n client = docker.from_env()\r\n\r\n def reduce_output_files():\r\n results = [] # Result for all the inputed companies\r\n results_single={} # Result for each company\r\n\r\n # Read the processing result from containers\r\n filenames = [filename for filename in os.listdir(OUTPUT_DIRECTORY) if filename.endswith('.json')]\r\n for filename in filenames:\r\n single_cpy_list=[]\r\n with open(os.path.join(OUTPUT_DIRECTORY,filename),'r') as input_file:\r\n json_read=json.loads(input_file.read())\r\n results.append(json_read)\r\n single_cpy_list.append(json_read)\r\n results_single[filename[:-5]]=reduce_results(single_cpy_list)\r\n \r\n return reduce_results(results), results_single\r\n\r\n # Convert the collected result into dictionary for later process\r\n def reduce_results(results):\r\n word_frequencies = collections.defaultdict(lambda:0)\r\n for result in results:\r\n for word, frequency in result.items():\r\n word_frequencies[word]+=frequency\r\n return word_frequencies \r\n\r\n # Send the job order to the contianers\r\n def analyze_files_in_container(files):\r\n\r\n print(\"Launching container for files {}\".format(\", \".join(files)))\r\n listResultBox_adding(\"Launching container for files: {}\".format(\", \".join(shorten_filename(files))))\r\n \r\n # Environment Setup inside the containers\r\n environment = {\r\n 
'INPUT_FILENAMES' : ';'.join(files)\r\n }\r\n\r\n # Build and run the containers\r\n ctnr = client.containers.run(\r\n image=CONTAINER_NAME,\r\n volumes={\r\n DATA_DIRECTORY: {'bind': '/data','mode': 'ro'},\r\n OUTPUT_DIRECTORY: {'bind': '/out','mode': 'rw'}\r\n },\r\n environment=environment,\r\n detach=True)\r\n \r\n # Display running containers\r\n cntr_IDs=client.containers.list()\r\n if cntr_IDs == None:\r\n print(\"Running_containers : No more\")\r\n listResultBox_adding(\"Running_containers : No more\")\r\n else: \r\n print(\"Running containers: {}\".format(str(cntr_IDs)))\r\n listResultBox_adding(\"Running containers: {}\".format(str(cntr_IDs)))\r\n\r\n # Display the container logs\r\n print(ctnr.logs())\r\n print(\"Build the container\")\r\n\r\n return ctnr\r\n \r\n # Change the file directory to filename only. \r\n def shorten_filename(filelist):\r\n fls = []\r\n for f_end in filelist:\r\n _, tail = os.path.split(f_end)\r\n fls.append(tail)\r\n return fls\r\n \r\n _files=shorten_filename(file_list)\r\n \r\n # Parallelize the tasks based on quantity of files. \r\n containers = []\r\n chunk_size = int(math.ceil(len(_files)/float(N)))\r\n listResultBox_adding(\"\\n\")\r\n listResultBox_adding(\"-----Running-----\")\r\n for i in range(0,len(_files),chunk_size):\r\n files_chunk = _files[i:i+chunk_size]\r\n containers.append(analyze_files_in_container(files_chunk))\r\n\r\n print(\"The process of container initilization is ended. Waiting for the containers to finish...\")\r\n listResultBox_adding(\"The process of container initilization is ended. Waiting for the containers to finish...\")\r\n\r\n # Receive the status of containers \r\n for container in containers:\r\n exit_code = container.wait()\r\n print(\"Container exited with code: {}\".format(exit_code))\r\n listResultBox_adding(\"Container exited with code {}\".format(exit_code))\r\n \r\n \r\n # combine the results sent from containers\r\n reduced_results, results_single_dict = reduce_output_files()\r\n\r\n return reduced_results, results_single_dict\r\n\r\n\r\ndef selectFiles():\r\n # Make this variable global\r\n global filePathList \r\n # Ask for inputs\r\n filePath = filedialog.askopenfilenames(parent=root,title='Choose a file')\r\n if filePath== None:\r\n return\r\n else:\r\n filePathList = list(filePath)\r\n i = 1\r\n for file in filePathList:\r\n listFilesBox.insert(i,file)\r\n i+=1\r\n runButton.config(state=\"normal\")\r\n\r\n\r\ndef run():\r\n global start\r\n start = time.time()\r\n directory=\"c:/Users/pei-seng.tan/Desktop/Docker/docker-map-reduce-example/out\"\r\n filelist = [ f for f in os.listdir(directory) if f.endswith(\".json\") ]\r\n for f in filelist:\r\n os.remove(os.path.join(directory, f))\r\n # Get the inputs selected by the users\r\n # Get the filenames\r\n listResultBox_adding(\"-----Input information-----\")\r\n print(\"Input filenames :\" + str(filePathList))\r\n listResultBox_adding(\"Input filenames:\" + str(filePathList))\r\n # Get the total numbers of files\r\n print(\"Number of files selected: {}\" .format(str(len(filePathList))))\r\n listResultBox_adding(\"Number of files selected: {}\" .format(str(len(filePathList))))\r\n # Get the number of containers\r\n num_of_containers = userNumInput.get()\r\n print(\"Number of containers selected: {}\" .format(num_of_containers))\r\n listResultBox_adding(\"Number of containers selected: {}\" .format(num_of_containers))\r\n \r\n # Run the Map Reduce \r\n Dict_O, Dict_S = docker_run(filePathList, num_of_containers)\r\n \r\n # Pass the data for 
visualization\r\n visualization_data(Dict_O, Dict_S)\r\n\r\n\r\ndef visualization_data(overall_dict, single_dict):\r\n filtered_list = {} \r\n filtered_single_list = {}\r\n\r\n # To find related keywords form the output (For overall searching)\r\n for key in UN_word_list:\r\n filtered_list[key] = 0\r\n if key in overall_dict:\r\n filtered_list[key] = overall_dict[key]\r\n \r\n # To find related keywords form the output (For single company)\r\n for cpny in single_dict:\r\n filtered_single_list[cpny]={}\r\n for key in UN_word_list:\r\n filtered_single_list[cpny][key] = 0\r\n if key in single_dict[cpny]:\r\n filtered_single_list[cpny][key] = single_dict[cpny][key]\r\n \r\n # For displaying the result in Command Prompt and tool box\r\n listResultBox_adding(\"\\n\")\r\n listResultBox_adding(\"-----Result for each company-----\") \r\n for cpny in filtered_single_list:\r\n print(cpny)\r\n listResultBox_adding(\"Company Name: {}\" .format(cpny))\r\n print(filtered_single_list[cpny])\r\n listResultBox_adding(filtered_single_list[cpny])\r\n \r\n # For display the result in toolbox\r\n listResultBox_adding(\"\\n\")\r\n listResultBox_adding(\"-----Overall Searching Result-----\")\r\n listResultBox_adding(filtered_list)\r\n\r\n # Overall result searching chart plotting \r\n Data1 = {}\r\n Data1['Key_Word'] = list(filtered_list.keys())\r\n Data1['Word_Count'] = list(filtered_list.values())\r\n df1 = pd.DataFrame(Data1, columns= ['Key_Word', 'Word_Count'])\r\n df1 = df1[['Key_Word', 'Word_Count']].groupby('Key_Word').sum()\r\n figure1 = plt.Figure(figsize=(10,15), dpi=70, tight_layout = True)\r\n ax1 = figure1.add_subplot(111)\r\n ax1.locator_params(integer=True)\r\n df1.plot(kind='bar', legend=True, ax=ax1, fontsize=11, color = 'c')\r\n ax1.set_title('Total Word Count From Company Reports')\r\n bar1 = FigureCanvasTkAgg(figure1, root)\r\n bar1.get_tk_widget().pack(side=tk.TOP, fill=tk.BOTH)\r\n bar1.draw()\r\n end = time.time()\r\n print(\"--End--\")\r\n listResultBox_adding(\"\\n\")\r\n listResultBox_adding(\"-----End-----\")\r\n print(\"Overall Time Completed: {} seconds\".format(end-start))\r\n listResultBox_adding(\"Overall Time Completed: {} seconds\".format(str(end-start)))\r\n\r\n\r\ndef listResultBox_adding(text):\r\n global listResultBox_count\r\n listResultBox.insert(listResultBox_count, text)\r\n listResultBox_count += 1\r\n\r\n\r\n\r\n#-------------------------------------Main Code-------------------------------------#\r\n\r\nif __name__ == '__main__':\r\n # Tkinter Setup and Configurations\r\n root = tk.Tk()\r\n root.title(\"Python Map-Reduce Word Account Project\")\r\n root.geometry('1000x600')\r\n # Tkinter GUI Elements\r\n labelBox = tk.Label(root, text=\"(Required) Number of container(s): \")\r\n labelBox.pack()\r\n userNumInput = tk.Spinbox(root, from_=1, to=4)\r\n userNumInput.pack()\r\n selectFilesButton = tk.Button(root, text = \"Select Company Reports\", command = selectFiles)\r\n selectFilesButton.pack()\r\n listFilesBox = tk.Listbox(root, width = 200, height = 4)\r\n listFilesBox.pack()\r\n runButton = tk.Button(root, text = \"Run Word Count\", command = run, state=\"disable\")\r\n runButton.pack()\r\n listResultBox = tk.Listbox(root, width = 200)\r\n listResultBox.pack()\r\n # GUI Appplication Running\r\n root.mainloop()\r\n","sub_path":"docker_parallelize.py","file_name":"docker_parallelize.py","file_ext":"py","file_size_in_byte":10281,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"216844105","text":"\"\"\"\nExperimental 
lib\n\"\"\"\n\nimport re\n\ndef fileToEventList(filename, regex, filter = None):\n fh = open(filename, 'r')\n lines = fh.readlines()\n fh.close()\n\n events = []\n for line in lines:\n m = re.search(regex, line)\n if m is not None:\n \n if filter is None:\n events.append(m.group(1).strip())\n else:\n toAdd = False\n for f in filter:\n if f in m.group(1):\n toAdd = True\n \n if toAdd:\n events.append(m.group(1).strip())\n\n return events\n\ndef chunk(events, nchunks):\n chunkLen = int(len(events) / nchunks)\n \n i = 0\n chunks = []\n while i < len(events) -1:\n chunks.append( events[i:i+chunkLen] )\n i += chunkLen \n return chunks\n\ndef eventsToOccur(events):\n \n occurs = {}\n for e in events:\n if occurs.get(e) is None:\n occurs[e] = 1\n else:\n occurs[e] += 1\n \n #print(occurs)\n return occurs\n \ndef occurDictsToTable(occursDict):\n \n #print(occursDict)\n \n masterDict = {}\n for d in occursDict:\n for k, v in d.items():\n if masterDict.get(k) is None:\n masterDict[k] = v\n else:\n masterDict[k] += v\n \n #print(masterDict)\n \n headers = []\n for k, v in masterDict.items():\n headers.append(k)\n headers = [headers]\n \n for d in occursDict:\n newHorz = []\n \n for h in headers[0]:\n if d.get(h) == None:\n newHorz.append(0)\n else:\n newHorz.append(d[h])\n \n headers.append(newHorz)\n \n \n txtLines = []\n \n for h in headers:\n #print(h)\n \n h = list( map( lambda x : str(x), h ))\n \n txtLines.append(\", \".join(h))\n \n return \"\\n\".join(txtLines)\n","sub_path":"deprecated/regparse.py","file_name":"regparse.py","file_ext":"py","file_size_in_byte":1946,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"123098543","text":"from urllib.request import Request, urlopen, URLError\nfrom urllib.error import URLError\nimport re\n\nclass Youtube:\n @staticmethod\n def getChannelID(username):\n requrl = 'https://www.youtube.com/' + username\n req = Request(requrl)\n try:\n response = urlopen(req)\n except URLError as e:\n if hasattr(e, 'reason'):\n return {'error' : e.reason }\n if hasattr(e, 'code'):\n return {'error' : 'Error code: ' + e.code}\n else:\n r = response.read()\n m = re.search(b'https:\\/\\/www\\.youtube\\.com\\/channel\\/([^\\\"]*)', r)\n if not m == None:\n channel_id = m.group(1)\n return {'channel_id' : channel_id.decode('utf-8'),\n 'username' : username}\n else:\n return {'error' : 'user not found'}\n\n","sub_path":"multitwitch/lib/youtube.py","file_name":"youtube.py","file_ext":"py","file_size_in_byte":885,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"59481230","text":"import tables\nimport numpy as np\nimport os\nimport sys\nimport pickle\n\npath = \"\"\noutname = \"all_data.pk\"\n\n\ndef segmentstonucleotides(segments,y_vec):\n nucleotides = [y_vec[0]]\n #print(segments)\n #print(y_vec)\n segment = segments[0]\n i = 1\n ind = segment\n while(segment!=0):\n if y_vec[ind]!=-1:\n nucleotides.append(y_vec[ind])\n segment = segments[i]\n ind += segment\n i+=1\n return nucleotides\n\n\nif __name__=='__main__':\n if len(sys.argv)>1:\n path = sys.argv[1]\n h5file = tables.open_file(os.path.join(path,'train_cache.h5'), driver=\"H5FD_CORE\")\n root = h5file.root\n #a = root._f_get_child(\"label_raw\")._v_nchildren\n Y_ctc = root._f_get_child('Y_ctc')\n Y_seg = root._f_get_child('Y_seg')\n X_data = root._f_get_child('X_data')\n Y_vec = root._f_get_child('Y_vec')\n seq_len = root._f_get_child('seq_len')\n avail_data = {}\n print(\"saving cached data into pickle\")\n for 
key in range(len(X_data)):\n segs = np.array(Y_seg[str(key)])\n #print(key)\n avail_data[key] = {}\n avail_data[key][\"x_data\"] = X_data[int(key)]\n avail_data[key][\"y_vec\"] = Y_vec[int(key)]\n avail_data[key][\"segments\"] = segs\n avail_data[key][\"nucleotides\"] = segmentstonucleotides(segs,Y_vec[int(key)])\n h5file.close()\n avail_file = open(outname,\"wb\")\n pickle.dump(avail_data,avail_file)\n avail_file.close()\n","sub_path":"save_data_pickle.py","file_name":"save_data_pickle.py","file_ext":"py","file_size_in_byte":1433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"339091995","text":"\nfrom keras.datasets import mnist\nfrom keras.models import Sequential\nfrom keras.layers.core import Dense, Dropout, Activation\nfrom keras.optimizers import SGD\n\ndef input():\n (X_train, y_train), (X_test, y_test) = mnist.load_data()\n return (X_train, y_train), (X_test, y_test)\n\ndef init_nn():\n model = Sequential()\n model.add(Dense(20, 64, init='uniform'))\n model.add(Activation('tanh'))\n model.add(Dropout(0.5))\n model.add(Dense(64, 64, init='uniform'))\n model.add(Activation('tanh'))\n model.add(Dropout(0.5))\n model.add(Dense(64, 2, init='uniform'))\n model.add(Activation('softmax'))\n\n sgd = SGD(lr=0.1, decay=1e-6, momentum=0.9, nesterov=True)\n model.compile(loss='mean_squared_error', optimizer=sgd)\n\ndef train_nn(*train):\n model.fit(X_train, y_train, nb_epoch=20, batch_size=16)\n\ndef test_nn(*test):\n score = model.evaluate(X_test, y_test, batch_size=16)\n return score\n\nif __name__ == \"__main__\":\n (X_train, y_train), (X_test, y_test) = input()\n\n\n\n","sub_path":"scripts/keras_mnist.py","file_name":"keras_mnist.py","file_ext":"py","file_size_in_byte":1004,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"312270612","text":"from flask import Flask, jsonify, request\nfrom flask_cors import CORS\nimport requests\n\napp = Flask(__name__)\nCORS(app)\n\nBACKEND_URL = 'https://tuxts-backend.herokuapp.com/'\n\n@app.route('/texts', methods=['GET'])\ndef get_texts():\n date = request.args.get('date', '')\n r = requests.get(BACKEND_URL + 'texts.json', params={'date': date})\n texts = [replace_url(text) for text in r.json()]\n return jsonify(texts)\n\n@app.route('/texts', methods=['POST'])\ndef post_text():\n r = requests.post(BACKEND_URL + 'texts.json', json=request.json)\n text = replace_url(r.json())\n return jsonify(text)\n\n@app.route('/texts/', methods=['GET'])\ndef get_text(id):\n r = requests.get(BACKEND_URL + 'texts/{}.json'.format(id))\n text = replace_url(r.json())\n return jsonify(text)\n\n@app.route('/texts/', methods=['PUT'])\ndef put_text(id):\n r = requests.put(BACKEND_URL + 'texts/{}.json'.format(id), json=request.json)\n text = replace_url(r.json())\n return jsonify(text)\n\n@app.route('/texts/', methods=['DELETE'])\ndef delete_text(id):\n r = requests.delete(BACKEND_URL + 'texts/{}.json'.format(id))\n return '', 204\n\ndef replace_url(text):\n text['url'] = request.host_url + 'texts/{}'.format(text['id'])\n return text\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"297576027","text":"from typing import Dict, Optional\nimport logging\n\nimport boto3\nfrom botocore.exceptions import ClientError\n\n\nclass SQSProducer:\n def __init__(self, endpoint_url: str, queue_name: str, message_group_id: str, logger: 
logging.Logger) -> None:\n self.sqs = boto3.client(\"sqs\", endpoint_url=endpoint_url)\n self.queue_url: str = self.sqs.get_queue_url(QueueName=queue_name)[\"QueueUrl\"]\n self.message_group_id = message_group_id\n self.logger = logger\n\n def send_message(self, message: str) -> Optional[str]:\n try:\n self.logger.debug(f\"attempting to produce message to SQS: {message}\")\n\n response: Dict = self.sqs.send_message(\n QueueUrl=self.queue_url, MessageBody=message, MessageGroupId=self.message_group_id\n )\n\n if \"MessageId\" in response:\n return response[\"MessageId\"]\n else:\n return None\n except ClientError as err:\n self.logger.error(\"error producing message to SQS\", exc_info=True)\n return None\n","sub_path":"libs/kermes-infra/kermes_infra/queues/sqs_producer.py","file_name":"sqs_producer.py","file_ext":"py","file_size_in_byte":1065,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"419846655","text":"from forms import Registration, log_in_form\nfrom flask import Flask, render_template, url_for, flash, redirect\nimport pyrebase\nfrom getpass import getpass\n\n\nfirebaseConfig = {\n \"apiKey\" : \"AIzaSyA_FK4WL3wS1Cy7WOiDA71IP-m2SYwXJv8\",\n \"authDomain\" : \"big-brother-b8f63.firebaseapp.com\",\n \"databaseURL\" : \"https://big-brother-b8f63.firebaseio.com\",\n \"projectId\" : \"big-brother-b8f63\",\n \"storageBucket\" : \"big-brother-b8f63.appspot.com\",\n \"messagingSenderId\" : \"445901799408\",\n \"appId\" : \"1:445901799408:web:696967fb3a0871e9f16735\",\n \"measurementId\" : \"G-22J582W7SC\"\n }\n\nfirebase = pyrebase.initialize_app(firebaseConfig)\nauth = firebase.auth()\ndb = firebase.database()\napp = Flask(__name__)\n\napp.config['SECRET_KEY'] = 'bT4YXj2gdI7jAzEJQVuuYO4KsgEU14H5'\n\n# message=\"/Classroom%201\"\n# def stream_handler(message):\n# print(message)\n# my_stream = db.child(\"/Classroom%201\").stream(stream_handler)\n\n\n@app.route(\"/\")\n@app.route(\"/home\")\ndef home():\n return render_template('home.html',)\n\n\n@app.route(\"/about\")\ndef about():\n return render_template('about.html')\n\n\n@app.route(\"/register\", methods=['GET', 'POST'])\ndef register():\n form = Registration()\n if form.validate_on_submit():\n flash(f'Account Created for {form.username.data}!', 'success')\n return redirect(url_for('home'))\n return render_template('user_reg.html', title='Register', form=form)\n\n\n@app.route(\"/log_in\")\ndef log_in():\n form = log_in_form()\n return render_template('user_login.html', title='Login', form=form)\n\n\n@app.route(\"/roster\", methods=['GET', 'POST'])\ndef roster():\n # print(message[\"event\"])\n # print(message[\"/Classroom%201\"])\n # print(message[\"data\"])\n # my_stream = db.child(\"posts\").stream(stream_handler)\n # return render_template('roster.html', events=my_stream)\n # db_temp = db.child(\"/Classroom%201/1st%20Period\").get().val()\n db_temp = db.child(\"/Classroom%201/1st%20Period\").get().val().items()\n return render_template('roster.html', events=db_temp)\n\nif __name__ == '__main__':\n app.run(debug=True)","sub_path":"website_database/apptest.py","file_name":"apptest.py","file_ext":"py","file_size_in_byte":2066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"556321247","text":"import json\nfrom dataclasses import dataclass\nfrom typing import List\n\nfrom input_data.products import ProductType\n\n\n@dataclass(frozen=True)\nclass ProductScenarioOutcome:\n product_type: ProductType\n actual_volume: int\n vendor_id: 
str\n delivery_number: int\n\n\n@dataclass(frozen=True)\nclass Scenario:\n product_outcomes: List[ProductScenarioOutcome]\n\n\ndef load_scenarios(number_of_scenarios: int):\n scenarios = [\n _load_scenario(scenario_index)\n for scenario_index in range(number_of_scenarios)\n ]\n return scenarios\n\n\ndef _load_scenario(scenario_index: int) -> Scenario:\n\n path = \"scenarios/scenario\" + str(scenario_index) + \".json\"\n\n with open(path) as file:\n scenario_dicts = json.loads(file.read())\n\n scenario = Scenario(\n [\n ProductScenarioOutcome(\n product_type=get_product_type(product_scenario_outcome[\"product_type\"]),\n actual_volume=product_scenario_outcome[\"actual_delivery_volume\"],\n vendor_id=product_scenario_outcome[\"vendor_id\"],\n delivery_number=product_scenario_outcome[\"delivery_number\"]\n )\n for product_scenario_outcome in scenario_dicts[\"results\"]\n ]\n )\n return scenario\n\n\ndef get_product_type(product_type: str):\n if product_type == \"SALMON_1_2\":\n return ProductType.SALMON_1_2\n if product_type == \"SALMON_2_3\":\n return ProductType.SALMON_2_3\n if product_type == \"SALMON_3_4\":\n return ProductType.SALMON_3_4\n if product_type == \"SALMON_4_5\":\n return ProductType.SALMON_4_5\n if product_type == \"SALMON_5_6\":\n return ProductType.SALMON_5_6\n if product_type == \"SALMON_6_7\":\n return ProductType.SALMON_6_7\n if product_type == \"SALMON_7_8\":\n return ProductType.SALMON_7_8\n if product_type == \"SALMON_8_9\":\n return ProductType.SALMON_8_9\n if product_type == \"SALMON_9\":\n return ProductType.SALMON_9\n else:\n raise Exception(\"Unknown product type\")\n","sub_path":"scenarios/load_scenarios.py","file_name":"load_scenarios.py","file_ext":"py","file_size_in_byte":2025,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"558076713","text":"\"\"\"Tests for the system info helper.\"\"\"\nimport json\nfrom unittest.mock import patch\n\nfrom homeassistant.const import __version__ as current_version\nfrom homeassistant.core import HomeAssistant\nfrom homeassistant.helpers.system_info import async_get_system_info\n\n\nasync def test_get_system_info(hass: HomeAssistant) -> None:\n \"\"\"Test the get system info.\"\"\"\n info = await async_get_system_info(hass)\n assert isinstance(info, dict)\n assert info[\"version\"] == current_version\n assert info[\"user\"] is not None\n assert json.dumps(info) is not None\n\n\nasync def test_container_installationtype(hass: HomeAssistant) -> None:\n \"\"\"Test container installation type.\"\"\"\n with patch(\"platform.system\", return_value=\"Linux\"), patch(\n \"os.path.isfile\", return_value=True\n ), patch(\"homeassistant.helpers.system_info.getuser\", return_value=\"root\"):\n info = await async_get_system_info(hass)\n assert info[\"installation_type\"] == \"Home Assistant Container\"\n\n with patch(\"platform.system\", return_value=\"Linux\"), patch(\n \"os.path.isfile\", side_effect=lambda file: file == \"/.dockerenv\"\n ), patch(\"homeassistant.helpers.system_info.getuser\", return_value=\"user\"):\n info = await async_get_system_info(hass)\n assert info[\"installation_type\"] == \"Unsupported Third Party Container\"\n\n\nasync def test_getuser_keyerror(hass: HomeAssistant) -> None:\n \"\"\"Test getuser keyerror.\"\"\"\n with patch(\"homeassistant.helpers.system_info.getuser\", side_effect=KeyError):\n info = await async_get_system_info(hass)\n assert info[\"user\"] is 
None\n","sub_path":"tests/helpers/test_system_info.py","file_name":"test_system_info.py","file_ext":"py","file_size_in_byte":1599,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"297182544","text":"# Do relevant imports\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\nimport numpy as np\nimport cv2\nfrom moviepy.editor import VideoFileClip\n\n\n\ndef grayscale(img):\n \"\"\"Applies the Grayscale transform\n This will return an image with only one color channel\n but NOTE: to see the returned image as grayscale\n (assuming your grayscaled image is called 'gray')\n you should call plt.imshow(gray, cmap='gray')\"\"\"\n return cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)\n # Or use BGR2GRAY if you read an image with cv2.imread()\n # return cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\ndef grayToColor(img):\n return cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)\n\ndef canny(img, low_threshold, high_threshold):\n \"\"\"Applies the Canny transform\"\"\"\n return cv2.Canny(img, low_threshold, high_threshold)\n\n\ndef gaussian_blur(img, kernel_size):\n \"\"\"Applies a Gaussian Noise kernel\"\"\"\n return cv2.GaussianBlur(img, (kernel_size, kernel_size), 0)\n\n\ndef region_of_interest(img, vertices):\n \"\"\"\n Applies an image mask.\n\n Only keeps the region of the image defined by the polygon\n formed from `vertices`. The rest of the image is set to black.\n \"\"\"\n # defining a blank mask to start with\n mask = np.zeros_like(img)\n\n # defining a 3 channel or 1 channel color to fill the mask with depending on the input image\n if len(img.shape) > 2:\n channel_count = img.shape[2] # i.e. 3 or 4 depending on your image\n ignore_mask_color = (255,) * channel_count\n else:\n ignore_mask_color = 255\n\n # filling pixels inside the polygon defined by \"vertices\" with the fill color\n cv2.fillPoly(mask, vertices, ignore_mask_color)\n\n # returning the image only where mask pixels are nonzero\n masked_image = cv2.bitwise_and(img, mask)\n return masked_image\n\n\ndef draw_lines(img, lines, color=[255, 0, 0], thickness=2):\n \"\"\"\n NOTE: this is the function you might want to use as a starting point once you want to\n average/extrapolate the line segments you detect to map out the full\n extent of the lane (going from the result shown in raw-lines-example.mp4\n to that shown in P1_example.mp4).\n\n Think about things like separating line segments by their\n slope ((y2-y1)/(x2-x1)) to decide which segments are part of the left\n line vs. the right line. 
Then, you can average the position of each of\n the lines and extrapolate to the top and bottom of the lane.\n\n This function draws `lines` with `color` and `thickness`.\n Lines are drawn on the image inplace (mutates the image).\n If you want to make the lines semi-transparent, think about combining\n this function with the weighted_img() function below\n \"\"\"\n for line in lines:\n for x1, y1, x2, y2 in line:\n cv2.line(img, (x1, y1), (x2, y2), color, thickness)\n\n\ndef hough_lines(img, rho, theta, threshold, min_line_len, max_line_gap):\n \"\"\"\n `img` should be the output of a Canny transform.\n\n Returns an image with hough lines drawn.\n \"\"\"\n lines = cv2.HoughLinesP(img, rho, theta, threshold, np.array([]), minLineLength=min_line_len,\n maxLineGap=max_line_gap)\n line_img = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8)\n draw_lines(line_img, lines)\n return line_img\n\n\n# Python 3 has support for cool math symbols.\n\ndef weighted_img(img, initial_img, α=0.8, β=1., λ=0.):\n \"\"\"\n `img` is the output of the hough_lines(), An image with lines drawn on it.\n Should be a blank image (all black) with lines drawn on it.\n\n `initial_img` should be the image before any processing.\n\n The result image is computed as follows:\n\n initial_img * α + img * β + λ\n NOTE: initial_img and img must be the same shape!\n \"\"\"\n return cv2.addWeighted(initial_img, α, img, β, λ)\n\n\n\n# Define the Hough transform parameters\nrho = 2\ntheta = np.pi/90\nthreshold = 15\nmin_line_length = 200\nmax_line_gap = 150\n\n# Define the canny edge detection parameters\nblur_kernel = 13\ncanny_low_threshold = 50\ncanny_high_threshold = 150\n\n# Define the detection horizon\ny_cut = .62\n\n# The coordinates of the lines\nglobal prev_bottom_left_x\nglobal prev_bottom_right_x\nglobal prev_top_left_x\nglobal prev_top_right_x\n\nprev_bottom_left_x = 0\nprev_bottom_right_x = 0\nprev_top_left_x = 0\nprev_top_right_x = 0\n\n# This function processes each frame from the video individually\ndef process_image(image):\n global prev_bottom_left_x\n global prev_bottom_right_x\n global prev_top_left_x\n global prev_top_right_x\n\n imshape = image.shape\n\n vertices = np.array([[ (0, imshape[0]),\n (imshape[1] * .45, imshape[0] * y_cut),\n (imshape[1] * .55, imshape[0] * y_cut),\n (imshape[1], imshape[0]) ]], dtype=np.int32)\n\n ## Conversion to grayscale and blurring\n gray = gaussian_blur(grayscale(image),blur_kernel)\n\n ## Increase contrast\n highlights = (gray[:,:] > 180)\n gray[highlights] = 255\n\n\n edges = canny(gray,canny_low_threshold,canny_high_threshold)\n masked = region_of_interest(edges, vertices)\n\n lines = cv2.HoughLinesP(masked, rho, theta, threshold, np.array([]),\n min_line_length, max_line_gap)\n\n line_image = np.copy(image)*0\n # Iterate over the output \"lines\" and draw lines on the blank\n\n left_x = []\n left_y = []\n right_x = []\n right_y = []\n if lines is not None:\n for line in lines:\n for x1, y1, x2, y2 in line:\n slope = np.abs((y2 - y1) / (x2 - x1))\n if(slope > .5):\n if (x1 < x2 and y1 > y2) or (x2 < x1 and y2 > y1) :\n # left line\n left_x.append(x1)\n left_x.append(x2)\n left_y.append(y1)\n left_y.append(y2)\n else:\n # right line\n right_x.append(x1)\n right_x.append(x2)\n right_y.append(y1)\n right_y.append(y2)\n\n top_left_y = int(imshape[0] * y_cut)\n if(len(left_x) and len(left_y)):\n left_slope, left_intersect = np.polyfit(left_x, left_y, 1)\n bottom_left_x = int((imshape[0] - left_intersect) / left_slope)\n top_left_x = int((top_left_y - left_intersect) / 
left_slope)\n prev_bottom_left_x = bottom_left_x\n prev_top_left_x = top_left_x\n else:\n bottom_left_x = prev_bottom_left_x\n top_left_x = prev_top_left_x\n\n\n top_right_y = int(imshape[0] * y_cut)\n if (len(right_x) and len(right_y)):\n right_slope, right_intersect = np.poly1d(np.polyfit(right_x, right_y, 1))\n bottom_right_x = int((imshape[0] - right_intersect) / right_slope)\n top_right_x = int((top_right_y - right_intersect) / right_slope)\n prev_bottom_right_x = bottom_right_x\n prev_top_right_x = top_right_x\n else:\n bottom_right_x = prev_bottom_right_x\n top_right_x = prev_top_right_x\n\n\n draw_lines(line_image, [[[bottom_left_x, imshape[0], top_left_x, top_left_y],\n [bottom_right_x, imshape[0], top_right_x, top_right_y]]], thickness=10)\n\n\n\n # Create a \"color\" binary image to combine with line image\n ##color_edges = np.dstack((masked, masked, masked))\n\n\n # Draw the lines on the edge image\n full_image = cv2.addWeighted(image, 0.7 , line_image, 1, 0)\n return full_image\n\n\nwhite_output = 'solidYellowLeft-output.mp4'\nclip1 = VideoFileClip(\"solidYellowLeft.mp4\")\nwhite_clip = clip1.fl_image(process_image) #NOTE: this function expects color images!!\nwhite_clip.write_videofile(white_output, audio=False,fps=25,codec='mpeg4')\n\n","sub_path":"project 1.py","file_name":"project 1.py","file_ext":"py","file_size_in_byte":7737,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"18"} +{"seq_id":"580753033","text":"from flask import Flask, Blueprint, render_template, redirect, request, url_for, session, flash\nimport forms\nfrom flask import Response # for api: fasta , csv and so on\nfrom flask import jsonify\nfrom flask_wtf import CsrfProtect\nimport os\nimport queries\n\n\n\ninhibitor_blueprint = Blueprint(\n 'inhibitor', __name__,\n template_folder = 'templates/inhibitor',\n url_prefix = '/inhibitor'\n)\n\n@inhibitor_blueprint.record\ndef record_params(setup_state):\n app = setup_state.app\n inhibitor_blueprint.config = dict([(key,value) for (key,value) in app.config.items()])\n\n\n\n#### inhibitors\n\n\n@inhibitor_blueprint.route('/search')\ndef inhibitor_search_result():\n # get arguments from url\n requested_name = request.values['search']\n requested_type = request.values['type']\n\n # modify the results according if the string was found as the name of the inhibitor\n # or as a synonym\n if (requested_type == 'inn'):\n search_results = queries.select_gral(inhibitor_blueprint.config['DATABASE'], 'inn_name, phase, mw','inhibitors_gral_info',\\\n 'inn_name LIKE \"%{0}%\"'.format(requested_name))\n elif (requested_type == 'syn'):\n search_results = queries.select_gral(inhibitor_blueprint.config['DATABASE'], 'inn_name, phase, mw','inhibitors_synonims', \\\n 'synonyms LIKE \"%{0}%\"'.format(requested_name))\n\n context = {'search_results': search_results}\n return render_template('inhibitor_search_results.html', context = context)\n\n@inhibitor_blueprint.route('/')\ndef inhibitor_data(inhib_name):\n\n # get required information from the database\n gral_info = queries.select_gral(inhibitor_blueprint.config['DATABASE'], '*','inhibitors_gral_info',\\\n ' inn_name LIKE \"{}\"'.format(inhib_name)).loc[0,:]\n targets = queries.select_gral(inhibitor_blueprint.config['DATABASE'], 'inh.targets, basic.uniprot_id',\\\n 'inhibitors_targets inh LEFT JOIN basic_info basic ON basic.gene = inh.targets',\\\n ' inn_name LIKE \"{}\"'.format(inhib_name))\n synonyms = list(queries.select_gral(inhibitor_blueprint.config['DATABASE'], 
'synonyms','inhibitors_synonims',\\\n ' inn_name LIKE \"{}\"'.format(inhib_name)).loc[:, 'synonyms'])\n pdbid = list(queries.select_gral(inhibitor_blueprint.config['DATABASE'], 'pdbid','inhibitors_pdbid',\\\n ' inn_name LIKE \"{}\"'.format(inhib_name)).loc[:, 'pdbid'])\n families = list(queries.select_gral(inhibitor_blueprint.config['DATABASE'], 'kinase_families','inhibitors_kin_family',\\\n ' inn_name LIKE \"{}\"'.format(inhib_name)).loc[:,'kinase_families'])\n\n # pass the values to a dictionary, to be used inside the template\n context = {'gral_info':gral_info, 'targets': targets, 'synonyms':synonyms, 'pdbid':pdbid, 'families':families}\n\n return render_template('inhibitor_data.html', context=context)\n\n\n\n@inhibitor_blueprint.route('/basic_info/')\ndef inhibitor_data_information_iframe(inhib_name):\n ''' This is used to generate the