diff --git "a/2733.jsonl" "b/2733.jsonl"
new file mode 100644
--- /dev/null
+++ "b/2733.jsonl"
@@ -0,0 +1,746 @@
+{"seq_id":"44941479","text":"import sys\nfrom tech import drc, spice\nimport debug\nfrom math import log,sqrt,ceil\nimport datetime\nimport getpass\nimport numpy as np\nfrom vector import vector\nfrom globals import OPTS, print_time\n\nfrom sram_base import sram_base\nfrom bank import bank\nfrom dff_buf_array import dff_buf_array\nfrom dff_array import dff_array\n\n\nclass sram_1bank(sram_base):\n \"\"\"\n Procedures specific to a one bank SRAM.\n \"\"\"\n def __init__(self, name, sram_config):\n sram_base.__init__(self, name, sram_config)\n \n\n def create_netlist(self):\n sram_base.create_netlist(self)\n self.create_modules()\n\n def create_modules(self):\n \"\"\" \n This adds the modules for a single bank SRAM with control\n logic. \n \"\"\"\n \n self.bank_inst=self.create_bank(0)\n\n self.control_logic_inst = [None] * self.total_ports\n for port in range(self.total_ports):\n self.control_logic_inst[port] = self.create_control_logic(port)\n\n self.row_addr_dff_inst = self.create_row_addr_dff()\n\n if self.col_addr_dff:\n self.col_addr_dff_inst = self.create_col_addr_dff()\n \n self.data_dff_inst = self.create_data_dff()\n \n def place_modules(self):\n \"\"\" \n This places the modules for a single bank SRAM with control\n logic. \n \"\"\"\n \n # No orientation or offset\n self.place_bank(self.bank_inst, [0, 0], 1, 1)\n\n # The control logic is placed such that the vertical center (between the delay/RBL and\n # the actual control logic) is aligned with the vertical center of the bank (between\n # the sense amps/column mux and cell array)\n # The x-coordinate is placed to allow a single clock wire (plus an extra pitch)\n # up to the row address DFFs.\n control_pos = vector(-self.control_logic.width - 2*self.m2_pitch,\n self.bank.bank_center.y - self.control_logic.control_logic_center.y)\n self.control_logic_inst[0].place(control_pos)\n\n # The row address bits are placed above the control logic aligned on the right.\n row_addr_pos = vector(self.control_logic_inst[0].rx() - self.row_addr_dff.width,\n self.control_logic_inst[0].uy())\n self.row_addr_dff_inst.place(row_addr_pos)\n\n # This is M2 pitch even though it is on M1 to help stem via spacings on the trunk\n data_gap = -self.m2_pitch*(self.word_size+1)\n \n # Add the column address below the bank under the control\n # The column address flops are aligned with the data flops\n if self.col_addr_dff:\n col_addr_pos = vector(self.bank.bank_center.x - self.col_addr_dff.width - self.bank.central_bus_width,\n data_gap - self.col_addr_dff.height)\n self.col_addr_dff_inst.place(col_addr_pos)\n \n # Add the data flops below the bank to the right of the center of bank:\n # This relies on the center point of the bank:\n # decoder in upper left, bank in upper right, sensing in lower right.\n # These flops go below the sensing and leave a gap to channel route to the\n # sense amps.\n data_pos = vector(self.bank.bank_center.x,\n data_gap - self.data_dff.height)\n self.data_dff_inst.place(data_pos)\n \n # two supply rails are already included in the bank, so just 2 here.\n # self.width = self.bank.width + self.control_logic.width + 2*self.supply_rail_pitch\n # self.height = self.bank.height \n \n def add_layout_pins(self):\n \"\"\"\n Add the top-level pins for a single bank SRAM with control.\n \"\"\"\n # Connect the control pins as inputs\n for n in self.control_logic_inputs + [\"clk\"]:\n self.copy_layout_pin(self.control_logic_inst[0], 
n)\n\n for i in range(self.word_size):\n dout_name = \"dout0[{}]\".format(i)\n self.copy_layout_pin(self.bank_inst, dout_name, \"DOUT0[{}]\".format(i))\n\n # Lower address bits\n for i in range(self.col_addr_size):\n self.copy_layout_pin(self.col_addr_dff_inst, \"din[{}]\".format(i),\"ADDR0[{}]\".format(i))\n # Upper address bits\n for i in range(self.row_addr_size):\n self.copy_layout_pin(self.row_addr_dff_inst, \"din[{}]\".format(i),\"ADDR0[{}]\".format(i+self.col_addr_size))\n\n for i in range(self.word_size):\n din_name = \"din[{}]\".format(i)\n self.copy_layout_pin(self.data_dff_inst, din_name, \"DIN0[{}]\".format(i))\n \n def route(self):\n \"\"\" Route a single bank SRAM \"\"\"\n\n self.add_layout_pins()\n\n self.route_vdd_gnd()\n\n self.route_clk()\n \n self.route_control_logic()\n \n self.route_row_addr_dff()\n\n if self.col_addr_dff:\n self.route_col_addr_dff()\n \n self.route_data_dff()\n\n def route_clk(self):\n \"\"\" Route the clock network \"\"\"\n\n # This is the actual input to the SRAM\n self.copy_layout_pin(self.control_logic_inst[0], \"clk\")\n\n # Connect all of these clock pins to the clock in the central bus\n # This is something like a \"spine\" clock distribution. The two spines\n # are clk_buf and clk_buf_bar\n \n bank_clk_buf_pin = self.bank_inst.get_pin(\"clk_buf\")\n bank_clk_buf_pos = bank_clk_buf_pin.center()\n bank_clk_buf_bar_pin = self.bank_inst.get_pin(\"clk_buf_bar\")\n bank_clk_buf_bar_pos = bank_clk_buf_bar_pin.center()\n\n if self.col_addr_dff:\n dff_clk_pin = self.col_addr_dff_inst.get_pin(\"clk\")\n dff_clk_pos = dff_clk_pin.center()\n mid_pos = vector(bank_clk_buf_pos.x, dff_clk_pos.y)\n self.add_wire((\"metal3\",\"via2\",\"metal2\"),[dff_clk_pos, mid_pos, bank_clk_buf_pos])\n \n data_dff_clk_pin = self.data_dff_inst.get_pin(\"clk\")\n data_dff_clk_pos = data_dff_clk_pin.center()\n mid_pos = vector(bank_clk_buf_pos.x, data_dff_clk_pos.y)\n self.add_wire((\"metal3\",\"via2\",\"metal2\"),[data_dff_clk_pos, mid_pos, bank_clk_buf_pos])\n\n # This uses a metal2 track to the right of the control/row addr DFF\n # to route vertically.\n control_clk_buf_pin = self.control_logic_inst[0].get_pin(\"clk_buf\")\n control_clk_buf_pos = control_clk_buf_pin.rc()\n row_addr_clk_pin = self.row_addr_dff_inst.get_pin(\"clk\")\n row_addr_clk_pos = row_addr_clk_pin.rc()\n mid1_pos = vector(self.row_addr_dff_inst.rx() + self.m2_pitch,\n row_addr_clk_pos.y)\n mid2_pos = vector(mid1_pos.x,\n control_clk_buf_pos.y)\n # Note, the via to the control logic is taken care of when we route\n # the control logic to the bank\n self.add_wire((\"metal3\",\"via2\",\"metal2\"),[row_addr_clk_pos, mid1_pos, mid2_pos, control_clk_buf_pos])\n \n def route_vdd_gnd(self):\n \"\"\" Propagate all vdd/gnd pins up to this level for all modules \"\"\"\n\n # These are the instances that every bank has\n top_instances = [self.bank_inst,\n self.row_addr_dff_inst,\n self.data_dff_inst,\n self.control_logic_inst[0]]\n if self.col_addr_dff:\n top_instances.append(self.col_addr_dff_inst)\n\n \n for inst in top_instances:\n self.copy_layout_pin(inst, \"vdd\")\n self.copy_layout_pin(inst, \"gnd\")\n \n def new_route_vdd_gnd(self):\n \"\"\" Propagate all vdd/gnd pins up to this level for all modules \"\"\"\n\n # These are the instances that every bank has\n top_instances = [self.bank_inst,\n self.row_addr_dff_inst,\n self.data_dff_inst,\n self.control_logic_inst[0]]\n if self.col_addr_dff:\n top_instances.append(self.col_addr_dff_inst)\n\n \n # for inst in top_instances:\n # self.copy_layout_pin(inst, 
\"vdd\")\n # self.copy_layout_pin(inst, \"gnd\")\n\n blockages=self.get_blockages(\"metal3\", top_level=True)\n\n # Gather all of the vdd/gnd pins\n vdd_pins=[]\n gnd_pins=[]\n for inst in top_instances:\n vdd_pins.extend([x for x in inst.get_pins(\"vdd\") if x.layer == \"metal3\"])\n gnd_pins.extend([x for x in inst.get_pins(\"gnd\") if x.layer == \"metal3\"])\n\n # Create candidate stripes on M3/M4\n lowest=self.find_lowest_coords()\n highest=self.find_highest_coords()\n m3_y_coords = np.arange(lowest[1],highest[1],self.m2_pitch)\n\n # These are the rails that will be available for vdd/gnd\n m3_rects = []\n # These are the \"inflated\" shapes for DRC checks\n m3_drc_rects = []\n for y in m3_y_coords:\n # This is just what metal will be drawn\n ll = vector(lowest[0], y - 0.5*self.m3_width)\n ur = vector(highest[0], y + 0.5*self.m3_width)\n m3_rects.append([ll, ur])\n # This is a full m3 pitch for DRC conflict checking\n ll = vector(lowest[0], y - 0.5*self.m3_pitch )\n ur = vector(highest[0], y + 0.5*self.m3_pitch)\n m3_drc_rects.append([ll, ur])\n\n vdd_rects = []\n gnd_rects = []\n \n # Now, figure how if the rails intersect a blockage, vdd, or gnd pin\n # Divide the rails up alternately\n # This should be done in less than n^2 using a kd-tree or something\n # for drc_rect,rect in zip(m3_drc_rects,m3_rects):\n # for b in blockages:\n # if rect_overlaps(b,drc_rect):\n # break\n # else:\n # gnd_rects.append(rect)\n\n \n\n # Create the vdd and gnd rails\n for rect in m3_rects:\n (ll,ur) = rect\n \n for rect in gnd_rects:\n (ll,ur) = rect\n self.add_layout_pin(text=\"gnd\",\n layer=\"metal3\",\n offset=ll,\n width=ur.x-ll.x,\n height=ur.y-ll.y)\n for rect in vdd_rects:\n (ll,ur) = rect\n self.add_layout_pin(text=\"vdd\",\n layer=\"metal3\",\n offset=ll,\n width=ur.x-ll.x,\n height=ur.y-ll.y)\n \n def route_control_logic(self):\n \"\"\" Route the outputs from the control logic module \"\"\"\n for n in self.control_logic_outputs:\n src_pin = self.control_logic_inst[0].get_pin(n)\n dest_pin = self.bank_inst.get_pin(n) \n self.connect_rail_from_left_m2m3(src_pin, dest_pin)\n self.add_via_center(layers=(\"metal1\",\"via1\",\"metal2\"),\n offset=src_pin.rc(),\n rotate=90)\n \n\n def route_row_addr_dff(self):\n \"\"\" Connect the output of the row flops to the bank pins \"\"\"\n for i in range(self.row_addr_size):\n flop_name = \"dout[{}]\".format(i)\n bank_name = \"addr0[{}]\".format(i+self.col_addr_size)\n flop_pin = self.row_addr_dff_inst.get_pin(flop_name)\n bank_pin = self.bank_inst.get_pin(bank_name)\n flop_pos = flop_pin.center()\n bank_pos = bank_pin.center()\n mid_pos = vector(bank_pos.x,flop_pos.y)\n self.add_wire((\"metal3\",\"via2\",\"metal2\"),[flop_pos, mid_pos,bank_pos])\n self.add_via_center(layers=(\"metal2\",\"via2\",\"metal3\"),\n offset=flop_pos,\n rotate=90)\n\n def route_col_addr_dff(self):\n \"\"\" Connect the output of the row flops to the bank pins \"\"\"\n\n bus_names = [\"addr[{}]\".format(x) for x in range(self.col_addr_size)] \n col_addr_bus_offsets = self.create_horizontal_bus(layer=\"metal1\",\n pitch=self.m1_pitch,\n offset=self.col_addr_dff_inst.ul() + vector(0, self.m1_pitch),\n names=bus_names,\n length=self.col_addr_dff_inst.width)\n\n dff_names = [\"dout[{}]\".format(x) for x in range(self.col_addr_size)]\n data_dff_map = zip(dff_names, bus_names)\n self.connect_horizontal_bus(data_dff_map, self.col_addr_dff_inst, col_addr_bus_offsets)\n \n bank_names = [\"addr0[{}]\".format(x) for x in range(self.col_addr_size)]\n data_bank_map = zip(bank_names, bus_names)\n 
self.connect_horizontal_bus(data_bank_map, self.bank_inst, col_addr_bus_offsets)\n \n\n def route_data_dff(self):\n \"\"\" Connect the output of the data flops to the write driver \"\"\"\n # This is where the channel will start (y-dimension at least)\n offset = self.data_dff_inst.ul() + vector(0, self.m1_pitch)\n\n dff_names = [\"dout[{}]\".format(x) for x in range(self.word_size)]\n bank_names = [\"din0[{}]\".format(x) for x in range(self.word_size)]\n\n route_map = list(zip(bank_names, dff_names))\n dff_pins = {key: self.data_dff_inst.get_pin(key) for key in dff_names }\n bank_pins = {key: self.bank_inst.get_pin(key) for key in bank_names }\n # Combine the dff and bank pins into a single dictionary of pin name to pin.\n all_pins = {**dff_pins, **bank_pins}\n self.create_horizontal_channel_route(route_map, all_pins, offset)\n \n \n\n def add_lvs_correspondence_points(self):\n \"\"\" \n This adds some points for easier debugging if LVS goes wrong. \n These should probably be turned off by default though, since extraction\n will show these as ports in the extracted netlist.\n \"\"\"\n \n for n in self.control_logic_outputs:\n pin = self.control_logic_inst[0].get_pin(n)\n self.add_label(text=n,\n layer=pin.layer,\n offset=pin.center())\n","sub_path":"compiler/sram_1bank.py","file_name":"sram_1bank.py","file_ext":"py","file_size_in_byte":14248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"464434539","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport sys\n\nfrom bs4 import BeautifulSoup\n\nfrom common.Scientist import Scientist\nfrom polon.POLonParser import POLonParser\n\nreload(sys)\nsys.setdefaultencoding(\"utf8\")\n\n\nclass POLonPhDParser(POLonParser):\n def __init__(self):\n POLonParser.__init__(self)\n\n def parse(self, html):\n\n soup = BeautifulSoup(html, 'html.parser')\n\n colcounter = 0\n row = []\n\n trs = soup.findAll(\"tr\", {\"class\": \"rf-dt-r\"})\n for tr in trs:\n tds = tr.findAll(\"td\", {\"class\": \"rf-dt-c\"})\n for td in tds:\n row.append(td.text.strip())\n colcounter += 1\n\n if colcounter == 6:\n colcounter = 0\n self.scientists.append(\n Scientist(' '.join(row[1].split()[1:]).title(), ' '.join(row[2].split()[1:]).title(), ' '.join(row[3].split()[2:4]),\n ' '.join(row[4].split()[42:]), ' '.join(row[5].split()[4:])))\n row[:] = []\n\n return self.scientists\n\n\nif __name__ == \"__main__\":\n pass\n","sub_path":"POL-on/polon/POLonPhDParser.py","file_name":"POLonPhDParser.py","file_ext":"py","file_size_in_byte":1101,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"648490478","text":"from __future__ import annotations\nfrom os import path\nfrom itertools import islice\nfrom typing import Optional\nfrom collections import deque\nimport time\n\nDeck = list[int]\n\ndef parse_decks(input_file: str) -> list[Deck]:\n with open(input_file) as f:\n deck_strs = f.read().split('\\n\\n')\n return [parse_deck(s) for s in deck_strs]\n\ndef parse_deck(deck_str: str) -> Deck:\n lines = deck_str.splitlines()\n return [int(line) for line in lines[1:]]\n\ndef play_combat(decks: list[Deck]) -> tuple[int, Deck]:\n players = [deque(d) for d in decks]\n\n # while there are at least two players with cards\n while len([p for p in players if len(p) > 0]) > 1:\n plays = [(p.popleft(), p) for p in players if len(p) > 0]\n\n sorted_plays = sorted(plays, reverse = True)\n _, winning_p = sorted_plays[0]\n for num, _ in sorted_plays:\n winning_p.append(num)\n\n return 
next((idx, list(p)) for idx, p in enumerate(players) if len(p) > 0)\n\ndef play_recursive_combat(decks: list[Deck]) -> tuple[int, Deck]:\n players = [deque(d) for d in decks]\n previous_states = list[list[deque[int]]]()\n\n # while there are at least two players with cards\n while len([p for p in players if len(p) > 0]) > 1:\n # if this state already appeared, player 1 wins instantly\n curr_state = [p.copy() for p in players]\n if curr_state in previous_states:\n return 0, list(players[0])\n else:\n previous_states.append(curr_state)\n\n # otherwise, draw the cards from the top of each deck\n plays = [(p.popleft(), p) for p in players if len(p) > 0]\n\n # if all have enough cards left, enter a new recursive combat\n # otherwise the highest wins\n if all(len(p) >= num for num, p in plays):\n rec_winner_idx, _ = play_recursive_combat([list(islice(p, num)) for num, p in plays])\n winning_num, winning_p = plays[rec_winner_idx]\n else:\n winning_num, winning_p = max(plays)\n\n # add the winning first and the rest sorted (not in the game rules, but trying to generalize to 3+ players)\n winning_p.append(winning_num)\n for num, _ in sorted(plays, reverse = True):\n if num != winning_num:\n winning_p.append(num)\n\n return next((idx, list(p)) for idx, p in enumerate(players) if len(p) > 0)\n \n\ndef get_score(deck: Deck) -> int:\n return sum(\n (idx + 1) * num\n for idx, num\n in enumerate(deck[::-1])\n )\n \ndef solve(input_file: str, *, expected: tuple[Optional[int], Optional[int]] = (None, None), skip_part1: bool = False) -> None:\n print(f'[{input_file}]')\n full_path = path.join(path.dirname(__file__), input_file)\n\n # Common\n start_common = time.time_ns()\n decks = parse_decks(full_path)\n time_common = time.time_ns() - start_common\n\n # Part 1\n if not skip_part1:\n start_p1 = time.time_ns()\n _, winner_deck = play_combat(decks)\n print(winner_deck)\n obtained_p1 = get_score(winner_deck)\n time_p1 = (time.time_ns() - start_p1) + time_common\n\n print('Part 1 answer:', obtained_p1, '(took', time_p1, 'ns)')\n if expected[0] is not None and expected[0] != obtained_p1:\n print('Expected:', expected[0])\n\n # Part 2\n start_p2 = time.time_ns()\n _, winner_deck_p2 = play_recursive_combat(decks)\n print(winner_deck_p2)\n obtained_p2 = get_score(winner_deck_p2)\n time_p2 = (time.time_ns() - start_p2) + time_common\n\n print('Part 2 answer:', obtained_p2, '(took', time_p2, 'ns)')\n if expected[1] is not None and expected[1] != obtained_p2:\n print('Expected:', expected[1])\n\n print()\n\nif __name__ == '__main__':\n solve('example.txt', expected = (306, 291))\n solve('example2.txt', expected = (None, 105), skip_part1 = True) # part 1 doesn't handle loops\n solve('input.txt', expected = (30138, 31587))","sub_path":"day22/day22.py","file_name":"day22.py","file_ext":"py","file_size_in_byte":3927,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"378247752","text":"#\n# LSST Data Management System\n# Copyright 2016-2017 AURA/LSST.\n#\n# This product includes software developed by the\n# LSST Project (http://www.lsst.org/).\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A 
PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the LSST License Statement and\n# the GNU General Public License along with this program. If not,\n# see .\n#\n\n\"\"\"Simple unit test for supertask.SuperTask.\n\"\"\"\n\nfrom __future__ import absolute_import, division, print_function\n\nimport unittest\n\nimport lsst.utils.tests\nfrom lsst.daf.butler.core.datasets import DatasetRef\nfrom lsst.daf.butler.core.quantum import Quantum\nfrom lsst.daf.butler.core.run import Run\nimport lsst.pex.config as pexConfig\nimport lsst.pipe.base as pipeBase\nfrom lsst.pipe import supertask\nfrom lsst.pipe.supertask.examples.exampleStorageClass import ExampleStorageClass # noqa: F401\n\n\nclass ButlerMock():\n \"\"\"Mock version of butler, only usable for this test\n \"\"\"\n def __init__(self):\n self.datasets = {}\n\n @staticmethod\n def key(ref):\n \"\"\"Make a dict key out of ref.\n \"\"\"\n key = (ref.dataId[\"camera\"], ref.dataId[\"visit\"])\n return tuple(key)\n\n def get(self, ref, parameters=None):\n key = self.key(ref)\n# print(\"butler.get: name={} key={}\".format(ref.datasetType.name, key))\n dsdata = self.datasets.get(ref.datasetType.name)\n if dsdata:\n return dsdata.get(key)\n return None\n\n def put(self, ref, inMemoryDataset, producer=None):\n key = self.key(ref)\n# print(\"butler.put: {} -> name={} key={}\".format(inMemoryDataset, ref.datasetType.name, key))\n dsdata = self.datasets.setdefault(ref.datasetType.name, {})\n dsdata[key] = inMemoryDataset\n\n\nclass AddConfig(supertask.SuperTaskConfig):\n addend = pexConfig.Field(doc=\"amount to add\", dtype=int, default=3)\n input = pexConfig.ConfigField(dtype=supertask.InputDatasetConfig,\n doc=\"Input dataset type for this task\")\n output = pexConfig.ConfigField(dtype=supertask.OutputDatasetConfig,\n doc=\"Output dataset type for this task\")\n\n def setDefaults(self):\n # set units of a quantum, this task uses per-visit quanta and it\n # expects dataset units to be the same\n self.quantum.units = [\"Camera\", \"Visit\"]\n self.quantum.sql = None\n\n # default config for input dataset type\n self.input.name = \"add_input\"\n self.input.units = [\"Camera\", \"Visit\"]\n self.input.storageClass = \"example\"\n\n # default config for output dataset type\n self.output.name = \"add_output\"\n self.output.units = [\"Camera\", \"Visit\"]\n self.output.storageClass = \"example\"\n\n\nclass AddTask(supertask.SuperTask):\n ConfigClass = AddConfig\n _DefaultName = \"add_task\"\n\n def run(self, input, output):\n self.metadata.add(\"add\", self.config.addend)\n output = [val + self.config.addend for val in input]\n return pipeBase.Struct(output=output)\n\n\nclass SuperTaskTestCase(unittest.TestCase):\n \"\"\"A test case for supertask.SuperTask\n \"\"\"\n\n def _makeDSRefVisit(self, dstype, visitId):\n return DatasetRef(datasetType=dstype,\n dataId=dict(camera=\"X\",\n visit=visitId,\n physical_filter='a',\n abstract_filter='b'))\n\n def _makeQuanta(self, config):\n \"\"\"Create set of Quanta\n \"\"\"\n run = Run(collection=1, environment=None, pipeline=None)\n\n dstype0 = supertask.SuperTask.makeDatasetType(config.input)\n dstype1 = supertask.SuperTask.makeDatasetType(config.output)\n\n quanta = []\n for visit in range(100):\n quantum = Quantum(run=run, task=None)\n quantum.addPredictedInput(self._makeDSRefVisit(dstype0, visit))\n quantum.addOutput(self._makeDSRefVisit(dstype1, visit))\n quanta.append(quantum)\n\n return quanta\n\n def testRunQuantum(self):\n \"\"\"Test for 
AddTask.runQuantum() implementation.\n \"\"\"\n butler = ButlerMock()\n task = AddTask(config=AddConfig())\n\n # make all quanta\n quanta = self._makeQuanta(task.config)\n\n # add input data to butler\n dstype0 = supertask.SuperTask.makeDatasetType(task.config.input)\n for i, quantum in enumerate(quanta):\n ref = quantum.predictedInputs[dstype0.name][0]\n butler.put(ref, 100 + i)\n\n # run the task on each quantum\n for quantum in quanta:\n task.runQuantum(quantum, butler)\n\n # look at the output produced by the task\n outputName = task.config.output.name\n dsdata = butler.datasets[outputName]\n self.assertEqual(len(dsdata), len(quanta))\n for i, quantum in enumerate(quanta):\n ref = quantum.outputs[outputName][0]\n self.assertEqual(dsdata[butler.key(ref)], 100 + i + 3)\n\n def testChain2(self):\n \"\"\"Test for two-task chain.\n \"\"\"\n butler = ButlerMock()\n task1 = AddTask(config=AddConfig())\n config2 = AddConfig()\n config2.addend = 200\n config2.input.name = task1.config.output.name\n config2.output.name = \"add_output_2\"\n task2 = AddTask(config=config2)\n\n # make all quanta\n quanta1 = self._makeQuanta(task1.config)\n quanta2 = self._makeQuanta(task2.config)\n\n # add input data to butler\n dstype0 = supertask.SuperTask.makeDatasetType(task1.config.input)\n for i, quantum in enumerate(quanta1):\n ref = quantum.predictedInputs[dstype0.name][0]\n butler.put(ref, 100 + i)\n\n # run the task on each quantum\n for quantum in quanta1:\n task1.runQuantum(quantum, butler)\n for quantum in quanta2:\n task2.runQuantum(quantum, butler)\n\n # look at the output produced by the task\n outputName = task1.config.output.name\n dsdata = butler.datasets[outputName]\n self.assertEqual(len(dsdata), len(quanta1))\n for i, quantum in enumerate(quanta1):\n ref = quantum.outputs[outputName][0]\n self.assertEqual(dsdata[butler.key(ref)], 100 + i + 3)\n\n outputName = task2.config.output.name\n dsdata = butler.datasets[outputName]\n self.assertEqual(len(dsdata), len(quanta2))\n for i, quantum in enumerate(quanta2):\n ref = quantum.outputs[outputName][0]\n self.assertEqual(dsdata[butler.key(ref)], 100 + i + 3 + 200)\n\n\nclass MyMemoryTestCase(lsst.utils.tests.MemoryTestCase):\n pass\n\n\ndef setup_module(module):\n lsst.utils.tests.init()\n\n\nif __name__ == \"__main__\":\n lsst.utils.tests.init()\n unittest.main()\n","sub_path":"tests/test_superTask.py","file_name":"test_superTask.py","file_ext":"py","file_size_in_byte":7278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"208424642","text":"import os\nimport logging\nimport json\nimport datetime\n\nfrom .utilities import connect_to_mongo_gridfs, get_mongoDB\n\n\n\nNO_DASK = False # set this to True to run locally without dask (for debug purposes)\n\nlogging.basicConfig()\nlogging.getLogger().setLevel(logging.INFO)\nlogger = logging.getLogger(\"nta_app.ms2\")\nlogger.setLevel(logging.INFO)\n\nMONGO_SERVER = os.environ.get('MONGO_SERVER')\n\n#def store_data(path, input_data):\n # to_save = json.dumps(input_data)\n # gridfs = connect_to_mongo_gridfs(mongo_address)\n# gridfs.put(to_save, filename =\"TEST/PATH1\", _id=\"TEST/PATH1\", encoding='utf-8')\n\ndef delete_data(filename, jobid, ms):\n gridfs = connect_to_mongo_gridfs(MONGO_SERVER)\n mongoDB = get_mongoDB(MONGO_SERVER)\n files = mongoDB.get_collection(\"fs.files\")\n for ID in files.find({'filename': filename, 'jobid': jobid, 'ms': ms}).distinct('_id'):\n gridfs.delete(ID)\n \ndef get_filenames(jobid, ms):\n resp_dict = {'Neg':[], 'Pos':[]}\n gridfs = 
connect_to_mongo_gridfs(MONGO_SERVER)\n mongoDB = get_mongoDB(MONGO_SERVER)\n files = mongoDB.get_collection(\"fs.files\")\n for ID in files.find({'jobid': jobid, 'ms': ms, 'mode': 'neg'}).distinct('_id'):\n resp_dict['Neg'].append(gridfs.get(ID).filename)\n for ID in files.find({'jobid': jobid, 'ms': ms, 'mode': 'pos'}).distinct('_id'):\n resp_dict['Pos'].append(gridfs.get(ID).filename)\n return json.dumps(resp_dict)\n\ndef get_grid_db():\n gridfs = connect_to_mongo_gridfs(MONGO_SERVER)\n return gridfs\n\ndef handle_uploaded_file(file, filename, filetype, ms, mode, jobid):\n gridfs_df = get_grid_db()\n file_id = gridfs_df.put(file, filename = filename, filetype = filetype, encoding='utf-8', ms=ms, mode=mode, jobid=jobid)\n \n mongoDB = get_mongoDB(MONGO_SERVER)\n files = mongoDB.get_collection(\"fs.files\")\n chunks = mongoDB.get_collection(\"fs.chunks\")\n\n files.create_index([(\"uploadDate\", 1)], expireAfterSeconds=86400) #Expires in 24h\n \n chunks.update_many({\"files_id\": file_id}, \n {\"$set\": {\"uploadDate\": datetime.datetime.utcnow()}})\n chunks.create_index([(\"uploadDate\", 1)], expireAfterSeconds=86460) #Expires in 24h\n \n return file_id\n \n \ndef list(self):\n \"\"\"List the names of all files stored in this instance of\n :class:`GridFS`.\n .. versionchanged:: 3.1\n ``list`` no longer ensures indexes.\n \"\"\"\n # With an index, distinct includes documents with no filename\n # as None.\n return [\n name for name in self.__files.distinct(\"filename\")\n if name is not None]","sub_path":"app/data_handler/data_task.py","file_name":"data_task.py","file_ext":"py","file_size_in_byte":2579,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"344502773","text":"#%%\nfrom numpy.lib.function_base import average, percentile\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport requests\nfrom io import StringIO\nfrom sklearn.cluster import KMeans\n\n\n#%%\n'''\n-------------------------\nLOADING THE BLINKING DATASET\n-------------------------\n'''\nsignals = pd.read_csv( 'data/blinking.dat', delimiter=' ', \n names = ['timestamp','counter','eeg','attention','meditation','blinking']\n )\n\nprint('Information:')\nprint(signals.head())\n\ndata=signals.values\n\n#%%\n# Keep the column corresponding to eeg\neeg = data[:,2]\n# %%\nprint(\"Length 1 %2d\" % len(eeg))\n\n# %%\nplt.plot(eeg,'r', label='EEG')\nplt.xlabel('t');\nplt.ylabel('eeg(t)');\nplt.title(r'Original EEG Signal') # r'' denotes a raw string, which has no special characters\nplt.ylim([-2000, 2000]);\nplt.xlim([0,len(eeg)])\nplt.savefig('signal.png')\nplt.show()\n\n\n\n# %%\n# The convolution operation implements the moving-average smoothing\nwindowlength = 10\navgeeg = np.convolve(eeg, np.ones((windowlength,))/windowlength, mode='same')\n# %%\n# The kernel/mask consists of 10 values of 1/10. When these values are summed at each position, the value is replaced by the average\n# of the 5 previous and 4 following values. 
\n\nx = avgeeg\nkmeans = KMeans(n_clusters=3).fit(x.reshape(-1,1))\nkmeans.predict(x.reshape(-1,1))\n\n# %%\nkmeans.cluster_centers_\nkmeans.labels_\n# %%\nnp.bincount(kmeans.labels_)\n# %%\nyValues = x\nxValues = np.arange(0,len(x),1)\ncolorValues = kmeans.labels_\n\n# %%\n\n# values of x\nx = xValues\n \n# values of y\ny = yValues\n \n# empty list, will hold color value\n# corresponding to x\ncol =[]\n \nfor i in range(0, len(x)):\n if colorValues[i]==0:\n col.append('blue') \n elif colorValues[i]==1:\n col.append('green') \n else:\n col.append('magenta') \n \nfor i in range(len(x)):\n \n # plotting the corresponding x with y \n # and respective color\n plt.scatter(x[i], y[i], c = col[i], s = 10,\n linewidth = 0)\n \n \nplt.show()\n# %%\nfiltro_eeg=[]\ncontador=0\nfor i in range(len(avgeeg)):\n if colorValues[i]==0:\n filtro_eeg.append(0)\n elif colorValues[i]==1:\n filtro_eeg.append(1)\n if colorValues[i-1] != colorValues[i]:\n print(i)\n contador=contador+1\n elif colorValues[i]==2:\n filtro_eeg.append(-1)\n if colorValues[i-1] != colorValues[i]:\n print(i)\n contador=contador+1\n else:\n filtro_eeg.append(0)\nprint(\"Blinking counter: {}\".format(contador))\nfiltro_eeg=np.asarray(filtro_eeg)\nplt.figure(figsize=(16,5))\nplt.plot(filtro_eeg,color=\"blue\")\nplt.title(\"Blinking Filter\",size=20)\nplt.ylabel(\"Class\",size=10)\nplt.xlabel(\"Timepoint\",size=10)\nplt.show()\n\n# %%\n","sub_path":"clustering.py","file_name":"clustering.py","file_ext":"py","file_size_in_byte":2812,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"496839855","text":"# -*- coding: utf-8 -*-\n# (C) Copyright 2014 Voyager Search\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport os\nimport sys\nimport json\nimport shutil\nimport requests\nimport arcpy\nfrom utils import status\nfrom utils import task_utils\nimport warnings\nfrom requests.packages.urllib3.exceptions import InsecureRequestWarning\nwarnings.simplefilter('ignore', InsecureRequestWarning)\n\n\n# Get SSL trust setting.\nverify_ssl = task_utils.get_ssl_mode()\n\nstatus_writer = status.Writer()\nresult_count = 0\nprocessed_count = 0.\nskipped_reasons = {}\nerrors_reasons = {}\narcpy.env.overwriteOutput = True\nmxd_path = os.path.join(os.path.dirname(os.path.dirname(os.path.realpath(__file__))), 'supportfiles', 'GroupLayerTemplate.mxd')\n\n\nclass ObjectEncoder(json.JSONEncoder):\n \"\"\"Support non-native Python types for JSON serialization.\"\"\"\n def default(self, obj):\n if isinstance(obj, (list, dict, str, unicode, int, float, bool, type(None))):\n return json.JSONEncoder.default(self, obj)\n\n\ndef update_index(file_location, layer_file, item_id, name, location, server, hdrs):\n \"\"\"Update the index by re-indexing an item.\"\"\"\n import zmq\n indexer = sys.argv[3].split('=')[1]\n zmq_socket = zmq.Context.instance().socket(zmq.PUSH)\n zmq_socket.connect(indexer)\n res = requests.get(\"{0}/api/rest/index/record/{1}\".format(server, item_id), verify=verify_ssl, headers=hdrs)\n fields = 
res.json()\n fields[\"path_to_lyr\"] = layer_file\n fields[\"hasLayerFile\"] = True\n entry = {\"action\": \"UPDATE\", \"id\": item_id, \"location\": location, \"entry\": {\"fields\": fields}}\n zmq_socket.send_json(entry, cls=ObjectEncoder)\n\n\ndef execute(request):\n \"\"\"Creates layer files for the items in the search results.\n :param request: json as a dict.\n \"\"\"\n created = 0\n skipped = 0\n errors = 0\n global result_count\n parameters = request['params']\n headers = {'x-access-token': task_utils.get_security_token(request['owner'])}\n\n if not os.path.exists(request['folder']):\n os.makedirs(request['folder'])\n\n # meta_folder = task_utils.get_parameter_value(parameters, 'meta_data_folder', 'value')\n voyager_server = sys.argv[2].split('=')[1].split('solr')[0][:-1]\n url = \"{0}/api/rest/system/settings\".format(voyager_server)\n response = requests.get(url, verify=verify_ssl, headers=headers)\n meta_folder = response.json()['folders']['meta']\n result_count, response_index = task_utils.get_result_count(parameters)\n # Query the index for results in groups of 25.\n query_index = task_utils.QueryIndex(parameters[response_index])\n fl = query_index.fl\n query = '{0}{1}{2}'.format(sys.argv[2].split('=')[1], '/select?&wt=json', fl)\n fq = query_index.get_fq()\n if fq:\n groups = task_utils.grouper(range(0, result_count), task_utils.CHUNK_SIZE, '')\n query += fq\n elif 'ids' in parameters[response_index]:\n groups = task_utils.grouper(list(parameters[response_index]['ids']), task_utils.CHUNK_SIZE, '')\n else:\n groups = task_utils.grouper(range(0, result_count), task_utils.CHUNK_SIZE, '')\n\n status_writer.send_percent(0.0, _('Starting to process...'), 'create_layer_files')\n i = 0.\n\n for group in groups:\n i += len(group) - group.count('')\n if fq:\n results = requests.get(query + \"&rows={0}&start={1}\".format(task_utils.CHUNK_SIZE, group[0]), verify=verify_ssl, headers=headers)\n elif 'ids' in parameters[response_index]:\n results = requests.get(query + '{0}&ids={1}'.format(fl, ','.join(group)), verify=verify_ssl, headers=headers)\n else:\n results = requests.get(query + \"&rows={0}&start={1}\".format(task_utils.CHUNK_SIZE, group[0]), verify=verify_ssl, headers=headers)\n\n docs = results.json()['response']['docs']\n # docs = eval(results.read().replace('false', 'False').replace('true', 'True'))['response']['docs']\n if not docs:\n docs = parameters[response_index]['response']['docs']\n input_items = []\n for doc in docs:\n if 'path' in doc:\n input_items.append((doc['id'], doc['path'], doc['name'], doc['location']))\n result = create_layer_file(input_items, meta_folder, voyager_server, headers)\n created += result[0]\n errors += result[1]\n skipped += result[2]\n\n try:\n shutil.copy2(os.path.join(os.path.dirname(os.path.dirname(__file__)), 'supportfiles', '_thumb.png'), request['folder'])\n except IOError:\n pass\n # Update state if necessary.\n if errors > 0 or skipped > 0:\n status_writer.send_state(status.STAT_WARNING, _('{0} results could not be processed').format(skipped + errors))\n task_utils.report(os.path.join(request['folder'], '__report.json'), created, skipped, errors, errors_reasons, skipped_reasons)\n\n\ndef create_layer_file(input_items, meta_folder, voyager_server, hdrs, show_progress=False):\n \"\"\"Creates a layer for input items in the appropriate meta folders.\"\"\"\n created = 0\n skipped = 0\n errors = 0\n global processed_count\n\n for input_item in input_items:\n try:\n lyr = None\n id = input_item[0]\n path = input_item[1]\n name = input_item[2]\n location = 
input_item[3]\n layer_folder = os.path.join(meta_folder, id[0], id[1:4])\n lyr_mxd = arcpy.mapping.MapDocument(mxd_path)\n dsc = arcpy.Describe(path)\n\n # Create layer folder if it does not exist.\n if not os.path.exists(layer_folder):\n os.makedirs(layer_folder)\n\n if not os.path.exists(os.path.join(layer_folder, '{0}.layer.lyr'.format(id))):\n # os.makedirs(layer_folder)\n try:\n if dsc.dataType in ('FeatureClass', 'Shapefile', 'ShapeFile'):\n feature_layer = arcpy.MakeFeatureLayer_management(path, os.path.basename(path))\n lyr = arcpy.SaveToLayerFile_management(feature_layer, os.path.join(layer_folder, '{0}.layer.lyr'.format(id)))\n elif dsc.dataType == 'RasterDataset':\n raster_layer = arcpy.MakeRasterLayer_management(path, os.path.splitext(os.path.basename(path))[0])\n lyr = arcpy.SaveToLayerFile_management(raster_layer, os.path.join(layer_folder, '{0}.layer.lyr'.format(id)))\n elif dsc.dataType in ('CadDrawingDataset', 'FeatureDataset'):\n arcpy.env.workspace = path\n lyr_mxd = arcpy.mapping.MapDocument(mxd_path)\n data_frame = arcpy.mapping.ListDataFrames(lyr_mxd)[0]\n group_layer = arcpy.mapping.ListLayers(lyr_mxd, 'Group Layer', data_frame)[0]\n for fc in arcpy.ListFeatureClasses():\n dataset_name = os.path.splitext(os.path.basename(path))[0]\n l = arcpy.MakeFeatureLayer_management(fc, '{0}_{1}'.format(dataset_name, os.path.basename(fc)))\n arcpy.mapping.AddLayerToGroup(data_frame, group_layer, l.getOutput(0))\n arcpy.ResetEnvironments()\n group_layer.saveACopy(os.path.join(layer_folder, '{0}.layer.lyr'.format(id)))\n lyr = '{0}.layer.lyr'.format(id)\n elif dsc.catalogPath.lower().endswith('.tab') or dsc.catalogPath.lower().endswith('.mif'):\n arcpy.ImportToolbox(r\"C:\\Program Files (x86)\\DataEast\\TAB Reader\\Toolbox\\TAB Reader.tbx\")\n lyr = arcpy.GPTabsToArcGis_TR(dsc.catalogPath, False, '', True, True, os.path.join(layer_folder, '{0}.layer.lyr'.format(id)))\n else:\n skipped += 1\n status_writer.send_status(_('Invalid input type: {0}').format(dsc.name))\n skipped_reasons[name] = _('Invalid input type: {0}').format(dsc.dataType)\n continue\n except arcpy.ExecuteError:\n errors += 1\n status_writer.send_status(arcpy.GetMessages(2))\n errors_reasons[name] = arcpy.GetMessages(2)\n continue\n except RuntimeError as re:\n errors += 1\n status_writer.send_status(re.message)\n errors_reasons[name] = re.message\n continue\n except AssertionError as ae:\n status_writer.send_status(_('FAIL: {0}. 
MXD - {1}').format(repr(ae), mxd_path))\n else:\n lyr = os.path.join(layer_folder, '{0}.layer.lyr'.format(id))\n created += 1\n\n # Update the index.\n if lyr:\n try:\n update_index(path, lyr, id, name, location, voyager_server, hdrs)\n except (IndexError, ImportError) as ex:\n status_writer.send_state(status.STAT_FAILED, ex)\n processed_count += 1\n status_writer.send_percent(processed_count / result_count, _('Created: {0}').format('{0}.layer.lyr'.format(id)), 'create_layer_file')\n except IOError as io_err:\n processed_count += 1\n status_writer.send_percent(processed_count / result_count, _('Skipped: {0}').format(input_item), 'create_layer_file')\n status_writer.send_status(_('FAIL: {0}').format(repr(io_err)))\n errors_reasons[input_item] = repr(io_err)\n errors += 1\n pass\n return created, errors, skipped\n","sub_path":"processing/tasks/create_layer_files.py","file_name":"create_layer_files.py","file_ext":"py","file_size_in_byte":10064,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"382329015","text":"import asyncio\nimport platform\nimport sys\nfrom typing import Optional\n\nfrom tarpn.app.runner import Context\n\n\nclass ChatApplication:\n \"\"\"\n Two parts to this, the CHAT server and CHAT clients.\n\n The server is configured with a static list of neighbors to forward chat messages to.\n It will send periodic keep-alive messages to its neighbors with the current BPQChatServer\n version (6.0.14.12).\n\n CHAT clients can be any call sign, but only one instance of that call sign is supported\n on the network at once. When joining, the server will send out a \"join\" request for the\n user that connected.\n\n JK4DBZ-10 KM4NKU David from Python 3.7\n\n And once a topic is set, that will be send as well\n\n TK4DBZ-10 KM4NKU General\n\n\n\n Here are the client commands:\n 02:39 PM: Commands can be in upper or lower case.\n 02:39 PM: /U - Show Users.\n 02:39 PM: /N - Enter your Name.\n 02:39 PM: /Q - Enter your QTH.\n 02:39 PM: /T - Show Topics.\n 02:39 PM: /T Name - Join Topic or Create new Topic. 
Topic Names are not case sensitive\n 02:39 PM: /P - Show Ports and Links.\n 02:39 PM: /A - Toggle Alert on user join - Disabled.\n 02:39 PM: /C - Toggle Colour Mode on or off (only works on Console or BPQTerminal - Disabled.\n 02:39 PM: /Codepage CPnnnn - Set Codepage to use if UTF-9 is disabled.\n 02:39 PM: /E - Toggle Echo - Enabled .\n 02:39 PM: /Keepalive - Toggle sending Keepalive messages every 10 minutes - Disabled.\n 02:39 PM: /ShowNames - Toggle displaying name as well as call on each message - Disabled\n 02:39 PM: /Auto - Toggle Automatic character set selection - Disabled.\n 02:39 PM: /UTF-8 - Character set Selection - UTF-8.\n 02:39 PM: /Time - Toggle displaying timestamp on each message - Disabled.\n 02:39 PM: /S CALL Text - Send Text to that station only.\n 02:39 PM: /F - Force all links to be made.\n 02:39 PM: /K - Show Known nodes.\n 02:39 PM: /B - Leave Chat and return to node.\n 02:39 PM: /QUIT - Leave Chat and disconnect from node.\n \"\"\"\n keep_alive_thread: Optional = None\n\n def __init__(self, context: Context, environ, *args, **kwargs):\n self.context = context\n self.connected_chats = []\n self.keep_alive_chats = []\n asyncio.create_task(self._keep_alive())\n\n async def _keep_alive(self):\n await asyncio.sleep(3) # Initial delay\n while True:\n for other_chat in self.keep_alive_chats:\n self.context.write(other_chat, b\"\\x01KK4DBZ-10 K4DBZ-9 6.0.14.12\\r\")\n await asyncio.sleep(60)\n\n def on_connect(self, address):\n print(f\"CHAT got connection from {address}\")\n self.connected_chats.append(address)\n\n def on_disconnect(self, address):\n print(f\"CHAT got disconnected from {address}\")\n self.connected_chats.remove(address)\n self.keep_alive_chats.remove(address)\n\n def on_data(self, address, data):\n print(f\"CHAT got data from {address}: {repr(data)}\")\n lines = data.split(b\"\\r\")\n for line in lines:\n if len(line) == 0:\n continue\n if line == b\"*RTL\":\n # Remote station trying to connect, need to reply\n resp = b\"[BPQChatServer-6.0.14.12]\\rOK\\r\"\n self.context.write(address, resp)\n continue\n if line[0] == 1:\n inst = chr(line[1])\n rem = line[2:]\n if inst == \"K\":\n print(f\"CHAT keepalive\")\n elif inst == \"D\":\n msg = rem.decode(\"ASCII\")\n print(f\"CHAT data: {msg}\")\n elif inst == \"J\":\n if rem.startswith(b\"K4DBZ-9 K4DBZ\"):\n resp = b\"\\x01JK4DBZ-10 KM4NKU David from Python 3.7\\r\\x01TK4DBZ-10 KM4NKU General\\r\"\n self.context.write(address, resp)\n self.keep_alive_chats.append(address)\n else:\n msg = repr(rem)\n print(f\"CHAT join {msg}\")\n elif inst == \"S\":\n msg = rem.decode(\"ASCII\")\n print(f\"Direct Message {msg}\")\n parts = msg.split(\" \")\n message_origin = parts[0]\n message_user = parts[1]\n message_target = parts[2]\n message = \" \".join(parts[3:])\n if message == \"version\":\n resp_msg = f\"SK4DBZ-10 {message_target} {message_user} {sys.version}\"\n elif message == \"os\":\n resp_msg = f\"SK4DBZ-10 {message_target} {message_user} {platform.system()} \" \\\n f\"{platform.machine()} {platform.release()}\"\n else:\n resp_msg = f\"SK4DBZ-10 {message_target} {message_user} Unknown command '{message}'\"\n self.context.write(address, b\"\\x01\" + resp_msg.encode(\"ASCII\") + b\"\\r\")\n\n else:\n msg = repr(rem)\n print(f\"CHAT unknown instruction {inst}: {msg}\")\n else:\n msg = repr(line)\n print(f\"CHAT unknown: 
{msg}\")\n","sub_path":"tarpn/app/chat.py","file_name":"chat.py","file_ext":"py","file_size_in_byte":5248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"626620326","text":"\"\"\"\n此文件包含 faq 引擎请求帮助函数\n\"\"\"\nimport json\nimport requests\n\nfrom config import global_config\nfrom utils.funcs import post_rpc\nfrom utils.define import UNK\n\nFAQ_ENGINE_ADDR = global_config['faq_engine_addr']\n\n__all__ = [\"faq_update\", \"faq_delete\", \"faq_delete_all\", \"faq_ask\"]\n\n\ndef faq_update(robot_id, data):\n \"\"\"添加或者更新faq语料数据\n\n Args:\n robot_id (str): 机器人的唯一标识。\n data (list): 需要存储的问题数据。\n\n Return:\n response (dict): faq服务器返回的信息\n\n Examples:\n >>> robot_id = \"doctest_id\"\n >>> data = [\n ... {\n ... \"faq_id\": \"id1\",\n ... \"title\": \"苹果手机多少钱\",\n ... \"similar_questions\": [\n ... \"Apple手机多少钱\",\n ... \"iphone多少钱\"\n ... ],\n ... \"related_quesions\": [\n ... \"ipad多少钱\",\n ... \"iwatch多少钱\"\n ... ],\n ... \"key_words\": [\n ... \"苹果\",\n ... \"Apple\",\n ... \"iphone\"\n ... ],\n ... \"effective_time\": \"2020-12-31\",\n ... \"tags\": [\n ... \"手机\",\n ... \"电子产品\"\n ... ],\n ... \"answer\": \"5400元\",\n ... \"catagory\": \"电子产品价格\"\n ... }\n ... ]\n >>> faq_update(robot_id, data)\n {'status_code': 0}\n \"\"\"\n url = \"http://{}/robot_manager/single/add_items\".format(FAQ_ENGINE_ADDR)\n\n documents = []\n for item in data:\n doc = {\n \"answer\": json.dumps(item, ensure_ascii=False),\n \"question\": item[\"title\"],\n \"id\": item[\"faq_id\"],\n \"answer_id\": item[\"faq_id\"]\n }\n documents.append(doc)\n\n request_data = {\n \"documents\": documents,\n \"robot_id\": robot_id\n }\n return post_rpc(url, request_data)\n\n\ndef faq_delete(robot_id, data):\n \"\"\"删除faq引擎中的语料数据\n\n Args:\n robot_id (str): 机器人的唯一标识。\n data (dict): 请求参数,里面包含需要删除的语料id\n\n Examples:\n >>> robot_id = \"doctest_id\"\n >>> qids = {\"faq_ids\": [\"id1\"]}\n >>> faq_delete(robot_id, qids)\n {'status_code': 0}\n \"\"\"\n url = \"http://{}/robot_manager/single/delete_items\".format(FAQ_ENGINE_ADDR)\n q_ids = data[\"faq_ids\"]\n request_data = {\n \"q_ids\": q_ids,\n \"robot_code\": robot_id\n }\n return post_rpc(url, request_data)\n\n\ndef faq_delete_all(robot_id):\n \"\"\"删除特定机器人的所有语料\n\n Args:\n robot_id (str): 机器人的唯一标识\n\n Examples:\n >>> robot_id = \"doctest_id\"\n >>> faq_delete_all(robot_id)\n {'status_code': 0}\n \"\"\"\n url = \"http://{}/robot_manager/single/delete_robot\".format(FAQ_ENGINE_ADDR)\n request_data = {\n \"robot_code\": robot_id\n }\n return post_rpc(url, request_data)\n\n\ndef faq_ask(robot_id, question, raw=False):\n \"\"\"向faq引擎提问\n Args:\n robot_id (str): 机器人的唯一标识\n question (str): 向机器人提问的问题\n raw (bool, optional): 返回faq引擎的原始数据,还是返回解析后的答案数据。Default is False\n Examples:\n >>> robot_id = \"doctest_id\"\n >>> question = \"你好\"\n >>> answer = faq_ask(robot_id, question)\n >>> isinstance(answer, dict)\n True\n \"\"\"\n url = \"http://{}/robot_manager/single/ask\".format(FAQ_ENGINE_ADDR)\n request_data = {\n \"robot_code\": robot_id,\n \"question\": question\n }\n response_data = post_rpc(url, request_data)\n\n if raw:\n return response_data\n\n elif response_data[\"answer_type\"] == -1:\n return {\n \"faq_id\": UNK,\n \"title\": \"\",\n \"similar_questions\": [],\n \"related_quesions\": [],\n \"key_words\": [],\n \"effective_time\": \"\",\n \"tags\": [],\n \"answer\": response_data[\"answer\"],\n \"catagory\": \"\"\n }\n else:\n return 
json.loads(response_data[\"answer\"])\n","sub_path":"backend/faq/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":4228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"557619109","text":"# -*- coding: utf-8 -*-\n# @File : test.py\n# @Author: Panbo\n# @Date : 2018/12/19\n# @Desc : 泰坦尼克号,灾难问题。\nimport pandas as pd #数据分析\nimport numpy as np #科学计算\nfrom sklearn import linear_model\nfrom sklearn.model_selection import StratifiedKFold\n\n\n#显示所有列\npd.set_option('display.max_columns', None)\n#显示所有行\npd.set_option('display.max_rows', None)\n\ntitanic = pd.read_csv(\"D://studyPythonMl//kaggle_competition//Titannic//source_data//train.csv\")\n'''\nprint( titanic.describe()) #std代表方差,Age中存在缺失\n#以下操作为对数据进行预处理\n#算法大多是矩阵运算,不能��在缺失值,用均值来填充缺失值\ntitanic[\"Age\"] = titanic[\"Age\"].fillna(titanic[\"Age\"].median())\n\nprint( titanic.describe())\n'''\n\n#sex是字符串,无法进行计算,将它转成数字,用0代表man,1代表female\nprint(titanic[\"Sex\"].unique())\n\ntitanic.loc[titanic[\"Sex\"]==\"male\",\"Sex\"] = 0\ntitanic.loc[titanic[\"Sex\"]==\"female\",\"Sex\"] = 1\n\n\n#登船的地点也是字符串,需要变换成数字,并填充缺失值\nprint (titanic[\"Embarked\"].unique())\ntitanic[\"Embarked\"] = titanic[\"Embarked\"].fillna('S')\n#loc通过索引获取数据\ntitanic.loc[titanic[\"Embarked\"]==\"S\",\"Embarked\"] = 0\ntitanic.loc[titanic[\"Embarked\"]==\"C\",\"Embarked\"] = 1\ntitanic.loc[titanic[\"Embarked\"]==\"Q\",\"Embarked\"] = 2\n\npredictors = [\"Pclass\", \"Sex\", \"Age\", \"SibSp\", \"Parch\", \"Fare\", \"Embarked\"]\n\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.cross_validation import KFold\n\npredictors = [\"Pclass\", \"Sex\", \"Age\", \"SibSp\", \"Parch\", \"Fare\", \"Embarked\"]\nalg = LinearRegression()\nkf = KFold(titanic.shape[0], n_folds=3, random_state=1)\n\npredictions = []\nfor train, test in kf:\n # print(train) # 297-890 | 0-296 594-890 | 0-593\n # print(test) # 0-296 | 297-593 | 594-890\n X_train = titanic[predictors].iloc[train, :]\n y_train = titanic[\"Survived\"].iloc[train]\n alg.fit(X_train, y_train)\n X_test = titanic[predictors].iloc[test, :]\n y_test = alg.predict(X_test)\n predictions.append(y_test)\n\nimport numpy as np\n\npredictions = np.concatenate(predictions)\npredictions[predictions > .5] = 1\npredictions[predictions <= .5] = 0\n\naccuracy = sum(predictions[predictions == titanic[\"Survived\"]]) / len(predictions)\nprint(accuracy)\n","sub_path":"dateDeal.py","file_name":"dateDeal.py","file_ext":"py","file_size_in_byte":2298,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"325270409","text":"# %load q04_spearman_correlation/build.py\n# Default Import\nimport pandas as pd\n\ndataframe_1 = pd.read_csv('data/house_prices_multivariate.csv')\ndataframe_2 = pd.read_csv('data/house_prices_copy.csv')\n\n# Your code here\ndef spearman_correlation():\n df=dataframe_1.loc[:,['SalePrice']].rank()\n df['SalePrice2']=dataframe_2.loc[:,['SalePrice']].rank()\n df['d']=df['SalePrice']-df['SalePrice2']\n df['dsq']=df['d']**2\n sum_dsq=df['dsq'].sum()\n count_dsq=df['dsq'].count()\n x = 1-((6 * sum_dsq)/(count_dsq*(count_dsq**2-1)))\n return x\n\n#spearman_correlation()\n\n","sub_path":"q04_spearman_correlation/build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":578,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"611311955","text":"from client.client import DefaultClient\nfrom client.request import Request\nfrom common import constant\n\n\nclass 
FyClient(DefaultClient):\n def __init__(self, app_key, app_secret, project_res, time_out=None, host='https://api.link.aliyun.com'):\n super(FyClient, self).__init__(app_key=app_key, app_secret=app_secret, time_out=time_out)\n self.__project_res = project_res\n self.host = host\n\n def _execute(self, url, params=None, token=None, version=None):\n fy_request = Request()\n fy_request.set_host(self.host)\n fy_request.set_method('post')\n fy_request.set_protocol(constant.HTTPS)\n fy_request.set_content_type(constant.CONTENT_TYPE_JSON)\n fy_request.set_url(url)\n fy_request.set_cloud_token(token)\n fy_request.set_params(params)\n fy_request.format_params()\n if version:\n fy_request.set_api_ver(version)\n return super().execute(fy_request)\n\n def get_token(self, version='1.0.0'):\n \"\"\"\n Get a token\n :param version:\n :return:\n \"\"\"\n url = '/cloud/token'\n params = {\n 'grantType': 'project',\n 'res': self.__project_res\n }\n return self._execute(url, params)\n\n def refresh_token(self, token, version='1.0.0'):\n \"\"\"\n Refresh a token\n :param token:\n :return:\n \"\"\"\n url = '/cloud/token/refresh'\n params = {'cloudToken': token}\n return self._execute(url, params, token)\n\n\nif __name__ == '__main__':\n FEIYAN_TMALL_API_APP_KEY = '24937007'\n FEIYAN_TMALL_API_APP_SECRET = '9520bff041dadeec3273f40a78e02e50'\n FEIYAN_TMALL_PROJECT_ID = 'a124GqLOvh5l4JTM'\n S100_PRODUCT_KEY = 'a1sPQ2SRvUt'\n\n client = FyClient(app_key=FEIYAN_TMALL_API_APP_KEY, app_secret=FEIYAN_TMALL_API_APP_SECRET,\n project_res=FEIYAN_TMALL_PROJECT_ID)\n token = client.get_token()\n\n\n\n\n","sub_path":"fy_client.py","file_name":"fy_client.py","file_ext":"py","file_size_in_byte":1933,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"41712123","text":"\"\"\"\nProgram will compare different ways to use queues\n\"\"\"\n\nimport collections\nimport random\nimport time\n\ndata = random.sample(range(10**6), k = 10**6)\n\ndef tester(times):\n c = collections.deque()\n start = time.perf_counter()\n for _ in range(times):\n c.append(random.choice(data))\n\n for _ in range(times):\n if random.random() < 0.5:\n c.popleft() \n else:\n c.append(random.choice(data))\n elapsed = time.perf_counter() - start\n print(elapsed)\n\n","sub_path":"algos/queue_tester.py","file_name":"queue_tester.py","file_ext":"py","file_size_in_byte":501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"124783519","text":"import matplotlib.pyplot as plt\nfrom tkinter import *\nimport numpy as np\nroot = Tk()\n\n# function for the first task\ndef task1_def():\n # creating the rectangles\n plt.plot([10, 80, 80, 10, 10], [60, 60, 0, 0, 60], color=\"cyan\", linewidth=3)\n plt.plot([15, 75, 75, 15, 15], [55, 55, 5, 5, 55], color=\"blue\", linewidth=3)\n plt.plot([20, 70, 70, 20, 20], [50, 50, 10, 10, 50], color=\"red\", linewidth=3)\n plt.plot([25, 65, 65, 25, 25], [45, 45, 15, 15, 45], color=\"darkorange\", linewidth=3)\n plt.plot([30, 60, 60, 30, 30], [40, 40, 20, 20, 40], color=\"yellow\", linewidth=3)\n\n# show on screen\n plt.show()\n\n\ndef task2_1_def():\n x1 = [50, 90, 70, 50]# coordinates of the logo triangles\n y1 = [110, 110, 60, 110]\n x2 = [20, 70, 20, 20]\n y2 = [80, 60, 40, 80]\n x3 = [50, 90, 70, 50]\n y3 = [10, 10, 60, 10]\n x4 = [120, 120, 70, 120]\n y4 = [80, 40, 60, 80]\n# design\n plt.fill(x1, y1, linewidth=3, edgecolor=\"darkred\", facecolor=\"red\")\n plt.fill(x2, y2, linewidth=3, edgecolor=\"darkorange\", facecolor=\"orange\")\n plt.fill(x3, 
y3, linewidth=3, edgecolor=\"blue\", facecolor=\"deepskyblue\")\n plt.fill(x4, y4, linewidth=3, edgecolor=\"green\", facecolor=\"lime\")\n\n# show on the screen\n plt.show()\n\n# function for the 2nd task\ndef task2_2_def():\n x1 = [50, 90, 70, 50]# coordinates\n y1 = [110, 110, 60, 110]\n x2 = [20, 70, 20, 20]\n y2 = [80, 60, 40, 80]\n x3 = [50, 90, 70, 50]\n y3 = [10, 10, 60, 10]\n x4 = [120, 120, 70, 120]\n y4 = [80, 40, 60, 80]\n fig, ax = plt.subplots()\n\n ax.fill(x1, y1, \"green\")\n ax.fill(x2, y2, \"green\")\n ax.fill(x3, y3, \"green\")\n ax.fill(x4, y4, \"green\")\n\n fig.set_figwidth(6) # width and height\n fig.set_figheight(5)\n fig.set_facecolor('floralwhite')\n ax.set_facecolor('seashell')\n\n plt.show()\n plt.show()\n\n# function for creating graphics\ndef task3_1_def():\n ax = plt.gca()\n ax.spines['left'].set_position('center')\n ax.spines['bottom'].set_position('center')\n ax.spines['top'].set_visible(False)\n ax.spines['right'].set_visible(False)\n ax.xaxis.set_label_coords(0.99, 0.48)\n ax.yaxis.set_label_coords(0.55, 0.99)\n # Independent (x) and dependent (y) variables\n plt.xlabel(\"x\", fontsize=14) # x-axis\n plt.ylabel(\"y\", fontsize=14) # y-axis\n x = np.linspace(-2*np.pi, 2*np.pi, 100)\n y = 0.01 * 1 * np.sin(x)\n\n plt.plot(x, y, color=\"green\")\n plt.show()\n\n# function for the third task\ndef task3_2_def():\n ax = plt.gca()\n ax.spines['left'].set_position('center')\n ax.spines['bottom'].set_position('center')\n ax.spines['top'].set_visible(False)\n ax.spines['right'].set_visible(False)\n ax.xaxis.set_label_coords(0.99, 0.48)\n ax.yaxis.set_label_coords(0.55, 0.99)\n # Independent (x) and dependent (y) variables\n plt.xlabel(\"x\", fontsize=14) # x-axis\n plt.ylabel(\"y\", fontsize=14) # y-axis\n x = np.linspace(-2 * np.pi, 2 * np.pi, 100)\n y = (1+3) * np.sin(x)\n\n plt.plot(x, y, color=\"green\")\n plt.show()\n\n# function for the third task\ndef task3_3_def():\n ax = plt.gca()\n ax.spines['left'].set_position('center')\n ax.spines['bottom'].set_position('center')\n ax.spines['top'].set_visible(False)\n ax.spines['right'].set_visible(False)\n ax.xaxis.set_label_coords(0.99, 0.48)\n ax.yaxis.set_label_coords(0.55, 0.99)\n # Independent (x) and dependent (y) variables\n plt.xlabel(\"x\", fontsize=14) # x-axis\n plt.ylabel(\"y\", fontsize=14) # y-axis\n x = np.linspace(-2 * np.pi, 2 * np.pi, 100)\n y = 0.01 * 1 * np.cos(x)\n\n plt.plot(x, y, color=\"green\")\n plt.show()\n\n# function for the third task\ndef task3_4_def():\n ax = plt.gca()\n ax.spines['left'].set_position('center')\n ax.spines['bottom'].set_position('center')\n ax.spines['top'].set_visible(False)\n ax.spines['right'].set_visible(False)\n ax.xaxis.set_label_coords(0.99, 0.48)\n ax.yaxis.set_label_coords(0.55, 0.99)\n # Choose evenly spaced x intervals\n x3 = np.linspace(-2*np.pi, 2*np.pi, 666)\n y3 = (1+3) * np.sin(x3)\n y3[np.abs(np.cos(x3)) <= np.abs(np.sin(x3[1]-x3[0]))] = np.nan\n plt.plot(x3, y3, color=\"magenta\")\n\n # Independent (x) and dependent (y) variables\n\n plt.xlabel(\"x\", fontsize=14) # x-axis\n plt.ylabel(\"y\", fontsize=14) # y-axis\n\n x1 = np.linspace(-2 * np.pi, 2 * np.pi, 100)\n y1 = 0.01 * 1 * np.cos(x1)\n\n plt.plot(x1, y1)\n\n x2 = np.linspace(-2 * np.pi, 2 * np.pi, 100)\n y2 = 0.01 * 1 * np.sin(x2)\n\n plt.plot(x2, y2, color=\"green\")\n\n plt.show()\n\n# buttons for showing the solved tasks\n\ntask1_button = Button(root, text=\"Task 1\", command=task1_def)\ntask2_1_button = Button(root, text=\"Task 2.1\", 
command=task2_1_def)\ntask2_2_button = Button(root, text=\"Task 2.2\", command=task2_2_def)\ntask3_1_button = Button(root, text=\"Task 3.1\", command=task3_1_def)\ntask3_2_button = Button(root, text=\"Task 3.2\", command=task3_2_def)\ntask3_3_button = Button(root, text=\"Task 3.3\", command=task3_3_def)\ntask3_4_button = Button(root, text=\"Task 3.4\", command=task3_4_def)\n\n\n\ntask1_button.pack()\ntask2_1_button.pack()\ntask2_2_button.pack()\ntask3_1_button.pack()\ntask3_2_button.pack()\ntask3_3_button.pack()\ntask3_4_button.pack()\nroot.mainloop()","sub_path":"compgraph_lab1_3.py","file_name":"compgraph_lab1_3.py","file_ext":"py","file_size_in_byte":5486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"138043792","text":"import PTVS\nimport sys\n\n_MODULE_MAP = {\n 'ptvs': PTVS\n}\n\nclass IncompleteKeyListError(Exception):\n \"\"\"Raised when a url is being extracted but the key list does not point\n to a url.\n \"\"\"\n def __init__(self, msg, **kwargs):\n self.message = self.msg = msg\n print(msg)\n return super().__init__(**kwargs)\n\nclass InfoManager(object):\n \"\"\"Handles access to information data for LuisInterpreters.\"\"\"\n def __init__(self, moduleName):\n self.update_mod(moduleName)\n\n def update_mod(self, mod):\n \"\"\"Sets the InfoManager's module, which is the object's access\n to information.\n \"\"\"\n try:\n self.links = _MODULE_MAP[mod].LINKS\n self.key_map = _MODULE_MAP[mod].KEY_MAP\n self.name = _MODULE_MAP[mod].NAME\n self._mod = mod\n except KeyError:\n raise ValueError(\"Cannot find module named: {0}\".format(mod))\n\n def find_path_to_trigger_key(self, literal, path=None, dic=None):\n \"\"\"When literal is found to be a trigger, a list of keys that will lead to\n the key the literal matches is returned. Otherwise, False is returned.\n \"\"\"\n d = dic if dic else self.key_map\n p = path if path else []\n for k, v in d.items():\n # See if any keys are triggered by literal in this dict or any of its descendants.\n if isinstance(v, dict): # A key's triggers are always in its value's dictionary.\n try:\n if literal.lower() in v['Triggers']:\n # We found a key in this dict.\n p.append(k)\n return p\n else:\n # This key wasn't triggered, but...\n p.append(k) # ...maybe a descendant's key will be triggered.\n p = self.find_path_to_trigger_key(literal, p, v) # ... let's check.\n except KeyError:\n # A dictionary might not have 'Triggers' as a key.\n pass \n # If no key was triggered in any descendants, p will be the same, and False.\n if p == path or not p:\n # Path stays the same or is reset, depending on the \n # depth of the dictionary (v) we are searching.\n p = path if path else []\n # If a descendant's key was triggered by literal, p will have been changed.\n else:\n return p\n if p:\n # If we're here, no descendant's keys were triggered by literal.\n p.pop() # So remove this key from the path.\n return p or False # False when no key triggered in root dict.\n\n def traverse_keys(self, keys):\n \"\"\"Traverses the links dictionary of the current module by way of keys.\n Returns the value of the deepest key in keys.\n \"\"\"\n v = self.links # Start at the root of the links dict.\n # Find the final value pointed to by keys.\n if isinstance(keys, list):\n for key in keys:\n v = v[key]\n elif isinstance(keys, str):\n v = v[keys]\n return v\n\nclass ProjectSystemInfoManager(InfoManager):\n \"\"\" A derived class from InfoManager. 
Has some logic that is coupled with\n project systems as a whole.\n \"\"\"\n\n def get_url_description(self, url):\n \"\"\"Grabs the last filename, which is usually the most descriptive of\n a link, from url. If url matches the expected format, it will extract\n and reformat the name for printing. If url does not match, False is \n returned. The following is an example of the expected format for\n url: \"https://github.com/Microsoft/PTVS/wiki\". In this case 'wiki'\n is returned.\n \"\"\"\n last = url.rfind('/')\n if 0 <= last:\n s = url[last + 1:]\n s = s.replace('-', ' ')\n s = s.replace('#', ': ')\n return s or False\n\n\n def set_from_key_values(self, dic=None, set_=None, k_to_collect=None):\n \"\"\"Creates a set of all the values for every key matches the string k_to_collect. This can be used\n to create Phrase List Features on the fly after creating an info file that adheres\n to the specifications. It works by searching the key map (or any dict, for that matter) for\n all values of dic['k_to_collect'] and adding them to the the set of those already found.\n You can use this set to seed your Luis app programatically.\n \"\"\"\n d = dic if dic else self.key_map\n set_ = set_ if set_ else set(())\n k_to_collect = k_to_collect if k_to_collect else 'Triggers'\n\n for k, v in d.items():\n if isinstance(v, dict):\n try:\n t = v[k_to_collect]\n if t:\n set_ |= t\n except KeyError:\n # set_ stays the same.\n pass\n finally:\n set_ = self.set_from_key_values(v, set_, k_to_collect)\n return set_\n\n def gen_file_from_info(self, filename, func, **kwargs):\n \"\"\"Iterates over the return value of func called with kwargs passed in\n as the parameters. Filename should be a .txt file. Values will be\n added to the file delimited by a comma.\n \"\"\"\n fn = filename if filename.endswith('.txt') else filename + '.txt'\n with open(fn, 'w') as fd:\n for el in func(kwargs):\n el += \",\"\n fd.write(el)\n \ndef main():\n im = ProjectSystemInfoManager('ptvs')\n print(im.set_from_key_values())\n im.gen_file_from_info('triggers_we_hope.txt', im.set_from_key_values)\n\nif __name__ == \"__main__\":\n sys.exit(int(main() or 0))\n","sub_path":"Minerva/Information/InfoManager.py","file_name":"InfoManager.py","file_ext":"py","file_size_in_byte":5837,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"474559484","text":"n=input()\ns=[]\nb=0\nw=0\nfor i in range(int(n)):\n t=input()\n s.append(t)\nres=list(map(int,s))\nlargest=res[0]\nfor large in res:\n if large > largest:\n largest=large\n b+=1\nsmallest=res[0]\nfor small in res:\n if small < smallest:\n smallest=small\n w+=1\nprint(b,w)","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":295,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"323555643","text":"from Bio import SeqIO\r\nfrom CAI import CAI, RSCU, relative_adaptiveness\r\n\r\nsequences = []\r\n\r\nfor seq_record in SeqIO.parse(\"RhodosporidiumToruloidesRPGenes.fasta\", \"fasta\"): #change file name #Change File Name depending on organism\r\n dnaSeq = str(seq_record.seq.lower())\r\n if len(dnaSeq)%3 !=0:\r\n print(seq_record.id)\r\n else:\r\n sequences.append(dnaSeq)\r\n\r\n\r\nweights = relative_adaptiveness(sequences=sequences)\r\nprint(weights)\r\n\r\n\r\n\r\nfor seq_record in SeqIO.parse(\"RhodosporidiumToruloidesRPGenes.fasta\", \"fasta\"): #changw file name depending on organism\r\n dnaSeq = str(seq_record.seq.lower())\r\n if len(dnaSeq)%3 !=0:\r\n 
print(seq_record.id)\r\n else:\r\n print(CAI(dnaSeq,weights=weights))\r\n\r\n\r\n","sub_path":"Data/CAIofRPs.py","file_name":"CAIofRPs.py","file_ext":"py","file_size_in_byte":753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"435950181","text":"import yaml\nfrom nose.tools import assert_equals\n\nfrom lib.installation import targets_from\nfrom lib.config_safe_loader import ConfigSafeLoader\n\n\ndef parse_targets(string_config, enabled=None):\n enabled = enabled if enabled else set()\n return list(targets_from(yaml.load(string_config, Loader=ConfigSafeLoader), enabled))\n\n\ndef test_targets_from_simple_cases():\n assert list(targets_from({}, set())) == []\n assert parse_targets(\"\") == []\n\n assert_equals(\n parse_targets(\"\"\"\nweasel:\n type: foo\n targets:\n - moo\n \"\"\"), [\n {'type': 'foo', 'name': 'moo', 'context': ['weasel']}\n ])\n\n\ndef test_targets_from_carries_hierarchy_config():\n assert_equals(\n parse_targets(\"\"\"\nweasel:\n base_config: \"weasel\"\n weasel_config: \"weasel\"\n baboon:\n type: foo\n base_config: \"baboon\" # overrides weasel\n targets:\n - ook\n \"\"\"), [\n {'type': 'foo', 'base_config': 'baboon', 'weasel_config': 'weasel',\n 'context': ['weasel', 'baboon'], 'name': 'ook'}\n ])\n\n\ndef test_codependent_configs():\n [target] = parse_targets(\"\"\"\ncompilers:\n gcc:\n check_exe: \"bin/{arch_prefix}/blah\"\n subdir: arm\n mips:\n arch_prefix: \"{subdir}-arch\"\n check_exe: \"{arch_prefix}/blah\"\n targets:\n - name: 5.4.0\n subdir: mips\n \"\"\")\n assert_equals(target['check_exe'], \"mips-arch/blah\")\n\n\ndef test_codependent_throws():\n try:\n parse_targets(\"\"\"\ncompilers:\n mips:\n x: \"{y}\"\n y: \"{x}\"\n targets:\n - name: 5.4.0\n subdir: mips\n \"\"\")\n assert False\n except RuntimeError as re:\n assert_equals(str(re), \"Too many mutual references (in compilers/mips)\")\n\n\ndef test_numbers_at_root():\n [target] = parse_targets(\"\"\"\ncompilers:\n num_to_keep: 2\n targets:\n - name: 5.4.0\n \"\"\")\n assert_equals(target['num_to_keep'], 2)\n\n\ndef test_numbers_at_leaf():\n [target] = parse_targets(\"\"\"\ncompilers:\n targets:\n - name: 5.4.0\n num_to_keep: 2\n \"\"\")\n assert_equals(target['num_to_keep'], 2)\n","sub_path":"bin/test/installation_test.py","file_name":"installation_test.py","file_ext":"py","file_size_in_byte":2072,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"149403566","text":"from ctypes import *\nimport platform\nimport os\n\nlib_ext = \"\"\nif os.name == 'nt':\n lib_ext = \"dll\"\nelif platform.system() == \"Linux\":\n lib_ext = \"so\"\nelse:\n exit(\"OS not supported.\")\n\ncdll.LoadLibrary(\"libbsvml.\" + lib_ext)\nbvl = CDLL(\"libbsvml.\" + lib_ext)\n\n# MODE\nSINGLE = 0\nDUAL = 1\nLOGIC = 2\nMIXED = 3\nSTREAM_SINGLE = 4\nSTREAM_DUAL = 5\nSTREAM_LOGIC = 6\nSTREAM_MIXED = 7\n\n# TRIGTYPE\nSALT = 0\nCOMP = 1\n\n# CHANNELS\nCHA = 0\nCHB = 1\nCHL = 128\n\n# LOGICCHANNELS\nL0 = 128\nL1 = 129\nL2 = 130\nL3 = 131\nL4 = 132\nL5 = 133\nL6 = 134\nL7 = 135\n\n# TRACECODES\nDONE = 0\nAUTO = 1\nWAIT = 2\nSTOP = 3\n\n# STATUS\nIDLE = 0\nSTREAMING = 1\nTRACING = 2\nUPDATING = 3\n\n# TRIGLOGIC\nFALL = 0\nRISE = 1\n\n# GEN FUNCTION\nSINE = 0\nTRIANGLE = 1\nEXPONENTIAL = 2\nSQUARE = 3\nARBITRARY = 4\n\n# BitScopeInfo Struct\n\nclass BitScopeInfo(Structure):\n _fields_ = [\n (\"port\", c_char_p),\n (\"model\", c_char_p),\n (\"uid\", c_char_p)\n ]\n\n# State\nmode = bvl.bv_mode\nrate = bvl.bv_rate\ndumpSize = 
bvl.bv_dumpSize\naddress = bvl.bv_address\nenableAnalogueChannel = bvl.bv_enableAnalogueChannel\ndisableAnalogueChannel = bvl.bv_disableAnalogueChannel\ndumpChannel = bvl.bv_dumpChannel\nrange = bvl.bv_range\noffset = bvl.bv_offset\nmacro = bvl.bv_macro\nunifiedDump = bvl.bv_unifiedDump\ntraceIntro = bvl.bv_traceIntro\ntraceOutro = bvl.bv_traceOutro\ntraceDelay = bvl.bv_traceDelay\ntraceTimeout = bvl.bv_traceTimeout\ntrigSource = bvl.bv_trigSource\ntrigSwap = bvl.bv_trigSwap\ntrigType = bvl.bv_trigType\ntrigChannelEnable = bvl.bv_trigChannelEnable\ntrigChannelEdge = bvl.bv_trigChannelEdge\ntrigIntro = bvl.bv_trigIntro\ntrigOutro = bvl.bv_trigOutro\ntrigValue = bvl.bv_trigValue\ntrigLevel = bvl.bv_trigLevel\n\n# Exec\nopenBitScope = bvl.bv_openBitScope\nupdateBitScope = bvl.bv_updateBitScope\ntrace = bvl.bv_trace\nasyncTrace = bvl.bv_asyncTrace\nacquire = bvl.bv_acquire\nstream = bvl.bv_stream\nstreamAcquire = bvl.bv_streamAcquire\ncancel = bvl.bv_cancel\nwait = bvl.bv_wait\n\n# Helpers\ngetBitScopeModel = bvl.bv_getBitScopeModel\ngetBitScopeId = bvl.bv_getBitScopeId\ngetStartAddress = bvl.bv_getStartAddress\ngetIntroStartAddress = bvl.bv_getIntroStartAddress\ngetBytesInDump = bvl.bv_getBytesInDump\nlistBitScopes = bvl.bv_listBitScopes\nfindBitScopes = bvl.bv_findBitScopes\nopenOneFromQueue = bvl.bv_openOneFromQueue\ngetMinTimeout = bvl.bv_getMinTimeout\ngetStatus = bvl.bv_getStatus\ngetTraceCode = bvl.bv_getTraceCode\n\n# Dump convertors\nconvertMacroTrace = bvl.bv_convertMacroTrace\nconvertMacroStream = bvl.bv_convertMacroStream\nsplitDualStream = bvl.bv_splitDualStream\nsplitDualMixedStream = bvl.bv_splitDualMixedStream\nsplitSingleMixedStream = bvl.bv_splitSingleMixedStream\nsplitDualTrace = bvl.bv_splitDualTrace\nsplitDualMixedTrace = bvl.bv_splitDualMixedTrace\nsplitSingleMixedTrace = bvl.bv_splitSingleMixedTrace\n\n# AWG\ngenFunction = bvl.bv_genFunction\ngenRate = bvl.bv_genRate\ngenSymmetry = bvl.bv_genSymmetry\ngenLevel = bvl.bv_genLevel\ngenOffset = bvl.bv_genOffset\ngenSize = bvl.bv_genSize\ngenAddress = bvl.bv_genAddress\nwriteWaveData = bvl.bv_writeWaveData\nupdateAWG = bvl.bv_updateAWG\n\n# Serial passthrough\nwrite = bvl.bv_write\nread = bvl.bv_read\n\n# Type information\n\nmode.argtypes = [c_int]\nmode.restype = c_int\n\nrate.argtypes = [c_int, c_float]\nrate.restype = c_float\n\ndumpSize.argtypes = [c_int, c_int]\ndumpSize.restype = c_int\n\nenableAnalogueChannel.argtypes = [c_int, c_int]\nenableAnalogueChannel.restype = c_bool\n\ndisableAnalogueChannel.argtypes = [c_int, c_int]\ndisableAnalogueChannel.restype = c_bool\n\ndumpChannel.argtypes = [c_int, c_int]\ndumpChannel.restype = c_int\n\nrange.argtypes = [c_int, c_float]\nrange.restype = c_float\n\noffset.argtypes = [c_int, c_float]\noffset.restype = c_float\n\nmacro.argtypes = [c_int, c_bool]\nmacro.restype = c_bool\n\nunifiedDump.argtypes = [c_int, c_bool]\nunifiedDump.restype = c_bool\n\ntraceIntro.argtypes = [c_int, c_int]\ntraceIntro.restype = c_int\n\ntraceOutro.argtypes = [c_int, c_int]\ntraceOutro.restype = c_int\n\ntraceDelay.argtypes = [c_int, c_float]\ntraceDelay.restype = c_float\n\ntraceTimeout.argtypes = [c_int, c_float]\ntraceTimeout.restype = c_float\n\ntrigSource.argtypes = [c_int, c_int]\ntrigSource.restype = c_int\n\ntrigSwap.argtypes = [c_int, c_bool]\ntrigSwap.restype = c_bool\n\ntrigType.argtypes = [c_int, c_bool]\ntrigType.restype = c_bool\n\ntrigChannelEnable.argtypes = [c_int, c_int, c_bool]\ntrigChannelEnable.restype = c_bool\n\ntrigChannelEdge.argtypes = [c_int, c_int, 
c_bool]\ntrigChannelEdge.restype = c_bool\n\ntrigIntro.argtypes = [c_int, c_int]\ntrigIntro.restype = c_int\n\ntrigOutro.argtypes = [c_int, c_int]\ntrigOutro.restype = c_int\n\ntrigLevel.argtypes = [c_int, c_float]\ntrigLevel.restype = c_float\n\ntrigValue.argtypes = [c_int, c_float]\ntrigValue.restype = c_float\n\nopenBitScope.argtypes = [c_char_p]\nopenBitScope.restype = c_int\n\nupdateBitScope.argtypes = [c_int]\nupdateBitScope.restype = c_bool\n\ntrace.argtypes = [c_int]\ntrace.restype = c_bool\n\nasyncTrace.argtypes = [c_int]\nasyncTrace.restype = c_bool\n\nacquire.argtypes = [c_int, POINTER(c_ubyte)]\nacquire.restype = c_bool\n\nstream.argtypes = [c_int]\nstream.restype = c_bool\n\nstreamAcquire.argtypes = [c_int, POINTER(c_ubyte)]\nstreamAcquire.restype = c_bool\n\ncancel.argtypes = [c_int]\ncancel.restype = c_bool\n\nwait.argtypes = [c_int]\nwait.restype = c_bool\n\ngetBitScopeModel.argtypes = [c_int]\ngetBitScopeModel.restype = c_char_p\n\ngetBitScopeId.argtypes = [c_int]\ngetBitScopeId.restype = c_char_p\n\ngetStartAddress.argtypes = [c_int]\ngetStartAddress.restype = c_int\n\ngetIntroStartAddress.argtypes = [c_int]\ngetIntroStartAddress.restype = c_int\n\ngetBytesInDump.argtypes = [c_int]\ngetBytesInDump.restype = c_int\n\nlistBitScopes.argtypes = [c_int, POINTER(BitScopeInfo)]\nlistBitScopes.restype = c_int\n\nfindBitScopes.argtypes = [c_int]\nfindBitScopes.restype = c_int\n\nopenOneFromQueue.argtypes = []\nopenOneFromQueue.restype = c_int\n\ngetMinTimeout.argtypes = [c_int]\ngetMinTimeout.restype = c_float\n\ngetStatus.argtypes = [c_int]\ngetStatus.restype = c_int\n\ngetTraceCode.argtypes = [c_int]\ngetTraceCode.restype = c_int\n\nconvertMacroTrace.argtypes = [POINTER(c_ubyte), POINTER(c_short), c_int]\n\nconvertMacroStream.argtypes = [POINTER(c_ubyte), POINTER(c_short), c_int]\n\nsplitDualStream.argtypes = [\n POINTER(c_ubyte), POINTER(c_ubyte),\n POINTER(c_ubyte), c_int, c_bool]\n\nsplitDualMixedStream.argtypes = [\n POINTER(c_ubyte), POINTER(c_ubyte),\n POINTER(c_ubyte), POINTER(c_ubyte),\n c_int]\n\nsplitSingleMixedStream.argtypes = [\n POINTER(c_ubyte), POINTER(c_ubyte),\n POINTER(c_ubyte), c_int]\n\nsplitDualTrace.argtypes = [\n POINTER(c_ubyte), POINTER(c_ubyte),\n POINTER(c_ubyte), c_int, c_bool]\n\nsplitDualMixedTrace.argtypes = [\n POINTER(c_ubyte), POINTER(c_ubyte),\n POINTER(c_ubyte), POINTER(c_ubyte), c_int]\n\nsplitSingleMixedTrace.argtypes = [\n POINTER(c_ubyte), POINTER(c_ubyte),\n POINTER(c_ubyte), c_int]\n\ngenFunction.argtypes = [c_int, c_int]\ngenFunction.restype = c_int\n\ngenRate.argtypes = [c_int, c_float]\ngenRate.restype = c_float\n\ngenSymmetry.argtypes = [c_int, c_float]\ngenSymmetry.restype = c_float\n\ngenLevel.argtypes = [c_int, c_float]\ngenLevel.restype = c_float\n\ngenOffset.argtypes = [c_int, c_float]\ngenOffset.restype = c_float\n\ngenSize.argtypes = [c_int, c_int]\ngenSize.restype = c_int\n\ngenAddress.argtypes = [c_int, c_int]\ngenAddress.restype = c_int\n\nwriteWaveData.argtypes = [c_int, POINTER(c_ubyte), c_int]\n\nupdateAWG.argtypes = [c_int]\nupdateAWG.restype = c_bool\n\nwrite.argtypes = [c_int, c_char_p, c_int]\nwrite.restype = c_int\n\nread.argtypes = [c_int, c_char_p, c_int, c_int]\nread.restype = c_int\n","sub_path":"py_bsvml.py","file_name":"py_bsvml.py","file_ext":"py","file_size_in_byte":7847,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"114894502","text":"# who = ['Jeff', 'Rachel', 'Gary', 'Steve']\n# how_many = [12, 3, 6, 9]\n# when = ['today', 'yesterday', 'Tuesday', 
'Last week']\n#\n# items = zip(who, how_many, when)\n#\n# for i in items:\n# print('{} bought {} apples on {}!'.format(i[0], i[1], i[2]))\n\nimport argparse\nimport sys\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument('--x', type=float, default=1.0,\n help='What is the first number?')\n parser.add_argument('--y', type=float, default=1.0,\n help='What is the second number?')\n parser.add_argument('--operation', type=str, default='add',\n help='What operation? Can choose add, sub, mul, or div')\n args = parser.parse_args()\n sys.stdout.write(str(calc(args)))\n\n\ndef calc(args):\n if args.operation == 'add':\n return args.x + args.y\n elif args.operation == 'sub':\n return args.x - args.y\n elif args.operation == 'mul':\n return args.x * args.y\n elif args.operation == 'div':\n return args.x / args.y\n\n\nif __name__ == '__main__':\n main()","sub_path":"python/intermediate_examples.py","file_name":"intermediate_examples.py","file_ext":"py","file_size_in_byte":1090,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"376133735","text":"import torch\nfrom torch.distributions.multivariate_normal import MultivariateNormal\n\nfrom filing_paths import path_model\nimport sys\nsys.path.insert(1, path_model)\nfrom parameters import delta_t, delta_t_gen, variance\n\n\nif torch.cuda.is_available():\n cuda0 = torch.device(\"cuda:0\") # you can continue going on here, like cuda:1 cuda:2....etc.\n torch.set_default_tensor_type('torch.cuda.FloatTensor')\nelse:\n cuda0 = torch.device(\"cpu\")\n print(\"Running on the CPU\")\n\nclass SystemModel:\n\n def __init__(self, f, q, h, r, T, T_test, m, n, modelname):\n\n ####################\n ### Motion Model ###\n ####################\n self.modelname = modelname\n\n self.f = f\n self.m = m\n\n self.q = q\n if self.modelname == 'pendulum':\n self.Q = q * q * torch.tensor([[(delta_t**3)/3, (delta_t**2)/2],\n [(delta_t**2)/2, delta_t]])\n elif self.modelname == 'pendulum_gen':\n self.Q = q * q * torch.tensor([[(delta_t_gen**3)/3, (delta_t_gen**2)/2],\n [(delta_t_gen**2)/2, delta_t_gen]])\n else:\n self.Q = q * q * torch.eye(self.m)\n\n \n\n #########################\n ### Observation Model ###\n #########################\n self.h = h\n self.n = n\n\n self.r = r\n self.R = r * r * torch.eye(self.n)\n\n #Assign T and T_test\n self.T = T\n self.T_test = T_test\n\n #####################\n ### Init Sequence ###\n #####################\n def InitSequence(self, m1x_0, m2x_0):\n\n self.m1x_0 = torch.squeeze(m1x_0).to(cuda0)\n self.m2x_0 = torch.squeeze(m2x_0).to(cuda0)\n\n\n #########################\n ### Update Covariance ###\n #########################\n def UpdateCovariance_Gain(self, q, r):\n\n self.q = q\n self.Q = q * q * torch.eye(self.m)\n\n self.r = r\n self.R = r * r * torch.eye(self.n)\n\n def UpdateCovariance_Matrix(self, Q, R):\n\n self.Q = Q\n\n self.R = R\n\n\n #########################\n ### Generate Sequence ###\n #########################\n def GenerateSequence(self, Q_gen, R_gen, T):\n # Pre allocate an array for current state\n self.x = torch.empty(size=[self.m, T])\n # Pre allocate an array for current observation\n self.y = torch.empty(size=[self.n, T])\n # Set x0 to be x previous\n self.x_prev = self.m1x_0\n\n # Generate Sequence Iteratively\n for t in range(0, T):\n ########################\n #### State Evolution ###\n ########################\n # Process Noise\n if self.q == 0:\n xt = self.f(self.x_prev) \n else:\n xt = self.f(self.x_prev)\n mean = 
torch.zeros([self.m])\n if self.modelname == \"pendulum\":\n distrib = MultivariateNormal(loc=mean, covariance_matrix=Q_gen)\n eq = distrib.rsample()\n else:\n eq = torch.normal(mean, self.q)\n \n # Additive Process Noise\n xt = torch.add(xt,eq)\n\n ################\n ### Emission ###\n ################\n yt = self.h(xt)\n\n # Observation Noise\n mean = torch.zeros([self.n])\n er = torch.normal(mean, self.r)\n # er = np.random.multivariate_normal(mean, R_gen, 1)\n # er = torch.transpose(torch.tensor(er), 0, 1)\n\n # Additive Observation Noise\n yt = torch.add(yt,er)\n\n ########################\n ### Squeeze to Array ###\n ########################\n\n # Save Current State to Trajectory Array\n self.x[:, t] = torch.squeeze(xt)\n\n # Save Current Observation to Trajectory Array\n self.y[:, t] = torch.squeeze(yt)\n\n ################################\n ### Save Current to Previous ###\n ################################\n self.x_prev = xt\n\n ######################\n ### Generate Batch ###\n ######################\n def GenerateBatch(self, size, T, randomInit=False):\n\n # Allocate Empty Array for Input\n self.Input = torch.empty(size, self.n, T)\n\n # Allocate Empty Array for Target\n self.Target = torch.empty(size, self.m, T)\n\n initConditions = self.m1x_0 \n\n ### Generate Examples\n for i in range(0, size):\n # Generate Sequence\n # Randomize initial conditions to get a rich dataset\n if(randomInit):\n initConditions = torch.rand_like(self.m1x_0) * variance\n self.InitSequence(initConditions, self.m2x_0)\n self.GenerateSequence(self.Q, self.R, T)\n\n # Training sequence input\n self.Input[i, :, :] = self.y\n\n # Training sequence output\n self.Target[i, :, :] = self.x\n\n","sub_path":"Extended_sysmdl.py","file_name":"Extended_sysmdl.py","file_ext":"py","file_size_in_byte":5039,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"439876204","text":"import cv2\nimport numpy as np \n\nimg_og = cv2.imread(\"test2.png\")\n\ngray = cv2.cvtColor(img_og, cv2.COLOR_BGR2GRAY)\ngray = cv2.blur(gray, (7,7))\nret, thresh = cv2.threshold(gray,120,255, cv2.THRESH_BINARY+cv2.THRESH_OTSU )\ncontours, hierarchy = cv2.findContours(thresh,cv2.RETR_LIST,cv2.CHAIN_APPROX_SIMPLE)\n#fin = cv2.drawContours(img_og, contours, -1, (0,255,0), 1)\n\n\n#ignore the words and boundary\ncont =[]\nm=[]\nfor c in contours:\n\tl=cv2.arcLength(c, True)\n\tif cv2.arcLength(c, True)>200 and cv2.arcLength(c, True)<600:\n\t\tcont.append(c)\n\t\tm.append(cv2.moments(c))\ncenter=[]\nfor moment in m:\n\tcenter.append((int(moment['m10']/moment['m00']),int(moment['m01']/moment['m00'] )))\n\n\nprint(len(cont))\nprint(center)\n\nfin2 = cv2.drawContours(img_og, cont, -1, (0,0,255), 1)\nfor c in center:\n\tcv2.circle(fin2, c, 5, (255,0,0), -1)\n\n#M=cv2.moments(contours[1])\n#cx = int(M['m10']/M['m00'])\n#cy = int(M['m01']/M['m00'])\n#print(M)\n#print((cx, cy))\n#fin[cx,cy]=(255,255,255)\n#cv2.imshow(\"all\", fin)\ncv2.imshow(\"selected\", fin2)\ncv2.waitKey(0)\ncv2.destroyAllWindows()","sub_path":"OpenCV/assig_4.py","file_name":"assig_4.py","file_ext":"py","file_size_in_byte":1054,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"177264634","text":"from api.common import mail\nfrom api.models.models import Feature\nfrom api.tests.unit.simplenight_test_case import SimplenightTestCase\n\n\nclass TestMail(SimplenightTestCase):\n def setUp(self) -> None:\n super().setUp()\n self.stub_feature(Feature.MAILGUN_API_KEY, 
\"4fc764e45639a2008a075f69a0706591-2fbe671d-1bc16189\")\n self.stub_feature(Feature.TEST_MODE, \"false\")\n\n def test_send_mail(self):\n template_name = \"order_confirmation\"\n subject = \"Simplenight Hotel Reservation\"\n recipient = \"James Morton\"\n to_email = \"james@simplenight.com\"\n\n params = {\n \"booking_id\": \"123\",\n \"order_currency_symbol\": \"$\",\n \"order_total\": \"100.00\",\n \"hotel_name\": \"Hotel Foo Bar\",\n \"hotel_sub_total\": \"80\",\n \"record_locator\": \"8848293472\",\n \"cancellation_policy\": \"Partial Refund\",\n \"hotel_address\": \"123 Main Street\",\n \"checkin\": \"4:00pm\",\n \"checkout\": \"12:00pm\",\n \"resort_fee\": \"0.00\",\n \"hotel_taxes\": \"20.00\",\n \"hotel_room_type\": \"Jr. Suite\",\n \"last_four\": \"1234\",\n \"order_base_rate\": \"80\",\n \"order_taxes\": \"20\"\n }\n\n mail.send_mail(template_name, subject, recipient, to_email, variables=params)\n","sub_path":"api/tests/online/test_mail.py","file_name":"test_mail.py","file_ext":"py","file_size_in_byte":1328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"386713162","text":"import pygame\nimport time\n\nfrom Player import Player\nfrom Block import Block\nfrom pygame.locals import *\nfrom CONSTANTS import *\n\npygame.init()\n\n### Spelers hebben coordinaten, zo weten we waar ze zijn.\n\nDISPLAYSURFACE = pygame.display.set_mode((800, 600))\nDISPLAYSURFACER = DISPLAYSURFACE.get_rect()\nDISPLAYSURFACE.fill((255, 255, 255))\nb_blocks = {}\nt_blocks = {}\nrunning = True\nclock = pygame.time.Clock()\nplayers = 4\ncurrent_player = 0\ncurrent_view_player = 0\nbp = Block(100,250,(0,0,0))\nmp = Block(250,90,(0,0,0))\nup = Block(50,250,(0,0,0))\n\n\ndef text_objects(text, font, color = WHITE):\n text_surface = font.render(text, True, color)\n return text_surface, text_surface.get_rect()\n\ndef display_text(display, size, text, color, pos):\n used_font = size\n text_surface, text_rectangle = text_object(text, used_font, color)\n text_rectangle.left, text_rectangle.top = pos\n display.blit(text_surface, text_rectangle)\n\ndef create_button(display, content, x, y, w, h, ic, ac, func=False, cargo=False):\n mouse = pygame.mouse.get_pos()\n if x + w > mouse[0] > x and y + h > mouse[1] > y:\n pygame.draw.rect(display, ac, [x,y,w,h])\n if func and pygame.mouse.get_pressed()[0] >= 1:\n # execute functions\n if func == draw_right or func == draw_left:\n func()\n time.sleep(.4)\n\n else:\n pygame.draw.rect(display, ic, [x,y,w,h])\n button_font = SMALL_FONT\n textSurf, textRect = text_objects(content, button_font)\n textRect.center = ((x+(w/2)), (y+(h/2)))\n display.blit(textSurf, textRect)\n\ndef bp_init(tp = bp, y_range = 2, fp = 50, block_dict = b_blocks, size = (50, 25), block_colors = [(0,0,255), (255,0,0), (255,255,0), (0,0,0)]): # blue - red - yellow - black / red - blue - black - yellow\n fp,sp = (0,0)\n o = 0\n for i in [i for i in range(4)]:\n for x in range(10):\n for y in range(y_range):\n block_dict.setdefault(i, {})[o] = {'block': Block(size[0],\\\n size[1],block_colors[i]), 'center': Block(10,5,(255, 255, 255)), \\\n 'occupied': False, 'player': False}\n block_dict[i][o]['block'].image.blit(block_dict[i][o]['center'].image\\\n ,((block_dict[i][o]['block'].rect.width/2) - (block_dict[i][o]['center'].rect.width/2)\\\n ,block_dict[i][o]['block'].rect.height/2))\n tp.image.blit(block_dict[i][o]['block'].image,(fp,sp))\n fp += 50\n o += 1\n fp = 0\n sp +=25\n fp = 0\n o = 0\n\ndef draw_part(part=0):\n pass\ndef 
draw_left(current_player):\n pass\ndef draw_right():\n global current_view_player\n # Players 4(0,1,2,3) / Players 3(0,1,2) / Players 2(0,1)\n fp,sp=(0,0)\n o = 0\n if current_view_player+1 not in range(4):\n current_view_player = 0\n for key,value in b_blocks[0].items():\n bp.image.blit(value['block'].image, (fp,sp))\n if o == 1:\n fp=0\n o=0\n sp += 25\n else:\n fp+=50\n o+=1\n fp,sp = ((0,0))\n for key,value in t_blocks[0].items():\n up.image.blit(value['block'].image, (fp,sp))\n sp += 25\n current_view_player += 1\n else:\n for key, value in b_blocks[current_view_player].items():\n bp.image.blit(value['block'].image, (fp,sp))\n if o == 1:\n fp=0\n o=0\n sp += 25\n else:\n fp+=50\n o+=1\n fp,sp=(0,0)\n for key, value in t_blocks[current_view_player+1].items():\n up.image.blit(value['block'].image, (fp,sp))\n sp += 25\n current_view_player +=1\nbp_init()\nbp_init(up, 1, 50, t_blocks, (50,25),[(255,0,0), (0,0,255), (0,0,0), (255,255,0)]) # red - blue - black - yellow\n\n\nic,ac=(DARKBLACK,BLACK)\n\nwhile running:\n for event in pygame.event.get():\n if event.type == KEYDOWN:\n if event.key == K_ESCAPE:\n running = False\n if event.type == QUIT:\n running = False\n create_button(DISPLAYSURFACE, \"Left\", DISPLAYSURFACER.width/2 - 200, DISPLAYSURFACER.height - 100, 150, 50, ic, ac, draw_left)\n create_button(DISPLAYSURFACE, \"Right\", DISPLAYSURFACER.width/2 + 50, DISPLAYSURFACER.height - 100, 150, 50, ic, ac, draw_right)\n DISPLAYSURFACE.blit(bp.image, ((DISPLAYSURFACER.width/2)-(bp.rect.width/2), DISPLAYSURFACER.height - bp.rect.height))\n DISPLAYSURFACE.blit(mp.image, ((DISPLAYSURFACER.width/2)-(mp.rect.width/2), DISPLAYSURFACER.height - (bp.rect.height + mp.rect.height)))\n DISPLAYSURFACE.blit(up.image, ((DISPLAYSURFACER.width/2)-(up.rect.width/2), (DISPLAYSURFACER.height - bp.rect.height) - (bp.rect.height + mp.rect.height)))\n clock.tick(15)\n pygame.display.update()\n","sub_path":"etc/test-euro.py","file_name":"test-euro.py","file_ext":"py","file_size_in_byte":4899,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"383423331","text":"\"\"\"\r\n===================\r\nimageStackViewer\r\n\r\n@author dsbarker\r\n===================\r\n\r\n\"\"\"\r\nfrom __future__ import print_function, unicode_literals, division, absolute_import\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib.widgets\r\nimport matplotlib.patches\r\nimport mpl_toolkits.axes_grid1\r\n\r\nclass PageSlider(matplotlib.widgets.Slider):\r\n\r\n def __init__(self, ax, label, numpages = 10, valinit=0, valfmt='%1d',\r\n closedmin=True, closedmax=True,\r\n dragging=True, **kwargs):\r\n\r\n self.facecolor=kwargs.get('facecolor',\"w\")\r\n self.activecolor = kwargs.pop('activecolor',\"b\")\r\n self.fontsize = kwargs.pop('fontsize', 10)\r\n self.numpages = numpages\r\n\r\n super(PageSlider, self).__init__(ax, label, 0, numpages,\r\n valinit=valinit, valfmt=valfmt, **kwargs)\r\n\r\n self.poly.set_visible(False)\r\n self.vline.set_visible(False)\r\n self.pageRects = []\r\n for i in range(numpages):\r\n facecolor = self.activecolor if i==valinit else self.facecolor\r\n r = matplotlib.patches.Rectangle((float(i)/numpages, 0), 1./numpages, 1,\r\n transform=ax.transAxes, facecolor=facecolor)\r\n ax.add_artist(r)\r\n self.pageRects.append(r)\r\n ax.text(float(i)/numpages+0.5/numpages, 0.5, str(i+1),\r\n ha=\"center\", va=\"center\", transform=ax.transAxes,\r\n fontsize=self.fontsize)\r\n self.valtext.set_visible(False)\r\n\r\n divider = 
mpl_toolkits.axes_grid1.make_axes_locatable(ax)\r\n bax = divider.append_axes(\"right\", size=\"5%\", pad=0.05)\r\n fax = divider.append_axes(\"right\", size=\"5%\", pad=0.05)\r\n self.button_back = matplotlib.widgets.Button(bax, label='<',\r\n color=self.facecolor, hovercolor=self.activecolor)\r\n self.button_forward = matplotlib.widgets.Button(fax, label='>',\r\n color=self.facecolor, hovercolor=self.activecolor)\r\n self.button_back.label.set_fontsize(self.fontsize)\r\n self.button_forward.label.set_fontsize(self.fontsize)\r\n self.button_back.on_clicked(self.backward)\r\n self.button_forward.on_clicked(self.forward)\r\n\r\n def _update(self, event):\r\n super(PageSlider, self)._update(event)\r\n i = int(self.val)\r\n if i >=self.valmax:\r\n return\r\n self._colorize(i)\r\n\r\n def _colorize(self, i):\r\n for j in range(self.numpages):\r\n self.pageRects[j].set_facecolor(self.facecolor)\r\n self.pageRects[i].set_facecolor(self.activecolor)\r\n\r\n def forward(self, event):\r\n current_i = int(self.val)\r\n i = current_i+1\r\n if (i < self.valmin) or (i >= self.valmax):\r\n return\r\n self.set_val(i)\r\n self._colorize(i)\r\n\r\n def backward(self, event):\r\n current_i = int(self.val)\r\n i = current_i-1\r\n if (i < self.valmin) or (i >= self.valmax):\r\n return\r\n self.set_val(i)\r\n self._colorize(i)\r\n\r\nclass IndexTracker(object):\r\n def __init__(self, fig, ax, X, scale=False, clim=None):\r\n self.fig = fig\r\n fig.subplots_adjust(left=0.05,right=0.875,top=0.85,bottom=0.25)\r\n self.ind = 0\r\n self.ax = ax\r\n ax.set_title('use slider to switch image\\n displaying image %s' % (self.ind+1))\r\n self.X = X\r\n self.slices, rows, cols = X.shape\r\n self.scale = scale\r\n\r\n if self.scale:\r\n if clim is None:\r\n self.clims = (np.amin(self.X), np.amax(self.X))\r\n else:\r\n self.clims = clim\r\n\r\n ax_imSelector = fig.add_axes([0.15, 0.05, 0.8, 0.04])\r\n self.ax.set_ylabel('y (pixels)')\r\n self.ax.set_xlabel('x (pixels)')\r\n self.imSelector = PageSlider(ax_imSelector, 'Image', self.slices, valinit=(self.ind))\r\n self.imSelector.on_changed(self.changeImage)\r\n self.im = ax.imshow(self.X[self.ind, :, :])\r\n ax_cbar = fig.add_axes([0.8, 0.19, 0.05, 0.7])\r\n self.cbar = fig.colorbar(self.im, cax=ax_cbar)\r\n self.changeImage(self.imSelector.val)\r\n\r\n def changeImage(self, val):\r\n self.ind = int(self.imSelector.val)\r\n self.im.set_data(self.X[self.ind, :, :])\r\n if self.scale:\r\n self.im.set_clim((self.clims))\r\n self.cbar.set_clim((self.clims))\r\n else:\r\n self.im.set_clim(vmin=np.min(self.X[self.ind, :, :]),vmax=np.max(self.X[self.ind, :, :]))\r\n self.cbar.set_clim(np.min(self.X[self.ind, :, :]),np.max(self.X[self.ind, :, :]))\r\n self.ax.set_title('use slider to switch image\\ndisplaying image %s' % (self.ind+1))\r\n self.im.axes.figure.canvas.draw()\r\n\r\n\"\"\"fig, ax = plt.subplots(1, 1)\r\n\r\nX = np.random.rand(10, 20, 20)\r\n\r\ntracker = IndexTracker(fig, ax, X)\r\n\r\n\r\n#fig.canvas.mpl_connect('scroll_event', tracker.onscroll)\r\nplt.show()\"\"\"\r\n","sub_path":"imageStackViewer.py","file_name":"imageStackViewer.py","file_ext":"py","file_size_in_byte":4959,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"280531673","text":"# import pandas as pd\nimport csv\nimport math\nimport nltk\nimport re\n\n\nclass NaiveBayes:\n positive_count = 0\n negative_count = 0\n neutral_count = 0\n total_count = 0\n po_list = [] \t#un = po\n ne_list = []\t\t#ow = ne\n nu_list = []\t\t#n = nu\n total_list = []\n\n 
poHash = {}\n neHash = {}\n nuHash={}\n total_positive_words = 0\n total_negative_words = 0\n total_neutral_words = 0\n unique_words = 0\n\n prior_positive = 0\n prior_negative = 0\n prior_neutral = 0\n # test_data = ['nepal','nepal','nepal','tokyo','japan']\n \n test_data = ['nepal','bangal','hongkong']\n likelihood = {}\n positive_conditional_probability = {}\n negative_conditional_probability = {}\n neutral_conditional_probability = {}\n # test_data = ['japan']\n \n def __init__(self):\n pass\n def train(self,file):\n with open(file,'r') as f:\n self.reader = csv.reader(f)\n list_reader = list(self.reader)\n print(list_reader)\n # for row in list_reader:\n # \tprint(row)\n for row in list_reader:\n if row[4] == 'positive':\n self.po_list.append(row)\n self.positive_count += 1\n if row[4] == 'negative':\n self.ne_list.append(row)\n self.negative_count += 1\n if row[4] == 'neutral':\n self.nu_list.append(row)\n self.neutral_count += 1\n \n # to reove positvei,negative , neutral word\n self.po_list = self.remove(self.po_list)\n self.ne_list = self.remove(self.ne_list)\n self.nu_list = self.remove(self.nu_list)\n\n \n print(self.positive_count)\n print(self.negative_count)\n print(self.neutral_count)\n # self.po_dic = self.po_list\n # for row in self.po_list:\n # \t\tprint(row[0:])\n # for row in self.ne_list:\n # print(row[0:])\n # for row in self.nu_list:\n # print(row[0:])\n self.total_count = self.positive_count+self.negative_count+self.neutral_count\n print(self.total_count)\n print(\"\\n\")\n print(\"Prior Probabilities: \\n\")\n self.prior_neutral = self.neutral_count/self.total_count\n self.prior_positive = self.positive_count/self.total_count\n self.prior_negative = self.negative_count/self.total_count\n \n print('neutral:',self.prior_neutral)\n print('positive:',self.prior_positive)\n print('negative:',self.prior_negative)\n\n \n\n\n # self.total_count = self.positive_count+self.negative_count+self.neutral_count\n # print(self.total_count)\n self.total_list = self.po_list + self.ne_list+ self.nu_list\n # print(self.total_list)\n self.poHash =self.generateHash(self.po_list,1)\n print(\"ok\")\n print(self.poHash)\n self.neHash =self.generateHash(self.ne_list,-1)\n # print(self.neHash)\n self.nuHash =self.generateHash(self.nu_list,0)\n # print(self.nuHash)\n \n self.unique_words = self.total_distinct_word(self.nuHash,self.neHash,self.poHash)\n\n\n print(\"Total number of positive words : \",self.total_positive_words)\n print(\"\\nTotal number of negative words : \",self.total_negative_words)\n print(\"\\nTotal number of neutral words : \",self.total_neutral_words)\n print(\"\\nTotal number of distinct words : \",self.unique_words)\n \n\n self.positive_conditional_probability = self.find_conditional_probability(self.poHash,self.test_data,self.total_positive_words,\"positive_words\")\n self.negative_conditional_probability = self.find_conditional_probability(self.neHash,self.test_data,self.total_negative_words,\"negative_words\")\n self.neutral_conditional_probability = self.find_conditional_probability(self.nuHash,self.test_data,self.total_neutral_words,\"neutral_words\")\n\n classify = self.test(self.test_data,self.positive_conditional_probability)\n\n # classify = self.test(self.test_data,positive_conditional_probability,negative_conditional_probability,neutral_conditional_probability)\n\n \n\n # def adjustValue(self,dict1,dict2,dict3):\n # for i in dict1:\n # if i not in dict2:\n # dict2[i] = 0\n # if i not in dict3:\n # dict3[i] = 0\n # for i in dict2:\n # if i not in dict1:\n # 
dict1[i] = 0\n # if i not in dict3:\n # dict3[i] = 0\n # for i in dict3:\n # if i not in dict1:\n # dict1[i] = 0\n # if i not in dict2:\n # dict2[i] = 0\n # print(\"\\n\")\n # print(dict1)\n # print(\"\\n\")\n # print(dict2)\n # print(\"\\n\")\n # print(dict3)\n\n # return dict1,dict2,dict3\n \n def remove(self,lists):\n for row in lists:\n del row[4]\n print(lists)\n return lists\n\n\n def total_distinct_word(self,dict1,dict2,dict3):\n # dict1,dict2,dict3 = self.adjustValue(dict1,dict2,dict3)\n # total_dic = {}\n distinct_word = []\n for i in dict1:\n if i not in distinct_word:\n distinct_word += [i]\n \n for j in dict2:\n if j not in distinct_word:\n distinct_word += [j]\n \n for k in dict3:\n if k not in distinct_word:\n distinct_word += [k]\n result = len(distinct_word) \n print(distinct_word)\n return result\n\n\n # def hashTable_Prob(self,dict1,dict2,dict3):\n # dict1,dict2,dict3 = self.adjustValue(dict1,dict2,dict3)\n # for i in dict1:\n # dict1[i] = (dict1[i]+1)/(self.neutral_count+len(self.nu_list))\n # for i in dict2:\n # dict2[i] = (dict2[i]+1)/(self.negative_count+len(self.ne_list))\n # for i in dict3:\n # dict3[i] = (dict3[i]+1)/(self.positive_count+len(self.po_list))\n\n # dict ={'neutral':dict1,'negative':dict2,'positive':dict3}\n # return dict\n\n # def generateHash(self,word_list):\n # word_frequencies = []\n # unique_words = []\n # dict_unique_word = {}\n\n # for x in word_list:\n # \tif x not in unique_words:\n # \t\tunique_words += [x]\n\n # for x in unique_words:\n # \tword_frequencies += str(word_list.count(x))\n # \tprint(word_frequencies)\n\n # print(\"\\n\")\n # dist_word = len(unique_words)\n # # unique_words = str(unique_words)\n # print(type(unique_words))\n # print(\"i am here\")\n # print(len(unique_words))\n # print(unique_words)\n # for i in range(dist_word):\n\n # print(\"\\nlets watch here\")\n # dict_unique_word[unique_words[i]] = word_frequencies[i]\n \n\n # print(\"\\n now daam here\")\n # print(dict_unique_word)\t\n # return dict_unique_word \n\n\n\n def generateHash(self,lists,number):\n dic = {}\n new_list = []\n for i in lists:\n new_list +=[i]\n print(new_list)\n # new = {}\n # new = dict(lists)\n\n lists = str(lists)\n token_pattern = r'\\w+' # small w represent [a-zA-Z0-9 _ ]\n regex_wt = nltk.RegexpTokenizer(pattern = token_pattern,gaps=False) #haven't==haven t\n tokens = regex_wt.tokenize(lists) #tokens ---list objec\n print (\"\\n\")\n # for token in tokens:\n # print(token)\n if number is 1:\n self.total_positive_words = len(tokens)\n elif number is -1:\n self.total_negative_words =len(tokens)\n else:\n self.total_neutral_words =len(tokens)\n\n\n print(\"total number of words: \",len(tokens))\n\n for x in tokens:\n if x not in dic:\n dic[x] = 1\n else:\n dic[x] +=1 \n # print(dic)\n return dic\n\n\n\n def find_conditional_probability(self,dict,data,total_words,sentiment):\n conditional_probability_dict = {}\n dic = {}\n conditional_probability = 0\n\n # for i in data:\n # if i not in dic:\n # dic[i] = 1\n # else:\n # dic[i] += 1\n # print(data[1])\n\n # for x in data:\n # print(x[0])\n\n # if (x == positive_conditional_probability.key()):\n for i in data:\n print(i)\n for y in dict:\n if i == y :\n print(\"uffff....\")\n conditional_probability_dict[i] = (dict[y] + 1)/(total_words + self.unique_words)\n break #yo break dherai kata ho\n else:\n print(\"come on man!!!\")\n conditional_probability_dict[i] = 1/(total_words + self.unique_words)\n\n \n print(\"hello rice killer\\n\")\n print(\"Conditional_probability of \",sentiment)\n 
print(conditional_probability_dict)\n return conditional_probability_dict\n\n\n\n def test(self,data,positive_conditional_probability):\n # test_data = ['nepal','nepal','nepal','tokyo','japan']\n dic = {}\n conditional_probability = 1\n score = {}\n # print(data[1])\n # i = 0\n # for i in range(len(data)):\n # self.likelihood = self.calculate_likelihood(data[i])\n\n\n\n\n for x in data:\n # for i in x:\n if x not in dic:\n dic[x] = 1\n else:\n dic[x] += 1\n\n print(\"where r u man\")\n print(dic)\n # return dic\n for x in dic:\n print(x)\n # if (x == positive_conditional_probability.key()):\n for y in positive_conditional_probability:\n if x == y :\n print(\"uffff....\")\n # print(positive_conditional_probability[y])\n conditional_probability *= (positive_conditional_probability[y]**dic[x])\n \n # print(\"::::::::::::::::;\",conditional_probability)\n # conditional_probability *= 10000\n conditional_probability *= 10000\n print(\"Conditional Probability : \", conditional_probability)\n \n positive_likelihood_probability = conditional_probability * self.prior_positive\n print(\"\\nPositive likelihood probability : \",positive_likelihood_probability)\n print(\"\\nohoooo...hait\")\n # print(positive_score)\n\n \n\n\n # def classify(self,args):\n # priorN = self.neutral_count/self.total_count\n # priorpo = self.positive_count/self.total_count\n # priorne = self.negative_count/self.total_count\n # sex = args[0]\n # height = round((float(args[1])))\n # wt = int(float(args[2]))\n # weight = wt - (wt%10)\n # cheight = 'ht_' + str(height)\n # cweight = 'wt_' + str(weight)\n # ht = []\n # wt = []\n # try:\n # ht =[self.nuHash[cheight],self.poHash[cheight],self.neHash[cheight]]\n # wt =[self.nuHash[cweight],self.poHash[cweight],self.neHash[cweight]]\n # except KeyError:\n # ht = [0,0,0]\n # wt = [0,0,0]\n # pN = ((priorN * ht[0]* wt[0])+1)/((self.neutral_count**2)+len(self.nuHash))\n # ppo = ((priorpo * ht[1] * wt[1])+1)/((self.positive_count**2)+len(self.poHash))\n # pne = ((priorne * ht[2] * wt[2])+1)/((self.negative_count**2)+len(self.neHash))\n # if pN>ppo and pN>pne:\n # return 'neutral'\n # elif ppo > pne:\n # return 'positive'\n # elif pne > ppo:\n # return 'negative'\n # else:\n # return 'Cannot be classified'\n # def test(self,file):\n # count = 0\n # with open(file,'r') as f:\n # self.reader = csv.reader(f)\n # list_reader = list(self.reader)\n # for rne in list_reader[1:]:\n # actual = self.calculateBMI(rne)\n # calculated = self.classify(rne)\n # if(actual == calculated):\n # count += 1\n # accuracy = (len(list_reader[1:])-count)/len(list_reader[1:])*100\n # return accuracy\n\n # def calculate_likelihood(self,data):\n # conditional_probability = 1\n # dic = {}\n # for i in data:\n # if i not in dic:\n # dic[i] = 1\n # else:\n # dic[i] += 1\n # print(dic)\n\n # for x in dic:\n # print(x)\n # # if (x == positive_conditional_probability.key()):\n # for y in self.positive_conditional_probability:\n # if x == y :\n # print(\"uffff....\")\n # # print(positive_conditional_probability[y])\n # conditional_probability *= (self.positive_conditional_probability[y]**dic[x])\n \n # # print(\"::::::::::::::::;\",conditional_probability)\n # # conditional_probability *= 10000\n # # conditional_probability *= 10000\n # print(\"\\nhey bhagawan\")\n # print(\"Conditional Probability : \", conditional_probability)\n \n\n\n\n\n\n\n\nif __name__==\"__main__\":\n try:\n naivebayes = NaiveBayes()\n naivebayes.train(\"trainData.csv\")\n # print(\"Accuracy:\",naivebayes.test('testSet.csv'))\n # print(\"Actual BMI 
for ['M',5,50]:\",naivebayes.calculateBMI(['M',5,50]))\n # print(\"Calculated BMI:\",naivebayes.classify(['M',5,50]))\n except FileNotFoundError:\n print(\"File not found\")\n","sub_path":"tokenise/basic_naive.py","file_name":"basic_naive.py","file_ext":"py","file_size_in_byte":13811,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"515867578","text":"#!/usr/bin/env python3\n\nimport sys\nimport math\n\ndef get_true_zero(matrice):\n\tmatrice[0][0] = minus_zero_to_zero(matrice[0][0])\n\tmatrice[0][1] = minus_zero_to_zero(matrice[0][1])\n\tmatrice[0][2] = minus_zero_to_zero(matrice[0][2])\n\tmatrice[1][0] = minus_zero_to_zero(matrice[1][0])\n\tmatrice[1][1] = minus_zero_to_zero(matrice[1][1])\n\tmatrice[1][2] = minus_zero_to_zero(matrice[1][2])\n\tmatrice[2][0] = minus_zero_to_zero(matrice[2][0])\n\tmatrice[2][1] = minus_zero_to_zero(matrice[2][1])\n\tmatrice[2][2] = minus_zero_to_zero(matrice[2][2])\n\treturn (matrice)\n\ndef minus_zero_to_zero(number):\n\tif number > -0.01 and number < 0:\n\t\tnumber = float(0)\n\treturn (number)\n\ndef my_translation(matrice, vi, vj):\n a = 1 * matrice[0][0] + 0 * matrice[1][0] + 0 * matrice[2][0]\n b = 0 * matrice[0][0] + 1 * matrice[1][0] + 0 * matrice[2][0]\n c = vi * matrice[0][0] + vj * matrice[1][0] + 1 * matrice[2][0]\n\n d = 1 * matrice[0][1] + 0 * matrice[1][1] + 0 * matrice[2][1]\n e = 0 * matrice[0][1] + 1 * matrice[1][1] + 0 * matrice[2][1]\n f = vi * matrice[0][1] + vj * matrice[1][1] + 1 * matrice[2][1]\n\n g = 1 * matrice[0][2] + 0 * matrice[1][2] + 0 * matrice[2][2]\n h = 0 * matrice[0][2] + 1 * matrice[1][2] + 0 * matrice[2][2]\n i = vi * matrice[0][2] + vj * matrice[1][2] + 1 * matrice[2][2]\n matrice = [[a, d, g], [b, e, h], [c, f, i]]\n return matrice\n\ndef my_homothety(matrice, m, n):\n a = m * matrice[0][0] + 0 * matrice[1][0] + 0 * matrice[2][0]\n b = 0 * matrice[0][0] + n * matrice[1][0] + 0 * matrice[2][0]\n c = 0 * matrice[0][0] + 0 * matrice[1][0] + 1 * matrice[2][0]\n\n d = m * matrice[0][1] + 0 * matrice[1][1] + 0 * matrice[2][1]\n e = 0 * matrice[0][1] + n * matrice[1][1] + 0 * matrice[2][1]\n f = 0 * matrice[0][1] + 0 * matrice[1][1] + 1 * matrice[2][1]\n\n g = m * matrice[0][2] + 0 * matrice[1][2] + 0 * matrice[2][2]\n h = 0 * matrice[0][2] + n * matrice[1][2] + 0 * matrice[2][2]\n i = 0 * matrice[0][2] + 0 * matrice[1][2] + 1 * matrice[2][2]\n matrice = [[a, d, g], [b, e, h], [c, f, i]]\n return matrice\n\ndef my_rotation(matrice, alpha):\n alpha = math.radians(alpha)\n a = math.cos(alpha) * matrice[0][0] + math.sin(alpha) * matrice[1][0] + 0 * matrice[2][0]\n b = -math.sin(alpha) * matrice[0][0] + math.cos(alpha) * matrice[1][0] + 0 * matrice[2][0]\n c = 0 * matrice[0][0] + 0 * matrice[1][0] + 1 * matrice[2][0]\n\n d = math.cos(alpha) * matrice[0][1] + math.sin(alpha) * matrice[1][1] + 0 * matrice[2][1]\n e = -math.sin(alpha) * matrice[0][1] + math.cos(alpha) * matrice[1][1] + 0 * matrice[2][1]\n f = 0 * matrice[0][1] + 0 * matrice[1][1] + 1 * matrice[2][1]\n\n g = math.cos(alpha) * matrice[0][2] + math.sin(alpha) * matrice[1][2] + 0 * matrice[2][2]\n h = -math.sin(alpha) * matrice[0][2] + math.cos(alpha) * matrice[1][2] + 0 * matrice[2][2]\n i = 0 * matrice[0][2] + 0 * matrice[1][2] + 1 * matrice[2][2]\n matrice = [[a, d, g], [b, e, h], [c, f, i]]\n return matrice\n\ndef my_symmetry(matrice, alpha):\n alpha = math.radians(alpha)\n a = math.cos(2 * alpha) * matrice[0][0] + math.sin(2 * alpha) * matrice[1][0] + 0 * matrice[2][0]\n b = math.sin(2 * alpha) * 
matrice[0][0] - math.cos(2 * alpha) * matrice[1][0] + 0 * matrice[2][0]\n    c = 0 * matrice[0][0] + 0 * matrice[1][0] + 1 * matrice[2][0]\n\n    d = math.cos(2 * alpha) * matrice[0][1] + math.sin(2 * alpha) * matrice[1][1] + 0 * matrice[2][1]\n    e = math.sin(2 * alpha) * matrice[0][1] - math.cos(2 * alpha) * matrice[1][1] + 0 * matrice[2][1]\n    f = 0 * matrice[0][1] + 0 * matrice[1][1] + 1 * matrice[2][1]\n\n    g = math.cos(2 * alpha) * matrice[0][2] + math.sin(2 * alpha) * matrice[1][2] + 0 * matrice[2][2]\n    h = math.sin(2 * alpha) * matrice[0][2] - math.cos(2 * alpha) * matrice[1][2] + 0 * matrice[2][2]\n    i = 0 * matrice[0][2] + 0 * matrice[1][2] + 1 * matrice[2][2]\n    matrice = [[a, d, g], [b, e, h], [c, f, i]]\n    return matrice\n\ndef print_words(flag, arg1, arg2):\n\tif flag == \"-t\":\n\t\tprint(\"Translation by the vector ({0:.0f}, {1:.0f})\".format(arg1, arg2))\n\telif flag == \"-h\":\n\t\tprint(\"Homothety by the ratios {0:.0f} and {1:.0f}\".format(arg1, arg2))\n\telif flag == \"-r\":\n\t\tprint(\"Rotation at a {0:.0f} degree angle\".format(arg1))\n\telif flag == \"-s\":\n\t\tprint(\"Symmetry about an axis inclined with an angle of {0:.0f} degrees\".format(arg1))\n\ndef print_matrice(matrice):\n    print(\"{0:.2f}\\t{1:.2f}\\t{2:.2f}\".format(matrice[0][0], matrice[1][0], matrice[2][0]))\n    print(\"{0:.2f}\\t{1:.2f}\\t{2:.2f}\".format(matrice[0][1], matrice[1][1], matrice[2][1]))\n    print(\"{0:.2f}\\t{1:.2f}\\t{2:.2f}\".format(matrice[0][2], matrice[1][2], matrice[2][2]))\n\ndef matrice_mult(matrice, point2) :\n    x2 = matrice[0][0] * point2[0] + matrice[1][0] * point2[1] + matrice[2][0] * point2[2]\n    y2 = matrice[0][1] * point2[0] + matrice[1][1] * point2[1] + matrice[2][1] * point2[2]\n    z2 = matrice[0][2] * point2[0] + matrice[1][2] * point2[1] + matrice[2][2] * point2[2]\n    point2 = [x2, y2, z2]\n    return (point2)\n","sub_path":"102architect_2017/form_calculate.py","file_name":"form_calculate.py","file_ext":"py","file_size_in_byte":4976,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"74459727","text":"def createShit(): # build the list of candidate numbers\n    shit=[]\n    for i in range(10000): # range 0-9999\n        a = addzero(i) # pad to 4 digits (with leading zeros)\n        if(ingnoreDouble(a)==True): # check for repeated digits\n            pass # repeated = skip\n        elif(ingnoreDouble(a)==False):\n            shit.append(a) # only append to the candidate list if no digit repeats\n    return shit # return the generated candidate list\ndef addzero(i): # pad with zeros\n    i=str(i)\n    while(len(i)<4):\n        i = \"0\"+ i\n    return i\ndef ingnoreDouble(a): # check for repeated digits\n    for i in range(0,4):\n        for j in range(i+1,4):\n            if(a[i]==a[j]):\n                return True\n    return False\ndef checkAB(guess,bigShit):\n    a,b=0,0\n    for i in range(4): # count A's and B's between the guess and the answer\n        if(bigShit[i]==guess[i]):\n            a += 1\n            b -= 1\n        if(bigShit[i] in guess):\n            b += 1\n    if(a==abtemp[0] and b==abtemp[1]):\n        return False\n    else:\n        return True\ndef checkShit(guess,bigShit): # compare every candidate with the guess; if its A/B count differs from the reported result, store its index in the deletion list\n    deltemp=[]\n    for i in range(len(bigShit)):\n\n        if(checkAB(guess,bigShit[i])==True): # if it differs, store the index in the deletion list; if it matches, pass.\n            deltemp.append(i)\n    return deltemp \ndef main():\n    bigShit = createShit() # build the candidate list\n    #print(len(bigShit))\n    while(len(bigShit)>1):\n        innput = [i for i in input().split(\",\")] # read the guess and its result, then split them\n        guess=innput[0] # get the guess\n        abtemp[0]=int(innput[1][0]) # get A\n        abtemp[1]=int(innput[1][2]) # get B\n        #print(abtemp)\n        deltemp = checkShit(guess,bigShit) # keep only candidates whose A/B count matches the reported result\n        for i in reversed(deltemp): # pop in reverse order to remove eliminated candidates\n            bigShit.pop(i)\n        #print(deltemp)\n        #print(bigShit)\n    print(bigShit[0])\n    
#print(bigShit)\nabtemp=[-1,-1]\nmain()","sub_path":"week3/Wtf_guess_num.py","file_name":"Wtf_guess_num.py","file_ext":"py","file_size_in_byte":2038,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"169350021","text":"from Tkinter import *\n\n#Load file\ntg = {}\ntry:\n for entry in open(\"Class10w.txt\"):\n entry.rsplit()\n student, attendance = entry.split(\":\")\n tg[student] = int(attendance) #int to catch errors\n #debug\n print(tg)\n\n#Occurs when no file\nexcept IOError:\n print(\"Error: Failed to load the attendance\")\n print(\"IOError\")\n\n#Occurs when invalid value \nexcept ValueError:\n print(\"Error: Failed to load the attendance\")\n print(\"ValueError\")\n\n for entry in open(\"Class10w.txt\"):\n entry.rsplit()\n student, attendance = entry.split(\":\")\n try:\n attendance = int(attendance)\n except:\n tg[student] = (attendance)\n print(student + \":\" + attendance)\n\nexcept:\n print(\"Error: Failed to load the attendance\")\n print(\"Unknown Error\") \n","sub_path":"master.py","file_name":"master.py","file_ext":"py","file_size_in_byte":829,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"68512103","text":"\"\"\"\nRuns a celery worker process that exposes a task API for\nores.scoring_systems.CeleryQueue\n\nUsage:\n celery [--config-dir=]... [--logging-config=]\n [--debug] [--verbose]\n\nOptions:\n -h --help Prints this documentation\n --config-dir= The path to a directory containing configuration\n [default: config/]\n --logging-config= The path to a logging configuration file\n --debug Print debug logging information\n --verbose Print verbose extraction information\n\"\"\"\nimport logging\n\nimport docopt\n\nfrom ..scoring_systems import CeleryQueue\nfrom .util import build_config\n\n\ndef main(argv=None):\n args = docopt.docopt(__doc__, argv=argv)\n verbose = args['--verbose']\n debug = args['--debug']\n\n run(verbose, debug,\n config_dirs=args['--config-dir'],\n logging_config=args['--logging-config'])\n\n\ndef run(verbose, debug, **kwargs):\n logging.basicConfig(\n level=logging.DEBUG if debug else logging.INFO,\n format='%(asctime)s %(levelname)s:%(name)s -- %(message)s'\n )\n logging.getLogger('requests').setLevel(logging.INFO)\n if verbose:\n logging.getLogger('revscoring.dependencies.dependent') \\\n .setLevel(logging.DEBUG)\n else:\n logging.getLogger('revscoring.dependencies.dependent') \\\n .setLevel(logging.INFO)\n\n logging.getLogger(\"ores.metrics_collectors.logger\").setLevel(logging.DEBUG)\n logging.getLogger(\"stopit\").setLevel(logging.ERROR)\n\n application = build(**kwargs)\n logging.getLogger('ores').setLevel(logging.DEBUG)\n celery_log_level = \"DEBUG\" if debug else \"INFO\"\n application.worker_main(\n argv=[\"celery_worker\", \"--loglevel=\" + celery_log_level])\n\n\ndef build(*args, **kwargs):\n config = build_config(*args, **kwargs)\n scoring_system = CeleryQueue.from_config(\n config, config['ores']['scoring_system'])\n return scoring_system.application\n","sub_path":"ores/applications/celery.py","file_name":"celery.py","file_ext":"py","file_size_in_byte":2000,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"450894419","text":"from Track import Track_class\n\nacdc_highway_to_hell = Track_class(\"Highway to Hell\", 'music/acdc_highway_to_hell.ogg')\nmetallice_nothing_else_matters = Track_class(\"Nothing Else Matters\", 
'music/metallice_nothing_else_matters.ogg')\nnirvana_smells_like_teen_spirit = Track_class(\"Smells Like Teen Spirit\", 'music/nirvana_smells_like_teen_spirit.ogg')\nadele_when_we_were_young = Track_class(\"When We Were Young\", 'music/adele_when_we_were_young.ogg')\nrihanna_work = Track_class(\"Work\", 'music/rihanna_work.ogg')\nlukas_graham_7_years = Track_class(\"7 Years\", 'music/lucas_graham_7_years.ogg')\naqua_barbie_girl = Track_class(\"Barbie Girl\", 'music/aqua_barbie_girl.ogg')\nbackstreet_boys_everybody = Track_class(\"Everybody\", 'music/backstreet_boys_everybody.ogg')\ntlc_no_scrubs = Track_class(\"No Scrubs\", 'music/tlc_no_scrubs.ogg')\nbeyonce_halo = Track_class(\"Halo\", 'music/beyonce_halo.ogg')\nlady_gaga_poker_face = Track_class(\"Poker Face\", 'music/lady_gaga_poker_face.ogg')\nthe_black_eyed_peas_boom_boom_pow = Track_class(\"Boom Boom Pow\", 'music/the_black_eyed_peas_boom_boom_pow.ogg')\nbob_marley_one_love = Track_class(\"One Love\", 'music/bob_marley_one_love.ogg')\nbob_marley_stir_it_up = Track_class(\"Stir It Up\", 'music/bob_marley_stir_it_up.ogg')\nisrealites_desmond_dekker = Track_class(\"Israelites\", 'music/isrealites_desmond_dekker.ogg')\ngeorge_thorogood_bad_to_the_bone = Track_class(\"Bad to The Bone\", 'music/george_thorogood_bad_to_the_bone.ogg')\nqueen_i_want_to_break_free = Track_class(\"I Want to Break Free\", 'music/queen_i_want_to_break_free.ogg')\nrick_astley_never_gonna_give_you_up = Track_class(\"Never Gonna Give You Up\", 'music/rick_astley_never_gonna_give_you_up.ogg')\nlouis_armstron_what_a_wonderful_world = Track_class(\"What a Wonderful World\", 'music/louis_armstrong_what_a_wonderful_world.ogg')\nmarvin_gaye_lets_get_it_on = Track_class(\"Let's Get It On\", 'music/marvin_gaye_lets_get_it_on.ogg')\nfrank_sinatra_ive_got_you_under_my_skin = Track_class(\"I've Got You Under My Skin\", 'music/frank_sinatra_ive_got_you_under_my_skin.ogg')\nvivaldi_spring_allegro = Track_class(\"Lente - Vivaldi\", 'music/vivaldi_spring_allegro.ogg')\ntoccata_and_fugue = Track_class(\"Toccata en Fugue\", 'music/toccata_and_fugue.ogg')\node_to_joy = Track_class(\"Ode Aan de Vreugde\", 'music/ode_to_joy.ogg')\nnyan_cat = Track_class(\"Nyan Cat\", 'music/nyan_cat.ogg')\npeanut_butter_jelly_time = Track_class(\"Peanut Butter Jelly Time\", 'music/peanut_butter_jelly_time.ogg')\ntrololo = Track_class(\"Trololo\", 'music/trololo.ogg')\n\nsongs = {\n    'rock1': acdc_highway_to_hell,\n    'rock2': metallice_nothing_else_matters,\n    'rock3': nirvana_smells_like_teen_spirit,\n    'pop1': adele_when_we_were_young,\n    'pop2': rihanna_work,\n    'pop3': lukas_graham_7_years,\n    '90s1': aqua_barbie_girl,\n    '90s2': backstreet_boys_everybody,\n    '90s3': tlc_no_scrubs,\n    '00s1': beyonce_halo,\n    '00s2': lady_gaga_poker_face,\n    '00s3': the_black_eyed_peas_boom_boom_pow,\n    'reggea1': bob_marley_one_love,\n    'reggea2': bob_marley_stir_it_up,\n    'reggea3': isrealites_desmond_dekker,\n    '80s1': george_thorogood_bad_to_the_bone,\n    '80s2': queen_i_want_to_break_free,\n    '80s3': rick_astley_never_gonna_give_you_up,\n    'jazz1': louis_armstron_what_a_wonderful_world,\n    'jazz2': marvin_gaye_lets_get_it_on,\n    'jazz3': frank_sinatra_ive_got_you_under_my_skin,\n    'klassiek1': vivaldi_spring_allegro,\n    'klassiek2': toccata_and_fugue,\n    'klassiek3': ode_to_joy,\n    'meme1': nyan_cat,\n    'meme2': peanut_butter_jelly_time,\n    'meme3': 
trololo\n}\n\n\n","sub_path":"PyQuiz/dictionary.py","file_name":"dictionary.py","file_ext":"py","file_size_in_byte":3533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"33550494","text":"\nimport discord\nfrom discord.ext import commands\nimport sqlite3\nimport os\nimport time\nfrom datetime import datetime\ndatabase = os.getcwd()+r\"/db/database.db\"\ntheOwner = 194852876902727680\n\nclass Database(commands.Cog):\n \"\"\"Used to store data in to SQL databases using SQLLite3\"\"\"\n def __init__(self,client):\n self.client = client\n self.conn = create_connection(database)\n if self.conn is not None:\n create_table(self.conn,\"\"\"CREATE TABLE IF NOT EXISTS blacklist (\n user_id integer PRIMARY KEY,\n reason text NOT NULL,\n date_banned integer NOT NULL\n );\"\"\")\n create_table(self.conn,\"\"\"CREATE TABLE IF NOT EXISTS commandlogs (\n id integer PRIMARY KEY,\n command text NOT NULL,\n user_id integer NOT NULL,\n server_id integer NOT NULL,\n channel_id integer NOT NULL,\n date_ran integer\n );\"\"\")\n \n @commands.command()\n async def blacklist(self,ctx,member: discord.Member,*,reason=None):\n if ctx.author.id == theOwner:\n if reason==None:\n reason = \"None provided\"\n add_to_blacklist(self.conn,member.id,reason)\n await ctx.send(member.name+\" has been blacklisted\")\n channel = await member.create_dm()\n await channel.send(\"You have been blacklisted from this bot for :\"+\"\\n`\"+reason+\"`\"+\"\\nJoin https://discord.gg/3XBcER9 to appeal\")\n \n @commands.command()\n async def unblacklist(self,ctx,member: discord.Member):\n if ctx.author.id == theOwner:\n remove_from_blacklist(self.conn,member.id)\n await ctx.send(member.name+\" has been removed from the blacklist\")\n channel = await member.create_dm()\n await channel.send(\"Your blacklist has been removed\"+\"\\nYou may use this bot again\"+\"\\nNext time, think twice before deciding to do something to be blacklisted.\")\n\n @commands.command()\n async def checkblacklist(self,ctx,member: discord.Member):\n if ctx.author.id == theOwner:\n check = check_blacklist(self.conn, member.id)\n print(check)\n if check != None:\n await ctx.send(member.name + \" is on the blacklist since \"+datetime.utcfromtimestamp(list(check)[-1]).strftime('%Y-%m-%d %H:%M:%S'))\n else:\n await ctx.send(member.name + \" is not on the blacklist\")\n\n @commands.command()\n async def sql(self,ctx,*,command):\n if ctx.author.id == theOwner:\n if command.startswith(\"```SQL\"):\n command = (command[6:-3])\n rows = run_sql_query(self.conn,command)\n return_value=\"```\\n\"\n for row in rows:\n return_value = return_value + str(row) + \"\\n\"\n return_value = return_value + \"```\"\n await ctx.send(return_value)\n return\n\n \n\ndef setup(client):\n client.add_cog(Database(client))\n\n\ndef create_connection(db_file):\n conn = None\n try:\n conn = sqlite3.connect(db_file)\n except sqlite3.Error as e:\n print(e)\n return conn\n\ndef run_sql_query(conn,command):\n cur = conn.cursor()\n cur.execute(command)\n rows=cur.fetchall()\n conn.commit()\n return rows\n\ndef create_table(conn, create_table_sql):\n try:\n c = conn.cursor()\n c.execute(create_table_sql)\n except sqlite3.Error as e:\n print(e)\n\ndef add_to_blacklist(conn,user_id,reason):\n date = int(time.time())\n sql = \"\"\" INSERT INTO blacklist(user_id,reason,date_banned)\n VALUES(?,?,?)\"\"\"\n cur = conn.cursor()\n cur.execute(sql,(user_id,reason,date))\n conn.commit()\n \ndef remove_from_blacklist(conn,user_id):\n date = int(time.time())\n sql 
= \"\"\" DELETE FROM blacklist WHERE user_id = ?\"\"\"\n cur = conn.cursor()\n cur.execute(sql,(user_id,))\n conn.commit()\n\ndef check_blacklist(conn,user_id):\n \n sql = \"\"\" SELECT * FROM blacklist WHERE user_id = ? LIMIT 1;\"\"\"\n cur = conn.cursor()\n cur.execute(sql,(user_id,))\n rows = cur.fetchall()\n for row in rows:\n return row\n\ndef add_command_to_log(conn,ctx):\n sql = \"\"\" INSERT INTO commandlogs(command,user_id,server_id,channel_id,date_ran) VALUES(?,?,?,?,?)\"\"\"\n cur = conn.cursor()\n cur.execute(sql,(ctx.message.content,ctx.author.id,ctx.guild.id,ctx.channel.id,ctx.message.created_at))\n conn.commit()\n","sub_path":"cogs/databases.py","file_name":"databases.py","file_ext":"py","file_size_in_byte":4689,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"119934603","text":"from random import randint\nfrom time import sleep\nlista = []\ntemp = []\nuser = int(input('quantos palpites voce quer? '))\nprint('-='*30)\nfor l in range(0,user):\n for p in range(0,6):\n rand = randint(1,60)\n if rand not in temp:\n temp.append(rand)\n else:\n while True:\n rand = randint(1,60)\n if rand not in temp:\n temp.append(rand)\n break\n lista.append(temp[:])\n temp.clear()\n lista[l].sort()\n print(f'jogo{l+1}: {lista[l]}')\n print('-='*20)\n sleep(1)\nprint('BOM JOGO!!')","sub_path":"mundo 3/aula 18/exer88.py","file_name":"exer88.py","file_ext":"py","file_size_in_byte":599,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"264837480","text":"values = list(int(x) for x in input().split()) \n\nk = values[0] # cost of first banana\nn = values[1] # initial number of dollars the soldier has \nw = values[2] # number of bananas he want\n\nprice = ((k*w*(w+1))//2) # calculating the dollars needed to purchase w number of bananas\n\nif(price <= n): # If price required to purchase w bananas is < number of dollars he possess \n print(0)\nelse:\n print(price-n) # else return the difference\n","sub_path":"A/Soldier_and_Bananas/fazeel.py","file_name":"fazeel.py","file_ext":"py","file_size_in_byte":453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"549792328","text":"# PROBLEM\n# A stock market analyst wants to estimate the average return \n# on a certain stock. A random sample of 15 days yields an average \n# (annualized) return of Xbar=10.37% and a standard deviation of s=3.5%. 
\n# Assuming a normal population of returns, give a 95% confidence \n# interval for the average return on this stock.\n\n\n# Sample Mean, xbar = 10.37\n# Sample Standard Deviation, sam_std = 3.5\n# Sample Size, n = 15\n\n# We don't know the Mean (µ) & Standard Deviation (σ) of the Population \n# As the Sample Size (n) is 15 which is less than 30,\n# we cannot use Normal Distribution\n\n# Here, we can use t-distribution which is more robust\n# Population Mean, µ = xbar +/- zt*(sam_std/√n)\n# zt is the z-score for t-distribution\n# zt = sp.t.ppf(confidence_interval, degrees_of_freedom)\n# confidence_interval = 0.025 # for 95% \n# degrees of freedom = (n - 1) = 14\n\n# CODE\n\nimport scipy.stats as sp\nimport math as m\n\nn = 15\nxbar = 10.37\nsam_std = 3.5\n\npop_mean_mu1 = xbar + sp.t.ppf(0.025,df=n-1)*(sam_std/m.sqrt(n))\n\npop_mean_mu2 = xbar - sp.t.ppf(0.025,df=n-1)*(sam_std/m.sqrt(n))\n\n# OUTPUT\n\n# >>> pop_mean_mu1\n# 8.431764604523753\n \n# >>> pop_mean_mu2\n# 12.308235395476245\n\n# ALTERNATE SOLUTION\n\nsp.t.interval(alpha=0.95, df = n-1,loc=xbar, scale=sd/m.sqrt(n))\n\n# >>> sp.t.interval(alpha=0.95, df = n-1,loc=xbar, scale=sam_std/m.sqrt(n))\n# (8.431764604523753, 12.308235395476245)\n\n# Conclusion: Average return of the stock will be between 8.4 and 12.3 \n# with a confidence of 95%\n","sub_path":"samples/average_returns_from_stocks/teabull.py","file_name":"teabull.py","file_ext":"py","file_size_in_byte":1489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"473307703","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Aug 8 10:59:22 2019\n\n@author: didelani\n\"\"\"\nimport re\nimport numpy as np\nimport json\nfrom flair.data import Sentence\nfrom flair.models import SequenceTagger\nimport logging\n\nlogger = logging.getLogger(__name__)\n\n# model_path = 'model/best-model.pt'\ntransf_dir = 'data/transformation/'\n\n\ndef read_ne_dict(data_dir_file):\n with open(data_dir_file) as f:\n text_lines = f.readlines()\n\n token_cnt_probs = []\n for line in text_lines:\n tok_cnt_prob = line.strip().split('\\t')\n token, count, prob = tok_cnt_prob\n token_cnt_probs.append([token, count, prob])\n\n return token_cnt_probs\n\n\ndef get_tagged_sentence(sent):\n tagged_sentence = ''\n for token, ne in sent:\n tagged_sentence += token\n if ne != 'O':\n tagged_sentence += '<'+ne+'>' + ' '\n else:\n tagged_sentence += ' '\n return tagged_sentence\n\n\ndef take_to_bio_format(labels):\n bio_labels = []\n for k, label in enumerate(labels):\n ne = label\n if k > 0 and len(labels[k - 1]) > 2:\n prev_ne = labels[k - 1]\n else:\n prev_ne = 'O'\n new_ne = ne\n if new_ne != 'O':\n ne = 'B-' + new_ne\n if new_ne == prev_ne:\n ne = 'I-' + new_ne\n\n bio_labels.append(ne)\n\n return bio_labels\n\ndef identify_private_tokens_in_sentences(sents, model):\n sentences = [Sentence(sent.strip()) for sent in sents]\n # predict tags and print\n model.predict(sentences)\n tagged_sentences = [sent.to_tagged_string() for sent in sentences]\n\n all_tagged_sents, all_coNLL_format_tags = [], []\n\n for tagged_sent in tagged_sentences:\n tagged_words = tagged_sent.split()\n coNLL_format_tags = []\n\n for k, tagged_word in enumerate(tagged_words):\n token = tagged_word\n if token in [\"\", \"[noise1]\", \"[noise2]\", \"umm\", \"ahh\"]:\n tag = 'O'\n coNLL_format_tags.append([token, tag])\n continue\n if token.startswith('<') and token.endswith('>'):\n continue\n\n if k < len(tagged_words) - 1:\n next_token = tagged_words[k + 1]\n else:\n 
next_token = ''\n\n if next_token.startswith('<') and next_token.endswith('>'):\n tag = next_token[1:-1]\n else:\n tag = 'O'\n if tag != 'O':\n coNLL_format_tags.append([token, tag[2:]])\n else:\n coNLL_format_tags.append([token, tag])\n\n unzip = list(zip(*coNLL_format_tags))\n tags = take_to_bio_format(list(unzip[1]))\n coNLL_format_tags = list(zip(list(unzip[0]), tags))\n\n all_tagged_sents.append(tagged_sent)\n all_coNLL_format_tags.append(coNLL_format_tags)\n\n return all_tagged_sents, all_coNLL_format_tags\n\n\ndef identify_private_tokens(sentr, model):\n\n sentence = Sentence(sentr.strip())\n\n # predict tags and print\n model.predict(sentence)\n tagged_sent = sentence.to_tagged_string()\n\n coNLL_format_tags = []\n\n tagged_words = tagged_sent.split()\n for k, tagged_word in enumerate(tagged_words):\n token = tagged_word\n if token.startswith('<') and token.endswith('>'):\n continue\n\n if k < len(tagged_words) - 1:\n next_token = tagged_words[k + 1]\n else:\n next_token = ''\n\n if next_token.startswith('<') and next_token.endswith('>'):\n tag = next_token[1:-1]\n else:\n tag = 'O'\n if tag != 'O':\n coNLL_format_tags.append([token, tag[2:]])\n else:\n coNLL_format_tags.append([token, tag])\n\n unzip = list(zip(*coNLL_format_tags))\n tags = take_to_bio_format(list(unzip[1]))\n coNLL_format_tags = list(zip(list(unzip[0]), tags))\n\n return tagged_sent, coNLL_format_tags\n\n\ndef getTaggedString(per_sent_tokens, predicted_tags):\n token_idxs = np.arange(len(per_sent_tokens))\n tagged_string = \"\"\n t = 0\n while t < len(token_idxs):\n token, tag = per_sent_tokens[t], predicted_tags[t]\n\n k = t\n new_token = token + ' '\n while (k + 1 < len(per_sent_tokens) and\n len(per_sent_tokens[k + 1]) > 0 and\n tag != 'O' and tag[:2] == 'B-'):\n\n n_tok, n_ne = per_sent_tokens[k + 1], predicted_tags[k + 1]\n if tag[2:] == n_ne[2:] and n_ne[:2] == 'I-':\n new_token += n_tok + ' '\n k += 1\n else:\n break\n t = k\n t += 1\n\n if tag != \"O\":\n tagged_string += new_token[:-1] + \" \"\n #tagged_string += \"'\" + new_token[:-1]+\"'<\"+tag[2:]+\"> \"\n else:\n tagged_string += token + \" \"\n\n\n return tagged_string\n\n\ndef check_multiword(sent_o, tag_prefix='singleword'):\n sent_new = []\n n = len(sent_o)\n i = 0\n while i < n:\n word_feat = sent_o[i]\n if word_feat[1] != 'O':\n j = i\n act_label = word_feat[1][2:]\n n_first_ne = word_feat[0]\n word_feat = (n_first_ne, word_feat[1])\n while j+1 < n and sent_o[j+1][1][2:] == act_label:\n tag = re.split('[_ -]', act_label)[-1]\n if tag_prefix == 'multiword':\n tag = 'MULTI-WORD_'+tag\n else:\n tag = 'B-'+tag\n word_feat = (n_first_ne, tag)\n j += 1\n i = j\n # change single-word expression to have multiword NE type\n '''\n if tag_prefix == 'multiword':\n tag = re.split('[_ -]', act_label)[-1]\n tag = 'MULTI-WORD_'+tag\n word_feat = (n_first_ne, tag)\n '''\n sent_new.append(word_feat)\n i += 1\n return sent_new\n\n\ndef anonymize_sentence_singleword(sent, ne_table_list, nes_to_idxs):\n N_words = 50\n\n for ne_label, idx in nes_to_idxs.items():\n if ne_label == 'O':\n continue\n ne_table = ne_table_list[idx]\n\n N_popNE = len(ne_table)\n ne_prob = list(zip(*ne_table))[-1]\n ne_list = list(zip(*ne_table))[0]\n sel_ne_ids = np.random.choice(range(N_popNE), (1, N_words), p=ne_prob)\n sel_ne_ids = sel_ne_ids.flatten()\n\n new_sent = []\n # print(sent)\n per_NE = dict()\n per_no = 0\n for k, word_label in enumerate(sent):\n token, ne = word_label\n N_words = len(sent)\n new_token = token\n if ne == ne_label:\n if token not in per_NE:\n ne_idx = 
sel_ne_ids[per_no]\n new_token = ne_list[ne_idx]\n per_no += 1\n per_NE[token] = new_token\n else:\n new_token = per_NE[token]\n # print(token, new_token)\n new_sent.append((new_token, ne))\n\n sent = new_sent\n\n new_sent = sent\n\n return new_sent\n\n\n# multiword to multiword text transformation\ndef anonymize_sentence_multiword(sent, ne_table_list, nes_to_idxs):\n sent = check_multiword(sent, tag_prefix='multiword')\n\n N_words = 50\n\n for ne_label, idx in nes_to_idxs.items():\n if ne_label == 'O' or 'MULTI-WORD' not in ne_label:\n continue\n ne_table = ne_table_list[idx]\n\n N_popNE = len(ne_table)\n ne_prob = list(zip(*ne_table))[-1]\n ne_list = list(zip(*ne_table))[0]\n sel_ne_ids = np.random.choice(range(N_popNE), (1, N_words), p=ne_prob)\n sel_ne_ids = sel_ne_ids.flatten()\n\n new_sent = []\n\n # if 'MULTI-WORD' in ne_label:\n # sent = check_multiword(sent, tag_prefix='multiword')\n\n per_NE = dict()\n per_no = 0\n for k, word_label in enumerate(sent):\n token, ne = word_label\n N_words = len(sent)\n new_token = token\n if ne == ne_label:\n if token not in per_NE:\n ne_idx = sel_ne_ids[per_no]\n new_token = ne_list[ne_idx]\n per_no += 1\n per_NE[token] = new_token\n else:\n new_token = per_NE[token]\n # print(token, new_token)\n new_sent.append((new_token, ne))\n\n sent = new_sent\n\n new_sent = anonymize_sentence_singleword(sent, ne_table_list, nes_to_idxs)\n\n anonynimized_sentence = []\n for token, tag in new_sent:\n multi_words = token.split()\n new_tag = re.split('[_ -]', tag)[-1]\n if 'MULTI-WORD' in tag:\n anonynimized_sentence.append([multi_words[0], 'B-' + new_tag])\n for word in multi_words[1:]:\n anonynimized_sentence.append([word, 'I-' + new_tag])\n else:\n anonynimized_sentence.append([token, tag])\n return anonynimized_sentence\n\n\ndef anonymize_corpus_placeholder(sent):\n sent_ph = []\n new_sent = []\n\n for k, word_label in enumerate(sent):\n token, ne = word_label\n new_token = token\n if ne != 'O':\n # new_token = 'PLACEHOLDER'\n new_token = '▮▮▮▮▮'\n\n new_sent.append((new_token, ne))\n sent_ph.append((token, ne))\n\n return new_sent\n\n\ndef transform_private_tokens(sentence, ne_table_list, nes_to_idxs, i=0):\n if i == 'REDACT':\n logger.info(\"Placeholder selected: {i}\")\n new_sent = anonymize_corpus_placeholder(sentence)\n elif i == 'WORD':\n logger.info(\"Word-by-word selected: {i}\")\n new_sent = anonymize_sentence_singleword(sentence,\n ne_table_list,\n nes_to_idxs)\n elif i == 'FULL':\n logger.info(\"Full-entity selected: {i}\")\n new_sent = anonymize_sentence_multiword(sentence,\n ne_table_list,\n nes_to_idxs)\n else:\n pass\n\n tokens, tags = list(zip(*new_sent))\n\n tagged_string = getTaggedString(list(tokens), list(tags))\n\n return tagged_string\n\n\ndef get_named_entities():\n\n with open(transf_dir + 'named_entity_to_idx.json') as f:\n nes_to_idxs = json.load(f)\n\n ne_table_list = ['' for _ in range(len(nes_to_idxs))]\n for ne, idx in nes_to_idxs.items():\n ne_table_list[idx] = read_ne_dict(transf_dir + ne + '.tsv')\n\n return nes_to_idxs, ne_table_list\n\n\nif __name__ == \"__main__\":\n\n import argparse\n parser = argparse.ArgumentParser(description='Transform texts removing sensitive words, named entities')\n parser.add_argument(\"-l\", \"--log\",\n dest=\"logLevel\",\n choices=['DEBUG', 'INFO',\n 'WARNING', 'ERROR',\n 'CRITICAL'],\n help=\"Set the logging level\",\n default='INFO')\n parser.add_argument('-r',\n help='sets the replace type',\n dest='replace_type',\n choices=['REDACT', 'WORD', 'FULL'],\n default='FULL')\n 
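# editor's note: hypothetical usage sketch for this CLI (file names are illustrative; the model path is the one commented near the top of this script):\n # python transformFast.py -r WORD -m model/best-model.pt input.txt output.txt\n 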
parser.add_argument('-m',\n help='model for named entity recognition',\n dest='model')\n parser.add_argument('-data_format',\n help='sets the data preprocessing format',\n dest='data_format',\n choices=['cased', 'uncased', 'cased_nopunct', 'uncased_nopunct'],\n default='cased')\n parser.add_argument('input',\n help=\"file of sentences\",\n type=argparse.FileType('r'))\n parser.add_argument('output',\n help=\"result file path\",\n type=argparse.FileType('w'))\n\n args = parser.parse_args()\n\n if args.model:\n model = args.model\n else:\n model = 'ner'\n\n transf_dir = transf_dir + 'bio_'+args.data_format+'/'\n\n logging.basicConfig(filename='text_transformer.log',\n filemode='w',\n level=getattr(logging,\n args.logLevel))\n\n # Get named entities\n nes_to_idxs, ne_table_list = get_named_entities()\n\n logger.info(\"File : {}\\n\".format(args.input))\n\n model = SequenceTagger.load(model)\n\n '''\n for sent in args.input:\n\n logger.info(\"Sentence: {}\\n\".format(sent))\n\n tagged_sent, tokens_tags = identify_private_tokens(sent, model)\n logger.info(\"Tagged sentence: {}\\n\".format(tagged_sent))\n\n tagged_string = transform_private_tokens(tokens_tags,\n ne_table_list,\n nes_to_idxs,\n i=args.replace_type)\n args.output.write(f\"{tagged_string}\\n\")\n '''\n\n all_sent = []\n for sent in args.input:\n all_sent.append(sent)\n\n all_tagged_sents, all_sents_tokens_tags = identify_private_tokens_in_sentences(all_sent, model)\n for i in range(len(all_tagged_sents)):\n tagged_sent = all_tagged_sents[i]\n tokens_tags = all_sents_tokens_tags[i]\n logger.info(\"Tagged sentence: {}\\n\".format(tagged_sent))\n\n tagged_string = transform_private_tokens(tokens_tags,\n ne_table_list,\n nes_to_idxs,\n i=args.replace_type)\n args.output.write(f\"{tagged_string}\\n\")\n\n\n args.output.close()\n","sub_path":"transformer/transformFast.py","file_name":"transformFast.py","file_ext":"py","file_size_in_byte":13681,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"516641586","text":"from pyfcm import FCMNotification\n\n# TITLE: Send a message to the mobile app\n# OVERVIEW: This function will send a message to the user with that licence plate\n# OVERVIEW: this is token based, so it is safe.\ndef send_message_to_pay(token):\n\t# create the pusher service\n\tpush_service = FCMNotification(api_key=\"AAAAu7QlcNQ:APA91bE6r3fHQiI0fiVqxaBTdhz9vaEMcVmSqO_SqmhBcNrV3RgNAzRaCt4Tv2-sbvdN7cYivRZffOgPOuq4mf4Cv1dWgg4L65R5LBNgEnURnPjWm-zduNxAzwfM4rdnBmMxjeit3zwy\")\n\n\t# get the user to send via the token\n\tregistration_id = token\n\t# create the message\n\tmessage_title = \"Payment\"\n\tmessage_body = \"Hi, would you like to pay?\"\n\t# send the message\n\tresult = push_service.notify_single_device(registration_id=registration_id, message_title=message_title, message_body=message_body)\n\t# return output for debug purposes\n\treturn result","sub_path":"Server/sendMessage.py","file_name":"sendMessage.py","file_ext":"py","file_size_in_byte":825,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"390459432","text":"# Module 2: Text Analysis with NLTK\n# Stop Words\n# Author: Dr. Alfred\n\n\nfrom nltk.tokenize import word_tokenize\n\ntext = \"\"\" Dostoevsky was the son of a doctor. \nHis parents were very hard-working and deeply religious people,\nbut so poor that they lived with their five children in only\ntwo rooms. 
The father and mother spent their evenings\nin reading aloud to their children, generally from books of\na serious character.\"\"\"\n\n# Create a set of stop words\nstop_words = ['.',',','a','they','the','his','so','and','were','from','that','of','in','only','with','to']\n\nprint(word_tokenize(text))\n\n# Remove stop words\ntext = word_tokenize(text.lower())\n\nfiltered = [word for word in text if word not in stop_words]\nprint(\"\\n----- After Filtering the Stop Words -----\\n\")\nprint(filtered)\n","sub_path":"exercises/module2_2_stopwords.py","file_name":"module2_2_stopwords.py","file_ext":"py","file_size_in_byte":779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"46712977","text":"import cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport time\nimport serial\nimport sys\n\nMODE = 'TX2'\n#MODE = 'debug'\n\nSKIP = False\n\ncamera_port = 0\n\nREADY_delay = 15\nFORWARD_1_delay = 15\nFORWARD_1_pwm = 60\nSLOWDOWN_1_pwm = 20\nSTOP_1_delay = 5\nGRAB_delay = 6\nFORWARD_2_delay = 5\nFORWARD_2_pwm = 60\nSTOP_1_delay = 5\nSLOWDOWN_2_pwm = 20\nSTOP_2_delay = 5\nFORWARD_3_pwm = 40\nSTOP_3_delay = 10\n\nif MODE == 'TX2':\n serialCom = '/dev/ttyACM0'\n ser = serial.Serial(serialCom, 9600)\n\ndef MOTION_STATE_PWM(MOTION_STATE):\n pwmL, pwmR = 0,0\n if MOTION_STATE == 'READY':\n pwmL, pwmR = 0,0\n \n if MOTION_STATE == 'FORWARD_1':\n pwmL, pwmR = FORWARD_1_pwm,FORWARD_1_pwm\n \n if MOTION_STATE == 'SLOWDOWN_1':\n pwmL, pwmR = SLOWDOWN_1_pwm,SLOWDOWN_1_pwm\n \n if MOTION_STATE == 'STOP_1':\n pwmL, pwmR = 0,0\n \n if MOTION_STATE == 'GRAB':\n pwmL, pwmR = 0,0\n \n if MOTION_STATE == 'FORWARD_2':\n pwmL, pwmR = FORWARD_2_pwm,FORWARD_2_pwm\n \n if MOTION_STATE == 'SLOWDOWN_2':\n pwmL, pwmR = SLOWDOWN_2_pwm,SLOWDOWN_2_pwm\n \n if MOTION_STATE == 'STOP_2':\n pwmL, pwmR = 0,0\n \n if MOTION_STATE == 'FORWARD_3':\n pwmL, pwmR = FORWARD_3_pwm,FORWARD_3_pwm\n \n if MOTION_STATE == 'STOP_3':\n pwmL, pwmR = 0,0\n \n return pwmL, pwmR\n \n\ndef MOTION_STATE_LEDs(MOTION_STATE):\n A,B,C,D = 1,1,1,1\n \n if MOTION_STATE == 'CARROT':\n A,B,C,D = 2,2,2,2 # Red\n \n if MOTION_STATE == 'READY':\n A,B,C,D = 7,7,7,7 # Megenta\n \n if MOTION_STATE == 'FORWARD_1':\n A,B,C,D = 5,5,5,5 # Cyan\n \n if MOTION_STATE == 'SLOWDOWN_1':\n A,B,C,D = 6,6,6,6 # YELLOW\n \n if MOTION_STATE == 'STOP_1':\n A,B,C,D = 7,7,7,7 # Megenta\n \n if MOTION_STATE == 'GRAB':\n A,B,C,D = 1,1,1,1 # White\n \n if MOTION_STATE == 'FORWARD_2':\n A,B,C,D = 5,5,5,5 # Cyan\n \n if MOTION_STATE == 'SLOWDOWN_2':\n A,B,C,D = 6,6,6,6 # YELLOW\n \n if MOTION_STATE == 'STOP_2':\n A,B,C,D = 7,7,7,7 # Megenta\n \n if MOTION_STATE == 'FORWARD_3':\n A,B,C,D = 5,5,5,5 # Cyan\n \n if MOTION_STATE == 'STOP_3':\n A,B,C,D = 7,7,7,7 # Megenta\n \n print(\"A B \",A,B)\n print(\"C D \",C,D)\n return A,B,C,D\n\n\ndef Num2Str(num):\n if str(type(num)) == \"\":\n converted = str(num)\n if len(converted) > 3:\n print(\"Error: invalid num length\\nlen:\",len(converted))\n return \"!!!\"\n if len(converted) == 1:\n converted = \"00\" + converted\n if len(converted) == 2:\n converted = \"0\" + converted\n return str(converted)\n else:\n print(\"Error: not int\\ndatatype:\",type(num))\n return \"!!!\"\n\ndef write_Serial(sender=0,state=0,power=0,motion=0,pwmL=0,pwmR=0,ledA=0,ledB=0,ledC=0,ledD=0):\n pwmL = Num2Str(pwmL)\n pwmR = Num2Str(pwmR)\n print(\"pwmL:\",pwmL)\n print(\"pwmR:\",pwmR)\n signalStr = str(sender) + str(state) + str(power) + str(motion)\n signalStr += str(pwmL) + str(pwmR) + str(ledA) + str(ledB) 
+ str(ledC) + str(ledD)\n try:\n #print(\"Start writing serial...\")\n inputStr = str(signalStr) + str('e')\n if len(inputStr) == 15:\n print(\"py write:\",inputStr)\n if MODE == 'TX2':\n ser.write(str.encode(inputStr)) \n time.sleep(0.05) \n else:\n print(\"Error: serial inputStr length:\",len(inputStr))\n \n except KeyboardInterrupt:\n ser.close()\n print(\"keyboard interrupted!\")\n\n\ndef find_carrot(frame,least_area,Hmin_orange,Smin_orange,Vmin_orange,Hmax_orange,Smax_orange,Vmax_orange):\n \n \"\"\" thresholding \"\"\"\n cvted_img = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n cvted_img = cv2.cvtColor(cvted_img, cv2.COLOR_RGB2HSV)\n\n lower_orange = np.array([Hmin_orange, Smin_orange, Vmin_orange])\n upper_orange = np.array([Hmax_orange, Smax_orange, Vmax_orange])\n mask_orange = cv2.inRange(cvted_img, lower_orange, upper_orange)\n #cv2.imshow(\"orange mask\", mask_orange)\n frame_carrot = mask_orange\n \n \"\"\" calculate area \"\"\"\n Area = cv2.countNonZero(mask_orange)\n RegionArea = frame.shape[0]*frame.shape[1]\n Perc = float(Area/RegionArea*100)\n #print(Perc, carrot)\n if Perc > least_area:\n return Perc, True, frame_carrot\n else:\n return Perc, False, frame_carrot\n\ndef find_Carrot(frame):\n\n Hmin_orange_phase1 = 9\n Smin_orange_phase1 = 55\n Vmin_orange_phase1 = 56\n Hmax_orange_phase1 = 25\n Smax_orange_phase1 = 255\n Vmax_orange_phase1 = 255\n\n frame = cv2.resize(frame, (640, 480))\n\n x = 240\n y = 100\n w = 70\n h = 90\n\n polygon = np.array([[x,y], [x,y+h], [x+w,y+h], [x+w,y]])\n cv2.polylines(frame,pts=[polygon],isClosed=True,color=(180,70,25),thickness=3)\n\n cv2.imshow(\"find carrot\",frame)\n\n \"\"\" cropping \"\"\"\n crop = frame[y:y+h, x:x+w]\n cv2.imshow(\"cropped\",crop)\n carrot_area, carrot, frame_carrot = find_carrot(crop,30,Hmin_orange_phase1,Smin_orange_phase1,Vmin_orange_phase1,Hmax_orange_phase1,Vmax_orange_phase1,Vmax_orange_phase1)\n\n #cv2.imshow(\"carrot\",frame_carrot)\n return carrot\n\n\n\ndef find_box(frame): #Or ultra sensor\n write_Serial(1,2,0,'E',0,0,1,1,1,1)\n return \"LOCATIONBOX\"\n\n\ndef grab_Fruit(): # to be determined\n write_Serial(1,2,0,'g',0,0,1,1,1,1)\n time.sleep(20)\n \ndef drop_Fruit():\n write_Serial(1,2,0,'d',0,0,1,1,1,1)\n time.sleep(20)\n\n\ncap = cv2.VideoCapture(camera_port) # cap = cv2.VideoCapture(cv2.CAP_DSHOW + camera_port)\nstate = 'READY'\nFruit = \"void\"\nif SKIP == True:\n state = 'SLOWDOWN_2'\n \nwhile cap.isOpened(): # Capture frame-by-frame\n ret, frame = cap.read()\n pwmL, pwmR = 0, 0\n if ret == True:\n if state == 'READY':\n print(\"\\nstate:\",state)\n pwmL,pwmR = MOTION_STATE_PWM(state)\n ledA,ledB,ledC,ledD = MOTION_STATE_LEDs(state)\n write_Serial(1,2,0,0,pwmL,pwmR,ledA,ledB,ledC,ledD)\n time.sleep(READY_delay)\n state = 'FORWARD_1'\n \n if state == 'FORWARD_1':\n print(\"\\nstate:\",state)\n pwmL,pwmR = MOTION_STATE_PWM(state)\n ledA,ledB,ledC,ledD = MOTION_STATE_LEDs(state)\n write_Serial(1,2,1,0,pwmL,pwmR,ledA,ledB,ledC,ledD)\n time.sleep(FORWARD_1_delay)\n state = 'SLOWDOWN_1'\n\n if state == 'SLOWDOWN_1':\n print(\"\\nstate:\",state)\n pwmL,pwmR = MOTION_STATE_PWM(state)\n ledA,ledB,ledC,ledD = MOTION_STATE_LEDs(state)\n write_Serial(1,2,1,0,pwmL,pwmR,ledA,ledB,ledC,ledD)\n Carrot = find_Carrot(frame)\n print(\"Carrot found:\",Carrot)\n if Carrot == True:\n state = 'STOP_1'\n \n\n if state == 'STOP_1':\n print(\"\\nstate:\",state)\n pwmL,pwmR = MOTION_STATE_PWM(state)\n ledA,ledB,ledC,ledD = MOTION_STATE_LEDs(Fruit) # Red if find CARROT\n write_Serial(1,2,0,0,pwmL,pwmR,ledA,ledB,ledC,ledD)\n 
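# editor's note (added comment): pwm is already 0 in STOP_1, so this pause lets the chassis settle before the GRAB state\n 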
time.sleep(STOP_1_delay)\n state = \"GRAB\"\n \n \n if state == \"GRAB\":\n print(\"\\nstate:\",state)\n pwmL,pwmR = MOTION_STATE_PWM(state)\n ledA,ledB,ledC,ledD = MOTION_STATE_LEDs(state)\n write_Serial(1,2,0,0,pwmL,pwmR,ledA,ledB,ledC,ledD)\n print(\"Start GRABBING Fruit...\")\n grab_Fruit()\n print(\"Fruit GRABBED!\")\n \n time.sleep(GRAB_delay)\n state = \"FORWARD_2\"\n \n \n if state == 'FORWARD_2':\n print(\"\\nstate:\",state)\n pwmL,pwmR = MOTION_STATE_PWM(state)\n ledA,ledB,ledC,ledD = MOTION_STATE_LEDs(state)\n write_Serial(1,2,1,0,pwmL,pwmR,ledA,ledB,ledC,ledD)\n time.sleep(FORWARD_2_delay)\n state = 'SLOWDOWN_2'\n\n \n if state == 'SLOWDOWN_2':\n print(\"\\nstate:\",state)\n pwmL,pwmR = MOTION_STATE_PWM(state)\n ledA,ledB,ledC,ledD = MOTION_STATE_LEDs(state)\n write_Serial(1,2,1,'h',pwmL,pwmR,ledA,ledB,ledC,ledD)\n \n while ser.in_waiting:\n echoStr = ser.readline().decode()\n print('arduino:', echoStr)\n if echoStr == \"-------- STOP --------\":\n print(\"STOP\")\n write_Serial(1,2,0,0,0,0,2,2,2,2)\n break\n \n time.sleep(0.1)\n\n if cv2.waitKey(25) & 0xFF == ord(\"q\"):\n write_Serial(1,2,0,0,0,0,2,2,2,2)\n sys.exit()\n break\n else:\n print(\"Error: no ret\")\n break\n \ncap.release()\ncv2.destroyAllWindows()\n\nif MODE == 'TX2':\n ser.close()\n","sub_path":"Carrot_Final.py","file_name":"Carrot_Final.py","file_ext":"py","file_size_in_byte":8838,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"554803925","text":"from django.shortcuts import render\nfrom django.template.loader import get_template\nfrom django.template import Context\nfrom django.http import HttpResponse\nfrom django.views.decorators.csrf import csrf_exempt\n\nfrom hello.constants import TOTAL_POKEMON\nfrom hello.utils import random_id, image_file, pokemon_name\nfrom hello.models import Attempt, check\n\nmy_strings = ['string1', 'string2', ]\n\n\ndef index(request):\n # Delete the session if a user interrupted a game to navigate here\n if \"attempt_id\" in request.session:\n del request.session['attempt_id']\n\n return render(request, 'index.html')\n\n\n@csrf_exempt\ndef game(request):\n # If attempt_id is set, a game is being continued\n if \"attempt_id\" in request.session:\n attempt = Attempt.objects.get(pk=request.session['attempt_id'])\n if attempt.complete:\n return new_high_score(request, attempt)\n last_guess = request.POST.get('poke_name')\n attempt.last_guess = last_guess\n attempt.save()\n\n if check(attempt):\n attempt.increment_score()\n # Add the last pokemon's id to the list of guessed pokemon\n attempt.append_poke_id()\n\n if attempt.complete:\n # Player has reached the maximum score\n return new_high_score(request, attempt)\n else:\n # Send the player to the high score page if they have a high score\n if attempt.is_high_score:\n return new_high_score(request, attempt)\n return game_over(request, attempt)\n\n # Otherwise, a game is being started\n else:\n attempt = Attempt.objects.create()\n request.session['attempt_id'] = attempt.id\n\n pokemon_id = random_id(attempt)\n attempt.last_poke_id = pokemon_id\n attempt.save()\n image = image_file(pokemon_id)\n poke_name = pokemon_name(pokemon_id)\n\n data = {\n 'image_file': \"images/%s\" % image,\n 'pokemon_name': poke_name,\n 'player_score': attempt.score,\n }\n\n template = get_template('game.html')\n html = template.render(Context(data))\n return HttpResponse(html)\n\n\ndef game_over(request, attempt):\n '''\n This page is returned after a player loses\n '''\n data = {\n 'pokemon_name': 
pokemon_name(attempt.last_poke_id).capitalize(),\n 'player_score': attempt.score,\n 'max_score': TOTAL_POKEMON,\n }\n template = get_template('game_over.html')\n html = template.render(Context(data))\n return HttpResponse(html)\n\n\ndef new_high_score(request, attempt):\n '''\n Prompt player to enter name for high score\n '''\n complete = attempt.score == TOTAL_POKEMON\n data = {\n 'player_score': attempt.score,\n 'max_score': TOTAL_POKEMON,\n 'complete': complete,\n }\n template = get_template('new_high_score.html')\n html = template.render(Context(data))\n return HttpResponse(html)\n\n\n@csrf_exempt\ndef scores(request):\n '''\n Display a table of high scores\n '''\n if \"attempt_id\" in request.session:\n # Record the player's name\n attempt = Attempt.objects.get(pk=request.session['attempt_id'])\n attempt.player_name = request.POST.get('player_name', 'unknown')\n attempt.save()\n\n # End the player's session\n del request.session['attempt_id']\n\n # Return the top 10 scores\n all_attempts = Attempt.objects.all()\n if len(all_attempts) > 10:\n attempts = all_attempts[:10]\n else:\n attempts = all_attempts\n return render(request, 'high_scores.html',\n {'attempts': attempts})\n\n\ndef connect(request):\n '''\n Display connect four game\n '''\n return render(request, 'connect_four.html')\n","sub_path":"hello/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3701,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"110692328","text":"from collections import deque\n\nh, w = map(int, input().split())\ninf = 10 ** 6\na = [[0] * (w + 2)] + [[0] + [inf] * w + [0] for _ in range(h)] + [[0] * (w + 2)]\nd = deque()\n\nfor i in range(h):\n for j, k in enumerate(input()):\n if k == \"#\":\n a[i + 1][j + 1] = 0\n d.append((i + 1, j + 1))\n\nstp = ((0, 1), (0, -1), (1, 0), (-1, 0))\nres = 0\n\nwhile d:\n x, y = d.popleft()\n for k, l in stp:\n nx, ny = x + k, y + l\n if a[x][y] + 1 < a[nx][ny]:\n a[nx][ny] = a[x][y] + 1\n res = max(res, a[nx][ny])\n d.append((nx, ny))\nprint(res)\n","sub_path":"Python_codes/p03053/s216613200.py","file_name":"s216613200.py","file_ext":"py","file_size_in_byte":604,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"463761762","text":"# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nclass Solution:\n def isValidBST(self, root):\n \"\"\"\n :type root: TreeNode\n :rtype: bool\n \"\"\"\n if not root:\n return True\n res = []\n self.inorder(root, res)\n return res == sorted(res) and len(res) == len(set(res))\n\n def inorder(self, root, res):\n cur = root\n if cur.left:\n self.inorder(cur.left, res)\n res.append(cur.val)\n if cur.right:\n self.inorder(cur.right, res)","sub_path":"0-100/98_validate_bst.py","file_name":"98_validate_bst.py","file_ext":"py","file_size_in_byte":648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"173296170","text":"from django.template import Library\nfrom django.utils import timezone\n\nregister = Library()\n\n@register.inclusion_tag('tags/bets_won.html')\ndef show_won_chart(bets):\n\n won = 0\n lost = 0\n\n for item in bets:\n if item.bet in ['1', '2', 'X']:\n if item.bet == item.bet_output:\n won += 1\n else:\n lost += 1\n else:\n if item.bet == '1X':\n if item.bet_output in ['1', 'X']:\n won += 1\n else:\n lost += 1\n else:\n if item.bet_output in ['2', 'X']:\n won += 1\n 
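# editor's note (added comment): any other outcome means this double-chance bet (the non-'1X' branch) lost\n 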
else:\n lost += 1\n\n return {'won': won, 'lost': lost}\n\n@register.inclusion_tag('tags/bets_type.html')\ndef show_bets_types(bets):\n\n withX = 0\n withoutX = 0\n\n for item in bets:\n if item.bet in ['1', '2', 'X']:\n withoutX += 1\n else:\n withX += 1\n\n\n return {'withX': withX, 'withoutX': withoutX}\n\n@register.inclusion_tag('tags/bet_avg.html')\ndef show_bets_avg(bets):\n\n summed = 0\n count = 0\n\n for item in bets:\n if item.bet in ['1', '2', 'X']:\n if item.bet == item.bet_output:\n summed += 3\n count +=1\n else:\n count +=1\n else:\n if item.bet == '1X':\n if item.bet_output in ['1', 'X']:\n summed +=1\n count +=1\n else:\n count +=1\n else:\n if item.bet_output in ['2', 'X']:\n summed +=1\n count +=1\n else:\n count +=1\n if count != 0:\n avg = round(summed/count, 2)\n else:\n avg = \"Brak ocenionych zakładów\"\n\n return {'avg': avg}\n","sub_path":"mainapp/templatetags/mainapp_tags.py","file_name":"mainapp_tags.py","file_ext":"py","file_size_in_byte":1849,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"359296741","text":"tests = input()\ntests = int(tests)\n\nlists = []\nfor i in range(tests):\n list1 = list(map(int, input().split()))\n lists.append(list1)\n\ndef IsPrime(num):\n if num == 1:\n return False\n\n for i in range(2, num // 2 + 1):\n if num % i == 0:\n return False\n return True\n\ndef func(object):\n base = object[0] + object[1]\n temp = object[0] + object[1]\n temp = int(temp)\n while not IsPrime(temp+1):\n temp += 1\n print(temp+1-base)\n\nfor i in range(lists.__len__()):\n func(lists[i])","sub_path":"Code/CodeRecords/2201/60590/235303.py","file_name":"235303.py","file_ext":"py","file_size_in_byte":529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"400809679","text":"from OpenGL.GL import *\t\t\t# contains standard OpenGL functions\nfrom OpenGL.GLUT import *\nfrom OpenGL.GLU import *\n\n\nclass Window:\n\n\tdef __init__(self, x, y, height, width):\n\t\tglutInit()\t\n\t\tglutInitDisplayMode(GLUT_RGBA | GLUT_DOUBLE | GLUT_ALPHA | GLUT_DEPTH) \n\t\tglutInitWindowSize(height,width)\n\t\tglutInitWindowPosition(x,y)\n\t\tglutCreateWindow(\"Phys-Lab: Wappow!!\")\n\t\tglutDisplayFunc(self.refresh) # set draw function callback\n\t\t# glutIdleFunc(self.refres) # draw all the time\n\t\tglutKeyboardFunc(self.keyPressed)\n\t\tglutMainLoop() \n\n\n\tdef refresh(self): # ondraw is called all the time\n\t\tglClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT) # clear the screen\n\t\tglLoadIdentity()\n\t\tglViewport(0, 0, 640, 480)\n\t\tglMatrixMode(GL_PROJECTION)\n\t\tglLoadIdentity()\n\t\tglOrtho(0.0, 640, 0.0, 480, 0.0, 1.0)\n\t\tglMatrixMode (GL_MODELVIEW)\n\t\tglLoadIdentity()\n\t\t##\n\t\t## custom draw here\n\t\t##\n\t\tglutSwapBuffers() \n\n\n\n\tdef keyPressed(self, *args):\n\t\tif args[0] == '\\033':\n\t\t\tsys.exit()\n\n\n\n\nw = Window(0, 0, 640, 480)\n\n","sub_path":"window.py","file_name":"window.py","file_ext":"py","file_size_in_byte":1122,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"85353815","text":"\n\n\"\"\"\n\nAuthor: Kagaya john \nTutorial 10 : Conditions\n\n\"\"\"\n\n\"\"\"\nPython Conditions\nPython Conditions and If statements\nPython supports the usual logical conditions from mathematics:\n\nEquals: a == b\nNot Equals: a != b\nLess than: a < b\nLess than or equal to: a <= b\nGreater than: a > b\nGreater than or equal to: a >= b\nThese conditions can be used in several ways, most 
commonly in \"if statements\" and loops.\n\nAn \"if statement\" is written by using the if keyword.\n\nExample\nIf statement:\"\"\"\n\na = 33\nb = 200\nif b > a: print(\"b is greater than a\")\n\n\"\"\"\nIn this example we use two variables, a and b, which are used as part \nof the if statement to test whether b is greater than a. As a is 33,\n and b is 200, we know that 200 is greater than 33, and so we print to screen that \"b is greater than a\".\"\"\"\n\n\"\"\"\nIndentation\nPython relies on indentation, using whitespace, to define scope in the code. \nOther programming languages often use curly-brackets for this purpose.\"\"\"\n\n\"\"\"\nExample\nStatements on new lines MUST use indentations:\"\"\"\n\na = 33\nb = 200\nif b > a:\n print(\"b is greater than a\")\n\n\n \"\"\"Example\nIf statement, without indentation:\n\na = 33\nb = 200\nif b > a:\nprint(\"b is greater than a\") # you will get an error\"\"\"\n\n\n","sub_path":"11_Python_Conditions/01_Conditions.py","file_name":"01_Conditions.py","file_ext":"py","file_size_in_byte":1218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"226964351","text":"'''\n#1.编写函数,接收两个正整数作为参数,返回一个元组,\n其中第一个元素为最大公约数,第二个元素为最小公倍数。\n'''\ndef gongyueshu(i,j):\n if i>j:\n i,j = j,i\n p = j*i\n \n for num in range(1,i):\n if (i%num==0)&(j%num==0):\n k=num\n #最大公约数\n\n\n\n \n for l in range(j,p+1):\n if(l%j==0)&(l%i==0):\n m=l#最小公倍数\n break\n\n \n list1=[k,m]\n print(list1)\n return(tuple(list1))\n \n \n \nprint(gongyueshu(9,82))\n'''\n2.编写函数,接受一个字符串作为参数,\n计算并打印传入字符串中数字,\n字母,空格,以及其它的个数。以及其它的个数。\n'''\n\ndef fun1(str1): \n shuzi = 0 \n zimu = 0 \n space = 0 \n other = 0 \n for i in str1: \n i = ord(i) \n if i >= 48 and i <=57:\n shuzi += 1 \n elif (i >= 65 and i <= 90) or (i >= 97 and i <= 122): \n zimu += 1 \n elif i == 32: \n space += 1 \n else: \n other += 1 \n print('字符串中数字{}个、字母{}个、空格{}个 、 其他字符{}个'.format(shuzi,zimu,space,other))\nstr2 = input(\"请输入验证字符:\")\nfun1(str2) \n\n","sub_path":"homework7/Group15/1910096 .py","file_name":"1910096 .py","file_ext":"py","file_size_in_byte":1338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"565437877","text":"first_name = 'Ariel'\nlast_name = 'Brewer'\n\n# Join a String and a variable that contains a String\nprint('Student first name: ' + first_name)\nprint('Student last name: ' + last_name)\n\n# Join a couple strings and a couple variables\nprint('Student name: ' + first_name + ' ' + last_name)\n# Note how the empty space is created between the first and last name\n\n\nage = 30\nnext_age = age + 1\nbirth_month = 'August'\nbirth_stone = 'peridot'\n\n\nprint(first_name + ' is ' + str(age) + ' years old , but she will turn ' + str(next_age) + ' in ' + birth_month + '.' 
+ 'Her birth stone is ' + birth_stone +'.')\n","sub_path":"python/test1.py","file_name":"test1.py","file_ext":"py","file_size_in_byte":596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"404478608","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Aug 10 11:54:30 2018\r\n\r\n@author: MaYongQiang\r\n\r\nAlgorithm overview:\r\nAn HMM model is made up of a \"five-tuple\":\r\n\r\n StatusSet: the set of hidden state values\r\n ObservedSet: the set of observation values\r\n TransProbMatrix: the state-transition probability matrix\r\n EmitProbMatrix: the emission probability matrix\r\n InitStatus: the initial state distribution.\r\n\r\n\"\"\"\r\nclass HMM(object):\r\n def __init__(self):\r\n self.trans_mat = {} # trans_mat[status][status] = int\r\n self.emit_mat = {} # emit_mat[status][observe] = int\r\n self.init_vec = {} # init_vec[status] = int\r\n self.state_count = {} # state_count[status] = int\r\n self.states = {}\r\n self.inited = False\r\n \r\n \r\n ","sub_path":"word/freq/HMM.py","file_name":"HMM.py","file_ext":"py","file_size_in_byte":711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"338087492","text":"# Adapted from https://github.com/kvfrans/parallel-trpo/blob/master/utils.py\nclass Shifter:\n def __init__(self, filter_mean=True):\n self.m = 0\n self.v = 0\n self.n = 0.\n self.filter_mean = filter_mean\n\n def state_dict(self):\n return {'m': self.m,\n 'v': self.v,\n 'n': self.n}\n\n def load_state_dict(self, saved):\n self.m = saved['m']\n self.v = saved['v']\n self.n = saved['n']\n\n def __call__(self, o):\n self.m = self.m * (self.n / (self.n + 1)) + o * 1 / (1 + self.n)\n self.v = self.v * (self.n / (self.n + 1)) + (o - self.m) ** 2 * 1 / (1 + self.n)\n self.std = (self.v + 1e-6) ** .5 # std\n self.n += 1\n if self.filter_mean:\n o_ = (o - self.m) / self.std\n else:\n o_ = o / self.std\n return o_\n","sub_path":"utils/shifter.py","file_name":"shifter.py","file_ext":"py","file_size_in_byte":858,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"427270955","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport numpy as np\nimport cv2\nimport glob\nfrom scipy import signal, misc\nimport matplotlib.pyplot as plt\n\n\n# In[2]:\n\n\npaths = sorted(glob.glob('../data/Photos/IMG*'))\nprint(paths)\n\n\n# In[3]:\n\n\nimages = []\n\nfor kImage in paths:\n image = cv2.imread(kImage)\n print(image.shape)\n ##\n images.append(image)\n\n\n# In[34]:\n\n\nstitcher = cv2.createStitcher() \n(status, stitched) = stitcher.stitch(images)\nprint(status)\nprint(stitched.shape)\n\n\n# In[35]:\n\n\nplt.imshow(stitched)\n\n\n# In[36]:\n\n\nwillis = stitched[750:1400,6400:6600,:]\n\n\n# In[47]:\n\n\nborder = 600\nstitched_new = stitched[border:stitched.shape[0]-border,3500+border:stitched.shape[1]-border,:]\n\n\n# In[48]:\n\n\nplt.imshow(stitched_new)\n\n\n# In[49]:\n\n\ncv2.imwrite('test.jpg', stitched_new)\n\n# Careful young man.. 
this is a dangerous road ahead..\n# cv2.imshow(\"Stitched\", stitched)\n# cv2.waitKey(0)\n\n\n# In[70]:\n\n\n\nrows, cols = stitched_new.shape[:2]\n\nvig_size = 3000\n\n# generating vignette mask using Gaussian kernels\nkernel_x = cv2.getGaussianKernel(cols,vig_size)\nkernel_y = cv2.getGaussianKernel(rows,vig_size)\nkernel = kernel_y * kernel_x.T\nmask = 255 * kernel / np.linalg.norm(kernel)\noutput = np.copy(stitched_new)\n\n# applying the mask to each channel in the input image\nfor i in range(3):\n output[:,:,i] = output[:,:,i] * mask\n\nplt.imshow(output) \n \n# cv2.imshow('Original', img)\n# cv2.imshow('Vignette', output)\n# cv2.waitKey(0)\n\n\n# In[71]:\n\n\n#lets find the willis tower\nscale = .2\ngray_image = cv2.cvtColor(output, cv2.COLOR_BGR2GRAY)\ngray_willis = cv2.cvtColor(willis, cv2.COLOR_BGR2GRAY)\n\ngray_image = cv2.resize(gray_image,None,fx=scale,fy=scale)\ngray_willis = cv2.resize(gray_willis,None,fx=scale,fy=scale)\n\n\n# In[72]:\n\n\nprint(gray_image.shape, gray_willis.shape)\n\n\n# In[73]:\n\n\n\nnoise_level = 0\ngray_willis_noise = gray_willis + np.random.randn(*gray_willis.shape) * noise_level\n\nplt.imshow(gray_willis_noise)\n\n\n# In[74]:\n\n\ncorr = signal.correlate2d(gray_image, gray_willis, boundary='fill', mode='same')\n\n\n# In[75]:\n\n\ny, x = np.unravel_index(np.argmax(corr), corr.shape)\n\n\n# In[76]:\n\n\nfig, (ax_orig, ax_template, ax_corr) = plt.subplots(3, 1,figsize=(6, 15))\n\nax_orig.imshow(gray_image, cmap='gray')\nax_orig.set_title('Chicago City')\nax_orig.set_axis_off()\n\nax_template.imshow(gray_willis, cmap='gray')\nax_template.set_title('Willis Tower')\nax_template.set_axis_off()\n\nax_corr.imshow(corr, cmap='gray')\nax_corr.set_title('Cross-correlation')\nax_corr.set_axis_off()\n\nax_orig.plot(x, y, 'ro')\n\nfig.show()\n\n\n# In[ ]:\n\n\n#PARKING LOT\n#stitched = cv2.copyMakeBorder(stitched, 10, 10, 10, 10, cv2.BORDER_CONSTANT, (0, 0, 0))\n\n# gray = cv2.cvtColor(stitched, cv2.COLOR_BGR2GRAY)\n# thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY)[1]\n\n# cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,\n# cv2.CHAIN_APPROX_SIMPLE)\n# cnts = imutils.grab_contours(cnts)\n# c = max(cnts, key=cv2.contourArea)\n\n# mask = np.zeros(thresh.shape, dtype=\"uint8\")\n# (x, y, w, h) = cv2.boundingRect(c)\n# cv2.rectangle(mask, (x, y), (x + w, y + h), 255, -1)\n\n# minRect = mask.copy()\n# sub = mask.copy()\n# while cv2.countNonZero(sub) > 0:\n# minRect = cv2.erode(minRect, None)\n# sub = cv2.subtract(minRect, thresh)\n\n# cnts = cv2.findContours(minRect.copy(), cv2.RETR_EXTERNAL,\n# cv2.CHAIN_APPROX_SIMPLE)\n# cnts = imutils.grab_contours(cnts)\n# c = max(cnts, key=cv2.contourArea)\n# (x, y, w, h) = cv2.boundingRect(c)\n\n# stitched = stitched[y:y + h, x:x + w]\n\n","sub_path":"challenges/Challenge 4 - Panorama.py","file_name":"Challenge 4 - Panorama.py","file_ext":"py","file_size_in_byte":3471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"9770448","text":"import math\n\ndef isPrime(i):\n if i==1: return False\n if i==2 or i==3: return True\n if i%2==0 or i%3==0: return False\n\n for j in range(5,math.isqrt(i),6):\n if i%j == 0: return False\n \n return True\n\n\ndef pFactors(n):\n if n==1: return 0\n res = []\n tmp = n\n i=2\n while(i0 else [n]\n\nprint(pFactors(813))","sub_path":"primefactors.py","file_name":"primefactors.py","file_ext":"py","file_size_in_byte":540,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} 
+{"seq_id":"554052687","text":"from django.urls import path, include\r\nfrom rest_framework_simplejwt import views as jwt_views\r\n\r\napp_name = \"asssa\"\r\nurlpatterns = [\r\n # APp Urls \r\n path(\"users/\", include(\"accounts.urls\", namespace=\"users\")),\r\n path(\"threads/\", include(\"threads.urls\", namespace=\"threads\")),\r\n path(\"clubs/\", include(\"clubs.urls\", namespace=\"clubs\")),\r\n path(\"comments/\", include(\"comment.urls\", namespace=\"comments\")),\r\n\r\n # Auth token urls\r\n path(\"token/\", jwt_views.TokenObtainPairView.as_view(), name=\"token_obtain_pair\"),\r\n path(\"token/refresh/\", jwt_views.TokenRefreshView.as_view(), name=\"token_refresh\"),\r\n]\r\n\r\n","sub_path":"asssa/api_urls.py","file_name":"api_urls.py","file_ext":"py","file_size_in_byte":628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"433621940","text":"import numpy as np\n\nclass k_NN:\n\n # k-近邻 一般k值取比较小的值 交叉验证选取最优的k值\n # 给定一个点和k值, 找出距离该点最近的k个点\n # 根据策略决定该点属于哪一类 如 哪一类的点多 这个点就归为那一类\n # 距离的类型有 欧式距离、Lp距离、Minkowski距离、曼哈顿距离\n\n#############################################################################\n\n def __init__(self, X):\n # X为训练数据 (d+1)*n d为数据维度 n为样本总数\n # 第d+1行的值为 样本所属的分类类型\n\n d, n = X.shape\n self.d = d-1\n self.n = n\n\n l = 0 # 当前的切分坐标轴\n # 生成kd树\n self.root = self.createkdTree(X, l)\n\n#############################################################################\n\n def getkdTree(self):\n return self.root\n\n##############################################################################\n\n def getk_NN(self):\n return self.knn\n\n###############################################################################\n\n def judge(self):\n # 判断分类类型\n cAll = np.array(self.knn)[:, self.d] # k个近邻的类型\n count = np.bincount(cAll) # cAll中每个元素出现的次数 下标为元素 值为次数\n c = np.argmax(count) # 最大值的下标\n return c\n\n###############################################################################\n\n def midSplit(self, l, X):\n # 将数据按坐标轴l上的中位数 分为两部分 并得到节点\n xl = X[l, :].flatten() # 当前切分坐标轴下的数据\n n = xl.size\n if np.mod(n, 2) == 0:\n # 为偶数的样本 则去掉第一个 使其为奇数 保证mid为存在的值\n mid = np.ceil(np.median(xl[1:n])) # 找出中位数\n else:\n mid = np.ceil(np.median(xl)) # 找出中位数\n\n midIdx = np.where(xl == mid)[0] # 中位数的下标\n lessIdx = np.where(xl < mid)[0] # 比中位数小的下标\n greaterIdx = np.where(xl > mid)[0] # 比中位数大的下标\n\n node = Node(X[:, midIdx]) # 中位数对应的点作为当前节点\n leftData = X[:, lessIdx] # 小于中位数的点作为左分支数据\n rightData = X[:, greaterIdx] # 大于中位数的点作为右分支数据\n\n return node, leftData, rightData\n\n#################################################################################\n\n def calculateDistance(self, x1, x2):\n # p >=1\n # p = 2 欧氏距离\n # p = 1 曼哈顿距离\n # p = inf 各个坐标最大距离\n Lp = np.power(np.sum(np.power(np.abs((x1-x2)), self.p)), 1/self.p)\n return Lp\n\n###############################################################################\n\n def createkdTree(self, data, j):\n\n # 递归实现\n # 非递归实现要借助 栈\n # node 当前节点\n # data 接下来要分类的左右两个分支\n # j 当前树的深度\n\n # 计算当前分类基于的维度\n l = np.mod(j, self.d)\n\n # 根据切分坐标轴 将数据切分 获得当前节点 及左右分支数据\n node, leftData, rightData = self.midSplit(l, data)\n if leftData.size > 0:\n # 左分支有数据 递归\n node.left = self.createkdTree(leftData, j+1)\n\n if rightData.size > 0:\n # 右分支有数据 递归\n node.right = self.createkdTree(rightData, j+1)\n\n # 返回当前节点及其子节点\n return node\n\n################################################################################\n\n def searchkdTree(self, x, p=2, k=1):\n # x为需要分类的点\n # p为距离的类型\n # p >=1\n # p = 2 欧氏距离\n # p = 1 曼哈顿距离\n # p = inf 
各个坐标最大距离\n\n # 也用递归查找吧 不想实现栈先 虽然可以用个数组存储经过的对象\n self.x = x\n self.p = p\n self.k = k # 要找的k近邻的个数\n self.knn = [] # 距离最近的k个点\n self.distance = [] # 对应k个点的距离\n\n self.search(self.root)\n\n#############################################################################\n\n def search(self, node, j = 0):\n # 根据比较当前深度下 获得切分维度\n # 然后 x在该维的值小于节点在该维的值 则左移\n # 大于等于则右移 直到叶节点\n # 然后递归回退 找寻符合条件的点 直到回退到根节点\n\n # 递归 走到底了\n if node is None:\n return\n\n # 递归\n # 判断是否是叶节点\n if node.isLeaf():\n # 到了叶节点 计算两点距离 存入距离小的点\n self.updateKNN(node)\n return\n\n # 递归 直到None或叶节点\n l = np.mod(j, self.d)\n if self.x[l] < node.x[l, 0]:\n # 正常递归\n self.search(node.left, j+1)\n\n # 回退\n if node.right is not None:\n # 右节点存在 判断该节点是否存在更小的值 若存在 递归查找该分支\n # if self.updateKNN(node.right): # 这样会导致重复比较 同一个值存入多次\n if self.isUpdate(node.right): # 改成只判断 不存入 防止重复存入\n self.search(node.right, j+1)\n else:\n # 正常递归\n self.search(node.right, j+1)\n\n # 回退\n if node.left is not None:\n # 左节点不为空 判断该节点是否存在更小的值 若存在 递归查找该分支\n # if self.updateKNN(node.left):\n if self.isUpdate(node.left): # 改成只判断 不存入 防止重复存入\n self.search(node.left, j+1)\n\n # 回退 计算当前节点与目标点的距离\n self.updateKNN(node)\n\n####################################################################################\n\n def updateKNN(self, node):\n # 获取满足条件的点\n n = node.x.shape[1]\n for i in range(n):\n # 计算当前节点中点与待分类点的距离\n xi = node.x[:, i]\n dis = self.calculateDistance(self.x, xi[0:self.d])\n if len(self.distance) < self.k:\n # 还不够k个数据 直接存入\n self.distance.append(dis)\n self.knn.append(xi)\n else:\n # 已经够k个了 找出已有的最大值 与当前值比较\n # 如果当前值较小 则取代该值\n maxDis = np.max(self.distance)\n if maxDis > dis:\n # 取代当前的最大值\n maxIdx = np.argmax(self.distance)\n self.distance[maxIdx] = dis\n self.knn[maxIdx] = xi\n\n###############################################################################\n\n def isUpdate(self, node):\n n = node.x.shape[1]\n for i in range(n):\n # 计算当前节点中点与待分类点的距离\n xi = node.x[:, i]\n dis = self.calculateDistance(self.x, xi[0:self.d])\n if len(self.distance) < self.k:\n # 还不够k个数据 直接存入\n return True\n\n else:\n # 已经够k个了 找出已有的最大值 与当前值比较\n # 如果当前值较小 则取代该值\n maxDis = np.max(self.distance)\n if maxDis > dis:\n return True\n return False\n\n#############################################################################\n\n# 节点类\nclass Node:\n\n def __init__(self, x):\n self.x = x # 当前节点的数据 可能包含多个样本点\n\n self.left = None\n self.right = None\n\n def isLeaf(self):\n return self.right is None and self.left is None\n\n","sub_path":"k_NN.py","file_name":"k_NN.py","file_ext":"py","file_size_in_byte":8018,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"513413610","text":"### PART OF FOIL LIBRARY\n# \n# Author: Konstantinos POLITIS\n# \n# Defines a Specific Instance of Foil : Wageningen \n# \n# To do list : \n# 1. Seperate propeller parts from foil parts \n# 2. Rework interpolation (for now linear) for V1, V2 values \n# 3. Add \"smoothing\" option (or extend option) for squared trailing edges\n# 4. Remove Use_Original Option : Create variants for Bseries propellers \n# using modified distributions of K(z)\n# \n# Note : To define a Wageningen Foil we have to specify \n# the geometry of the whole propeller:\n# Z : number of blades\n# EAR : extended area ratio\n# rR : radial location\n# \n# If we do not provide all the above then we cannot \n# define the basic characteristics of the foil.\n# \n# Therefore, we actually define the propeller to define \n# the foil ... this is a bit too much... 
but that's life \n# \n# \n# \n###\nfrom FOIL import afoil as foil\nfrom numpy import array as nparr\n\ndef WAGENINGEN(Z,EAR,rR,tLE_tmax=0.,tTE_tmax=0,Use_Original=1,Use_Smooth=0,s=0,Smooth_LE=0,x0s=0.1,x0p=0.1,ks=0.5,kp=0.5):\n \"\"\" WAGENINGEN Picker \n Inputs :\n Z : number of blades \n EAR : expanded area ratio\n rR : non dimensional reference radius (r/R)\n tLE_tmax, tTE_tmax : ratio of LE/TE thickness with respect to maximum thickness, default = 0\n Use_Original : use the original distributions of K(r)=c(r)/D*Z/EAR \n default = 1 : use the original distribution (different for propellers with Z=4 and Z=3)\n = 0 : use a modified distribution that produces the values of EAR very close to the expected\n = -1 : use a modified distribution resulting directly from Wageningen Drawings\n Use_Smooth : construct bivariate smoothing splines for the data used for thickness and pressure side calculations\n default = False \n s : smoothing factor, active for the procedure related to Use_Smooth \n default = 0 : interpolate \n s > 0 : smooth, the larger the s the larger the smoothing\n Smooth_LE : enable the LE smoothing procedure \n default = False\n x0s,x0p : smoothing lengths suction and pressure side, the smoothing is mostly active for \n x Original Data from Oosterveld 1975 = Data from Kuiper 1992 \n __Kr_4=nparr([1.662, 1.882, 2.05, 2.152, 2.187, 2.144, 1.970, 1.582, 0.68]) \n \n # K(r) (3 blades) > Data from Oosterveld 1975\n __Kr_3=nparr([1.633, 1.832, 2.0, 2.12, 2.186, 2.168, 2.127, 1.657, 0]) \n \n #### Use_Original = 0\n #### NOTE : these values will be used if Use_Original is 0\n # K(r) > if used for all cases these values generate the exact EAR when interpolated \n # the dataset commes from Kr_3 with a modified rR=0.6 value to match EAR \n __Kr0=nparr([1.633, 1.832, 2.0, 2.12, 2.176, 2.168, 2.127, 1.657, 0]) \n \n \n #### Use_Original = -1\n #### NOTE : these values will be used if Use_Original is -1\n # Values of distributions are provided at r/R=\n __rR2=nparr([0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.95, 1])\n \n ## K(r) > obtained by mean values from drawings of B4-100/B5-105/B4-55/B5-75\n #__Kr_dr=nparr([ 1.630860093322964e+00,\n # 1.834640509520060e+00,\n # 2.004486224599534e+00,\n # 2.120805579441116e+00,\n # 2.176620735204807e+00,\n # 2.127005486463469e+00,\n # 1.950603483933783e+00,\n # 1.586311948351957e+00,\n # 1.194496769953584e+00,\n # 0])\n # error ~ -1.581670669767310e-02\n __Kr=nparr([1.633, 1.832, 2.0, 2.12, 2.186, 2.199, 2.127, 1.657, 0]) \n ####################### END OF K(r)\n \n ####################### xtmax(r) : Function used for the Location of maximum thickness\n # x_tmax=X_tmax/c (4 blades +) > Data from Kuiper 1992\n __xtmax_Kuiper=nparr([0.35, 0.35, 0.351, 0.355, 0.389, 0.443, 0.486, 0.5, 0.5])\n \n # x_tmax=X_tmax/c (4 blades +) > Data from Oosterveld 1975\n __xtmax_Oosterveld4=nparr([0.35, 0.35, 0.351, 0.355, 0.389, 0.443, 0.479, 0.5, 0.])\n \n # x_tmax=X_tmax/c (3 blades) \n __xtmax_Oosterveld3=nparr([0.35, 0.35, 0.351, 0.355, 0.389, 0.442, 0.478, 0.5, 0.])\n \n # Note : Differences for 3, and 4+ blades do not seem significant\n # Kuiper values will be used. 
The 0.5 for r/R=1 indicates that the decreament of Xtmax=xtmax*c\n # will follow the decreament of the chord and shall not decrease independently\n \n # x_tmax=X_tmax/c (4 blades +) > Data from Kuiper 1992\n __xtmax_mod=nparr([0.35, 0.35, 0.35, 0.355, 0.389, 0.443, 0.486, 0.5, 0.5])\n __xtmax=__xtmax_mod\n \n ## xtmax > obtained by mean values from drawings of B4-100/B5-105/B4-55/B5-75\n #__xtmax_dr=nparr([3.472068514029757e-01,\n # 3.465972752014425e-01,\n # 3.473695167447711e-01,\n # 3.521592139805451e-01,\n # 3.912068836783953e-01,\n # 4.426962965048316e-01,\n # 4.725832276101920e-01,\n # 5.270663671102803e-01,\n # 5.869021976445969e-01,\n # 0])\n #\n # These values suggest that a linear increase is present for xtmax for r/R>0.7\n # The values for r/R<0.4 are almost constant and if we take into account the error\n # from the capturing procedure we can say that are equal to 0.35 as suggested.\n # In order to perform a better smoothing the values that we shall use a sligtly\n # smaller value for r/R=0.4 : 0.35 instead of 0.351 and enforce a zero first order\n # derivative there. \n ####################### END OF xtmax(r) \n \n \n ####################### A(r), B(r) : Functions used to define maximum thickness \n __A=nparr([0.0526, 0.0464, 0.0402, 0.0340, 0.0278, 0.0216, 0.0154, 0.0092, 0.003])\n __B=nparr([0.0040, 0.0035, 0.0030, 0.0025, 0.0020, 0.0015, 0.0010, 0.0005, 0.000])\n ####################### END OF xtmax(r) \n \n ####################### V1 and V2 : functions used to define pressure side and thickness \n # Percentages (NOT in %) of Maximum thickness for a given r/R and given position from maximum thickness \n # Note : Lines denote radius : : r/R = 0.15, 0.20, 0.25, 0.3, 0.4, 0.5, 0.6, 0.7 \n # Columns denote percentage of distance from location of maximum thickness, x_tmax : For the j-th column we have the following\n #<------------ X/c=(1+a(j))*x_tmax/c ---------------------------------> X/c=x_tmax <------------------------- X/c=(1-a(j))*x_tmax/c+a ------------------------------->\n # LE TE\n #with:j=1 2 3 ....\n #a(j)=-1 -0.95 -0.9 -0.85 -0.8 -0.7 -0.6 -0.5 - 0.4 -0.2 0 0.2 0.4 0.5 0.6 0.7 0.8 0.9 0.95 1 \n __V1=nparr([ \n [ 0.3860 , 0.3150 , 0.2642 , 0.2230 , 0.1870 , 0.1320 , 0.0920 , 0.0615 , 0.0384 , 0.0096 , 0 , 0.0365 , 0.0955 , 0.1280 , 0.1610 , 0.1950 , 0.2300 , 0.2650 , 0.2824 , 0.3000],\n [ 0.3560 , 0.2821 , 0.2353 , 0.2000 , 0.1685 , 0.1180 , 0.0804 , 0.0520 , 0.0304 , 0.0049 , 0 , 0.0172 , 0.0592 , 0.0880 , 0.1207 , 0.1570 , 0.1967 , 0.2400 , 0.2630 , 0.2826], \n [ 0.3256 , 0.2513 , 0.2068 , 0.1747 , 0.1465 , 0.1008 , 0.0669 , 0.0417 , 0.0224 , 0.0031 , 0 , 0.0084 , 0.0350 , 0.0579 , 0.0899 , 0.1246 , 0.1651 , 0.2115 , 0.2372 , 0.2598],\n [ 0.2923 , 0.2186 , 0.1760 , 0.1445 , 0.1191 , 0.0790 , 0.0503 , 0.0300 , 0.0148 , 0.0027 , 0 , 0.0033 , 0.0202 , 0.0376 , 0.0623 , 0.0943 , 0.1333 , 0.1790 , 0.2040 , 0.2306],\n [ 0.2181 , 0.1467 , 0.1088 , 0.0833 , 0.0637 , 0.0357 , 0.0189 , 0.0090 , 0.0033 , 0 , 0 , 0 , 0.0044 , 0.0116 , 0.0214 , 0.0395 , 0.0630 , 0.0972 , 0.1200 , 0.1467],\n [ 0.1278 , 0.0778 , 0.0500 , 0.0328 , 0.0211 , 0.0085 , 0.0034 , 0.0008 , 0 , 0 , 0 , 0 , 0 , 0.0012 , 0.0040 , 0.0100 , 0.0190 , 0.0330 , 0.0420 , 0.0522], \n [ 0.0382 , 0.0169 , 0.0067 , 0.0022 , 0.0006 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ], \n [ 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ],\n [ 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ],\n [ 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 
0 , 0 , 0 , 0 , 0 , 0 , 0 ], \n [ 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ],\n [ 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 , 0 ] ])\n \n __rR_V=nparr([0.15, 0.20, 0.25, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.85, 0.9, 1])\n __a =nparr([-1 ,-0.95, -0.9, -0.85, -0.8, -0.7, -0.6, -0.5, -0.4, -0.2, 0 , 0.2, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.95, 1 ]) \n \n # Data for Thickness : \n # Note : Lines denote radius : : r/R = 0.15, 0.20, 0.25, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.85, 0.9, 1\n # Columns denote percentage of distance from location of maximum thickness, x_tmax : For the j-th column we have the following\n #<------------ X/c=(1+a(j))*x_tmax/c ---------------------------------> X/c=x_tmax <------------------------- X/c=(1-a(j))*x_tmax/c+a ------------------------------->\n # LE TE\n #with:j=1 2 3 ....\n #a(j)=-1 - 0.9 -0.95 -0.85 -0.8 -0.7 -0.6 -0.5 -0.4 -0.2 0 0.2 0.4 0.5 0.6 0.7 0.8 0.9 0.95 1 \n # NOTE : Modified Values for the following a,rR\n # rR= 0.15 : a =-0.9; a=0.9; a=0.95\n # rR= 0.2 : a=0.95\n # a = -0.4 : rR=0.3\n # \n __V2=nparr([\n [ 0 , 0.1380 , 0.2600 , 0.3665 , 0.4520 , 0.5995 , 0.7105 , 0.8055 , 0.8825 , 0.9749 , 1 , 0.9360 , 0.7805 , 0.6770 , 0.5585 , 0.4280 , 0.2870 , 0.1432 , 0.0700 , 0 ], \n [ 0 , 0.1560 , 0.2840 , 0.3905 , 0.4777 , 0.6190 , 0.7277 , 0.8170 , 0.8875 , 0.9750 , 1 , 0.9446 , 0.7984 , 0.6995 , 0.5842 , 0.4535 , 0.3060 , 0.1455 , 0.0700 , 0 ], \n [ 0 , 0.1758 , 0.3042 , 0.4108 , 0.4982 , 0.6359 , 0.7415 , 0.8259 , 0.8899 , 0.9751 , 1 , 0.9519 , 0.8139 , 0.7184 , 0.6050 , 0.4740 , 0.3228 , 0.1567 , 0.0725 , 0 ], \n [ 0 , 0.1890 , 0.3197 , 0.4265 , 0.5130 , 0.6505 , 0.7520 , 0.8315 , 0.8923 , 0.9750 , 1 , 0.9583 , 0.8265 , 0.7335 , 0.6195 , 0.4885 , 0.3360 , 0.1670 , 0.0800 , 0 ], \n [ 0 , 0.1935 , 0.3235 , 0.4335 , 0.5220 , 0.6590 , 0.7593 , 0.8345 , 0.8933 , 0.9725 , 1 , 0.9645 , 0.8415 , 0.7525 , 0.6353 , 0.5040 , 0.3500 , 0.1810 , 0.0905 , 0 ], \n [ 0 , 0.1750 , 0.3056 , 0.4135 , 0.5039 , 0.6430 , 0.7478 , 0.8275 , 0.8880 , 0.9710 , 1 , 0.9639 , 0.8456 , 0.7580 , 0.6439 , 0.5140 , 0.3569 , 0.1865 , 0.0950 , 0 ], \n [ 0 , 0.1485 , 0.2720 , 0.3775 , 0.4620 , 0.6060 , 0.7200 , 0.8090 , 0.8790 , 0.9690 , 1 , 0.9613 , 0.8426 , 0.7530 , 0.6415 , 0.5110 , 0.3585 , 0.1885 , 0.0965 , 0 ], \n [ 0 , 0.1240 , 0.2337 , 0.3300 , 0.4140 , 0.5615 , 0.6840 , 0.7850 , 0.8660 , 0.9675 , 1 , 0.96 , 0.84 , 0.75 , 0.64 , 0.51 , 0.36 , 0.19 , 0.0975 , 0 ], \n [ 0 , 0.1050 , 0.2028 , 0.2925 , 0.3765 , 0.5265 , 0.6545 , 0.7635 , 0.8520 , 0.9635 , 1 , 0.96 , 0.84 , 0.75 , 0.64 , 0.51 , 0.36 , 0.19 , 0.0975 , 0 ], \n [ 0 , 0.1000 , 0.1950 , 0.2830 , 0.3660 , 0.5160 , 0.6455 , 0.7550 , 0.8450 , 0.9615 , 1 , 0.96 , 0.84 , 0.75 , 0.64 , 0.51 , 0.36 , 0.19 , 0.0975 , 0 ], \n [ 0 , 0.0975 , 0.1900 , 0.2775 , 0.3600 , 0.51 , 0.6400 , 0.75 , 0.8400 , 0.9600 , 1 , 0.96 , 0.84 , 0.75 , 0.64 , 0.51 , 0.36 , 0.19 , 0.0975 , 0 ],\n [ 0 , 0.0975 , 0.1900 , 0.2775 , 0.3600 , 0.51 , 0.6400 , 0.75 , 0.8400 , 0.9600 , 1 , 0.96 , 0.84 , 0.75 , 0.64 , 0.51 , 0.36 , 0.19 , 0.0975 , 0 ]]) \n \n \n # Initialiser\n def __init__(self,Z,EAR,rR,tLE_tmax=0.2,tTE_tmax=0,Use_Original=1,Use_Smooth=0,s=0,Smooth_LE=0,x0s=0.1,x0p=0.1,ks=0.8,kp=0.8):\n \"\"\" Initialiser : Parameters, and closed LEADING edge specifier \"\"\"\n # NOTE : Here TE_Closed refers to the LEADING EDGE\n self.EAR=EAR\n self.Z=Z\n self.rR=rR\n self.Use_Original=Use_Original\n self.tLE_tmax=tLE_tmax\n if (tTE_tmax>0):\n self.closed(TE_closed=0)\n else:\n self.closed(TE_closed=1)\n 
self.tTE_tmax=tTE_tmax\n        self.Use_Smooth=Use_Smooth\n        # note : if Use_Smooth is false then s has no effect\n        self.s=s\n        self.Smooth_LE=Smooth_LE\n        self.construct_splines()\n        if (self.Use_Smooth):\n            self.construct_pres_suct_rR_representation() \n        else:\n            self.construct_pres_suct_rR_representation2() \n        # setup smoothing parameters\n        self.smooth_at_LE_WeibParams(x0s,ks,x0p,kp)\n    \n    # Construct the required splines for c(r)/D, tmax/D, Xtmax/D ...\n    def construct_splines(self):\n        from scipy import interpolate\n        from numpy import zeros_like, ones_like, meshgrid, delete\n        if (self.__splines_constructed):\n            # splines already built : nothing to do\n            return\n        self.construct_cRspl()\n        self.construct_xtmaxspl()\n        self.construct_tmaxRspl()\n        # note : the subroutine below only prepares data for visualisation using the plot_V2 function\n        # as long as Use_Smooth is false\n        self.construct_pres_suct_smoothing()\n        if (not self.Use_Smooth):\n            self.construct_pres_suct_smoothing2()\n        self.__splines_constructed=True\n    \n    \n    def construct_cRspl(self):\n        from scipy import interpolate\n        # Construction of chord/R spline\n        if (self.Use_Original==1):\n            if (self.Z<=3):\n                self.cR = interpolate.UnivariateSpline(self.__rR,self.__Kr_3*2*self.EAR/self.Z,k=5,s=0)\n            else:\n                self.cR = interpolate.UnivariateSpline(self.__rR,self.__Kr_4*2*self.EAR/self.Z,k=5,s=0)\n        elif (self.Use_Original==-1):\n            # note : __Kr_dr is commented out in the data section above ; restore it before using Use_Original=-1\n            self.cR = interpolate.UnivariateSpline(self.__rR2,self.__Kr_dr*2*self.EAR/self.Z,k=5,s=0)\n        else:\n            self.cR = interpolate.UnivariateSpline(self.__rR,self.__Kr*2*self.EAR/self.Z,k=5,s=0)\n    \n    def construct_xtmaxspl(self):\n        from scipy import interpolate\n        # Construction of xtmax=Xtmax/c spline\n        self.xtmaxSp=interpolate.make_interp_spline(self.__rR[2:len(self.__rR)-1],self.__xtmax[2:len(self.__rR)-1],bc_type=([(1,0)],[(1,0)]))\n\n    def construct_tmaxRspl(self):\n        from scipy import interpolate\n        # Construction of tmax/R spline\n        self.tmaxR=interpolate.UnivariateSpline(self.__rR,(self.__A*self.Z-self.__B)/2,k=5,s=0)\n\n    # Note : Bad subroutine name \n    # The subroutine construct_pres_suct_smoothing prepares the Wageningen Data given as t/tmax and yp/tmax \n    # to obtain the same ! data but as t/c and yp/c. 
It is clear that t/c=t/tmax * tmax/c\n # yp/c=yp/tmax * tmax/c\n # \n def construct_pres_suct_smoothing(self):\n from scipy import interpolate\n from numpy import zeros_like, ones_like, meshgrid, delete\n # Contruction of V2 spline\n #self.V2=interpolate.SmoothBivariateSpline(tile(self.__a.,len(self.__rR_V)),tile(self.__rR_V,len(self.__a)),self.__V2,s=0)\n #a,r=meshgrid(self.a2x(self.__a),self.__rR_V)\n #self.__X=self.a2x(self.__a,self.__rR_V)\n # Physical Internal Locations (X/c) where V2 and V1 data are defined\n a,r=meshgrid(self.__a,self.__rR_V)\n self.__Xc=self.a2x(a,r)\n self.__r=r.copy()\n #print(self.__Xc)\n # Physical Internal thickness (yt/c), pressure side and suction side\n # Prepare data\n self.__ytc=zeros_like(self.__V2)\n self.__ytc[:,0:11] =self.__V2[:,0:11]*(1-self.tLE_tmax)+self.tLE_tmax\n self.__ytc[:,11:20]=self.__V2[:,11:20]*(1-self.tTE_tmax)+self.tTE_tmax\n self.__ypc=ones_like(self.__V1)\n self.__ypc[:,0:11] =self.__V1[:,0:11]*(1-self.tLE_tmax) \n self.__ypc[:,11:20]=self.__V1[:,11:20]*(1-self.tTE_tmax)\n # Finalise data\n for i in range(len(self.__rR_V)):\n self.__ytc[i,:]*=self.tmaxR(self.__rR_V[i])/self.cR(self.__rR_V[i])\n self.__ypc[i,:]*=self.tmaxR(self.__rR_V[i])/self.cR(self.__rR_V[i])\n self.__ysc=self.__ytc+self.__ypc\n if (self.Use_Smooth):\n # Prepare Actual Pressure and Suction Sides with round LE\n self.__yscu=self.__ysc.copy()\n self.__yscu[:,0]=(self.__ysc[:,0]+self.__ypc[:,0])*0.5\n #print(self.__yscu[:,1])\n #print(self.__yscu.ravel())\n #self.yscSp=interpolate.SmoothBivariateSpline(delete(self.__Xc,[1],1).ravel(),delete(r,[1],1).ravel(),delete(self.__yscu,[1],1).ravel(),kx=5,ky=3,s=1e-2)\n self.yscSp=interpolate.SmoothBivariateSpline(self.__Xc.ravel(),r.ravel(),self.__ysc.ravel(),kx=3,ky=3,s=self.s)\n #self.yscSp=interpolate.SmoothBivariateSpline(a.ravel(),r.ravel(),self.__yscu.ravel(),kx=5,ky=3,s=0)\n self.__ypcu=self.__ypc.copy()\n #self.__ypcu=delete(self.__ypc,[1,2],1)\n self.__ypcu[:,0]=(self.__ysc[:,0]+self.__ypc[:,0])*0.5\n #print(self.__ypcu[:,1])\n #print(self.__yscu.ravel())\n #self.ypcSp=interpolate.SmoothBivariateSpline(delete(self.__Xc,[1],1).ravel(),delete(r,[1],1).ravel(),delete(self.__ypcu,[1],1).ravel(),kx=5,ky=3,s=1e-2)\n self.ypcSp=interpolate.SmoothBivariateSpline(self.__Xc.ravel(),r.ravel(),self.__ypc.ravel(),kx=3,ky=3,s=self.s)\n \n \n def construct_pres_suct_rR_representation(self):\n from scipy import interpolate\n from numpy import linspace, exp, sqrt, where\n # For the specified radius construct the representation of the foil that will be\n # used to obtain points\n # For rR : evaluate yp yc for npoints points\n npoints=100\n x=linspace(0,1,npoints)\n x=x.reshape(-1,1)\n yp=self.ypcSp(x,self.rR)\n #ys=self.yscSp(x,self.rR)\n m=(self.yscSp(0,self.rR)+self.ypcSp(0,self.rR))*0.5\n asuc=(m-self.yscSp(0.2,self.rR))/sqrt(0.2)\n yp[0]=m\n ys=where(x<=0.2,m-asuc*sqrt(x),self.yscSp(x,self.rR))\n ys[npoints-1]=yp[npoints-1]\n # create a nice interpolating spline \n self.ypsp=interpolate.UnivariateSpline(x,yp,k=5,s=0)\n self.yssp=interpolate.UnivariateSpline(x,ys,k=5,s=0)\n \n def construct_pres_suct_smoothing2(self):\n from scipy import interpolate\n # Specify how we interpolate the values of the tables V1 and V2 : here simple linear interpolations\n self.V1int=interpolate.interp2d(self.__a,self.__rR_V,self.__V1, kind='linear')\n self.V2int=interpolate.interp2d(self.__a,self.__rR_V,self.__V2, kind='linear')\n \n def construct_pres_suct_rR_representation2(self):\n from numpy import where\n from scipy import interpolate\n # Get values 
of V1 (pressure side) and V2 (thickness) for the given rR\n        V1=self.V1int(self.__a,self.rR)\n        V2=self.V2int(self.__a,self.rR)\n        # Get values of x/c that correspond to these a and the given rR\n        x=self.a2x(self.__a,self.rR)\n        # Construct the values of pressure side and thickness\n        yp=where(self.__a<0,V1*(1-self.tLE_tmax),V1*(1-self.tTE_tmax))\n        yt=where(self.__a<0,V2*(1-self.tLE_tmax)+self.tLE_tmax,V2*(1-self.tTE_tmax)+self.tTE_tmax)\n        # The above values of yp and yt are actually yp/tmax and yt/tmax ; transform them to yp/c, yt/c. \n        # The values below are the transformed ones, i.e. yp/c and yt/c \n        yp*=self.tmaxR(self.rR)/self.cR(self.rR)\n        yt*=self.tmaxR(self.rR)/self.cR(self.rR)\n        # find values for suction\n        ys=yp+yt\n        # find values for the camber (mean) line\n        yc=yp+yt*0.5\n        # create interpolating splines (or smoothing ones, if s is eventually different than zero) \n        # for pressure and suction side :\n        self.ypsp=interpolate.UnivariateSpline(x,yp,k=5,s=0)\n        self.yssp=interpolate.UnivariateSpline(x,ys,k=5,s=0)\n        # create interpolating splines (or smoothing ones, if s is eventually different than zero) \n        # for the (half)thickness and the camber (mean) line\n        self.ytsp=interpolate.UnivariateSpline(x,yt*0.5,k=5,s=0)\n        self.ycsp=interpolate.UnivariateSpline(x,yc,k=5,s=0)\n    \n    \n    \n    def smooth_at_LE_WeibParams(self,x0_s,k_s,x0_p,k_p):\n        from numpy import log\n        # Create a smooth transition at LE\n        # x0_s and x0_p are the smoothing lengths \n        # k_s and k_p are the transition parameters for the smoothing\n        # The closer these values are to one, the smoother the transition \n        if (not self.Smooth_LE):\n            # no LE smoothing requested : the Weibull parameters are not needed\n            return\n        # Mollifier for the pressure side \n        self.l_p=x0_p/(-log(0.05))**(1./k_p)\n        self.k_p=k_p\n        self.l_s=x0_s/(-log(0.05))**(1./k_s)\n        self.k_s=k_s\n    \n    def Weibulp(self,x):\n        from numpy import exp\n        v=1-exp(-(x/self.l_p)**self.k_p)\n        return v\n    \n    def Weibuls(self,x):\n        from numpy import exp\n        v=1-exp(-(x/self.l_s)**self.k_s)\n        return v\n    \n    # Name\n    def name(self):\n        \"\"\" Get the foil's name \"\"\"\n        n=\"B\"+\"{:d}\".format(self.Z)+\"_\"+\"{:d}\".format(int(self.EAR*100))+\"_rR=\"+\"{:.2f}\".format(self.rR)\n        return n \n    \n    # Location of maximum thickness for the requested radius\n    # Value of xtmax\n    def xtmax(self,rR):\n        \"\"\" Location of maximum thickness for the requested radius \"\"\"\n        from numpy import where\n        lnrR=len(self.__rR)-1\n        v=where(rR<self.__rR[0],self.__xtmax[0],where(rR>self.__rR[lnrR-1],self.__xtmax[lnrR-1], self.xtmaxSp(rR)))\n        return v\n    \n\n    def x2a(self,x,rR):\n        \"\"\" Transform function : from local variable x/c to a \"\"\"\n        from numpy import where\n        xt=self.xtmax(rR)\n        a=where(x<=xt,x/xt-1.,(x-xt)/(1-xt))\n        return a\n    \n    def a2x(self,a,rR):\n        \"\"\" Transform function : from local variable a to x/c \"\"\"\n        from numpy import where\n        x=where(a<0,(1+a)*self.xtmax(rR), (1-a)*self.xtmax(rR)+a)\n        return x \n    \n    # To be completed\n    def yt(self,x):\n        \"\"\" Thickness for the requested radius \"\"\"\n        pass\n    \n    \n    def Xs(self,x):\n        \"\"\" Suction side (X above coordinate)\"\"\"\n        return x\n    \n    def Xp(self,x):\n        \"\"\" Pressure side (X below coordinate)\"\"\"\n        return x\n    \n    # To be completed\n    def Yp(self,x):\n        \"\"\" Pressure side (Y below coordinate)\"\"\"\n        from numpy import where\n        #y=where(x<=1e-4,(self.ypsp(x)+self.yssp(x))*0.5,self.ypsp(x))\n        if (self.Smooth_LE):\n            y=self.ycsp(x)-self.ytsp(x)*self.Weibulp(x)\n        else:\n            y=self.ypsp(x)\n        return y\n    \n    # To be completed\n    def Ys(self,x):\n        \"\"\" Suction side (Y above coordinate)\"\"\"\n        from numpy import where\n        
#y=where(x<=1e-4,self.ypcSp(x,self.rR),where(x>=1-1e-4,self.ypcSp(x,self.rR),self.yscSp(x,self.rR)))\n #y=where(x<=1e-4,(self.ypsp(x)+self.yssp(x))*0.5,self.yssp(x))\n if (self.Smooth_LE):\n y=self.ycsp(x)+self.ytsp(x)*self.Weibuls(x)\n else:\n y=self.yssp(x)\n return y\n \n def vecS_TE(self):\n \"\"\" Tangent at TE at pressure and suction (orientation from pressure to suction - towards LE and back to TE towards wake) \"\"\"\n from numpy import sqrt\n derS=self.yssp.derivatives(1)\n derP=self.ypsp.derivatives(1)\n vec_SuctSide_TEtoWkx= 1/(sqrt(1+derS[1]**2)) # above TE\n vec_SuctSide_TEtoWky= derS[1]/(sqrt(1+derS[1]**2)) # above TE\n vec_PresSide_TEtoLEx= -1/(sqrt(1+derP[1]**2)) # below TE\n vec_PresSide_TEtoLEy= -derP[1]/(sqrt(1+derP[1]**2)) # below TE\n return [vec_PresSide_TEtoLEx, vec_PresSide_TEtoLEy, vec_SuctSide_TEtoWkx, vec_SuctSide_TEtoWky]\n\n def vecS_LE(self):\n from numpy import sqrt\n if (not self.Smooth_LE):\n derS=self.yssp.derivatives(0)\n derP=self.ypsp.derivatives(0)\n vec_SuctSide_TEtoWkx= 1/(sqrt(1+derS[1]**2)) # above TE\n vec_SuctSide_TEtoWky= derS[1]/(sqrt(1+derS[1]**2)) # above TE\n vec_PresSide_TEtoLEx= -1/(sqrt(1+derP[1]**2)) # below TE\n vec_PresSide_TEtoLEy= -derP[1]/(sqrt(1+derP[1]**2)) # below TE\n else:\n vec_SuctSide_TEtoWkx= 0 # above TE\n vec_SuctSide_TEtoWky= 1 # above TE\n vec_PresSide_TEtoLEx= 0 # below TE\n vec_PresSide_TEtoLEy= 1 # below TE\n return [vec_PresSide_TEtoLEx, vec_PresSide_TEtoLEy, vec_SuctSide_TEtoWkx, vec_SuctSide_TEtoWky]\n \n \n def plot_cR(self,npoints=100):\n \"\"\" Returns a plt handle for the c/R=chord/R distribution \"\"\"\n import matplotlib.pyplot as plt\n from numpy import linspace, pi, cos\n x=linspace(self.__rR_Hub,1,npoints)\n plt.plot(x,self.cR(x),'r',label='Spline Interpolation')\n if (self.Use_Original==1):\n if (self.Z <=3):\n plt.plot(self.__rR,2*self.__Kr_3*self.EAR/self.Z ,'bo',label='B Series Data')\n else:\n plt.plot(self.__rR,2*self.__Kr_4*self.EAR/self.Z ,'bo',label='B Series Data')\n elif (self.Use_Original==-1):\n plt.plot(self.__rR2,2*self.__Kr_dr*self.EAR/self.Z ,'bo',label='B Series Data')\n else:\n plt.plot(self.__rR,2*self.__Kr*self.EAR/self.Z ,'bo',label='B Series Data')\n plt.plot(x[0],self.cR(x[0]),'go',label='Hub Radius')\n plt.xlabel('r/R')\n plt.ylabel('c/R')\n plt.legend()\n EAR_app=self.Z*self.cR.integral(self.__rR[0],1)/pi/cos(15*pi/180)\n EAR_err=abs(self.EAR-EAR_app)/self.EAR\n plt.text(0.54, self.cR(x[2])/3, r'Relative Error(EAR)='+\"{:.4f}\".format(EAR_err*100)+\"%\")\n print('EAR Err=',EAR_err)\n print('EAR_App=',EAR_app)\n plt.grid(1)\n return plt\n \n def plot_xtmax(self,npoints=100):\n \"\"\" Returns a plt handle for the xtmax=Xtmax/c distribution \"\"\"\n import matplotlib.pyplot as plt\n from numpy import linspace\n x=linspace(self.__rR_Hub,1,npoints)\n plt.plot(x,self.xtmax(x),'r',label='Spline Interpolation')\n plt.plot(self.__rR,self.__xtmax,'bo',label='B Series Data')\n plt.plot(x[0],self.xtmax(x[0]),'go',label='Hub Radius')\n plt.xlabel('r/R')\n plt.ylabel('Xtmax/c')\n plt.legend()\n plt.grid(1)\n return plt\n\n def plot_tmax(self,npoints=100):\n \"\"\" Returns a plt handle for the tmax/c distribution \"\"\"\n import matplotlib.pyplot as plt\n from numpy import linspace\n x=linspace(self.__rR_Hub,1,npoints)\n plt.plot(x,self.tmaxR(x)/self.cR(x),'r',label='Spline Interpolation')\n plt.plot(self.__rR,(self.__A*self.Z-self.__B)/2/(2*self.__Kr_4*self.EAR/self.Z),'bo',label='B Series Data')\n plt.plot(x[0],self.tmaxR(x[0])/self.cR(x[0]),'go',label='Hub Radius')\n plt.xlabel('r/R')\n 
plt.ylabel('tmax/c')\n plt.legend()\n plt.grid(1)\n return plt\n\n def plot_V2(self,npoints=10,mpoints=10):\n \"\"\" Returns a plt handle to visualise the V2 distribution (thickness) at physical space \"\"\"\n import matplotlib.pyplot as plt\n from mpl_toolkits.mplot3d import Axes3D\n from numpy import linspace, meshgrid\n x=linspace(0,1,npoints)\n #a=linspace(-1,1,npoints)\n #a=linspace(0,1,npoints)\n r=linspace(0.15,1,mpoints)\n X,R=meshgrid(x,r)\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n ax.plot_wireframe(self.__Xc,self.__r,self.__ypc,color='k')\n ax.plot_wireframe(self.__Xc,self.__r,self.__ysc,color='k')\n #print(self.yscSp(x,r))\n #X=self.a2x(A,R)\n if (self.Use_Smooth):\n ax.plot_surface(X,R,self.yscSp(x,r).T,color='r')\n ax.plot_surface(X,R,self.ypcSp(x,r).T,color='b')\n #ax.plot_surface(x,r,self.V2(a,r))\n ax.set_zlim(0, 0.3)\n return plt\n\n # Visual Verifier of parameters\n def show_parameters(self):\n print(\"WAGENINGEN : \")\n print(\"EAR=\",self.EAR)\n print(\"Z=\",self.Z)\n print(\"rR=\",self.rR)\n if (self.TE_closed):\n print(\"Closed Leading Edge\")\n else:\n print(\"Open Leading Edge\")\n\n","sub_path":"V5/ForPython3/WAGENINGEN_LIB.py","file_name":"WAGENINGEN_LIB.py","file_ext":"py","file_size_in_byte":31824,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"187616309","text":"import logging\nimport time\nfrom sakuraio.hardware.rpi import SakuraIOSMBus\n\nlogger = logging.getLogger(__name__)\n\ndef upload(filename):\n\n logger.info(\"start upload %s\", filename)\n\n data = None\n with open(filename, \"rb\") as f:\n data = f.read()\n\n sakuraio = SakuraIOSMBus()\n\n # Check online\n if not sakuraio.get_is_online():\n logger.error(\"Offline\")\n return\n\n # Start upload\n logger.info(\"Start upload size=%d\", len(data))\n sendChannels(sakuraio, [(1, len(data))])\n\n sequence = 0\n while len(data) > 0:\n # Send chunk\n\n channels = [(2, sequence)]\n chunk = data[:8*15]\n\n logger.info(\"Send chunk sequence=%d size=%d\", sequence, len(chunk))\n\n while len(chunk):\n d = chunk[:8] + b\"\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\"\n channels.append((2, d[:8]))\n chunk = chunk[8:]\n\n try:\n sendChannels(sakuraio, channels)\n except KeyboardInterrupt:\n raise\n except:\n logger.exception(\"Send chunk error\")\n continue\n\n sequence += 1\n data = data[8*15:]\n\n # Finish upload\n logger.info(\"Finish upload\")\n sendChannels(sakuraio, [(3, 0)])\n\n\ndef sendChannels(sakuraio, channels):\n sakuraio.clear_tx()\n for channel in channels:\n sakuraio.enqueue_tx(channel[0], channel[1])\n\n sakuraio.send()\n\n while True:\n queue = sakuraio.get_tx_status()[\"queue\"]\n if queue == 0x00:\n # success\n return\n if queue == 0x01:\n # sending\n time.sleep(0.01)\n continue\n if queue == 0x02:\n # error\n raise Exception()\n\n\nif __name__ == \"__main__\":\n import sys\n filename = sys.argv[1]\n\n formatter = logging.Formatter(\"[%(asctime)s] [%(process)d] [%(name)s] [%(levelname)s] %(message)s\")\n handler = logging.StreamHandler()\n handler.setLevel(logging.DEBUG)\n handler.setFormatter(formatter)\n logger.setLevel(logging.DEBUG)\n logger.addHandler(handler)\n\n try:\n upload(filename)\n except:\n logger.exception(\"error\")\n","sub_path":"fileupload/raspberrypi/sakuraio-upload.py","file_name":"sakuraio-upload.py","file_ext":"py","file_size_in_byte":2122,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"271306783","text":"# -*- coding: utf-8 -*-\nfrom 
django.apps import apps\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.http import HttpResponse, Http404\nfrom core.views.api import api_view_decorator,APIException, find_internal_id\nimport alerts.serializers\nimport alerts.syncing\nfrom rest_framework.exceptions import ValidationError\n\n@csrf_exempt\n@api_view_decorator\ndef send_alert(request, data):\n\t'''\n\tSends an alert to a user. The format of the data is the following:\n\n\t{\n\t\t\"to\":\"asdas\", // external_id\n\t\t\"title\": \"asdas\",\n\t\t\"content\": \"asdasd\"\n\t}\n\n\t'''\n\tif request.method != 'POST':\n\t\traise APIException(\"Metodo %s nao permitido.\"%(request.method))\n\n\texternal_id = data.pop('to', None)\n\tif external_id is None:\n\t\traise APIException(\"O campo \\\"to\\\" nao pode ser nulo.\")\n\n\tUserEmployee = apps.get_model('accounts','UserEmployee')\n\tid = find_internal_id(UserEmployee, external_id)\n\n\tserializer = alerts.serializers.AlertSerializer(data=data)\n\ttry:\n\t\tserializer.is_valid(raise_exception=True)\n\texcept ValidationError as e:\n\t\traise APIException(e)\n\n\tnew_alert = serializer.save(user_employee_id=id)\n\n\t# Deleting older alerts\n\tAlert = apps.get_model('alerts','Alert')\n\told_alerts = Alert.objects.filter(user_employee_id=id).exclude(id=new_alert.id)\n\tsm = alerts.syncing.AlertSyncManager()\n\tfor al in old_alerts:\n\t\tsm.saveObject(new_alert.user_employee, '', {\"id\":al.id, \"userDismissed\":True})\n\n\treturn HttpResponse(\"OK\")\n","sub_path":"alerts/views/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":1417,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"536005980","text":"# Copyright 2014 Hewlett-Packard Development Company, L.P.\n# Copyright 2014 SUSE Linux Products GmbH\n# Copyright 2015 IBM Corp.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom __future__ import absolute_import\n\nimport ldap\n\nfrom oslo.config import cfg\nfrom keystone import config\nfrom keystone.assignment.backends import ldap as ldap_assign_backend\nfrom keystone.assignment.backends import sql as sql_assign\nfrom keystone.identity.backends import ldap as ldap_ident_backend\nfrom keystone.identity.backends import sql as sql_ident\nfrom keystone.common import sql\nfrom keystone.common import ldap as common_ldap\nfrom keystone import exception\nfrom keystone.openstack.common.gettextutils import _\nfrom keystone.openstack.common import log\n\n\nLOG = log.getLogger(__name__)\n\n\nhybrid_opts = [\n cfg.ListOpt('default_roles',\n default=['_member_', ],\n help='List of roles assigned by default to an LDAP user'),\n cfg.StrOpt('default_project',\n default='demo',\n help='Default project'),\n cfg.StrOpt('default_domain',\n default='default',\n help='Default domain'),\n]\n\nCONF = config.CONF\nCONF.register_opts(hybrid_opts, 'ldap_hybrid')\n\n\nclass Assignment(sql_assign.Assignment):\n _default_roles = list()\n _default_project = None\n identity = sql_ident.Identity()\n ldap_assign = ldap_assign_backend.Assignment()\n ldap_ident = ldap_ident_backend.Identity()\n\n def _get_metadata(self, user_id=None, tenant_id=None,\n domain_id=None, group_id=None, session=None):\n try:\n res = super(Assignment, self)._get_metadata(\n user_id, tenant_id, domain_id, group_id, session)\n except exception.MetadataNotFound:\n projects = self._list_projects(user_id)\n if tenant_id in [project['id'] for project in projects]:\n return {\n 'roles': [\n {'id': role_id} for role_id in self.default_roles\n ]\n }\n else:\n raise\n else:\n roles = res.get('roles', [])\n res['roles'] = roles + [\n {'id': role_id} for role_id in self.default_roles\n ]\n return res\n\n @property\n def default_project(self):\n if self._default_project is None:\n self._default_project = self.get_project_by_name(\n CONF.ldap_hybrid.default_project,\n CONF.ldap_hybrid.default_domain)\n return dict(self._default_project)\n\n @property\n def default_project_id(self):\n return self.default_project['id']\n\n @property\n def default_roles(self):\n if not self._default_roles:\n with sql.transaction() as session:\n query = session.query(sql_assign.Role)\n query = query.filter(sql_assign.Role.name.in_(\n CONF.ldap_hybrid.default_roles))\n role_refs = query.all()\n\n if len(role_refs) != len(CONF.ldap_hybrid.default_roles):\n raise exception.RoleNotFound(\n message=_('Could not find one or more roles: %s') %\n ', '.join(CONF.ldap_hybrid.default_roles))\n\n self._default_roles = [role_ref.id for role_ref in role_refs]\n return self._default_roles\n\n def _list_projects(self, user_id):\n user_dn = self.ldap_ident.user._id_to_dn(user_id),\n results = self.ldap_assign.role._ldap_get_list(\n self.ldap_assign.project.tree_dn, ldap.SCOPE_SUBTREE,\n query_params={self.ldap_assign.role.member_attribute:\n user_dn[0]},\n attrlist=[\n CONF.ldap.project_id_attribute,\n CONF.ldap.project_name_attribute,\n CONF.ldap.project_desc_attribute,\n ])\n projects = []\n for result in results:\n project = {\n 'description':\n result[1].get(CONF.ldap.project_desc_attribute)[0],\n 'domain_id': CONF.ldap_hybrid.default_domain,\n 'enabled': True,\n 'id': result[1].get(CONF.ldap.project_id_attribute)[0],\n 'name': result[1].get(CONF.ldap.project_name_attribute)[0],\n }\n projects.append(project)\n return projects\n\n def 
list_projects_for_user(self, user_id, group_ids, hints):\n        try:\n            self.identity.get_user(user_id)\n        except exception.UserNotFound:\n            projects = self._list_projects(user_id)\n            for project in projects:\n                # Check to see if the project already exists\n                try:\n                    super(Assignment, self).get_project(project['id'])\n                except:\n                    # Create the project locally\n                    try:\n                        super(Assignment, self).create_project(project['id'],\n                                                               project)\n                    except:\n                        # Don't worry if it can't be added\n                        pass\n\n                # Add the proper roles to the project\n                for role_id in self.default_roles:\n                    try:\n                        super(Assignment, self).add_role_to_user_and_project(\n                            user_id, project['id'], role_id)\n                    except:\n                        # Don't worry, the role has probably already been added\n                        pass\n\n        else:\n            projects = super(Assignment, self).list_projects_for_user(\n                user_id, group_ids, hints)\n\n        # Make sure the default project is in the project list for the user\n        # user_id\n        for project in projects:\n            if project['id'] == self.default_project_id:\n                return projects\n\n        if not projects:\n            # Only add the default project if they aren't already assigned\n            projects.append(self.default_project)\n\n        return projects\n","sub_path":"hybrid_assignment.py","file_name":"hybrid_assignment.py","file_ext":"py","file_size_in_byte":6574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"51344672","text":"# This program demonstrates the Button interface element.\r\n# When the user clicks the Button,\r\n# an information dialog box is shown on the screen.\r\n\r\nimport tkinter\r\nimport tkinter.messagebox\r\n\r\n\r\nclass MyGui:\r\n    def __init__(self):\r\n        self.main_window = tkinter.Tk()\r\n\r\n        self.my_button = tkinter.Button(self.main_window,\r\n                                        text='Нажми меня!',\r\n                                        command=self.do_something)\r\n\r\n        self.my_button.pack()\r\n        tkinter.mainloop()\r\n\r\n    def do_something(self):\r\n        tkinter.messagebox.showinfo('Реакция',\r\n                                    'Благодарю, что нажали кнопку')\r\n\r\n\r\nmy_gui = MyGui()\r\n","sub_path":"Chapter 13 (GUI)/(13.7) button_demo.py","file_name":"(13.7) button_demo.py","file_ext":"py","file_size_in_byte":865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"508581578","text":"from Binary_Search_Tree import *\ndef itr(l,u,inList,tree):\n    \"\"\"This function iterates through all elements\n    of the list in the order in which a balanced binary search tree can be formed\"\"\"\n    if u-l==0:\n        tree.insert(inList[l])\n    elif u-l==1:\n        tree.insert(inList[l])\n        tree.insert(inList[u])\n    else:\n        m=l+int((u-l)/2)\n        tree.insert(inList[m])\n        itr(l,m-1,inList,tree)\n        itr(m+1,u,inList,tree)\ndef listToTree(inList):\n    inList.sort()\n    tree=BST()\n    l=0\n    u=len(inList)-1\n    itr(l,u,inList,tree)\n    return tree\ndef main():\n    L=[100,50,150,25,75,125,175,10,40,60,90,110,140,160,190]\n    T=listToTree(L)\n\nif __name__=='__main__':\n    main()\n\n","sub_path":"PROGRAMS/ACM-DSA-18-master/Parth_Dodiya_Assignment-3/BST_from_sorted_List.py","file_name":"BST_from_sorted_List.py","file_ext":"py","file_size_in_byte":684,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"535175054","text":"c = 5\n# loop until c is false (0); the implicit test is not Pythonic\nwhile c:\n\tc -= 1\n\nc = 5\n# same as above, but explicit\nwhile c != 0:\n\tc -= 1\n\n# break once the entered input is 42\nprint(\"Please enter a number: \")\nwhile True:\n\tresponse = input()\n\tif int(response) == 42:\n\t\tbreak\n\tprint(\"Try 
again:\")\n","sub_path":"while.py","file_name":"while.py","file_ext":"py","file_size_in_byte":288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"368100366","text":"import datetime as dt\n\nfrom airflow import DAG\nfrom airflow.operators.python_operator import PythonOperator\nfrom godatadriven.operators.postgres_to_gcs import PostgresToGoogleCloudStorageOperator\nfrom operators import HttpToGcsOperator\n\ndag = DAG(\n dag_id=\"my_first_dag\",\n schedule_interval=\"30 7 * * *\",\n default_args={\n \"owner\": \"airflow\",\n \"start_date\": dt.datetime(2018, 6, 20),\n \"depends_on_past\": True,\n \"email_on_failure\": True,\n \"email\": \"airflow_errors@myorganisation.com\",\n },\n)\n\n\ndef print_exec_date(**context):\n print(context[\"execution_date\"])\n\n\nmy_task = PythonOperator(\n task_id=\"task_name\",\n python_callable=print_exec_date,\n provide_context=True,\n dag=dag\n)\n\n\npgsl_to_gcs = PostgresToGoogleCloudStorageOperator(\n task_id=\"postgres_to_gcs\",\n postgres_conn_id=\"postgres_airflow_training\",\n sql=\"SELECT * FROM public.land_registry_price_paid_uk WHERE transfer_date = '{{ ds }}'\",\n bucket='airflow-training-knab-geert',\n filename='land_registry_price_paid_uk/{{ ds }}/properties_{}.json',\n dag=dag\n)\n\n\nfor currency in {'EUR', 'USD'}:\n HttpToGcsOperator(\n task_id=\"get_currency_\" + currency,\n method=\"GET\",\n endpoint=\"airflow-training-transform-valutas?date={{ ds }}&from=GBP&to=\" + currency,\n http_conn_id=\"http_airflow_training\",\n gcs_conn_id=\"google_cloud_default\",\n gcs_bucket=\"airflow-training-knab-geert\",\n gcs_path=\"currency/{{ ds }}-\" + currency + \".json\",\n dag=dag\n )\n","sub_path":"dags/dag.py","file_name":"dag.py","file_ext":"py","file_size_in_byte":1531,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"249452521","text":"\"\"\"\nTHe problem is that all activations are stuck to 1.\nWell, if inference is purely based on the value of the activations, then if all of them are stuck to one, inference\nwill be trash.\n\nThis means the second line cannot be used.\nLet's try that.\n\"\"\"\n\n\n\nfrom __future__ import print_function\nimport argparse\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom torchvision import datasets, transforms\nfrom itertools import chain\n\n\nclass CurriculumStructure(nn.Module):\n\n def __init__(self):\n super(CurriculumStructure, self).__init__()\n self.conv1 = nn.Conv2d(1, 20, 5, 1)\n self.conv2 = nn.Conv2d(20, 50, 5, 1)\n self.fc1 = nn.Linear(4 * 4 * 50, 10)\n\n self.conv21 = nn.Conv2d(1, 20, 5, 1)\n self.conv22 = nn.Conv2d(20, 50, 5, 1)\n self.fc21 = nn.Linear(4 * 4 * 50, 10)\n\n self.conv31 = nn.Conv2d(1, 20, 5, 1)\n self.conv32 = nn.Conv2d(20, 50, 5, 1)\n self.fc31 = nn.Linear(4 * 4 * 50, 10)\n\n self.curriculum = 1\n\n def forward(self, input):\n if self.curriculum == 0:\n return self.forward0(input)\n else:\n return self.forward1(input)\n\n def forward0(self, input):\n A = F.relu(self.conv1(input))\n A = F.max_pool2d(A, 2, 2)\n A = F.relu(self.conv2(A))\n A = F.max_pool2d(A, 2, 2)\n A = A.view(-1, 4 * 4 * 50)\n A = F.sigmoid(self.fc1(A))\n\n ss = torch.sum(A, dim=1)\n ss = ss.unsqueeze(1)\n A = A / ss\n return torch.log(A)\n\n def forward1(self, input):\n A = F.relu(self.conv1(input))\n A = F.max_pool2d(A, 2, 2)\n A = F.relu(self.conv2(A))\n A = F.max_pool2d(A, 2, 2)\n A = A.view(-1, 4 * 4 * 50)\n A = F.sigmoid(self.fc1(A))\n\n B = 
F.relu(self.conv21(input))\n B = F.max_pool2d(B, 2, 2)\n B = F.relu(self.conv22(B))\n B = F.max_pool2d(B, 2, 2)\n B = B.view(-1, 4 * 4 * 50)\n B = F.sigmoid(self.fc21(B))\n\n C = F.relu(self.conv31(input))\n C = F.max_pool2d(C, 2, 2)\n C = F.relu(self.conv32(C))\n C = F.max_pool2d(C, 2, 2)\n C = C.view(-1, 4 * 4 * 50)\n C = F.sigmoid(self.fc31(C))\n\n yes = A * B + (1 - A) * C\n yesyes = yes\n return yesyes\n\n def curri_para(self, stage):\n self.curriculum = stage\n if stage == 0:\n return chain(self.conv1.parameters(), self.conv2.parameters(), self.fc1.parameters())\n else:\n return chain(self.conv21.parameters(), self.conv22.parameters(), self.fc21.parameters(),\n self.conv31.parameters(), self.conv32.parameters(), self.fc31.parameters())\n\ndef one_hot(target,num_class):\n tt=torch.zeros((target.shape[0]),num_class)\n tt.scatter_(1, target.view(-1,1), 1)\n return tt\n\ndef train(args, model, device, train_loader, optimizer, epoch, criterion=None):\n model.train()\n for batch_idx, (data, target) in enumerate(train_loader):\n target=one_hot(target,10)\n data, target = data.to(device), target.to(device)\n optimizer.zero_grad()\n output= model(data)\n loss = criterion(output, target)\n loss.backward()\n optimizer.step()\n if batch_idx % args.log_interval == 0:\n print('Train Epoch: {} [{}/{} ({:.0f}%)]\\tLoss: {:.6f}'.format(\n epoch, batch_idx * len(data), len(train_loader.dataset),\n 100. * batch_idx / len(train_loader), loss.item()))\n\n\ndef ttest(args, model, device, test_loader, criterion=None):\n model.eval()\n test_loss = 0\n correct = 0\n cnt=0\n with torch.no_grad():\n for data, target in test_loader:\n targett = one_hot(target, 10)\n cnt+=1\n data, targett, target = data.to(device), targett.to(device), target.to(device)\n output = model(data)\n test_loss += criterion(output,targett)\n pred = output.argmax(dim=1, keepdim=True) # get the index of the max log-probability\n correct += pred.eq(target.view_as(pred)).sum().item()\n\n test_loss /= cnt\n\n print('\\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\\n'.format(\n test_loss, correct, len(test_loader.dataset),\n 100. 
* correct / len(test_loader.dataset)))\n\ndef main_BCE():\n # Training settings\n parser = argparse.ArgumentParser(description='PyTorch MNIST Example')\n parser.add_argument('--batch-size', type=int, default=64, metavar='N',\n help='input batch size for training (default: 64)')\n parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',\n help='input batch size for testing (default: 1000)')\n parser.add_argument('--epochs', type=int, default=20, metavar='N',\n help='number of epochs to train (default: 10)')\n parser.add_argument('--lr', type=float, default=0.01, metavar='LR',\n help='learning rate (default: 0.01)')\n parser.add_argument('--momentum', type=float, default=0.5, metavar='M',\n help='SGD momentum (default: 0.5)')\n parser.add_argument('--no-cuda', action='store_true', default=False,\n help='disables CUDA training')\n parser.add_argument('--seed', type=int, default=1, metavar='S',\n help='random seed (default: 1)')\n parser.add_argument('--log-interval', type=int, default=10, metavar='N',\n help='how many batches to wait before logging training status')\n\n parser.add_argument('--save-model', action='store_true', default=False,\n help='For Saving the current Model')\n args = parser.parse_args()\n use_cuda = not args.no_cuda and torch.cuda.is_available()\n\n torch.manual_seed(args.seed)\n\n device = torch.device(\"cuda\" if use_cuda else \"cpu\")\n\n kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}\n train_loader = torch.utils.data.DataLoader(\n datasets.MNIST('../data', train=True, download=True,\n transform=transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.1307,), (0.3081,))\n ])),\n batch_size=args.batch_size, shuffle=True, **kwargs)\n test_loader = torch.utils.data.DataLoader(\n datasets.MNIST('../data', train=False, transform=transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.1307,), (0.3081,))\n ])),\n batch_size=args.test_batch_size, shuffle=True, **kwargs)\n\n model = CurriculumStructure().to(device)\n\n optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum)\n criterion=nn.BCELoss()\n\n for epoch in range(1, args.epochs + 1):\n train(args, model, device, train_loader, optimizer, epoch, criterion)\n ttest(args, model, device, test_loader, criterion)\n\n if (args.save_model):\n torch.save(model.state_dict(), \"mnist_cnn.pt\")\n\nif __name__ == '__main__':\n main_BCE()","sub_path":"essential/modal_capture_BCE.py","file_name":"modal_capture_BCE.py","file_ext":"py","file_size_in_byte":7080,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"189510443","text":"#!/usr/bin/env python\n\nfrom setuptools import find_packages, setup\nfrom setuptools.command.test import test as BaseTestCommand\nimport os.path\nimport sys\n\nsys.path = [\n os.path.join(os.path.dirname(__file__), 'src'),\n os.path.join(os.path.dirname(__file__), 'testproject')\n] + sys.path\n\nclass TestCommand(BaseTestCommand):\n def run(self):\n import os\n\n from django.core.management import call_command\n\n os.environ['DJANGO_SETTINGS_MODULE'] = 'project.settings'\n call_command('test')\n\nsetup(\n name='django-naturalsortfield',\n url='https://github.com/nathforge/django-naturalsortfield',\n version='0.7',\n description='Natural sorting for Django CharFields.',\n long_description=open(os.path.join(os.path.dirname(__file__), 'README.rst')).read(),\n \n author='Nathan Reynolds',\n author_email='email@nreynolds.co.uk',\n \n packages=find_packages(),\n\n 
cmdclass={'test': TestCommand},\n\n    tests_require=['django'],\n\n    zip_safe=True\n)\n","sub_path":"pypi_install_script/django-naturalsortfield-0.7.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":993,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"240917800","text":"import numpy as np\n\ndef load_simple_data():\n    data_mat = np.mat([\n        [1., 2.1],\n        [2., 1.1],\n        [1.3, 1.],\n        [1., 1.],\n        [2., 1]])\n    class_labels = [1.0, 1.0, -1.0, -1.0, 1.0]\n    return data_mat, class_labels\n\n\ndef stump_calssify(data_matrix, dimen, thresh_val, thresh_ineq):\n    '''\n    Decision stump classification (weak classifier)\n    :param data_matrix:\n    :param dimen: feature (dimension) index to split on\n    :param thresh_val: threshold value\n    :param thresh_ineq: threshold inequality, 'lt' or 'gt'\n    :return:\n    '''\n    ret_array = np.ones(shape=(data_matrix.shape[0], 1))\n    if thresh_ineq == 'lt':\n        ret_array[data_matrix[:, dimen] <= thresh_val] = -1.0\n    else:\n        ret_array[data_matrix[:, dimen] > thresh_val] = -1.0\n    return ret_array\n\n\ndef build_stump(data_arr, class_labels, D):\n    '''\n    Build the best decision stump under the sample weights D\n    :param data_arr:\n    :param class_labels:\n    :param D: sample weight vector\n    :return:\n    '''\n    data_matrix= np.mat(data_arr); label_matrix = np.mat(class_labels).T\n    m, n = data_matrix.shape\n    num_steps = 10.0; best_stump = {}; best_class_est = np.mat(np.zeros(shape=(m, 1)))\n    min_error = np.inf\n    # three nested loops: features, thresholds, inequality directions\n    for i in range(n):\n        range_min = data_matrix[:, i].min()\n        range_max = data_matrix[:, i].max()\n        step_size = float(range_max - range_min) / num_steps\n        for j in range(-1, int(num_steps+1)):\n            for inequal in ['lt', 'gt']:\n                thresh_val = range_min + step_size * float(j)\n                predicted_values = stump_calssify(data_matrix, i, thresh_val, inequal)\n                error_arr = np.mat(np.ones(shape=(m, 1)))\n                error_arr[predicted_values == label_matrix] = 0 # compare predictions with the actual labels\n                weight_error = D.T * error_arr\n                print('split: dim %d, thresh %.2f, thresh inequal %s, errorWeight %.2f'\\\n                      % (i, thresh_val, inequal, weight_error))\n                if weight_error < min_error:\n                    min_error = weight_error\n                    best_class_est = predicted_values.copy()\n                    best_stump['dim'] = i\n                    best_stump['thresh'] = thresh_val\n                    best_stump['ineq'] = inequal\n    return best_stump, min_error, best_class_est\n\n\ndef adaboost_train_ds(data_arr, class_labels, num_it):\n    week_class_arr = []\n    m = np.shape(data_arr)[0]\n    D = np.mat(np.ones(shape=(m, 1))/m)\n    agg_class_est = np.mat(np.zeros(shape=(m, 1))) # running total of the weighted scores\n    for i in range(num_it):\n        best_stump, error, class_est = build_stump(data_arr, class_labels, D)\n        print('D : ',D.T)\n        alpha = float(0.5 * np.log((1.0 - error) / np.max([error, 1e-16])))\n        best_stump['alpha'] = alpha\n        # keep a record of the stumps produced along the way\n        week_class_arr.append(best_stump)\n        # print('classEst ', class_est.T)\n        # recompute the weight vector D\n        expon = np.multiply(-1 * alpha * np.mat(class_labels).T, class_est) # m*1\n        D = np.multiply(D, np.exp(expon))\n        D = D / D.sum()\n\n        # accumulate the weighted class estimates\n        agg_class_est += alpha * class_est\n        # print('agg_class_est: ', agg_class_est.T)\n        agg_error = np.multiply(np.sign(agg_class_est) != np.sign(np.mat(class_labels).T), \\\n                                np.ones(shape=(m, 1)))\n        agg_error_rate = float(agg_error.sum() / m)\n        print('aggregate error rate: %.2f' % agg_error_rate)\n        if agg_error_rate == 0.0:\n            break\n    return week_class_arr\n\n\n# AdaBoost classification function\ndef ada_classify(data2class, classifer_arr):\n    data_mat = np.mat(data2class)\n    m = data_mat.shape[0]\n    agg_classifer = np.mat(np.zeros(shape=(m,1)))\n    for i in range(len(classifer_arr)):\n        stump = classifer_arr[i]\n        class_est = stump_calssify(data_mat, stump['dim'], \\\n                                   stump['thresh'], stump['ineq'])\n        agg_classifer += stump['alpha'] * class_est\n        print(agg_classifer)\n    return np.sign(agg_classifer)\n\n\n# predict on a hard-to-classify dataset\n\n# read a tab-delimited data file\ndef load_dataset(filename):\n    num_feature = len(open(filename, 'r').readline().split('\\t'))\n    data_arr = []; labels_arr =[]\n    for line in open(filename, 'r').readlines():\n        line_arr = []\n        cur_line = line.strip().split('\\t')\n        for i in range(num_feature - 1):\n            line_arr.append(float(cur_line[i]))\n        data_arr.append(line_arr)\n        labels_arr.append(float(cur_line[-1]))\n    return data_arr, labels_arr\n\n\n\n\n\nif __name__ == '__main__':\n    # data_mat, class_labels = load_simple_data()\n    # class_arr = adaboost_train_ds(data_mat, class_labels, 9)\n    # print(\"result\", ada_classify([[5, 5],[0, 0]], class_arr))\n    data_arr, labels_arr = load_dataset('horseColicTraining2.txt')\n    classifier_arr = adaboost_train_ds(data_arr, labels_arr, 10)\n    m = len(labels_arr)\n    err_arr = np.mat(np.ones(shape=(m, 1)))\n    prediction = ada_classify(data_arr, classifier_arr)\n    error_rate = float(err_arr[prediction != np.mat(labels_arr).T].sum() / m)\n    print(\"training error rate\", error_rate)\n\n    test_arr, test_labels_arr = load_dataset('horseColicTest2.txt')\n    prediction = ada_classify(test_arr, classifier_arr)\n\n    m = len(test_labels_arr)\n    err_arr = np.mat(np.ones(shape=(m, 1)))\n    error_rate = float(err_arr[prediction != np.mat(test_labels_arr).T].sum() / m)\n    print(\"test error rate\", error_rate)\n\n\n","sub_path":"Ch07/my_adaboost.py","file_name":"my_adaboost.py","file_ext":"py","file_size_in_byte":5310,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"162913668","text":"import torch\nfrom torch.nn import functional as F\n\n\ndef calc_iou(bbox1, bbox2):\n    \"\"\" Calculate IoU(Intersection over union) of two bounding boxes.\n\n    Args:\n        bbox1: (x1, y1, x2, y2), bounding box 1.\n        bbox2: (x1, y1, x2, y2), bounding box 2.\n\n    Returns:\n        iou: IoU of two bounding boxes.\n    \"\"\"\n    area_1 = (bbox1[2] - bbox1[0] + 1) * (bbox1[3] - bbox1[1] + 1)\n    area_2 = (bbox2[2] - bbox2[0] + 1) * (bbox2[3] - bbox2[1] + 1)\n\n    # intersection corners: max of the two top-left coordinates, min of the two bottom-right ones\n    inter_x1 = torch.max(torch.cat([bbox1.new([bbox1[0]]), bbox1.new([bbox2[0]])]))\n    inter_y1 = torch.max(torch.cat([bbox1.new([bbox1[1]]), bbox1.new([bbox2[1]])]))\n    inter_x2 = torch.min(torch.cat([bbox1.new([bbox1[2]]), bbox1.new([bbox2[2]])]))\n    inter_y2 = torch.min(torch.cat([bbox1.new([bbox1[3]]), bbox1.new([bbox2[3]])]))\n\n    # clamp to zero so that disjoint boxes get zero intersection\n    inter_w = torch.max(torch.cat([bbox1.new([0]), bbox1.new([inter_x2 - inter_x1 + 1])]))\n    inter_h = torch.max(torch.cat([bbox1.new([0]), bbox1.new([inter_y2 - inter_y1 + 1])]))\n\n    intersection = inter_w * inter_h\n    # union = area_1 + area_2 - intersection\n    iou = intersection / (area_1 + area_2 - intersection)\n\n    return iou\n\n\ndef calc_maskrcnn_loss(cls_prob, bbox_reg, mask_prob, cls_targets, bbox_targets,\n                       mask_targets):\n    \"\"\" Calculate Mask R-CNN loss.\n\n    Args:\n        cls_prob: (NxS)x num_classes, classification predict probability.\n        bbox_reg: (NxS)x num_classes x 4(dx, dy, dw, dh), bounding box regression.\n        mask_prob: (NxS)x num_classes x HxW, mask prediction.\n        cls_targets: (NxS), classification targets.\n        bbox_targets: (NxS)x4(dx, dy, dw, dh), bounding box regression targets.\n        mask_targets: (NxS)xHxW, mask targets.\n\n    Returns:\n        maskrcnn_loss: Total loss of Mask R-CNN predict heads.\n\n    Notes: In above, S: number of rois fed to prediction heads.\n\n    \"\"\"\n    cls_loss = F.nll_loss(cls_prob, cls_targets)\n    _, cls_pred = torch.max(cls_prob, 1)\n    # Only predicted class masks contribute to bbox and mask loss.\n    bbox_loss, mask_loss = 0, 0\n    for i in range(cls_prob.size(0)):\n        cls_id = int(cls_pred[i])\n        bbox_loss += F.smooth_l1_loss(bbox_reg[i, cls_id, :],\n                                      
bbox_targets[i, :])\n    # last part is positive roi, contribute to mask loss.\n    for i in range(mask_targets.size(0)):\n        start = cls_pred.size(0) - mask_targets.size(0)\n        cls_id = int(cls_pred[start + i])\n        mask_loss += F.binary_cross_entropy(mask_prob[start + i, cls_id, :, :],\n                                            mask_targets[i, :, :])\n    maskrcnn_loss = cls_loss + bbox_loss + mask_loss\n    return maskrcnn_loss\n\n\ndef coord_corner2center(bbox):\n    \"\"\" Transform corner style coord (x1, y1, x2, y2) to center style (x, y, w, h). \n    \n    Args:\n        bbox: (x1, y1, x2, y2), bounding box in corner coord, (x1, y1) stands for bbox \n            top-left, (x2, y2) stands for bbox bottom-right.\n\n    Returns: (x, y, w, h), bounding box in center coord, (x, y) stands for bbox center,\n        (w, h) stands for bbox width and height.\n    \"\"\"\n    x1, y1 = bbox.new([bbox[0]]), bbox.new([bbox[1]])\n    x2, y2 = bbox.new([bbox[2]]), bbox.new([bbox[3]])\n    x = torch.floor((x2 - x1 + 1) / 2) + x1\n    y = torch.floor((y2 - y1 + 1) / 2) + y1\n    w = x2 - x1 + 1\n    h = y2 - y1 + 1\n    return x, y, w, h\n\n\ndef coord_center2corner(bbox):\n    \"\"\" Transform center style coord (x, y, w, h) to corner style (x1, y1, x2, y2). \n\n    Args:\n        bbox: (x, y, w, h), bounding box in center coord, (x, y) stands for bbox center,\n            (w, h) stands for bbox width and height.\n\n    Returns: \n        bbox: (x1, y1, x2, y2), bounding box in corner coord, (x1, y1) stands for bbox \n            top-left, (x2, y2) stands for bbox bottom-right.\n    \"\"\"\n\n    x, y = bbox.new([bbox[0]]), bbox.new([bbox[1]])\n    w, h = bbox.new([bbox[2]]), bbox.new([bbox[3]])\n    x1 = x - torch.floor(w / 2)\n    y1 = y - torch.floor(h / 2)\n    x2 = x + torch.floor(w / 2)\n    y2 = y + torch.floor(h / 2)\n\n    return x1, y1, x2, y2\n","sub_path":"util/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3947,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"513474671","text":"from django.contrib.auth import get_user_model\nfrom django import forms\nfrom allauth.account.forms import SignupForm\nfrom django_countries.fields import CountryField\nfrom .models import Gender_CHOICES\n\n\nclass CustomSignupForm(SignupForm):\n    first_name = forms.CharField(max_length=30, label='First Name')\n    last_name = forms.CharField(max_length=30, label='Last Name')\n    birthdate = forms.DateField(input_formats=['%Y-%m-%d',], required=True, widget=forms.widgets.DateInput(attrs={'type': 'date'}))\n    citizenship = CountryField().formfield()\n    gender = forms.ChoiceField(choices=Gender_CHOICES, required=True)\n\n    def save(self, request):\n        # .save() returns a User object.\n        user = super(CustomSignupForm, self).save(request)\n        user.first_name = self.cleaned_data['first_name']\n        user.last_name = self.cleaned_data['last_name']\n        user.birthdate = self.cleaned_data['birthdate']\n        user.citizenship = self.cleaned_data['citizenship']\n        user.gender = self.cleaned_data['gender']\n        user.save()\n        return user\n","sub_path":"apps/users/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1070,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"509083049","text":"#2\r\n# Read the input value.\r\nnumero = input(\"ingrese un numero: \")\r\n\r\n# Validate the input.\r\ntry:\r\n    numero = int(numero)\r\nexcept ValueError:\r\n    print(\"usted no sabe leer\")\r\n    exit()\r\n\r\n# Parity check.\r\nif numero % 2 == 0:\r\n    print(f\"el numero: {numero}, es par\")\r\nelse:\r\n    print(f\"el numero: {numero}, es 
impar\")","sub_path":"dos.py","file_name":"dos.py","file_ext":"py","file_size_in_byte":333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"309147153","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Feb 23 11:11:52 2019\n\n@author: larakamal\n\"\"\"\n\nimport csv \nimport matplotlib\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.metrics import mean_squared_error, r2_score\nimport warnings\nimport math \nfrom sklearn.feature_selection import RFE\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score\nfrom matplotlib import rcParams\nfrom scipy import stats\nfrom sklearn import preprocessing\n\nwarnings.filterwarnings('always') \nwarnings.filterwarnings('ignore')\nimport time\n\n\n#find the transpose of a matrix \ndef matrixTranspose(matrix):\n if not matrix: \n return []\n else:\n try:\n return [ [ row[ i ] for row in matrix ] for i in range( len( matrix[ 0 ] ) ) ]\n except:\n result = []\n for i in matrix:\n result.append([i])\n return result\n\n#convert to log function \ndef logFunction(list):\n list2 = toFloat(list)\n result = []\n for i in list2:\n try:\n ans = round(math.log(i,10),1)\n except:\n ans = 0.0\n result.append(ans)\n #time.sleep(0.1)\n return result\n\n\n#convert a list to float \ndef toFloat(list):\n list2 = []\n for i in list:\n try:\n list2.append(float(i))\n except:\n #print('error',i)\n [ans] = i\n list2.append(float(ans))\n return list2\n\n#convert a list to string \ndef toString(list):\n myList = ['A','B','C','D','E','F','G','H','I','J','K','L','M','N','O','P','Q']\n list2 = []\n curr = list[0]\n count = 0\n for i in list:\n if (i == curr):\n item = myList[count] + '- ' + str(i)\n list2.append(item)\n else:\n count = count +1\n curr = i\n item = myList[count] + '- ' + str(i)\n list2.append(item)\n \n return list2 \n\n#generate the plots\ndef plot(training, target, names):\n #find the transpose of training data \n trainingTrans = matrixTranspose(training) \n \n for i in range(len(trainingTrans)):\n #create an instance of plotting \n fig, ax = plt.subplots()\n \n \"\"\"\n ################ plot target ##################\n \n #convert to float and string \n trainingInst = toFloat(trainingTrans[i])\n target2 = logFunction(target)\n \n #plot \n ax.plot(target2, trainingInst,'go')\n \n\n ax.set(ylabel= names[i],xlabel='Log(Mass of blackhole)')\n plt.title(names[i] +' vs. Log(Mass of blackhole)')\n \n ax.grid()\n plt.show()\n rcParams.update({'figure.autolayout': True})\n fileName = 'plotstest/' +(names[i].replace('/','').replace('01','').replace(')',''))\n \n \"\"\" \n ###############################################\n \n \n ################ plot utarget ################## \n \n #convert to float and string \n trainingInst = toFloat(trainingTrans[i])\n \n #plot \n ax.plot(target, trainingInst,'go')\n \n\n ax.set(ylabel= names[i],xlabel='Ionization Parameters')\n plt.title(names[i] +' vs. 
Ionization Parameters')\n \n ax.grid()\n plt.show()\n rcParams.update({'figure.autolayout': True})\n fileName = 'plotsutest/' +(names[i].replace('/','').replace('01','').replace(')',''))\n \n ###############################################\n \n fig.savefig(fileName) # save the figure to file\n plt.close(fig) # close the figure\n \n\ndef removeData(training, names):\n #if the average = first value \n training2 = matrixTranspose(training)\n names2 = matrixTranspose(names)\n \n training3 = []\n names3 = []\n \n for i in range(len(training2)):\n avg = sum(training2[i])/ float(len(training2[i]))\n if (avg != training2[i][0]):\n training3.append(training2[i])\n names3.append(names2[i])\n \n training4 = matrixTranspose(training3)\n names4 = matrixTranspose(names3)\n [names5] = names4\n \n\n return training4, names5\n \n\ndef breakInBins(training, target, targetu):\n training1 = []\n target1 = []\n targetu1 = []\n training2= []\n target2 = []\n targetu2 = []\n training3 = []\n target3 = []\n targetu3 = []\n \n floatTarget = toFloat(target)\n for i in range(len(target)):\n if (floatTarget[i] <= 4 ):\n training1.append(training[i])\n target1.append(target[i])\n targetu1.append(targetu[i])\n elif (floatTarget[i] > 4 and floatTarget[i] <= 6):\n training2.append(training[i])\n target2.append(target[i])\n targetu2.append(targetu[i])\n else:\n training3.append(training[i])\n target3.append(target[i])\n targetu3.append(targetu[i])\n return training1, target1, targetu1, training2, target2, targetu2, training3, target3, targetu3\n\ndef plotSingle(x,y,xname,yname):\n fig, ax = plt.subplots()\n \n ax.plot(x, y,'o')\n \n ax.set(ylabel= yname,xlabel=xname)\n plt.title(xname + ' vs. '+ yname)\n \n ax.grid()\n plt.show()\n rcParams.update({'figure.autolayout': True})\n fileName = 'HighMassa/' + (yname+ ' ' +xname).replace('/','').replace('1.01','1')\n \n #print(fileName)\n fig.savefig(fileName) # save the figure to file\n \n plt.close(fig) # close the figure\n \n\ndef cross_validation(k,training,target):\n fold = 100/k\n fold = fold/100\n \n #convert training to float\n trainingFloat = []\n for i in training:\n trainingFloat.append(toFloat(i))\n \n #split\n x_train, x_test, y_train, y_test = train_test_split(training, target, test_size= fold, random_state=0)\n \n #logistic regression \n lm = LinearRegression()\n #lm.fit(trainingFloat,target)\n \n #rfe = RFE(model,k)\n # fit = rfe.fit(x_train, y_train)\n \n lm.fit(x_train, y_train)\n \n #test\n y_score = lm.predict(x_test)\n \n mse = mean_squared_error(toFloat(y_test), toFloat(y_score))\n print(mse)\n\n \n #print(y_test)\n # print(y_score)\n #y_test = ''.join(y_test)\n #y_score = ''.join(y_score)\n #print('scores:')\n #print ('accuracy: ', round (accuracy_score(y_test, y_score),3)*100, '%')\n #print ('precision: ', round (precision_score(y_test, y_score, average='weighted'),3)*100)\n #print ('recall: ', round (recall_score(y_test, y_score, average='weighted'),3)*100)\n #print ('f1 score: ', round (f1_score(y_test, y_score, average='weighted'),3)*100)\n #print(' ')\n \n \ndef applyRegression(training, target, names):\n \n trainingTrans = matrixTranspose(training) \n trainingFloat = []\n elementList = []\n scoreList = []\n coefList = []\n y = np.array(toFloat(target))\n\n \n for i in range(len(trainingTrans)):\n insFloat = toFloat(trainingTrans[i])\n x = np.array(insFloat).reshape(-1,1)\n #print(x)\n model = LinearRegression().fit(x,y)\n #plotSingle(y,x,'Ionization Parameter',names[i])\n #plotSingle(y,x,'Mass of Blackhole',names[i])\n\n \n [coef] = model.coef_\n 
score = model.score(x,y)\n        coefList.append(abs(coef))\n        scoreList.append(score)\n        \n    #normalize \n    [newCoef] = preprocessing.normalize([coefList])\n    #print(newCoef)\n    \n    for i in range(len(names)):\n        elementList.append([names[i], newCoef[i], scoreList[i]])\n    \n    return elementList\n\ndef normalize(list1, list2, list3):\n    #entries have the form [name, coef, mse]\n    listN1 = []\n    listN2 = []\n    listN3 = []\n    coefList = []\n    mseList = []\n    \n    #list1, 2 and 3 have the same length \n    for i in range(len(list1)):\n        coefList.append(list1[i][1])\n        coefList.append(list2[i][1])\n        coefList.append(list3[i][1])\n        mseList.append(list1[i][2])\n        mseList.append(list2[i][2])\n        mseList.append(list3[i][2])\n    \n    minCoef = min(coefList)\n    maxCoef = max(coefList)\n    minMse = min(mseList)\n    maxMse = max(mseList)\n    \n    #min-max scale both columns into [0, 1]; the subtraction must be\n    #parenthesized, otherwise operator precedence divides only the minimum\n    for i in range(len(list1)):\n        coef = (list1[i][1] - minCoef) / (maxCoef - minCoef)\n        mse = (list1[i][2] - minMse) / (maxMse - minMse)\n        listN1.append([list1[i][0], coef, mse])\n        \n        coef = (list2[i][1] - minCoef) / (maxCoef - minCoef)\n        mse = (list2[i][2] - minMse) / (maxMse - minMse)\n        listN2.append([list2[i][0], coef, mse])\n        \n        coef = (list3[i][1] - minCoef) / (maxCoef - minCoef)\n        mse = (list3[i][2] - minMse) / (maxMse - minMse)\n        listN3.append([list3[i][0], coef, mse])\n    \n    return listN1, listN2, listN3\n\ndef getResult(MassBin, uListBin):\n    \n    \"\"\"\n    list = []\n    for i in range(len(MassBin)):\n        for j in range(len(uListBin)):\n            if (MassBin[i][0] == uListBin[j][0]):\n                curr = []\n                curr.append(MassBin[i][0])\n                #[name,coef,score]\n                final = MassBin[i][1] + MassBin[i][2] - uListBin[j][1] - uListBin[j][2]\n                curr.append(round(final,2))\n                list.append(curr)\n    list.sort(key=lambda x: x[1], reverse=True)\n    \"\"\"\n    final = []\n    listMass = []\n    listU = [] \n\n    for i in MassBin:\n        listMass.append([i[0], i[1] + i[2]]) \n    listMass.sort(key=lambda x: x[1], reverse=True)\n\n    for i in uListBin:\n        listU.append([i[0], i[1] + i[2]]) \n    listU.sort(key=lambda x: x[1], reverse=True)\n\n    #keep the 15 features that rank highest for each target\n    k = 15\n    listMass = listMass[:k]\n    listU = listU[:k]\n    \n    #keep mass-ranked features that do not also rank highly for U\n    for i in listMass:\n        n = len(listMass) - 1\n        for j in listU:\n            if (i[0] == j[0]):\n                break\n            elif (n == 0):\n                final.append(i)\n            n -= 1 \n    \n    return final \n\ndef printToFile(list1, list2, list3):\n    maxLen = max(len(list1), len(list2), len(list3))\n    list1 = pad(list1, 0, maxLen)\n    list2 = pad(list2, 0, maxLen) \n    list3 = pad(list3, 0, maxLen) \n    \n    with open('output.csv', mode='w') as file:\n        outputwriter = csv.writer(file, delimiter=',')\n        outputwriter.writerow(['Low Mass AGNs', 'Medium Mass AGNs', 'High Mass AGNs'])\n        for i in range(maxLen):\n            outputwriter.writerow([str(list1[i]), str(list2[i]), str(list3[i])])\n\ndef pad(l, content, width):\n    l.extend([content] * (width - len(l)))\n    return l\n    \ndef contourPlot(x, y, z, xname, yname, zname):\n    plt.figure()\n    x1, y1 = np.meshgrid(x, y)\n    cp = plt.contourf(x, y, z)\n    plt.colorbar(cp)\n    plt.title(zname)\n    plt.xlabel(xname)\n    plt.ylabel(yname)\n    plt.show()\n    \n\ndef findSubset(training, target, utarget, names):\n    \n    #break into low/medium/high mass bins\n    trainingbin1, targetbin1, targetubin1, trainingbin2, targetbin2, targetubin2, trainingbin3, targetbin3, targetubin3 = breakInBins(training, target, utarget)\n    \n    MassBin1 = applyRegression(trainingbin1, targetbin1, names)\n    \n    MassBin2 = applyRegression(trainingbin2, targetbin2, names)\n    \n    MassBin3 = applyRegression(trainingbin3, targetbin3, names)\n    \n    \n    uListBin1 = 
applyRegression(trainingbin1, targetubin1, names)\n    \n    uListBin2 = applyRegression(trainingbin2, targetubin2, names)\n    \n    uListBin3 = applyRegression(trainingbin3, targetubin3, names)\n    \n    #[name, coef, mse]\n    \n    list1 = getResult(MassBin1, uListBin1)\n    list2 = getResult(MassBin2, uListBin2)\n    list3 = getResult(MassBin3, uListBin3)\n    #has [name, final, mass coef, mass mse, u coef, u mse]\n    printToFile(list1, list2, list3) \n    return list1, list2, list3\n    \n\ndef createGraph(list1, list2, list3, x, y):\n    px = []\n    py = []\n    for i in list1:\n        if (i[0] == x):\n            px.append(float(i[1]))\n        if (i[0] == y):\n            py.append(float(i[1]))\n    \n    for i in list2:\n        if (i[0] == x):\n            px.append(float(i[1]))\n        if (i[0] == y):\n            py.append(float(i[1]))\n    \n    for i in list3:\n        if (i[0] == x):\n            px.append(float(i[1]))\n        if (i[0] == y):\n            py.append(float(i[1]))\n    \n    #for color in ['r', 'b', 'g', 'k', 'm']:\n    plt.plot(px[0], py[0], 'ro', color = 'r', label = 'Low Mass AGNs')\n    plt.plot(px[1], py[1], 'ro', color = 'b', label = 'Medium Mass AGNs')\n    plt.plot(px[2], py[2], 'ro', color = 'g', label = 'High Mass AGNs')\n\n    plt.legend(loc='upper left')\n    plt.xlabel(x)\n    plt.ylabel(y)\n    plt.title('Score Plot')\n    \n    plt.show() \n    \ndef createDataGraph(list1, list2, list3, training2, names2, target, x, y):\n    px = []\n    py = []\n    for i in list1:\n        if (i[0] == x):\n            px.append(float(i[1]))\n        if (i[0] == y):\n            py.append(float(i[1]))\n    \n    for i in list2:\n        if (i[0] == x):\n            px.append(float(i[1]))\n        if (i[0] == y):\n            py.append(float(i[1]))\n    \n    for i in list3:\n        if (i[0] == x):\n            px.append(float(i[1]))\n        if (i[0] == y):\n            py.append(float(i[1]))\n    \n    #for color in ['r', 'b', 'g', 'k', 'm']:\n    plt.plot(px[0], py[0], 'ro', color = 'r', label = 'Low Mass AGNs')\n    plt.plot(px[1], py[1], 'ro', color = 'b', label = 'Medium Mass AGNs')\n    plt.plot(px[2], py[2], 'ro', color = 'g', label = 'High Mass AGNs')\n\n    indexx = 0\n    indexy = 0\n    #find the column index of the requested feature names (use the names2 parameter, not a global)\n    for i in range(len(names2)):\n        if (names2[i] == x):\n            indexx = i\n        if (names2[i] == y):\n            indexy = i\n    #print data\n    # for i in range(len(training2)):\n    #     plt.plot(training2[i][indexx], training2[i][indexy], 'o', color = 'k')\n    #plt.plot(training2[0][indexx], training2[0][indexy], 'o', color = 'k') \n    \n    plt.legend(loc='upper left')\n    plt.xlabel(x)\n    plt.ylabel(y)\n    plt.title('Score Plot')\n    \n    plt.show() \n    \n############################# READ TRAINING DATA #############################\ntraining = []\n#read target of training data \ntarget = []\nutarget = []\nfile_reader = open('RatiosGrid_test4.csv', \"r\")\nread = csv.reader(file_reader)\nfor row in read:\n    #separate training and target\n    if(row[:1] != '' and row[3:][0] != ''):\n        utarget.append(row[1:2])\n        target.append(row[:1])\n        training.append(row[3:])\n    \n#remove the labelling row \n[names] = training[:1]\ntraining = training[1:]\n\ntarget = target[1:] \nutarget = utarget[1:] \n\n\n############################# PREPROCESS DATA #############################\n#the CSV stores everything as strings, so convert to float;\n#missing entries are imputed with 0\n\nfor i in range(len(training)):\n    for j in range(len(training[1])):\n        try:\n            training[i][j] = float(training[i][j]) \n        except:\n            training[i][j] = 0\n    \n\n#flatten the single-element rows\nfor i in range(len(utarget)):\n    utarget[i] = utarget[i][0]\n    \nfor i in range(len(target)):\n    target[i] = target[i][0]\n    \n#convert values to float \nfor i in range(len(target)):\n    target[i] = float(target[i])\n\n#apply log function \ntargetlog = logFunction(target) 
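# regression target is log10 of the black-hole mass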
\n\n\n#normalize data using zscores\n#round the number to 3 decimal place\ntrans = matrixTranspose(training)\nnewTraining = []\n\"\"\"\nfor i in trans:\n zscoreList = stats.zscore(i)\n zscoreList2 = []\n for j in zscoreList:\n num = str(round(j,3))\n # print(num)\n if (num == 'nan'):\n num2 =0\n zscoreList2.append(num2)\n else:\n num2 = float(num) \n zscoreList2.append(num2)\n \n newTraining.append(zscoreList2)\n\"\"\" \nfor i in trans: \n [item] = preprocessing.normalize([i])\n newTraining.append(item)\n\ntraining2 = matrixTranspose(newTraining)\n\n#remove unuseful data\ntraining3, names2 = removeData(training2, names)\ncross_validation(5,training3,targetlog)\n#list1,list2,list3 = findSubset(training3, targetlog, utarget, names2) \n\n#contourPlot(targetlog,utarget,training2,'Log(Mass)','U',names[0])\n\n#createGraph(list1,list2,list3,'S9(1)/S3(18)', 'S9(3)/S3(18)')\n#createGraph(list1,list2,list3,'Na4(9)/Na3', 'Na4(21)/Na3')\n\n#low mass \nprint('low mass')\n#createGraph(list1,list2,list3,'Na6(8)/Na4', 'Si10/Si9')\n#createGraph(list1,list2,list3,'Na6(8)/Na4(6)', 'Si11/Si10')\n#createGraph(list1,list2,list3,'Na6(14)/Na4(21)', 'Al6(9)/Al5')\n\n#medium mass\n\nprint('medium mass')\n#createGraph(list1,list2,list3,'Fe13/Fe6(1.01)', 'Si9/Si6')\n#createGraph(list1,list2,list3,'Fe13/Fe6(1.01)', 'Si11/Si6')\n#createGraph(list1,list2,list3,'Si9/Si6', 'Al8(5)/Al6(9)')\n\n#high mass \n#print('high mass')\n#createGraph(list1,list2,list3,'Si11/Si10', 'Si11/Si6')\n#createGraph(list1,list2,list3,'Si11/Si7(2)', 'Si11/Si7(6)')\n#createGraph(list1,list2,list3,'Si10/Si6', 'Si9/Si6')\n\n\n\n\n#createDataGraph(list1,list2,list3,MassBin1, MassBin2, MassBin3,'Mg5(5)/Mg4', 'Si10/Si9')\n\n","sub_path":"regression.py","file_name":"regression.py","file_ext":"py","file_size_in_byte":17178,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"435537067","text":"from collections import deque\nfrom random import randint\n\nfrom . 
import settings\nfrom .datatypes import Vector, Position, Draw\nfrom .exceptions import SnakeError, SnakePlacementError\nfrom .world import World\n\n\nclass BaseSnake:\n COLOR_0 = World.COLOR_0\n CH_VOID = World.CH_VOID\n CH_STONE = World.CH_STONE\n\n CH_HEAD = '@'\n CH_BODY = '*'\n CH_TAIL = '$'\n BODY_CHARS = frozenset([CH_HEAD, CH_BODY, CH_TAIL])\n\n CH_DEAD_HEAD = 'x'\n CH_DEAD_BODY = '+'\n CH_DEAD_TAIL = '%'\n DEAD_BODY_CHARS = frozenset([CH_DEAD_HEAD, CH_DEAD_BODY, CH_DEAD_TAIL])\n\n UP = Vector(0, -1)\n DOWN = Vector(0, 1)\n LEFT = Vector(-1, 0)\n RIGHT = Vector(1, 0)\n\n DIRECTIONS = (UP, DOWN, LEFT, RIGHT)\n\n color = None\n alive = False\n\n def __init__(self, game_settings, world, color):\n self._game_settings = game_settings\n self._world = world\n self.color = color\n self.alive = True\n\n def __repr__(self):\n return '<%s [color=%s]>' % (self.__class__.__name__, self.color)\n\n\nclass Snake(BaseSnake):\n grow = 0\n body = ()\n direction = None\n current_direction = None\n\n def __init__(self, *args, **kwargs):\n super(Snake, self).__init__(*args, **kwargs)\n self.body = deque()\n self.grew = False\n\n def reset(self):\n self.grow = 0\n self.body.clear()\n self.direction = self.current_direction = None\n\n def create(self):\n assert not self.grow\n assert not self.body\n assert not self.direction\n\n # try to spawn snake at some distance from world's borders\n distance = settings.INIT_LENGTH + settings.INIT_MIN_DISTANCE_BORDER\n x = randint(distance, World.SIZE_X - distance)\n y = randint(distance, World.SIZE_Y - distance)\n self.direction = self.current_direction = self.DIRECTIONS[randint(0, 3)]\n # create snake from tail to head\n render = []\n pos = Position(x, y)\n\n for i in range(0, settings.INIT_LENGTH):\n target = self._world[pos.y][pos.x]\n\n if target.char != self.CH_VOID:\n raise SnakePlacementError('Cannot place snake on %r because the position '\n 'is occupied by %r', pos, target)\n\n if i == 0:\n char = self.CH_TAIL\n elif i == settings.INIT_LENGTH - 1:\n char = self.CH_HEAD\n else:\n char = self.CH_BODY\n\n self.body.appendleft(pos)\n render.append(Draw(pos.x, pos.y, char, self.color))\n pos = self.next_position()\n\n return render\n\n def render_new(self):\n render = None\n\n for i in range(0, settings.INIT_RETRIES):\n try:\n render = self.create()\n except SnakePlacementError:\n self.reset()\n else:\n break\n\n if not render:\n raise SnakeError('There is no place for a new snake in this world :(')\n\n return render\n\n def next_position(self):\n # next position of the snake's head\n return Position(self.body[0].x + self.direction.xdir,\n self.body[0].y + self.direction.ydir)\n\n def render_move(self, ignore_tail=False):\n # moving snake to the next position\n render = []\n new_head = self.next_position()\n self.body.appendleft(new_head)\n # draw head in the next position\n render.append(Draw(new_head.x, new_head.y, self.CH_HEAD, self.color))\n # draw body in the old place of head\n render.append(Draw(self.body[1].x, self.body[1].y, self.CH_BODY, self.color))\n # save current direction of the head\n self.current_direction = self.direction\n\n # if we grow this turn, the tail remains in place\n if self.grow > 0:\n self.grow -= 1\n self.grew = True\n else:\n self.grew = False\n # otherwise the tail moves\n old_tail = self.body.pop()\n if not ignore_tail:\n render.append(Draw(old_tail.x, old_tail.y, self.CH_VOID, self.COLOR_0))\n new_tail = self.body[-1]\n render.append(Draw(new_tail.x, new_tail.y, self.CH_TAIL, self.color))\n\n return render\n\n def 
render_game_over(self):\n render = []\n\n # dead snake\n for i, pos in enumerate(self.body):\n if i == 0:\n render.append(Draw(pos.x, pos.y, self.CH_DEAD_HEAD, self.COLOR_0))\n elif i == len(self.body) - 1:\n render.append(Draw(pos.x, pos.y, self.CH_DEAD_TAIL, self.COLOR_0))\n else:\n render.append(Draw(pos.x, pos.y, self.CH_DEAD_BODY, self.COLOR_0))\n\n return render\n","sub_path":"snakepit/snake.py","file_name":"snake.py","file_ext":"py","file_size_in_byte":4737,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"66888910","text":"##################################\n# DB.py\n# Author: Charlie Mueller, Nathaniel Rupprecht, Alberto Zucchetta, Andrew Wightman\n# Date: June 11, 2015\n#\n# Data Type Key:\n# { a, b, c, ... } -- denotes a tuple\n# [ key ] -- denotes a dictionary of keys associated with objects\n# ( object ) -- denotes a list of objects \n##################################\n\n# Imports\nimport cx_Oracle\nimport socket\n# For the parsing\nimport re\n\n# Key version stripper\ndef stripVersion(name):\n if re.match('.*_v[0-9]+',name): name = name[:name.rfind('_')]\n #name = str(name.split('_v[0-9]')[0])\n #p = re.compile(r\"_v[0-9]\\b\")\n #name = p.sub('',name,)\n return name\n\n# A class that interacts with the CMS oracle database and fetches information that we need\nclass DBParser:\n def __init__(self, cfg) :\n self.cfg = cfg\n # Connect to the Database\n hostname = socket.gethostname()\n if hostname.find('lxplus') > -1: self.dsn_ = cfg[\"dsn_info\"]['offline']\n else: self.dsn_ = cfg[\"dsn_info\"]['online']\n\n orcl = cx_Oracle.connect(user=cfg[\"trg_connect\"]['user'],password=cfg[\"trg_connect\"]['passwd'],dsn=self.dsn_)\n # Create a DB cursor\n self.curs = orcl.cursor()\n\n self.L1Prescales = {} # {algo_index: {psi: prescale}}\n self.HLTPrescales = {} # {'trigger': [prescales]}\n self.HLTSequenceMap = {}\n self.GTRS_Key = \"\"\n self.HLT_Key = \"\"\n self.TSC_Key = \"\"\n self.ConfigId = \"\"\n self.GT_Key = \"\"\n self.nAlgoBits = 128\n\n self.HLTSeed = {}\n self.L1Mask = {}\n self.L1IndexNameMap = {}\n self.L1NameIndexMap = {}\n self.PSColumnByLS = {}\n \n # Returns: a cursor to the HLT database\n def getHLTCursor(self):\n while True:\n try:\n orcl = cx_Oracle.connect(user=self.cfg[\"hlt_connect\"]['user'],password=self.cfg[\"hlt_connect\"]['passwd'],dsn=self.dsn_)\n return orcl.cursor()\n except cx_Oracle.DatabaseError as e:\n print(\"Failed getting a connection to the HLT DB: \",e, \"- Retrying..\")\n\n # Returns: a cursor to the trigger database\n def getTrgCursor(self):\n while True:\n try:\n orcl = cx_Oracle.connect(user=self.cfg[\"trg_connect\"]['user'],password=self.cfg[\"trg_connect\"]['passwd'],dsn=self.dsn_)\n return orcl.cursor()\n except cx_Oracle.DatabaseError as e:\n print(\"Failed getting a connection to the Trigger DB: \",e, \"- Retrying..\")\n\n def getLSInfo(self, runNumber):\n sqlquery = \"\"\"\n SELECT\n LUMI_SECTION,\n PRESCALE_INDEX\n FROM\n CMS_UGT_MON.VIEW_LUMI_SECTIONS\n WHERE\n RUN_NUMBER = %s\n \"\"\" % (runNumber)\n ls_info = []\n try:\n self.curs.execute(sqlquery)\n ls_info = self.curs.fetchall()\n except:\n print(\"Unable to get LS list for run %s\" % runNumber)\n return ls_info\n\n # Returns the various keys used for the specified run as a 5-tuple\n def getRunKeys(self,runNumber):\n sqlquery = \"\"\"\n SELECT\n B.ID,\n B.HLT_KEY,\n B.L1_TRG_RS_KEY,\n B.L1_TRG_CONF_KEY,\n C.UGT_KEY\n FROM\n CMS_WBM.RUNSUMMARY A,\n CMS_L1_HLT.L1_HLT_CONF B,\n CMS_TRG_L1_CONF.L1_TRG_CONF_KEYS C\n WHERE\n B.ID 
= A.TRIGGERMODE AND\n A.RUNNUMBER = %d AND\n C.ID = B.L1_TRG_CONF_KEY\n \"\"\" % (runNumber)\n L1_HLT,HLT,GTRS,TSC,GT = \"\",\"\",\"\",\"\",\"\"\n try: \n self.curs.execute(sqlquery)\n L1_HLT,HLT,GTRS,TSC,GT = self.curs.fetchone()\n except:\n print(\"[ERROR] Unable to get keys for this run, %d\" % (runNumber))\n return L1_HLT,HLT,GTRS,TSC,GT\n\n # Returns: True if we succeded, false if the run doesn't exist (probably)\n def getRunInfo(self, runNumber):\n ## This query gets the L1_HLT Key (A), the associated HLT Key (B) and the Config number for that key (C)\n #KeyQuery = \"\"\"\n #SELECT A.TRIGGERMODE, B.HLT_KEY, B.GT_RS_KEY, B.TSC_KEY, D.GT_KEY FROM\n #CMS_WBM.RUNSUMMARY A, CMS_L1_HLT.L1_HLT_CONF B, CMS_HLT_GDR.U_CONFVERSIONS C, CMS_TRG_L1_CONF.TRIGGERSUP_CONF D WHERE\n #B.ID = A.TRIGGERMODE AND C.NAME = B.HLT_KEY AND D.TS_Key = B.TSC_Key AND A.RUNNUMBER=%d\n #\"\"\" % (runNumber)\n\n #KeyQuery = \"\"\"\n #SELECT A.TRIGGERMODE, B.HLT_KEY, B.L1_TRG_RS_KEY, B.L1_TRG_CONF_KEY, B.UGT_KEY FROM\n #CMS_WBM.RUNSUMMARY A, CMS_L1_HLT.V_L1_HLT_CONF_EXTENDED B, CMS_TRG_L1_CONF.TRIGGERSUP_CONF D WHERE\n #B.ID = A.TRIGGERMODE AND A.RUNNUMBER=%d\n #\"\"\" % (runNumber)\n\n sqlquery = \"\"\"\n SELECT\n LUMI_SECTION,\n PRESCALE_INDEX\n FROM\n CMS_UGT_MON.VIEW_LUMI_SECTIONS\n WHERE\n RUN_NUMBER = %s\n \"\"\" % (runNumber)\n\n try:\n self.curs.execute(sqlquery)\n self.PSColumnByLS = {} \n for lumi_section, prescale_column in self.curs.fetchall():\n self.PSColumnByLS[lumi_section] = prescale_column\n except:\n print(\"Trouble getting PS column by LS\")\n\n self.L1_HLT_Key, self.HLT_Key, self.GTRS_Key, self.TSC_Key, self.GT_Key = self.getRunKeys(runNumber)\n if self.HLT_Key == \"\":\n # The key query failed\n return False\n else:\n return True\n\n # Use: Get the instant luminosity for each lumisection from the database\n # Parameters:\n # -- runNumber: the number of the run that we want data for\n # Returns: A list of of information for each LS: ( { LS, instLumi, physics } )\n def getLumiInfo(self, runNumber, minLS=-1, maxLS=9999999, lumi_source=0):\n # NOTE: Currently making queries to CMS_BEAM_COND.CMS_BRIL_LUMINOSITY is taking excessively long times offline\n lumi_nibble = 16 # This is the value that WBM uses for lumi sections\n query = \"\"\"\n SELECT\n B.INSTLUMI,\n A.PLTZERO_INSTLUMI,\n A.HF_INSTLUMI,\n B.LUMISECTION,\n B.PHYSICS_FLAG*B.BEAM1_PRESENT,\n B.PHYSICS_FLAG*B.BEAM1_PRESENT*B.EBP_READY*\n B.EBM_READY*B.EEP_READY*B.EEM_READY*\n B.HBHEA_READY*B.HBHEB_READY*B.HBHEC_READY*\n B.HF_READY*B.HO_READY*B.RPC_READY*\n B.DT0_READY*B.DTP_READY*B.DTM_READY*\n B.CSCP_READY*B.CSCM_READY*B.TOB_READY*\n B.TIBTID_READY*B.TECP_READY*B.TECM_READY*\n B.BPIX_READY*B.FPIX_READY*B.ESP_READY*B.ESM_READY,\n C.PRESCALE_INDEX\n FROM\n CMS_BEAM_COND.CMS_BRIL_LUMINOSITY A,\n CMS_RUNTIME_LOGGER.LUMI_SECTIONS B,\n CMS_UGT_MON.VIEW_LUMI_SECTIONS C\n WHERE\n A.RUN = %s AND\n A.LUMINIBBLE = %s AND\n A.RUN = B.RUNNUMBER AND\n A.LUMISECTION = B.LUMISECTION AND\n C.RUN_NUMBER(+) = B.RUNNUMBER AND\n C.LUMI_SECTION(+) = B.LUMISECTION AND\n B.LUMISECTION >= %s AND C.LUMI_SECTION >= %s AND\n B.LUMISECTION <= %s AND C.LUMI_SECTION <= %s\n ORDER BY\n LUMISECTION\n \"\"\" % (runNumber,lumi_nibble,minLS,minLS,maxLS,maxLS)\n self.curs.execute(query) # Execute the query\n _list = []\n for item in self.curs.fetchall():\n ilum = item[lumi_source]\n if ilum is None:\n if item[1]:\n ilum = item[1]\n elif item[2]:\n ilum = item[2]\n else:\n ilum = None\n LS = item[3]\n phys = item[4]\n cms_ready = item[5]\n psi = item[6]\n 
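# one summary row per lumisection: [LS, inst. lumi, prescale column index, physics flag, cms_ready flag]\n            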
_list.append([int(LS),float(ilum),int(psi),bool(phys),bool(cms_ready)])\n return _list\n\n # Use: Get the instant luminosity for each lumisection (only from CMS_RUNTIME_LOGGER.LUMI_SECTIONS)\n # Parameters:\n # -- runNumber: the number of the run that we want data for\n # Returns: A list of of information for each LS: ( { LS, instLumi, physics } )\n def getQuickLumiInfo(self,runNumber,minLS=-1,maxLS=9999999):\n query = \"\"\"\n SELECT\n B.INSTLUMI,\n B.LUMISECTION,\n B.PHYSICS_FLAG*B.BEAM1_PRESENT,\n B.PHYSICS_FLAG*B.BEAM1_PRESENT*B.EBP_READY*\n B.EBM_READY*B.EEP_READY*B.EEM_READY*\n B.HBHEA_READY*B.HBHEB_READY*B.HBHEC_READY*\n B.HF_READY*B.HO_READY*B.RPC_READY*\n B.DT0_READY*B.DTP_READY*B.DTM_READY*\n B.CSCP_READY*B.CSCM_READY*B.TOB_READY*\n B.TIBTID_READY*B.TECP_READY*B.TECM_READY*\n B.BPIX_READY*B.FPIX_READY*B.ESP_READY*B.ESM_READY,\n C.PRESCALE_INDEX\n FROM\n CMS_RUNTIME_LOGGER.LUMI_SECTIONS B,\n CMS_UGT_MON.VIEW_LUMI_SECTIONS C\n WHERE\n B.RUNNUMBER = %s AND\n C.RUN_NUMBER(+) = B.RUNNUMBER AND\n C.LUMI_SECTION(+) = B.LUMISECTION AND\n B.LUMISECTION >= %s AND C.LUMI_SECTION >= %s AND\n B.LUMISECTION <= %s AND C.LUMI_SECTION <= %s\n ORDER BY\n LUMISECTION\n \"\"\" % (runNumber,minLS,minLS,maxLS,maxLS)\n\n self.curs.execute(query) # Execute the query\n _list = []\n for item in self.curs.fetchall():\n ilum = item[0]\n LS = item[1]\n phys = item[2]\n cms_ready = item[3]\n psi = item[4]\n _list.append([int(LS),float(ilum),int(psi),bool(phys),bool(cms_ready)])\n return _list\n\n # Use: Get the prescaled rate as a function \n # Parameters: runNumber: the number of the run that we want data for\n # Returns: A dictionary [ triggerName ] [ LS ] \n def getPSRates(self, runNumber, minLS=-1, maxLS=9999999):\n # Note: we find the raw rate by dividing CMS_RUNINFO.HLT_SUPERVISOR_TRIGGERPATHS.Accept by 23.31041\n\n sqlquery = \"\"\"\n SELECT\n A.LSNUMBER,\n SUM(A.PACCEPT),\n (\n SELECT\n M.NAME\n FROM\n CMS_HLT_GDR.U_PATHS M,\n CMS_HLT_GDR.U_PATHIDS L\n WHERE\n L.PATHID=A.PATHID AND\n M.ID=L.ID_PATH\n ) PATHNAME\n FROM\n CMS_RUNINFO.HLT_SUPERVISOR_TRIGGERPATHS A\n WHERE\n RUNNUMBER = %s AND\n A.LSNUMBER >= %s AND\n A.LSNUMBER <= %s\n GROUP BY\n A.LSNUMBER,A.PATHID\n \"\"\" % (runNumber, minLS, maxLS)\n\n try:\n self.curs.execute(sqlquery)\n except:\n print(\"Getting rates failed. 
Exiting.\")\n exit(2) # Exit with error\n TriggerRates = {}\n\n for LS, HLTPass, triggerName in self.curs.fetchall():\n \n rate = HLTPass/23.31041 # A lumisection is 23.31041 seconds\n name = stripVersion(triggerName)\n\n if name not in TriggerRates:\n # Initialize the value of TriggerRates[name] to be a dictionary, which we will fill with [ LS, rate ] data\n TriggerRates[name] = {}\n TriggerRates[name][LS] = rate\n else:\n TriggerRates[name][LS] = rate\n\n return TriggerRates\n\n # DEPRECATED\n # Note: This function is based on a function from DatabaseParser.py\n # Use: Get the raw rate and prescale factor\n # Parameters:\n # -- runNumber: The number of the run that we are examining\n # Returns: A dictionary [triggerName][LS] { raw rate, prescale } \n def getRawRates(self, runNumber, minLS=-1, maxLS=9999999):\n # First we need the HLT and L1 prescale rates and the HLT seed info\n if not self.getRunInfo(runNumber):\n print(\"Failed to get run info \")\n return {} # The run probably doesn't exist\n\n # Get L1 info\n self.getL1Prescales(runNumber)\n self.getL1NameIndexAssoc(runNumber)\n # Get HLT info\n self.getHLTSeeds(runNumber)\n self.getHLTPrescales(runNumber)\n\n # Get the prescale index as a function of LS\n for LS, psi in self.curs.fetchall():\n self.PSColumnByLS[LS] = psi\n\n ## A more complex version of the getRates query\n sqlquery = \"\"\"\n SELECT\n A.LSNUMBER,\n SUM(A.L1PASS),\n SUM(A.PSPASS),\n SUM(A.PACCEPT),\n SUM(A.PEXCEPT),\n (\n SELECT\n M.NAME\n FROM\n CMS_HLT_GDR.U_PATHS M,\n CMS_HLT_GDR.U_PATHIDS L\n WHERE\n L.PATHID=A.PATHID AND\n M.ID=L.ID_PATH\n ) PATHNAME\n FROM\n CMS_RUNINFO.HLT_SUPERVISOR_TRIGGERPATHS A\n WHERE\n RUNNUMBER = %s AND\n A.LSNUMBER >= %s AND\n A.LSNUMBER <= %s \n GROUP BY \n A.LSNUMBER, A.PATHID\n \"\"\" % (runNumber, minLS, maxLS)\n \n try: self.curs.execute(sqlquery)\n except:\n print(\"Getting rates failed. 
Exiting.\")\n exit(2) # Exit with error\n\n TriggerRates = {} # Initialize TriggerRates\n \n for LS, L1Pass, PSPass, HLTPass, HLTExcept, triggerName in self.curs.fetchall():\n name = stripVersion(triggerName)\n\n rate = HLTPass/23.31041 # HLTPass is events in this LS, so divide by 23.31041 s to get rate\n hltps = 0 # HLT Prescale\n\n if name not in TriggerRates:\n TriggerRates[name] = {} # Initialize dictionary\n # TODO: We can probably come up with a better solution then a try, except here\n try: psi = self.PSColumnByLS[LS] # Get the prescale index\n except: psi = 0\n if psi is None: psi = 0\n \n try:\n hltps = self.HLTPrescales[name][psi]\n except:\n hltps = 1.\n hltps = float(hltps)\n \n try:\n if self.HLTSeed[name] in self.L1IndexNameMap:\n l1ps = self.L1Prescales[self.L1IndexNameMap[self.HLTSeed[name]]][psi]\n else:\n #AvL1Prescales = self.CalculateAvL1Prescales([LS])\n #l1ps = self.UnwindORSeed(self.HLTSeed[name] ,AvL1Prescales)\n l1ps = self.UnwindORSeed(self.HLTSeed[name],self.L1Prescales,psi)\n except:\n l1ps = 1\n\n ps = l1ps*hltps\n TriggerRates[name][LS]= [ps*rate, ps]\n\n return TriggerRates\n\n # Similar to the 'getRawRates' query, but is restricted to triggers that appear in trigger_list\n def getHLTRates(self, runNumber, trigger_list=[],minLS=-1, maxLS=9999999):\n # First we need the HLT and L1 prescale rates and the HLT seed info\n if not self.getRunInfo(runNumber):\n print(\"Failed to get run info \")\n return {} # The run probably doesn't exist\n\n # Get L1 info\n self.getL1Prescales(runNumber)\n self.getL1NameIndexAssoc(runNumber)\n # Get HLT info\n self.getHLTSeeds(runNumber)\n self.getHLTPrescales(runNumber)\n\n # Get the prescale index as a function of LS\n for LS, psi in self.curs.fetchall():\n self.PSColumnByLS[LS] = psi\n\n self.HLT_name_map = self.getHLTNameMap(runNumber)\n\n if len(trigger_list) == 0:\n # If no list is given --> get rates for all HLT triggers\n trigger_list = list(self.HLT_name_map.keys())\n\n trigger_rates = {}\n for name in trigger_list:\n if name not in self.HLT_name_map:\n # Ignore triggers which don't appear in this run\n continue\n trigger_rates[name] = self.getSingleHLTRate(runNumber,name,minLS,maxLS)\n\n return trigger_rates\n\n # Gets the HLT rate for a single trigger\n # WARNING: This function is meant to be called by the wrapper funciton 'getHLTRates', since many of the 'self.' dictionaries change between runs\n def getSingleHLTRate(self, runNumber, name, minLS=-1, maxLS=9999999):\n # Cache the various dictionaries, so we don't have to repeat the queries\n path_id = self.HLT_name_map[name]\n sqlquery = \"\"\"\n SELECT\n A.LSNUMBER,\n SUM(A.L1PASS),\n SUM(A.PSPASS),\n SUM(A.PACCEPT),\n SUM(A.PEXCEPT)\n FROM\n CMS_RUNINFO.HLT_SUPERVISOR_TRIGGERPATHS A\n WHERE\n A.RUNNUMBER = %s AND\n A.PATHID = %s AND\n A.LSNUMBER >= %s AND\n A.LSNUMBER <= %s\n GROUP BY\n A.LSNUMBER, A.PATHID\n \"\"\" % (runNumber,path_id,minLS,maxLS)\n try: \n self.curs.execute(sqlquery)\n except:\n print(\"Getting rates for %s failed. 
Exiting.\" % name)\n exit(2) # Exit with error\n\n trigger_rates = {}\n for LS, L1Pass, PSPass, HLTPass, HLTExcept in self.curs.fetchall():\n rate = HLTPass/23.31041 # HLTPass is events in this LS, so divide by 23.31041 s to get rate\n hltps = 0 # HLT Prescale\n\n # TODO: We can probably come up with a better solution then a try, except here\n try: \n psi = self.PSColumnByLS[LS] # Get the prescale index\n except:\n psi = 0\n\n if psi is None:\n psi = 0\n \n try:\n hltps = self.HLTPrescales[name][psi]\n except:\n hltps = 1.\n\n hltps = float(hltps)\n \n try:\n if self.HLTSeed[name] in self.L1IndexNameMap:\n l1ps = self.L1Prescales[self.L1IndexNameMap[self.HLTSeed[name]]][psi]\n else:\n l1ps = self.UnwindORSeed(self.HLTSeed[name],self.L1Prescales,psi)\n except:\n l1ps = 1\n\n ps = l1ps*hltps\n trigger_rates[LS] = [ps*rate, ps]\n return trigger_rates\n\n # Generates a dictionary that maps HLT path names to the corresponding path_id\n def getHLTNameMap(self,runNumber):\n sqlquery = \"\"\"\n SELECT DISTINCT\n C.PATHID,\n B.NAME\n FROM\n CMS_RUNINFO.HLT_SUPERVISOR_TRIGGERPATHS A,\n CMS_HLT_GDR.U_PATHS B,\n CMS_HLT_GDR.U_PATHIDS C\n WHERE\n A.RUNNUMBER = %s AND\n A.PATHID = C.PATHID AND\n B.ID = C.ID_PATH\n \"\"\" % (runNumber)\n\n name_map = {}\n self.curs.execute(sqlquery)\n for path_id,path_name in self.curs.fetchall():\n name = stripVersion(path_name)\n name_map[name] = path_id\n return name_map\n\n # DEPRECATED\n # Use: Gets data related to L1 trigger rates\n # Returns: The L1 raw rates: [ trigger ] [ LS ] { raw rate, ps }\n def getL1RawRates(self, runNumber, preDeadTime = True):\n # Get information that we will need to use\n self.getRunInfo(runNumber)\n self.getL1Prescales(runNumber)\n self.getL1NameIndexAssoc(runNumber)\n \n #pre-DT rates query (new uGT)\n #(0, 'ALGORITHM_RATE_AFTER_PRESCALE'),\n #(1, 'ALGORITHM_RATE_BEFORE_PRESCALE'),\n #(2, 'POST_DEADTIME_ALGORITHM_RATE_AFTER_PRESCALE'),\n #(3, 'POST_DEADTIME_ALGORITHM_RATE_AFTER_PRESCALE_BY_HLT'),\n #(4, 'POST_DEADTIME_ALGORITHM_RATE_AFTER_PRESCALE_PHYSICS'),\n #(5, 'POST_DEADTIME_ALGORITHM_RATE_AFTER_PRESCALE_CALIBRATION'),\n #(6, 'POST_DEADTIME_ALGORITHM_RATE_AFTER_PRESCALE_RANDOM')\n if preDeadTime: rate_type = '%d' % ( 0 )\n else: rate_type = '%d' % ( 4 )\n run_str = '0%d' % (runNumber)\n query_before_ps = \"\"\"\n SELECT\n LUMI_SECTIONS_ID,\n ALGO_RATE,\n ALGO_INDEX\n FROM\n CMS_UGT_MON.VIEW_ALGO_SCALERS\n WHERE\n SCALER_TYPE=0 AND LUMI_SECTIONS_ID LIKE '%s\"\"\" %(run_str) +\"\"\"%' \"\"\"\n\n self.curs.execute(query_before_ps)\n l1_rates_preDT_ps = self.curs.fetchall()\n\n L1Triggers = {}\n for tuple in l1_rates_preDT_ps:\n ls = int(tuple[0].split('_')[1].lstrip('0'))\n ps_rate = tuple[1]\n bit = tuple[2]\n algo_name = self.L1NameIndexMap[bit]\n\n if self.L1Mask[bit] == 0: ps_rate=0. 
\n\n if algo_name not in L1Triggers: L1Triggers[algo_name] = {}\n prescale_column = self.PSColumnByLS[ls]\n try:\n unprescaled_rate = ps_rate*self.L1Prescales[bit][prescale_column]\n except:\n print(\"prescales bit or column not avaiable \")\n continue\n L1Triggers[algo_name][ls] = [ unprescaled_rate , self.L1Prescales[bit][prescale_column] ]\n\n return L1Triggers # [ trigger ] [ LS ] { raw rate, ps }\n\n # DEPRECATED\n # Use: Gets the raw rate of a trigger during a run and the average prescale value of that trigger during the run\n # Returns: A dictionary: [ trigger name ] { ave ps, [ LS ] [ raw rate ] }\n def getRates_AvePS(self, runNumber):\n # Get the rates in the form: [triggerName][LS] { raw rate, prescale }\n Rates = self.getPSRates(runNumber)\n\n # Create the dictionary to be returned\n TriggerRates = {}\n\n for triggerName in Rates:\n TriggerRates[triggerName] = [ 0, {} ]\n counter = 0\n totalPS = 0\n for LS in Rates[triggerName]:\n totalPS += Rates[triggerName][LS][1]\n TriggerRates[triggerName][1][LS] = Rates[triggerName][LS][0] # Get the raw rate\n counter += 1\n TriggerRates[triggerName][0] = totalPS/counter # Set the ave ps\n\n return TriggerRates\n\n # Note: This function is from DatabaseParser.py (with moderate modification)\n # Use: Sets the L1 trigger prescales for this class\n # Returns: (void)\n def getL1Prescales(self, runNumber):\n sqlquery = \"\"\"\n SELECT\n A.ALGO_INDEX,\n A.ALGO_NAME,\n B.PRESCALE,\n B.PRESCALE_INDEX\n FROM\n CMS_UGT_MON.VIEW_UGT_RUN_ALGO_SETTING A,\n CMS_UGT_MON.VIEW_UGT_RUN_PRESCALE B\n WHERE\n A.ALGO_INDEX = B.ALGO_INDEX AND\n A.RUN_NUMBER = B.RUN_NUMBER AND\n A.RUN_NUMBER = %s\n ORDER BY\n A.ALGO_INDEX\n \"\"\" % (runNumber)\n\n try:\n self.curs.execute(sqlquery)\n except:\n print(\"Get L1 Prescales query failed\")\n return \n\n ps_table = self.curs.fetchall()\n self.L1Prescales = {}\n\n if len(ps_table) < 1:\n print(\"Cannot get L1 Prescales\")\n return\n\n for object in ps_table:\n algo_index = object[0]\n algo_name = object[1]\n algo_ps = object[2]\n ps_index = object[3]\n if algo_index not in self.L1Prescales: self.L1Prescales[algo_index] = {}\n self.L1Prescales[algo_index][ps_index] = algo_ps\n\n # DEPRECATED\n # Note: This function is from DatabaseParser.py (with slight modifications)\n # Use: Gets the average L1 prescales\n # Returns: A dictionary: [ Algo bit number ] \n def getAvL1Prescales(self, runNumber):\n AvgL1Prescales = [0]*self.nAlgoBits\n for index in LSRange:\n psi = self.PSColumnByLS[index]\n if not psi: psi = 0\n for algo in range(self.nAlgoBits):\n # AvgL1Prescales[algo]+=self.L1PrescaleTable[algo][psi]\n AvgL1Prescales[algo]+=self.L1Prescales[algo][psi]\n for i in range(len(AvgL1Prescales)):\n try:\n AvgL1Prescales[i] = AvgL1Prescales[i]/len(LSRange)\n except:\n AvgL1Prescales[i] = AvgL1Prescales[i]\n return AvgL1Prescales\n \n # Note: This function is from DatabaseParser.py\n # Use: Frankly, I'm not sure. I don't think its ever been called. 
Read the (origional) info string\n # Returns: The minimum prescale value\n def UnwindORSeed(self,expression,L1Prescales,psi):\n \"\"\"\n Figures out the effective prescale for the OR of several seeds\n we take this to be the *LOWEST* prescale of the included seeds\n \"\"\"\n #if expression.find(\" OR \") == -1:\n # return -1 # Not an OR of seeds\n #seedList = expression.split(\" OR \")\n #if len(seedList)==1:\n # return -1 # Not an OR of seeds, really shouldn't get here...\n #minPS = 99999999999\n #for seed in seedList:\n # if not self.L1IndexNameMap.has_key(seed): continue\n # ps = L1Prescales[self.L1IndexNameMap[seed]]\n # if ps: minPS = min(ps,minPS)\n #if minPS==99999999999: return 0\n #else: return minPS\n\n # Ignore 'AND' L1 seeds\n if expression.find(\" AND \") != -1:\n return 1\n\n seedList = []\n # This 'if' might be redundent\n if expression.find(\" OR \") != -1:\n for elem in expression.split(\" OR \"):\n # Strip all whitespace from the split strings\n seedList.append(elem.replace(\" \",\"\"))\n else:\n expression = expression.replace(\" \",\"\")\n seedList.append(expression)\n\n minPS = 99999999999\n for seed in seedList:\n if seed not in self.L1IndexNameMap: continue\n ps = L1Prescales[self.L1IndexNameMap[seed]][psi]\n if ps: minPS = min(ps,minPS)\n if minPS == 99999999999: return 0\n else: return minPS\n\n # Note: This function is from DatabaseParser.py (with slight modifications), double (##) comments are origionals\n # Use: Sets the L1 seed that each HLT trigger depends on\n # Returns: (void)\n def getHLTSeeds(self, runNumber):\n ### Check\n ## This is a rather delicate query, but it works!\n ## Essentially get a list of paths associated with the config, then find the module of type HLTLevel1GTSeed associated with the path\n ## Then find the parameter with field name L1SeedsLogicalExpression and look at the value\n ##\n ## NEED TO BE LOGGED IN AS CMS_HLT_R\n if self.HLT_Key == \"\": self.getRunInfo(runNumber)\n\n tmpcurs = self.getHLTCursor()\n sqlquery = \"\"\"\n SELECT\n s.name,\n d.value\n FROM\n cms_hlt_gdr.u_confversions h,\n cms_hlt_gdr.u_pathid2conf a,\n cms_hlt_gdr.u_pathid2pae n,\n cms_hlt_gdr.u_paelements b,\n cms_hlt_gdr.u_pae2moe c,\n cms_hlt_gdr.u_moelements d,\n cms_hlt_gdr.u_mod2templ e,\n cms_hlt_gdr.u_moduletemplates f,\n cms_hlt_gdr.u_pathids p,\n cms_hlt_gdr.u_paths s\n WHERE \n h.name='%s' AND\n a.id_confver=h.id AND\n n.id_pathid=a.id_pathid AND\n b.id=n.id_pae AND\n c.id_pae=b.id AND\n d.id=c.id_moe AND\n d.name='L1SeedsLogicalExpression' AND\n e.id_pae=b.id AND\n f.id=e.id_templ AND\n f.name='HLTL1TSeed' AND\n p.id=n.id_pathid AND\n s.id=p.id_path\n ORDER BY\n value\n \"\"\" % (self.HLT_Key,)\n \n tmpcurs.execute(sqlquery)\n for HLTPath,L1Seed in tmpcurs.fetchall():\n name = stripVersion(HLTPath) # Strip the version from the trigger name\n if name not in self.HLTSeed: ## this should protect us from L1_SingleMuOpen\n #self.HLTSeed[HLTPath] = L1Seed.lstrip('\"').rstrip('\"')\n self.HLTSeed[name] = L1Seed.lstrip('\"').rstrip('\"') \n\n # Note: This function is from DatabaseParser.py (with slight modification)\n # Use: Seems to return the algo index that corresponds to each trigger name\n # Returns: (void)\n def getL1NameIndexAssoc(self, runNumber):\n ## get the L1 algo names associated with each algo bit\n ### Check\n #if self.GT_Key == \"\":\n # self.getRunInfo(runNumber)\n #old GT query \n #AlgoNameQuery = \"\"\"SELECT ALGO_INDEX, ALIAS FROM CMS_GT.L1T_MENU_ALGO_VIEW\n #WHERE MENU_IMPLEMENTATION IN (SELECT L1T_MENU_FK FROM CMS_GT.GT_SETUP WHERE 
ID='%s')\n #ORDER BY ALGO_INDEX\"\"\" % (self.GT_Key,)\n AlgoNameQuery = \"\"\"\n SELECT\n ALGO_INDEX,\n ALGO_NAME,\n ALGO_MASK\n FROM\n CMS_UGT_MON.VIEW_UGT_RUN_ALGO_SETTING\n WHERE\n RUN_NUMBER=%s\n \"\"\" % (runNumber)\n try:\n self.curs.execute(AlgoNameQuery)\n except:\n print(\"Get L1 Name Index failed\")\n return\n\n for bit,name,mask in self.curs.fetchall():\n name = stripVersion(name)\n name = name.replace(\"\\\"\",\"\")\n self.L1IndexNameMap[name] = bit\n self.L1NameIndexMap[bit]=name\n self.L1Mask[bit] = mask\n\n # Note: This is a function from DatabaseParser.py (with slight modification)\n # Use: Gets the prescales for the various HLT triggers\n def getHLTPrescales(self, runNumber):\n ### Check\n if self.HLT_Key == \"\":\n self.getRunInfo(runNumber)\n tmp_curs = self.getHLTCursor()\n\n configIDQuery = \"\"\"\n SELECT\n CONFIGID\n FROM\n CMS_HLT_GDR.U_CONFVERSIONS\n WHERE\n NAME='%s'\n \"\"\" % (self.HLT_Key)\n\n tmp_curs.execute(configIDQuery)\n ConfigId, = tmp_curs.fetchone()\n\n SequencePathQuery =\"\"\"\n SELECT\n prescale_sequence,\n triggername\n FROM\n (\n SELECT\n J.ID,\n J.NAME,\n LAG(J.ORD,1,0) OVER (order by J.ID) PRESCALE_SEQUENCE,\n J.VALUE TRIGGERNAME,\n trim('{' from trim('}' from LEAD(J.VALUE,1,0) OVER (order by J.ID))) as PRESCALE_INDEX\n FROM\n CMS_HLT_GDR.U_CONFVERSIONS A,\n CMS_HLT_GDR.U_CONF2SRV S,\n CMS_HLT_GDR.U_SERVICES B,\n CMS_HLT_GDR.U_SRVTEMPLATES C,\n CMS_HLT_GDR.U_SRVELEMENTS J\n WHERE\n A.CONFIGID=%s AND\n A.ID=S.ID_CONFVER AND\n S.ID_SERVICE=B.ID AND\n C.ID=B.ID_TEMPLATE AND\n C.NAME='PrescaleService' AND\n J.ID_SERVICE=B.ID\n ) Q\n WHERE\n NAME='pathName'\n \"\"\" % (ConfigId,)\n \n tmp_curs.execute(SequencePathQuery)\n HLTSequenceMap = {}\n for seq,name in tmp_curs.fetchall():\n name = name.lstrip('\"').rstrip('\"')\n name = stripVersion(name)\n HLTSequenceMap[seq]=name\n \n SequencePrescaleQuery = \"\"\"\n WITH\n pq AS\n (\n SELECT \n Q.*\n FROM\n (\n SELECT\n J.ID,\n J.NAME,\n LAG(J.ORD,1,0) OVER (order by J.ID) PRESCALE_SEQUENCE,\n J.VALUE TRIGGERNAME,\n trim('{' from trim('}' from LEAD(J.VALUE,1,0) OVER (order by J.ID))) AS PRESCALE_INDEX\n FROM\n CMS_HLT_GDR.U_CONFVERSIONS A,\n CMS_HLT_GDR.U_CONF2SRV S,\n CMS_HLT_GDR.U_SERVICES B,\n CMS_HLT_GDR.U_SRVTEMPLATES C,\n CMS_HLT_GDR.U_SRVELEMENTS J\n WHERE\n A.CONFIGID=%s AND\n A.ID=S.ID_CONFVER AND\n S.ID_SERVICE=B.ID AND\n C.ID=B.ID_TEMPLATE AND\n C.NAME='PrescaleService' AND\n J.ID_SERVICE=B.ID\n ) Q\n WHERE \n NAME='pathName'\n )\n SELECT \n prescale_sequence,\n MYINDEX,\n regexp_substr (prescale_index, '[^,]+', 1, rn) mypsnum\n FROM\n pq\n CROSS JOIN\n (\n SELECT\n rownum rn,\n mod(rownum -1, level) MYINDEX\n FROM \n (\n SELECT\n max (length (regexp_replace (prescale_index, '[^,]+'))) + 1 mx\n FROM\n pq\n )\n CONNECT BY\n level <= mx\n )\n WHERE\n regexp_substr (prescale_index, '[^,]+', 1, rn) is not null\n ORDER BY\n prescale_sequence, myindex\n \"\"\" % (ConfigId,)\n \n tmp_curs.execute(SequencePrescaleQuery)\n lastIndex=-1\n lastSeq=-1\n row = []\n\n for seq,index,val in tmp_curs.fetchall():\n if lastIndex != index-1:\n self.HLTPrescales[HLTSequenceMap[seq-1]] = row\n row=[]\n lastSeq=seq\n lastIndex=index\n row.append(val)\n\n # Use: Returns the prescale column names of the HLT menu used for the specified run\n def getPrescaleNames(self,runNumber):\n ### Check\n if self.HLT_Key == \"\":\n self.getRunInfo(runNumber)\n tmp_curs = self.getHLTCursor()\n configIDQuery = \"\"\"\n SELECT\n CONFIGID\n FROM\n CMS_HLT_GDR.U_CONFVERSIONS\n WHERE\n NAME='%s'\n \"\"\" % (self.HLT_Key)\n\n 
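# resolve the HLT menu name to its numeric CONFIGID\n        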
tmp_curs.execute(configIDQuery)\n ConfigId, = tmp_curs.fetchone()\n\n sqlquery = \"\"\"\n SELECT\n J.NAME,\n TRIM('{' FROM TRIM('}' FROM J.VALUE))\n FROM\n CMS_HLT_GDR.U_CONFVERSIONS A,\n CMS_HLT_GDR.U_CONF2SRV S,\n CMS_HLT_GDR.U_SERVICES B,\n CMS_HLT_GDR.U_SRVTEMPLATES C,\n CMS_HLT_GDR.U_SRVELEMENTS J\n WHERE\n A.CONFIGID=%s AND\n A.ID=S.ID_CONFVER AND\n S.ID_SERVICE=B.ID AND\n C.ID=B.ID_TEMPLATE AND\n C.NAME='PrescaleService' AND\n J.ID_SERVICE=B.ID AND\n J.NAME='lvl1Labels'\n \"\"\" % (ConfigId,)\n tmp_curs.execute(sqlquery)\n name,ps_str = tmp_curs.fetchone()\n ps_names = [x.strip().strip('\"') for x in ps_str.strip().split(',')]\n return ps_names\n\n # Use: Returns the globaltag string for the HLT menu that was used in the specified run\n def getGlobalTag(self,runNumber):\n global_tag = \"\"\n HLT_Key = self.getRunKeys(runNumber)[1]\n\n if HLT_Key == \"\":\n # The key query failed\n return global_tag\n\n sqlquery = \"\"\"\n SELECT\n D.VALUE\n FROM\n CMS_HLT_GDR.U_CONFVERSIONS A,\n CMS_HLT_GDR.U_CONF2ESS B,\n CMS_HLT_GDR.U_ESSOURCES C,\n CMS_HLT_GDR.U_ESSELEMENTS D\n WHERE\n A.NAME = '%s' AND\n A.ID = B.ID_CONFVER AND\n B.ID_ESSOURCE = C.ID AND\n B.ID_ESSOURCE = D.ID_ESSOURCE AND\n C.NAME = 'GlobalTag' AND\n D.NAME = 'globaltag'\n \"\"\" % (HLT_Key)\n\n try:\n self.curs.execute(sqlquery)\n global_tag, = self.curs.fetchone()\n global_tag = global_tag.strip('\"')\n except:\n print(\"[ERROR] Failed to get globaltag for run, %d\" % (runNumber))\n return global_tag\n\n # Note: This is a function from DatabaseParser.py (with slight modification)\n # Use: Gets the number of colliding bunches during a run\n def getNumberCollidingBunches(self, runNumber):\n # Get Fill number first\n sqlquery = \"\"\"\n SELECT\n LHCFILL\n FROM\n CMS_WBM.RUNSUMMARY\n WHERE\n RUNNUMBER=%s\n \"\"\" % (runNumber)\n self.curs.execute(sqlquery)\n \n try: fill = self.curs.fetchone()[0]\n except: return [0,0]\n \n # Get the number of colliding bunches\n sqlquery = \"\"\"\n SELECT\n NCOLLIDINGBUNCHES,\n NTARGETBUNCHES\n FROM\n CMS_RUNTIME_LOGGER.RUNTIME_SUMMARY\n WHERE\n LHCFILL=%s\n \"\"\" % (fill)\n try:\n self.curs.execute(sqlquery)\n bunches = self.curs.fetchall()[0]\n bunches = [ int(bunches[0]), int(bunches[1]) ]\n return bunches\n except:\n #print \"database error querying for num colliding bx\" \n return [0, 0]\n\n # Use: Gets the last LHC status\n # Returns: A dictionary: [ status ] \n def getLHCStatus(self):\n import time \n utime = int(time.time())\n sqlquery = \"\"\"\n SELECT\n A.VALUE,\n CMS_LHCGMT_COND.GMTDB.VALUE_TEXT(A.GROUPINDEX,A.VALUE) TEXT_VALUE\n FROM\n CMS_LHCGMT_COND.LHC_GMT_EVENTS A,\n CMS_LHCGMT_COND.LHC_GMT_EVENT_DESCRIPTIONS B\n WHERE\n A.SOURCE=B.SOURCE(+) AND\n A.SOURCE=5130 AND\n A.SECONDS BETWEEN %s AND %s\n ORDER BY\n A.SECONDS DESC,A.NSECONDS DESC\n \"\"\" % (str(utime-86400), str(utime))\n self.curs.execute(sqlquery)\n queryResult = self.curs.fetchall()\n if len(queryResult) == 0: return ['----','Not available']\n elif len(queryResult[0]) >1: return queryResult[0][0]\n else: return ['---','Not available']\n \n # Use: Gets the dead time as a function of lumisection\n # Returns: A dictionary: [ LS ] \n def getDeadTime(self,runNumber,minLS=-1,maxLS=9999999):\n sqlquery = \"\"\"\n SELECT\n SECTION_NUMBER,\n DEADTIME_BEAMACTIVE_TOTAL\n FROM\n CMS_TCDS_MONITORING.tcds_cpm_deadtimes_v\n WHERE\n RUN_NUMBER=%s AND\n SECTION_NUMBER >= %s AND\n SECTION_NUMBER <= %s\n \"\"\" % (runNumber,minLS,maxLS)\n \n self.curs.execute(sqlquery)\n \n deadTime = {}\n for ls, dt in self.curs.fetchall():\n 
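# beam-active dead time reported by TCDS for this lumisection\n            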
deadTime[ls] = dt\n \n return deadTime\n\n # Use: Gets the L1A physics lost rate as a function of lumisection\n # Returns: A dictionary: [ LS ] \n def getL1APhysicsLost(self,runNumber,minLS=-1,maxLS=9999999):\n sqlquery = \"\"\"\n SELECT\n SECTION_NUMBER,\n SUP_TRG_RATE_TT1\n FROM\n CMS_TCDS_MONITORING.tcds_cpm_rates_v\n WHERE\n RUN_NUMBER=%s AND\n SECTION_NUMBER >= %s AND\n SECTION_NUMBER <= %s\n \"\"\" % (runNumber,minLS,maxLS)\n self.curs.execute(sqlquery)\n \n l1rate = {}\n for ls, rate in self.curs.fetchall():\n l1rate[ls] = rate\n \n return l1rate\n\n # Use: Gets the total L1A physics rate as a function of lumisection\n # Returns: A dictionary: [ LS ] \n def getL1APhysics(self, runNumber,minLS=-1,maxLS=9999999):\n sqlquery = \"\"\"\n SELECT\n SECTION_NUMBER,\n TRG_RATE_TT1\n FROM\n CMS_TCDS_MONITORING.tcds_cpm_rates_v\n WHERE\n RUN_NUMBER=%s AND\n SECTION_NUMBER >= %s AND\n SECTION_NUMBER <= %s\n \"\"\" % (runNumber,minLS,maxLS)\n\n self.curs.execute(sqlquery)\n \n l1rate = {}\n for ls, rate in self.curs.fetchall():\n l1rate[ls] = rate\n \n return l1rate\n\n # Use: Gets the total L1A calibration rate as a function of lumisection\n # Returns: A dictionary: [ LS ] \n def getL1ACalib(self, runNumber,minLS=-1,maxLS=9999999):\n sqlquery = \"\"\"\n SELECT\n SECTION_NUMBER,\n TRG_RATE_TT2\n FROM\n CMS_TCDS_MONITORING.tcds_cpm_rates_v\n WHERE\n RUN_NUMBER=%s AND\n SECTION_NUMBER >= %s AND\n SECTION_NUMBER <= %s\n \"\"\" % (runNumber,minLS,maxLS)\n self.curs.execute(sqlquery)\n \n l1rate = {}\n for ls, rate in self.curs.fetchall():\n l1rate[ls] = rate\n \n return l1rate\n\n # Use: Gets the total L1ARand rate as a function of lumisection\n # Returns: A dictionary: [ LS ] \n def getL1ARand(self, runNumber,minLS=-1,maxLS=9999999):\n sqlquery = \"\"\"\n SELECT\n SECTION_NUMBER,\n TRG_RATE_TT3\n FROM\n CMS_TCDS_MONITORING.tcds_cpm_rates_v\n WHERE\n RUN_NUMBER=%s AND\n SECTION_NUMBER >= %s AND\n SECTION_NUMBER <= %s\n \"\"\" % (runNumber,minLS,maxLS)\n self.curs.execute(sqlquery)\n \n l1rate = {}\n for ls, rate in self.curs.fetchall():\n l1rate[ls] = rate\n \n return l1rate\n\n # Use: Gets the TOTAL L1 rate as a function of lumisection\n # Returns: A dictionary: [ LS ] \n def getL1rate(self, runNumber,minLS=-1,maxLS=9999999):\n # TODO: This function's name is very similar to getL1Rates, consider renaming\n sqlquery = \"\"\"\n SELECT\n SECTION_NUMBER,\n TRG_RATE_TOTAL\n FROM\n CMS_TCDS_MONITORING.tcds_cpm_rates_v\n WHERE\n RUN_NUMBER=%s AND\n SECTION_NUMBER >= %s AND\n SECTION_NUMBER <= %s\n \"\"\" % (runNumber,minLS,maxLS)\n \n self.curs.execute(sqlquery)\n \n l1rate = {}\n for ls, rate in self.curs.fetchall():\n l1rate[ls] = rate\n \n return l1rate \n\n # Use: Returns the number of the latest run to be stored in the DB\n def getLatestRunInfo(self):\n query = \"\"\"\n SELECT\n MAX(A.RUNNUMBER)\n FROM\n CMS_RUNINFO.RUNNUMBERTBL A,\n CMS_RUNTIME_LOGGER.LUMI_SECTIONS B\n WHERE\n B.RUNNUMBER=A.RUNNUMBER AND B.LUMISECTION > 0\n \"\"\"\n try:\n self.curs.execute(query)\n runNumber = self.curs.fetchone()\n except:\n print(\"Error: Unable to retrieve latest run number.\")\n return\n\n mode = self.getTriggerMode(runNumber)\n isCol = 0\n isGood = 1\n \n if mode is None:\n isGood = 0\n elif mode[0].find('l1_hlt_collisions') != -1:\n isCol = 1\n \n Tier0xferQuery = \"\"\"\n SELECT\n TIER0_TRANSFER TIER0\n FROM\n CMS_WBM.RUNSUMMARY\n WHERE\n RUNNUMBER = %d\n \"\"\" % (runNumber)\n self.curs.execute(Tier0xferQuery)\n tier0 = 1\n try:\n tier0 = self.curs.fetchone()\n except:\n print(\"Error: Unable to get tier0 
status.\")\n \n if isCol and not tier0:\n print(\"WARNING: tier0 transfer is off\")\n elif not tier0:\n print(\"Please check if tier0 transfer is supposed to be off.\")\n \n return [runNumber[0], isCol, isGood, mode]\n\n def getWbmUrl(self,runNumber,pathName,LS):\n if pathName[0:4] == \"HLT_\":\n sqlquery = \"\"\"\n SELECT\n A.PATHID,\n (\n SELECT\n M.NAME\n FROM\n CMS_HLT_GDR.U_PATHS M,\n CMS_HLT_GDR.U_PATHIDS L\n WHERE\n L.PATHID=A.PATHID AND M.ID=L.ID_PATH\n ) PATHNAME\n FROM\n CMS_RUNINFO.HLT_SUPERVISOR_TRIGGERPATHS A\n WHERE\n RUNNUMBER=%s AND A.LSNUMBER=%s\n \"\"\" % (runNumber, LS)\n\n try: self.curs.execute(sqlquery)\n except: return \"-\"\n\n for id,fullName in self.curs.fetchall():\n name = stripVersion(fullName)\n if name == pathName:\n url = \"https://cmswbm.web.cern.ch/cmswbm/cmsdb/servlet/ChartHLTTriggerRates?fromLSNumber=&toLSNumber=&minRate=&maxRate=&drawCounts=0&drawLumisec=1&runID=%s&pathID=%s&TRIGGER_PATH=%s&LSLength=23.310409580838325\" % (runNumber,id,fullName)\n return url\n \n elif pathName[0:3]==\"L1_\":\n try:\n bitNum = self.L1IndexNameMap[pathName]\n url = \"https://cmswbm.web.cern.ch/cmswbm/cmsdb/servlet/ChartL1TriggerRates?fromTime=&toTime=&fromLSNumber=&toLSNumber=&minRate=&maxRate=&minCount=&maxCount=&preDeadRates=1&drawCounts=0&drawLumisec=1&runID=%s&bitID=%s&type=0&TRIGGER_NAME=%s&LSLength=23.310409580838325\" % (runNumber,bitNum,pathName)\n return url\n except:\n return \"-\"\n \n return \"-\"\n \n # Use: Get the trigger mode for the specified run\n def getTriggerMode(self, runNumber):\n TrigModeQuery = \"\"\"\n SELECT\n TRIGGERMODE\n FROM\n CMS_WBM.RUNSUMMARY\n WHERE\n RUNNUMBER = %d\n \"\"\" % (runNumber)\n try:\n self.curs.execute(TrigModeQuery)\n mode = self.curs.fetchone()\n except:\n print(\"Error: Unable to retrieve trigger mode.\")\n if mode is None:\n # Probably do not want to raise TriggerModeNoneError here, since if we're running plotTriggerRates with a list of runs, do not want to crash if only one of them is invalid\n return None\n else:\n return mode[0]\n\n # Use: Retrieves the data from all streams\n # Returns: A dictionary [ stream name ] { LS, rate, size, bandwidth }\n def getStreamData(self, runNumber, minLS=-1, maxLS=9999999):\n cursor = self.getTrgCursor()\n #StreamQuery = \"\"\"\n # SELECT\n # A.lumisection,\n # A.stream,\n # B.nevents/23.31041,\n # B.filesize,\n # B.filesize/23.31041\n # FROM\n # CMS_STOMGR.FILES_CREATED A,\n # CMS_STOMGR.FILES_INJECTED B\n # WHERE\n # A.filename = B.filename AND\n # A.runnumber = %s AND\n # A.lumisection >= %s AND\n # A.lumisection <= %s\n # \"\"\" % (runNumber, minLS, maxLS)\n\n StreamQuery = \"\"\"\n SELECT\n A.LUMISECTION,\n A.STREAM,\n A.NEVENTS/23.31041,\n A.FILESIZE,\n A.FILESIZE/23.31041\n FROM\n CMS_WBM.VIEW_SM_SUMMARY A\n WHERE\n A.RUNNUMBER = %s AND\n A.LUMISECTION >= %s AND\n A.LUMISECTION <= %s\n \"\"\" % (runNumber,minLS,maxLS)\n\n try:\n cursor.execute(StreamQuery)\n #self.curs.execute(StreamQuery)\n streamData = cursor.fetchall()\n except:\n print(\"Error: Unable to retrieve stream data.\")\n\n StreamData = {}\n for LS, stream, rate, size, bandwidth in streamData:\n if stream not in StreamData:\n StreamData[stream] = []\n StreamData[stream].append( [LS, rate, size, bandwidth] )\n\n return StreamData\n\n def getPrimaryDatasets(self, runNumber, minLS=-1, maxLS=9999999):\n cursor = self.getTrgCursor()\n PDQuery = \"\"\"\n SELECT\n DISTINCT E.NAME,\n F.LSNUMBER,\n F.ACCEPT/23.31041\n FROM\n CMS_HLT_GDR.U_CONFVERSIONS A,\n CMS_HLT_GDR.U_CONF2STRDST B,\n CMS_WBM.RUNSUMMARY C,\n 
CMS_HLT_GDR.U_DATASETIDS D,\n CMS_HLT_GDR.U_DATASETS E,\n CMS_RUNINFO.HLT_SUPERVISOR_DATASETS F\n WHERE\n D.ID = B.ID_DATASETID AND\n E.ID = D.ID_DATASET AND\n B.ID_CONFVER = A.ID AND\n D.ID = F.DATASETID AND\n A.CONFIGID = C.HLTKEY AND\n F.RUNNUMBER = C.RUNNUMBER AND\n C.RUNNUMBER = %s AND\n F.LSNUMBER >=%s AND\n F.LSNUMBER <=%s\n ORDER BY\n E.NAME\n \"\"\" % (runNumber,minLS,maxLS)\n\n try:\n cursor.execute(PDQuery)\n #self.curs.execute(PDQuery)\n pdData = cursor.fetchall()\n except:\n print(\"Error: Unable to retrieve PD data.\")\n\n PrimaryDatasets = {}\n for pd, LS, rate, in pdData:\n if pd not in PrimaryDatasets:\n PrimaryDatasets[pd] = []\n PrimaryDatasets[pd].append( [LS, rate] )\n\n return PrimaryDatasets\n\n def getFillRuns(self, fillNumber):\n #query = \"\"\"SELECT A.FILL_NUMBER, A.RUN_NUMBER, B.PHYSICS_FLAG*B.BEAM1_STABLE*B.BEAM2_STABLE, A.SECTION_NUMBER \n # FROM CMS_TCDS_MONITORING.tcds_cpm_counts_v A, CMS_RUNTIME_LOGGER.LUMI_SECTIONS B\n # WHERE A.FILL_NUMBER=%s AND A.RUN_NUMBER=B.RUNNUMBER\"\"\" % (fillNumber)\n #self.curs.execute(query)\n #output = self.curs.fetchone()\n #run_list = []\n #while (not output is None):\n # if output is None:\n # break\n # run_number = output[1]\n # flag = output[2]\n # if not run_number in run_list and flag == 1:\n # run_list.append(run_number)\n # output = self.curs.fetchone()\n\n tmp_list = []\n run_list = []\n query = \"\"\"\n SELECT \n DISTINCT A.RUN_NUMBER,\n B.RUNNUMBER\n FROM \n CMS_TCDS_MONITORING.tcds_cpm_counts_v A,\n CMS_RUNTIME_LOGGER.LUMI_SECTIONS B\n WHERE \n A.FILL_NUMBER=%s AND\n A.RUN_NUMBER=B.RUNNUMBER\n ORDER BY \n A.RUN_NUMBER\n \"\"\" % (fillNumber)\n self.curs.execute(query)\n self.curs.fetchone() # Discard the first run as it is actually from the previous fill\n for item in self.curs.fetchall():\n tmp_list.append(item[0]) # Add all runs from the fill to the list\n\n # We make the same query, but this time filter out runs w/o stable beam\n # NOTE: Might be able to bundle this into a single query, but for now this should work\n query = \"\"\"\n SELECT \n DISTINCT A.RUN_NUMBER,\n B.RUNNUMBER\n FROM \n CMS_TCDS_MONITORING.tcds_cpm_counts_v A,\n CMS_RUNTIME_LOGGER.LUMI_SECTIONS B\n WHERE \n A.FILL_NUMBER=%s AND\n A.RUN_NUMBER=B.RUNNUMBER AND\n B.PHYSICS_FLAG*B.BEAM1_STABLE*B.BEAM2_STABLE=1\n ORDER BY \n A.RUN_NUMBER\n \"\"\" % (fillNumber)\n self.curs.execute(query)\n for item in self.curs.fetchall():\n # We only include runs that are actually in this fill (i.e. 
runs with stable beams)!\n if item[0] in tmp_list:\n run_list.append(item[0])\n\n return run_list\n\n # Returns the runs from most recent fill with stable beams\n def getRecentRuns(self):\n query = \"\"\"\n SELECT\n DISTINCT A.RUN_NUMBER,\n A.FILL_NUMBER\n FROM \n CMS_TCDS_MONITORING.tcds_cpm_counts_v A,\n CMS_RUNTIME_LOGGER.LUMI_SECTIONS B\n WHERE\n A.RUN_NUMBER=B.RUNNUMBER AND\n B.PHYSICS_FLAG*B.BEAM1_STABLE*B.BEAM2_STABLE=1\n ORDER BY \n A.RUN_NUMBER DESC\n \"\"\"\n self.curs.execute(query)\n\n noCandidates = False\n last_fill = -1\n while True:\n row = self.curs.fetchone()\n if row is None:\n noCandidates = True\n break\n current_fill = row[1]\n if current_fill == last_fill:\n continue\n else:\n last_fill = current_fill\n # Check if the fill has valid runs\n run_list = []\n run_list += self.getFillRuns(current_fill)\n if len(run_list) > 0:\n # We have valid runs!\n break\n return run_list, last_fill\n\n # Returns a dictionary of streams that map to a list containing all the paths within that stream\n def getPathsInStreams(self,runNumber):\n # WARNING: NEED TO TEST THIS QUERY\n if not self.getRunInfo(runNumber):\n return None\n\n query = \"\"\"\n SELECT\n D.NAME,\n G.NAME\n FROM\n CMS_HLT_GDR.U_CONFVERSIONS A,\n CMS_HLT_GDR.U_PATHID2CONF B,\n CMS_HLT_GDR.U_PATHIDS C,\n CMS_HLT_GDR.U_PATHS D,\n CMS_HLT_GDR.U_PATHID2STRDST E,\n CMS_HLT_GDR.U_STREAMIDS F,\n CMS_HLT_GDR.U_STREAMS G\n WHERE\n A.NAME = '%s' AND\n B.ID_CONFVER = A.ID AND\n C.ID = B.ID_PATHID AND\n D.ID = C.ID_PATH AND\n E.ID_PATHID = B.ID_PATHID AND\n F.ID = E.ID_STREAMID AND\n G.ID = F.ID_STREAM\n ORDER BY\n G.NAME\n \"\"\" % (self.HLT_Key)\n\n self.curs.execute(query)\n\n stream_paths = {} # {'stream_name': [trg_paths] }\n for trg,stream in self.curs.fetchall():\n trg = stripVersion(trg)\n if stream not in stream_paths:\n stream_paths[stream] = []\n\n if not trg in stream_paths[stream]:\n stream_paths[stream].append(trg)\n\n return stream_paths\n\n def getPathsInDatasets(self,runNumber):\n if not self.getRunInfo(runNumber):\n return None\n\n query = \"\"\"\n SELECT\n D.NAME,\n G.NAME\n FROM\n CMS_HLT_GDR.U_CONFVERSIONS A,\n CMS_HLT_GDR.U_PATHID2CONF B,\n CMS_HLT_GDR.U_PATHIDS C,\n CMS_HLT_GDR.U_PATHS D,\n CMS_HLT_GDR.U_PATHID2STRDST E,\n CMS_HLT_GDR.U_DATASETIDS F,\n CMS_HLT_GDR.U_DATASETS G\n WHERE\n A.NAME = '%s' AND\n B.ID_CONFVER = A.ID AND\n C.ID = B.ID_PATHID AND\n D.ID = C.ID_PATH AND \n E.ID_PATHID = B.ID_PATHID AND\n F.ID = E.ID_DATASETID AND\n G.ID = F.ID_DATASET\n \"\"\" % (self.HLT_Key)\n\n self.curs.execute(query)\n\n dataset_paths = {} # {'dataset_name': [trg_paths] }\n for trg,dataset in self.curs.fetchall():\n trg = stripVersion(trg)\n if dataset not in dataset_paths:\n dataset_paths[dataset] = []\n\n if not trg in dataset_paths[dataset]:\n dataset_paths[dataset].append(trg)\n\n return dataset_paths\n\n # Returns a list of all L1 triggers used in the run\n def getL1Triggers(self,runNumber):\n query = \"\"\"\n SELECT\n ALGO_NAME\n FROM\n CMS_UGT_MON.VIEW_UGT_RUN_ALGO_SETTING\n WHERE\n RUN_NUMBER = %s\n \"\"\" % (runNumber)\n\n self.curs.execute(query)\n\n L1_list = []\n for item in self.curs.fetchall():\n L1_list.append(item[0])\n\n return L1_list\n\n # Functionally very similar to getL1RawRates, but allows for specifying which scalar type to query, also does no un-prescaling\n def getL1Rates(self,runNumber,minLS=-1,maxLS=9999999,scaler_type=0):\n self.getRunInfo(runNumber)\n self.getL1Prescales(runNumber)\n self.getL1NameIndexAssoc(runNumber)\n\n #pre-DT rates query (new uGT)\n #(0, 
'ALGORITHM_RATE_AFTER_PRESCALE'),\n #(1, 'ALGORITHM_RATE_BEFORE_PRESCALE'),\n #(2, 'POST_DEADTIME_ALGORITHM_RATE_AFTER_PRESCALE'),\n #(3, 'POST_DEADTIME_ALGORITHM_RATE_AFTER_PRESCALE_BY_HLT'),\n #(4, 'POST_DEADTIME_ALGORITHM_RATE_AFTER_PRESCALE_PHYSICS'),\n #(5, 'POST_DEADTIME_ALGORITHM_RATE_AFTER_PRESCALE_CALIBRATION'),\n #(6, 'POST_DEADTIME_ALGORITHM_RATE_AFTER_PRESCALE_RANDOM')\n\n run_str = \"0%d\" % runNumber\n query = \"\"\"\n SELECT\n LUMI_SECTIONS_ID,\n ALGO_RATE,\n ALGO_INDEX\n FROM\n CMS_UGT_MON.VIEW_ALGO_SCALERS\n WHERE\n SCALER_TYPE = %d AND\n LUMI_SECTIONS_ID LIKE '%s%%'\n \"\"\" % (scaler_type,run_str)\n self.curs.execute(query)\n\n L1Triggers = {}\n for tup in self.curs.fetchall():\n ls = int(tup[0].split('_')[1].lstrip('0'))\n rate = tup[1]\n algo_bit = tup[2]\n\n if ls < minLS or ls > maxLS:\n #TODO: Move this check directly into the query\n continue\n\n algo_name = self.L1NameIndexMap[algo_bit]\n psi = self.PSColumnByLS[ls]\n algo_ps = self.L1Prescales[algo_bit][psi]\n\n if algo_name not in L1Triggers:\n L1Triggers[algo_name] = {}\n\n L1Triggers[algo_name][ls] = [rate, algo_ps]\n\n return L1Triggers # {'trigger': {LS: (rate,ps) } }\n\n# -------------------- End of class DBParsing -------------------- #\n","sub_path":"ratemon/OldDBParser.py","file_name":"OldDBParser.py","file_ext":"py","file_size_in_byte":63013,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"304541824","text":"'''\n\n@Author : fushuai\n\n@Email : fushuai@qutoutiao.net\n\n@IDE : PyCharm\n\n@Time : 2020/8/6 13:55\n\n@Desc :\n\n'''\nimport pandas as pd\nfrom sklearn.metrics import log_loss, roc_auc_score\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import MinMaxScaler\nfrom deepctr.models import DeepFM\nfrom deepctr.feature_column import SparseFeat, DenseFeat, get_feature_names\n\nif __name__ == '__main__':\n data = pd.read_csv(\"criteo_sample.txt\")\n sparse_features = [\"C\" + str(i) for i in range(1, 27)]\n dense_features = [\"I\" + str(i) for i in range(1, 14)]\n data[sparse_features] = data[sparse_features].fillna('-1')\n data[dense_features] = data[dense_features].fillna(0)\n target = ['label']\n mms = MinMaxScaler()\n data[dense_features] = mms.fit_transform(data[dense_features])\n\n fixlen_feature_columns = [SparseFeat(feat, vocabulary_size=1000, embedding_dim=4, use_hash=True, dtype='string') for\n feat in sparse_features] + [DenseFeat(feat, 1) for feat in dense_features]\n linear_feature_columns = fixlen_feature_columns\n dnn_feature_columns = fixlen_feature_columns\n feature_names = get_feature_names(linear_feature_columns + dnn_feature_columns)\n train, test = train_test_split(data, test_size=0.2)\n train_model_input = {name: train[name] for name in feature_names}\n test_model_input = {name: test[name] for name in feature_names}\n model = DeepFM(linear_feature_columns, dnn_feature_columns, task='binary')\n model.compile(\"adam\", \"binary_crossentropy\", metrics=[\"binary_crossentropy\"])\n history = model.fit(train_model_input, train[target].values, batch_size=256, epochs=10, verbose=2,\n validation_split=0.2)\n pred_ans = model.predict(test_model_input, batch_size=256)\n print(\"test LogLoss\", round(log_loss(test[target].values, pred_ans), 4))\n print(\"test AUC\", round(roc_auc_score(test[target].values, pred_ans), 
4))\n","sub_path":"examples/deep_fm_hashing.py","file_name":"deep_fm_hashing.py","file_ext":"py","file_size_in_byte":1950,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"270817157","text":"class Die:\n \"\"\"class creates die for random number between 1-6\"\"\"\n def __init__(self, sides=6):\n self.sides = sides\n\n def roll(self):\n \"\"\"roll returns number between 1 and sides\"\"\"\n import random\n # no random.seed() here: reseeding with a constant on every call would\n # make the die return the same value on every roll\n return random.randint(1, self.sides)\n\n\nclass Player:\n \"\"\"class of players\"\"\"\n def __init__(self, n):\n self.n = n\n self.points = 0\n self.turnscore = 0\n\n def r(self):\n \"\"\"take one turn: keep rolling until a 1 comes up or the player holds\"\"\"\n keep_rolling = 1\n while keep_rolling == 1:\n r = die.roll()\n print(\"Player {} got a \".format(self.n), r)\n if r == 1:\n self.turnscore = 0\n keep_rolling = 0\n print(\"The round is over\")\n print()\n else:\n self.turnscore += r\n print(\"Player {}'s turnscore is\".format(self.n),\n self.turnscore)\n roll = input(\"Keep rolling? r = roll, h = hold\")\n if roll == 'r':\n keep_rolling = 1\n else:\n self.h()\n return\n\n def h(self):\n self.points += self.turnscore\n if self.points >= 100:\n self.end()\n else:\n print(\"Player {}'s turn is over\".format(self.n))\n print()\n self.turnscore = 0\n return self.points\n\n def score(self):\n print(\"Player {}'s score is {}\".format(self.n, self.points))\n\n def end(self):\n \"\"\"ends game when board = 100\"\"\"\n print(\"Player {} Won!\".format(self.n))\n quit()\n\n\nclass Game:\n def __init__(self, player1, player2):\n # keep references to the players instead of silently ignoring them\n self.player1 = player1\n self.player2 = player2\n\n def game(self):\n print(\"Welcome to Pig.\")\n while self.player1.points < 100 and self.player2.points < 100:\n self.player1.score()\n self.player2.score()\n self.player1.r()\n self.player1.score()\n self.player2.score()\n self.player2.r()\n\n\nif __name__ == \"__main__\":\n die = Die()\n player1 = Player(1)\n player2 = Player(2)\n game = Game(player1, player2)\n game.game()\n","sub_path":"pig.py","file_name":"pig.py","file_ext":"py","file_size_in_byte":2115,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"62044014","text":"import os\nimport sys\nimport random\nimport math\nimport re\nimport time\nimport numpy as np\nimport cv2\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport glob\nimport xml.etree.ElementTree as ET\n\nfrom rgb_segmentor import get_rgb_masks\nfrom hsv_segmentor import get_hsv_masks\n\nfrom config import Config\nimport utils\nimport model as modellib\nimport visualize\nfrom model import log\n\nimport argparse\n\n# Parse command line arguments\nparser = argparse.ArgumentParser(description='Train Mask R-CNN on Custom Bags Dataset.')\nparser.add_argument(\"command\", metavar=\"<command>\", help=\"'train' or 'eval'\")\nparser.add_argument('--model', required=False, metavar=\"/path/to/weights.h5\", help=\"Path to weights .h5 file or 'coco'\")\nparser.add_argument('--logs', required=False, default='log/', metavar=\"/path/to/logs/\", help='Logs and checkpoints directory (default=logs/)')\nargs = parser.parse_args()\n\n# Root directory of the project\nROOT_DIR = os.getcwd()\n\n# Directory to save logs and trained model\nMODEL_DIR = os.path.join(ROOT_DIR, \"logs\")\n\n# Local path to trained weights file\nCOCO_MODEL_PATH = os.path.join(ROOT_DIR, \"mask_rcnn_coco.h5\")\n# Download COCO trained weights from Releases if needed\nif not os.path.exists(COCO_MODEL_PATH):\n utils.download_trained_weights(COCO_MODEL_PATH)\n\nclass BagsConfig(Config):\n \n # Give the configuration a recognizable name\n NAME = \"bags\"\n\n 
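# (editor's note) one image per GPU keeps memory needs modest on a single\n    # card; NUM_CLASSES below counts the background class plus the twelve bags\n    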
GPU_COUNT = 1\n IMAGES_PER_GPU = 1\n NUM_CLASSES = 1 + 12 # background [index: 0] + 12 classes\n STEPS_PER_EPOCH = 3000\n VALIDATION_STEPS = 100\n \nconfig = BagsConfig()\nconfig.display()\n\nclass InferenceConfig(BagsConfig):\n GPU_COUNT = 1\n IMAGES_PER_GPU = 1\n\ninference_config = InferenceConfig()\n\ndef get_ax(rows=1, cols=1, size=8):\n \"\"\"Return a Matplotlib Axes array to be used in\n all visualizations in the notebook. Provide a\n central point to control graph sizes.\n \n Change the default size attribute to control the size\n of rendered images\n \"\"\"\n _, ax = plt.subplots(rows, cols, figsize=(size*cols, size*rows))\n return ax\n\nclass BagsDataset(utils.Dataset):\n \"\"\"Generates the bags dataset. \n \"\"\"\n \n def load_bags(self, part):\n \"\"\"\n part: train/eval\n \"\"\"\n\n classes = ['black_backpack', 'nine_west_bag', 'meixuan_brown_handbag', 'sm_bdrew_grey_handbag', 'wine_red_handbag', 'sm_bclarre_blush_crossbody', 'mk_brown_wrislet', 'black_plain_bag', 'lmk_brown_messenger_bag', 'sm_peach_backpack', 'black_ameligalanti', 'white_bag']\n \n count = 1\n\n # Add classes\n \n for i, c in enumerate(classes):\n self.add_class(\"bags\", i+1, c)\n \n # Add train/val images\n \n pattern = re.compile(\".*bot[0-9]*.png\")\n \n for images in glob.glob(os.getcwd()+'/Data/handbag_images/JPEGImages/*.png'):\n \n f = images.split('JPEGImages')\n ann_path = f[0]+'Annotations'+f[1][:-3]+'xml'\n \n tree = ET.parse(ann_path)\n root = tree.getroot() \n width, height = int(root.find('size').find('width').text), int(root.find('size').find('height').text)\n \n if height>config.IMAGE_MAX_DIM or width>config.IMAGE_MAX_DIM or height 3999, the Romans used a bar over the numeral. Here I have had to use a single quote.\n\nromanSymbol = ['I', 'V', 'X', 'L', 'C', 'D', 'M', \"V'\"]\nromanValue = [1, 5, 10, 50, 100, 500, 1000, 5000]\nsubtractPoint = [4, 9, 40, 90, 400, 900, 4000]\n\ndef arabicToRomanNumerals(n):\n # Handling 4000 - 8999, MV' - V'MMMCMXCIX.\n rem = n % romanValue[7]\n if rem in range (subtractPoint[6], romanValue[7]):\n return romanSymbol[6] + romanSymbol[7] + handle_900_3999(n%romanValue[6])\n else:\n return romanSymbol[7] * (n//romanValue[7]) + handle_900_3999(rem)\n\n\ndef handle_900_3999(n):\n # Handle 900 - 3999, CM - MMMCMXCIX.\n rem = n % romanValue[6]\n if rem in range (subtractPoint[5], romanValue[6]):\n return romanSymbol[6] * (n//romanValue[6]) + romanSymbol[4] + romanSymbol[6] + handle_90_399(n%romanValue[4])\n else:\n return romanSymbol[6] * (n//romanValue[6]) + handle_400_899(rem)\n\ndef handle_400_899(n):\n # Handling 400 - 899, CD - DCCCXCIX.\n rem = n % romanValue[5]\n if rem in range (subtractPoint[4], romanValue[5]):\n return romanSymbol[4] + romanSymbol[5] + handle_90_399(n%romanValue[4])\n else:\n return romanSymbol[5] * (n//romanValue[5]) + handle_90_399(rem)\n\ndef handle_90_399(n):\n # Handle 90 - 399, XC - CCCXCIX\n rem = n % romanValue[4]\n if rem in range (subtractPoint[3], romanValue[4]):\n return romanSymbol[4] * (n//romanValue[4]) + romanSymbol[2] + romanSymbol[4] + handle_9_39(n%romanValue[2])\n else:\n return romanSymbol[4] * (n//romanValue[4]) + handle_40_89(rem)\n\ndef handle_40_89(n):\n # Handling 40 - 89, XL - LXXXIX.\n rem = n % romanValue[3]\n if rem in range (subtractPoint[2], romanValue[3]):\n return romanSymbol[2] + romanSymbol[3] + handle_9_39(n%romanValue[2])\n else:\n return romanSymbol[3] * (n//romanValue[3]) + handle_9_39(rem)\n\ndef handle_9_39(n):\n # Handle 9 - 39, IX - XXXIX.\n rem = n % romanValue[2]\n if rem in range 
(subtractPoint[1], romanValue[2]):\n return romanSymbol[2] * (n//romanValue[2]) + romanSymbol[0] + romanSymbol[2]\n else:\n return romanSymbol[2] * (n//romanValue[2]) + handle_4_8(rem)\n\ndef handle_4_8(n):\n #Handle 4 - 8, IV - VIII.\n rem = n % romanValue[1]\n if rem in range (subtractPoint[0], romanValue[1]):\n # Note: First expression below is just for attempted symmetry.\n return romanSymbol[1] * (n//romanValue[1]) + romanSymbol[0] + romanSymbol[1]\n else:\n return romanSymbol[1] * (n//romanValue[1]) + handle_1_3(rem)\n\ndef handle_1_3(n):\n# Handle 1 - 3.\n return romanSymbol[0] * n\n\nprint()\nprint('This program converts a range of numbers in Arabic numerals up to 8999 to Roman numerals. \\n')\nprint('Enter beginning number: ', end='')\nstartNumber = int(input())\nprint('Enter ending number: ', end='')\nendNumber = int(input())\n\nfor i in range (startNumber, endNumber+1):\n print('Arabic = ', i, ' Roman = ', arabicToRomanNumerals(i))\n\n","sub_path":"arabicToRomanNumerals.py","file_name":"arabicToRomanNumerals.py","file_ext":"py","file_size_in_byte":3512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"341152638","text":"#!/usr/bin/env python3\n\"\"\"\nAuthor : emilyenglish\nDate : 2019-04-09\nPurpose: calc hamming distance\n\"\"\"\n\nimport argparse\nimport sys\nimport logging\nimport os\n\n# --------------------------------------------------\ndef get_args():\n \"\"\"get command-line arguments\"\"\"\n parser = argparse.ArgumentParser(\n description='Hamming distance',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n\n parser.add_argument(\n 'FILE', metavar='FILE', help='File inputs', nargs = 2)\n\n parser.add_argument(\n '-d', '--debug', help='Debug', action='store_true')\n\n return parser.parse_args()\n\n\n# --------------------------------------------------\ndef warn(msg):\n \"\"\"Print a message to STDERR\"\"\"\n print(msg, file=sys.stderr)\n\n\n# --------------------------------------------------\ndef die(msg='Something bad happened'):\n \"\"\"warn() and exit with error\"\"\"\n warn(msg)\n sys.exit(1)\n# --------------------------------------------------\ndef dist(seq1, seq2):\n \"\"\"Hamming distance of two words; any length difference counts as mismatches\"\"\"\n return sum(1 for a, b in zip(seq1, seq2) if a != b) + abs(len(seq1) - len(seq2))\n
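# quick sanity check (editor's example): dist('abc', 'abd') == 1, and\n# dist('ab', 'abcd') == 2 because the two extra characters count as mismatches\n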
# --------------------------------------------------\ndef main():\n \"\"\"Make a jazz noise here\"\"\"\n args = get_args()\n file1, file2 = args.FILE\n for path in (file1, file2):\n if not os.path.isfile(path):\n die('\"{}\" is not a file.'.format(path))\n logging.basicConfig(\n filename='.log',\n filemode='w',\n level=logging.DEBUG if args.debug else logging.CRITICAL\n )\n words1 = open(file1).read().split()\n words2 = open(file2).read().split()\n # pair the words positionally and add up the per-word distances\n diff = sum(dist(w1, w2) for w1, w2 in zip(words1, words2))\n print(diff)\n\n\n# --------------------------------------------------\nif __name__ == '__main__':\n main()\n","sub_path":"assignments/13-hamm/hamm.py","file_name":"hamm.py","file_ext":"py","file_size_in_byte":5847,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"333339242","text":"from time import sleep\nimport requests, argparse, urllib.request, os, tkinter\n\t\nwindow = tkinter.Tk()\n\ndo_download_videos = tkinter.IntVar(value=0)\n\ndef download_images(username):\n\trequest_url = \"https://www.instagram.com/\"+username+\"/media/\"\n\t\n\theaders = {\n\t'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.12; rv:55.0) Gecko/20100101 Firefox/55.0'\n\t}\n\t\n\tparams = {'max_id': '0'}\n\t\n\tmore_available = True\n\t\n\tmake_folder(entry.get())\n\t\n\twhile more_available:\n\t\tr = requests.get(request_url, headers=headers, params=params)\n\t\t\n\t\ttry:\n\t\t\tdata = r.json()\n\t\texcept:\n\t\t\tprint(\"Invalid username!\")\n\t\t\tos.removedirs(entry.get())\n\t\t\tbreak\n\t\t\n\t\tfor content in data[\"items\"]:\n\t\t\tif content[\"type\"] == \"video\" and do_download_videos.get() == 1:\n\t\t\t\tfile_url = content[\"videos\"][\"standard_resolution\"][\"url\"]\n\t\t\telse:\n\t\t\t\tfile_url = content[\"images\"][\"standard_resolution\"][\"url\"]\n\t\t\t\t\n\t\t\tfile_url = file_url.replace(\"s640x640\",\"s1080x1080\")\n\t\t\t\n\t\t\tfile_name = 
file_url.split(\"/\")[-1]\n\t\t\t\n\t\t\tpath = entry.get()+\"/\"+username+\"_\"+file_name\n\t\t\tif not os.path.isfile(path):\n\t\t\t\t# fetch the file, pausing briefly between downloads\n\t\t\t\ttry:\n\t\t\t\t\turllib.request.urlretrieve(file_url,path)\n\t\t\t\t\tprint(\"Downloaded: \"+path)\n\t\t\t\t\tsleep(0.5)\n\t\t\t\texcept:\n\t\t\t\t\tprint(\"----Skipping this image----\")\n\t\t\t\t\n\t\tmore_available = data[\"more_available\"]\n\t\t\n\t\tnew_max_id = data[\"items\"][len(data[\"items\"]) - 1][\"id\"]\n\t\t\n\t\tparams = {'max_id': new_max_id}\n\t\t\n\t\tif more_available:\n\t\t\tprint(\"Getting next page of images with maximum id: \"+new_max_id)\n\t# all pages fetched\n\tprint(\"--------------Completed--------------\")\n\ndef action():\n download_images(entry.get())\n \n#Make folder with given username\ndef make_folder(username):\n try:\n os.makedirs(username)\n except OSError:\n os.removedirs(username)\n os.makedirs(username)\n\n#Building the UI\t\t\nwindow.configure(background=\"grey90\")\nwindow.title(\"insta-dl v.0.2.4\")\nwindow.geometry(\"300x200\")\nwindow.resizable(False, False)\n\nentry = tkinter.Entry(window)\nentry.place(x=70,y=68)\nentry.configure(highlightbackground=\"grey90\")\n\nbutton = tkinter.Button(window, text=\"Download\")\nbutton.place(x=110,y=120)\nbutton.configure(command=lambda:action(),highlightbackground=\"grey90\")\n\nvideo_checkbox = tkinter.Checkbutton(window, text=\"Download Video\", variable=do_download_videos, bg=\"grey90\")\nvideo_checkbox.place(x=95,y=150)\n\nnotice = tkinter.Label(window, text=\"insta-dl is not affiliated with Instagram\",\n fg=\"grey60\",bg=\"grey90\")\nnotice.place(x=30, y=180)\n\nwindow.mainloop()\n","sub_path":"insta-dl.py","file_name":"insta-dl.py","file_ext":"py","file_size_in_byte":2697,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"63599155","text":"import sys\nimport logging\nimport time\nimport os\nimport threading\nfrom signal import signal, getsignal, SIGINT, SIGTERM, SIGKILL, SIGUSR1\nfrom cloudkeeper.graph import GraphContainer\nfrom cloudkeeper.pluginloader import PluginLoader\nfrom cloudkeeper.baseplugin import PluginType\nfrom cloudkeeper.web import WebServer\nfrom cloudkeeper.scheduler import Scheduler\nfrom cloudkeeper.args import get_arg_parser, ArgumentParser\nfrom cloudkeeper.processor import Processor\nfrom cloudkeeper.cleaner import Cleaner\nfrom cloudkeeper.metrics import GraphCollector\nfrom cloudkeeper.utils import log_stats, signal_on_parent_exit\nfrom cloudkeeper.cli import Cli\nfrom cloudkeeper.event import add_event_listener, dispatch_event, Event, EventType, add_args as event_add_args\nfrom prometheus_client import REGISTRY\n\n\ntry:\n os.setpgid(0, 0)\nexcept (PermissionError, AttributeError):\n pass\n\nlog_format = '%(asctime)s - %(levelname)s - %(process)d/%(threadName)s - %(message)s'\nlogging.basicConfig(level=logging.WARN, format=log_format)\nlogging.getLogger('cloudkeeper').setLevel(logging.INFO)\nlog = logging.getLogger(__name__)\n\n# Plugins might produce debug logging during arg parsing so we manually\n# look for verbosity and set the log level before using the arg parser.\nargv = sys.argv[1:]\nif '-v' in argv or '--verbose' in argv:\n logging.getLogger('cloudkeeper').setLevel(logging.DEBUG)\n\n# This will be used in main() and signal_handler()\nshutdown_event = threading.Event()\nparent_pid = os.getpid()\noriginal_sigint_handler = getsignal(SIGINT)\noriginal_sigterm_handler = getsignal(SIGTERM)\n\n\ndef main() -> None:\n # Add cli args\n arg_parser = get_arg_parser()\n\n Cli.add_args(arg_parser)\n WebServer.add_args(arg_parser)\n 
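# (editor's note) every subsystem, and below each discovered plugin,\n    # registers its own CLI flags on this one shared parser before\n    # parse_args() is finally called\n    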
Scheduler.add_args(arg_parser)\n Processor.add_args(arg_parser)\n Cleaner.add_args(arg_parser)\n PluginLoader.add_args(arg_parser)\n GraphContainer.add_args(arg_parser)\n event_add_args(arg_parser)\n\n # Find cloudkeeper Plugins in the cloudkeeper.plugins module\n plugin_loader = PluginLoader()\n plugin_loader.add_plugin_args(arg_parser)\n\n # At this point the CLI, all Plugins as well as the WebServer have added their args to the arg parser\n arg_parser.parse_args()\n\n # Write log to a file in addition to stdout\n if ArgumentParser.args.logfile:\n log_formatter = logging.Formatter(log_format)\n fh = logging.FileHandler(ArgumentParser.args.logfile)\n fh.setFormatter(log_formatter)\n logging.getLogger().addHandler(fh)\n\n # Handle Ctrl+c and other means of termination/shutdown\n signal_on_parent_exit()\n add_event_listener(EventType.SHUTDOWN, shutdown, blocking=False)\n signal(SIGINT, signal_handler)\n signal(SIGTERM, signal_handler)\n signal(SIGUSR1, signal_handler)\n\n # We're using a GraphContainer() to contain the graph which gets replaced at runtime.\n # This way we're not losing the context in other places like the webserver when the\n # graph gets reassigned.\n graph_container = GraphContainer()\n\n # GraphCollector() is a custom Prometheus Collector that\n # takes a graph and yields its metrics\n graph_collector = GraphCollector(graph_container)\n REGISTRY.register(graph_collector)\n\n # Scheduler() starts an APScheduler instance\n scheduler = Scheduler(graph_container)\n scheduler.daemon = True\n scheduler.start()\n\n # Cli() is the CLI Thread\n cli = Cli(graph_container, scheduler)\n cli.daemon = True\n cli.start()\n\n # WebServer is handed the graph container context so it can e.g. produce graphml from it\n # The webserver serves Prometheus Metrics as well as different graph endpoints\n web_server = WebServer(graph_container)\n web_server.daemon = True\n web_server.start()\n\n for Plugin in plugin_loader.plugins(PluginType.PERSISTENT):\n try:\n log.debug(f'Starting persistent Plugin {Plugin}')\n plugin = Plugin()\n plugin.daemon = True\n plugin.start()\n except Exception as e:\n log.exception(f'Caught unhandled persistent Plugin exception {e}')\n\n collector = Processor(graph_container, plugin_loader.plugins(PluginType.COLLECTOR))\n collector.daemon = True\n collector.start()\n\n # Dispatch the STARTUP event\n dispatch_event(Event(EventType.STARTUP))\n\n # We wait for the shutdown Event to be set() and then end the program\n # While doing so we print the list of active threads once per 15 minutes\n while not shutdown_event.is_set():\n log_stats()\n shutdown_event.wait(900)\n time.sleep(5)\n log.info('Shutdown complete')\n quit()\n\n\ndef shutdown(event: Event) -> None:\n reason = event.data.get('reason')\n emergency = event.data.get('emergency')\n\n if emergency:\n log.fatal(f'EMERGENCY SHUTDOWN: {reason}')\n os.killpg(os.getpgid(0), SIGKILL)\n\n current_pid = os.getpid()\n if current_pid != parent_pid:\n return\n\n if reason is None:\n reason = 'unknown reason'\n log.info(f'Received shut down event {event.event_type}: {reason} - killing all threads and child processes')\n os.killpg(os.getpgid(0), SIGUSR1)\n kt = threading.Thread(target=force_shutdown, name='shutdown')\n kt.start()\n shutdown_event.set() # and then end the program\n\n\ndef force_shutdown(delay: int = 10) -> None:\n time.sleep(delay)\n log_stats()\n log.error('Some child process or thread timed out during shutdown - killing process group')\n os.killpg(os.getpgid(0), SIGKILL)\n os._exit(0)\n\n\ndef 
delayed_exit(delay: int = 3) -> None:\n time.sleep(delay)\n os._exit(0)\n\n\ndef signal_handler(sig, frame) -> None:\n \"\"\"Handles Ctrl+c by letting the Collector() know to shut down\"\"\"\n signal(SIGINT, original_sigint_handler)\n signal(SIGTERM, original_sigterm_handler)\n\n current_pid = os.getpid()\n if current_pid == parent_pid:\n if sig != SIGUSR1:\n reason = f'Received shutdown signal {sig}'\n log.debug(f'Parent caught signal {sig} - dispatching shutdown event')\n # Dispatch shutdown event in parent process which also causes SIGUSR1 to be sent to\n # the process group and in turn causes the shutdown event in all child processes.\n dispatch_event(Event(EventType.SHUTDOWN, {'reason': reason, 'emergency': False}))\n else:\n log.debug('Parent received SIGUSR1 and ignoring it')\n else:\n log.debug(f\"Shutting down child process {current_pid} - you might see exceptions from interrupted worker threads\")\n reason = f'Received shutdown signal {sig} from parent process'\n # Child's threads have 3s to shut down before the following thread will shut them down hard.\n kt = threading.Thread(target=delayed_exit, name='shutdown')\n kt.start()\n # Dispatch shutdown event in child process\n dispatch_event(Event(EventType.SHUTDOWN, {'reason': reason, 'emergency': False}), blocking=False)\n sys.exit(0)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"cloudkeeper/cloudkeeper/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":6973,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"205907923","text":"\"\"\"\nCopyright (c) 2015 Red Hat, Inc\nAll rights reserved.\n\nThis software may be modified and distributed under the terms\nof the BSD license. See the LICENSE file for details.\n\n\nPre build plugin which injects custom yum repository in dockerfile.\n\"\"\"\nimport os\nimport shutil\nfrom io import StringIO\n\nfrom atomic_reactor.constants import YUM_REPOS_DIR, RELATIVE_REPOS_PATH, INSPECT_CONFIG\nfrom atomic_reactor.plugin import PreBuildPlugin\nfrom atomic_reactor.plugins.pre_reactor_config import get_builder_ca_bundle\nfrom atomic_reactor.util import df_parser\nfrom atomic_reactor.utils.yum import YumRepo\n\n\nclass InjectYumRepoPlugin(PreBuildPlugin):\n key = \"inject_yum_repo\"\n is_allowed_to_fail = False\n\n def _final_user_line(self):\n user = self._find_final_user()\n if user:\n return user\n\n builder = self.workflow.builder\n if not builder.dockerfile_images.base_from_scratch:\n inspect = builder.base_image_inspect\n user = inspect.get(INSPECT_CONFIG).get('User')\n if user:\n return f'USER {user}'\n\n return ''\n\n def _find_final_user(self):\n \"\"\"Find the user in USER instruction in the last build stage\"\"\"\n for insndesc in reversed(self._dockerfile.structure):\n if insndesc['instruction'] == 'USER':\n return insndesc['content'] # we will reuse the line verbatim\n if insndesc['instruction'] == 'FROM':\n break # no USER specified in final stage\n\n def _cleanup_lines(self):\n lines = [\n \"RUN rm -f \" + \" \".join(\n (f\"'{repo_file}'\" for repo_file in self.workflow.files)\n )\n ]\n if self._builder_ca_bundle:\n lines.append(f'RUN rm -f /tmp/{self._ca_bundle_pem}')\n\n final_user_line = self._final_user_line()\n if final_user_line:\n lines.insert(0, \"USER root\")\n lines.append(final_user_line)\n\n return lines\n\n def __init__(self, tasker, workflow, *args, **kwargs):\n super().__init__(tasker, workflow, *args, **kwargs)\n self._builder_ca_bundle = None\n self._ca_bundle_pem = None\n 
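# (editor's note) these stay None until run() fills them in from the\n        # reactor config and the parsed Dockerfile\n        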
self._dockerfile = None\n\n def _inject_into_repo_files(self):\n \"\"\"Inject repo files into a relative directory inside the build context\"\"\"\n host_repos_path = os.path.join(self.workflow.builder.df_dir, RELATIVE_REPOS_PATH)\n self.log.info(\"creating directory for yum repos: %s\", host_repos_path)\n os.mkdir(host_repos_path)\n\n for repo_filename, repo_content in self.workflow.files.items():\n # Update every repo accordingly in a repofile\n # input_buf ---- updated ----> updated_buf\n with StringIO(repo_content) as input_buf, StringIO() as updated_buf:\n for line in input_buf:\n updated_buf.write(line)\n # Apply sslcacert to every repo in a repofile\n if line.lstrip().startswith('[') and self._builder_ca_bundle:\n updated_buf.write(f'sslcacert=/tmp/{self._ca_bundle_pem}\\n')\n\n yum_repo = YumRepo(repourl=repo_filename,\n content=updated_buf.getvalue(),\n dst_repos_dir=host_repos_path,\n add_hash=False)\n yum_repo.write_content()\n\n def _inject_into_dockerfile(self):\n self._dockerfile.add_lines(\n \"ADD %s* %s\" % (RELATIVE_REPOS_PATH, YUM_REPOS_DIR),\n all_stages=True, at_start=True, skip_scratch=True\n )\n\n if self._builder_ca_bundle:\n shutil.copyfile(\n self._builder_ca_bundle,\n os.path.join(self.workflow.builder.df_dir, self._ca_bundle_pem)\n )\n self._dockerfile.add_lines(\n f'ADD {self._ca_bundle_pem} /tmp/{self._ca_bundle_pem}',\n all_stages=True, at_start=True, skip_scratch=True\n )\n\n if not self.workflow.builder.dockerfile_images.base_from_scratch:\n self._dockerfile.add_lines(*self._cleanup_lines())\n\n def run(self):\n \"\"\"\n run the plugin\n \"\"\"\n yum_repos = {k: v for k, v in self.workflow.files.items() if k.startswith(YUM_REPOS_DIR)}\n if not yum_repos:\n return\n\n self._dockerfile = df_parser(self.workflow.builder.df_path, workflow=self.workflow)\n if self._dockerfile.baseimage is None:\n raise RuntimeError(\"No FROM line in Dockerfile\")\n\n self._builder_ca_bundle = get_builder_ca_bundle(self.workflow, None)\n if self._builder_ca_bundle:\n self._ca_bundle_pem = os.path.basename(self._builder_ca_bundle)\n\n self._inject_into_repo_files()\n self._inject_into_dockerfile()\n\n for repo in self.workflow.files:\n self.log.info(\"injected yum repo: %s\", repo)\n","sub_path":"atomic_reactor/plugins/pre_inject_yum_repo.py","file_name":"pre_inject_yum_repo.py","file_ext":"py","file_size_in_byte":4915,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"511023503","text":"from math import sqrt\nfrom random import randrange\nfrom collections import defaultdict\nfrom statistics import mean\n\n\ndef sqr(a):\n return a*a\n\n\ndef distance(a, b):\n if len(a) != len(b):\n raise ValueError('Vectors have different dimensions')\n return sqrt(sum(map(sqr, (x-y for x, y in zip(a, b)))))\n\n\ndef allocate_clusters(vectors, centroids):\n clusters = defaultdict(list)\n for vector_index, vector in enumerate(vectors):\n centroid = min(centroids, key=lambda x: distance(vector, x))\n clusters[centroid].append(vector_index)\n return clusters\n\n\ndef average_centroids_distance(clusters: dict):\n return mean([distance(x, y) for x in clusters.keys() for y in clusters.keys()])\n\n\ndef get_new_centroid_index(vectors, clusters):\n result = None\n maxes = []\n for centroid in clusters.keys():\n maxes.append(max(((i, distance(centroid, vectors[i])) for i in clusters[centroid]), key=(lambda x: x[1])))\n true_max = max(maxes, key=(lambda x: x[1]))\n if true_max[1] > (average_centroids_distance(clusters) / 2):\n result = true_max[0]\n return 
result\n\n\ndef maximin(vectors):\n centroids = list()\n clusters = None\n new_centroid_index = randrange(len(vectors))\n while not (new_centroid_index is None):\n centroids.append(vectors[new_centroid_index])\n clusters = allocate_clusters(vectors, centroids)\n new_centroid_index = get_new_centroid_index(vectors, clusters)\n return clusters\n","sub_path":"maximin.py","file_name":"maximin.py","file_ext":"py","file_size_in_byte":1470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"390480264","text":"from typing import MutableSequence\n\n# 양방향 버블 정렬\ndef shaker_sort(a: MutableSequence) -> None:\n\n left = 0\n right = len(a) - 1\n last = right\n while left < right:\n for j in range(right, left, -1):\n if a[j - 1] > a[j]:\n a[j - 1], a[j] = a[j], a[j - 1]\n last = j\n left = last\n\n for j in range(left, right):\n if a[j] > a[j + 1]:\n a[j], a[j + 1] = a[j + 1], a[j]\n last = j\n\n right = last","sub_path":"algorithm/sorting/shaker_sort.py","file_name":"shaker_sort.py","file_ext":"py","file_size_in_byte":515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"362453192","text":"# -*- coding: utf-8 -*-\nimport os\nimport re\nimport time\nimport redis\nimport socket\nimport pymongo\nimport datetime\nimport csv\nimport sys\nreload(sys)\nsys.setdefaultencoding('utf-8')\n\n\nMONGOD_HOST = '219.224.135.47'\nMONGOD_PORT = 27019\n\ndef default_mongo(host=MONGOD_HOST, port=MONGOD_PORT, usedb='54api_weibo_v2'):\n connection = pymongo.MongoClient(host=host, port=port, j=True, w=1)\n db = connection.admin\n db = getattr(connection, usedb)\n return db\n\ndef read():\n idlist = []\n path = '/home/jiangln/all_uidlist.txt'\n uid_file = open(path,'r')\n for line in uid_file:\n idlist.append(line)\n return idlist\n\ndef out():\n uidlist = read()\n #items = []\n db = default_mongo()\n path = '/home/jiangln/weibo_user_no.csv'\n csvfile = open(path,'wb')\n writer = csv.writer(csvfile)\n for uid in uidlist:\n try:\n query = {\"_id\":int(uid)}\n if db.master_timeline_user.find(query).count()>0:\n pass\n else:\n writer.writerow([uid])\n except ValueError:\n writer.writerow([uid])\n csvfile.close()\n\nif __name__ == '__main__':\n out()\n","sub_path":"user_mingo_improve.py","file_name":"user_mingo_improve.py","file_ext":"py","file_size_in_byte":1151,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"333339242","text":"from admin_interface.forms import CourseForm\nfrom admin_interface.forms import DeadlineForm\nfrom admin_interface.forms import FeedbackForm\nfrom admin_interface.forms import QuestionForm\nfrom admin_interface.forms import UserForm\nfrom admin_interface.models import Course\nfrom admin_interface.models import Deadline\nfrom admin_interface.models import Feedback\nfrom admin_interface.models import Instructor\nfrom admin_interface.models import Question\nfrom admin_interface.models import Student\nfrom admin_interface.models import Objectiveanswer\n\nfrom django.contrib.auth.models import User\nfrom django.shortcuts import get_object_or_404\nfrom django.shortcuts import redirect\nfrom django.shortcuts import render\n\nfrom django.conf import settings\nfrom django.contrib.auth import authenticate\nfrom django.contrib.auth import login\nfrom django.contrib.auth import logout\nfrom django.forms.formsets import formset_factory\nfrom django.http import HttpResponse\nfrom django.http import JsonResponse\nfrom django.views.decorators.csrf import 
csrf_exempt\nfrom django.core import serializers\n\n\n@csrf_exempt\ndef student_login(request):\n\tvalid = False\n\terror = \"\"\n\tname = \"\"\n\tif request.method == 'POST':\n\t\trollno = request.POST['rollno']\n\t\tpassword = request.POST['password']\n\t\tif Student.objects.filter(pk=rollno).count() > 0:\n\t\t\tstudent = Student.objects.get(pk=rollno)\n\t\t\tif student.password == password:\n\t\t\t\tvalid = True\n\t\t\t\tname = student.name\n\t\t\telse:\n\t\t\t\terror = \"Invalid credentials\"\n\t\telse:\n\t\t\terror = \"Account does not exist\"\n\telse:\n\t\terror = \"Invalid request\"\n\n\treturn JsonResponse(\n\t\t{\n\t\t\t'valid': valid,\n\t\t\t'name': name,\n\t\t\t'error': error,\n\t\t})\n\n@csrf_exempt\ndef student_deadlines(request):\n\tdeadlines = []\n\terror = \"\"\n\tif request.method == 'POST':\n\n\t\tif 'rollno' in request.POST:\n\t\t\trollno = request.POST['rollno']\n\t\t\tdateExists = False\n\n\t\t\tif 'date' in request.POST:\n\t\t\t\tdate = request.POST['date']\n\t\t\t\tdateExists = True\n\n\t\t\tcourses = Student.objects.get(pk=rollno).course_set.all()\n\n\t\t\tfor course in courses:\n\t\t\t\tif dateExists:\n\t\t\t\t\tobjs = course.deadline_set.filter(submission_date=date)\n\t\t\t\telse:\n\t\t\t\t\tobjs = course.deadline_set.all()\n\n\t\t\t\tfor obj in objs:\n\t\t\t\t\tdeadline = {\n\t\t\t\t\t\t'course': obj.course.code,\n\t\t\t\t\t\t'assignment': obj.assignment,\n\t\t\t\t\t\t'is_feedback': obj.is_feedback,\n\t\t\t\t\t\t'submission_date': obj.submission_date,\n\t\t\t\t\t\t'submission_time': obj.submission_time,\n\t\t\t\t\t\t'feedback_id': None,\n\t\t\t\t\t}\n\t\t\t\t\tif hasattr(obj, 'feedback'):\n\t\t\t\t\t\tdeadline['feedback_id'] = obj.feedback.id\n\n\t\t\t\t\tdeadlines.append(deadline)\n\t\telse:\n\t\t\terror = \"Invalid request\"\n\treturn JsonResponse({\n\t\t\t'deadlines': deadlines,\n\t\t})\n\n@csrf_exempt\ndef student_courses(request):\n\tcourses = []\n\terror = \"\"\n\tif request.method == 'POST':\n\t\tif 'rollno' in request.POST:\n\t\t\trollno = request.POST['rollno']\n\t\t\tif Student.objects.filter(pk=rollno).count() > 0:\n\t\t\t\tobjs = Student.objects.get(pk=rollno).course_set.all()\n\t\t\t\tfor course in objs:\n\t\t\t\t\tcourses.append({\n\t\t\t\t\t\t\t'code': course.code,\n\t\t\t\t\t\t\t'name': course.name,\n\t\t\t\t\t\t})\n\t\t\telse:\n\t\t\t\terror = \"Student does not exists\"\n\t\telse:\n\t\t\terror = \"Invalid request\"\n\n\treturn JsonResponse({\n\t\t\t'courses': courses,\n\t\t\t'error': error,\n\t\t})\n\n@csrf_exempt\ndef student_feedback(request):\n\tresponse = {}\n\tif request.method == 'POST':\n\t\tfeedback_id = int(request.POST['id'])\n\t\tif Feedback.objects.filter(pk=feedback_id).count() > 0:\n\t\t\tfeedback = Feedback.objects.get(pk=feedback_id)\n\t\t\tresponse['course'] = feedback.course.code\n\t\t\tresponse['title'] = feedback.title;\n\t\t\tresponse['description'] = feedback.description\n\t\t\tquestions = []\n\t\t\tfor q in feedback.questions.all():\n\t\t\t\tquestion = {\n\t\t\t\t\t'question': q.question,\n\t\t\t\t\t'question_id': q.id,\n\t\t\t\t\t'a': q.a,\n\t\t\t\t\t'b': q.b,\n\t\t\t\t\t'c': q.c,\n\t\t\t\t\t'd': q.d,\n\t\t\t\t\t'e': q.e,\n\t\t\t\t}\n\t\t\t\tquestions.append(question)\n\t\t\tresponse['questions'] = questions\n\treturn JsonResponse(response)\n\n@csrf_exempt\ndef student_feedback_submit(request):\n\tresponse = \"\"\n\tif request.method == 'POST':\n\t\tquestion_id = int(request.POST['id'])\n\t\tanswer = int(request.POST['option'])\n\t\tif answer != -1:\n\t\t\tif Question.objects.filter(pk=question_id).count() > 
0:\n\t\t\t\tans = Question.objects.get(pk=question_id).objectiveanswer\n\t\t\t\tif answer == 1:\n\t\t\t\t\tans.count_a += 1\n\t\t\t\telif answer == 2:\n\t\t\t\t\tans.count_b += 1\n\t\t\t\telif answer == 3:\n\t\t\t\t\tans.count_c += 1\n\t\t\t\telif answer == 4:\n\t\t\t\t\tans.count_d += 1\n\t\t\t\telif answer == 5:\n\t\t\t\t\tans.count_e += 1\n\t\t\t\tans.save()\n\t\t\t\tresponse = \"Marked \" + str(answer) + \" for question \" + str(question_id)\n\t\t\telse:\n\t\t\t\tresponse = \"Question does not exists\"\n\t\telse:\n\t\t\tresponse = \"Answer not marked for question \" + str(question_id)\n\treturn JsonResponse({\n\t\t\t'response': response, \n\t\t})\n\n\n\ndef index(request):\n\n\tauthenticated = False\n\n\tif request.user.is_authenticated:\n\t\tauthenticated = True\n\n\tcontext = {\n\t\t'authenticated': authenticated,\n\t}\n\n\treturn render(request, 'index.html', context)\n\n\ndef google_login(request):\n\n\tlogged_in = False\n\n\tif request.method == 'POST' and 'ID' in request.POST:\n\t\temail = request.POST['email']\n\t\tname = request.POST['name'].split(' ')[0]\n\t\tid = request.POST['ID']\n\n\t\tif Instructor.objects.filter(email=email).count() > 0:\n\t\t\tinstructor = Instructor.objects.get(email=email)\n\t\t\tif instructor.google_login == True:\n\t\t\t\t# google ID is the password for google_login user\n\t\t\t\tuser = authenticate(username=email, password=id)\n\t\t\t\tif user:\n\t\t\t\t\tif user.is_active:\n\t\t\t\t\t\tlogin(request, user)\n\t\t\t\t\t\tlogged_in = True\n\t\t\t\t\telse:\n\t\t\t\t\t\terror = \"Your account is disabled\"\n\t\t\t\telse:\n\t\t\t\t\terror = \"Invalid credentials\"\n\t\t\n\t\telse:\n\t\t\tprint('new google user')\n\n\t\t\tuser = User()\n\t\t\tuser.username = email\n\t\t\tuser.email = email\n\t\t\tuser.set_password(id)\n\t\t\tuser.save()\n\n\t\t\tnew_instructor = Instructor(\n\t\t\t\tuser=user,\n\t\t\t\temail=email,\n\t\t\t\tgoogle_login=True)\n\t\t\tnew_instructor.save()\n\t\t\t# google ID is the password for google_login user\n\t\t\tuser = authenticate(username=email, password=id)\n\t\t\tif user:\n\t\t\t\tif user.is_active:\n\t\t\t\t\tlogin(request, user)\n\t\t\t\t\tlogged_in = True\n\t\t\t\telse:\n\t\t\t\t\terror = \"Your account is disabled\"\n\t\t\telse:\n\t\t\t\terror = \"Invalid credentials\"\n\n\tprint(\"here \" + str(logged_in))\n\treturn render(request, 'google-login.html', {'logged_in': logged_in})\n\ndef user_login(request):\n\n\terror = \"\"\n\n\tif request.method == 'POST':\n\n\t\tusername = request.POST.get('username')\n\t\tpassword = request.POST.get('password')\n\n\t\tuser = authenticate(username=username, password=password)\n\t\tif user:\n\n\t\t\tif (Instructor.objects.get(email=user.email)).google_login:\n\t\t\t\tprint(Instructor.objects.get(email=user.email).google_login)\n\t\t\t\terror = \"This account already has a social login\"\n\t\t\t\tcontext = {'error_msg': error,}\n\t\t\t\treturn render(request, 'login.html', context)\n\n\t\t\telif user.is_active:\n\n\t\t\t\tlogin(request, user)\n\t\t\t\treturn redirect('home')\n\n\t\t\telse:\n\t\t\t\terror = \"Your account is disabled\"\n\n\t\telse:\n\t\t\terror = \"Invalid credentials\"\n\n\telse:\n\t\terror = \"\"\n\n\tcontext = {\n\t\t'error_msg': error,\n\t}\n\n\treturn render(request, 'login.html', context)\n\n\ndef user_logout(request):\n\tlogout(request)\n\treturn redirect('index')\n\n\ndef register(request):\n\n\tregistered = False\n\terror_passwd = \"\"\n\terror_email = \"\"\n\n\tif request.method == 'POST':\n\n\t\tuser_form = UserForm(data=request.POST)\n\n\t\tif (not 
user_form.is_valid()):\n\t\t\tprint(user_form.errors)\n\n\t\tif (user_form.is_valid() and user_form.data['password'] == request.POST.get('confirm_password')):\n\n\t\t\temail = user_form.cleaned_data.get('username') \n\n\t\t\tuser = user_form.save()\n\t\t\tuser.email = email\n\t\t\tuser.set_password(user.password)\n\t\t\tuser.save()\n\n\t\t\tprofile = Instructor()\n\t\t\tprofile.user = user\n\t\t\tprofile.email = user.email\n\t\t\tprofile.save()\n\n\t\t\tregistered = True\n\n\t\t\n\t\telif (user_form.data['password'] != request.POST.get('confirm_password')):\n\t\t\terror_passwd = \"passwords do not match\"\n\n\n\telse:\n\t\tuser_form = UserForm()\n\n\tcontext = {\n\t\t'user_form': user_form,\n\t\t'registered': registered,\n\t\t'error_passwd': error_passwd,\n\t\t'error_email': error_email,\n\t}\n\n\treturn render(request, 'register.html', context)\n\n\ndef home(request):\n\n\tif request.user.is_authenticated:\n\n\t\tif Instructor.objects.filter(user=request.user).count() > 0:\n\t\t\tinstrctor = Instructor.objects.get(user=request.user)\n\t\t\tis_special = instrctor.special_admin\n\t\t\tif instrctor.user.first_name == '':\n\t\t\t\tname = instrctor.email\n\t\t\telse:\n\t\t\t\tname = instrctor.user.first_name + \" \" + instrctor.user.last_name\n\t\t\tcontext = {\n\t\t\t\t'is_special': is_special,\n\t\t\t\t'name': name,\n\t\t\t}\n\n\t\t\treturn render(request, 'home.html', context)\n\n\treturn render(request, 'permission-denied.html', {})\n\n\n'''\n\tview functions for special admin\n'''\n# creates the default midsem and endsem feedbacks\ndef createFeedbacks(course_code, midsem_date, midsem_time, endsem_date, endsem_time):\n\tcourse = Course.objects.get(pk=course_code)\n\n\t# midsem deadline\n\tmidsem_deadline = Deadline(\n\t\tcourse=course,\n\t\tassignment='Mid-semester Feedback',\n\t\tsubmission_date=midsem_date,\n\t\tsubmission_time=midsem_time,\n\t\tis_feedback=True)\n\tmidsem_deadline.save()\n\n\t# midsem deadline\n\tendsem_deadline = Deadline(\n\t\tcourse=course,\n\t\tassignment='End-semester Feedback',\n\t\tsubmission_date=endsem_date,\n\t\tsubmission_time=endsem_time,\n\t\tis_feedback=True)\n\tendsem_deadline.save()\n\n\t# Midsem feedback\n\tquestion1 = Question(\n\t\tquestion=\"This course as a whole so far has been\",\n\t\ta='Poor',\n\t\tb='Fair',\n\t\tc='Good',\n\t\td='Very Good',\n\t\te='Excellent')\n\tquestion1.save()\n\tanswer = Objectiveanswer(question=question1)\n\tanswer.save()\n\n\tquestion2 = Question(\n\t\tquestion=\"Feedback on course content\",\n\t\ta='Difficult',\n\t\tb='Well-designed',\n\t\tc='Poorly graded',\n\t\td='Learning experience',\n\t\te='Just right')\n\tquestion2.save()\n\tanswer = Objectiveanswer(question=question2)\n\tanswer.save()\n\n\tmidsem_feedback = Feedback(\n\t\tcourse=course,\n\t\ttitle=\"Mid-semester Feedback\",\n\t\tdescription=\"\",\n\t\tdeadline=midsem_deadline)\n\tmidsem_feedback.save()\n\tmidsem_feedback.questions.add(question1)\n\tmidsem_feedback.questions.add(question2)\n\n\t# endsem feedback\n\tquestion3 = Question(\n\t\tquestion=\"This course as a whole so far has been\",\n\t\ta='Poor',\n\t\tb='Fair',\n\t\tc='Good',\n\t\td='Very Good',\n\t\te='Excellent')\n\tquestion3.save()\n\tanswer = Objectiveanswer(question=question3)\n\tanswer.save()\n\n\tquestion4 = Question(\n\t\tquestion=\"Feedback on course content\",\n\t\ta='Difficult',\n\t\tb='Well-designed',\n\t\tc='Poorly graded',\n\t\td='Learning experience',\n\t\te='Just right')\n\tquestion4.save()\n\tanswer = Objectiveanswer(question=question4)\n\tanswer.save()\n\n\tendsem_feedback = 
Feedback(\n\t\tcourse=course,\n\t\ttitle=\"End-semester Feedback\",\n\t\tdescription=\"\",\n\t\tdeadline=endsem_deadline)\n\tendsem_feedback.save()\n\t# attach the end-semester questions\n\tendsem_feedback.questions.add(question3)\n\tendsem_feedback.questions.add(question4)\n\n\ndef add_course(request):\n\n\t# only special admin can access this feature (adding course)\n\tif request.user.is_authenticated and Instructor.objects.get(user=request.user).special_admin:\n\n\t\tif request.method == 'POST':\n\n\t\t\tcourse_form = CourseForm(data=request.POST)\n\n\t\t\tif course_form.is_valid():\n\n\t\t\t\tcourse_form.save(commit=False)\n\n\t\t\t\tcourse = Course()\n\t\t\t\tcourse.name = course_form.cleaned_data.get('name')\n\t\t\t\tcourse.code = course_form.cleaned_data.get('code')\n\t\t\t\tcourse.save()\n\n\t\t\t\tmidsem_date = course_form.cleaned_data.get('midsem_date')\n\t\t\t\tendsem_date = course_form.cleaned_data.get('endsem_date')\n\t\t\t\tmidsem_time = course_form.cleaned_data.get('midsem_time')\n\t\t\t\tendsem_time = course_form.cleaned_data.get('endsem_time')\n\n\n\t\t\t\tcreateFeedbacks(course.code, midsem_date, midsem_time, endsem_date, endsem_time)\n\n\t\t\t\treturn redirect('home')\n\n\t\t\telse:\n\t\t\t\tprint(\"invalid\")\n\t\t\t\tcontext = {\n\t\t\t\t\t'form': course_form,\n\t\t\t\t}\n\t\t\t\treturn render(request, 'addcourse.html', context)\n\n\t\telse:\n\n\t\t\tif Instructor.objects.filter(user=request.user).count() > 0:\n\t\t\t\t\n\t\t\t\tinstructor = Instructor.objects.get(user=request.user)\n\t\t\t\tif instructor.special_admin:\n\n\t\t\t\t\tform = CourseForm()\n\t\t\t\t\tcontext = {\n\t\t\t\t\t\t'form': form,\n\t\t\t\t\t}\n\t\t\t\t\treturn render(request, 'addcourse.html', context)\n\n\telse:\n\t\t\n\t\treturn render(request, 'permission-denied.html', {})\n\n\ndef view_courses(request):\n\n\tif request.user.is_authenticated and Instructor.objects.get(user=request.user).special_admin:\n\n\t\t# true when 'remove' button is clicked\n\t\tif request.method == 'POST':\n\t\t\tcourse_code = request.POST['course_code']\n\t\t\tCourse.objects.filter(pk=course_code).delete()\n\t\t\n\t\tcourses = Course.objects.all()\n\n\t\tcontext = {\n\t\t\t'courses': courses,\n\t\t}\n\n\t\treturn render(request, 'viewcourses.html', context)\n\t\t\t\t\n\telse:\n\n\t\treturn render(request, 'permission-denied.html', {})\n\n\ndef course_detail(request, course_code):\n\n\tif request.user.is_authenticated and Instructor.objects.get(user=request.user).special_admin:\n\t\tcourse = get_object_or_404(Course, pk=course_code)\n\n\t\tstudents = course.students.all()\n\t\tcourse_name = course.name\n\t\t# print(students)\n\n\t\tcontext = {\n\t\t\t'course_name': course_name,\n\t\t\t'students': students,\n\t\t\t'course_code': course_code,\n\t\t}\n\n\t\treturn render(request, 'course-detail.html', context)\n\n\telse:\n\t\treturn render(request, 'permission-denied.html', {})\t\n\t\n\ndef enroll(request):\n\n\tif request.user.is_authenticated and Instructor.objects.get(user=request.user).special_admin:\n\n\t\tif request.method == 'POST':\n\n\t\t\tif 'enroll' in request.POST:\n\t\t\t\tcourse_code = request.POST['code']\n\t\t\t\tcourse = Course.objects.get(pk=course_code)\n\t\t\t\t# key == rollno\n\t\t\t\tfor key in request.POST:\n\t\t\t\t\tif (request.POST[key] == 'on'):\n\t\t\t\t\t\tprint(key)\n\t\t\t\t\t\tstudent = Student.objects.get(pk=key)\n\t\t\t\t\t\tcourse.students.add(student)\n\n\t\t\telif 'dismiss' in request.POST:\n\t\t\t\tcourse_code = request.POST['course_code']\n\t\t\t\tstudent = request.POST['student']\n\t\t\t\t# return 
HttpResponse(str(Student.objects.get(pk=student)))\n\t\t\t\tStudent.objects.get(pk=student).course_set.remove(course_code)\n\t\t\t\treturn redirect('viewcourses')\n\n\t\t\telse:\n\t\t\t\tcourse_code = request.POST['course_code']\n\n\t\t\t\tstudents_list = []\n\n\t\t\t\tstudents = Student.objects.all()\n\t\t\t\tfor student in students:\n\t\t\t\t\tif student.course_set.filter(code=course_code).count() == 0:\n\t\t\t\t\t\tstudents_list.append(student)\n\n\t\t\t\tcontext = {\n\t\t\t\t\t'students': students_list,\n\t\t\t\t\t'course_code': course_code,\n\t\t\t\t}\n\n\t\t\t\treturn render(request, 'enroll.html', context)\n\n\t\treturn redirect('viewcourses')\n\n\telse :\n\t\treturn render(request, 'permission-denied.html', {})\n\n\n'''\n\tview functions for any instructor\n'''\ndef addfeedback(request):\n\n\tif request.user.is_authenticated:\n\t\t\n\t\tcourses = Course.objects.all()\n\t\tcontext = {\n\t\t\t'courses': courses,\n\t\t}\n\t\treturn render(request, 'addfeedback.html', context)\n\n\telse:\n\t\treturn redirect('login')\n\n\ndef newfeedback(request, course_code):\n\n\tif request.user.is_authenticated:\n\n\t\tQFormSet = formset_factory(QuestionForm)\n\t\terrors = \"\"\n\n\t\tif request.method == 'POST':\n\t\t\t\n\t\t\tfeedback_form = FeedbackForm(request.POST)\n\t\t\tquestion_formset = QFormSet(request.POST)\n\n\t\t\tif feedback_form.is_valid() and question_formset.is_valid():\n\n\t\t\t\tcourse = Course.objects.get(pk=course_code)\n\n\t\t\t\ttitle = feedback_form.cleaned_data.get('title')\n\t\t\t\tdescription = feedback_form.cleaned_data.get('description')\n\t\t\t\tsubmission_date = feedback_form.cleaned_data.get('submission_date')\n\t\t\t\tsubmission_time = feedback_form.cleaned_data.get('submission_time')\n\n\t\t\t\tnew_deadline = Deadline(\n\t\t\t\t\tcourse=course,\n\t\t\t\t\tassignment=title,\n\t\t\t\t\tsubmission_date=submission_date,\n\t\t\t\t\tsubmission_time=submission_time,\n\t\t\t\t\tis_feedback=True)\n\t\t\t\tnew_deadline.save()\n\n\t\t\t\tnew_feedback = Feedback(\n\t\t\t\t\t\t\ttitle=title,\n\t\t\t\t\t\t\tdescription=description,\n\t\t\t\t\t\t\tcourse=course,\n\t\t\t\t\t\t\tdeadline=new_deadline)\n\t\t\t\tnew_feedback.save()\n\n\t\t\t\tfor qform in question_formset:\n\t\t\t\t\tquestion = qform.cleaned_data.get('question')\n\t\t\t\t\ta = qform.cleaned_data.get('a')\n\t\t\t\t\tb = qform.cleaned_data.get('b')\n\t\t\t\t\tc = qform.cleaned_data.get('c')\n\t\t\t\t\td = qform.cleaned_data.get('d')\n\t\t\t\t\te = qform.cleaned_data.get('e')\n\n\t\t\t\t\tnew_question = Question(question=question, a=a, b=b, c=c, d=d, e=e)\n\t\t\t\t\tnew_question.save()\n\t\t\t\t\tanswer = Objectiveanswer(question=new_question)\n\t\t\t\t\tanswer.save()\n\t\t\t\t\tnew_feedback.questions.add(new_question)\n\n\t\t\telse:\n\t\t\t\tprint(\"validation error - feedback form\")\n\t\t\t\tcontext = {\n\t\t\t\t\t'errors': errors,\n\t\t\t\t\t'form': feedback_form,\n\t\t\t\t\t'qformset': question_formset,\n\t\t\t\t\t'course_code': course_code,\n\t\t\t\t}\n\t\t\t\treturn render(request, 'feedback-form.html', context)\n\n\n\t\telse:\n\t\t\tform = FeedbackForm()\n\t\t\tqformset = QFormSet()\n\t\t\tcontext = {\n\t\t\t\t'form': form,\n\t\t\t\t'qformset': qformset,\n\t\t\t\t'course_code': course_code,\n\t\t\t}\n\t\t\treturn render(request, 'feedback-form.html', context)\n\n\t\treturn redirect('home')\n\telse:\n\t\treturn redirect('login')\n\n\n\ndef viewfeedback(request):\n\n\tif request.user.is_authenticated:\n\n\t\tcourses = Course.objects.all()\n\t\tcontext = {\n\t\t\t'courses': courses,\n\t\t}\n\t\treturn 
render(request, 'viewfeedback.html', context)\n\n\telse:\n\t\treturn redirect('login')\n\n\ndef coursefeedbacks(request, course_code):\n\n\tif request.user.is_authenticated:\n\n\t\tfeedbacks = Course.objects.get(pk=course_code).feedback_set.all()\n\n\t\tcontext = {\n\t\t\t'feedbacks': feedbacks,\n\t\t}\n\t\treturn render(request, 'course-feedbacks.html', context)\n\n\telse:\n\t\treturn redirect('login')\n\n\ndef feedback_details(request, feedback_id):\n\n\tif request.user.is_authenticated:\n\n\t\tfeedback = Feedback.objects.get(pk=feedback_id)\n\t\tquestions = feedback.questions\n\t\tanswers = []\n\t\t# for q in questions:\n\t\t# \tif (hasattr(q, 'objectiveanswer'))\n\n\t\tcontext = {\n\t\t\t'feedback': feedback,\n\t\t}\n\t\treturn render(request, 'feedback-detail.html', context)\n\n\telse:\n\t\treturn redirect('login')\n\t\n\ndef add_deadline(request):\n\n\tif request.user.is_authenticated:\n\n\t\tcourses = Course.objects.all()\n\t\tcontext = {\n\t\t\t'courses': courses,\n\t\t}\n\t\treturn render(request, 'add-deadline.html', context)\n\n\treturn redirect('login')\n\n\ndef newdeadline(request, course_code):\n\n\tif request.user.is_authenticated:\n\n\t\tdeadline_form = DeadlineForm()\n\n\t\tif request.method == 'POST':\n\n\t\t\tdeadline_form = DeadlineForm(request.POST)\n\n\t\t\tif deadline_form.is_valid():\n\t\t\t\tassignment = deadline_form.cleaned_data.get('assignment')\n\t\t\t\tsubmission_date = deadline_form.cleaned_data.get('submission_date')\n\t\t\t\tsubmission_time = deadline_form.cleaned_data.get('submission_time')\n\t\t\t\t\n\t\t\t\tnew_deadline = Deadline(\n\t\t\t\t\tcourse=Course.objects.get(pk=course_code),\n\t\t\t\t\tassignment=assignment,\n\t\t\t\t\tsubmission_time=submission_time,\n\t\t\t\t\tsubmission_date=submission_date)\n\t\t\t\tnew_deadline.save()\n\n\t\t\t\treturn redirect('home')\n\t\t\n\t\tcontext = {\n\t\t\t'form': deadline_form,\n\t\t\t'course_code': course_code,\n\t\t}\n\t\treturn render(request, 'deadline-form.html', context)\n\n\telse:\n\t\treturn redirect('login')\n\n\ndef viewdeadlines(request):\n\n\tif request.user.is_authenticated:\n\n\t\trunning_deadlines = []\n\t\tpast_deadlines = []\n\n\t\tdeadlines = Deadline.objects.all().order_by('submission_date', 'submission_time')\n\n\t\tfor deadline in deadlines:\n\t\t\tif deadline.is_past_due():\n\t\t\t\tpast_deadlines.append(deadline)\n\t\t\telse:\n\t\t\t\trunning_deadlines.append(deadline)\n\n\t\tcontext = {\n\t\t\t'deadlines': deadlines,\n\t\t\t'past_deadlines': past_deadlines,\n\t\t\t'running_deadlines': running_deadlines,\n\t\t}\n\t\treturn render(request, 'viewdeadlines.html', context)\n\n\telse:\n\t\treturn redirect('login')\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"cs251project/django/admin_interface/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":18946,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"323584025","text":"import os\nimport secrets\nfrom flask import Flask, render_template, flash, redirect, url_for, request\nfrom forms import RegistrationForm, LoginForm, UpdateAccountForm, PictureForm, DeletePictureForm, CommentForm, \\\n DeleteCommentForm, UpdatePasswordForm\nfrom flask_sqlalchemy import SQLAlchemy\nfrom flask_migrate import Migrate\nfrom flask import request\nfrom flask_bcrypt import Bcrypt\nfrom datetime import datetime\nfrom flask_login import UserMixin, LoginManager\nfrom flask_login import login_user, current_user, logout_user, login_required\nfrom flask_ckeditor import CKEditor\n\napp = 
Flask(__name__)\n\napp.config['SQLALCHEMY_DATABASE_URI'] = \"postgresql://18_vrublevska:bM^75M#8@localhost:5432/18_vrublevska\"\ndb = SQLAlchemy(app)\nmigrate = Migrate(app, db)\nbcrypt = Bcrypt()\nlogin_manager = LoginManager(app)\nlogin_manager.init_app(app)\nlogin_manager.login_view = 'login'\nlogin_manager.login_message_category = 'info'\nckeditor = CKEditor(app)\n\napp.config['SECRET_KEY'] = 'secretkey123456'\n\n\nclass Picture(db.Model):\n __tablename__ = 'picture'\n\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(60), unique=True, nullable=False)\n link = db.Column(db.String(30), unique=True, nullable=False)\n description = db.Column(db.Text, nullable=False)\n year = db.Column(db.Integer)\n origin = db.Column(db.String(80))\n artist_id = db.Column(db.Integer, db.ForeignKey('artist.id'), nullable=False)\n comments = db.relationship('Comment', backref='picture', lazy=True)\n sizeh = db.Column(db.String(30), nullable=False, default='H')\n\n def __init__(self, name, description, year, origin, link, artist_id):\n self.name = name\n self.description = description\n self.year = year\n self.origin = origin\n self.link = link\n self.artist_id = artist_id\n\n def __repr__(self):\n return f\"Picture('{self.name}')\"\n\n\nclass Artist(db.Model):\n __tablename__ = 'artist'\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(30), unique=True, nullable=False)\n pictures = db.relationship('Picture', backref='author', lazy=True)\n\n def __repr__(self):\n return f\"Artist('{self.name}')\"\n\n\nclass Comment(db.Model):\n __tablename__ = 'comment'\n id = db.Column(db.Integer, primary_key=True)\n content = db.Column(db.Text, nullable=False)\n date_posted = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)\n user_id = db.Column(db.Integer, db.ForeignKey('user.id'), nullable=False)\n picture_id = db.Column(db.Integer, db.ForeignKey('picture.id'), nullable=False)\n\n # parent = relationship(Picture, cascade=\"all,delete\", backref=\"children\") , ondelete='CASCADE' passive_deletes=True,\n\n def __init__(self, content, user_id, picture_id):\n self.content = content\n self.user_id = user_id\n self.picture_id = picture_id\n\n\nclass User(db.Model, UserMixin):\n __tablename__ = 'user'\n id = db.Column(db.Integer, primary_key=True)\n email = db.Column(db.String(40), unique=True, nullable=False)\n username = db.Column(db.String(20), unique=True, nullable=False)\n # avatar = db.Column(db.String(20), nullable=False, default='one.jpg')\n role = db.Column(db.String(20), nullable=False, default='USER')\n password = db.Column(db.String(60), nullable=False)\n comments = db.relationship('Comment', backref='author', lazy=True)\n\n def __init__(self, username, email, password):\n self.username = username\n self.email = email\n self.password = password\n\n def __repr__(self):\n return f\"User('{self.username}')\"\n\n\n@app.route('/', methods=['POST', 'GET'])\ndef mainPage():\n return render_template('mainPage.html')\n\n\n@app.route('/about')\ndef about():\n return render_template('about.html')\n\n\n@login_manager.user_loader\ndef load_user(user_id):\n return User.query.get(user_id)\n\n\n@app.route('/gallery', methods=['POST', 'GET'])\ndef picturesMenu():\n if request.method == 'GET':\n page = request.args.get('page', 1, type=int)\n pictures = Picture.query.order_by(Picture.id.desc()).paginate(page=page, per_page=6)\n results = [\n {\n \"id\": picture.id,\n \"name\": picture.name,\n \"description\": picture.description,\n \"year\": picture.year,\n \"origin\": 
picture.origin,\n \"link\": picture.link,\n \"artist\": picture.author.name,\n \"sizeh\": picture.sizeh\n\n } for picture in pictures.items]\n return render_template('picturesMenu.html', results=results, pictures=pictures)\n\n\n@app.route('/gallery/<int:artist_id>')\ndef gallery_artist(artist_id):\n page = request.args.get('page', 1, type=int)\n button = artist_id\n artist = Artist.query.filter_by(id=artist_id).first_or_404()\n pictures = Picture.query.filter_by(artist_id=artist.id).order_by(Picture.id.desc()).paginate(page=page, per_page=8)\n results = [\n {\n \"id\": picture.id,\n \"name\": picture.name,\n \"description\": picture.description,\n \"year\": picture.year,\n \"origin\": picture.origin,\n \"artist\": picture.author.name,\n \"link\": picture.link,\n \"sizeh\": picture.sizeh\n } for picture in pictures.items]\n return render_template('picturesMenu.html', results=results, pictures=pictures, button=button)\n\n\n@app.route('/picture/<int:id>', methods=['GET', 'PUT', 'POST', 'DELETE'])\ndef pictureOne(id):\n # Picture.query.filter(Picture.id==(id).first)\n picture = Picture.query.get_or_404(id)\n\n # artist = Artist.query.filter_by(artist_id).first_or_404()\n comments = Comment.query.all()\n form = CommentForm()\n if form.validate_on_submit():\n comment = Comment(content=form.content.data, user_id=current_user.id, picture_id=id)\n db.session.add(comment)\n db.session.commit()\n flash('Comment has been added!', 'success')\n return redirect(url_for('pictureOne', id=picture.id))\n if request.method == 'GET':\n response = {\n \"id\": picture.id,\n \"name\": picture.name,\n \"description\": picture.description,\n \"year\": picture.year,\n \"origin\": picture.origin,\n \"link\": picture.link,\n \"sizeh\": picture.sizeh\n }\n return render_template('pictureOne.html', title=picture.name, picture=picture, form=form, comments=comments, response=response)\n elif request.method == 'PUT':\n data = request.get_json()\n picture.name = data['name']\n picture.description = data['description']\n picture.year = data['year']\n picture.link = data['link']\n picture.origin = data['origin']\n db.session.add(picture)\n db.session.commit()\n return {\"message\": f\"picture {picture.name} successfully updated\"}\n\n return render_template('pictureOne.html', title=picture.name, picture=picture)\n\n\n@app.route('/picture/<int:picture_id>/comment/<int:id>/delete', methods=['GET', 'PUT', 'POST', 'DELETE'])\n@login_required\ndef comment_delete(id, picture_id):\n if adminhe():\n comment = Comment.query.get_or_404(id)\n picture = Picture.query.get_or_404(picture_id)\n formone = DeleteCommentForm()\n if formone.is_submitted():\n db.session.delete(comment)\n db.session.commit()\n flash('Comment has been deleted', 'success')\n return redirect(url_for('pictureOne', id=picture_id))\n return render_template('commentDelete.html', comment=comment, picture=picture)\n else:\n return redirect(url_for('pictureOne', id=picture_id))\n\n\ndef adminhe():\n if current_user.role == 'ADMIN':\n return True\n elif current_user.role == 'USER':\n return False\n\n\n
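# a decorator form of this role check (editor's sketch; admin_required is a\n# hypothetical helper, not defined elsewhere in this app):\n#\n# from functools import wraps\n# def admin_required(view):\n#     @wraps(view)\n#     def wrapped(*args, **kwargs):\n#         if not adminhe():\n#             return redirect(url_for('picturesMenu'))\n#         return view(*args, **kwargs)\n#     return wrapped\n\n\n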
\n\n@app.route('/picture/<int:id>/update', methods=['GET', 'POST', 'DELETE'])\ndef picture_update(id):\n if adminhe():\n picture = Picture.query.get_or_404(id)\n artists = db.session.query(Artist).all()\n artist_list = [(artist.id, artist.name) for artist in artists]\n # Picture.query.filter(Picture.id==(id).first)\n\n # if current_user.role != 'ADMIN':\n # abort(403)\n form = PictureForm()\n form.artist.choices = artist_list\n if form.validate_on_submit():\n if form.picture.data:\n picture_file = save_picture(form.picture.data)\n picture.link = picture_file\n picture.name = form.name.data\n picture.description = form.description.data\n picture.year = form.year.data\n picture.origin = form.origin.data\n picture.artist_id = form.artist.data\n\n db.session.commit()\n flash('Changes have been saved', 'success')\n return redirect(url_for('pictureOne', id=picture.id))\n elif request.method == 'GET':\n form.name.data = picture.name\n form.description.data = picture.description\n form.year.data = picture.year\n form.origin.data = picture.origin\n return render_template('pictureEdit.html', title='Update picture', form=form, legend='Update picture',\n artists=artists)\n\n return render_template('pictureEdit.html', title='Update picture', form=form, legend='Update picture',\n artists=artists)\n else:\n return redirect(url_for('pictureOne', id=id))\n\n\n@app.route('/picture/<int:id>/delete', methods=['GET', 'PUT', 'POST', 'DELETE'])\ndef picture_delete(id):\n if adminhe():\n picture = Picture.query.filter(Picture.id == id).first()\n form = DeletePictureForm()\n if form.is_submitted():\n if picture.comments:\n komentarze = Comment.query.filter(Comment.picture_id == id).all()\n print(komentarze)\n for a in komentarze:\n db.session.delete(a)\n db.session.delete(picture)\n db.session.commit()\n flash('Picture has been deleted', 'success')\n return redirect(url_for('picturesMenu'))\n return render_template('pictureDelete.html', picture=picture, form=form, css='delete.css')\n else:\n return redirect(url_for('pictureOne', id=id))\n\n\ndef save_picture(form_picture):\n randomHex = secrets.token_hex(8)\n # filename itself and an extension\n _, f_ext = os.path.splitext(form_picture.filename)\n # underscore here to throw away an unused variable\n picture_filename = randomHex + f_ext\n # making a path\n picture_path = os.path.join(app.root_path, 'static/img/pictures/', picture_filename)\n\n form_picture.save(picture_path)\n return picture_filename\n
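\n# Note (editorial): secrets.token_hex(8) yields a 16-character hex filename, so a\n# collision is astronomically unlikely but is not checked; an existing file with the\n# same name would simply be overwritten by form_picture.save().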
\n\n@app.route('/picture/new', methods=['POST', 'GET'])\ndef newPicture():\n if adminhe():\n artists = db.session.query(Artist).all()\n artist_list = [(artist.id, artist.name) for artist in artists]\n form = PictureForm()\n form.artist.choices = artist_list\n\n if form.validate_on_submit():\n if form.picture.data:\n picture_file = save_picture(form.picture.data)\n\n picture = Picture(name=form.name.data, description=form.description.data, year=form.year.data,\n origin=form.origin.data, link=picture_file, artist_id=form.artist.data)\n # picture.artist_id = int(form.artist.data)\n db.session.add(picture)\n db.session.commit()\n flash('Picture has been added!', 'success')\n return redirect(url_for('picturesMenu'))\n return render_template('newPicture.html', title='new picture', form=form, legend='Add picture', artists=artists)\n else:\n return redirect(url_for('picturesMenu'))\n\n\n@app.route('/register', methods=['POST', 'GET'])\ndef register():\n if current_user.is_authenticated:\n return redirect(url_for('picturesMenu'))\n form = RegistrationForm()\n if form.validate_on_submit():\n hashed_password = bcrypt.generate_password_hash(form.password.data).decode('utf-8')\n user = User(username=form.username.data, email=form.email.data, password=hashed_password)\n db.session.add(user)\n db.session.commit()\n flash('Your account has been created! Hello and feel free to log in!', 'success')\n return redirect(url_for('login'))\n return render_template('register.html', title='Register', form=form)\n\n\n@app.route('/login', methods=['POST', 'GET'])\ndef login():\n if current_user.is_authenticated:\n return redirect(url_for('picturesMenu'))\n form = LoginForm()\n if form.validate_on_submit():\n user = User.query.filter_by(username=form.username.data).first()\n if user and bcrypt.check_password_hash(user.password, form.password.data):\n login_user(user, remember=form.remember.data)\n next_page = request.args.get('next') # if next exists it will be the parameter\n return redirect(next_page) if next_page else redirect(url_for('mainPage'))\n # return redirect(url_for('picturesMenu'))\n else:\n flash('Login failed', 'danger')\n return render_template('login.html', title='Login', form=form)\n\n\n@app.route('/logout')\ndef logout():\n logout_user()\n return redirect(url_for('picturesMenu'))\n\n\n@app.route(\"/account\")\n@login_required\ndef account():\n return render_template('account.html', title='Account')\n\n\n@app.route('/account/<int:id>/edit', methods=['GET', 'PUT', 'POST', 'DELETE'])\n@login_required\ndef accountEdit(id):\n form = UpdateAccountForm()\n if form.validate_on_submit():\n current_user.username = form.username.data\n current_user.email = form.email.data\n db.session.commit()\n flash('Your details have been saved', 'success')\n return redirect(url_for('account'))\n elif request.method == 'GET':\n form.username.data = current_user.username\n form.email.data = current_user.email\n return render_template('accountEdit.html', title='Account', form=form)\n\n\n@app.route('/account/<int:id>/editpassword', methods=['GET', 'PUT', 'POST', 'DELETE'])\n@login_required\ndef passwordEdit(id):\n form = UpdatePasswordForm()\n if form.validate_on_submit():\n hashed_password = bcrypt.generate_password_hash(form.password.data).decode('utf-8')\n current_user.password = hashed_password\n db.session.commit()\n flash('Password has been changed', 'success')\n return redirect(url_for('account'))\n return render_template('passwordEdit.html', title='Password', form=form)\n\n\nif __name__ == '__main__':\n app.run()\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":14651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"278805705","text":"from typing import Dict\n\nimport pandas as pd\n\nBAR_LAYOUT = {\n \"xaxis\": dict(\n showgrid=False,\n showline=False,\n showticklabels=False,\n zeroline=False,\n fixedrange=True,\n ),\n \"yaxis\": dict(\n showgrid=False,\n showline=False,\n zeroline=False,\n autorange=\"reversed\",\n ticks=\"outside\",\n tickcolor=\"white\",\n ticklen=1,\n fixedrange=True,\n ),\n \"plot_bgcolor\": \"#FAFAFA\",\n \"paper_bgcolor\": \"#FAFAFA\",\n \"margin\": dict(l=0, r=0, t=0, b=0),\n \"barmode\": \"group\",\n \"bargap\": 0.10,\n \"bargroupgap\": 0.0,\n \"font\": {\"size\": 12, \"color\": \"black\"},\n}\n\nCURVE_LAYOUT = {\n \"xaxis_showgrid\": False,\n \"yaxis_showgrid\": False,\n \"hovermode\": \"x unified\",\n \"plot_bgcolor\": \"#FFF\",\n \"margin\": dict(t=0, b=0, l=0, r=0),\n \"font\": {\"size\": 12, \"color\": \"black\"},\n \"legend\": dict(orientation=\"h\", yanchor=\"bottom\", y=1.02, xanchor=\"right\", x=1),\n \"hoverlabel\": {\"namelength\": -1},\n \"xaxis\": {\"fixedrange\": True},\n \"yaxis\": {\"fixedrange\": True},\n}\n\nPIE_LAYOUT = {\n \"autosize\": True,\n \"height\": 400,\n \"width\": 400,\n \"plot_bgcolor\": \"#FFF\",\n \"margin\": dict(t=1, 
b=1, l=0, r=0),\n \"legend\": dict(orientation=\"h\", yanchor=\"bottom\", y=1.02, xanchor=\"right\", x=1),\n \"hoverlabel\": dict(\n bgcolor=\"white\",\n bordercolor=\"white\",\n font=dict(color=\"black\", size=14, family=\"Roboto\"),\n ),\n}\n\nPIE_TRACES = {\"marker\": dict(line=dict(color=\"#000000\", width=1))}\n\nMESUSAGE_STACKED_BAR_CHART_LAYOUT = {\n \"plot_bgcolor\": \"#FFF\",\n \"margin\": dict(l=0, r=0, t=0, b=0),\n \"legend\": dict(orientation=\"h\", yanchor=\"bottom\", y=1.02, xanchor=\"right\", x=1),\n \"font\": {\"color\": \"black\"},\n \"hovermode\": \"x unified\",\n \"hoverlabel\": {\"namelength\": -1},\n \"barmode\": \"stack\",\n}\n\nSTACKED_BAR_CHART_LAYOUT = {\n \"xaxis\": dict(\n showgrid=False,\n showline=False,\n zeroline=False,\n tickformat=\"%\",\n fixedrange=True,\n ),\n \"yaxis\": dict(\n showgrid=False,\n showline=False,\n zeroline=False,\n ticks=\"outside\",\n tickcolor=\"white\",\n ticklen=1,\n visible=False,\n showticklabels=False,\n fixedrange=True,\n ),\n \"plot_bgcolor\": \"#FFF\",\n \"margin\": dict(l=0, r=0, t=0, b=0),\n \"legend\": dict(\n title=None, orientation=\"h\", yanchor=\"bottom\", y=1.02, xanchor=\"right\", x=1\n ),\n \"barmode\": \"stack\",\n \"hoverlabel\": dict(\n bgcolor=\"white\",\n bordercolor=\"white\",\n font=dict(color=\"black\", size=12, family=\"Roboto\"),\n ),\n}\n\nSTACKED_BAR_CHART_TRACES = {\"marker\": dict(line=dict(color=\"#000000\", width=1))}\n\nTREEMAP_LAYOUT = {\n \"xaxis_showgrid\": False,\n \"yaxis_showgrid\": False,\n \"hovermode\": \"x unified\",\n \"paper_bgcolor\": \"#FFF\",\n \"margin\": dict(t=0, b=0, l=0, r=0),\n \"font\": {\"size\": 12, \"color\": \"black\"},\n \"legend\": dict(orientation=\"h\", yanchor=\"bottom\", y=1.02, xanchor=\"right\", x=1),\n}\n\nDECLARATIONS_BY_TYPE_COLORS = {\n \"rupture\": \"#EF7D00\",\n \"risque\": \"#5E2A7E\",\n \"décret stock - sans risque\": \"#00B3CC\",\n}\n\nRUPTURES_BAR_LAYOUT = {\n \"xaxis\": dict(\n showgrid=False,\n showline=False,\n showticklabels=False,\n zeroline=False,\n fixedrange=True,\n ),\n \"yaxis\": dict(\n showgrid=False,\n showline=False,\n showticklabels=True,\n zeroline=False,\n ticks=\"outside\",\n tickcolor=\"white\",\n ticklen=1,\n fixedrange=True,\n ),\n \"plot_bgcolor\": \"#FFF\",\n \"paper_bgcolor\": \"#FFF\",\n \"margin\": dict(l=0, r=0, t=0, b=0),\n \"barmode\": \"group\",\n \"bargap\": 0.10,\n \"bargroupgap\": 0.0,\n \"font\": {\"size\": 12, \"color\": \"black\"},\n \"hovermode\": \"x unified\",\n \"hoverlabel\": {\"namelength\": -1},\n \"legend\": dict(orientation=\"h\", yanchor=\"bottom\", y=1.02, xanchor=\"right\", x=1),\n}\n\n# TODO: check all that style with Joelle\nGLOBALDEC_BAR_LAYOUT = RUPTURES_BAR_LAYOUT.copy()\ntickpos = list(range(0, 101, 25))\nGLOBALDEC_BAR_LAYOUT[\"xaxis\"] = dict(\n range=[0, 101],\n showgrid=False,\n gridwidth=1,\n gridcolor=\"black\",\n showline=True,\n showticklabels=True,\n tickmode=\"array\",\n tickvals=tickpos,\n ticktext=[f\"{i}%\" for i in tickpos],\n zeroline=False,\n fixedrange=True,\n side=\"top\",\n mirror=\"ticks\",\n)\nGLOBALDEC_BAR_LAYOUT[\"hovermode\"] = \"y\"\n\n\ndef get_ruptures_curve_layout(tickvals: pd.Series) -> Dict:\n return {\n \"xaxis\": {\"tickmode\": \"array\", \"tickvals\": tickvals, \"fixedrange\": True},\n \"yaxis\": {\"fixedrange\": True},\n \"xaxis_showgrid\": False,\n \"yaxis_showgrid\": False,\n \"hovermode\": \"x unified\",\n \"plot_bgcolor\": \"#FFF\",\n \"paper_bgcolor\": \"#FFF\",\n \"margin\": dict(t=0, b=0, l=0, r=0),\n \"font\": {\"size\": 12, \"color\": \"black\"},\n \"legend\": 
dict(orientation=\"h\", yanchor=\"bottom\", y=1.02, xanchor=\"right\", x=1),\n \"hoverlabel\": {\"namelength\": -1},\n \"showlegend\": True,\n }\n","sub_path":"datamed/web/apps/constants/layouts.py","file_name":"layouts.py","file_ext":"py","file_size_in_byte":5000,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"183903898","text":"import os\nimport numpy as np\n\nfrom fireworks import FireTaskBase, Firework, FWAction, explicit_serialize\n\nfrom atomate.utils.utils import get_logger, env_chk, load_class\nfrom atomate.vasp.firetasks.write_inputs import ModifyIncar\nfrom atomate.vasp.firetasks.glue_tasks import CopyVaspOutputs, get_calc_loc\nfrom atomate.vasp.firetasks.run_calc import RunVaspCustodian, RunVaspFake\n\nfrom mpmorph.runners.rescale_volume import RescaleVolume\nfrom mpmorph.analysis.md_data import MD_Data\n\n__authors__ = 'Nicholas Winner, Muratahan Aykol'\n\nlogger = get_logger(__name__)\n\n\n@explicit_serialize\nclass SpawnMDFWTask(FireTaskBase):\n \"\"\"\n The spawn task dictating volume searching. Given a VASP run, decide if the MD simulation has operated at the\n equilibrium density. i.e. see if the ensemble average pressure is approximately zero. If it is close enough to\n the equilibrium density, either complete the current sequence of calculations, or alternatively you can take\n the equilibrium density and proceed to a production run (via ProductionSpawnTask).\n\n Required parameters:\n\n pressure_threshold: (int or float) The window of acceptable pressures after which to stop.\n i.e: for pressure threshold = 5, if the pressure was -2.4, we'd consider it complete\n\n max_rescales: (int) The maximum number of times to rescale the volume before canceling the sequence\n vasp_cmd: (str) command to run vasp\n wall_time: (int) wall time, in seconds\n db_file: (str) path to file with database credentials\n spawn_count: (int) The number of spawns/rescales that have been executed prior to the current one\n\n Optional parameters:\n\n averaging_fraction: (float) given a VASP run of X steps, get the ensemble averaged pressure only for\n steps X*(1-averaging_fraction) to the end\n Default: .5\n\n production: (dict) The production run dictionary. If this quantity is not set, then the sequence of\n runs will stop when the density is found. If it is set, it dictates how the production\n run will be carried out. (see SingleMultiSpawn)\n\n p_v: (list) a list of tuples of the form [(pressure1, volume1),(pressure2, volume2),...] that is updated\n with each successive iteration of the spawn task. It is used to calcualte equilibrium volume via\n equation of state fitting once at least 3 datapoints exist.\n\n \"\"\"\n required_params = [\"pressure_threshold\", \"max_rescales\", \"vasp_cmd\", \"wall_time\",\n \"db_file\", \"spawn_count\"]\n optional_params = [\"averaging_fraction\", 'production', 'p_v']\n\n def run_task(self, fw_spec):\n vasp_cmd = self.get(\"vasp_cmd\")\n wall_time = self.get(\"wall_time\")\n db_file = self.get(\"db_file\")\n max_rescales = self.get(\"max_rescales\")\n pressure_threshold = self.get(\"pressure_threshold\")\n spawn_count = self.get(\"spawn_count\")\n production = self.get('production', {})\n p_v = self.get('p_v', [])\n\n if spawn_count > max_rescales:\n logger.info(\"WARNING: The max number of rescales has been reached... 
stopping density search.\")\n return FWAction(defuse_workflow=True)\n\n name = (\"spawnrun\"+str(spawn_count))\n\n current_dir = os.getcwd()\n\n averaging_fraction = self.get(\"averaging_fraction\", 0.5)\n data = MD_Data()\n data.parse_md_data(current_dir)\n pressure = data.get_md_data()['pressure']\n v = data.get_volume() # call the accessor (assumed to be a method, like get_md_data above)\n p = np.mean(pressure[int(averaging_fraction*(len(pressure)-1)):])\n p_v.append([p, v])\n\n logger.info(\"LOGGER: Current pressure is {}\".format(p))\n if np.fabs(p) > pressure_threshold:\n logger.info(\"LOGGER: Pressure is outside of threshold: Spawning another MD Task\")\n t = []\n t.append(CopyVaspOutputs(calc_dir=current_dir, contcar_to_poscar=True))\n t.append(RescaleVolumeTask(initial_pressure=p*1000.0, initial_temperature=1, p_v=p_v))\n t.append(RunVaspCustodian(vasp_cmd=vasp_cmd, gamma_vasp_cmd=\">>vasp_gam<<\",\n handler_group=\"md\", wall_time=wall_time))\n\n t.append(SpawnMDFWTask(pressure_threshold=pressure_threshold,\n max_rescales=max_rescales,\n wall_time=wall_time,\n vasp_cmd=vasp_cmd,\n db_file=db_file,\n spawn_count=spawn_count+1,\n averaging_fraction=averaging_fraction,\n production=production))\n new_fw = Firework(t, name=name, spec={'p_v': p_v})\n return FWAction(stored_data={'pressure': p}, detours=[new_fw])\n\n elif production:\n logger.info(\"LOGGER: Pressure is within the threshold: Moving to production runs...\")\n t = []\n t.append(CopyVaspOutputs(calc_dir=current_dir, contcar_to_poscar=True))\n t.append(RunVaspCustodian(vasp_cmd=vasp_cmd, gamma_vasp_cmd=\">>vasp_gam<<\",\n handler_group=\"md\", wall_time=wall_time))\n t.append(ProductionSpawnTask(vasp_cmd=vasp_cmd, wall_time=wall_time, db_file=db_file, spawn_count=1,\n production=production))\n production_fw = Firework(t, name=\"ProductionRun1\")\n return FWAction(stored_data={'pressure': p, 'density_calculated': True}, detours=[production_fw])\n\n else:\n return FWAction(stored_data={'pressure': p, 'density_calculated': True})\n\n\n@explicit_serialize\nclass ProductionSpawnTask(FireTaskBase):\n\n \"\"\"\n A task for spawning MD calculations in production runs. Only considers whether or not the number of\n production tasks is reached for the stop criteria at the moment. It also stores where all the\n checkpoints of a production run are located. This list of directories is used for assembling the\n checkpoints into a single analysis task.\n\n Required Params:\n vasp_cmd (str): command to run vasp\n wall_time (int): wall time for each checkpoint in seconds\n db_file (str): path to file with db credentials\n spawn_count (int): The number of MD checkpoints that have been spawned. Used to track when production\n is completed.\n production (int): The number of MD checkpoints in total for this production run.\n\n Optional Params:\n checkpoint_dirs (list): A list of all directories where checkpoints exist for this production\n MD run. 
Is listed as optional because the first spawn will not have\n any checkpoint directories\n\n\n\n \"\"\"\n\n required_params = ['vasp_cmd', 'wall_time', 'spawn_count', 'production']\n optional_params = ['checkpoint_dirs', 'db_file', 'modify_incar']\n\n def run_task(self, fw_spec):\n\n prev_checkpoint_dirs = fw_spec.get(\"checkpoint_dirs\", []) # If this is the first spawn, have no prev dirs\n prev_checkpoint_dirs.append(os.getcwd()) # add the current directory to the list of checkpoints\n\n vasp_cmd = self[\"vasp_cmd\"]\n wall_time = self[\"wall_time\"]\n db_file = self.get(\"db_file\", None)\n spawn_count = self[\"spawn_count\"]\n production = self['production']\n num_checkpoints = production.get('num_checkpoints',1)\n incar_update = production.get('incar_update', None)\n\n if spawn_count > num_checkpoints:\n logger.info(\"LOGGER: Production run completed. Took {} spawns total\".format(spawn_count))\n return FWAction(stored_data={'production_run_completed': True})\n\n else:\n name = (\"ProductionRun\" + str(abs(spawn_count)))\n\n logger.info(\"LOGGER: Starting spawn {} of production run\".format(spawn_count))\n\n t = []\n\n t.append(CopyVaspOutputs(calc_dir=os.getcwd(), contcar_to_poscar=True))\n\n if incar_update:\n t.append(ModifyIncar(incar_update=incar_update))\n\n t.append(RunVaspCustodian(vasp_cmd=vasp_cmd, gamma_vasp_cmd=\">>vasp_gam<<\",\n handler_group=\"md\", wall_time=wall_time))\n t.append(ProductionSpawnTask(wall_time=wall_time,\n vasp_cmd=vasp_cmd,\n db_file=db_file,\n spawn_count=spawn_count + 1,\n production=production))\n new_fw = Firework(t, name=name, spec={'checkpoint_dirs': prev_checkpoint_dirs})\n\n return FWAction(stored_data={'production_run_completed': False},\n update_spec={'checkpoint_dirs': prev_checkpoint_dirs}, detours=[new_fw])\n\n\n@explicit_serialize\nclass RescaleVolumeTask(FireTaskBase):\n \"\"\"\n Volume rescaling\n \"\"\"\n required_params = [\"initial_temperature\", \"initial_pressure\"]\n optional_params = [\"target_pressure\", \"target_temperature\", \"alpha\", \"beta\", \"p_v\"]\n\n def run_task(self, fw_spec):\n # Initialize volume correction object with last structure from last_run\n initial_temperature = self[\"initial_temperature\"]\n initial_pressure = self[\"initial_pressure\"]\n target_temperature = self.get(\"target_temperature\", initial_temperature)\n target_pressure = self.get(\"target_pressure\", 0.0)\n alpha = self.get(\"alpha\", 10e-6)\n beta = self.get(\"beta\", 10e-7)\n p_v = self.get('p_v', [])\n\n corr_vol = RescaleVolume.of_poscar(poscar_path=\"./POSCAR\", initial_temperature=initial_temperature,\n initial_pressure=initial_pressure,\n target_pressure=target_pressure,\n target_temperature=target_temperature, alpha=alpha, beta=beta)\n # Rescale volume based on temperature difference first. 
Const T will return no volume change:\n corr_vol.by_thermo(scale='temperature')\n # TO DB (\"Rescaled volume due to delta T: \", corr_vol.structure.volume)\n # Rescale volume based on pressure difference:\n if len(p_v) > 2:\n corr_vol.by_EOS(p_v=p_v)\n else:\n corr_vol.by_thermo(scale='pressure')\n # TO DB (\"Rescaled volume due to delta P: \", corr_vol.structure.volume)\n corr_vol.poscar.write_file(\"./POSCAR\")\n # Pass the rescaled volume to Poscar\n return FWAction(stored_data=corr_vol.structure.as_dict())","sub_path":"mpmorph/workflow/firetasks/glue_tasks.py","file_name":"glue_tasks.py","file_ext":"py","file_size_in_byte":10642,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"64643337","text":"import numpy as np \nimport scipy.integrate as sciint\n\nfrom ..project_1.fem_2d_solver import Poisson2DSolver\n\nfrom gaussian_quad import quadrature1D, quadrature2D, \\\n gaussquad1d_points_weights, gaussquad2d_points_weights\n\n\ndef generate_triangle_jacobian(self, p1, p2, p3):\n \"\"\"\n Function to generate the Jacobian J = ∂(x,y)/∂(r, s)\\n\n for transforming from the reference triangle to global coordinates.\\n\n element: The target element (triangle) of the transformation from the reference element.\n \"\"\"\n J = np.column_stack([p2-p1, p3-p1])\n return J\n\n\ndef test_triangle_integral():\n \n # Source Function:\n def f(p):\n \"\"\"\n Source function f(r, theta) = −8π*cos(2πr²)+ 16π²r²sin(2πr²)\n p: np.array([x, y])\n \"\"\"\n r_squared = p[0]**2 + p[1]**2\n term_1 = -8.0*np.pi*np.cos(2*np.pi*r_squared)\n term_2 = 16*np.pi**2*r_squared*np.sin(2*np.pi*r_squared)\n return term_1 + term_2\n \n x0, y0 = 0.1, 0.5\n a = 0.02\n b = 0.06\n\n # Scipy-integration:\n x_start, x_end = x0, x0 + a\n y_start = lambda x: y0\n y_end = lambda x: -(b/a)*(x - x0) + (y0 + b)\n\n integrand = lambda y, x: f([x, y])\n\n sci_int = sciint.dblquad(integrand, x_start, x_end, y_start, y_end, )\n\n # Vertex integration:\n p1 = np.array([x0, y0])\n p2 = np.array([x0 + a, y0])\n p3 = np.array([x0, y0 + b])\n\n quad_int = quadrature2D(f, p1, p2, p3, Nq=4)\n print(\"Tull!\")\n # Make a test solver:\n # N = 100\n # test_solver = Poisson2DSolver(N=)\n\n\ndef test_quadrature1D(show_weights_points=False, Nq=4):\n # Testing functionality of 1D-Gaussian Quadrature:\n\n if show_weights_points: # Display Gaussian points and weights.\n for key, value in gaussquad1d_points_weights.items():\n print(f\"Gauss Points: {key}\\nPoints: {value[0]}\\nWeights: {value[1]}\\n\")\n\n func = lambda x: np.exp(x)\n a, b = 1.0, 2.0\n I_quad = quadrature1D(func, a, b, Nq)\n\n I_exact = np.exp(b) - np.exp(a)\n\n print(f\"\\nExact answer: {I_exact}\\\n \\nGauss Quad. answer: {I_quad}\\\n \\nRelative Error: {abs((I_exact - I_quad)/I_exact):.6e}\")\n\n\ndef test_vector1DGauss():\n a = np.array([2.0, 2.0])\n b = np.array([6.0, 4.0])\n\n f = lambda p: 3*p[0]**2 + 4*p[1] + 5\n\n result = quadrature1D(f, a, b, Nq=4)\n print(\"Result:\", result)\n\n\ndef test_quadrature2D(show_weights_points=False):\n if show_weights_points: # Display Gaussian points and weights.\n for key, value in gaussquad2d_points_weights.items():\n print(f\"Gauss Points: {key}\\nPoints: {value[0]}\\nWeights: {value[1]}\\n\") \n \n def problem_2_test():\n print(\"\\nTesting Problem 2. 
Integral of log(x+y):\")\n func = lambda p: np.log(p[0] + p[1])\n p1 = np.array([1.0, 0.0])\n p2 = np.array([3.0, 1.0])\n p3 = np.array([3.0, 2.0])\n\n I_exact = 1.165417026740377\n I_quad = quadrature2D(func, p1, p2, p3, Nq=4)\n\n # Ugly expression:\n # I_exact1 = 1.165417026740377\n # I_quad1 = quadrature2D(func, p1, p2, p3, Nq=4)\n\n print(f\"\\nExact answer: {I_exact}\\\n \\nGauss Quad. answer: {I_quad}\\\n \\nRelative Error: {abs((I_exact - I_quad)/I_exact):.6e}\")\n\n def polynomial_triangle_test():\n print(\"\\nTesting Polynomial integration: f(x,y) = 2x²-y:\")\n a = 4.0 \n b = 12.0\n\n func = lambda p: 2*p[0]**2 - p[1]\n p1 = np.array([0.0, 0.0])\n p2 = np.array([a, 0.0])\n p3 = np.array([0.0, b])\n \n I_exact = (a*b/6.0)*(a**2 - b)\n I_quad = quadrature2D(func, p1, p2, p3, Nq=3)\n\n # I_quad = sciint.dblquad(lambda y, x: func([x, y]), 0.0, a, y_0_x, y_1_x)[0]\n\n print(f\"\\nExact answer: {I_exact}\\\n \\nGauss Quad. answer: {I_quad}\\\n \\nRelative Error: {abs((I_exact - I_quad)/I_exact):.6e}\")\n\n problem_2_test() \n polynomial_triangle_test()\n\n\n\n\nif __name__ == \"__main__\":\n test_quadrature1D(show_weights_points=False, Nq=4)\n test_quadrature2D()\n # test_triangle_integral()\n test_vector1DGauss()\n \n ","sub_path":"root/tools/test_gauss_quad.py","file_name":"test_gauss_quad.py","file_ext":"py","file_size_in_byte":4088,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"424844014","text":"import sys\nimport logging\n\nfrom django.core.management.base import BaseCommand, CommandError\n\nfrom mail_interface.keywords import keywords\nfrom mail_interface.models import Keyword\n\nlogger = logging.getLogger(__name__)\n\nclass Command(BaseCommand):\n\n args = '<old_keyword> <new_keyword>'\n help = 'Rename a keyword in the database'\n\n def handle(self, *args, **options):\n if len(args) < 2:\n raise CommandError('Two arguments expected.')\n\n old_keyword = args[0]\n new_keyword = args[1]\n\n if old_keyword in keywords.keys():\n raise CommandError('Please rename %s to %s in the keywords file first.' % (old_keyword,\n new_keyword))\n\n if not new_keyword in keywords.keys():\n raise CommandError('Cannot find %s in the keywords file' % new_keyword)\n\n if Keyword.objects.filter(name=old_keyword).count() == 0:\n raise CommandError('Cannot find keyword \\'%s\\' in the database.' % old_keyword)\n
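\n # The guards above ensure the old name still exists in the DB and the new name is\n # already registered in the keywords file, so the rename itself is a simple\n # fetch-modify-save on the single matching row.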
\n k = Keyword.objects.get(name=old_keyword)\n if k:\n k.name = new_keyword\n k.save()\n\n logger.info(\"Renamed '%s' to '%s'\", old_keyword, new_keyword)\n","sub_path":"mail_interface/management/commands/rename_keyword.py","file_name":"rename_keyword.py","file_ext":"py","file_size_in_byte":1223,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"579333570","text":"#\n# ZeroMQ SUB client in Python\n# Connects a SUB socket to tcp://127.0.0.1:6789 by default and prints\n# every message published on the \"vowels\" and \"five\" topics\n#\n\ndef connect_server(url='127.0.0.1', port=6789):\n import zmq\n\n context = zmq.Context()\n\n # Socket to talk to server\n print(\"Connecting to hello world server…\")\n sub = context.socket(zmq.SUB)\n sub.connect(f'tcp://{url}:{port}')\n sub.setsockopt(zmq.SUBSCRIBE, b'vowels')\n sub.setsockopt(zmq.SUBSCRIBE, b'five')\n while True:\n topic, word = sub.recv_multipart()\n print(topic, word)\n","sub_path":"ch11/ex05/zeromq_client_send_word.py","file_name":"zeromq_client_send_word.py","file_ext":"py","file_size_in_byte":557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"636074296","text":"n=int(input())\r\n\r\nT=sorted([int(input()) for _ in range(n)])[::-1]\r\n\r\nx=y=0\r\n\r\nfor t in T:\r\n if x 0) else \"cpu\")\n\n# Plot some training images\nreal_batch = next(iter(dataloader))\nplt.figure(figsize=(8, 8))\nplt.axis(\"off\")\nplt.title(\"Training Images\")\nplt.imshow(np.transpose(vutils.make_grid(real_batch[0].to(device)[:64], padding=2, normalize=True).cpu(), (1, 2, 0)))\n\n\n# custom weights initialization called on netG and netD\ndef weights_init(m):\n classname = m.__class__.__name__\n if classname.find('Conv') != -1:\n nn.init.normal_(m.weight.data, 0.0, 0.02)\n elif classname.find('BatchNorm') != -1:\n nn.init.normal_(m.weight.data, 1.0, 0.02)\n nn.init.constant_(m.bias.data, 0)\n\n\nclass SelfAttn(nn.Module):\n \"\"\" Self attention Layer\"\"\"\n\n def __init__(self, in_dim, activation):\n super(SelfAttn, self).__init__()\n self.chanel_in = in_dim\n self.activation = activation\n\n self.query_conv = spectral_norm(nn.Conv2d(in_channels=in_dim, out_channels=in_dim // 8, kernel_size=1))\n self.key_conv = spectral_norm(nn.Conv2d(in_channels=in_dim, out_channels=in_dim // 8, kernel_size=1))\n self.value_conv = spectral_norm(nn.Conv2d(in_channels=in_dim, out_channels=in_dim, kernel_size=1))\n self.gamma = nn.Parameter(torch.zeros(1))\n\n self.softmax = nn.Softmax(dim=-1) #\n\n def forward(self, x):\n \"\"\"\n inputs :\n x : input feature maps( B X C X W X H)\n returns :\n out : self attention value + input feature\n attention: B X N X N (N is Width*Height)\n \"\"\"\n m_batch_size, c, width, height = x.size()\n proj_query = self.query_conv(x).view(m_batch_size, -1, width * height).permute(0, 2, 1) # B X CX(N)\n proj_key = self.key_conv(x).view(m_batch_size, -1, width * height) # B X C x (*W*H)\n energy = torch.bmm(proj_query, proj_key) # transpose check\n attention = self.softmax(energy) # BX (N) X (N)\n proj_value = self.value_conv(x).view(m_batch_size, -1, width * height) # B X C X N\n\n out = torch.bmm(proj_value, attention.permute(0, 2, 1))\n out = out.view(m_batch_size, c, width, height)\n\n out = self.gamma * out + x\n return out\n\n\n# Generator Code\nclass ResBlockUp(nn.Module):\n def __init__(self, in_channels, out_channels):\n super().__init__()\n\n self.in_channels = in_channels\n self.out_channels = out_channels\n\n self.skip_conv = spectral_norm(nn.Conv2d(self.in_channels, 
self.out_channels, 1, 1, 0, bias=False))\n\n self.main_conv1 = spectral_norm(nn.Conv2d(self.in_channels, self.out_channels, 3, 1, 1, bias=False))\n self.main_bn = nn.BatchNorm2d(self.out_channels)\n self.main_relu = nn.ReLU(True)\n self.main_conv2 = spectral_norm(nn.Conv2d(self.out_channels, self.out_channels, 3, 1, 1, bias=False))\n\n self.out_bn = nn.BatchNorm2d(self.out_channels)\n self.out_relu = nn.ReLU(True)\n\n def forward(self, input):\n main = input\n skip = input\n\n skip = nn.functional.interpolate(skip, scale_factor=2)\n skip = self.skip_conv(skip)\n\n main = nn.functional.interpolate(main, scale_factor=2)\n main = self.main_conv1(main)\n main = self.main_bn(main)\n main = self.main_relu(main)\n main = self.main_conv2(main)\n\n out = main + skip\n out = self.out_bn(out)\n out = self.out_relu(out)\n return out\n\n\nclass Generator(nn.Module):\n def __init__(self):\n super(Generator, self).__init__()\n\n self.main = nn.Sequential(\n spectral_norm(nn.ConvTranspose2d(nz, ngf * 16, 4, 1, 0, bias=False)),\n nn.BatchNorm2d(ngf * 16),\n nn.ReLU(True),\n ResBlockUp(ngf * 16, ngf * 8),\n ResBlockUp(ngf * 8, ngf * 4),\n ResBlockUp(ngf * 4, ngf * 2),\n SelfAttn(ngf * 2, 'relu'),\n ResBlockUp(ngf * 2, ngf),\n spectral_norm(nn.Conv2d(ngf, nc, 3, 1, 1, bias=False)),\n nn.Tanh()\n )\n\n def forward(self, input):\n return self.main(input)\n\n\n# Create the generator\nnetG = Generator().to(device)\n\n# # Create EMA generator\n# netG_ema = Generator().to(device)\n\n# Handle multi-gpu if desired\nif (device.type == 'cuda') and (ngpu > 1):\n netG = nn.DataParallel(netG, list(range(ngpu)))\n\nif not have_checkpoint:\n # Apply the weights_init function to randomly initialize all weights\n # to mean=0, stdev=0.2.\n netG.apply(weights_init)\nelse:\n netG.load_state_dict(checkpoint['netG_state_dict'])\n# netG_ema.load_state_dict(checkpoint['netG_ema_state_dict'])\n\n# Print the model\nprint(netG)\n\n\nclass ResBlockDown(nn.Module):\n def __init__(self, in_channels, out_channels):\n super().__init__()\n\n self.in_channels = in_channels\n self.out_channels = out_channels\n\n self.skip_conv = spectral_norm(nn.Conv2d(self.in_channels, self.out_channels, 1, 1, 0))\n\n self.main_conv1 = spectral_norm(nn.Conv2d(self.in_channels, self.out_channels, 3, 1, 1))\n self.main_relu = nn.ReLU(True)\n self.main_conv2 = spectral_norm(nn.Conv2d(self.out_channels, self.out_channels, 3, 1, 1))\n\n self.out_relu = nn.ReLU(True)\n\n def forward(self, input):\n main = input\n skip = input\n\n skip = self.skip_conv(skip)\n skip = nn.functional.avg_pool2d(skip, 2)\n\n main = self.main_conv1(main)\n main = self.main_relu(main)\n main = self.main_conv2(main)\n main = nn.functional.avg_pool2d(main, 2)\n\n out = main + skip\n out = self.out_relu(out)\n return out\n\n\nclass Discriminator(nn.Module):\n def __init__(self):\n super(Discriminator, self).__init__()\n self.main = nn.Sequential(\n # input is (nc) x 64 x 64\n ResBlockDown(nc, ndf),\n SelfAttn(ndf, 'relu'),\n # state size. (ndf) x 32 x 32\n ResBlockDown(ndf, ndf * 2),\n # state size. (ndf*2) x 16 x 16\n ResBlockDown(ndf * 2, ndf * 4),\n # state size. (ndf*4) x 8 x 8\n ResBlockDown(ndf * 4, ndf * 8),\n # state size. 
(ndf*8) x 4 x 4\n spectral_norm(nn.Conv2d(ndf * 8, 1, 3, 1, 0, bias=False))\n )\n\n def forward(self, input):\n return self.main(input)\n\n\n# Create the Discriminator\nnetD = Discriminator().to(device)\n\n# Handle multi-gpu if desired\nif (device.type == 'cuda') and (ngpu > 1):\n netD = nn.DataParallel(netD, list(range(ngpu)))\n\nif not have_checkpoint:\n # Apply the weights_init function to randomly initialize all weights\n # to mean=0, stdev=0.2.\n netD.apply(weights_init)\nelse:\n netD.load_state_dict(checkpoint['netD_state_dict'])\n\n# Print the model\nprint(netD)\n\n# Setup Adam optimizers for both G and D\noptimizerD = optim.Adam(netD.parameters(), lr=lr_d, betas=betas)\noptimizerG = optim.Adam(netG.parameters(), lr=lr_g, betas=betas)\n\n# mixed precision initialization\nnetD, optimizerD = amp.initialize(\n netD, optimizerD, opt_level=\"O2\",\n keep_batchnorm_fp32=True, loss_scale=\"dynamic\"\n)\n\nnetG, optimizerG = amp.initialize(\n netG, optimizerG, opt_level=\"O2\",\n keep_batchnorm_fp32=True, loss_scale=\"dynamic\"\n)\n\nif have_checkpoint:\n optimizerD.load_state_dict(checkpoint['optimizerD_state_dict'])\n optimizerG.load_state_dict(checkpoint['optimizerG_state_dict'])\n amp.load_state_dict(checkpoint['amp'])\n\n# Training Loop\n\n# Lists to keep track of progress\nG_losses = []\nD_losses = []\niters = 0\n\nfinal_epoch = num_epochs + completed_epochs\n\nprint(\"Starting Training Loop...\")\n# For each epoch\nfor epoch in range(num_epochs):\n start_time = datetime.now()\n\n # For each batch in the dataloader\n for i, data in enumerate(dataloader, 0):\n for j in range(d_steps_per_g_step):\n netD.zero_grad()\n\n ############################\n # (1) Update D network: maximize log(D(x)) + log(1 - D(G(z)))\n ###########################\n # Train with all-real batch\n # Format batch\n real_cpu = data[0].to(device)\n # Forward pass real batch through D\n output = netD(real_cpu).view(-1)\n # Calculate loss on all-real batch\n errD_real = torch.nn.ReLU()(1.0 - output).mean()\n D_x = errD_real.item()\n\n # Train with all-fake batch\n # Generate batch of latent vectors\n noise = torch.randn(batch_size, nz, 1, 1).to(device)\n # Generate fake image batch with G\n fake = netG(noise)\n # Classify all fake batch with D\n output = netD(fake.detach()).view(-1)\n # Calculate D's loss on the all-fake batch\n errD_fake = torch.nn.ReLU()(1.0 + output).mean()\n D_G_z1 = errD_fake.item()\n\n errD = errD_fake + errD_real\n with amp.scale_loss(errD, optimizerD) as scaled_loss:\n scaled_loss.backward()\n\n # Update D\n optimizerD.step()\n\n netG.zero_grad()\n\n ############################\n # (2) Update G network: maximize log(D(G(z)))\n ###########################\n\n # Since we just updated D, perform another forward pass of all-fake batch through D\n output = netD(fake).view(-1)\n # Calculate G's loss based on this output\n errG = - output.mean()\n # Calculate gradients for G\n with amp.scale_loss(errG, optimizerG) as scaled_loss:\n scaled_loss.backward()\n D_G_z2 = errG.item()\n\n # Update G\n optimizerG.step()\n\n # update netG_ema\n # with torch.no_grad():\n # for key in netG.state_dict():\n # netG_ema.state_dict()[key].data.copy_(netG_ema.state_dict()[key] * ema_decay\n # + netG.state_dict()[key] * (1 - ema_decay))\n\n # Output training stats\n if i % math.ceil(len(dataloader) / 5) == 0:\n print('[%d/%d][%d/%d]\\tLoss_D: %.4f\\tLoss_G: %.4f\\tD(x): %.4f\\tD(G(z)): %.4f / %.4f'\n % (1 + completed_epochs, final_epoch, i, len(dataloader),\n errD.item(), errG.item(), D_x, D_G_z1, D_G_z2))\n\n # Save 
Losses for plotting later\n G_losses.append(errG.item())\n D_losses.append(errD.item())\n\n iters += 1\n\n # save models and optimizers\n completed_epochs += 1\n torch.save({\n 'netG_state_dict': netG.state_dict(),\n 'netD_state_dict': netD.state_dict(),\n 'optimizerG_state_dict': optimizerG.state_dict(),\n 'optimizerD_state_dict': optimizerD.state_dict(),\n # 'netG_ema_state_dict': netG_ema.state_dict(),\n 'completed_epochs': completed_epochs,\n 'amp': amp.state_dict()\n }, checkpoint_file_path)\n\n print(\"training time: \" + str(datetime.now() - start_time))\n\n if completed_epochs % 5 == 0:\n print_frechet_inception_distance()\n\n# plot of D & G’s losses versus training iterations.\n\nplt.figure(figsize=(10, 5))\nplt.title(\"Generator and Discriminator Loss During Training\")\nplt.plot(G_losses, label=\"G\")\nplt.plot(D_losses, label=\"D\")\nplt.xlabel(\"iterations\")\nplt.ylabel(\"Loss\")\nplt.legend()\nplt.show()\n\n# plot real images and fake images side by side\n\n# Grab a batch of real images from the dataloader\nreal_batch = next(iter(dataloader))\n\n# Plot the real images\nplt.figure(figsize=(15, 15))\nplt.subplot(1, 2, 1)\nplt.axis(\"off\")\nplt.title(\"Real Images\")\nplt.imshow(np.transpose(vutils.make_grid(real_batch[0].to(device)[:64], padding=5, normalize=True).cpu(), (1, 2, 0)))\n\n# Plot the fake images from the last epoch\nwith torch.no_grad():\n fake = netG(torch.randn(64, nz, 1, 1).to(device)).detach().cpu()\n\nfakes = vutils.make_grid(fake, padding=2, normalize=True)\n\nplt.subplot(1, 2, 2)\nplt.axis(\"off\")\nplt.title(\"Fake Images\")\nplt.imshow(np.transpose(fakes, (1, 2, 0)))\nplt.show()\n","sub_path":"11-celeba-gan-experiment.py","file_name":"11-celeba-gan-experiment.py","file_ext":"py","file_size_in_byte":15574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"175906058","text":"import pygame\nimport time\nimport random\nimport CombatSystem\nimport colors\nimport gameobjects\nimport printfunctions\nimport time\nfrom combatscenes import gabriel_truss\nfrom combatscenes import hand_vise\nfrom combatscenes import serious_spring\n\npygame.init()\n\ndisplay_width = 800\ndisplay_height = 600\ngameDisplay = pygame.display.set_mode((display_width,display_height))\nsmallfont = pygame.font.SysFont(\"comicsansms\",15)\nmediumfont = pygame.font.SysFont(\"comicsansms\",50)\nlargefont = pygame.font.SysFont(\"comicsansms\",80)\nclock = pygame.time.Clock()\nFPS = 80\n\ndef isLegalinMap(playerx, playery, playerxwidth, playeryheight):\n if 0 <= playerx and playerx + playerxwidth <= display_width:\n if 0 <= playery and playery + playeryheight <= display_height:\n return True\n return False\n\ndef checkglobalEquality(playerx, playery, playerxwidth, playeryheight, buildingsList):\n if len(buildingsList) > 0:\n for building in buildingsList:\n if not building.areLegalCoords(playerx, playery, playerxwidth, playeryheight):\n return False\n\n if not isLegalinMap(playerx, playery, playerxwidth, playeryheight):\n return False\n\n return True\n\n#temparr will have one number in it if the boss is defeated\ndef mapLoop(index, ctr = 0, bossDefeated = False):\n \n BD = bossDefeated\n buildingsList = []\n if index == 5:\n characterXloc = 20\n characterYloc = display_height/2\n else:\n characterXloc = display_width / 2\n characterYloc = display_height / 2\n characterWidth = 30\n characterXchange = 0\n characterYchange = 0\n inMap = True\n\n #adding buildings, trees, etc...\n \n characterStatus = gameobjects.Building(0,15*display_height/16, 
display_width,1*display_height/16,colors.yellow)\n buildingsList.append(characterStatus)\n kings_mansion = gameobjects.Building(0,0,display_width,display_height/3,colors.gray)\n buildingsList.append(kings_mansion)\n middlebuilding = gameobjects.Building(7*display_width/8,display_height/3,display_width/8,2*(1.5 + 6/8 - 1/3)*display_height/8,colors.gray)\n buildingsList.append(middlebuilding)\n forest = gameobjects.Building(0, 6*display_height/8,display_width,1.5*display_height/8,colors.green)\n buildingsList.append(forest)\n springCivilian1 = gameobjects.Building(5*display_width/8 - 1.5*characterWidth,3*display_height/8,characterWidth,characterWidth,colors.yellow, message = \"Please, get out of here. THE BOSS... he's a serious man.\",img = \"SpringCitizen.jpg\")\n buildingsList.append(springCivilian1)\n springCivilian2 = gameobjects.Building(5*display_width/8 - 1.5*characterWidth,3*display_height/8 + 3*characterWidth,characterWidth,characterWidth,colors.yellow, message = \"I have to machine this piece... or THE BOSS will get me...\",img = \"SpringCitizen.jpg\")\n buildingsList.append(springCivilian2)\n springCivilian3 = gameobjects.Building(5*display_width/8 - 1.5*characterWidth,3*display_height/8 + 6*characterWidth,characterWidth,characterWidth,colors.yellow, message = \"Some days I wish I had a little bit of free time...\",img = \"SpringCitizen.jpg\")\n buildingsList.append(springCivilian3)\n springCivilian4 = gameobjects.Building(5*display_width/8 - 8*characterWidth,3*display_height/8 + 50,characterWidth,characterWidth,colors.yellow, message = \"THE BOSS isn't going to be happy when he sees this...\",img = \"SpringCitizen.jpg\")\n buildingsList.append(springCivilian4)\n springCivilian5 = gameobjects.Building(5*display_width/8 - 8*characterWidth,3*display_height/8 + 150,characterWidth,characterWidth,colors.yellow, message = \"My SPRING CONSTANT has been steadily decreasing since I joined...\",img = \"SpringCitizen.jpg\")\n buildingsList.append(springCivilian5)\n seriousSpring = gameobjects.Building(6*display_width/8, 2.8*display_height/8 + 3*characterWidth,2*characterWidth,2*characterWidth,colors.yellow, message = \"You got the ELASTIC KEY!\",img = \"serious_spring_small.jpg\")\n buildingsList.append(seriousSpring)\n while inMap:\n keys = pygame.key.get_pressed()\n\n if keys[pygame.K_LEFT]:\n characterXchange = -characterWidth/10\n\n if keys[pygame.K_RIGHT]:\n characterXchange = characterWidth/10\n\n if keys[pygame.K_UP]:\n characterYchange = -characterWidth/10\n\n if keys[pygame.K_DOWN]:\n characterYchange = characterWidth/10\n \n if springCivilian1.isWithinQueryDistance(characterXloc,characterYloc,characterWidth,characterWidth):\n if keys[pygame.K_e]:\n springCivilian1.state = \"clicked\"\n \n elif springCivilian2.isWithinQueryDistance(characterXloc,characterYloc,characterWidth,characterWidth):\n if keys[pygame.K_e]:\n springCivilian2.state = \"clicked\"\n \n elif springCivilian3.isWithinQueryDistance(characterXloc,characterYloc,characterWidth,characterWidth):\n if keys[pygame.K_e]:\n springCivilian3.state = \"clicked\"\n \n elif springCivilian4.isWithinQueryDistance(characterXloc,characterYloc,characterWidth,characterWidth):\n if keys[pygame.K_e]:\n springCivilian4.state = \"clicked\"\n\n elif springCivilian5.isWithinQueryDistance(characterXloc,characterYloc,characterWidth,characterWidth):\n if keys[pygame.K_e]:\n springCivilian5.state = \"clicked\"\n\n elif seriousSpring.isWithinQueryDistance(characterXloc,characterYloc,characterWidth,characterWidth):\n if keys[pygame.K_e] and not 
BD:\n test = serious_spring.combatLoop()\n if test == None:\n return None\n BD = True\n\n elif keys[pygame.K_e]:\n seriousSpring.state = \"clicked\"\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n inMap = False\n return None\n\n elif event.type == pygame.KEYDOWN:\n if event.key == pygame.K_ESCAPE:\n inMap = False\n return None\n\n if event.type == pygame.KEYUP:\n characterXchange = 0\n characterYchange = 0\n\n if checkglobalEquality(characterXloc + characterXchange,characterYloc + characterYchange,characterWidth, characterWidth, buildingsList):\n characterXloc += characterXchange\n characterYloc += characterYchange\n \n\n if springCivilian1.state == \"clicked\" and not springCivilian1.isWithinQueryDistance(characterXloc,characterYloc,characterWidth,characterWidth):\n springCivilian1.state = \"regular\"\n if springCivilian2.state == \"clicked\" and not springCivilian2.isWithinQueryDistance(characterXloc,characterYloc,characterWidth,characterWidth):\n springCivilian2.state = \"regular\"\n if springCivilian3.state == \"clicked\" and not springCivilian3.isWithinQueryDistance(characterXloc,characterYloc,characterWidth,characterWidth):\n springCivilian3.state = \"regular\"\n if springCivilian4.state == \"clicked\" and not springCivilian4.isWithinQueryDistance(characterXloc,characterYloc,characterWidth,characterWidth):\n springCivilian4.state = \"regular\"\n if springCivilian5.state == \"clicked\" and not springCivilian5.isWithinQueryDistance(characterXloc,characterYloc,characterWidth,characterWidth):\n springCivilian5.state = \"regular\"\n if seriousSpring.state == \"clicked\" and not seriousSpring.isWithinQueryDistance(characterXloc,characterYloc,characterWidth,characterWidth):\n seriousSpring.state = \"regular\"\n if characterXloc < 20:\n if not BD:\n return (6,\"\")\n else:\n return (6,\"defeated\")\n\n\n #draw things here\n if not bossDefeated:\n if characterXloc > 2*display_width/5 and ctr == 0:\n test1 = hand_vise.combatLoop()\n if test1 == None:\n return None\n ctr += 1\n elif characterXloc > 3*display_width/5 and ctr ==1:\n test2 = gabriel_truss.combatLoop()\n if test2 == None:\n return None\n ctr += 1\n\n gameDisplay.fill(colors.white)\n characterStatus.drawToScreen()\n kings_mansion.drawToScreen()\n middlebuilding.drawToScreen()\n forest.drawToScreen()\n springCivilian1.drawToScreen()\n springCivilian2.drawToScreen()\n springCivilian3.drawToScreen()\n springCivilian4.drawToScreen()\n springCivilian5.drawToScreen()\n seriousSpring.drawToScreen()\n printfunctions.message_to_screen(\"Springfield Factory\",colors.black,3.7*display_height / 8)\n pygame.draw.rect(gameDisplay, colors.red, [characterXloc,characterYloc,characterWidth,characterWidth])\n\n\n\n\n pygame.display.update()\n clock.tick(FPS)","sub_path":"StaticsRPG/exe.win32-3.4/locales/SpringfieldFactory.py","file_name":"SpringfieldFactory.py","file_ext":"py","file_size_in_byte":8959,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"339778155","text":"from math import *\r\n\r\nfrom OpenGL.GL import *\r\nfrom OpenGL.GLU import *\r\n\r\nimport pygame\r\nfrom pygame.locals import *\r\n\r\nfrom gameobjects.matrix44 import *\r\nfrom gameobjects.vector3 import *\r\n\r\n# External Files\r\nfrom Cube import *\r\nfrom Plane import *\r\n\r\n# OpenGL setup for specified display.\r\ndef resize(width, height):\r\n glViewport(0, 0, width, height)\r\n glMatrixMode(GL_PROJECTION)\r\n glLoadIdentity()\r\n gluPerspective(60.0, float(width)/height, .1, 50.)\r\n 
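# Editorial note: the projection matrix is set up once per resize call; switching\r\n    # back to the modelview stack here ensures later per-frame transforms affect the\r\n    # camera and scene rather than the projection.\r\n    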
glMatrixMode(GL_MODELVIEW)\r\n glLoadIdentity()\r\n\r\n# OpenGL setup for other settings.\r\ndef init():\r\n glEnable(GL_DEPTH_TEST)\r\n glClearColor(0.0,0.6,1.0,0.5)\r\n \r\n# Main Function.\r\ndef run():\r\n pygame.init()\r\n global width;width=900\r\n global height;height=width/16*9\r\n global screen;screen=pygame.display.set_mode([width,height],HWSURFACE|OPENGL|DOUBLEBUF)\r\n pygame.display.set_caption(\"Steampunk\")\r\n \r\n resize(width,height)\r\n init()\r\n \r\n clock=pygame.time.Clock()\r\n \r\n # Setup of camera matrices putting the player in a certain position.\r\n camera_matrix=Matrix44()\r\n camera_matrix.translate=(10.0,0.0,10.0)\r\n \r\n # Setting up rotation and movement vectors to calculate movement.\r\n rotation_direction=Vector3()\r\n rotation_speed=radians(90.0)\r\n movement_direction=Vector3()\r\n movement_speed=5.0\r\n \r\n plane1=Plane([10.0,0.0,0.0],20.0,20.0,[0.0,0.6,0.0])\r\n \r\n # Game loop.\r\n while True:\r\n for event in pygame.event.get():\r\n if event.type==QUIT:\r\n pygame.quit();quit()\r\n if event.type==KEYUP and event.key==K_ESCAPE:\r\n return\r\n \r\n \r\n #Clear the screen.\r\n glClear(GL_COLOR_BUFFER_BIT|GL_DEPTH_BUFFER_BIT)\r\n \r\n # Calculate time passed for frame rate.\r\n time_passed_seconds=clock.tick()/1000.\r\n \r\n # Get pressed keys for input.\r\n pressed=pygame.key.get_pressed()\r\n \r\n # Set the rotation and movement vectors each loop.\r\n rotation_direction.set(0.0,0.0,0.0)\r\n movement_direction.set(0.0,0.0,0.0)\r\n \r\n # Input handling.\r\n if pressed[K_a]:\r\n rotation_direction.y=+1.0\r\n if pressed[K_d]:\r\n rotation_direction.y=-1.0\r\n if pressed[K_w]:\r\n movement_direction.z=-1.0\r\n if pressed[K_s]:\r\n movement_direction.z=+1.0\r\n \r\n \r\n # Transform every matrix based upon movement and time passed in order to display the scene.\r\n rotation=rotation_direction*rotation_speed*time_passed_seconds\r\n rotation_matrix=Matrix44.xyz_rotation(*rotation)\r\n camera_matrix*=rotation_matrix\r\n \r\n heading=Vector3(camera_matrix.forward)\r\n movement=heading*movement_direction.z*movement_speed\r\n camera_matrix.translate+=movement*time_passed_seconds\r\n \r\n # Load the matrix into OpenGL to be displayed.\r\n glLoadMatrixd(camera_matrix.get_inverse().to_opengl())\r\n \r\n # Draw the cube.\r\n plane1.render()\r\n \r\n # Update the display.\r\n pygame.display.flip()\r\n \r\nrun()","sub_path":"Steampunk/Steampunk/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3163,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"410402391","text":"import json\nimport urllib.request\nimport urllib.parse\nimport urllib.error\nfrom tmdb_helpers import get_user_api_key\nfrom tmdb_helpers import make_tmdb_api_request\n\n\ndef load_films(user_api_key, films_amount=1000): # this shit is not working!!\n all_films = []\n for film_id in range(films_amount):\n try:\n all_films.append(make_tmdb_api_request(method='/movie/%d' % film_id, api_key=user_api_key))\n except urllib.error.HTTPError as err:\n if err.code == 404: #if no film on this id\n continue\n else:\n raise\n finally:\n print('%s percent complete' % str(film_id*100 / films_amount))\n return all_films\n\n\nif __name__ == '__main__':\n user_api_key = get_user_api_key()\n if not user_api_key:\n print('Invalid api key')\n raise SystemExit\n films_amount = 100\n print('please, wait, this operation may take smth like 15-20 minutes')\n all_films = load_films(user_api_key, films_amount)\n with open('./MyFilmDB.json', mode='w', 
encoding='utf-8') as my_file: # \"./\" instead of path\n json.dump(all_films, my_file)\n","sub_path":"make_own_db.py","file_name":"make_own_db.py","file_ext":"py","file_size_in_byte":1137,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"120544166","text":"import acm\nimport FUxCore\n \n\ndef Show(shell, caption, choiceList, names = None):\n customDlg = ChoiceListAddEditDialog()\n customDlg.m_caption = caption\n customDlg.m_choiceList = choiceList\n customDlg.m_originalName = choiceList.Name()\n customDlg.m_names = names\n\n builder = customDlg.CreateLayout()\n \n ret = None\n if acm.UX().Dialogs().ShowCustomDialogModal(shell, builder, customDlg ) :\n ret = customDlg.m_choiceList\n\n return ret\n \nclass ChoiceListAddEditDialog (FUxCore.LayoutDialog):\n def __init__(self):\n self.m_okButton = None\n self.m_nameInput = None\n self.m_descriptionInput = None\n self.m_choiceList = ''\n self.m_caption = ''\n self.m_names = None\n self.m_originalName = ''\n self.m_maxLength = 39 #max length of name and description\n \n def HandleApply( self ):\n ret = True\n name = self.m_nameInput.GetData()\n description = self.m_descriptionInput.GetData()\n\n\n if self.m_names != None:\n if name in self.m_names and name != self.m_choiceList.Name():\n acm.UX().Dialogs().MessageBoxInformation(self.m_fuxDlg.Shell(), 'The name ' + name + ' is already used, please select another name')\n self.m_nameInput.SetFocus()\n self.m_nameInput.SetTextSelection(0, -1)\n\n\n ret = None\n\n if ret :\n self.m_choiceList.Name(name)\n self.m_choiceList.Description(description)\n\n return ret\n\n def OnEditChanged(self, ud, cd):\n self.UpdateControls()\n \n def UpdateControls(self) :\n self.m_okButton.Enabled(len(self.m_nameInput.GetData()) > 0)\n \n def HandleCreate( self, dlg, layout):\n self.m_fuxDlg = dlg\n self.m_fuxDlg.Caption(self.m_caption)\n self.m_okButton = layout.GetControl('ok')\n\n self.m_nameInput = layout.GetControl('nameInput')\n self.m_descriptionInput = layout.GetControl('descriptionInput')\n \n self.m_nameInput.AddCallback( 'Changed', self.OnEditChanged, self )\n\n self.m_nameInput.SetData(self.m_choiceList.Name())\n self.m_nameInput.SetTextSelection(0, -1)\n self.m_nameInput.MaxTextLength(self.m_maxLength)\n \n\n self.m_descriptionInput.SetData(self.m_choiceList.Description())\n self.m_descriptionInput.MaxTextLength(self.m_maxLength)\n\n self.UpdateControls()\n\n def CreateLayout(self):\n b = acm.FUxLayoutBuilder()\n b.BeginVertBox()\n b. AddInput('nameInput', 'Name', 50)\n b. AddInput('descriptionInput', 'Description', 50)\n b. AddSpace(10)\n b. BeginHorzBox()\n b. AddFill()\n b. AddButton('ok', 'OK')\n b. AddButton('cancel', 'Cancel')\n b. 
EndBox() \n b.EndBox()\n return b\n\n\n","sub_path":"Extensions/Default/FPythonCode/ChoiceListAddEditDialog.py","file_name":"ChoiceListAddEditDialog.py","file_ext":"py","file_size_in_byte":2856,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"119109087","text":"from rest_framework import serializers\n\nfrom adapter.AliOss import AliOss\nfrom deposit.models import MemberDepositRecord\nfrom operation.models import MemberDuesRecord\nfrom operation.rest.serializers import CustomerSerializer, TradeRecordSerializer\nfrom order.models import ParkingFee, Order\nfrom order.rest.serializers import OrderSerializer\n\n\nclass DepositRecordSerializer(serializers.ModelSerializer):\n \"\"\"\n 会员违章预缴金记录\n \"\"\"\n customer = CustomerSerializer(read_only=True)\n trade_record = TradeRecordSerializer(read_only=True)\n\n class Meta:\n model = MemberDepositRecord\n fields = ('id', 'customer', 'amount', 'real_name', 'created_at', 'state_name', 'pay_at',\n 'pay_type_name', 'trade_record', 'op_source', 'op_user_id', 'op_desc',\n 'apply_return_at', 'return_amount', 'mod_amount', 'mod_desc', 'confirm_at',\n 'return_pay_at', 'is_curr', 'memo', 'state', 'client', 'client_version',\n 'last_operator_name', 'grade_type')\n read_only_fields = ('id', 'customer', 'amount', 'created_at', 'state_name', 'pay_at',\n 'pay_type_name', 'trade_record', 'op_source', 'op_user_id', 'op_desc',\n 'apply_return_at', 'return_amount', 'mod_amount', 'mod_desc',\n 'confirm_at', 'return_pay_at', 'is_curr', 'memo', 'state', 'client',\n 'client_version', 'last_operator_name', 'grade_type')\n\n\nclass DepositRecordMemberSerializer(serializers.ModelSerializer):\n class Meta:\n model = MemberDepositRecord\n fields = ('id', 'state', 'state_name', 'pay_at', 'amount', 'state_name', 'grade_type')\n\n\nclass DuesRecordSerializer(serializers.ModelSerializer):\n \"\"\"\n 用户会费记录\n \"\"\"\n customer = CustomerSerializer(read_only=True)\n\n class Meta:\n model = MemberDuesRecord\n read_only_fields = ('id', 'customer', 'real_name', 'amount', 'city_name', 'created_at',\n 'pay_at', 'pay_name', 'grade_name', 'member_sdate', 'member_edate',\n 'trade_record_id', 'op_source', 'op_user_id', 'state_name', 'memo',\n 'state', 'client', 'client_version')\n fields = ('id', 'customer', 'real_name', 'amount', 'city_name', 'created_at', 'pay_at',\n 'pay_name', 'grade_name', 'member_sdate', 'member_edate', 'trade_record_id',\n 'op_source', 'op_user_id', 'state_name', 'is_curr', 'memo', 'state', 'client',\n 'client_version')\n\n\nclass ParkingFeeOrderSerializer(serializers.ModelSerializer):\n mileage = serializers.CharField(source=\"trip.mileage\", read_only=True)\n\n class Meta:\n model = Order\n fields = ('id', 'user_real_name', 'order_no', 'mileage', 'service_city_name', 'trip',\n 'started_at', 'last_order_time_diff', 'user_mobile', 'car_number', 'co_name_s',\n 'co_name_e', 's_addr', 'e_addr')\n\n\nclass ParkingFeeSerializer(serializers.ModelSerializer):\n \"\"\"\n 停车费报销记录\n \"\"\"\n order = ParkingFeeOrderSerializer(read_only=True)\n img_url = serializers.SerializerMethodField(read_only=True)\n order_type = serializers.CharField(source='order.order_type')\n\n class Meta:\n model = ParkingFee\n fields = ('id', 'order', 'amounts', 'img_url', 'state_name', 'memo', 'op_source', 'op_user',\n 'op_time', 'op_desc', 'state', 'cr_time', 'origin_img_url', 'target',\n 'order_type', 'last_operator_name')\n read_only_fields = ('id', 'order', 'amounts', 'img_url', 'op_source', 'op_user', 'op_time',\n 'op_desc', 'cr_time', 
'origin_img_url', 'target', 'order_type',\n 'last_operator_name')\n\n def get_img_url(self, obj):\n return AliOss.get_resize_img_url(self.context, obj.img_key)\n\n\nclass DepositRecordVASerializer(serializers.ModelSerializer):\n customer = CustomerSerializer(read_only=True)\n trade_record = TradeRecordSerializer(read_only=True)\n\n class Meta:\n model = MemberDepositRecord\n fields = ('id', 'mod_desc', 'customer', 'trade_record', 'mod_amount', 'pay_type')\n\n\nclass BatchDepositRecordSerializer(serializers.ModelSerializer):\n ids = serializers.PrimaryKeyRelatedField(\n many=True,\n queryset=MemberDepositRecord.objects.filter(state=MemberDepositRecord.States.APPLY_RETURN))\n\n class Meta:\n model = MemberDepositRecord\n fields = ('ids', )\n\n\nclass ParkingFeeVASerializer(serializers.ModelSerializer):\n order = OrderSerializer(read_only=True)\n op = serializers.IntegerField(required=True)\n\n class Meta:\n model = ParkingFee\n fields = ('id', 'order', 'amounts', 'op_desc', 'op')\n\n def validate(self, data):\n from .apis import ApplyParkingFee\n if data['op'] not in (ApplyParkingFee.Ops.PASS, ApplyParkingFee.Ops.FAIL):\n raise serializers.ValidationError(\"该操作不允许\")\n return data\n\n\nclass BatchPayDepositRecordSerializer(serializers.ModelSerializer):\n ids = serializers.PrimaryKeyRelatedField(\n many=True,\n queryset=MemberDepositRecord.objects.filter(state=MemberDepositRecord.States.CONFIRMED))\n\n class Meta:\n model = MemberDepositRecord\n fields = ('ids', 'pay_type', 'mod_desc')\n","sub_path":"simplegit/zerocar-master/deposit/rest/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":5388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"141335524","text":"import logging\nimport time\nimport io\nimport random\nimport shutil\nimport os\nimport itertools\nimport codecs\nimport json\nimport datetime\nimport textwrap\nimport click\nimport attr\nimport progressbar # type: ignore\nimport mutagen # type: ignore\nfrom watchdog.observers import Observer # type: ignore\nfrom prettytable import PrettyTable # type: ignore\nfrom click_skeleton import AdvancedGroup\nfrom click_skeleton.helpers import PrettyDefaultDict\nfrom musicbot.helpers import genfiles\nfrom musicbot.watcher import MusicWatcherHandler\nfrom musicbot.player import play\nfrom musicbot.playlist import print_playlist\nfrom musicbot.config import Conf\nfrom musicbot.music.file import File\nfrom musicbot.music.helpers import bytes_to_human, all_files, empty_dirs, except_directories\nfrom musicbot.cli.file import flat_option, checks_and_fix_options, folder_argument\nfrom musicbot.cli.music_filter import music_filter_options, interleave_option\nfrom musicbot.cli.user import user_options\nfrom musicbot.cli.options import yes_option, save_option, folders_argument, output_option, dry_option\n\n\nlogger = logging.getLogger(__name__)\n\n\n@click.group('local', help='Local music management', cls=AdvancedGroup)\ndef cli() -> None:\n pass\n\n\n@cli.command(help='Count musics')\n@user_options\ndef count(user):\n print(user.count_musics())\n\n\n@cli.command(help='Raw query', aliases=['query', 'fetch'])\n@click.argument('query')\n@user_options\ndef execute(user, query):\n print(json.dumps(user.fetch(query)))\n\n\n@cli.command(aliases=['stat'], help='Generate some stats for music collection with filters')\n@output_option\n@user_options\n@music_filter_options\ndef stats(user, output, music_filter):\n stats = user.do_stat(music_filter)\n if output == 'json':\n 
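# stats is a plain mapping of counters; the 'table' branch below documents the\n        # expected keys (musics, artists, albums, genres, keywords, size, duration).\n        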
print(json.dumps(stats))\n elif output == 'table':\n pt = PrettyTable([\"Stat\", \"Value\"])\n pt.add_row([\"Music\", stats['musics']])\n pt.add_row([\"Artist\", stats['artists']])\n pt.add_row([\"Album\", stats['albums']])\n pt.add_row([\"Genre\", stats['genres']])\n pt.add_row([\"Keywords\", stats['keywords']])\n pt.add_row([\"Size\", bytes_to_human(int(stats['size']))])\n pt.add_row([\"Total duration\", datetime.timedelta(seconds=int(stats['duration']))])\n print(pt)\n\n\n@cli.command(help='List folders')\n@output_option\n@user_options\ndef folders(user, output):\n _folders = user.folders()\n if output == 'json':\n print(json.dumps(_folders))\n elif output == 'table':\n pt = PrettyTable([\"Folders\"])\n for f in _folders:\n pt.add_row([f])\n print(pt)\n\n\n@cli.command(help='Load musics')\n@folders_argument\n@save_option\n@user_options\ndef scan(user, save, folders):\n user_folders = user.folders()\n if not folders:\n folders = user_folders\n files = genfiles(folders)\n user.bulk_insert(files)\n\n if save:\n Conf.config.configfile['musicbot']['folders'] = ','.join(set(folders))\n Conf.config.write()\n\n\n@cli.command(help='Watch files changes in folders')\n@user_options\ndef watch(user):\n click.echo(f'Watching: {user.folders()}')\n event_handler = MusicWatcherHandler(user=user)\n observer = Observer()\n for f in user.folders():\n observer.schedule(event_handler, f, recursive=True)\n observer.start()\n try:\n while True:\n time.sleep(50)\n except KeyboardInterrupt:\n observer.stop()\n observer.join()\n\n\n@cli.command(help='Clean all musics')\n@user_options\n@yes_option\ndef clean(user, yes):\n if yes or click.confirm(\"Are you sure to delete all musics from DB?\"):\n user.clean_musics()\n\n\n@cli.command(help='Clean and load musics')\n@folders_argument\n@user_options\ndef rescan(user, folders):\n if not folders:\n folders = user.folders()\n files = genfiles(folders)\n user.clean_musics()\n user.bulk_insert(files)\n\n\n@cli.command(help='Copy selected musics with filters to destination folder')\n@dry_option\n@yes_option\n@user_options\n@music_filter_options\n@flat_option\n@click.option('--delete', help='Delete files on destination if not present in library', is_flag=True)\n@click.argument('destination')\ndef sync(user, delete, yes, dry, destination, music_filter, flat):\n logger.info(f'Destination: {destination}')\n musics = user.do_filter(music_filter)\n if not musics:\n click.secho('no result for filter, nothing to sync')\n return\n\n music_files = [File(path=m['path'], folder=m['folder']) for m in musics]\n\n files = list(all_files(destination))\n logger.info(f\"Files : {len(files)}\")\n if not files:\n logger.warning(\"no files found in destination\")\n\n destinations = {f[len(destination) + 1:]: f for f in files}\n\n logger.info(f\"Destinations : {len(destinations)}\")\n if flat:\n sources = {m.flat_filename: m.path for m in music_files}\n else:\n sources = {m.filename: m.path for m in music_files}\n\n logger.info(f\"Sources : {len(sources)}\")\n to_delete = set(destinations.keys()) - set(sources.keys())\n if delete and (yes or click.confirm(f'Do you really want to delete {len(to_delete)} files and playlists ?')):\n with Conf.progressbar(max_value=len(to_delete)) as pbar:\n for d in to_delete:\n try:\n pbar.desc = f\"Deleting musics and playlists: {os.path.basename(destinations[d])}\"\n if dry:\n logger.info(f\"[DRY-RUN] Deleting {destinations[d]}\")\n continue\n try:\n logger.info(f\"Deleting {destinations[d]}\")\n os.remove(destinations[d])\n except OSError as e:\n 
logger.error(e)\n                finally:\n                    pbar.value += 1\n                    pbar.update()\n\n    to_copy = set(sources.keys()) - set(destinations.keys())\n    with Conf.progressbar(max_value=len(to_copy)) as pbar:\n        logger.info(f\"To copy: {len(to_copy)}\")\n        for c in sorted(to_copy):\n            final_destination = os.path.join(destination, c)\n            try:\n                pbar.desc = f'Copying {os.path.basename(sources[c])} to {destination}'\n                if dry:\n                    logger.info(f\"[DRY-RUN] Copying {sources[c]} to {final_destination}\")\n                    continue\n                logger.info(f\"Copying {sources[c]} to {final_destination}\")\n                os.makedirs(os.path.dirname(final_destination), exist_ok=True)\n                shutil.copyfile(sources[c], final_destination)\n            except KeyboardInterrupt:\n                logger.debug(f\"Cleanup {final_destination}\")\n                try:\n                    os.remove(final_destination)\n                except OSError:\n                    pass\n                raise\n            finally:\n                pbar.value += 1\n                pbar.update()\n\n    for d in empty_dirs(destination):\n        if any(e in d for e in except_directories):\n            logger.debug(f\"Invalid path {d}\")\n            continue\n        if dry:\n            logger.info(f\"[DRY-RUN] Removing empty dir {d}\")\n            continue\n        logger.info(f\"Removing empty dir {d}\")\n        shutil.rmtree(d)\n\n\n@cli.command(help='Generate a new playlist', aliases=['tracks'])\n@output_option\n@user_options\n@music_filter_options\n@interleave_option\ndef playlist(user, output, music_filter, interleave):\n    tracks = user.do_filter(music_filter)\n\n    if interleave:\n        tracks_by_artist = PrettyDefaultDict(list)\n        for track in tracks:\n            tracks_by_artist[track['artist']].append(track)\n        tracks = [\n            track\n            for track in itertools.chain(*itertools.zip_longest(*tracks_by_artist.values()))\n            if track is not None\n        ]\n\n    if music_filter.shuffle:\n        random.shuffle(tracks)\n\n    if output == 'm3u':\n        p = '#EXTM3U\\n'\n        p += '\\n'.join([track['path'] for track in tracks])\n        print(p)\n        return\n\n    if output == 'json':\n        print(json.dumps(tracks))\n        return\n\n    if output == 'table':\n        print_playlist(tracks)\n\n\n@cli.command(help='Generate bests playlists with some rules')\n@click.option('--prefix', envvar='MB_PREFIX', help=\"Append prefix before each path (implies relative)\", default='')\n@click.option('--suffix', envvar='MB_SUFFIX', help=\"Append this suffix to playlist name\", default='')\n@folder_argument\n@dry_option\n@user_options\n@music_filter_options\ndef bests(user, dry, folder, prefix, suffix, music_filter):\n    if prefix:\n        music_filter = attr.evolve(music_filter, relative=True)\n        if not prefix.endswith('/'):\n            prefix += '/'\n    playlists = user.bests(music_filter)\n    with Conf.progressbar(max_value=len(playlists)) as pbar:\n        for p in playlists:\n            try:\n                playlist_filepath = os.path.join(folder, p['name'] + suffix + '.m3u')\n                content = textwrap.indent(p['content'], prefix, lambda line: line != '#EXTM3U\\n')\n                if dry:\n                    logger.info(f'DRY RUN: Writing playlist to {playlist_filepath} with content:\\n{content}')\n                    continue\n                try:\n                    with codecs.open(playlist_filepath, 'w', \"utf-8-sig\") as playlist_file:\n                        logger.debug(f'Writing playlist to {playlist_filepath} with content:\\n{content}')\n                        playlist_file.write(content)\n                except (OSError, LookupError, ValueError, UnicodeError) as e:\n                    logger.warning(f'Unable to write playlist to {playlist_filepath} because of {e}')\n            finally:\n                pbar.value += 1\n                pbar.update()\n\n\n@cli.command(aliases=['play'], help='Music player')\n@user_options\n@music_filter_options\ndef player(user, music_filter):\n    if not Conf.config.quiet:\n        progressbar.streams.unwrap_stderr()\n        progressbar.streams.unwrap_stdout()\n    try:\n        tracks = user.do_filter(music_filter)\n        play(tracks)\n    except io.UnsupportedOperation:\n        logger.critical('Unable to load 
UI')\n\n\n@cli.command(aliases=['consistency'], help='Check music consistency')\n@checks_and_fix_options\n@dry_option\n@user_options\n@music_filter_options\ndef inconsistencies(user, dry, fix, checks, music_filter):\n tracks = user.do_filter(music_filter)\n pt = PrettyTable([\"Folder\", \"Path\", \"Inconsistencies\"])\n for t in tracks:\n try:\n m = File(t['path'], t['folder'])\n if fix:\n m.fix(dry=dry, checks=checks)\n if m.inconsistencies:\n pt.add_row([m.folder, m.path, ', '.join(m.inconsistencies)])\n except (OSError, mutagen.MutagenError):\n pt.add_row([t['folder'], t['path'], \"could not open file\"])\n print(pt)\n","sub_path":"musicbot/commands/local.py","file_name":"local.py","file_ext":"py","file_size_in_byte":10663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"197279880","text":"#!/usr/bin/python3.7\n# -*- coding: utf-8 -*-\n\"\"\"\nHelper functions related to MySQL\n\nCreated on Wed Jul 15 11:00:42 2020\n\n@author: V.R.Marcelino\n\"\"\"\n\nimport mysql.connector\n\n### SQL settings\nMYSQL_HOST = 'localhost'\nMYSQL_USER = 'root'\nMYSQL_PWD = '4*4Genomes'\nMYSQL_DB = 'genome_sequencing'\n\n\n### Function to connect to the database\ndef db_connection():\n # from Sam's script\n # try to connect to the database otherwise die\n connection = None\n try:\n # SQLCONNECT: for adding sample and raw files\n connection = mysql.connector.connect(host=MYSQL_HOST, user=MYSQL_USER,\n password=MYSQL_PWD, db=MYSQL_DB)\n return connection\n\n except:\n errmsg = \"ERROR: Unable to connect to the database\"\n print(errmsg)\n exit(-1)\n\n\n### Function to get a tupple of the editable column names\n### excludes the first (idxxx) and the last (timestamp) columns\ndef get_col_names(table_name, in_cursor):\n col_names_l = []\n in_cursor.execute(\"SHOW COLUMNS FROM {};\".format(table_name))\n\n for x in in_cursor:\n col_names_l.append(x[0])\n\n del col_names_l[0] # skip the first header (id, automatically generated)\n del col_names_l[-1] # skip the last header (timestamp, automatically generated)\n col_names = tuple(col_names_l)\n return (col_names)\n\n\n### Function to find numeric identifiers for column names\n### Requires a list of unique names, the table storing the unique ids and the cursor\n### returns a dictionary with names and corresponding ids.\ndef get_ids(names, table, cursor):\n names2ids = {}\n\n # guess table and idnames\n idname = \"id\" + table\n\n if table == 'pure_culture':\n col_name = 'ausmicc_name'\n else:\n col_name = table + \"_name\"\n\n for name in names:\n query = \"SELECT {} FROM {} WHERE {} = '{}'\".format(idname, table, col_name, name)\n\n # try to fetch data, return an error if more than one id is found per sample\n cursor.execute(query)\n wanted_ids = cursor.fetchall()\n if len(wanted_ids) > 1:\n print(\"more than one entry found for name %s\" % (name))\n exit(-1)\n elif len(wanted_ids) == 0:\n print(\n \"\\nSample %s not found in the database - make sure it has been added before and that there are no differences in the name (e.g. A1 vs A01)\\n\" % (\n name))\n else:\n names2ids[name] = wanted_ids[0][0]\n\n return (names2ids)\n\n","sub_path":"ausmicc_f_db_connection.py","file_name":"ausmicc_f_db_connection.py","file_ext":"py","file_size_in_byte":2488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"446073196","text":"'''\nThis code is for training either modified VGG16 \nor a fully connected neural network on the FashionMNIST\ndataset. 
Included help functions check accuracy, \nload model, save model, etc. Depending on what \nregularization technique you want to use, set\ndropout rate and weight_decay for l2 regularization\n\n'''\n\nimport torchvision.models as models\nimport torch.nn as nn\nimport torch\nimport torchvision\nimport torchvision.transforms as transforms\nfrom torch.utils.data import DataLoader\nfrom simple_fullynet import fullyNet\n\n# Train CIFAR10 with a CNN or Fully Connected network\ntrain_CNN = False\ntrain_FC = True\nassert (train_CNN or train_FC) == 1 and (train_CNN and train_FC) == 0 # must train on either FC or CNN\n\nclass CNN_FashionMNIST(object):\n def __init__(self):\n self.learning_rate = 0.001\n self.dropout = 0.0\n self.weight_decay = 0.0\n self.num_epochs = 100000\n self.batch_size = 64\n self.num_workers = 0\n self.device = 'cuda' if torch.cuda.is_available() else 'cpu'\n self.dtype = torch.float32\n self.save_model = False\n self.shuffle = True\n self.pin_memory = True\n self.checkpoint_file = 'checkpoint/MNISTFashion_VGG16'\n \n def setup_model(self, drop_rate):\n if train_CNN:\n # Initialize modified VGG16\n model = models.vgg16(pretrained=True)\n model.features[0] = nn.Conv2d(in_channels=1,out_channels=64,kernel_size=3,stride=1,padding=1)\n model.features[4] = nn.Identity()\n model.features[16] = nn.Identity()\n model.features[23] = nn.Identity()\n model.classifier[6] = nn.Linear(in_features=4096, out_features=10, bias=True)\n model.classifier[2] = nn.Dropout(p=drop_rate)\n model.classifier[5] = nn.Dropout(p=drop_rate)\n \n elif train_FC:\n # Initialize Fully Connected\n model = fullyNet(input_size=28*28*1, drop_rate=drop_rate, init_weights=True)\n \n return model\n \n def load_data(self):\n self.transform_train, self.transform_test = self.transformations()\n train_data, validation_data = torch.utils.data.random_split(torchvision.datasets.FashionMNIST('./fashionMNIST', train=True, transform=self.transform_train), [50000, 10000]) \n test_data = torchvision.datasets.FashionMNIST('./fashionMNIST', train=False, transform=self.transform_train)\n \n train_loader = DataLoader(dataset = train_data, batch_size = self.batch_size, num_workers = self.num_workers)\n validation_loader = DataLoader(dataset = validation_data, batch_size = self.batch_size, num_workers = self.num_workers)\n test_loader = DataLoader(dataset = test_data, batch_size = self.batch_size, num_workers = self.num_workers)\n \n return train_loader, validation_loader, test_loader\n \n # Mean, std values previously computed from dataset\n def transformations(self):\n transform_train = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.1307,), (0.3081,)),\n ])\n \n transform_test = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.1307,), (0.3081,)),\n ])\n\n return transform_train, transform_test\n \n def check_accuracy(self, loader, model):\n num_correct = 0\n num_samples = 0\n model.eval() # set model to evaluation mode\n \n with torch.no_grad():\n for x, y in loader:\n x = x.to(device=self.device, dtype=self.dtype) # move to device, e.g. 
GPU\n y = y.to(device=self.device, dtype=torch.long)\n \n if train_FC:\n x = x.reshape(x.shape[0], -1)\n \n scores = model(x)\n _, preds = scores.max(1)\n num_correct += (preds == y).sum()\n num_samples += preds.size(0)\n acc = (float(num_correct) / num_samples) * 100.0\n \n print('Got %d / %d correct (%.2f)' % (num_correct, num_samples, acc))\n \n model.train() # set model to training mode again\n return acc\n \n def save_checkpoint(self, filename, model, optimizer, epoch):\n save_state = {\n 'state_dict' : model.state_dict(),\n 'epoch' : epoch + 1,\n 'optimizer' : optimizer.state_dict(),\n }\n print()\n print('Saving current parameters')\n print('___________________________________________________________')\n \n torch.save(save_state, filename)\n \n def load_model(self, model, optimizer, checkpoint_file):\n checkpoint = torch.load(checkpoint_file)\n model.load_state_dict(checkpoint['state_dict'])\n optimizer.load_state_dict(checkpoint['optimizer'])\n \n #Update lr rate and weight decay when loaded model\n for param_group in optimizer.param_groups:\n param_group['lr'] = self.learning_rate\n param_group['weight_decay'] = self.weight_decay\n \n print(\"=> loaded checkpoint\")\n \n def main(self):\n model = self.setup_model(self.dropout).to(self.device)\n criterion = nn.CrossEntropyLoss()\n optimizer = torch.optim.SGD(model.parameters(), lr=self.learning_rate, weight_decay=self.weight_decay)\n train_loader, validation_loader, test_loader = self.load_data()\n \n ## Uncomment if load model\n #self.load_model(model, optimizer, self.checkpoint_file)\n\n for epoch in range(self.num_epochs):\n num_correct, total_checked = 0, 0\n losses = []\n \n for batch_idx, (data,target) in enumerate(train_loader):\n data = data.to(device=self.device, dtype=self.dtype)\n target = target.to(device=self.device, dtype=torch.long)\n \n if train_FC:\n data = data.reshape(data.shape[0], -1)\n \n #forward prop\n scores = model(data)\n loss = criterion(scores, target)\n losses.append(loss.item()) # add to keep track of loss\n \n #backward pass\n optimizer.zero_grad() # Zero gradients from prev. 
batch\n loss.backward() # Backpropogation\n optimizer.step() # GD step\n \n # For running training accuracy accuracy, NOTE:\n # Running training accuracy is not accurate (and especially not)\n # after a single epoch, but saves on compute\n _, preds = scores.max(1)\n num_correct += (preds == target).sum()\n total_checked += preds.size(0)\n \n if self.save_model:\n self.save_checkpoint(self.checkpoint_file, model, optimizer, epoch)\n \n # Print metrics after 1 training epoch\n print(f'Mean loss this epoch: {sum(losses)/len(losses):.4f}')\n print('VALIDATION:')\n self.check_accuracy(validation_loader, model)\n print(f'Accuracy Training: {float(num_correct)/float(total_checked):.4f}')\n print('\\n')\n \n \ntraining = CNN_FashionMNIST()\ntraining.main()\n","sub_path":"Courses/BachelorThesis/PyTorch/FashionMNIST/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"203911379","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /Users/zhanghang/Desktop/workspace/molbase/molbase_2017_7_3/release/easyspider/middlewares/redirect.py\n# Compiled at: 2017-09-08 23:28:21\nimport logging\nfrom six.moves.urllib.parse import urljoin\nfrom scrapy.downloadermiddlewares.redirect import BaseRedirectMiddleware\nlogger = logging.getLogger(__name__)\n\nclass myBaseRedirectMiddleware(BaseRedirectMiddleware):\n\n def _redirect(self, redirected, request, spider, reason, response):\n ttl = request.meta.setdefault('redirect_ttl', self.max_redirect_times)\n redirects = request.meta.get('redirect_times', 0) + 1\n if ttl and redirects <= self.max_redirect_times:\n redirected.meta['redirect_times'] = redirects\n redirected.meta['redirect_ttl'] = ttl - 1\n redirected.meta['redirect_urls'] = request.meta.get('redirect_urls', []) + [\n request.url]\n redirected.priority = request.priority + self.priority_adjust\n logger.debug('Redirecting (%(reason)s) to %(redirected)s from %(request)s', {'reason': reason, 'redirected': redirected, 'request': request})\n return redirected\n else:\n response.request = request.copy()\n spider.report_this_crawl_2_log(response, 'Discarding %(request)s: max redirections reached' % {'request': request})\n return response\n\n\nclass directReturnRedirectMiddleware(myBaseRedirectMiddleware):\n \"\"\"Handle redirection of requests based on response status and meta-refresh html tag\"\"\"\n\n def process_response(self, request, response, spider):\n if request.meta.get('dont_redirect', False):\n return response\n if request.method == 'HEAD':\n if response.status in (301, 302, 303, 307) and 'Location' in response.headers:\n redirected_url = urljoin(request.url, response.headers['location'])\n redirected = request.replace(url=redirected_url)\n redirected = redirected.replace(dont_filter=True)\n return self._redirect(redirected, request, spider, response.status, response)\n else:\n return response\n\n if response.status in (302, 303) and 'Location' in response.headers:\n redirected_url = urljoin(request.url, response.headers['location'])\n redirected = self._redirect_request_using_get(request, redirected_url)\n redirected = redirected.replace(dont_filter=True)\n return self._redirect(redirected, request, spider, response.status, response)\n if response.status in (301, 307) and 'Location' in response.headers:\n redirected_url = urljoin(request.url, 
response.headers['location'])\n            redirected = request.replace(url=redirected_url)\n            redirected = redirected.replace(dont_filter=True)\n            return self._redirect(redirected, request, spider, response.status, response)\n        return response","sub_path":"pycfiles/easyspider-1.4.5-py2-none-any/redirect.py","file_name":"redirect.py","file_ext":"py","file_size_in_byte":3066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"464524140","text":"# So the scenario is: we have a character who has some health.\n# Let's say he has 50 points.\n# We imagine that this character has walked over a health potion, and the health potion magically increases\n# their health by a random amount (in this case quite a lot).\n# The game also has three difficulty settings: easy, medium and hard.\n# Depending on which difficulty is selected, the amount of health that the person gets from the potion\n# will change, and it will be less on higher difficulties.\n\nimport random\n\nhealth = 50\n\n#difficulty = 1(easy), 2(medium), 3(hard)\ndifficulty = 1\n\npotionHealth = int(random.randint(25, 50) / difficulty)\nhealth = health + potionHealth\nprint(health)\n\n\n","sub_path":"HealthPotion.py","file_name":"HealthPotion.py","file_ext":"py","file_size_in_byte":690,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"414272320","text":"from django.urls import path\nfrom django.conf import settings\nfrom django.conf.urls.static import static\nfrom .views import *\n\n\n\nurlpatterns = [\n    path('', index, name='home'),\n    path('login', login, name='login'),\n    path('logout', logout, name='logout'),\n    path('home', home, name='home_logado'),\n    path('postar', postar, name='postar'),\n    path('grupos', grupos, name='grupos'),\n    path('sair_grupo/', sair_grupo, name='sair_grupo'),\n    path('entrar_grupo/', entrar_grupo, name='entrar_grupo'),\n    path('postagem//editar', postar_editar, name='postagem_editar'),\n    path('postagem//deletar', postar_deletar, name='postagem_deletar'),\n    path('pesquisar/amigo', pesquisar_amigo, name='pesquisar_amigo'),\n    path('grupos/add_grupo', add_grupo, name='add_grupo'),\n    path('convidar/', convidar, name='convidar'),\n    path('usuario//perfil', perfil, name='perfil'),\n    path('amigos/convites', convites, name='convites'),\n    path('amigos/convites/aceitar/', aceitar, name='aceitar'),\n]\n\nif settings.DEBUG:\n    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n","sub_path":"social/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1160,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"206979936","text":"from django.conf.urls import url\nfrom . 
import views\n\napp_name = 'calculation'\n\nurlpatterns = [\n    # GENERAL URLS\n    url(r'^$', views.home_view, name='home'),\n    url(r'^about/$', views.about_view, name='about'),\n    url(r'^help/$', views.help_view, name='help'),\n    url(r'^terms/$', views.terms_view, name='terms'),\n    url(r'^privacy/$', views.privacy_view, name='privacy'),\n    url(r'^message-board/$', views.message_board_view, name='message'),\n    # CALCULATION URLS\n    url(r'^calculation/(?P[-\\w]+)/$', views.category_view, name='category'),\n    url(r'^calculation/(?P[-\\w]+)/(?P[-\\w]+)/$', views.calculation_view,\n        name='calculation'),\n    # API URLS\n    url(r'^api/posts/$', views.APIRoutePosts.as_view(), name='api_posts'),\n    url(r'^api/calculation/(?P[-\\w]+)/$', views.APIRouteCategory.as_view(), name='api_category'),\n    url(r'^api/calculation/(?P[-\\w]+)/(?P[-\\w]+)/$',\n        views.APIRouteCalculation.as_view(), name='api_calculation'),\n]\n","sub_path":"calculation/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1063,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"190873572","text":"from scipy.misc import imread\nimport bsc_functions as bsc\nimport numpy as np\nimport cv2\n\ndef main():\n    print (\"BSC (1) or PRZEPLOT (2): \")\n    operation_check = int(input())\n    print (\"Podaj prawdopodobienstwo bledu (0-100): \")\n    fault_prob = float(input())\n\n    img = cv2.imread(\"zdjecie.png\", 0)\n\n    bits = bsc.imageToBitArray(img) # conversion to a bit array\n    # create the interleaving for the read-in bit sequence, if the interleaved method was chosen\n    if (operation_check == 2):\n        bits = bsc.imageToBitArrayTrestle(bits)\n\n    bsc.saveToFile(bits, 'start.txt') # save to file\n    \n\n    bits_errors = bsc.generateErrors(bits, fault_prob, 30) # generate errors\n    bsc.saveToFile(bits_errors, 'wynik.txt') # save the bits with errors to file\n\n    # compare the bits without errors and those with errors\n    incorrect_bits_rate, incorrect_byte_rate = bsc.countErrors(bsc.readFromFile('start.txt'), bsc.readFromFile('wynik.txt'))\n    print(\"Procent prawidlowo przeslanych pikseli (ciag 8 bitów): %.3f%%\" %incorrect_byte_rate)\n    print(\"Procent prawidlowo przeslanych bitów: %.3f%%\" %incorrect_bits_rate)\n\n    # xbytes = bsc.bitsToBytes(bits_errors)\n    # decode the interleaving (to reconstruct the image), if it was applied earlier\n    if (operation_check == 2):\n        bits_errors = bsc.decodeTrestle(bits_errors)\n    xbytes = bsc.bitsToBytes(bits_errors)\n    bsc.bytesToImg(xbytes, 'wynik.png')\n\nmain()","sub_path":"bsc2.py","file_name":"bsc2.py","file_ext":"py","file_size_in_byte":1476,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"455741221","text":"from phase import *\nfrom Formula import *\nfrom GEExpansion import *\n\n\ndef compare_formula(substance_a, substance_b):\n    if len(substance_a.formula.elements) != len(substance_b.formula.elements):\n        return False\n    all_good = True\n    for i in range(len(substance_a.formula.elements)):\n        if substance_a.formula.coefficients[i] != substance_b.get_element(substance_a.formula.elements[i]):\n            all_good = False\n    return all_good\n\n\nclass Substance():\n    def __init__(self, filename):\n        self.name = None\n        self.phase = None\n        self.formula = None\n        self.ge_expansions = []\n        substance_file = open(filename, 'r')\n        for string in substance_file:\n            tokens = string.lower().split(\"=\")\n            command = tokens[0].strip()\n            parameters = tokens[1].strip()\n            if command == \"name\":\n                self.name = parameters\n            elif command == \"phase\":\n                self.phase = 
phase_ok(parameters)\n elif command == \"range\":\n self.ge_expansions.append(GEEexpansion(parameters))\n elif command == \"stochiometry\":\n self.formula = Formula(parameters)\n\n def contains_element(self, some_element):\n\n \"\"\"\n Checks, whether the substance contains named element\n @param some_element: a string, symbol of element (\"si\", \"h\", etc)\n \"\"\"\n for element in self.formula.elements:\n if element.name == some_element:\n return True\n return False\n\n def get_element(self, some_element):\n\n \"\"\"\n Checks, whether the substance contains named element\n @param some_element: a string, symbol of element (\"si\", \"h\", etc)\n \"\"\"\n for i in range(len(self.formula.elements)):\n if self.formula.elements[i] == some_element:\n return self.formula.coefficients[i]\n return 0\n\n\n def has_data(self, temperature):\n for ge_expansion in self.ge_expansions:\n if ge_expansion.have_data(temperature):\n return True\n return False\n\n def GE(self, temperature):\n print(self.ge_expansions[0].GE(500))\n\n for ge_expansion in self.ge_expansions:\n if ge_expansion.have_data(temperature):\n return ge_expansion.GE(temperature)\n return None\n\n def Enthropy(self, temperature):\n for ge_expansion in self.ge_expansions:\n if ge_expansion.have_data(temperature):\n return ge_expansion.Enthropy(temperature)\n return None\n\n def Enthalpy(self, temperature):\n for ge_expansion in self.ge_expansions:\n if ge_expansion.have_data(temperature):\n return ge_expansion.Enthalpy(temperature)\n return None\n","sub_path":"Substance.py","file_name":"Substance.py","file_ext":"py","file_size_in_byte":2785,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"269498856","text":"#!/usr/bin/python3\n\"\"\"\nScript\n\"\"\"\n\n\nif __name__ == \"__main__\":\n \"\"\"request\"\"\"\n import requests\n from sys import argv\n res = requests.get(argv[1])\n if res.status_code >= 400:\n print(\"Error code: {}\".format(res.status_code))\n else:\n print(res.text)\n","sub_path":"0x11-python-network_1/7-error_code.py","file_name":"7-error_code.py","file_ext":"py","file_size_in_byte":279,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"629188124","text":"from preprocess import *\nimport keras\nfrom keras.models import Sequential, Model\nfrom keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D, Input\nfrom keras.utils import to_categorical\nimport wandb\nfrom wandb.keras import WandbCallback\n\nwandb.init()\nconfig = wandb.config\n\nconfig.max_len = 11\nconfig.buckets = 20\n\n\n# Save data to array file first\n#save_data_to_array(max_len=config.max_len, n_mfcc=config.buckets)\n\nlabels = [\"bed\", \"happy\", \"cat\"]\n\n# # Loading train set and test set\nX_train, X_test, y_train, y_test = get_train_test()\n\n# # Feature dimension\nchannels = 1\nconfig.epochs = 50\nconfig.batch_size = 100\n\nnum_classes = 3\nprint(\"Before \", X_train.shape)\nX_train = X_train.reshape(\n X_train.shape[0], config.buckets, config.max_len, channels)\nX_test = X_test.reshape(\n X_test.shape[0], config.buckets, config.max_len, channels)\nprint(\"After \", X_train.shape)\n\ny_train_hot = to_categorical(y_train)\ny_test_hot = to_categorical(y_test)\n\n#model = Sequential()\n#model.add(Flatten(input_shape=(config.buckets, config.max_len, channels)))\n#model.add(Dense(num_classes, activation='softmax'))\n\ninp = Input(shape=(config.buckets, config.max_len, channels))\nconv_1 = Conv2D(512, (3,3), padding='valid', 
activation='relu')(inp)\nmax_1 = MaxPooling2D(pool_size=(2,2))(conv_1)\ndrop_1 = Dropout(0.30)(max_1)\n\nconv_2 = Conv2D(175, (3,3), padding='valid', activation='relu')(drop_1)\n#max_2 = MaxPooling2D(pool_size=(2,2))(conv_2)\ndrop_2 = Dropout(0.30)(conv_2)\n\nconv_3 = Conv2D(96, (3,3), padding='same', activation='relu')(drop_2)\ndrop_3 = Dropout(0.30)(conv_3)\n\nflat_1 = Flatten()(drop_3)\ndrop_6 = Dropout(0.3)(flat_1)\ndense_1 = Dense(200, activation='relu')(drop_6)\ndrop_4 = Dropout(0.30)(dense_1)\ndense_2 = Dense(100, activation=\"relu\")(drop_4)\ndrop_5 = Dropout(0.25)(dense_2)\ndense_3 = Dense(num_classes, activation='softmax')(drop_5)\nmodel = Model(inp, dense_3)\n\nmodel.compile(loss=\"categorical_crossentropy\",\n optimizer=\"adam\",\n metrics=['accuracy'])\nconfig.total_params = model.count_params()\n\n\nmodel.fit(X_train, y_train_hot, batch_size=config.batch_size, epochs=config.epochs, validation_data=(X_test, y_test_hot), callbacks=[WandbCallback(data_type=\"image\", labels=labels)])\n\n","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":2225,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"451877906","text":"from bs4 import BeautifulSoup\nimport requests\n\nuserful_herfs_raw = []\nuserful_herfs = []\ninfo = []\nurls = ['http://bj.58.com/pbdn/0/pn{}/'.format(str(i)) for i in range(3)]\n\n\"\"\"得到有用链接 \"\"\"\ndef geturls(url):\n web_data = requests.get(url)\n soup = BeautifulSoup(web_data.text,'lxml')\n userful = soup.select('a[data-addtype=\"level2\"]')\n for i in userful:\n userful_herfs_raw.append(i.get('href'))\n\n\n\"\"\"爬取详细信息\"\"\"\ndef getdata(url):\n web_data = requests.get(url)\n soup = BeautifulSoup(web_data.text,'lxml')\n aclasses = soup.select('div.breadCrumb.f12 > span:nth-of-type(3) > a')\n titles = soup.select('div.person_add_top.no_ident_top > div.per_ad_left > div.col_sub.mainTitle > h1')\n dates = soup.select('ul.mtit_con_left.fl > li.time')\n prices = soup.select('div.person_add_top.no_ident_top > div.per_ad_left > div.col_sub.sumary > ul > li:nth-of-type(1) > div.su_con')\n chengshes = soup.select('div.person_add_top.no_ident_top > div.per_ad_left > div.col_sub.sumary > ul > li:nth-of-type(2) > div.su_con > span')\n locates = soup.select('div.person_add_top.no_ident_top > div.per_ad_left > div.col_sub.sumary > ul > li:nth-of-type(3) > div.su_con > span > a')\n counts = soup.select('#totalcount')\n\n for aclass,title,date,price,chengshe,locate,count in zip(aclasses,titles,dates,prices,chengshes,locates,counts):\n data={\n 'class': aclass.get_text(),\n 'title':title.get_text(),\n 'date':date.get_text(),\n 'price':price.get_text(),\n '成色':chengshe.get_text(),\n 'locate':locate.get_text(),\n 'count':count.get_text()\n }\n info.append(data)\n\nfor url in urls:\n geturls(url)\n\nuserful_herfs = list(set(userful_herfs_raw))\n\nfor k in userful_herfs:\n print(k)\n\nfor userful_herf in userful_herfs:\n getdata(userful_herf)\n\nfor j in info:\n print (j['class'].strip(),j['title'].strip(),j['date'].strip(),j['price'].strip(),j['成色'].strip(),j['locate'].strip(),j['count'].strip())\n\n\"\"\"\nhttp://jst1.58.com/counter?infoid=26088204291258&userid=&uname=&sid=517158095&lid=1&px=&cfpath=5,38484\n\"\"\"\n","sub_path":"script/58.py","file_name":"58.py","file_ext":"py","file_size_in_byte":2154,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"210885004","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Mar 22 12:26:52 
2019\n\n@author: rpsworker\n\"\"\"\n# %%\n\nimport pickle\nimport itertools\nimport random\nfrom tqdm import tqdm\nfrom sklearn.utils import shuffle\nimport pandas as pd\nimport numpy as np\nimport sys\n\n\n\ndef get_ids(head, word_2_id, dataset=\"WN11\"):\n    id_list = []\n    head = head.split('.')\n    for val in head:\n        val = val.split('_')\n        for v in val:\n            if v in word_2_id.keys():\n                id_list.append(word_2_id[v])\n            else:\n                id_list.append(word_2_id['unk'])\n    return id_list\n\n# def get_ids(head, word_2_id, dataset=\"WN11\"):\n#     temp_list = []\n#     if dataset == \"WN11\":\n#         head = head.strip(\"__\")\n#         en = head.split(\"_\")\n#         for k in en:\n#             if k in word_2_id.keys():\n#                 temp_list.append(word_2_id[k])\n#             else:\n#                 temp_list.append(word_2_id['unk'])\n#     return temp_list\n\n\ndef build_vector(head_list, tail_list, relation_list, y_true, interchange=False):\n    df = pd.DataFrame(columns=[\"Head\", \"relation\", \"tail\", \"score\"])\n    if interchange:\n        df[\"Head\"] = tail_list\n        df[\"relation\"] = relation_list\n        df[\"tail\"] = head_list\n        df[\"score\"] = y_true\n    else:\n        df[\"Head\"] = head_list\n        df[\"relation\"] = relation_list\n        df[\"tail\"] = tail_list\n        df[\"score\"] = y_true\n    return df\n\n\ndef main():\n    DATA_LOCATION = sys.argv[1]\n    STORE_LOCATION = DATA_LOCATION\n    data_location = DATA_LOCATION\n    change = False\n    data_type = sys.argv[2]\n    if data_type == 'train':\n        load_data = pd.read_csv(DATA_LOCATION + \"train.txt\", sep='\t', names=[\"Head\", \"relation\", \"tail\"])\n    if data_type == 'valid':\n        load_data = pd.read_csv(DATA_LOCATION + \"valid.txt\", sep='\t', names=[\"Head\", \"relation\", \"tail\"])\n    if data_type == 'test':\n        load_data = pd.read_csv(DATA_LOCATION + \"valid.txt\", sep='\t', names=[\"Head\", \"relation\", \"tail\"])\n    file_name = sys.argv[3]\n    lower = int(sys.argv[4])\n    upper = int(sys.argv[5])\n    epoch = int(sys.argv[6])\n    head_list = load_data[\"Head\"].tolist()\n    relation_list = load_data[\"relation\"].tolist()\n    tail_list = load_data[\"tail\"].tolist()\n    if not change:\n        relation_dict = pickle.load(open(DATA_LOCATION + \"relation_2_tail.pkl\", \"rb\"))\n    else:\n        relation_dict = pickle.load(open(DATA_LOCATION + \"relation_2_head.pkl\", \"rb\"))\n    word_2_id = pickle.load(open(DATA_LOCATION + \"word_2_id.pkl\", \"rb\"))\n    g = 512\n    head_vec_1 = []\n    tail_vec_1 = []\n    relation_vec = []\n    y_output = []\n    for _ in range(epoch):\n        for k in (range(len(head_list))):\n            head_en = get_ids(head_list[k], word_2_id)\n            tail_en = get_ids(tail_list[k], word_2_id)\n            relation1 = relation_list[k]\n            # print(head_list[k], relation1, tail_list[k])\n            if relation1 == \"gender\":\n                relation_en = get_ids(relation_list[k], word_2_id)\n                head_ = [q for q in itertools.repeat(head_en, times=2)]\n                relation = [q for q in itertools.repeat(relation_en, times=2)]\n                negative_sample = random.sample(relation_dict[relation1], 1)\n            else:\n                relation_en = get_ids(relation_list[k], word_2_id)\n                k1 = random.randint(lower, upper)\n                head_ = [q for q in itertools.repeat(head_en, times=k1)]\n                relation = [q for q in itertools.repeat(relation_en, times=k1)]\n                # negative_sample = random.sample(tail_list, k1-1)\n                key_list = list(relation_dict.keys())\n                key_list.remove(relation1)\n                rel_sample = random.sample(key_list, k1 - 1)\n                negative_sample = []\n                for val in rel_sample:\n                    negative_sample += random.sample(relation_dict[val], 1)\n                    print(\"head: \", head_list[k], \"relation:\", relation1,\n                          \"tail: \", tail_list[k], \"val:\", val, \"neg_sample\", negative_sample[-1])\n            tail_in = []\n            for t in negative_sample:\n                
tail_in.append(get_ids(t, word_2_id))\n tail_in.append(tail_en)\n head_vec_1 += head_\n tail_vec_1 += tail_in\n relation_vec += relation\n y_true = []\n for u in range(len(head_)):\n if tail_in[u] == tail_en:\n y_true.append(1)\n else:\n y_true.append(0)\n y_output += y_true\n print(len(head_list))\n print(change)\n sampled_data = build_vector(head_vec_1, tail_vec_1, relation_vec, y_output, False)\n sampled_data.to_pickle(STORE_LOCATION + file_name, protocol=2)\n\nif __name__ == '__main__':\n main()\n\n","sub_path":"KGE_asLM/data/WN18RR/data_collector.py","file_name":"data_collector.py","file_ext":"py","file_size_in_byte":4755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"468899554","text":"import tkinter as tk\r\nfrom PIL import Image, ImageDraw\r\nimport tensorflow as tf\r\nimport numpy as np\r\nfrom matplotlib import pyplot as plt\r\nfrom tkinter import messagebox\r\nfrom PIL import ImageFilter\r\n\r\ndef NN_minst():\r\n # # Step 1: Initial Setup\r\n # X = tf.placeholder(tf.float32, [None, 784])\r\n # y = tf.placeholder(tf.float32, [None, 10])\r\n #\r\n # L1 = 200\r\n # L2 = 100\r\n # L3 = 60\r\n # L4 = 30\r\n #\r\n # #[784,L1] input , number neutron\r\n # W1 = tf.Variable(tf.truncated_normal([784, L1], stddev=0.1))\r\n # B1 = tf.Variable(tf.zeros([L1]))\r\n # W2 = tf.Variable(tf.truncated_normal([L1, L2], stddev=0.1))\r\n # B2 = tf.Variable(tf.zeros([L2]))\r\n # W3 = tf.Variable(tf.truncated_normal([L2, L3], stddev=0.1))\r\n # B3 = tf.Variable(tf.zeros([L3]))\r\n # W4 = tf.Variable(tf.truncated_normal([L3, L4], stddev=0.1))\r\n # B4 = tf.Variable(tf.zeros([L4]))\r\n # W5 = tf.Variable(tf.truncated_normal([L4, 10], stddev=0.1))\r\n # B5 = tf.Variable(tf.zeros([10]))\r\n #\r\n # # Step 2: Setup Model\r\n # # Y1 = tf.nn.sigmoid(tf.matmul(X, W1) + B1)\r\n # # Y2 = tf.nn.sigmoid(tf.matmul(Y1, W2) + B2)\r\n # # Y3 = tf.nn.sigmoid(tf.matmul(Y2, W3) + B3)\r\n # # Y4 = tf.nn.sigmoid(tf.matmul(Y3, W4) + B4)\r\n # Y1 = tf.nn.relu(tf.matmul(X, W1) + B1)\r\n # Y2 = tf.nn.relu(tf.matmul(Y1, W2) + B2)\r\n # Y3 = tf.nn.relu(tf.matmul(Y2, W3) + B3)\r\n # Y4 = tf.nn.relu(tf.matmul(Y3, W4) + B4)\r\n # Ylogits = tf.matmul(Y4, W5) + B5\r\n # yhat = tf.nn.softmax(Ylogits)\r\n #\r\n # # Step 3: Loss Functions\r\n # loss = tf.reduce_mean(\r\n # tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=Ylogits))\r\n #\r\n # # Step 4: Optimizer\r\n # #optimizer = tf.train.GradientDescentOptimizer(learning_rate)\r\n # optimizer = tf.train.AdamOptimizer()\r\n # train = optimizer.minimize(loss)\r\n #\r\n # # accuracy of the trained model, between 0 (worst) and 1 (best)\r\n # is_correct = tf.equal(tf.argmax(y,1),tf.argmax(yhat,1))\r\n # accuracy = tf.reduce_mean(tf.cast(is_correct,tf.float32))\r\n\r\n # sess = tf.Session()\r\n # saver = tf.train.Saver()\r\n # init = tf.global_variables_initializer()\r\n # sess.run(init)\r\n #\r\n # # Step 5: Restore\r\n # saver.restore(sess, \"./tmp/mnist.ckpt\")\r\n # print(\"Model Restore: \")\r\n #---------------------------------------------------------------\r\n\r\n sess = tf.Session()\r\n # saver = tf.train.Saver()\r\n init = tf.global_variables_initializer()\r\n sess.run(init)\r\n saver = tf.train.import_meta_graph('./tmp_a/mnist.ckpt.meta')\r\n saver.restore(sess, tf.train.latest_checkpoint('./tmp_a'))\r\n print(\"Model Restore: \")\r\n graph = tf.get_default_graph()\r\n X = graph.get_tensor_by_name(\"X_input:0\")\r\n result = graph.get_tensor_by_name(\"yhat_output:0\")\r\n return sess, result, X\r\n\r\n\r\nclass 
ImageGenerator:\r\n def __init__(self,parent,posx,posy,*kwargs):\r\n self.parent = parent\r\n self.posx = posx\r\n self.posy = posy\r\n self.sizex = 200\r\n self.sizey = 200\r\n self.b1 = \"up\"\r\n self.xold = None\r\n self.yold = None \r\n self.drawing_area=tk.Canvas(self.parent,width=self.sizex,height=self.sizey)\r\n self.drawing_area.place(x=self.posx,y=self.posy)\r\n self.drawing_area.bind(\"\", self.motion)\r\n self.drawing_area.bind(\"\", self.b1down)\r\n self.drawing_area.bind(\"\", self.b1up)\r\n self.button=tk.Button(self.parent,text=\"Done!\",width=10,bg='white',command=self.save)\r\n self.button.place(x=self.sizex/7,y=self.sizey+20)\r\n self.button1=tk.Button(self.parent,text=\"Clear!\",width=10,bg='white',command=self.clear)\r\n self.button1.place(x=(self.sizex/7)+80,y=self.sizey+20)\r\n\r\n self.image=Image.new(\"RGB\",(200,200),(255,255,255))\r\n self.draw=ImageDraw.Draw(self.image)\r\n self.nnsess= NN_minst()\r\n\r\n # def save(self):\r\n # filename = \"temp.jpg\"\r\n # self.image.save(filename)\r\n # self.minst_nn_pred()\r\n\r\n def save(self):\r\n filename = \"temp.jpg\"\r\n self.image.save(filename)\r\n img = Image.open(filename) # image extension *.png,*.jpg\r\n new_width = 28\r\n new_height = 28\r\n img = img.resize((new_width, new_height), Image.ANTIALIAS)\r\n img2 = img.convert('L')\r\n img2.save('mnist.jpg') # format may what u want ,*.png,*jpg,*.gif\r\n self.minst_nn_pred()\r\n \r\n\r\n def clear(self):\r\n self.drawing_area.delete(\"all\")\r\n self.image=Image.new(\"RGB\",(200,200),(255,255,255))\r\n self.draw=ImageDraw.Draw(self.image)\r\n\r\n def b1down(self,event):\r\n self.b1 = \"down\"\r\n\r\n def b1up(self,event):\r\n self.b1 = \"up\"\r\n self.xold = None\r\n self.yold = None\r\n\r\n def motion(self,event):\r\n if self.b1 == \"down\":\r\n if self.xold is not None and self.yold is not None:\r\n event.widget.create_line(self.xold,self.yold,event.x,event.y,smooth='true',width=20,fill='black')\r\n self.draw.line(((self.xold,self.yold),(event.x,event.y)),(0,128,0),width=20)\r\n\r\n self.xold = event.x\r\n self.yold = event.y\r\n\r\n def minst_nn_pred(self):\r\n imgnew = Image.open('mnist.jpg')\r\n im = np.asarray(imgnew)\r\n print(im.shape)\r\n im = np.expand_dims(im, axis=0)\r\n print(im.shape)\r\n im = im.reshape(1,784)\r\n print(im.shape)\r\n classification = self.nnsess[0].run(tf.argmax(self.nnsess[1], 1), feed_dict={self.nnsess[2]: im})\r\n print('predicted', classification[0])\r\n messagebox.showinfo(\"predicted number\", str(classification[0]))\r\n # plt.imshow(im.reshape(28, 28), cmap=plt.cm.binary)\r\n # plt.show()\r\n\r\n # def minst_nn_pred(self):\r\n # imgnew = Image.open('temp.jpg').convert('L')\r\n # imgnew = imgnew.resize((28, 28), Image.ANTIALIAS).filter(ImageFilter.SHARPEN)\r\n # im = np.asarray(imgnew)\r\n # im = np.expand_dims(im, axis=0)\r\n # im = im.reshape(1,784)\r\n # print(im.shape)\r\n # classification = self.nnsess[0].run(tf.argmax(self.nnsess[1], 1), feed_dict={self.nnsess[2]: im})\r\n # print('predicted', classification[0])\r\n # messagebox.showinfo(\"predicted number\", str(classification[0]))\r\n # # plt.imshow(im.reshape(28, 28), cmap=plt.cm.binary)\r\n # # plt.show()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n root=tk.Tk()\r\n root.wm_geometry(\"%dx%d+%d+%d\" % (250, 250, 10, 10))\r\n root.config(bg='white')\r\n ImageGenerator(root,10,10)\r\n 
root.mainloop()","sub_path":"exercises/Module_3_DeepLearning/painting_mnist_nnA.py","file_name":"painting_mnist_nnA.py","file_ext":"py","file_size_in_byte":6561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"61794028","text":"# -*- coding: utf-8 -*-\n# pylint: disable=all\n\"\"\"\npytests for multi time resource handlers\n\"\"\"\nimport numpy as np\nimport os\nfrom pandas.testing import assert_frame_equal\nimport pytest\n\nfrom rex import TESTDATADIR\nfrom rex.multi_time_resource import (MultiTimeH5, MultiTimeNSRDB,\n MultiTimeWindResource)\nfrom rex.resource import Resource\n\n\n@pytest.fixture\ndef MultiTimeNSRDB_res():\n \"\"\"\n Init NSRDB resource handler\n \"\"\"\n path = os.path.join(TESTDATADIR, 'nsrdb/ri_100_nsrdb_*.h5')\n\n return MultiTimeNSRDB(path)\n\n\n@pytest.fixture\ndef MultiTimeNSRDB_list_res():\n \"\"\"\n Init NSRDB resource handler\n \"\"\"\n files = [os.path.join(TESTDATADIR, 'nsrdb/ri_100_nsrdb_2012.h5'),\n os.path.join(TESTDATADIR, 'nsrdb/ri_100_nsrdb_2013.h5')]\n\n return MultiTimeNSRDB(files)\n\n\n@pytest.fixture\ndef MultiTimeWind_res():\n \"\"\"\n Init WindResource resource handler\n \"\"\"\n path = os.path.join(TESTDATADIR, 'wtk/ri_100_wtk_*.h5')\n\n return MultiTimeWindResource(path)\n\n\ndef check_res(res_cls):\n \"\"\"\n Run test on len and shape methods\n \"\"\"\n time_index = None\n for file in res_cls.h5_files:\n with Resource(file) as f:\n if time_index is None:\n time_index = f.time_index\n else:\n time_index = time_index.append(f.time_index)\n\n with Resource(res_cls.h5_files[0]) as f:\n meta = f.meta\n\n res_shape = (len(time_index), len(meta))\n\n assert len(res_cls) == len(time_index)\n assert res_cls.shape == res_shape\n\n assert np.all(np.isin(['meta', 'time_index'],\n res_cls.datasets))\n assert np.all(~np.isin(['meta', 'time_index', 'coordinates'],\n res_cls.resource_datasets))\n\n\ndef check_attrs(res_cls, dset):\n \"\"\"\n Check dataset attributes extraction\n \"\"\"\n truth = res_cls.get_attrs(dset=dset)\n test = res_cls.attrs[dset]\n\n msg = \"{} attributes do not match!\".format(dset)\n assert truth == test, msg\n\n truth = res_cls.get_scale_factor(dset)\n test = res_cls.scale_factors[dset]\n\n msg = \"{} scale factors do not match!\".format(dset)\n assert truth == test, msg\n\n truth = res_cls.get_units(dset)\n test = res_cls.units[dset]\n\n msg = \"{} units do not match!\".format(dset)\n assert truth == test, msg\n\n\ndef check_properties(res_cls, dset):\n \"\"\"\n Check dataset properties extraction\n \"\"\"\n shape, dtype, chunks = res_cls.get_dset_properties(dset)\n\n test = res_cls.shapes[dset]\n msg = \"{} shape does not match!\".format(dset)\n assert shape == test, msg\n\n test = res_cls.dtypes[dset]\n msg = \"{} dtype does not match!\".format(dset)\n assert dtype == test, msg\n\n test = res_cls.chunks[dset]\n msg = \"{} chunks do not match!\".format(dset)\n assert chunks == test, msg\n\n\ndef check_meta(res_cls):\n \"\"\"\n Run tests on meta data\n \"\"\"\n with Resource(res_cls.h5_files[0]) as f:\n truth = f.meta\n\n test = res_cls['meta']\n assert_frame_equal(truth, test, check_dtype=False)\n\n test = res_cls.lat_lon\n assert np.allclose(truth[['latitude', 'longitude']].values, test)\n\n\ndef check_time_index(res_cls):\n \"\"\"\n Run tests on time_index\n \"\"\"\n truth = None\n for file in res_cls.h5_files:\n with Resource(file) as f:\n if truth is None:\n truth = f.time_index\n else:\n truth = truth.append(f.time_index)\n\n test = res_cls.time_index\n\n assert np.all(test 
== truth)\n\n\ndef check_dset(res_cls, ds_name):\n \"\"\"\n Run tests on dataset ds_name\n \"\"\"\n truth = []\n for h5 in res_cls.h5._h5_map['h5'].unique():\n truth.append(h5[ds_name])\n\n truth = np.concatenate(truth, axis=0)\n\n test = res_cls[ds_name]\n assert np.allclose(truth, test)\n\n test = res_cls[ds_name, :, 10]\n assert np.allclose(truth[:, 10], test)\n\n test = res_cls[ds_name, :, 10:20]\n assert np.allclose(truth[:, 10:20], test)\n\n test = res_cls[ds_name, :, [1, 3, 5, 7]]\n assert np.allclose(truth[:, [1, 3, 5, 7]], test)\n\n test = res_cls[ds_name, :, [2, 6, 3, 20]]\n assert np.allclose(truth[:, [2, 6, 3, 20]], test)\n\n\ndef test_time_index_error():\n \"\"\"\n Test time_index RuntimeError when file time_index overlap\n \"\"\"\n path = os.path.join(TESTDATADIR, 'wtk/wtk_2010_*m.h5')\n with pytest.raises(RuntimeError):\n with MultiTimeWindResource(path) as f:\n f.time_index # pylint: disable=pointless-statement\n\n\nclass TestMultiTimeNSRDB:\n \"\"\"\n Multi Year NSRDB Resource handler tests\n \"\"\"\n @staticmethod\n def test_res(MultiTimeNSRDB_res):\n \"\"\"\n test NSRDB class calls\n \"\"\"\n check_res(MultiTimeNSRDB_res)\n MultiTimeNSRDB_res.close()\n\n @staticmethod\n def test_meta(MultiTimeNSRDB_res):\n \"\"\"\n test extraction of NSRDB meta data\n \"\"\"\n check_meta(MultiTimeNSRDB_res)\n MultiTimeNSRDB_res.close()\n\n @staticmethod\n def test_time_index(MultiTimeNSRDB_res):\n \"\"\"\n test extraction of NSRDB time_index\n \"\"\"\n check_time_index(MultiTimeNSRDB_res)\n MultiTimeNSRDB_res.close()\n\n @staticmethod\n def test_ds(MultiTimeNSRDB_res, ds_name='dni'):\n \"\"\"\n test extraction of a variable array, attributes, and properties\n \"\"\"\n check_dset(MultiTimeNSRDB_res, ds_name)\n check_attrs(MultiTimeNSRDB_res, ds_name)\n check_properties(MultiTimeNSRDB_res, ds_name)\n MultiTimeNSRDB_res.close()\n\n\nclass TestMultiTimeList:\n \"\"\"\n Test multi time resource handler from list of files\n \"\"\"\n @staticmethod\n def test_res(MultiTimeNSRDB_list_res):\n \"\"\"\n test NSRDB class calls\n \"\"\"\n check_res(MultiTimeNSRDB_list_res)\n MultiTimeNSRDB_list_res.close()\n\n @staticmethod\n def test_meta(MultiTimeNSRDB_list_res):\n \"\"\"\n test extraction of NSRDB meta data\n \"\"\"\n check_meta(MultiTimeNSRDB_list_res)\n MultiTimeNSRDB_list_res.close()\n\n @staticmethod\n def test_time_index(MultiTimeNSRDB_list_res):\n \"\"\"\n test extraction of NSRDB time_index\n \"\"\"\n check_time_index(MultiTimeNSRDB_list_res)\n MultiTimeNSRDB_list_res.close()\n\n @staticmethod\n def test_ds(MultiTimeNSRDB_list_res, ds_name='dni'):\n \"\"\"\n test extraction of a variable array, attributes, and properties\n \"\"\"\n check_dset(MultiTimeNSRDB_list_res, ds_name)\n check_attrs(MultiTimeNSRDB_list_res, ds_name)\n check_properties(MultiTimeNSRDB_list_res, ds_name)\n MultiTimeNSRDB_list_res.close()\n\n\nclass TestMultiTimeWindResource:\n \"\"\"\n Multi Year WindResource Resource handler tests\n \"\"\"\n @staticmethod\n def test_res(MultiTimeWind_res):\n \"\"\"\n test WindResource class calls\n \"\"\"\n check_res(MultiTimeWind_res)\n MultiTimeWind_res.close()\n\n @staticmethod\n def test_meta(MultiTimeWind_res):\n \"\"\"\n test extraction of WindResource meta data\n \"\"\"\n check_meta(MultiTimeWind_res)\n MultiTimeWind_res.close()\n\n @staticmethod\n def test_time_index(MultiTimeWind_res):\n \"\"\"\n test extraction of WindResource time_index\n \"\"\"\n check_time_index(MultiTimeWind_res)\n MultiTimeWind_res.close()\n\n @staticmethod\n def test_ds(MultiTimeWind_res, 
ds_name='windspeed_100m'):\n \"\"\"\n test extraction of a variable array, attributes, and properties\n \"\"\"\n check_dset(MultiTimeWind_res, ds_name)\n check_attrs(MultiTimeWind_res, ds_name)\n check_properties(MultiTimeWind_res, ds_name)\n MultiTimeWind_res.close()\n\n @staticmethod\n def test_new_hubheight(MultiTimeWind_res, ds_name='windspeed_90m'):\n \"\"\"\n test extraction of interpolated hub-height\n \"\"\"\n check_dset(MultiTimeWind_res, ds_name)\n MultiTimeWind_res.close()\n\n\ndef test_map_hsds_files():\n \"\"\"\n Test map hsds files method\n \"\"\"\n files = [f'/nrel/US_wave/West_Coast/West_Coast_wave_{year}.h5'\n for year in range(1979, 2011)]\n hsds_kwargs = {'endpoint': 'https://developer.nrel.gov/api/hsds',\n 'api_key': 'oHP7dGu4VZeg4rVo8PZyb5SVmYigedRHxi3OfiqI'}\n path = '/nrel/US_wave/West_Coast/West_Coast_wave_*.h5'\n hsds_fps = MultiTimeH5._get_file_paths(path, hsds=True,\n hsds_kwargs=hsds_kwargs)\n\n missing = [f for f in files if f not in hsds_fps]\n wrong = [f for f in hsds_fps if f not in files]\n assert not any(missing), 'Missed files: {}'.format(missing)\n assert not any(wrong), 'Wrong files: {}'.format(wrong)\n\n\ndef execute_pytest(capture='all', flags='-rapP'):\n \"\"\"Execute module as pytest with detailed summary report.\n\n Parameters\n ----------\n capture : str\n Log or stdout/stderr capture option. ex: log (only logger),\n all (includes stdout/stderr)\n flags : str\n Which tests to show logs and results for.\n \"\"\"\n\n fname = os.path.basename(__file__)\n pytest.main(['-q', '--show-capture={}'.format(capture), fname, flags])\n\n\nif __name__ == '__main__':\n execute_pytest()\n","sub_path":"tests/test_multi_time_resource.py","file_name":"test_multi_time_resource.py","file_ext":"py","file_size_in_byte":9205,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"186177385","text":"import matplotlib.pyplot as plt\nimport time as tm\nimport os\nimport numpy as np\nimport copy\nimport sys\n\ntest_list = [\"01-main\", \"02-switch\", \"11-join\", \"12-join-main\", \"21-create-many\", \"22-create-many-recursive\", \"23-create-many-once\", \"31-switch-many\", \"32-switch-many-join\", \"51-fibonacci\"]\narg_list = [(4,1), (5,1), (6,1), (7,2), (8,2), (9,1)] #tests which need n arguments to be given !!! 
THIS LIST IS SORTED FOR PERFORMANCES !!!\narg = sys.argv\nnb_test = 10\narg_test = int(arg[1])\nnb_arg = 0\nif (arg_test >= 4):\n nb_arg = arg_list[arg_test-4][1]\nlength = int(arg[2])\ndirectory_thread = \"./install/bin/\"\ndirectory_pthread = \"./install/bin/\"\ndirectory= \"./install/bin/\"\n\n\ndef print_graph_test():\n suffix = \"_pthread\"\n length = len(test_list)\n res = []\n res_pthread = []\n maxi = 0\n arg_pointer = 0\n \n for i in range(length):\n argument = \" \"\n\n if arg_list[arg_pointer][0] == i:\n for j in range(arg_list[arg_pointer][1]):\n argument += \"10\" + \" \"\n arg_pointer += 1\n \n \n tempo = tm.clock_gettime(tm.CLOCK_MONOTONIC)\n os.system(directory_thread + test_list[i] + argument)\n tempo = tm.clock_gettime(tm.CLOCK_MONOTONIC) - tempo\n res += [tempo]\n if tempo > maxi:\n maxi = tempo\n tempo = tm.clock_gettime(tm.CLOCK_MONOTONIC)\n os.system(directory_pthread + test_list[i] + argument)\n tempo = tm.clock_gettime(tm.CLOCK_MONOTONIC) - tempo\n res_pthread += [tempo]\n if tempo > maxi:\n maxi = tempo\n width_ = 0.6\n plt.bar(range(length), res, width = width_, color = 'yellow', edgecolor = 'blue', linewidth = 2)\n plt.bar([x + width_ for x in range(length)], res_pthread, width = width_, color = 'red', edgecolor = 'blue', linewidth = 2)\n plt.show()\n \ndef print_graph_thread():\n res_thread = []\n res_pthread = []\n suffix = \"_pthread\"\n for i in range(length):\n value_thread = 0\n value_pthread = 0\n for j in range(nb_test):\n argument = \" \"\n for k in range(nb_arg):\n argument += str(i) + \" \"\n tempo1 = tm.clock_gettime(tm.CLOCK_MONOTONIC_RAW)\n os.system(directory_thread + test_list[arg_test] + argument)\n tempo2 = tm.clock_gettime(tm.CLOCK_MONOTONIC_RAW)\n tempo = tempo2 - tempo1\n value_thread += tempo\n tempo1 = tm.clock_gettime(tm.CLOCK_MONOTONIC_RAW)\n os.system(directory_pthread + test_list[arg_test] + suffix + argument)\n tempo2 = tm.clock_gettime(tm.CLOCK_MONOTONIC_RAW)\n tempo = tempo2 - tempo1\n value_pthread += tempo\n value_thread = value_thread / nb_test\n value_pthread = value_pthread / nb_test\n res_thread += [value_thread]\n res_pthread += [value_pthread]\n if (arg_test == 9):\n plt.plot(res_thread, color = 'blue', label='thread')\n plt.plot(res_pthread, color='purple', label='pthread')\n else:\n final_res_thread = copy.deepcopy(res_thread)\n for i in range(20,len(res_thread)-20):\n value = 0\n for j in range(40):\n value += res_thread[i-20+j]\n final_res_thread[i] = value/40\n final_res_pthread = copy.deepcopy(res_pthread)\n for i in range(20,len(res_pthread)-20):\n value = 0\n for j in range(40):\n value += res_pthread[i-20+j]\n final_res_pthread[i] = value/40\n plt.plot(final_res_thread, color = 'blue', label='thread')\n plt.plot(final_res_pthread, color='purple', label='pthread')\n plt.legend()\n plt.show()\n\nprint_graph_thread()\n \n","sub_path":"src/graphics_drawing.py","file_name":"graphics_drawing.py","file_ext":"py","file_size_in_byte":3711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"416361613","text":"cases = int(input())\r\n\r\nfor i in range(cases):\r\n name_dates = input().split()\r\n name = name_dates[0]\r\n enrolled_date = name_dates[1].split('/')\r\n date_of_birth = name_dates[2].split('/')\r\n classes = int(name_dates[3])\r\n \r\n if int(enrolled_date[0]) >= 2010:\r\n print(name + \" eligible\")\r\n elif int(date_of_birth[0]) >= 1991:\r\n print(name + \" eligible\")\r\n elif classes >= 41:\r\n print(name + \" ineligible\")\r\n else:\r\n print(name + \" coach 
petitions\")\r\n","sub_path":"Python/Eligibility.py","file_name":"Eligibility.py","file_ext":"py","file_size_in_byte":506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"611687649","text":"class Page():\n def __init__(self, driver):\n\n self.driver = driver\n self._search_bar = None\n self._search_result = None\n self._extras_bar = None\n self._route_bar = None\n\n @property\n def search_bar(self):\n from online.helpers.search_bar import SearchBar\n\n if self._search_bar is None:\n self._search_bar = SearchBar(self.driver, self.driver.find_element_by_css_selector(SearchBar.selectors['self']))\n return self._search_bar\n\n @property\n def search_result(self):\n from online.helpers.search_result import SearchResult\n\n if self._search_result is None:\n self._search_result = SearchResult(self.driver, self.driver.find_element_by_css_selector(SearchResult.selectors['self']))\n return self._search_result\n\n @property\n def extras_bar(self):\n from online.helpers.extras_bar import ExtrasBar\n\n if self._extras_bar is None:\n self._extras_bar = ExtrasBar(self.driver, self.driver.find_element_by_css_selector(ExtrasBar.selectors['extras_button']))\n return self._extras_bar\n\n @property\n def route_bar(self):\n from online.helpers.route_bar import RouteBar\n\n if self._route_bar is None:\n self._route_bar = RouteBar(self.driver, self.driver.find_element_by_css_selector(RouteBar.selectors['route']))\n return self._route_bar\n\n def open(self, url):\n self.driver.get(url)\n\n","sub_path":"online/helpers/page.py","file_name":"page.py","file_ext":"py","file_size_in_byte":1456,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"567033130","text":"# Create three strings using three different methods. Save your result to result_string_1, result_string_2,\n# result_string_3 variables\n\nresult_string_1 = 'hello earth'\nprint(result_string_1)\nresult_string_2 = \"hello dev\"\nprint(result_string_2)\nresult_string_3 = '''hello world '''\nprint(result_string_3)\n\n\n# Enter your first and last name. Join them together with a space in\n# between. Save a result in a variable result_full_name and\n# save the length of the whole name in result_full_name_length variable.\n\nfirst_name = input('enter the first name:')\nlast_name = input('enter the last name:')\nresult_full_name = (first_name + \" \" + last_name)\nprint(result_full_name)\nresult_full_name_length = len(result_full_name)\nprint(result_full_name_length)\n\n# Enter the capital city of California State in lower case. Change the case to title case.\n# Save the result in result_ca_capital variable\ncapital_ca = input('enter the capital city of california :')\nresult_ca_capital = capital_ca.title()\nprint(result_ca_capital)\n\n\n# Enter the name of our planet. Change the case to upper case. 
+{"seq_id":"461928052","text":"from Queue import Queue\n\n#\n# Test a queue\n#\ndef main():\n    q = Queue()\n\n    command = input()\n    while len(command) > 0:\n        print(command + \":\", end=\"\")\n        if command[0] == 'a':  # add\n            item = command.split()[1]\n            q.enqueue(int(item))\n        elif command[0] == 'r':  # remove\n            print(q.dequeue(), end=\"\")\n        else:\n            print(\"Unknown command!\")\n        print(\" _\" if q.isempty() else \" *\")\n        command = input()\n    print()\n\n\nif __name__ == \"__main__\":\n    main()","sub_path":"year2_1819/computer_programming_3_algorithms_data_structures/labs/w3/test2.py","file_name":"test2.py","file_ext":"py","file_size_in_byte":495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
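# NOTE (editor): the Queue module exercised by the test harness above is not
# part of this record. A minimal list-backed class matching the interface the
# test assumes (enqueue/dequeue/isempty) -- an illustrative sketch only:
class Queue:
    def __init__(self):
        self._items = []

    def enqueue(self, item):
        self._items.append(item)   # add at the tail

    def dequeue(self):
        return self._items.pop(0)  # remove from the head (FIFO)

    def isempty(self):
        return len(self._items) == 0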
+{"seq_id":"269306111","text":"#!/usr/bin/env python\n#\n\nfrom copy import deepcopy\n\nimport rospy\n\nfrom sensor_msgs.msg import Imu\nfrom nav_msgs.msg import Odometry\n\nfrom vectornav.msg import sensors\nfrom vectornav.msg import ins\nfrom vectornav.msg import gps\n\nfrom tf.transformations import quaternion_from_euler\n\nimport math\n\n\ndef sub_gpsCB(msg_in):\n    global pub_gps\n    global msg_gps\n\n    msg_gps.header.stamp = msg_in.header.stamp  # time of gps measurement\n    msg_gps.header.frame_id = 'base_footprint'  # the tracked robot frame\n    msg_gps.pose.pose.position.x = msg_in.LLA.x  # x measurement GPS.\n    msg_gps.pose.pose.position.y = msg_in.LLA.y  # y measurement GPS.\n    msg_gps.pose.pose.position.z = msg_in.LLA.z  # z measurement GPS.\n    msg_gps.pose.pose.orientation.x = 0  # identity quaternion: 0\n    msg_gps.pose.pose.orientation.y = 0  # identity quaternion: 0\n    msg_gps.pose.pose.orientation.z = 0  # identity quaternion: 0\n    msg_gps.pose.pose.orientation.w = 1  # identity quaternion: 1\n    # pose.covariance must be a flat 36-element sequence, not a set literal\n    msg_gps.pose.covariance = [1, 0, 0, 0, 0, 0,  # covariance on gps_x\n                               0, 1, 0, 0, 0, 0,  # covariance on gps_y\n                               0, 0, 1, 0, 0, 0,  # covariance on gps_z\n                               0, 0, 0, 99999, 0, 0,  # large covariance on rot x\n                               0, 0, 0, 0, 99999, 0,  # large covariance on rot y\n                               0, 0, 0, 0, 0, 99999]  # large covariance on rot z\n    pub_gps.publish(msg_gps)\n\n\ndef sub_odomCB(msg_in):\n    global pub_odom\n    global msg_odom\n\n    msg_odom = msg_in\n    msg_odom.pose.covariance = [1.0, 0.0, 0.0, 0.0, 0.0, 0.0,\n                                0.0, 1.0, 0.0, 0.0, 0.0, 0.0,\n                                0.0, 0.0, 1.0, 0.0, 0.0, 0.0,\n                                0.0, 0.0, 0.0, 1.0, 0.0, 0.0,\n                                0.0, 0.0, 0.0, 0.0, 1.0, 0.0,\n                                0.0, 0.0, 0.0, 0.0, 0.0, 1.0]\n    msg_odom.header.frame_id = \"odom\"\n    pub_odom.publish(msg_odom)\n\n\ndef sub_insCB(msg_in):\n    global msg_imu\n    global pub_imu\n\n    msg_imu.header.stamp = msg_in.header.stamp\n    msg_imu.header.frame_id = 'base_footprint'\n\n    q = quaternion_from_euler(msg_in.RPY.x/180.0 * math.pi, msg_in.RPY.y/180.0 * math.pi, msg_in.RPY.z/180.0 * math.pi)\n\n    msg_imu.orientation.x = q[0]\n    msg_imu.orientation.y = q[1]\n    msg_imu.orientation.z = q[2]\n    msg_imu.orientation.w = q[3]\n\n    msg_imu.orientation_covariance = [1.0, 0.0, 0.0,\n                                      0.0, 1.0, 0.0,\n                                      0.0, 0.0, 1.0]\n\n    pub_imu.publish(msg_imu)\n\n\nif __name__ == '__main__':\n    rospy.init_node('vectornav_sensor_msgs')\n\n    global pub_odom\n    global pub_imu\n    global pub_gps\n\n    global msg_imu\n    global msg_odom\n    global msg_gps\n\n    msg_odom = Odometry()\n    msg_imu = Imu()\n    msg_gps = Odometry()\n\n    pub_odom = rospy.Publisher(\"/vo\", Odometry, queue_size=10)\n    pub_imu = rospy.Publisher(\"/imu_data\", Imu, queue_size=10)\n    # pub_gps = rospy.Publisher(\"/odom\", Odometry, queue_size=10)\n\n    rospy.Subscriber(\"/zed/odom\", Odometry, sub_odomCB)\n    # rospy.Subscriber(\"/vectornav/gps\", gps, sub_gpsCB)\n    rospy.Subscriber(\"/vectornav/ins\", ins, sub_insCB)\n\n    rospy.spin()\n","sub_path":"scripts/convert_and_fix.py","file_name":"convert_and_fix.py","file_ext":"py","file_size_in_byte":3137,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"279867184","text":"import numpy as np\nimport torch\nimport torch.nn as nn\nimport os\nimport cv2\nimport matplotlib.pyplot as plt\nfrom PIL import Image\nfrom tqdm import tqdm\nfrom models.ENet import ENet\n\n\ndef imreadFloatRgb(fileName):\n    img = cv2.imread(fileName)\n    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB, dst=img)\n    return np.divide(img, 255.0, dtype=np.float32)\n\n\ndef loader(training_path, segmented_path, batch_size, h=512, w=512):\n    filenames_t = os.listdir(training_path)\n    total_files_t = len(filenames_t)\n\n    filenames_s = os.listdir(segmented_path)\n    total_files_s = len(filenames_s)\n\n    assert (total_files_t == total_files_s)\n\n    if str(batch_size).lower() == 'all':\n        batch_size = total_files_s\n\n    assert batch_size <= total_files_s\n\n    while (1):\n        # Choosing random indexes of images and labels\n        batch_idxs = np.random.permutation(total_files_s)[:batch_size]\n\n        inputs = []\n        labels = []\n\n        for jj in batch_idxs:\n            img = imreadFloatRgb(training_path + filenames_t[jj])\n            img = cv2.resize(img, (h, w), cv2.INTER_NEAREST)\n            inputs.append(img)\n\n            # Reading semantic image\n            img = cv2.imread(segmented_path + filenames_s[jj], cv2.IMREAD_GRAYSCALE)\n            img = cv2.resize(img, (h, w), cv2.INTER_NEAREST)\n            labels.append(img)\n\n        inputs = np.stack(inputs, axis=2)\n        # Changing image format to B x C x H x W\n        inputs = torch.tensor(inputs).transpose(0, 2).transpose(1, 3)\n\n        labels = torch.tensor(labels)\n\n        yield inputs, labels\n\n\ndef get_class_weights(num_classes, c=1.02):\n    pipe = loader('./content/train/', './content/trainannot/', batch_size='all')\n    _, labels = next(pipe)\n    all_labels = labels.flatten()\n    each_class = np.bincount(all_labels, minlength=num_classes)\n    prospensity_score = each_class / len(all_labels)\n    class_weights = 1 / (np.log(c + prospensity_score))\n    return class_weights\n\n\ndef main():\n    device = torch.device('cuda:0')\n    enet = ENet(12).to(device)\n\n    lr = 5e-4\n    batch_size = 20\n    class_weights = get_class_weights(12)\n    criterion = nn.CrossEntropyLoss(weight=torch.FloatTensor(class_weights).to(device))\n    optimizer = torch.optim.Adam(enet.parameters(),\n                                 lr=lr,\n                                 weight_decay=2e-4)\n\n    print_every = 5\n    eval_every = 5\n\n    # ## Training loop\n\n    train_losses = []\n    eval_losses = []\n\n    bc_train = 367 // batch_size  # mini_batch train\n    bc_eval = 101 // batch_size  # mini_batch validation\n\n    iterationsPerEpochs = 1000\n\n    # Define pipeline objects\n    pipe = loader('./content/train/', './content/trainannot/', batch_size)\n    eval_pipe = loader('./content/val/', './content/valannot/', batch_size)\n\n    epochs = 100\n    print()\n    for e in range(1, epochs + 1):\n\n        train_loss = 0\n        print('-' * 15, 'Epoch %d' % e, '-' * 15)\n\n        enet.train()\n\n        for _ in tqdm(range(iterationsPerEpochs)):\n            X_batch, mask_batch = next(pipe)\n\n            # assign data to cpu/gpu\n            X_batch, mask_batch = 
X_batch.to(device), mask_batch.to(device)\n\n optimizer.zero_grad()\n\n out = enet(X_batch.float())\n\n # loss calculation\n loss = criterion(out, mask_batch.long())\n # update weights\n loss.backward()\n optimizer.step()\n\n train_loss += loss.item()\n\n print()\n train_losses.append(train_loss)\n\n if (e + 1) % print_every == 0:\n print(f'Epoch {e}/{epochs}...',\n f'Loss {train_loss:6f}')\n\n if e % eval_every == 0:\n with torch.no_grad():\n enet.eval()\n\n eval_loss = 0\n\n # Validation loop\n for _ in tqdm(range(bc_eval)):\n inputs, labels = next(eval_pipe)\n\n inputs, labels = inputs.to(device), labels.to(device)\n\n out = enet(inputs)\n\n out = out.data.max(1)[1]\n\n eval_loss += (labels.long() - out.long()).sum()\n\n print()\n print(f'Loss {eval_loss:6f}')\n\n eval_losses.append(eval_loss)\n\n if e % print_every == 0:\n checkpoint = {\n 'epochs': e,\n 'state_dict': enet.state_dict()\n }\n torch.save(checkpoint, f'./checkpoints/ckpt-enet-{e}-{train_loss}.pth')\n print('Model saved!')\n\n print(f'Epoch {e}/{epochs}...',\n f'Total Mean Loss: {sum(train_losses) / epochs:6f}')\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"experiments/CamVid/train_CamVid.py","file_name":"train_CamVid.py","file_ext":"py","file_size_in_byte":4646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"254016434","text":"import xlsxwriter\nfrom datetime import datetime\nfrom api.models import VisitaFormularioLira\nfrom django.db.models import Sum,Q\n\n\n## DOCUMENTAÇÃO DA BIBLIOTECA\n# http://xlsxwriter.readthedocs.io \n\n\ndef createConsolidados(workbook,DISTRITO_SANITARIO,NUMERO_ESTRATOS,DATA_INICIAL,DATA_FINAL):\n\n #A Planilha dos Consolidados consiste de 2 Tabelas com cabeçalhos diferente;\n\n #Não temos informações sobre o albopictus, logo\n #campos relacionados ao Albopictus terão 0 como padrão;\n\n #Worksheet.merge_range() produz celulas retangulares que abrangem mais de uma linha/coluna;\n\n #Criando estilos visuias paras as celulas que serão usados na planilha\n header_format = workbook.add_format({'bold':True,'font_size':8,'font_name':'Arial'})\n big_font = workbook.add_format({'bold':True,'font_size':14,'font_name':'Arial'})\n normal_font = workbook.add_format({'bold':True,'font_size':10,'font_name':'Arial'})\n vertical_text_font_gray = workbook.add_format({'bold':True,'font_size':8,'border':1,'border_color':'black','bg_color':'gray','valign':'vjustify','align':'center','font_name':'Arial','rotation':90}) \n horizontal_text_font_gray = workbook.add_format({'bold':True,'font_size':8,'border':1,'border_color':'black','bg_color':'gray','valign':'vcenter','align':'center','font_name':'Arial'}) \n\n #Formatando a Data\n DATA_INICIAL_FORMATTED = datetime.strptime(DATA_INICIAL, '%Y-%m-%d').date().strftime('%d/%m/%Y')\n DATA_FINAL_FORMATTED = datetime.strptime(DATA_FINAL, '%Y-%m-%d').date().strftime('%d/%m/%Y')\n\n #Nome da Planilha\n worksheet = workbook.add_worksheet(\"DS \" + str(DISTRITO_SANITARIO)) \n\n #Informações da Prefeitura\n worksheet.insert_image(\"A1\",\"prefeitura.jpg\",{'x_scale': 0.28, 'y_scale': 0.39})\n worksheet.write(\"C1\",\"Prefeitura do Recife\",header_format)\n worksheet.write(\"C2\",\"Secretaria de Saúde\",header_format)\n worksheet.write(\"C3\",\"Secretaria Executiva de Vigilância à Saúde\",header_format)\n worksheet.write(\"C4\",\"Gerência de Vigilância Ambiental e Controle de Zoonoses\",header_format)\n worksheet.write(\"C5\",\"Programa de Saúde Ambiental\",header_format)\n worksheet.write(\"C6\",\"Coordenação de 
Apoio Diagnóstico\",header_format)\n\n #Informações sobre a Tabela(Distrito,Semestre,Data)\n worksheet.write(\"G8\",\"Levantamento de Indice Rápido para Aedes aegypti\",big_font)\n worksheet.write(\"B10\",\"Distrito Sanitário:\" + str(DISTRITO_SANITARIO),normal_font)\n worksheet.write(\"J10\",\"LIRAa: \"+ DATA_INICIAL_FORMATTED,normal_font)\n worksheet.write(\"S10\",\"Período: \" + DATA_INICIAL_FORMATTED + \" à \" + DATA_FINAL_FORMATTED,normal_font)\n\n\n #Cabeçalho da Tabela (Linha 1)\n worksheet.merge_range('A12:A14',\"Estratos\",vertical_text_font_gray)\n worksheet.merge_range('B12:I12',\"Imóveis\",horizontal_text_font_gray)\n worksheet.merge_range('J12:R12',\"Tipos de Recipientes para Aedes aegypti\",horizontal_text_font_gray)\n worksheet.merge_range('S12:S14',\"Pend. (%) Informada\",vertical_text_font_gray)\n worksheet.merge_range('T12:T14',\"Recip. p/ Ae. Albop.\",vertical_text_font_gray)\n worksheet.merge_range('U12:U14',\"Imóv. à Rec.\",vertical_text_font_gray)\n\n\n\n #Cabeçalho da Tabela (Linha 2)\n worksheet.merge_range('B13:B14',\"Programados\",horizontal_text_font_gray)\n worksheet.merge_range('C13:C14',\"Inspecionados\",horizontal_text_font_gray)\n worksheet.merge_range('D13:F13',\"C/ Ae. aegypti\",horizontal_text_font_gray)\n worksheet.merge_range('G13:I13',\"C/ Ae. albopictus\",horizontal_text_font_gray)\n worksheet.merge_range('J13:J14',\"A1\",horizontal_text_font_gray)\n worksheet.merge_range('K13:K14',\"A2\",horizontal_text_font_gray)\n worksheet.merge_range('L13:L14',\"B\",horizontal_text_font_gray)\n worksheet.merge_range('M13:M14',\"C\",horizontal_text_font_gray)\n worksheet.merge_range('N13:N14',\"D1\",horizontal_text_font_gray)\n worksheet.merge_range('O13:O14',\"D2\",horizontal_text_font_gray)\n worksheet.merge_range('P13:P14',\"E\",horizontal_text_font_gray)\n worksheet.merge_range('Q13:Q14',\"Total\",horizontal_text_font_gray)\n worksheet.merge_range('R13:R14',\"Crítica\",horizontal_text_font_gray)\n\n\n #Cabeçalho da Tabela (Linha 3)\n worksheet.write('D14',\"TB\",horizontal_text_font_gray)\n worksheet.write('E14',\"Outros\",horizontal_text_font_gray)\n worksheet.write('F14',\"Total\",horizontal_text_font_gray)\n worksheet.write('G14',\"TB\",horizontal_text_font_gray)\n worksheet.write('H14',\"Outros\",horizontal_text_font_gray)\n worksheet.write('I14',\"Total\",horizontal_text_font_gray)\n\n\n ########### Listas para armazenar valores que serão usados nas Tabela 1 e 2###\n ########### Isto evita o uso de laços desnecessarios e acessos ao Bando de Dados\n InspecionadosList = []\n TbAegyptiList = []\n OutrosAegyptiList = []\n dicionarioFocosList = []\n totalFocosAegyptiList = []\n ImoveisPositivosAegyptiList = []\n\n for e in range (1,NUMERO_ESTRATOS+2):\n InspecionadosList.append(0)\n TbAegyptiList.append(0)\n OutrosAegyptiList.append(0)\n dicionarioFocosList.append(0)\n totalFocosAegyptiList.append(0)\n ImoveisPositivosAegyptiList.append(0)\n ###################################################################\n\n\n #########################################\n ## Conteudo da tabela 1 ###\n #########################################\n\n #Imprime linhas para cada Estratos do Distrito + uma linha adicional com o Total \n for numEstrato in range(1,NUMERO_ESTRATOS+2):\n #Para cada estrato do Distrito\n if (numEstrato <= NUMERO_ESTRATOS):\n #Dicionario com objetos selecionados entre a data requisitada\n queryDictList = VisitaFormularioLira.objects.filter(distritoSanitario=DISTRITO_SANITARIO,estrato=numEstrato,dataVisita__range=(DATA_INICIAL,DATA_FINAL)).values()\n 
else:\n #Linha TOTAL\n queryDictList= VisitaFormularioLira.objects.filter(distritoSanitario=DISTRITO_SANITARIO,dataVisita__range=(DATA_INICIAL,DATA_FINAL)).values()\n \n row = numEstrato + 13 # Linha onde começa as informações da Tabela (Diferente do numero da Linha exibida na Tabela)\n column = 0 # Coluna 'A'\n\n if (numEstrato <= NUMERO_ESTRATOS):\n worksheet.write(row,column,numEstrato) #Estratos\n else:\n worksheet.write(row,column,'Total')\n\n #-------Programados---------\n worksheet.write(row,column+1,-1) \n\n \n #Captura de valores usados no Preenchimento das Tabelas\n Inspecionados = 0\n TbAegypti = 0\n ImoveisPositivosAegypti = 0\n for data in queryDictList:\n Inspecionados += 1 #Inspecionados\n if (data['tipoImovel'] == 4):\n TbAegypti += 1 #TB Ae. aegypti\n if (data['aedesA1']!=0 or data['aedesA2']!=0 or data['aedesB']!=0 or data['aedesC']!=0 or data['aedesD1']!=0 or data['aedesD2']!=0 or data['aedesE']!=0):\n ImoveisPositivosAegypti += 1 #IIP\n ImoveisPositivosAegyptiList.insert(numEstrato,ImoveisPositivosAegypti)\n\n\n #-------Inspecionados---------\n InspecionadosList.insert(numEstrato,Inspecionados)\n worksheet.write(row,column+2,Inspecionados) \n\n #-------TB Ae. aegypti---------\n #Formularios realizados em Terrenos Baldios \n TbAegyptiList.insert(numEstrato,TbAegypti)\n worksheet.write(row,column+3,TbAegypti) \n\n #-------Outros Ae. aegypti---------\n #Formularios que NÃO foram realizados em Terrenos Baldios\n #OutrosAegypti = Inspecionados - TbAegypti\n OutrosAegypti = Inspecionados - TbAegypti\n OutrosAegyptiList.insert(numEstrato, OutrosAegypti) \n worksheet.write(row,column+4,OutrosAegypti) \n\n #-------Total Ae. aegypti---------\n TotalAegypti = TbAegypti + OutrosAegypti\n worksheet.write(row,column+5,TotalAegypti) \n\n #-------TB Ae. albopictus---------\n worksheet.write(row,column+6,0) \n\n #-------Outros Ae. albopictus---------\n worksheet.write(row,column+7,0) \n\n #-------Total Ae. albopictus---------\n worksheet.write(row,column+8,0) \n\n #------Tipos de recipientes para Aedes aegypti---------\n #Calcula a soma de todos os tipos de recipientes encontrados \n dicionarioFocos = {'aedesA1__sum':0,'aedesA2__sum':0,'aedesB__sum':0,'aedesC__sum':0,'aedesD1__sum':0,'aedesD2__sum':0,'aedesE__sum':0}\n for data in queryDictList:\n dicionarioFocos['aedesA1__sum'] += data['aedesA1']\n dicionarioFocos['aedesA2__sum'] += data['aedesA2']\n dicionarioFocos['aedesB__sum' ] += data['aedesB' ]\n dicionarioFocos['aedesC__sum' ] += data['aedesC' ]\n dicionarioFocos['aedesD1__sum'] += data['aedesD1']\n dicionarioFocos['aedesD2__sum'] += data['aedesD2']\n dicionarioFocos['aedesE__sum' ] += data['aedesE' ]\n\n dicionarioFocosList.insert(numEstrato,dicionarioFocos)\n\n \n\n #------Focos Aedes---------\n worksheet.write(row,column+9 ,dicionarioFocos['aedesA1__sum']) #Focos A1\n worksheet.write(row,column+10,dicionarioFocos['aedesA2__sum']) #Focos A2\n worksheet.write(row,column+11,dicionarioFocos['aedesB__sum']) #Focos B\n worksheet.write(row,column+12,dicionarioFocos['aedesC__sum']) #Focos C\n worksheet.write(row,column+13,dicionarioFocos['aedesD1__sum']) #Focos D1\n worksheet.write(row,column+14,dicionarioFocos['aedesD2__sum']) #Focos D2\n worksheet.write(row,column+15,dicionarioFocos['aedesE__sum']) #Focos E\n\n #------Total tipos de recipientes---------\n #Não temos informações sobre o albopictus\n #Logo o total de recipientes deve ser igual ao Total Ae. 
aegypti\n totalFocosAegypti = 0\n for key in dicionarioFocos:\n totalFocosAegypti += dicionarioFocos[key]\n totalFocosAegyptiList.insert(numEstrato,totalFocosAegypti)\n worksheet.write(row,column+16,totalFocosAegypti) \n\n #------Critica---------\n #Depende do Supervisor, ficará em branco\n worksheet.write(row,column+17,' ') \n\n #------Recipientes para Albopictus---------\n worksheet.write(row,column+18,0) \n\n \n #----------------------Fim da Tabela 1---------------------------\n\n\n ######################################################################\n ### AQUI SE INICIA A IMPRESSÃO DA SEGUNDA TABELA DA PLANILHA ###\n ######################################################################\n\n #Define a linha inicial da Tabela 2, que varia dependendo do número de estratos do Distrito\n initialRowTable2 = 13 + NUMERO_ESTRATOS + 3 \n\n #Cabeçalho da Tabela 2 (Linha 1)\n worksheet.merge_range(initialRowTable2,0,initialRowTable2+2,0,\"Estratos\",vertical_text_font_gray)\n worksheet.merge_range(initialRowTable2,1,initialRowTable2,6,\"Imóveis\",horizontal_text_font_gray)\n worksheet.merge_range(initialRowTable2,7,initialRowTable2,18,\"Indicadores\",horizontal_text_font_gray)\n\n\n #Cabeçalho da Tabela 2 (Linha 2)\n worksheet.merge_range(initialRowTable2+1,1,initialRowTable2+2,1,\"Programados\",horizontal_text_font_gray)\n worksheet.merge_range(initialRowTable2+1,2,initialRowTable2+2,2,\"Inspecionados\",horizontal_text_font_gray)\n worksheet.merge_range(initialRowTable2+1,3,initialRowTable2+1,4,\"C/ Ae. aegypti (%)\",horizontal_text_font_gray)\n worksheet.merge_range(initialRowTable2+1,5,initialRowTable2+1,6,\"C/ Ae. albopictus (%)\",horizontal_text_font_gray)\n worksheet.merge_range(initialRowTable2+1,7,initialRowTable2+2,7,\"% Perda na amostragem\",horizontal_text_font_gray)\n worksheet.merge_range(initialRowTable2+1,8,initialRowTable2+1,9,\"I I P (%)\",horizontal_text_font_gray)\n worksheet.merge_range(initialRowTable2+1,10,initialRowTable2+1,11,\"Indice de Breteau\",horizontal_text_font_gray)\n worksheet.merge_range(initialRowTable2+1,12,initialRowTable2+1,18,\"ITR (Aedes aegypti) (%)\",horizontal_text_font_gray)\n\n\n #Cabeçalho da Tabela 2 (Linha 3)\n worksheet.write(initialRowTable2+2,3,\"TB\",horizontal_text_font_gray)\n worksheet.write(initialRowTable2+2,4,\"Outros\",horizontal_text_font_gray)\n worksheet.write(initialRowTable2+2,5,\"TB\",horizontal_text_font_gray)\n worksheet.write(initialRowTable2+2,6,\"Outros\",horizontal_text_font_gray)\n worksheet.write(initialRowTable2+2,8,\"Aegypti\",horizontal_text_font_gray)\n worksheet.write(initialRowTable2+2,9,\"Albopictus\",horizontal_text_font_gray)\n worksheet.write(initialRowTable2+2,10,\"Aegypti\",horizontal_text_font_gray)\n worksheet.write(initialRowTable2+2,11,\"Albopictus\",horizontal_text_font_gray)\n worksheet.write(initialRowTable2+2,12,\"A1\",horizontal_text_font_gray)\n worksheet.write(initialRowTable2+2,13,\"A2\",horizontal_text_font_gray)\n worksheet.write(initialRowTable2+2,14,\"B\",horizontal_text_font_gray)\n worksheet.write(initialRowTable2+2,15,\"C\",horizontal_text_font_gray)\n worksheet.write(initialRowTable2+2,16,\"D1\",horizontal_text_font_gray)\n worksheet.write(initialRowTable2+2,17,\"D2\",horizontal_text_font_gray)\n worksheet.write(initialRowTable2+2,18,\"E\",horizontal_text_font_gray)\n\n\n #########################################\n ## Conteudo da tabela 2 ###\n #########################################\n\n for numEstrato in range(1,NUMERO_ESTRATOS+2):\n contentRowTable2 = initialRowTable2 + 2 # Linha do 
conteundo da tabela 2\n \n if (numEstrato <= NUMERO_ESTRATOS):\n worksheet.write(contentRowTable2+numEstrato,0,numEstrato) #Estratos\n else:\n worksheet.write(contentRowTable2+numEstrato,0,'Total')\n\n #------Programados---------\n worksheet.write(contentRowTable2 + numEstrato,1,-1) \n\n\n #------Inspecionados---------\n #Total de Formularios\n Inspecionados = InspecionadosList[numEstrato]\n worksheet.write(contentRowTable2 + numEstrato,2,Inspecionados) \n\n\n #------TB Ae. aegypti (%)---------\n #Somente Terrenos Baldio\n TbAegypti = TbAegyptiList[numEstrato]\n TbAegyptiPorcentagem = 0\n if (Inspecionados != 0):\n TbAegyptiPorcentagem = round((TbAegypti/Inspecionados)*100,1)\n worksheet.write(contentRowTable2 + numEstrato,3,TbAegyptiPorcentagem) \n\n #------Outros Ae. aegypti (%)---------\n #Qualquer Tipo de Imóvel que não seja Terreno Baldio\n OutrosAegypti = OutrosAegyptiList[numEstrato]\n OutrosAegyptiPorcentagem = 0\n if (Inspecionados != 0):\n OutrosAegyptiPorcentagem = round((OutrosAegypti/Inspecionados)*100,1)\n worksheet.write(contentRowTable2 + numEstrato,4,OutrosAegyptiPorcentagem) \n\n\n #------TB Ae. aelbopictus (%)---------\n worksheet.write(contentRowTable2 + numEstrato,5,0) \n\n #------Outros Ae. aelbopictus (%)---------\n worksheet.write(contentRowTable2 + numEstrato,6,0) \n\n\n #------Perda na amostragem---------\n worksheet.write(contentRowTable2 + numEstrato,7,0)\n\n\n #------IIP Aegypti---------\n #Indicie Predial IIP (%) == (Imoveis Positivos/Inspecionados)*100 \n\n #Total de Imoveis onde existe pelo menos 1 tipo de foco \n IIP = 0\n ImoveisPositivosAegypti = ImoveisPositivosAegyptiList[numEstrato]\n if (Inspecionados != 0):\n IIP = round((ImoveisPositivosAegypti/Inspecionados)*100,1)\n worksheet.write(contentRowTable2 + numEstrato,8,IIP)\n\n\n #------IIP Albopictus---------\n worksheet.write(contentRowTable2 + numEstrato,9,0)\n\n \n #------IB Aegypti------------\n #Indicie de Breteau IB == (Recipientes positivos/Inspecionados)*100\n IBAegypti = 0\n if (Inspecionados != 0): \n IBAegypti = round((totalFocosAegyptiList[numEstrato]/Inspecionados)*100,1)\n worksheet.write(contentRowTable2 + numEstrato,10,IBAegypti)\n \n \n #------IB Albopictus---------\n worksheet.write(contentRowTable2 + numEstrato,11,0)\n\n\n #----------Indicie de Tipos de Recipiente---------\n #Indicie de Tipos de Recipiente (%) ITR == (Recipiente positivos \"X\"/Total de Recipientes Positivos)*100\n dicionarioFocos = dicionarioFocosList[numEstrato]\n totalFocosAegypti = totalFocosAegyptiList[numEstrato]\n #------ITR Foco A1---------\n ITRAedesA1 = 0\n if (totalFocosAegypti != 0): #Evita divisão por 0\n ITRAedesA1 = round((dicionarioFocos['aedesA1__sum']/totalFocosAegypti)*100,1)\n worksheet.write(contentRowTable2 + numEstrato,12,ITRAedesA1)\n\n #------ITR Foco A2---------\n ITRAedesA2 = 0\n if (totalFocosAegypti != 0): \n ITRAedesA2 = round((dicionarioFocos['aedesA2__sum']/totalFocosAegypti)*100,1)\n worksheet.write(contentRowTable2 + numEstrato,13,ITRAedesA2)\n\n #------ITR Foco B---------\n ITRAedesB = 0\n if (totalFocosAegypti != 0): \n ITRAedesB = round((dicionarioFocos['aedesB__sum']/totalFocosAegypti)*100,1)\n worksheet.write(contentRowTable2 + numEstrato,14,ITRAedesB)\n\n #------ITR Foco C---------\n ITRAedesC = 0\n if (totalFocosAegypti != 0): \n ITRAedesC = round((dicionarioFocos['aedesC__sum']/totalFocosAegypti)*100,1)\n worksheet.write(contentRowTable2 + numEstrato,15,ITRAedesC)\n\n #------ITR Foco D1---------\n ITRAedesD1 = 0\n if (totalFocosAegypti != 0): \n ITRAedesD1 = 
round((dicionarioFocos['aedesD1__sum']/totalFocosAegypti)*100,1)\n        worksheet.write(contentRowTable2 + numEstrato,16,ITRAedesD1)\n\n        #------ITR Foco D2---------\n        ITRAedesD2 = 0\n        if (totalFocosAegypti != 0):\n            ITRAedesD2 = round((dicionarioFocos['aedesD2__sum']/totalFocosAegypti)*100,1)\n        worksheet.write(contentRowTable2 + numEstrato,17,ITRAedesD2)\n\n        #------ITR Foco E---------\n        ITRAedesE = 0\n        if (totalFocosAegypti != 0):\n            ITRAedesE = round((dicionarioFocos['aedesE__sum']/totalFocosAegypti)*100,1)\n        worksheet.write(contentRowTable2 + numEstrato,18,ITRAedesE)\n\n    #---------------------------Fim da Tabela 2 ----------------------------------\n\n\n    #Rodapé da Tabela\n    #worksheet.merge_range('A32:T32',\"Estratos\",horizontal_text_font_gray)\n","sub_path":"login/consolidados.py","file_name":"consolidados.py","file_ext":"py","file_size_in_byte":18447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"421567652","text":"##############################################\n# combine reads over run over multiple lanes\n##############################################\n\nimport os, sys, re\nimport collections\nimport glob\n\n# script for linking files\nscriptsdir = \"/ifs/projects/proj029/src\"\n\n# first link to data in working directory\nos.system(\"python %s/map_samples_rna.py\" % scriptsdir)\n\n# to remove at the end\nto_remove = glob.glob(\"*.fastq*\")\n\n\n# iterate over files and combine those that have\n# the same index i.e. condition\nfile_list = collections.defaultdict(list)\nfor inf in to_remove:\n    name_split = inf.split(\"-\")\n    index = name_split[1]\n    file_list[index].append(inf)\n\nreps = [\"R1\", \"R2\", \"R3\", \"R4\"]\nfor condition, files in file_list.items():\n    for rep in reps:\n        outprefix = \"stool-\" + condition + \"-%s\" % rep\n        p1 = [inf for inf in files if inf.endswith(\".1.gz\") and inf.find(rep) != -1]\n        p1.sort()\n        p1 = \" \".join(p1)\n        outname1 = outprefix + \".fastq.1.gz\"\n        if os.path.exists(outname1): continue\n        statement = \"zcat %(p1)s | gzip > %(outname1)s\" % locals()\n        os.system(statement)\n        p2 = [inf for inf in files if inf.endswith(\".2.gz\") and inf.find(rep) != -1]\n        p2.sort()\n        p2 = \" \".join(p2)\n        outname2 = outprefix + \".fastq.2.gz\"\n\n        if os.path.exists(outname2): continue\n        statement = \"zcat %(p2)s | gzip > %(outname2)s\" % locals()\n        os.system(statement)\n\nto_remove = \" \".join(to_remove)\n#os.system(\"rm -rf %s\" % to_remove)\n","sub_path":"scripts/combine_lanes_rna.py","file_name":"combine_lanes_rna.py","file_ext":"py","file_size_in_byte":1520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"602427758","text":"from heapq import heapify, heappop, heappush\nfrom inferences import recursive_inference\nfrom fixedbaseline import asgn_val\nfrom constraints import constraints_violation, goal_checker\nfrom copy import deepcopy\n\n# Most Constrained Variable (Minimum Remaining Values) heuristic,\n# implemented with a priority queue\n\ndef get_heap(cell_status):\n    pq = []\n    heapify(pq)\n    for i in range(81):\n        if len(cell_status[i][1]) != 0:\n            heappush(pq, (len(cell_status[i][1]), i))\n    return pq\n\ndef mcv(cell_status, n, inferences):\n\n    # check whether the goal state condition is satisfied\n    if goal_checker(cell_status):\n        return True, cell_status, n\n\n    # check for constraint violations\n    if constraints_violation(cell_status):\n        return False, None, n\n\n    # Give a reasonable fixed bound on the number of search steps, say 1000, for each experiment.\n    if n > 1000:\n        return False, None, n\n\n    # apply inferences here #\n    recursive_inference(cell_status, inferences, 1000)\n    # check for constraint violations\n    if constraints_violation(cell_status):\n        return False, None, n\n    # check whether the goal state condition is satisfied\n    if goal_checker(cell_status):\n        return True, deepcopy(cell_status), n\n\n    pq = get_heap(cell_status)\n\n    if len(pq) == 0:\n        return False, None, n\n\n    _, idx = heappop(pq)\n\n    val_dom = cell_status[idx][1]\n    for val in val_dom:\n        cp = deepcopy(cell_status)\n        asgn_val(idx, cp, val)\n        succ, res_cell, step = mcv(cp, n + 1, inferences)\n        if succ:\n            cell_status = deepcopy(res_cell)\n            return True, cell_status, step\n        n = step\n    return False, None, n","sub_path":"mcv.py","file_name":"mcv.py","file_ext":"py","file_size_in_byte":1660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
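# NOTE (editor): a toy invocation of the get_heap() helper in the mcv record
# above -- a sketch only, which assumes cell_status is a list of 81
# (value, candidate-domain) pairs as the record implies:
toy = [(0, []) for _ in range(81)]  # solved cells carry empty domains
toy[5] = (0, [1, 2])                # open cell with two candidates
toy[7] = (0, [3])                   # open cell with a single candidate
# get_heap(toy) builds a min-heap of (domain size, index) pairs, so
# heappop(get_heap(toy)) would return (1, 7): the most constrained open cell.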
+{"seq_id":"174496690","text":"# coding: utf-8\n# flask.ext.* aliases were removed in Flask 1.0; import the package directly\nfrom flask_script import Manager, Shell\nfrom app import create_app, db\nfrom flask_migrate import Migrate, MigrateCommand, upgrade\n\napp = create_app('default')\nmanager = Manager(app)\nmigrate = Migrate(app, db)\n\n\ndef make_shell_context():\n    return dict(app=app, db=db)\n\n\nmanager.add_command(\"shell\", Shell(make_context=make_shell_context))\n\nmanager.add_command('db', MigrateCommand)\n\n\n@manager.command\ndef dev():\n    from livereload import Server\n    live_server = Server(app.wsgi_app)\n    live_server.watch('**/*.*')\n    live_server.serve(open_url=False)\n\n\n@manager.command\ndef test():\n    import unittest\n    tests = unittest.TestLoader().discover('tests')\n    unittest.TextTestRunner(verbosity=2).run(tests)\n\n\n@manager.command\ndef deploy():\n    upgrade()\n\n\n@manager.command\ndef recreate_all():\n    db.drop_all()\n    db.create_all()\n\nif __name__ == '__main__':\n    manager.run()\n","sub_path":"backend/manage.py","file_name":"manage.py","file_ext":"py","file_size_in_byte":901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
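# NOTE (editor): Flask-Script, used by the manage.py record above, maps a
# command function's parameters onto CLI options. A minimal, self-contained
# sketch; the app and command names are hypothetical, not from the original:
from flask import Flask
from flask_script import Manager

sketch_app = Flask(__name__)
sketch_manager = Manager(sketch_app)

@sketch_manager.option('-n', '--name', dest='name', default='world')
def greet(name):
    """Invoked as: python manage.py greet -n dev"""
    print('hello', name)

# sketch_manager.run() would dispatch to the registered commands.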
a super user for the database.\n        '''\n        mock = MagicMock(return_value=True)\n        with patch.dict(djangomod.__salt__, {'cmd.run': mock}):\n            self.assertTrue(djangomod.createsuperuser('DJANGO_SETTINGS_MODULE',\n                                                      'SALT',\n                                                      'salt@slatstack.com'))\n\n    # 'loaddata' function tests: 1\n\n    @patch('salt.modules.djangomod._get_django_admin',\n           MagicMock(return_value=True))\n    def test_loaddata(self):\n        '''\n        Test if it loads fixture data\n        '''\n        mock = MagicMock(return_value=True)\n        with patch.dict(djangomod.__salt__, {'cmd.run': mock}):\n            self.assertTrue(djangomod.loaddata('DJANGO_SETTINGS_MODULE',\n                                               'mydata'))\n\n    # 'collectstatic' function tests: 1\n\n    @patch('salt.modules.djangomod._get_django_admin',\n           MagicMock(return_value=True))\n    def test_collectstatic(self):\n        '''\n        Test if it collects static files from each of your applications\n        into a single location\n        '''\n        mock = MagicMock(return_value=True)\n        with patch.dict(djangomod.__salt__, {'cmd.run': mock}):\n            self.assertTrue(djangomod.collectstatic('DJANGO_SETTINGS_MODULE'))\n","sub_path":"tests/unit/modules/test_djangomod.py","file_name":"test_djangomod.py","file_ext":"py","file_size_in_byte":3028,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"220377663","text":"# Because each callback runs on its own stack, the local variables in a\n# stack frame are lost on return: the callback loses the state and context\n# of where it was. To keep state, the stacks must be stored in the heap and\n# passed into the callbacks as parameters; this is called \"stack ripping\".\nimport socket\nimport ssl\nfrom selectors import DefaultSelector, EVENT_WRITE, EVENT_READ\nfrom bs4 import BeautifulSoup\n\nurls_go=set(['/'])\nurls_done=set()\n\nselector=DefaultSelector()\n\nclass Fetcher:\n\tdef __init__(self, url):\n\t\tself.response=b''\n\t\tself.url=url\n\t\tself.sock=None\n\t\tself.ss=None\n\n\tdef fetch(self):\n\t\tself.sock=socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\t\tself.sock.setblocking(False)\n\t\tself.ss=ssl.wrap_socket(self.sock, ssl_version=ssl.PROTOCOL_TLS)\n\n\t\ttry:\n\t\t\tself.ss.connect(('xkcd.com', 443))\n\t\texcept BlockingIOError:\n\t\t\t# expected for a non-blocking connect in progress\n\t\t\tpass\n\n\t\tselector.register(self.ss.fileno(), EVENT_WRITE, self.connected)\n\n\tdef connected(self, key, mask):\n\t\tselector.unregister(self.ss.fileno())\n\t\trequest='GET {} HTTP/1.1\\r\\nHost: xkcd.com\\r\\nConnection: close\\r\\n\\r\\n'.format(self.url)\n\n\t\tself.ss.send(request.encode())\n\t\tselector.register(self.ss.fileno(), EVENT_READ, self.read_response)\n\n\tdef read_response(self, key, mask):\n\t\tchunk=self.ss.recv(4096)\n\t\tif chunk:\n\t\t\tself.response+=chunk\n\t\telse:\n\t\t\tprint(self.response)\n\t\t\tselector.unregister(self.ss.fileno())\n\t\t\tlinks=self.parse_links()\n\n\t\t\tfor link in links.difference(urls_done):\n\t\t\t\turls_go.add(link)\n\t\t\t\tFetcher(link).fetch()\n\n\t\t\turls_done.update(links)\n\t\t\turls_go.remove(self.url)\n\n\tdef parse_links(self):\n\t\tsoup=BeautifulSoup(self.response, 'html.parser')\n\t\tanchors=soup.find_all('a')\n\t\tlinks=set()\n\t\tfor anchor in anchors:\n\t\t\tif anchor.get('href'):\n\t\t\t\tlinks.add(anchor['href'])\n\n\t\treturn links\n\ndef loop():\n\twhile True:\n\t\tevents=selector.select()\n\t\tfor key, mask in events:\n\t\t\tcallback=key.data\n\t\t\tprint(callback.__name__)\n\t\t\tcallback(key, 
mask)\n\nfetcher=Fetcher('/')\nfetcher.fetch()\n\nloop()\n","sub_path":"asyncIO/async/loop_with_callbacks.py","file_name":"loop_with_callbacks.py","file_ext":"py","file_size_in_byte":1960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"224805416","text":"\"\"\"\n\nTaken from https://djangosnippets.org/snippets/2405/\n\nThis example assumes you have placed this code into a file called \"template_additions.py\"\nand placed that folder into a \"templatetags\" folder inside a module.\nIt can live wherever you need.\n\nExample folder structure:\n/lib/\n /__init__.py\n /templatetags/\n /__init__.py\n /template_additions.py\n\"\"\"\n\nfrom django import template\nfrom django.conf import settings\nfrom django.utils.html import strip_spaces_between_tags\n\nregister = template.Library()\n\n\nclass SmartSpacelessNode(template.Node):\n def __init__(self, nodelist):\n self.nodelist = nodelist\n\n def render(self, context):\n content = self.nodelist.render(context)\n return content if settings.DEBUG else strip_spaces_between_tags(content.strip())\n\n\n@register.tag\ndef smart_spaceless(parser, token):\n \"\"\"\n Removes whitespace between HTML tags, including tab and newline characters,\n but only if settings.DEBUG = False\n\n Example usage:\n {% load template_additions %}\n {% smart_spaceless %}\n

\n Foo\n

\n {% end_smart_spaceless %}\n\n This example would return this HTML:\n\n

Foo

\n\n Only space between *tags* is normalized -- not space between tags and text.\n In this example, the space around ``Hello`` won't be stripped:\n\n {% smart_spaceless %}\n \n Hello\n \n {% end_smart_spaceless %}\n \"\"\"\n nodelist = parser.parse(('end_smart_spaceless',))\n parser.delete_first_token()\n return SmartSpacelessNode(nodelist)\n","sub_path":"galaxy_conqueror/apps/core/templatetags/smart_spaceless.py","file_name":"smart_spaceless.py","file_ext":"py","file_size_in_byte":1668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"245424494","text":"# -*- coding: utf-8 -*-\n\n# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html\nimport django \nimport os\nos.environ['DJANGO_SETTINGS_MODULE'] = 'fsociety.settings'\ndjango.setup()\nfrom movie.models import Movie,Seed\nclass PianyuanPipeline(object):\n\n def process_item(self, item, spider):\n if spider.name == 'seed':\n s = Seed.objects.filter(filename=item['filename'])\n if not s:\n seed = Seed()\n seed.imdb = item['imdb']\n seed.filename = item['filename']\n seed.size = item['size']\n seed.quality = item['quality']\n seed.magnet = item['magnet']\n seed.save()\n\n else:\n m = Movie.objects.filter(imdb=item['imdb'])\n if not m:\n mv = Movie()\n mv.name = item['name']\n mv.name_en = item['name_en']\n mv.year = item['year']\n try:\n mv.fullname = item['fullname']\n except:\n mv.fullname = ''\n try:\n mv.category = item['category']\n except:\n mv.category = ''\n try:\n mv.director = ','.join(item['director'])\n except:\n mv.director = ''\n try:\n mv.writer = ','.join(item['writer'])\n except:\n mv.writer = ''\n mv.imdb = item['imdb']\n douban = item['douban'].replace('//movie.douban.com/subject/','')\\\n .replace('/','')\n mv.douban = douban\n mv.rating = item['rating']\n try:\n mv.actor = ','.join(item['actor'])\n except:\n mv.actor = ''\n mv.cover = item['images'][0]['path']\n if item['is_movie'] == 1:\n mv.is_movie = True\n mv.url = item['url']\n mv.save()\n\n return item\n\n","sub_path":"pianyuan/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":2184,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"414796996","text":"#!/usr/bin/python3\n\n# name: batch_regiosqm.py\n# author: nbehrnd@yahoo.com\n# license: 2020-2021, MIT\n# date: 2020-09-24 (YYYY-MM-DD)\n# edit: <2023-08-28 Mon>\n#\n\"\"\"This is a moderator script to interact with regiosqm.\n\n+ Background\n To work successfully, this moderator script is expected to reside in\n the same folder as the scripts provided by Jensen and Kroman, i.e.\n\n + __init__.py\n + molecule_formats.py\n + molecule_svg.py\n + protonate.py\n + regiosqm.py\n + validate.py\n\n These scripts, initially written for Python 2, were ported to work\n with Python 3. The aim was to conserve as much as possible their\n functionality; as a result, they still may be used independent of\n this moderator script. To work successfully, the non-standard Python\n libraries of OpenBabel, numpy, and RDKit have to be installed. The\n computation equally depends on an installation of MOPAC2016. To\n accelerate the scrutiny, the use of GNU Parallel (cf. section \"use\")\n to distribute the computation on multiple CPU is recommended.\n\n This moderator script is written for the CLI of Python 3 only. 
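 (Editor's aside, not part of the original workflow: before a long batch
 run it can help to confirm that the Python-side toolchain imports
 cleanly, e.g. with a one-liner such as

 python3 -c "import openbabel, numpy, rdkit; print('toolchain ok')"

 which exercises exactly the non-standard libraries this script imports.)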
In\n contrast to the scripts by Jensen and Kroman, a working installation\n of GNU Parallel is now a dependency of batch_regiosqm.py.\n\n+ Motivation\n The script was written to facilitate the unsupervised scrutiny of\n multiple input files, especially if the work ahead may be divided\n into multiple smaller input files replacing one large one. These\n tranches are then individually archived in .zip archives. For this,\n the input files may be mentioned explicitly by name. The moderator\n script equally allows all suitable input files to be identified\n automatically; here the user just types a \"-a\" flag.\n\n With the moderator script, the individual scrutiny of a substrate,\n expressed as a SMILES string, is possible, too. This removes the\n need to set up a dedicated input file.\n\n+ Use of the moderator\n The general syntax to work with this moderator script is\n\n python3 batch_regiosqm.py [-a | -s SMILES | FILES]\n\n It is recommended to benefit from the shebang (set up and tested for\n Linux Debian 11/bullseye, branch testing, which the following guide\n assumes) and to set the executable bit. To accelerate the\n overall computation, MOPAC's work is distributed to four concurrent\n threads. If the computer at your disposal has a higher number of\n CPUs, consider adjusting this parameter.\n\n a) To process one or multiple input files you know by name,\n call the script in a pattern like\n\n python3 batch_regiosqm.py benzenes_smiles.csv pyridines_smiles.csv\n\n The scrutiny is performed per group of input files and\n stored as such in a .zip archive. In the present case, you thus\n find benzenes.zip and pyridines.zip with all files relevant\n to the prediction, together with a parameter log documenting the\n setup of the prediction itself. The latter aims to monitor whether\n changes in the tools used, including MOPAC, affect the outcome of\n the prediction.\n\n In the background, the script will ensure each of the input files\n mentioned by you is used only once. To facilitate tracking the\n advance of these computations, the input files are submitted in\n alphabetic order to the scrutiny.\n\n b) To process one or multiple input files which you do not know\n all by their name, call the script by\n\n python batch_regiosqm.py -a\n\n The moderator script then submits any file ending in the pattern\n of \"_smiles.csv\" to the scrutiny (again, in alphabetic order).\n\n c) To submit one individual substrate to the scrutiny expressed as a\n SMILES string, call the script in either pattern of\n\n python batch_regiosqm.py -s \"c1ccncc1\"\n python batch_regiosqm.py -s 'c1ccccc1C'\n\n On the fly, this creates an input file \"special_smiles.csv\" to\n perform the prediction. Thus, all data will be stored in archive\n \"special.zip\", too. To enclose the SMILES string, use either\n double or single quotes only.\n\n If the SMILES string does not contain characters the bash shell\n may misinterpret as \"special\", e.g., slashes, dashes, plus signs,\n you may skip the quotes altogether. You then do this at your own\n risk.\n\n The aforementioned options a), b), and c) are mutually exclusive.\n Options a) and b) are helpful to split the work ahead into smaller\n tranches running, without additional manual intervention, e.g., in\n batches overnight.\n\n+ Use of original script files / what the moderator does for you\n The manual use described below assumes file \"quick_smiles.csv\" in the\n same folder as the script files. 
To ease replication, sub-folder\n \"quick\" contains a copy of the relevant data.\n\n a) preparation\n Drop the input files named in a pattern of *_smiles.csv into the\n current folder and call\n\n python regiosqm.py -g EAS_smiles.csv > EAS_conf.csv\n\n This causes OpenBabel to set up input files about regioisomers of\n protonated intermediates. If the substrate is identified as a\n conformational flexible structure, by default, up to 20 different\n conformers per site to be tested for the electrophilic aromatic\n substitution will be initialized.\n\n b) MOPAC's computation\n The MOPAC input files are identified and relayed to MOPAC by\n\n ls *.mop | parallel -j4 \"/opt/mopac/MOPAC2016.exe {}\"\n\n On a computer with more than four CPUs, you may distribute the\n processing into a higher number of concurrently running, parallel\n tasks by adjusting the -j4 parameter.\n\n c) scrutiny of MOPAC's results\n\n The instruction\n\n python regiosqm.py -a EAS_smiles.csv EAS_conf.csv > EAS_results.csv\n\n creates for each SMILES string in the submitted file EAS_smiles.csv\n a .svg to highlight the sites predicted as more susceptible to the\n electrophilic aromatic substitution. The results are recapitulated\n in a synopsis, EAS_results.csv, too.\n\n d) space cleaning\n The moderator script would move the of the data relevant to the\n computation into a space saving .zip archive for you. Performing\n the prediction manually, this task is yours.\"\"\"\n\n# modules of Python's standard library:\nimport argparse\nimport datetime\nimport os\nimport shutil\nimport subprocess as sub\nfrom platform import python_version\nimport zipfile\n\n# non-standard libraries:\nimport openbabel\nimport numpy\nimport rdkit\n\nimport regiosqm\n\n\ndef get_args():\n \"\"\"Provide a minimal menu to the CLI.\"\"\"\n parser = argparse.ArgumentParser(\n description='Moderator script for regiosqm.')\n\n group = parser.add_mutually_exclusive_group(required=True)\n group.add_argument(\n '-a',\n '--all',\n action='store_true',\n help='Process all _smiles.csv files in the current folder.')\n\n group.add_argument(\n '-s',\n '--smiles',\n default=\"\",\n help='Process only one manually given single SMILES string.')\n\n group.add_argument('files',\n metavar='FILE(S)',\n nargs='*',\n default=[],\n help='Manual input of .smi file(s) to process.')\n\n return parser.parse_args()\n\n\ndef specific_smiles(entry=\"\"):\n \"\"\"Enable the submission of a specific SMILES string.\"\"\"\n register = []\n start_file = str(\"special_smiles.csv\")\n\n try:\n with open(start_file, mode=\"w\", encoding=\"utf8\") as newfile:\n retain = str(f\"special\\t{entry}\")\n newfile.write(retain)\n register.append(start_file)\n except OSError:\n print(f\"Error writing file '{start_file}'. 
Exit.\")\n\n return register\n\n\ndef input_collector():\n \"\"\"Process all suitable input files.\"\"\"\n register = []\n for file in os.listdir(\".\"):\n if file.endswith(\"_smiles.csv\"):\n register.append(file)\n\n register.sort(key=str.lower)\n return register\n\n\ndef prepare_scrutiny(entry=\"\", input_file=\"\", conf_file=\"\"):\n \"\"\"Set up initial .sdf, then .mop MOPAC input files.\"\"\"\n print(f\"Set up scrutiny for EAS group '{entry}'\")\n\n prep = str(f\"python3 regiosqm.py -g {input_file} > {conf_file}\")\n work = sub.Popen(prep, shell=True, stdout=sub.PIPE, stderr=sub.STDOUT)\n work.wait()\n\n\ndef engage_mopac(entry=\"\"):\n \"\"\"Engage MOPAC on four CPUs\"\"\"\n print(f\"Now, MOPAC is working on {entry} data.\")\n compute = str('ls *.mop | parallel -j4 \"mopac {}\"')\n work = sub.Popen(compute, shell=True)\n work.wait()\n\n\ndef analyze_mopac_results(entry=\"\", input_file=\"\", conf_file=\"\", result=\"\"):\n \"\"\"Inspect MOPAC's results, write tables and .svg.\"\"\"\n print(f\"Analysis of MOPAC's work for EAS group '{entry}'\")\n analyze = str(f\"python3 regiosqm.py -a {input_file} {conf_file} > {result}\")\n\n work = sub.Popen(analyze, shell=True, stdout=sub.PIPE, stderr=sub.STDOUT)\n work.wait()\n\n\ndef characterize_scrutiny(entry=\"\", input_file=\"\"):\n \"\"\"Characterize the setup of the scrutiny.\n\n Any change of the tools used may affect which site(s) is / are\n predicted as the more likely to react during an electrophilic\n aromatic substitution. Thus, the versions of the script's tools\n are permanently recorded.\"\"\"\n\n parameter_log = ''.join([entry, \"_parameter.log\"])\n\n # Retrieve the version of MOPAC from a MOPAC .out file.\n for file in os.listdir(\".\"):\n if file.endswith(\".arc\"):\n reference_file = str(file)\n break\n\n with open(reference_file, mode=\"r\", encoding=\"utf8\") as source:\n content = source.readlines()\n mopac_version_line = str(content[4])\n mopac_version_info = mopac_version_line.strip()\n\n # Write the report about the present scrutiny.\n try:\n with open(parameter_log, mode=\"w\", encoding=\"utf8\") as newfile:\n newfile.write(\"Parameters of the scrutiny:\\n\\n\")\n\n newfile.write(f\"input set: {input_file}\\n\")\n\n today = datetime.date.today()\n newfile.write(f\"date: {today} (YYYY-MM-DD)\\n\")\n\n newfile.write(f\"Python: {python_version()}\\n\")\n newfile.write(f\"RegioSQM: {regiosqm.__version__}\\n\")\n\n newfile.write(f\"OpenBabel: {openbabel.__version__}\\n\")\n newfile.write(f\"RDKit: {rdkit.__version__}\\n\")\n newfile.write(f\"numpy: {numpy.__version__}\\n\")\n\n newfile.write(f\"MOPAC: {mopac_version_info[6:]}\\n\")\n\n newfile.write(\"\\nEND\")\n\n print(f\"File '{parameter_log}' reports the setup of the analysis.\")\n except OSError:\n print(f\"Unable to report the analysis' setup to file '{parameter_log}'.\")\n\n\ndef space_cleaning(entry=\"\", input_file=\"\", conf_file=\"\", result=\"\"):\n \"\"\"Archive all relevant data in a .zip file.\"\"\"\n deposit = str(entry).split(\"_smiles\")[0]\n os.mkdir(deposit)\n\n parameter_log = ''.join([deposit, \"_parameter.log\"])\n\n move_by_extension = [\n \".arc\", \".den\", \".end\", \".mop\", \".out\", \".res\", \".sdf\", \".svg\"\n ]\n move_per_run = [input_file, conf_file, result, parameter_log]\n to_move = move_by_extension + move_per_run\n for element in to_move:\n for file in os.listdir(\".\"):\n if file.endswith(element):\n shutil.move(file, deposit)\n\n zip_filename = \"\".join([deposit, \".zip\"])\n backup_zip = zipfile.ZipFile(zip_filename, \"w\")\n for 
folders, subfolders, filenames in os.walk(deposit):\n        backup_zip.write(deposit)\n        for filename in filenames:\n            backup_zip.write(os.path.join(deposit, filename))\n\n    shutil.rmtree(deposit)\n    print(f\"Analysis of EAS group '{deposit}' is completed.\\n\")\n\n\ndef main():\n    \"\"\"Joining the functions together\"\"\"\n    args = get_args()\n    if args.smiles:\n        smiles = args.smiles\n        smi_files = specific_smiles(smiles)\n    elif args.all:\n        smi_files = input_collector()\n    else:\n        # Ensure each group of SMILES is submitted once\n        smi_files = list(set(args.files))\n    smi_files.sort(key=str.lower)\n    for smi_file in smi_files:\n\n        entry = str(smi_file).split(\"_smiles.csv\")[0]\n        input_file = str(smi_file)\n        conf_file = str(smi_file).split(\"_smiles.csv\")[0] + str(\"_conf.csv\")\n        result = str(smi_file).split(\"_smiles.csv\")[0] + str(\"_results.csv\")\n\n        try:\n            prepare_scrutiny(entry, input_file, conf_file)\n            engage_mopac(entry)\n\n            analyze_mopac_results(entry, input_file, conf_file, result)\n\n            characterize_scrutiny(entry, input_file)\n            space_cleaning(smi_file, input_file, conf_file, result)\n        except OSError:\n            continue\n\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"regiosqm/batch_reqiosqm.py","file_name":"batch_reqiosqm.py","file_ext":"py","file_size_in_byte":12677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"430853109","text":"import pickle as pk\nimport json\n\n# The recorded tweets contain data that is not relevant to our analysis.\n# We keep only the attributes likely to be of interest, to keep the\n# handled files light.\n\n\ndef retrieve_tweets(filepath):\n    \"\"\"\n    Input: str\n    Output: list of dict\n    Takes the path of the file containing the tweets in json format and\n    returns a list of tweet dictionaries, keeping only the attributes of\n    interest among the tweets.\n    \"\"\"\n    attributs=['created_at','id','text','user','coordinates','quoted_status','retweeted_status','quote_count','retweet_count','reply_count','favorite_count']\n    tweets=[]\n    with open(filepath,'r') as file:\n        ligne=file.readline()\n        while ligne!=\"*STOP*\":\n            if ligne.replace(\" \",\"\") not in [\"\",\"\\n\"]:\n                try:\n                    json_token=json.loads(ligne.rstrip('\\n'))\n                    dico={}\n                    for cle in json_token.keys():\n                        if cle in attributs:\n                            dico[cle]=json_token[cle]\n                    tweets.append(dico)\n                except json.JSONDecodeError:\n                    pass\n            ligne=file.readline()\n    return tweets\n\ndef save_tweets(liste_tweets,filepath):\n    \"\"\"\n    Input: list of dict, str\n    Output: None\n    Saves the list containing the tweet dictionaries into a .txt document\n    located at the path given as the second argument.\n    \"\"\"\n    with open(filepath,'wb') as file:\n        mon_pickler=pk.Pickler(file)\n        mon_pickler.dump(liste_tweets)\n\ndef save_tweets_json(liste_tweets,filepath):\n    \"\"\"\n    Input: list of dict, str\n    Output: None\n    Saves the list containing the tweet dictionaries, as json lines, into a\n    .txt document located at the path given as the second argument.\n    \"\"\"\n    with open(filepath,'a') as file:\n        for tweet in liste_tweets:\n            jsonString = json.dumps(tweet)\n            file.write(jsonString)\n            file.write(\"\\n\\n\")\n\n\n# Replace the examples with the correct values\ncheminTweetsBruts = \"../data/tweets/\" + \"exempleTweets.txt\"  # Make sure \"*STOP*\" is written at the end of this file after running the script\ncheminTweetsTransformes = \"../data/tweets/\" + \"exempleTweetsTransformes.txt\"\ncheminTweetsJson = \"../data/tweets/\" + \"exempleTweetsJson.txt\"  # Make sure \"*STOP*\" is written at the end of this file after running the script\n\n# Running this file once is enough for the transformation to take place\nsave_tweets(retrieve_tweets(cheminTweetsBruts), cheminTweetsTransformes)\nsave_tweets_json(retrieve_tweets(cheminTweetsBruts), cheminTweetsJson)\n","sub_path":"preprocessing/tweetAttributesPurge.py","file_name":"tweetAttributesPurge.py","file_ext":"py","file_size_in_byte":2887,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"240809172","text":"import cx_Oracle\nimport boto3\nimport csv\nimport uuid\nimport json\n\nclient = boto3.client(\"s3\")\n\n# Initialise the handles up front so the finally block below cannot raise\n# NameError when the connection attempt itself fails.\ncon = None\ncursor = None\n\n# Create a table in Oracle database\ntry:\n    print(\"Start of the program\")\n#    bucket_name = 'munis3tordsbucket'\n#    s3_file_name = 'muni123.csv'\n#    res = client.get_object(Bucket=bucket_name, Key=s3_file_name)\n#    #download_path = '/home/ubuntu/db/'.format(s3_file_name)\n#    download_path = '/tmp/{}{}'.format(uuid.uuid4(), s3_file_name)\n#    client.download_file(bucket_name,s3_file_name,download_path)\n#    csv_data = csv.reader(download_path)\n#    print(\"End of S3 download\")\n\n    #dsn_tns = cx_Oracle.makedsn('czoradb011.cnsafvgtfe5w.us-east-2.rds.amazonaws.com', '152', service_name='ORCL')\n    dsn_tns = cx_Oracle.makedsn(\"czoradb011.cnsafvgtfe5w.us-east-2.rds.amazonaws.com\", \"1521\", \"ORCL\")\n\n    #con = cx_Oracle.connect('admin/cZenix2020@czoradb011.cnsafvgtfe5w.us-east-2.rds.amazonaws.com')\n    con = cx_Oracle.connect(user=r'admin', password='cZenix2020', dsn=dsn_tns)\n    #con = cx_Oracle.connect('admin/cZenix2020@czoradb011.cnsafvgtfe5w.us-east-2.rds.amazonaws.com')\n    print(\"After DB connect\")\n\n    # Now execute the sqlquery\n    cursor = con.cursor()\n    print(\"After cursor\")\n#    ifh = open(download_path, 'r')\n#    csv_data = csv.reader(ifh, delimiter=',')\n#    #print (csv_data)\n#    for row in csv_data:\n#        cursor.bindarraysize = 1\n#        cursor.setinputsizes(int, 20, float)\n#        print(\"iB4 insert\")\n#        print(row)\n#        cursor.execute('insert into muni4 (srollno, name1, efees) VALUES(:1, :2, :3)', row)\n#        print(\"AF insert\")\n\n    #cursor.execute(\"create table newtable45(srollno number, name1 varchar2(10), efees number(10, 2))\")\n#    print(\"After table create\")\n    #rows = [(1, 'Bob', 11.22), (2, 'Kim', 27.33)]\n#    rows = [(5, 'Bob123', 11.22)]\n#    cursor.bindarraysize = 1\n##    cursor.setinputsizes(int, 20, float)\n#    cursor.executemany(\"insert into muni4(srollno, name1, efees) values (:1, :2, :3)\", rows)\n#    con.commit()\n#    #stmt2 = \"insert into muni4 (srollno,name1,efees) VALUES(\\'2345\\',\\'Muni1234\\',\\'1234.23\\'))\"\n    #cursor.execute(stmt2)\n#    stmt = 'select * from muni4'\n#    cursor.execute(stmt)\n#    res=cursor.fetchall()\n#    print(res)\n    #cursor.execute('insert into muni4 (srollno,name1, efees) VALUES ('1234','Third123', '1012.12');')\n    #cursor.execute(\"describe muni4\")\n    #print(\"Table Created successful\")\n\nexcept cx_Oracle.DatabaseError as e:\n    print(\"There is a problem with Oracle\", e)\n\n# The finally block closes the database resources even when an error occurs\nfinally:\n    if cursor:\n        cursor.close()\n    if con:\n        con.close()\n","sub_path":"minimal.py","file_name":"minimal.py","file_ext":"py","file_size_in_byte":2681,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
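# NOTE (editor): a sketch of the connect/close pattern the Oracle record above
# needs -- contextlib.closing guarantees cleanup without pre-initialising the
# handles to None; credentials and DSN are placeholders, not real values:
from contextlib import closing
import cx_Oracle

def run_query(user, password, dsn, sql):
    # Both the connection and the cursor are closed even if execute() raises.
    with closing(cx_Oracle.connect(user=user, password=password, dsn=dsn)) as con:
        with closing(con.cursor()) as cursor:
            cursor.execute(sql)
            return cursor.fetchall()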
coding: utf-8 -*-\n# 一个增强文件复制工具\nimport os\nimport hashlib\nimport logging\nimport logging.handlers\nimport subprocess\nimport time\nimport re\nimport sys\nimport threading\nimport shutil\nimport smtplib\nimport locale\nfrom email import encoders\nfrom email.header import Header\nfrom email.mime.text import MIMEText\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.base import MIMEBase\nfrom email.utils import parseaddr, formataddr\n__author__ = 'Sonny Yang'\n\n# -------------------------- 自定义配置区域 开始 -------------------\n# \"\"\" 源路径配置 \"\"\"\nsource_path = r'F:\\PC CC\\Adobe_Photoshop_CC_64Bit'\n# 账号密码可选\nsource_user = ''\nsource_password = ''\n# \"\"\" 目标路径配置\"\"\"\ntarget_path = r'\\\\172.16.10.4\\inetpub\\wwwroot'\ntarget_user = 'administrator'\ntarget_password = 'dev.5566'\n#\ninclude_subdirectory = True # 是否包含子目录,否为 False\n# 要复制的文件名,为Perl风格的正则表达式, 如只要jar为'^.+\\.jar$'\n# 只要没有后缀名的 '^[^.]+$',\n# 只要jar和py类型为 '^.+\\.(jar|py)$'\n# 任意为 '^.+$',\n# 均不区分大小写\ninclude_file_type = '^.+$'\n\ninclude_min_size = 0 # 最小文件大小,单位kB , 1M=1024KB\ninclude_max_size = -1 # 最大文件大小,负数为不限制\n\n# 要排除的内容,排除优先于包含\nexclude_folder = '' # 排除目录, 排除目录的子目录也将排除\nexclude_file_type = '' # 排除文件类型,Perl风格的正则表达式\nexclude_file = '' # 完整文件名,不区分大小写\n\n# n/minute\ntime_limit_on = True # 开启关闭时间限定\ntime_limit = 24 * 60 # 限定文件修改的时间范围,单位分钟\n\n# 复制前清空目标目录下的文件,不包含文件夹\nemptied = False\n\n# 日志配置\nlog_output_file = True # 保存到文件\nlog_output_terminal = True # 输出到屏幕\n# 其它\nkeep_directory_tree = True # 保持目录结构,False将所有源文件复制到目标根目录,重名的文件将被重命名\n\n# 邮件配置\nemail = False # 开启邮件通知功能\nvariable = {\n # email parameter\n 'smtp_server': 'smtp.exmail.qq.com',\n 'from_address': 'x.x@xx.cn',\n 'password': 'xxxxxx',\n # 收件人信息\n 'to_address': 'x.xx@xxx.cn',\n 'cc_address': '',\n 'mail_text': 'Scripts Program exec complete ',\n 'title': '脚本程序已运行'\n}\n\n# ----------------------------- 自定义配置区域 结束 -------------------------------------\n\n\nclass Public(object):\n system_code = locale.getpreferredencoding()\n system_plat = sys.platform\n\n\nclass Logger(object):\n def __init__(self):\n self.log_stats = True\n self.formatter = logging.Formatter('%(asctime)s - %(levelname)-7s : %(message)s')\n try:\n if not os.path.exists(os.path.join(os.getcwd(), 'logs')):\n os.mkdir('logs')\n except Exception:\n self.log_stats = False\n\n self.__write_file_logger()\n self.__terminal_file_logger()\n\n def __write_file_logger(self):\n \"\"\"只写入文件\"\"\"\n self.write_log = logging.getLogger('file')\n self.write_log.setLevel(logging.DEBUG)\n if self.log_stats and log_output_file:\n of = logging.handlers.RotatingFileHandler(\"logs\\\\%s.log\" % str('run_log'), mode='a',\n maxBytes=1024 * 1024 * 10, backupCount=10)\n of.setLevel(logging.DEBUG)\n of.setFormatter(self.formatter)\n self.write_log.addHandler(of)\n self.write_log.debug('日志写入模块初始化成功!')\n\n def __terminal_file_logger(self):\n \"\"\"输出到终端,并继承写入行为\"\"\"\n self.terminal_log = logging.getLogger('file.terminal')\n ot = logging.StreamHandler()\n ot.setLevel(logging.DEBUG)\n ot.setFormatter(self.formatter)\n if log_output_terminal:\n self.terminal_log.addHandler(ot)\n self.terminal_log.debug('日志终端输出模块初始化成功')\n if not self.log_stats:\n self.terminal_log.error('因为没有写入权限,日志无法写入到文件')\n\n def get_logger(self):\n return self.terminal_log\n\n\ndef emptied_folder(path):\n \"\"\"清空目录下的文件,不包含文件夹\"\"\"\n for file in os.listdir(path):\n file = os.path.join(path, file)\n if os.path.isfile(file):\n os.remove(file)\n\n\nclass GETFILE(object):\n def __init__(self):\n self.__curr_time = time.time()\n self.final_copy_list = {}\n 
self.__make_file_list()\n\n def __time_filter(self, path, file):\n if not time_limit_on:\n return True\n file_mtime = os.path.getmtime(os.path.join(path, file))\n if self.__curr_time - (time_limit * 60) < file_mtime:\n return True\n else:\n return False\n\n @staticmethod\n def __size_filter(path, file):\n file_size = os.path.getsize(os.path.join(path, file))\n if include_max_size > 0:\n if include_max_size > file_size > include_min_size:\n return True\n else:\n return False\n else:\n if file_size > include_min_size:\n return True\n else:\n return False\n\n @staticmethod\n def __type_filter(file):\n if re.match(include_file_type, file, re.I):\n return True\n else:\n return False\n\n @staticmethod\n def __exclude_property(path='', file=''):\n if file != '' and exclude_file_type != '':\n if re.match(exclude_file_type, file, re.I):\n return False\n if path != '' and exclude_folder != '':\n if re.search('^%s' % exclude_folder, path, re.I):\n return False\n if file != '' and exclude_file.upper() == file.upper():\n return False\n return True\n\n def __make_file_list(self):\n for root, dirs, files in os.walk(source_path):\n if not self.__exclude_property(path=root):\n continue # 被排除的目录\n for file in files:\n if not self.__exclude_property(file=file):\n continue # 被排除的文件\n if not self.__time_filter(root, file):\n continue # 时间限定\n if not self.__size_filter(root, file):\n continue # 大小限定\n if not self.__type_filter(file):\n continue # 类型限定\n\n src_file_full_path = os.path.join(root, file)\n\n if keep_directory_tree: # 保持目录结构\n # 复制目的地的目标路径\n dst_file_path = os.path.join(target_path, root.split(source_path)[-1].strip('\\\\'))\n dst_file_path = dst_file_path.rstrip('\\\\') + '\\\\'\n\n dst_file_full_path = os.path.join(dst_file_path, file)\n\n else:\n dst_file_path = target_path\n dst_file_full_path = os.path.join(target_path, file)\n count = 0\n\n def check(name, count):\n \"\"\"处理可能文件名重名的问题\"\"\"\n test_name = name\n a = '.'.join(test_name.split('.')[:-1])\n b = test_name.split('.')[-1]\n while True:\n for key, value in self.final_copy_list.items():\n if value[-1] == test_name:\n test_name = a + ' - %s.' % str(count) + b\n count += 1\n break\n else:\n return test_name\n dst_file_full_path = check(dst_file_full_path, count)\n\n self.final_copy_list[src_file_full_path] = [dst_file_path, dst_file_full_path]\n\n if not include_subdirectory:\n break # 不包含子目录 一次后跳出\n\n @property\n def get_result(self):\n return self.final_copy_list\n\n\nclass CHECK_PATH(object):\n def __init__(self):\n self.__connect_stats = []\n self.dst_user = target_user\n self.dst_password = target_password\n self.src_user = source_user\n self.src_password = source_password\n self.__connect()\n self._write_check()\n\n def __connect(self):\n for path, user, password in zip([target_path, source_path], [self.dst_user, self.src_user],\n [self.dst_password, self.src_password]):\n\n if re.match(r'^\\\\\\\\', path) and user != \"\":\n logs.info('尝试登陆 %s ...' 
% path)\n if re.match(r'^(\\\\\\\\\\S+?\\\\\\w\\$)', path):\n command = r'net use %s %s /user:%s' % (re.match(r'^(\\\\\\\\\\S+?\\\\\\w\\$)', path).group(), password, user)\n else:\n command = r'net use %s %s /user:%s' % (re.match(r'^(\\\\\\\\\\S+?\\\\)', path).group(), password, user)\n\n proc = subprocess.Popen(command, stderr=subprocess.PIPE)\n code = proc.wait()\n if code != 0:\n if Public.system_plat == 'win32':\n logs.debug(str(proc.stderr.read(), encoding=Public.system_code).replace('\\r\\n', ' '))\n else:\n logs.debug(str(proc.stderr.read(), encoding=Public.system_code).replace('\\n', ' '))\n logs.error('无法登录到路径 %s ' % path)\n sys.exit(-1)\n else:\n logs.info('登陆成功: %s' % path)\n if self.__dir_check(path):\n self.__connect_stats.append(path)\n else:\n sys.exit(-1)\n else:\n if self.__dir_check(path):\n self.__connect_stats.append(path)\n else:\n logs.critical('无法连接到路径 %s ' % path)\n sys.exit(-1)\n\n def __dir_check(self, path):\n if os.path.exists(path):\n return True\n else:\n try:\n os.makedirs(path)\n except BaseException as err:\n logs.critical('%s ,不存在且无法创建' % err)\n return False\n else:\n return True\n\n @staticmethod\n def _write_check():\n try:\n with open(os.path.join(target_path, 'test'), 'w') as f:\n f.write('write.test')\n except PermissionError as err:\n logs.critical('没有权限写入目标路径,程序结束\\n%s' % str(err))\n sys.exit(-1)\n except FileNotFoundError as err:\n logs.critical(' 目标路径不存在,程序结束\\n%s' % str(err))\n sys.exit(-1)\n else:\n try:\n os.remove(os.path.join(target_path, 'test'))\n except BaseException:\n pass\n\n def dis_connect(self):\n for path in self.__connect_stats:\n os.system(r'net use %s /delete' % path)\n\n\nclass MD5(object):\n def __init__(self, copy_list):\n self.compared_result = {}\n self.compared_list = {}\n self.src_md5 = {}\n self.dst_md5 = {}\n self.raw = copy_list\n\n self.__pre_process()\n\n def __pre_process(self):\n \"\"\"处理成字典 {src:dst}\"\"\"\n for key, value in self.raw.items():\n self.compared_list[key] = value[-1]\n\n @staticmethod\n def __run_file_md5(file):\n m = hashlib.md5()\n with open(file, 'rb') as f:\n while True:\n data = f.read(10240000)\n if not data:\n break\n m.update(data)\n return m.hexdigest()\n\n def make_src_md5(self):\n logs.debug('计算源文件md5...')\n for f_list in self.compared_list.keys():\n self.src_md5[f_list] = self.__run_file_md5(f_list)\n\n def make_dst_md5(self):\n logs.debug(' ----------------------开始计算md5---------------------')\n for dst in self.compared_list.values():\n try:\n md5_text = self.__run_file_md5(dst)\n self.dst_md5[dst] = md5_text\n except FileNotFoundError:\n self.dst_md5[dst] = '0000000000' # 目标文件不存在\n else:\n logs.debug('file md5 %s = %s' % (dst, md5_text))\n self.__compared_md5()\n\n def __compared_md5(self):\n for src, dst in self.compared_list.items():\n if self.src_md5[src] == self.dst_md5[dst]:\n logs.debug('OK! Check through, src file %s, dst file %s, %s = %s' %\n (src, dst, self.src_md5[src], self.dst_md5[dst]))\n self.compared_result[src] = True\n else:\n logs.debug('ERROR! 
Check Fail, src file %s, dst file %s, %s != %s' %\n (src, dst, self.src_md5[src], self.dst_md5[dst]))\n self.compared_result[src] = False\n\n @property\n def get_result(self):\n return self.compared_result\n\n\nclass COPYFILE(Public):\n def __init__(self, copy_list):\n self.err_log = {}\n self.final_copy_list = copy_list\n if 'win32' == Public.system_plat:\n self.__copy_file_windows()\n else:\n self.__copy_file()\n\n def __copy_file_windows(self):\n for src, dst in self.final_copy_list.items():\n command = 'xcopy \"%s\" \"%s\" /C /R /I /Y' % (src, dst[0])\n proc = subprocess.Popen(command, bufsize=-1, shell=False, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n recode = proc.wait()\n\n stdout = str(proc.stdout.read(), encoding=Public.system_code).replace('\\r\\n', ' ')\n errout = str(proc.stderr.read(), encoding=Public.system_code)\n logs.debug('OK! file: %s , stdout: %s' % (src, stdout))\n if recode != 0:\n self.err_log[src] = errout\n logs.error('ERROR! file %s is error: %s' % (src, errout))\n\n def __copy_file(self):\n for src, dst in self.final_copy_list.items():\n if not os.path.exists(dst[0]):\n try:\n os.makedirs(dst[0])\n except BaseException as err:\n logs.error('目标路径 %s 无法创建:%s , 文件 %s 停止复制。' % (dst[0], str(err), src))\n continue\n logs.debug('copy source file %s to %s' % (src, shutil.copy2(src=src, dst=dst[0])))\n\n\nclass Send_Mail(object):\n def __init__(self, text=variable['mail_text'], filename='', title=variable['title']):\n # 发件人\n self.from_addr = variable['from_address']\n # 发件人密码\n self.password = variable['password']\n # 收件人列表\n self.to_addr = [x.strip() for x in variable['to_address'].split(';')]\n self.Cc_addr = [x.strip() for x in variable['cc_address'].split(';')]\n # 邮件主题\n self.mail_title = title\n self.smtp_server = variable['smtp_server']\n self.mail_text = text\n self.filename = filename\n\n @staticmethod\n def __format_addr(s):\n name, addr = parseaddr(s)\n return formataddr((Header(name, 'utf-8').encode(), addr))\n\n def __sendmail(self):\n self.msg = MIMEMultipart()\n self.msg.attach(MIMEText('%s' % self.mail_text, 'plain', 'utf-8'))\n\n self.msg['From'] = self.__format_addr('Python Program <%s>' % self.from_addr)\n self.msg['To'] = ';'.join(self.to_addr)\n self.msg['Cc'] = ';'.join(self.Cc_addr)\n self.msg['Subject'] = Header(self.mail_title, 'utf-8').encode()\n\n self.mime = MIMEBase('text', 'xlsx', filename=os.path.split(self.filename)[-1])\n self.mime.add_header('Content-Disposition', 'attachment', filename=os.path.split(self.filename)[-1])\n self.mime.add_header('Content-ID', '<0>')\n self.mime.add_header('X-Attachment-Id', '0')\n if os.path.isfile(self.filename):\n with open(self.filename, 'rb') as f:\n logs.debug('开始读取附件...')\n self.mime.set_payload(f.read())\n encoders.encode_base64(self.mime)\n self.msg.attach(self.mime)\n try:\n logs.info(\"开始解析邮件服务器信息\")\n server = smtplib.SMTP_SSL(self.smtp_server, 465)\n # server.set_debuglevel(1)\n logs.info(\"开始登录到smtp服务器\")\n server.login(self.from_addr, self.password)\n logs.info(\"登录到SMTP服务器成功开始发送邮件\")\n server.sendmail(self.from_addr, self.to_addr, self.msg.as_string())\n server.close()\n except smtplib.SMTPAuthenticationError:\n logs.error(\"登录到smtp服务器失败, 无法发送邮件\")\n except Exception as err:\n logs.error('邮件发送失败\\nError:\\n' + str(err) + '\\n\\nHeader:\\n' + self.msg.as_string())\n else:\n logs.info(\"邮件已成功发送到%s\" % self.to_addr)\n\n def send(self):\n self.__sendmail()\n\n\nclass RESULT(object):\n def __init__(self, compared_result, attached):\n self.fail_list = []\n self.success_list = []\n 
self.attached = attached\n self.compared_result = compared_result\n # self.err_log = err_log\n self.__check_fail()\n self.__show_fail()\n\n def __check_fail(self):\n logs.debug('--------------# check copy result ----------')\n for file, result in self.compared_result.items():\n if result:\n self.success_list.append(file)\n else:\n self.fail_list.append(file)\n\n def __show_fail(self):\n if len(self.fail_list) == 0:\n logs.info('所有复制已完成,没有错误信息,校验通过!')\n else:\n for file in self.fail_list:\n logs.error('Source File %s Md5 Check fail! ' % file)\n\n def send_mail(self):\n \"\"\"构建邮件内容\"\"\"\n if not email:\n return 0\n title = variable['title']\n if len(self.fail_list) != 0:\n mail_text = '文件复制中出现错误!\\n'\n for file in self.fail_list:\n mail_text += '文件MD5校验失败,请检查。 from %s\\n' % file\n # for filename, err in self.err_log.items():\n # mail_text = mail_text + '错误信息: %s %s\\n' % (filename, err)\n mail_text = mail_text + self.attached\n title += ',但出现了错误!'\n else:\n mail_text = '所有复制已完成,没有错误信息,校验通过!\\n' + self.attached\n title += ', 并正确完成'\n\n mail = Send_Mail(text=mail_text, title=title)\n mail.send()\n\n def delete_fail_file(self):\n pass\n\n\ndef main_process():\n start = time.time()\n # 检查源于目标路径\n check_path = CHECK_PATH()\n # 确定要复制的文件\n get_file = GETFILE()\n copy_file_list = get_file.get_result\n #\n if emptied:\n emptied_folder(target_path)\n # 计算源文件md5\n md5 = MD5(copy_file_list)\n # 多线程处理\n make_md5 = threading.Thread(target=md5.make_src_md5, args=())\n make_copy = threading.Thread(target=COPYFILE, args=(copy_file_list, ))\n make_md5.start()\n make_copy.start()\n make_md5.join()\n make_copy.join()\n # 单线程\n # md5.make_src_md5()\n # COPYFILE(copy_file_list)\n\n md5.make_dst_md5()\n md5_result = md5.get_result\n\n end = time.time()\n check_path.dis_connect()\n use_time = '本次用时 %.2f s' % (end - start)\n logs.debug(use_time)\n result_process = RESULT(md5_result, use_time)\n # 后续邮件\n result_process.send_mail()\n\n\nif __name__ == '__main__':\n logger = Logger()\n logs = logger.get_logger()\n main_process()\n\n","sub_path":"python/copy-file.py","file_name":"copy-file.py","file_ext":"py","file_size_in_byte":20343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"600434076","text":"# _*_ coding: utf-8 _*_\n\"\"\"This file is a AirbusGroup spider created on top of the ATSSpider\nscrapy crawl airbusgroup -a url=\"http://www.airbusgroup.com/int/en/people-careers/jobs-and-applications/search-for-vacancies.html\" -a mining_job_id=999 -a iteration=1 -a extract=1\nsample url:\n http://www.airbusgroup.com/int/en/people-careers/jobs-and-applications/search-for-vacancies.html\n\"\"\"\nfrom urlparse import urljoin\nfrom re import compile\n\nfrom scrapy.selector import Selector\nfrom scrapy.http import Request\n\nfrom brightcorp.base.atsspiders import ATSSpider\nfrom brightcorp.items import BrightcorpItemLoader\nfrom brightcorp.processors import Prefix\n\n\nclass AirbusGroup(ATSSpider):\n\n name = \"airbusgroup\"\n search_url_reg = compile(\"rows=\\d+\")\n ref_reg = compile(\"jobid=([0-9A-F]+)\")\n details_content_map = {\n \"Description\": \"description\",\n \"Tasks\": \"responsibilities\",\n \"Required skills\": \"skills\"\n }\n # xpaths\n count_xpath = \"//div[@class='result-index']/span/b[@class='highlight']/text()\"\n search_url_xpath = \"//button[text()='Show more results']/@data-ajax\"\n jobs_xpath = \"//ul[@class='result-list']/li/div/a\"\n job_link_xpath = \"./@href\"\n items_xpaths = {\n 'jobcategory': \"./div/span[@class='category']/text()\",\n 
'title': \"./div/h2[@class='title']/text()\",\n 'location': \"./div//span[@class='location']/text()\",\n 'company': \"./div//span[@class='department']/text()\",\n }\n job_details_xpath = \"//div[@class='content']/node()\"\n\n def parse(self, response):\n sel = Selector(response)\n expected_count = sel.xpath(self.count_xpath).extract()\n if expected_count:\n self.expected_job_count = expected_count[0]\n # replace query parameter value of 'rows' with expected_job_count\n # to get all jobs\n search_sel = sel.xpath(self.search_url_xpath).extract()\n if search_sel:\n search_url = urljoin(response.url, search_sel[0])\n search_url = self.search_url_reg.sub(\n \"rows=\"+expected_count[0],\n search_url\n )\n yield Request(search_url, callback=self.parse_job_list)\n\n def parse_job_list(self, response):\n sel = Selector(response)\n for job in sel.xpath(self.jobs_xpath):\n job_link = job.xpath(self.job_link_xpath).extract()\n if job_link:\n job_url = urljoin(response.url, job_link[0])\n meta = {}\n for item, xpath in self.items_xpaths.iteritems():\n meta[item] = job.xpath(xpath).extract()\n yield Request(\n job_url, meta=meta, callback=self.parse_job_callback()\n )\n\n def parse_job(self, response):\n sel = Selector(response)\n loader = BrightcorpItemLoader(response=response)\n for item in self.items_xpaths:\n loader.add_value(item, response.meta[item])\n loader.add_value(\n 'referencenumber', response.url, Prefix(self.name+\"-\"),\n re=self.ref_reg\n )\n\n \"\"\"job details contains skills,description,responsibilities, get all nodes\n after h3 till the next h3 and add as corresponding item\"\"\"\n job_details = sel.xpath(self.job_details_xpath).extract()\n found_item = \"\"\n contents = []\n for node in job_details:\n if \"> \")\n\nquery \t\t= urllib.parse.urlencode({'q' : query })\n\nresponse \t= urllib.request.urlopen(url + query).read().decode('utf8')\n\ndata \t\t= json.loads(response)\n\nresults \t= data['responseData']['results']\n\nfor result in results:\n\ttitle = result['title']\n\turl = result['url']\n\tprint(title + '; ' + url)","sub_path":"Programs/Google-Command-Line/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"47817525","text":"# Uses the PokeAPI to return stats of the pokemon queried.\r\nimport json\r\nfrom urllib.request import *\r\n\r\nclass Pokemon:\r\n\r\n\tdef __init__(self, name, num, type1, ability, move1, baseHP, baseAtk, baseDef, baseSPatk, baseSPdef, baseSpeed, sprite):\r\n\t\tself.p_name = name\r\n\t\tself.id = num\r\n\t\tself.type1 = type1\r\n\t\tself.ability = ability\r\n\t\tself.move1 = move1\r\n\t\tself.baseHP = baseHP\r\n\t\tself.baseAtk = baseAtk\r\n\t\tself.baseDef = baseDef\r\n\t\tself.baseSPatk = baseSPatk\r\n\t\tself.baseSPdef = baseSPdef\r\n\t\tself.baseSpeed = baseSpeed\r\n\t\tself.sprite = sprite\r\n\r\n # Optional Arguments\r\n\t\t#self.type2\r\n\t\t#self.evolve_lv = \r\n #self.evolve_to = \r\n\t\t#self.level = level\r\n #self.nickname = nickname\r\n\r\n\tdef getName(self):\r\n\t\treturn self.p_name\r\n\r\n\tdef getID(self):\r\n\t\treturn self.id\r\n\r\n\tdef getAbility(self):\r\n\t\treturn self.ability\r\n\r\n\tdef getType(self):\r\n\t\treturn self.type1\r\n\r\n\tdef getMove1(self):\r\n\t\treturn self.move1\r\n\r\n\tdef getBaseHP(self):\r\n\t\treturn self.baseHP\r\n\r\n\tdef getBaseAtk(self):\r\n\t\treturn self.baseAtk\r\n\t\r\n\tdef getBaseDef(self):\r\n\t\treturn self.baseDef\r\n\t\r\n\tdef getBaseSPatk(self):\r\n\t\treturn 
self.baseSPatk\r\n\r\n\tdef getSPdef(self):\r\n\t\treturn self.baseSPdef\r\n\t\r\n\tdef getSpeed(self):\r\n\t\treturn self.baseSpeed\r\n\t\r\n\tdef getSprite(self):\r\n\t\treturn self.sprite\r\n\r\n\r\n# The url to query\r\nurl = 'https://pokeapi.co/api/v2/pokemon/'\r\n\r\n# query stores the user's input which should be a pokemon name\r\nquery = input('Pokemon Name:')\r\n\r\n# changes the input to lowercase letters\r\nquery = query.lower()\r\n\r\n# add a '/' to the end of the query to complete the url\r\nquery = query + '/'\r\n\r\n# debug check the url\r\nprint(url + query)\r\n\r\n# create the url to search for\r\nurl = url + query\r\n\r\n# request info from the API\r\nreq = Request(url)\r\n\r\n# headers - things that are sometimes required for API queries and idk what\r\n# exactly it's for though hahahaha sorry\r\nreq.add_header('User-Agent', \"pi\")\r\n\r\n# read the url response... tbh don't rememeber exactly how this works too\r\nresponse = urlopen(req)\r\ncontents = response.read()\r\ntext = contents.decode('utf8')\r\n\r\n# data is ALL the data of the pokemon queried\r\ndata = json.loads(text)\r\n\r\n# stores Pokemon name\r\nname = data['name']\r\nprint('Pokemon: ' + name)\r\n\r\nidNum = data['id']\r\nprint('ID: ' + str(idNum))\r\n\r\n#store Pokemon type\r\ntypelist = data['types']\r\ntypedict0 = typelist[0]\r\ntypedict01 = typedict0[\"type\"]\r\ntype0 = typedict01.get(\"name\")\r\n\r\nindices = len(typelist)\r\n\r\nif indices > 1:\r\n typedict1 = typelist[1]\r\n typedict11 = typedict1[\"type\"]\r\n type1 = typedict11.get(\"name\")\r\n print('Type: ' + type0 + \", \" + type1)\r\nelse:\r\n print('Type: ' + type0)\r\n\r\n \r\n#stores Pokemon abilities\r\nabilities = data['abilities']\r\nabilities0 = abilities[0]\r\nhidden0 = abilities0[\"is_hidden\"]\r\nabilities01 = abilities0[\"ability\"]\r\nability0 = abilities01[\"name\"]\r\n\r\nif hidden0 == True:\r\n print('Hidden Ability: ' + ability0)\r\nelse:\r\n print('Ability 1: ' + ability0)\r\n\r\nindices = len(abilities)\r\n \r\nif indices > 1:\r\n abilities1 = abilities[1]\r\n hidden1 = abilities1[\"is_hidden\"]\r\n abilities11 = abilities1[\"ability\"]\r\n ability1 = abilities11[\"name\"]\r\n\r\n if hidden1 == True:\r\n print('Hidden Ability: ' + ability1)\r\n else:\r\n print('Ability 2: ' + ability1)\r\n\r\nif indices > 2:\r\n abilities2 = abilities[2]\r\n hidden2 = abilities2[\"is_hidden\"]\r\n abilities21 = abilities2[\"ability\"]\r\n ability2 = abilities21[\"name\"]\r\n\r\n if hidden2 == True:\r\n print('Hidden Ability: ' + ability2)\r\n else:\r\n print('Ability 3: ' + ability2)\r\n\r\n\r\n#stores Pokemon moves\r\nmoves = data['moves']\r\nmoves1 = moves[0]\r\nmoves01 = moves1['move']\r\nmove1 = moves01['name']\r\nprint('Move 1: ' + move1)\r\n\r\nindices = len(moves)\r\n\r\nif indices > 1:\r\n moves2 = moves[1]\r\n moves02 = moves2['move']\r\n move2 = moves02['name']\r\n print('Move 2: ' + move2)\r\n\r\nif indices > 2:\r\n moves3 = moves[2]\r\n moves03 = moves3['move']\r\n move3 = moves03['name']\r\n print('Move 3: ' + move3)\r\n\r\nif indices > 3:\r\n moves4 = moves[3]\r\n moves04 = moves4['move']\r\n move4 = moves04['name']\r\n print('Move 4: ' + move4)\r\n\r\n\r\n#Pokemon stats\r\nstats = data['stats']\r\n\r\nstats0 = stats[0]\r\nbasestat0 = stats0['base_stat']\r\nstat0 = stats0['stat']\r\nname0 = stat0['name']\r\nprint (name0 + \":\", basestat0)\r\n\r\nstats1 = stats[1]\r\nbasestat1 = stats1['base_stat']\r\nstat1 = stats1['stat']\r\nname1 = stat1['name']\r\nprint (name1 + \":\", basestat1)\r\n\r\nstats2 = stats[2]\r\nbasestat2 = 
stats2['base_stat']\r\nstat2 = stats2['stat']\r\nname2 = stat2['name']\r\nprint (name2 + \":\", basestat2 )\r\n\r\nstats3 = stats[3]\r\nbasestat3 = stats3['base_stat']\r\nstat3 = stats3['stat']\r\nname3 = stat3['name']\r\nprint (name3 + \":\", basestat3 )\r\n\r\nstats4 = stats[4]\r\nbasestat4 = stats4['base_stat']\r\nstat4 = stats4['stat']\r\nname4 = stat4['name']\r\nprint (name4 + \":\", basestat4 )\r\n\r\nstats5 = stats[5]\r\nbasestat5 = stats5['base_stat']\r\nstat5 = stats5['stat']\r\nname5 = stat5['name']\r\nprint (name5 + \":\", basestat5 )\r\n\r\n\r\n#Pokemon image\r\nsprites = data['sprites']\r\nsprites1 = sprites['front_default']\r\nprint(sprites1)\r\n\r\nPoke = Pokemon(name, idNum, type0, ability0, move1, basestat0, basestat1, basestat2, basestat3, basestat4, basestat5, sprites1)\r\n","sub_path":"PokeAPI.py","file_name":"PokeAPI.py","file_ext":"py","file_size_in_byte":5177,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"550505080","text":"import torch\nimport torchvision.datasets as dsets\nfrom torchvision import transforms\n\nclass Data_Loader():\n def __init__(self, image_path, image_size, batch_size, shuf=True):\n self.path = image_path\n self.imsize = image_size\n self.batch = batch_size\n self.shuf = shuf\n self.trans = transforms.Compose(\n [\n transforms.Resize((self.imsize, self.imsize)),\n transforms.ToTensor(),\n transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),\n\n ]\n )\n \n def load_celeb(self):\n dataset = dsets.ImageFolder(self.path, transform=self.trans)\n return dataset\n\n def loader(self):\n dataset = self.load_celeb()\n\n loader = torch.utils.data.DataLoader(\n dataset=dataset,\n batch_size=self.batch,\n shuffle=self.shuf,\n num_workers=0,\n drop_last=True\n )\n\n return loader\n","sub_path":"SAGAN/dataloader.py","file_name":"dataloader.py","file_ext":"py","file_size_in_byte":964,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"45037612","text":"def commonListItems(aList, bList):\r\n\t\"\"\"return a list containing all elements common to aList and bList\"\"\"\r\n\tcommonList = []\r\n\tfor item in aList:\r\n\t\tif item in bList:\r\n\t\t\tcommonList.append(item)\r\n\treturn commonList\r\n\r\ndef gcd(a, b):\r\n\t\"\"\"return the greatest common divisor of a and b\"\"\"\r\n\taFactors = []\r\n\tbFactors = []\r\n\t\r\n\t# add factors of a to list\r\n\tfor i in range (1, a+1):\r\n\t\tif a % i == 0: # i is a factor of a\r\n\t\t\taFactors.append(i)\r\n\t\r\n\t# add factors of b to list\r\n\tfor i in range(1, b+1):\r\n\t\tif b % i == 0: # i is a factor of b\r\n\t\t\tbFactors.append(i)\r\n\t\r\n\t# find largest common item in each list\r\n\tcommonFactors = commonListItems(aFactors, bFactors)\r\n\t\r\n\t# sort commonFactors and return the highest number\r\n\tcommonFactors.sort()\r\n\treturn commonFactors[-1]\r\n\t\r\n\t\r\ndef main():\r\n\tprint(gcd(61, 79))\r\n\r\nif __name__ == \"__main__\":\r\n\tmain()","sub_path":"final/gcd.py","file_name":"gcd.py","file_ext":"py","file_size_in_byte":843,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"275271489","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport unittest\nimport yaml\nimport os\nimport shutil\n\nimport marmot\nfrom marmot.representations.wmt_representation_generator import WMTRepresentationGenerator\nfrom marmot.experiment.import_utils import build_object\n\ndef join_with_module_path(loader, node):\n \"\"\" define custom tag handler to 
join paths with the path of the marmot module \"\"\"\n module_path = os.path.dirname(marmot.representations.tests.__file__)\n resolved = loader.construct_scalar(node)\n return os.path.join(module_path, resolved)\n\n## register the tag handler\nyaml.add_constructor('!join', join_with_module_path)\n\n\nclass WMTRepresentationGeneratorTests(unittest.TestCase):\n\n def setUp(self):\n module_path = os.path.dirname(__file__)\n self.module_path = module_path\n test_config = os.path.join(module_path, 'test_config.yaml')\n\n with open(test_config, \"r\") as cfg_file:\n self.config = yaml.load(cfg_file.read())\n\n self.wmt_target = os.path.join(module_path, 'test_data/EN_ES.tgt_ann.train')\n self.wmt_source = os.path.join(module_path, 'test_data/EN_ES.source.train')\n self.tmp_dir = os.path.join(module_path, 'tmp_dir')\n\n def tearDown(self):\n if os.path.exists(self.tmp_dir) and os.path.isdir(self.tmp_dir):\n shutil.rmtree(self.tmp_dir)\n\n def test_load_from_config(self):\n generator = build_object(self.config['representations']['training'][0])\n data_obj = generator.generate()\n self.assertTrue('target' in data_obj)\n self.assertTrue('source' in data_obj)\n self.assertTrue('tags' in data_obj)\n self.assertTrue(len(data_obj['target']) == len(data_obj['source']))\n self.assertTrue(len(data_obj['target']) == len(data_obj['tags']))\n\n def test_no_saved_files(self):\n generator = WMTRepresentationGenerator(self.wmt_target, self.wmt_source)\n data_obj = generator.generate()\n self.assertTrue('target' in data_obj)\n self.assertTrue('source' in data_obj)\n self.assertTrue('tags' in data_obj)\n self.assertTrue(len(data_obj['target']) == len(data_obj['source']))\n self.assertTrue(len(data_obj['target']) == len(data_obj['tags']))\n\n def test_save_files(self):\n generator = WMTRepresentationGenerator(self.wmt_target, self.wmt_source, tmp_dir=self.tmp_dir, persist=True)\n data_obj = generator.generate()\n target = os.path.join(self.tmp_dir, 'EN_ES.tgt_ann.train.target')\n tags = os.path.join(self.tmp_dir, 'EN_ES.tgt_ann.train.tags')\n source = os.path.join(self.tmp_dir, 'EN_ES.source.train.txt')\n self.assertTrue(os.path.exists(self.tmp_dir) and os.path.isdir(self.tmp_dir))\n self.assertTrue(os.path.exists(target) and os.path.isfile(target))\n self.assertTrue(os.path.exists(tags) and os.path.isfile(tags))\n self.assertTrue(os.path.exists(source) and os.path.isfile(source))\n\n\nif __name__ == '__main__':\n unittest.main()\n\n","sub_path":"marmot/representations/tests/test_wmt_representation_generator.py","file_name":"test_wmt_representation_generator.py","file_ext":"py","file_size_in_byte":2988,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"196739441","text":"# https://leetcode.com/problems/network-delay-time/\n# There are N network nodes, labelled 1 to N.\n\n# Given times, a list of travel times as directed edges times[i] = (u, v, w), where u is the source node, v is the target node, and w is the time it takes for a signal to travel from source to target.\n\n# Now, we send a signal from a certain node K. How long will it take for all nodes to receive the signal? 
If it is impossible, return -1.\n\nimport collections\n\n# Time O(N*N)\ndef networkDelayTime(times, N, K):\n graph = collections.defaultdict(list)\n for s,t,c in times:\n graph[s].append((t,c))\n cost_table = {}.fromkeys(range(1,N+1),float('inf'))\n visited = {}.fromkeys(range(1,N+1), 0)\n node = K\n cost_table[K] = 0\n while node != None: # N time\n for neighbor, cost in graph[node]: # N time\n new_cost = cost_table[node] + cost\n if visited[neighbor] == 0 and new_cost < cost_table[neighbor]:\n cost_table[neighbor] = new_cost\n visited[node] = 1\n min_value = float('inf')\n min_name = None\n for i in cost_table:\n if visited[i]==0 and cost_table[i] < min_value:\n min_value = cost_table[i]\n min_name = i\n node = min_name\n max_cost = max(cost_table.values())\n return max_cost if max_cost < float('inf') else -1\n","sub_path":"network_delay_time.py","file_name":"network_delay_time.py","file_ext":"py","file_size_in_byte":1360,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"587355921","text":"from django.contrib.auth import get_user_model\nfrom django.utils.translation import ugettext as _\n\nfrom rest_framework.viewsets import ReadOnlyModelViewSet\nfrom rest_framework import serializers\nfrom rest_framework_jwt.compat import get_username_field\nfrom rest_framework_jwt.serializers import VerificationBaseSerializer\nfrom rest_framework_jwt.settings import api_settings\nfrom rest_framework_jwt.views import JSONWebTokenAPIView\n\nfrom easydmp.auth.models import User\n\n\njwt_payload_handler = api_settings.JWT_PAYLOAD_HANDLER\njwt_encode_handler = api_settings.JWT_ENCODE_HANDLER\n\n\ndef truncate_email(email):\n userpart, domain = email.rsplit('@', 1)\n domain = str(len(domain))\n return '{}@{}'.format(userpart, domain)\n\n\nclass AuthorizeJSONWebTokenSerializer(VerificationBaseSerializer):\n AUTHORIZED = ('admin', 'bird')\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.fields[self.username_field] = serializers.CharField()\n\n def authorized_usernames(self):\n usernames = list(getattr(self, 'AUTHORIZED', []))\n superusers = User.objects.filter(is_superuser=True).values_list('username', flat=True)\n usernames.extend(superusers)\n return usernames\n\n @property\n def username_field(self):\n return get_username_field()\n\n def validate(self, attrs):\n token = attrs.get('token')\n username = attrs.get(self.username_field)\n\n if token and username:\n payload = self._check_payload(token=token)\n user = self._check_user(payload=payload)\n\n if user:\n if not user.is_active:\n msg = _('User account is disabled.')\n raise serializers.ValidationError(msg)\n\n # Nobody can impersonate magical users\n if username in self.authorized_usernames():\n msg = _('Username not permitted.')\n raise serializers.ValidationError(msg)\n\n # Only magical users may impersonate\n if not user.username in self.authorized_usernames():\n msg = _('User is not permitted to authorize.')\n raise serializers.ValidationError(msg)\n\n User = get_user_model()\n try:\n impersonated_user = User.objects.get(**{self.username_field: username})\n if not impersonated_user.is_active:\n msg = _('User account to impersonate is disabled.')\n raise serializers.ValidationError(msg)\n\n payload = jwt_payload_handler(impersonated_user)\n\n return {\n 'token': jwt_encode_handler(payload),\n 'user': impersonated_user,\n }\n\n except User.DoesNotExist:\n msg = _('User does not exist.')\n raise serializers.ValidationError(msg)\n\n else:\n msg = _('Unable to log 
in with provided credentials.')\n raise serializers.ValidationError(msg)\n else:\n msg = _('Must include \"{username_field}\" and \"token\".')\n msg = msg.format(username_field=self.username_field)\n raise serializers.ValidationError(msg)\n\n\n\nclass ObfuscatedUserSerializer(serializers.HyperlinkedModelSerializer):\n url = serializers.HyperlinkedIdentityField(\n view_name='user-detail',\n lookup_field='pk'\n )\n truncated_email = serializers.SerializerMethodField()\n obfuscated_username = serializers.SerializerMethodField()\n\n class Meta:\n model = User\n fields = [\n 'id',\n 'url',\n 'username',\n 'obfuscated_username',\n 'truncated_email',\n ]\n\n def get_obfuscated_username(self, obj):\n if not '@' in obj.username:\n return obj.username\n return truncate_email(obj.username)\n\n def get_truncated_email(self, obj):\n if '@' not in obj.email:\n return ''\n return truncate_email(obj.email)\n\n\nclass UserSerializer(ObfuscatedUserSerializer):\n\n class Meta:\n model = User\n fields = [\n 'id',\n 'url',\n 'username',\n 'obfuscated_username',\n 'email',\n 'truncated_email',\n ]\n\n\nclass UserViewSet(ReadOnlyModelViewSet):\n queryset = User.objects.all()\n serializer_class = UserSerializer\n\n# def get_serializer_class(self):\n# if self.request.user.is_authenticated():\n# return UserSerializer\n# return ObfuscatedUserSerializer\n\n\nclass AuthorizeJSONWebTokenView(JSONWebTokenAPIView):\n serializer_class = AuthorizeJSONWebTokenSerializer\n\n\nauthorize_jwt_token = AuthorizeJSONWebTokenView.as_view()\n","sub_path":"src/easydmp/auth/api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4824,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"446939160","text":"#!/usr/bin/env python\r\n# _*_ coding: utf-8 _*_\r\n# @Time : 2018/10/23 16:03\r\n# @Author : viekie\r\n# @Site : www.ml2ai.com\r\n# @File : 01_linear_regression.py\r\n# @Software: PyCharm\r\n\r\nfrom __future__ import absolute_import\r\nfrom __future__ import print_function\r\nfrom __future__ import division\r\n\r\nimport os\r\nimport argparse\r\nimport numpy as np\r\nimport tensorflow as tf\r\n\r\nimport utils.mobile_data as datautils\r\n\r\n\r\nparser = argparse.ArgumentParser()\r\nparser.add_argument('--batch_size', type=int, default=100, help='batch size')\r\nparser.add_argument('--epoches', type=int, default=1000, help='epoches')\r\nargs = parser.parse_args()\r\n\r\n\r\ndef main():\r\n (train_x, train_y), (test_x, test_y) = \\\r\n datautils.parse_csv_by_pandas(os.path.abspath('.' 
+ '\\\\datasets\\\\imports-85.data'))\r\n\r\n train_y /= 1000\r\n test_y /= 1000\r\n\r\n feature_columns = [\r\n tf.feature_column.numeric_column(key='curb-weight'),\r\n tf.feature_column.numeric_column(key='highway-mpg'),\r\n ]\r\n\r\n regressor = tf.estimator.LinearRegressor(feature_columns=feature_columns)\r\n\r\n regressor.train(input_fn=lambda: datautils.input_train_func(train_x, train_y, args.batch_size),\r\n steps=args.epoches)\r\n\r\n eval_result = regressor.evaluate(input_fn=lambda: datautils.input_eval_func(test_x, test_y, args.batch_size))\r\n avg_loss = eval_result['average_loss']\r\n print('avg eval loss {:.0f}'.format(avg_loss**0.5 * 1000))\r\n\r\n pred_x = {\r\n \"curb-weight\": np.array([2000, 3000]),\r\n \"highway-mpg\": np.array([30, 40])\r\n }\r\n\r\n pred_result = regressor.predict(input_fn=lambda: datautils.input_eval_func(pred_x, None))\r\n\r\n template = (\"Curb weight: {: 4d}lbs, Highway: {: 0d}mpg, Prediction: ${: 9.2f}\")\r\n\r\n for i, y_ in enumerate(pred_result):\r\n print(template.format(pred_x['curb-weight'][i], pred_x['highway-mpg'][i], 1000*y_['predictions'][0]))\r\n\r\n\r\nif __name__ == '__main__':\r\n tf.logging.set_verbosity(tf.logging.INFO)\r\n main()\r\n","sub_path":"tensorflow/02_regression/01_linear_regression.py","file_name":"01_linear_regression.py","file_ext":"py","file_size_in_byte":1989,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"219149819","text":"\"\"\"\nВ Англии валютой являются фунты стерлингов £ и п��нсы p, и в\nобращении есть восемь монет:\n\n1p, 2p, 5p, 10p, 20p, 50p, £1 (100p) и £2 (200p).\n£2 возможно составить следующим образом:\n\n1×£1 + 1×50p + 2×20p + 1×5p + 1×2p + 3×1p\nСколькими разными способами можно составить £2, используя любое\nколичество монет?\n\"\"\"\n\n\ndef sum_combs(coins: list, sum_coins=0, combs=0, limit=200):\n if sum_coins == limit:\n return 1\n elif sum_coins > limit:\n return 0\n else:\n for i in range(len(coins)):\n combs += sum_combs(coins[i:], sum_coins + coins[i])\n return combs\n\n\nif __name__ == '__main__':\n coins = [1, 2, 5, 10, 20, 50, 100, 200]\n print(sum_combs(coins))\n","sub_path":"euler031.py","file_name":"euler031.py","file_ext":"py","file_size_in_byte":877,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"477837777","text":"def run():\n import wx\n import os,sys\n sys.path.append(os.path.abspath(os.path.dirname(__file__)))\n from imagepy3.ui.imagepy import ImagePy\n app = wx.App(False)\n mainFrame = ImagePy(None)\n mainFrame.Show()\n app.MainLoop()\n\nif __name__ == \"__main__\" :\n run()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"553378251","text":"parent = {}\nrank = {}\n\n# Mapping buildings to integer IDs; Union Find; size of set\n\ndef find(v):\n if v not in parent.keys():\n rank[v] = 1\n parent[v] = v\n return v\n elif parent[v] == v:\n return v\n else:\n p = find(parent[v])\n parent[v] = p\n return p\n\ndef union(a,b):\n a = find(a)\n b = find(b)\n\n if a == b:\n print(rank[a])\n return\n\n if rank[a] <= rank[b]:\n parent[a] = b\n rank[b] += rank[a]\n print(rank[b])\n return\n \n elif rank[b] < rank[a]:\n parent[b] = a\n rank[a] += rank[b]\n print(rank[a])\n return\n\nn = int(input())\n\nfor i in range(n):\n a,b = input().split(\" \")\n 
union(a,b)","sub_path":"Graphs2/bridgesandtunnels/bridgesandtunnels.py","file_name":"bridgesandtunnels.py","file_ext":"py","file_size_in_byte":725,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"296866018","text":"__author__ = 'zhenyang'\n\nimport numpy\nimport logging\nimport theano\nimport theano.tensor as TT\nfrom theano.gradient import grad_clip\n\nfrom sparnn.utils import *\nfrom sparnn.layers import Layer\n\nlogger = logging.getLogger(__name__)\n\n'''\n Conditional (simplified) Convolutional LSTM with Attention by stacking two streams (rgb+flow).\n Cell is not connected with any gate, i.e. input gate, output gate, or forget gate.\n'''\nclass DeepCondConvLSTMDecpLayer(Layer):\n def __init__(self, layer_param):\n super(DeepCondConvLSTMDecpLayer, self).__init__(layer_param)\n assert 5 == self.input.ndim\n assert (\"init_hidden_state\" in layer_param or \"init_cell_state\" in layer_param)\n assert (\"init_context_hidden_state\" in layer_param or \"init_context_cell_state\" in layer_param)\n\n # get dims of context input and output\n self.ctx_dim_in = layer_param.get('ctx_dim_in', None)\n self.ctx_dim_out = layer_param.get('ctx_dim_out', None)\n self.context = layer_param.get('context', None)\n self.context_in = self.ctx_dim_in[0]\n self.context_out = self.ctx_dim_out[0]\n assert 5 == self.context.ndim\n\n self.input_receptive_field = layer_param['input_receptive_field']\n self.transition_receptive_field = layer_param['transition_receptive_field']\n self.context_input_receptive_field = layer_param['context_input_receptive_field']\n self.context_transition_receptive_field = layer_param['context_transition_receptive_field']\n\n self.gate_activation = layer_param.get('gate_activation', 'sigmoid')\n self.modular_activation = layer_param.get('modular_activation', 'tanh')\n self.hidden_activation = layer_param.get('hidden_activation', 'tanh')\n\n self.init_hidden_state = layer_param.get(\"init_hidden_state\", quick_theano_zero((self.minibatch_size,) + self.dim_out))\n self.init_cell_state = layer_param.get(\"init_cell_state\", quick_theano_zero((self.minibatch_size,) + self.dim_out))\n self.init_hidden_state = TT.unbroadcast(self.init_hidden_state, *range(self.init_hidden_state.ndim))\n self.init_cell_state = TT.unbroadcast(self.init_cell_state, *range(self.init_cell_state.ndim))\n self.init_context_hidden_state = layer_param.get(\"init_context_hidden_state\", quick_theano_zero((self.minibatch_size,) + self.ctx_dim_out))\n self.init_context_cell_state = layer_param.get(\"init_context_cell_state\", quick_theano_zero((self.minibatch_size,) + self.ctx_dim_out))\n self.init_context_hidden_state = TT.unbroadcast(self.init_context_hidden_state, *range(self.init_context_hidden_state.ndim))\n self.init_context_cell_state = TT.unbroadcast(self.init_context_cell_state, *range(self.init_context_cell_state.ndim))\n self.learn_padding = layer_param.get('learn_padding', False)\n self.input_padding = layer_param.get('input_padding', None)\n if 'n_steps' in layer_param:\n self.n_steps = layer_param['n_steps']\n else:\n self.n_steps = layer_param.get('n_steps', self.input.shape[0])\n self.kernel_size = (self.feature_out, self.feature_in,\n self.input_receptive_field[0], self.input_receptive_field[1])\n self.transition_mat_size = (self.feature_out, self.feature_out,\n self.transition_receptive_field[0], self.transition_receptive_field[1])\n self.context_kernel_size = (self.context_out, self.context_in,\n self.context_input_receptive_field[0], 
self.context_input_receptive_field[1])\n self.context_transition_mat_size = (self.context_out, self.context_out,\n self.context_transition_receptive_field[0], self.context_transition_receptive_field[1])\n # self.hybrid_transition_mat_size = (self.context_out, self.feature_out,\n # self.context_transition_receptive_field[0], self.context_transition_receptive_field[1])\n\n self.temperature_inverse = numpy_floatX(layer_param.get('temperature_inverse', 1.))\n self.fmap_size = (self.dim_in[1], self.dim_in[2])\n self.alpha = None\n self.ctx_output = None\n\n ###############################\n # feature input to LSTM-pred\n self.W_pred_xi = quick_init_xavier(self.rng, self.kernel_size, self._s(\"W_pred_xi\"))\n self.W_pred_xf = quick_init_xavier(self.rng, self.kernel_size, self._s(\"W_pred_xf\"))\n self.W_pred_xo = quick_init_xavier(self.rng, self.kernel_size, self._s(\"W_pred_xo\"))\n self.W_pred_xc = quick_init_xavier(self.rng, self.kernel_size, self._s(\"W_pred_xc\"))\n\n # LSTM-pred to LSTM-pred\n self.W_pred_hi = quick_init_xavier(self.rng, self.transition_mat_size, self._s(\"W_pred_hi\"))\n self.W_pred_hf = quick_init_xavier(self.rng, self.transition_mat_size, self._s(\"W_pred_hf\"))\n self.W_pred_ho = quick_init_xavier(self.rng, self.transition_mat_size, self._s(\"W_pred_ho\"))\n self.W_pred_hc = quick_init_xavier(self.rng, self.transition_mat_size, self._s(\"W_pred_hc\"))\n if self.learn_padding:\n self.pred_hidden_padding = quick_zero((self.feature_out, ), self._s(\"pred_hidden_padding\"))\n else:\n self.pred_hidden_padding = None\n\n # bias to LSTM-pred\n self.b_pred_i = quick_zero((self.feature_out, ), self._s(\"b_pred_i\"))\n self.b_pred_f = quick_zero((self.feature_out, ), self._s(\"b_pred_f\"))\n self.b_pred_o = quick_zero((self.feature_out, ), self._s(\"b_pred_o\"))\n self.b_pred_c = quick_zero((self.feature_out, ), self._s(\"b_pred_c\"))\n\n ###############################\n # context input to LSTM-infer\n self.W_infer_xi = quick_init_xavier(self.rng, self.context_kernel_size, self._s(\"W_infer_xi\"))\n self.W_infer_xf = quick_init_xavier(self.rng, self.context_kernel_size, self._s(\"W_infer_xf\"))\n self.W_infer_xo = quick_init_xavier(self.rng, self.context_kernel_size, self._s(\"W_infer_xo\"))\n self.W_infer_xc = quick_init_xavier(self.rng, self.context_kernel_size, self._s(\"W_infer_xc\"))\n\n # LSTM-infer to LSTM-infer\n self.W_infer_hi = quick_init_xavier(self.rng, self.context_transition_mat_size, self._s(\"W_infer_hi\"))\n self.W_infer_hf = quick_init_xavier(self.rng, self.context_transition_mat_size, self._s(\"W_infer_hf\"))\n self.W_infer_ho = quick_init_xavier(self.rng, self.context_transition_mat_size, self._s(\"W_infer_ho\"))\n self.W_infer_hc = quick_init_xavier(self.rng, self.context_transition_mat_size, self._s(\"W_infer_hc\"))\n if self.learn_padding:\n self.infer_hidden_padding = quick_zero((self.context_out, ), self._s(\"infer_hidden_padding\"))\n else:\n self.infer_hidden_padding = None\n\n # bias to LSTM-infer\n self.b_infer_i = quick_zero((self.context_out, ), self._s(\"b_infer_i\"))\n self.b_infer_f = quick_zero((self.context_out, ), self._s(\"b_infer_f\"))\n self.b_infer_o = quick_zero((self.context_out, ), self._s(\"b_infer_o\"))\n self.b_infer_c = quick_zero((self.context_out, ), self._s(\"b_infer_c\"))\n\n # LSTM-pred to LSTM-infer (as contextual input)\n # self.W_infer_ci = quick_init_xavier(self.rng, self.hybrid_transition_mat_size, self._s(\"W_infer_ci\"))\n # self.W_infer_cf = quick_init_xavier(self.rng, self.hybrid_transition_mat_size, 
self._s(\"W_infer_cf\"))\n # self.W_infer_co = quick_init_xavier(self.rng, self.hybrid_transition_mat_size, self._s(\"W_infer_co\"))\n # self.W_infer_cc = quick_init_xavier(self.rng, self.hybrid_transition_mat_size, self._s(\"W_infer_cc\"))\n\n ###############################\n # attention: input -> hidden\n self.Wc_att = quick_init_xavier(self.rng, (self.feature_in, self.feature_in), self._s(\"Wc_att\"))\n # attention: LSTM-infer -> hidden\n self.Wd_att = quick_init_xavier(self.rng, (self.context_out, self.feature_in), self._s(\"Wd_att\"))\n # attention: LSTM-pred -> hidden\n self.We_att = quick_init_xavier(self.rng, (self.feature_out, self.feature_in), self._s(\"We_att\"))\n # attention: hidden bias\n self.b_att = quick_zero((self.feature_in, ), self._s(\"b_att\"))\n # attention:\n self.U_att = quick_init_xavier(self.rng, (self.feature_in, 1), self._s(\"U_att\"))\n self.c_att = quick_zero((1, ), self._s(\"c_att\"))\n\n # collect all parameters\n self.param = [self.W_pred_xi, self.W_pred_hi, self.b_pred_i,\n self.W_pred_xf, self.W_pred_hf, self.b_pred_f,\n self.W_pred_xo, self.W_pred_ho, self.b_pred_o,\n self.W_pred_xc, self.W_pred_hc, self.b_pred_c,\n self.W_infer_xi, self.W_infer_hi, self.b_infer_i,\n self.W_infer_xf, self.W_infer_hf, self.b_infer_f,\n self.W_infer_xo, self.W_infer_ho, self.b_infer_o,\n self.W_infer_xc, self.W_infer_hc, self.b_infer_c,\n self.Wc_att, self.Wd_att, self.We_att, self.b_att,\n self.U_att, self.c_att]\n if self.learn_padding:\n self.param.append(self.pred_hidden_padding)\n self.param.append(self.infer_hidden_padding)\n\n self.is_recurrent = True\n self.fprop()\n\n def set_name(self):\n self.name = \"DeepCondConvLSTMDecpLayer-\" + str(self.id)\n\n def step_fprop(self, x_t, ctx_t, h_pred_tm1, c_pred_tm1, h_infer_tm1, c_infer_tm1, alpha_, *args):\n # x_t input @ t (BS, IN, H, W)\n # h_pred_tm1 lstm-pred hidden state @ t-1 (BS, OUT, H, W)\n # c_pred_tm1 lstm-pred cell state @ t-1 (BS, OUT, H, W)\n # h_infer_tm1 lstm-infer hidden state @ t-1 (BS, OUT, H, W)\n # c_infer_tm1 lstm-infer cell state @ t-1 (BS, OUT, H, W)\n\n # LSTM-infer (inference layer)\n input_gate_infer = quick_activation(conv2d_same(ctx_t, self.W_infer_xi, (None, ) + self.ctx_dim_in,\n self.context_kernel_size, self.input_padding)\n + conv2d_same(h_infer_tm1, self.W_infer_hi, (None, ) + self.ctx_dim_out,\n self.context_transition_mat_size, self.infer_hidden_padding)\n + self.b_infer_i.dimshuffle('x', 0, 'x', 'x'), \"sigmoid\")\n forget_gate_infer = quick_activation(conv2d_same(ctx_t, self.W_infer_xf, (None, ) + self.ctx_dim_in,\n self.context_kernel_size, self.input_padding)\n + conv2d_same(h_infer_tm1, self.W_infer_hf, (None, ) + self.ctx_dim_out,\n self.context_transition_mat_size, self.infer_hidden_padding)\n + self.b_infer_f.dimshuffle('x', 0, 'x', 'x'), \"sigmoid\")\n c_infer_t = forget_gate_infer * c_infer_tm1 \\\n + input_gate_infer * quick_activation(conv2d_same(ctx_t, self.W_infer_xc, (None, ) + self.ctx_dim_in,\n self.context_kernel_size, self.input_padding)\n + conv2d_same(h_infer_tm1, self.W_infer_hc, (None, ) + self.ctx_dim_out,\n self.context_transition_mat_size, self.infer_hidden_padding)\n + self.b_infer_c.dimshuffle('x', 0, 'x', 'x'), \"tanh\")\n output_gate_infer = quick_activation(conv2d_same(ctx_t, self.W_infer_xo, (None, ) + self.ctx_dim_in,\n self.context_kernel_size, self.input_padding)\n + conv2d_same(h_infer_tm1, self.W_infer_ho, (None, ) + self.ctx_dim_out,\n self.context_transition_mat_size, self.infer_hidden_padding)\n + self.b_infer_o.dimshuffle('x', 0, 'x', 'x'), 
\"sigmoid\")\n h_infer_t = output_gate_infer * quick_activation(c_infer_t, \"tanh\")\n\n # attention mechanism\n pstate_infer = TT.tensordot(self.Wd_att, h_infer_t, axes=[[0], [1]]) # IN x BS x H x W\n pstate_pred = TT.tensordot(self.We_att, h_pred_tm1, axes=[[0], [1]]) # IN x BS x H x W\n pattend = TT.tensordot(self.Wc_att, x_t, axes=[[0], [1]]) + self.b_att.dimshuffle(0, 'x', 'x', 'x') # IN x BS x H x W\n pattend = quick_activation(pattend + pstate_infer + pstate_pred, 'tanh')\n \n alpha = TT.tensordot(self.U_att, pattend, axes=[[0], [0]]) + self.c_att.dimshuffle(0, 'x', 'x', 'x') # 1 x BS x H x W\n alpha_shp = alpha.shape\n #alpha = quick_activation(alpha.reshape((alpha_shp[1],alpha_shp[2],alpha_shp[3])), 'sigmoid') # BS x H x W\n alpha = quick_activation(alpha.reshape((alpha_shp[1],alpha_shp[2]*alpha_shp[3])), 'softmax') # BS x (H x W)\n alpha = alpha.reshape((alpha_shp[1],alpha_shp[2],alpha_shp[3])) # BS x H x W\n attend = x_t * alpha.dimshuffle(0, 'x', 1, 2) # BS x IN X H x W\n # print '\\n\\ncheck\\n\\n'\n\n # LSTM-pred (prediction layer)\n input_gate_pred = quick_activation(conv2d_same(attend, self.W_pred_xi, (None, ) + self.dim_in,\n self.kernel_size, self.input_padding)\n + conv2d_same(h_pred_tm1, self.W_pred_hi, (None, ) + self.dim_out,\n self.transition_mat_size, self.pred_hidden_padding)\n + self.b_pred_i.dimshuffle('x', 0, 'x', 'x'), \"sigmoid\")\n forget_gate_pred = quick_activation(conv2d_same(attend, self.W_pred_xf, (None, ) + self.dim_in,\n self.kernel_size, self.input_padding)\n + conv2d_same(h_pred_tm1, self.W_pred_hf, (None, ) + self.dim_out,\n self.transition_mat_size, self.pred_hidden_padding)\n + self.b_pred_f.dimshuffle('x', 0, 'x', 'x'), \"sigmoid\")\n c_pred_t = forget_gate_pred * c_pred_tm1 \\\n + input_gate_pred * quick_activation(conv2d_same(attend, self.W_pred_xc, (None, ) + self.dim_in,\n self.kernel_size, self.input_padding)\n + conv2d_same(h_pred_tm1, self.W_pred_hc, (None, ) + self.dim_out,\n self.transition_mat_size, self.pred_hidden_padding)\n + self.b_pred_c.dimshuffle('x', 0, 'x', 'x'), \"tanh\")\n output_gate_pred = quick_activation(conv2d_same(attend, self.W_pred_xo, (None, ) + self.dim_in,\n self.kernel_size, self.input_padding)\n + conv2d_same(h_pred_tm1, self.W_pred_ho, (None, ) + self.dim_out,\n self.transition_mat_size, self.pred_hidden_padding)\n + self.b_pred_o.dimshuffle('x', 0, 'x', 'x'), \"sigmoid\")\n h_pred_t = output_gate_pred * quick_activation(c_pred_t, \"tanh\")\n\n return [h_pred_t, c_pred_t, h_infer_t, c_infer_t, alpha]\n\n def step_masked_fprop(self, x_t, ctx_t, mask_t, h_pred_tm1, c_pred_tm1, h_infer_tm1, c_infer_tm1, alpha_, *args):\n\n h_pred_t, c_pred_t, h_infer_t, c_infer_t, alpha = self.step_fprop(x_t, ctx_t, \\\n h_pred_tm1, c_pred_tm1, h_infer_tm1, c_infer_tm1, alpha_, *args)\n\n h_pred_t = TT.switch(mask_t, h_pred_t, h_pred_tm1)\n c_pred_t = TT.switch(mask_t, c_pred_t, c_pred_tm1)\n h_infer_t = TT.switch(mask_t, h_infer_t, h_infer_tm1)\n c_infer_t = TT.switch(mask_t, c_infer_t, c_infer_tm1)\n\n return [h_pred_t, c_pred_t, h_infer_t, c_infer_t, alpha]\n\n def init_states(self):\n return self.init_hidden_state, self.init_cell_state, self.init_context_hidden_state, self.init_context_cell_state\n\n def fprop(self):\n\n # The dimension of self.mask is (Timestep, Minibatch).\n # We need to pad it to (Timestep, Minibatch, FeatureDim)\n # and keep the last one added dimensions broadcastable. 
TT.shape_padright\n # function is thus a good choice\n # input should be (Timestep, Minibatch, FeatureDim, Region)\n # however, x should be (Timestep, Minibatch, Region, FeatureDim) to scan\n # if it's in from of (TS, BS, DIN, RS), transform it to (TS, BS, RS, DIN)\n # self.input = self.input.dimshuffle((0, 1, 3, 2))\n\n #self.input = self.input.dimshuffle((0, 1, 3, 2))\n if self.mask is None:\n scan_input = [self.input, self.context]\n scan_fn = self.step_fprop\n else:\n scan_input = [self.input, self.context, TT.shape_padright(self.mask, 3)]\n scan_fn = self.step_masked_fprop\n\n non_seqs = self.param\n #[self.output, self.cell_output, self.ctx_output, self.ctx_cell_output, self.alpha], self.output_update = quick_unroll_scan(fn=scan_fn,\n [self.output, self.cell_output, self.ctx_output, self.ctx_cell_output, self.alpha], self.output_update = theano.scan(fn=scan_fn,\n outputs_info=[self.init_hidden_state,\n self.init_cell_state,\n self.init_context_hidden_state,\n self.init_context_cell_state,\n quick_theano_zero(((self.minibatch_size,) + self.fmap_size))],\n sequences=scan_input,\n non_sequences=non_seqs,\n n_steps=self.n_steps\n )","sub_path":"sparnn/layers/basic/deep_cond_conv_lstm_decp_layer.py","file_name":"deep_cond_conv_lstm_decp_layer.py","file_ext":"py","file_size_in_byte":18510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"265013179","text":"\r\npriority = (5, 1, 3, 2)\r\ninput = [1, 7, 5, 8, 2]\r\n\r\n# hash = [0]*len(priority)\r\n# print('The hash is:', hash(priority))\r\nmaximum = max(input)\r\nhash = [0]*(maximum+1)\r\nprint(hash)\r\n\r\ndef insert(val):\r\n n = len(val)\r\n for i in range(0, n):\r\n hash[val[i]] = 1\r\n\r\n\r\ninsert(input)\r\nprint(hash)\r\n\r\n\r\ndef sortByPriority(input_hash, priority):\r\n arr = []\r\n for i in range(0, len(priority)):\r\n if input_hash[priority[i]] == 1:\r\n arr.append(priority[i])\r\n input_hash[priority[i]] = 0\r\n print(input_hash)\r\n for j in range(len(input_hash)):\r\n if input_hash[j] == 1:\r\n arr.append(j)\r\n print(arr)\r\n\r\nsortByPriority(hash, priority)","sub_path":"SortByPriority.py","file_name":"SortByPriority.py","file_ext":"py","file_size_in_byte":693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"592622232","text":"\nimport random\nimport gym\nimport math\nimport numpy as np\nfrom collections import deque\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.optimizers import Adam\nfrom nn import NN\n\nclass DQNCartPoleSolver():\n def __init__(self):\n self.env = gym.make('CartPole-v0')\n self.gamma = 0.9\n self.epsilon = 0.5\n self.epsilon_min = 0.01\n self.epsilon_decay = 0.999\n self.epsilon_max = 0.25\n self.epsilon_inc = 1.001\n self.alpha = 1e-4\n self.alpha_decay = 0.00\n self.n_episodes = 100000\n self.n_win_ticks = 195\n self.max_env_steps = 200\n\n EPSILON = 0.12\n \n LAYER1 = 4\n LAYER2 = 24\n LAYER3 = 2\n \n weights1 = np.random.uniform(0.0, 1.0, size=(LAYER1 + 1, LAYER2)) * 2 * EPSILON - EPSILON\n weights2 = np.random.uniform(0.0, 1.0, size=(LAYER2 + 1, LAYER3)) * 2 * EPSILON - EPSILON\n \n self.model = NN(size=[LAYER1, LAYER2, LAYER3], weights=[weights1, weights2], alpha=self.alpha, bias=True)\n\n def choose_action(self, state):\n values = self.model.predict(state)\n \n if (np.random.random() <= self.epsilon):\n action = self.env.action_space.sample()\n else:\n action = np.argmax(self.model.predict(state))\n \n if (np.any(np.isnan(values)) or 
np.any(np.isinf(values))):\n assert(False)\n \n return action, values\n\n def train(self, state, action, reward, values, next_value, done): \n if done:\n values[action] = reward\n else:\n values[action] = reward + self.gamma * next_value\n \n self.model.train(state, values)\n\n def run(self):\n scores = deque(maxlen=100)\n\n for e in range(self.n_episodes):\n i = 0\n total_reward = 0\n\n done = False\n state = self.env.reset()\n\n action, values = self.choose_action(state)\n next_state, reward, done, _ = self.env.step(action)\n \n while not done:\n next_action, next_values = self.choose_action(next_state)\n self.train(state, action, reward, values, next_values[next_action], done) \n state = next_state\n action = next_action\n values = next_values\n # print (values)\n next_state, reward, done, _ = self.env.step(action)\n \n i += 1\n total_reward += reward\n\n scores.append(i)\n mean_score = np.mean(scores)\n \n \n if total_reward <= mean_score and self.epsilon < self.epsilon_max:\n self.epsilon *= self.epsilon_inc\n elif total_reward > mean_score and self.epsilon > self.epsilon_min:\n self.epsilon *= self.epsilon_decay\n \n if e % 10 == 0:\n print(mean_score, self.epsilon)\n\nif __name__ == '__main__':\n agent = DQNCartPoleSolver()\n agent.run()\n \n \n \n \n \n","sub_path":"cartpoleDFA/cartpole.py","file_name":"cartpole.py","file_ext":"py","file_size_in_byte":3155,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"484652338","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/insights/parsers/tests/test_tmpfilesd.py\n# Compiled at: 2019-05-16 13:41:33\nfrom insights.parsers.tmpfilesd import TmpFilesD\nfrom insights.tests import context_wrap\nSAP_CONF = ('\\n# systemd tmpfiles exclude file for SAP\\n# SAP software stores some important files\\n# in /tmp which should not be deleted\\n\\n# Exclude SAP socket and lock files\\nx /tmp/.sap*\\n\\n# Exclude HANA lock file\\nx /tmp/.hdb*lock\\n').strip()\n\ndef test_tmpfilesd():\n ctx = context_wrap(SAP_CONF, path='/etc/tmpfiles.d/sap.conf')\n data = TmpFilesD(ctx)\n assert len(data.files) == 2\n assert data.files == ['/tmp/.sap*', '/tmp/.hdb*lock']\n assert data.rules == [\n {'type': 'x', 'mode': None, \n 'path': '/tmp/.sap*', \n 'uid': None, \n 'gid': None, \n 'age': None, \n 'argument': None},\n {'type': 'x', 'path': '/tmp/.hdb*lock', \n 'mode': None, \n 'uid': None, \n 'gid': None, \n 'age': None, \n 'argument': None}]\n assert data.file_path == '/etc/tmpfiles.d/sap.conf'\n assert data.file_name == 'sap.conf'\n return\n\n\ndef test_find_file():\n ctx = context_wrap(SAP_CONF, path='/etc/tmpfiles.d/sap.conf')\n data = TmpFilesD(ctx)\n assert data.find_file('.sap*') == [\n {'path': '/tmp/.sap*', 'type': 'x', 'mode': None, 'age': None, \n 'gid': None, 'uid': None, 'argument': None}]\n assert data.find_file('.hdb*lock') == [\n {'path': '/tmp/.hdb*lock', 'type': 'x', 'mode': None, \n 'uid': None, 'gid': None, 'age': None, \n 'argument': None}]\n assert data.find_file('bar') == []\n return","sub_path":"pycfiles/insights_core-3.0.161-py2.7/test_tmpfilesd.py","file_name":"test_tmpfilesd.py","file_ext":"py","file_size_in_byte":1767,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"162120043","text":"import pygame\nfrom tile import Tile\nfrom pathfinder import Pathfinder\n\n\nclass Map:\n def 
__init__(self, width, height, tile_side_length):\n self.start_tile = None\n self.finish_tile = None\n self.width = width\n self.height = height\n self.tiles = [height * [0] for _ in range(width)]\n self.tiles_unordered = []\n self.tile_images = pygame.sprite.Group()\n\n for x in range(width):\n for y in range(height):\n self.tiles[x][y] = Tile(x*tile_side_length, y*tile_side_length, tile_side_length)\n\n for col in self.tiles:\n for tile in col:\n self.tile_images.add(tile.sprite)\n self.tiles_unordered += [tile]\n\n self.establish_connections()\n\n self.pathfinder = Pathfinder(self.tiles_unordered, self.start_tile, self.finish_tile)\n\n def set_start_tile(self, tile):\n if not tile.is_finish:\n if self.start_tile:\n self.start_tile.set_start(False)\n tile.set_start(True)\n self.start_tile = tile\n self.pathfinder.start_node = tile\n if tile.is_blocking:\n tile.toggle_blocking()\n\n def set_finish_tile(self, tile):\n if not tile.is_start:\n if self.finish_tile:\n self.finish_tile.set_finish(False)\n tile.set_finish(True)\n self.finish_tile = tile\n self.pathfinder.finish_node = tile\n if tile.is_blocking:\n tile.toggle_blocking()\n\n def establish_connections(self):\n for x in range(self.width):\n for y in range(self.height):\n if x > 0:\n self.tiles[x][y].connections += [self.tiles[x - 1][y]]\n if y > 0:\n self.tiles[x][y].connections += [self.tiles[x - 1][y - 1]]\n if y < self.height - 1:\n self.tiles[x][y].connections += [self.tiles[x - 1][y + 1]]\n if x < self.width - 1:\n self.tiles[x][y].connections += [self.tiles[x + 1][y]]\n if y > 0:\n self.tiles[x][y].connections += [self.tiles[x + 1][y - 1]]\n if y < self.height - 1:\n self.tiles[x][y].connections += [self.tiles[x + 1][y + 1]]\n if y > 0:\n self.tiles[x][y].connections += [self.tiles[x][y - 1]]\n if y < self.height - 1:\n self.tiles[x][y].connections += [self.tiles[x][y + 1]]\n\n","sub_path":"map.py","file_name":"map.py","file_ext":"py","file_size_in_byte":2560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"452391410","text":"import numpy\n\nfrom .._io._helpers import Output\n\n__all__ = [\n \"extract\",\n]\n\n\ndef extract(argv=None):\n import os\n\n parser = _get_parser()\n args = parser.parse_args(argv)\n\n # Check that TOUGH output and MESH file exist\n if not os.path.isfile(args.infile):\n raise ValueError(\"TOUGH output file '{}' not found.\".format(args.infile))\n if not os.path.isfile(args.mesh):\n raise ValueError(\"MESH file '{}' not found.\".format(args.mesh))\n\n # Read MESH and extract X, Y and Z\n nodes, is_eleme = {}, False\n with open(args.mesh, \"r\") as f:\n for line in f:\n line = line.upper().strip()\n if line[:5].startswith(\"ELEME\"):\n is_eleme = True\n line = next(f)\n while line.strip():\n label = line[:5]\n X = float(line[50:60]) if line[50:60].strip() else 0.0\n Y = float(line[60:70]) if line[60:70].strip() else 0.0\n Z = float(line[70:80]) if line[70:80].strip() else 0.0\n nodes[label] = [X, Y, Z]\n line = next(f)\n headers = [\"X\", \"Y\", \"Z\"]\n break\n if not is_eleme:\n raise ValueError(\"Invalid MESH file '{}'.\".format(args.mesh))\n\n # Read TOUGH output file\n out = []\n with open(args.infile, \"r\") as f:\n for line in f:\n line = line.upper().strip()\n if line.startswith(\"OUTPUT DATA AFTER\"):\n out.append(_read_table(f, nodes))\n\n # Write TOUGH3 element output file\n if not args.split or len(out) == 1:\n with open(args.output_file, \"w\") as f:\n _write_header(f, headers, out[0])\n for data in out:\n _write_table(f, data, nodes)\n 
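# when --split is set, the else branch below writes one element output file per time step\n    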
else:\n head, ext = os.path.splitext(args.output_file)\n for i, data in enumerate(out):\n with open(\"{}_{}{}\".format(head, i + 1, ext), \"w\") as f:\n _write_header(f, headers, data)\n _write_table(f, data, nodes)\n\n\ndef _get_parser():\n import argparse\n\n # Initialize parser\n parser = argparse.ArgumentParser(\n description=(\n \"Extract results from TOUGH main output file and reformat as a TOUGH3 element output file.\"\n ),\n formatter_class=argparse.RawTextHelpFormatter,\n )\n\n # Input file\n parser.add_argument(\n \"infile\", type=str, help=\"TOUGH output file\",\n )\n\n # Mesh file\n parser.add_argument(\n \"mesh\", type=str, help=\"TOUGH MESH file (can be INFILE)\",\n )\n\n # Output file\n parser.add_argument(\n \"--output-file\",\n \"-o\",\n type=str,\n default=\"OUTPUT_ELEME.csv\",\n help=\"TOUGH3 element output file\",\n )\n\n # Split or not\n parser.add_argument(\n \"--split\",\n \"-s\",\n default=False,\n action=\"store_true\",\n help=\"Write one file per time step\",\n )\n\n return parser\n\n\ndef _read_table(f, points):\n def str2float(s):\n \"\"\"Convert variable string to float.\"\"\"\n try:\n return float(s)\n except ValueError:\n # It's probably something like \"0.0001-001\"\n significand, exponent = s[:-4], s[-4:]\n return float(\"{}e{}\".format(significand, exponent))\n\n # Look for \"TOTAL TIME\"\n while True:\n line = next(f).strip()\n if line.startswith(\"TOTAL TIME\"):\n break\n\n # Read time step in following line\n line = next(f).strip()\n time = float(line.split()[0])\n\n # Look for \"ELEM.\"\n while True:\n line = next(f).strip()\n if line.startswith(\"ELEM.\"):\n break\n\n # Read headers once (ignore \"ELEM.\" and \"INDEX\")\n headers = line.split()[2:]\n\n # Look for next non-empty line\n while True:\n line = next(f).strip()\n if line:\n break\n\n # Loop until end of output block\n count = 0\n variables, labels = [], []\n while True:\n if line[:5] in points.keys():\n count += 1\n labels.append(line[:5])\n variables.append([str2float(x) for x in line[5:].split()[1:]])\n\n line = next(f).strip()\n if line[1:].startswith(\"@@@@@\"):\n break\n if count != len(points):\n raise ValueError(\"Inconsistent number of elements.\")\n\n return Output(\n time, labels, {k: v for k, v in zip(headers, numpy.transpose(variables))}\n )\n\n\ndef _write_table(f, data, nodes):\n # Write time step\n f.write('\"TIME [sec] {:.8e}\"\\n'.format(data.time))\n\n # Loop over elements\n formats = ['\"{:>18}\"'] + (len(data.data.keys()) + 3) * [\" {:>.12e}\"]\n for i, label in enumerate(data.labels):\n record = [label] + nodes[label] + [v[i] for v in data.data.values()]\n record = \",\".join(fmt.format(rec) for fmt, rec in zip(formats, record)) + \"\\n\"\n f.write(record)\n\n\ndef _write_header(f, headers, data):\n headers = [\"ELEM\"] + headers + list(data.data.keys())\n units = [\"\"] + 3 * [\"(M)\"] + len(data.data.keys()) * [\"(-)\"]\n f.write(\",\".join('\"{:>18}\"'.format(header) for header in headers) + \"\\n\")\n f.write(\",\".join('\"{:>18}\"'.format(unit) for unit in units) + \"\\n\")\n","sub_path":"toughio/_cli/_extract.py","file_name":"_extract.py","file_ext":"py","file_size_in_byte":5164,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"420868815","text":"import numpy as np\nimport random\n\npath_w = 'result/param.txt'\n\n\nclass Layer(object):\n def __init__(self, lr=0.1):\n self.params = {}\n self.grads = {}\n self.lr = lr\n\n def update(self):\n for k in self.params.keys():\n self.params[k] = self.params[k] - self.lr 
* self.grads[k]\n for i in range(len(self.params[k])):\n if self.params[k][i] > 1.0 or self.params[k][i] < 0.0:\n self.params[k][i] = self.params[k][i] + \\\n self.lr*self.grads[k][i]\n\n def zerograd(self):\n for k in self.params.keys():\n self.grads[k] = np.zeros(\n shape=self.params[k].shape, dtype=self.params[k].dtype)\n\n\nclass Sequential:\n def __init__(self, layers=[]):\n self.layers = layers\n\n def addlayer(self, layer):\n self.layers.append(layer)\n\n def forward(self, x):\n for l in self.layers:\n x = l.forward(x)\n return x\n\n def backward(self, y):\n for l in reversed(self.layers):\n y = l.backward(y)\n return y\n\n def update(self):\n for l in self.layers:\n l.update()\n\n def zerograd(self):\n for l in self.layers:\n l.zerograd()\n\n def print_param(self):\n for l in self.layers:\n l.print_param()\n\n def save_param(self):\n for l in self.layers:\n l.save_param()\n\n\nclass ANDLayer(Layer):\n def __init__(self, input_dim, output_dim):\n super(ANDLayer, self).__init__()\n\n self.params['p'] = np.random.rand(int(output_dim/2))\n self.shuffle = random.sample(range(input_dim*3), k=output_dim)\n self.x_mid = np.zeros(input_dim*3)\n self.x_out = np.zeros(output_dim)\n self.grads['p'] = np.zeros(int(output_dim/2))\n self.grads['X'] = np.zeros(input_dim)\n # print(self.params['p'])\n # print(self.shuffle)\n\n def forward(self, x):\n self.x = x\n for i in range(len(x)):\n self.x_mid[i*3] = x[i]\n self.x_mid[i*3+1] = x[i]\n self.x_mid[i*3+2] = 1 - x[i]\n\n for i in range(int(len(self.x_out)/2)):\n self.x_out[i*2] = self.params['p'][i]*self.x_mid[self.shuffle[i*2]] + self.x_mid[self.shuffle[i*2]] * \\\n self.x_mid[self.shuffle[i*2+1]] - self.params['p'][i] * \\\n self.x_mid[self.shuffle[i*2]]*self.x_mid[self.shuffle[i*2+1]]\n self.x_out[i*2+1] = (1-self.params['p'][i])*self.x_mid[self.shuffle[i*2]\n ] + self.params['p'][i]*self.x_mid[self.shuffle[i*2+1]]\n\n # print(self.x_out)\n return self.x_out\n\n def backward(self, y):\n for i in range(int(len(self.x_out)/2)):\n self.grads['p'][i] = y[i*2]*(self.x_mid[self.shuffle[i*2]]*(1-self.x_mid[self.shuffle[i*2+1]])) + \\\n y[i*2+1]*(self.x_mid[self.shuffle[i*2+1]] -\n self.x_mid[self.shuffle[i*2]])\n\n # print(self.grads['p'])\n grad_Xmid = np.zeros(len(self.x_mid))\n\n for i in range(int(len(self.x_out)/2)):\n grad_Xmid[self.shuffle[i*2]] = y[i*2]*(self.params['p'][i]+self.x_mid[self.shuffle[i*2+1]] -\n self.params['p'][i]*self.x_mid[self.shuffle[i*2+1]]) + y[i*2+1]*(1-self.params['p'][i])\n grad_Xmid[self.shuffle[i*2+1]] = y[i*2]*(self.x_mid[self.shuffle[i*2]]-self.params['p']\n [i]*self.x_mid[self.shuffle[i*2]]) + y[i*2+1]*self.params['p'][i]\n\n for i in range(len(self.x)):\n self.grads['X'][i] = grad_Xmid[i*3] + \\\n grad_Xmid[i*3+1] - grad_Xmid[i*3+2]\n\n # print(self.grads['X'])\n return self.grads['X']\n\n def print_param(self):\n\n print(self.shuffle)\n print(self.params['p'])\n print('---------------------------------------------------')\n\n def save_param(self):\n shuffle_str = [str(n) for n in self.shuffle]\n param_str = [str(n) for n in self.params['p']]\n with open(path_w, mode='a') as f:\n f.write('[' + ','.join(shuffle_str) + ']\\n')\n f.write('[' + ','.join(param_str) + ']\\n')\n f.write(\n '----------------------------------------------------------------\\n')\n\n\nclass Classifier:\n def __init__(self, model):\n self.model = model\n\n def update(self, x, t):\n self.model.zerograd()\n y = self.model.forward(x)\n loss = (y[0] - t)**2 / 2\n # print(loss)\n dout = np.zeros(len(y))\n dout[0] = y[0] - t\n dout = 
self.model.backward(dout)\n self.model.update()\n\n def test(self, x):\n y = self.model.forward(x)\n return y[0]\n\n def print_param(self):\n self.model.print_param()\n\n def save_param(self):\n self.model.save_param()\n with open(path_w, mode='a') as f:\n f.write('\\n')\n","sub_path":"Layer.py","file_name":"Layer.py","file_ext":"py","file_size_in_byte":4958,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"216543457","text":"from tkinter import*\nwindow=Tk()\n#all window infomation will be in this\n#need to define four labels like title author year and ISBN \nwindow.title(\"Yeasin \")\nwindow.minsize(450,200)\nwindow.configure(background='#416383')\nbut2=Button(window,text='click',width=10,height=3)\nbut2.pack() #place(x=,y=) #grid=(rows=,column=)\nbut2.place(x=100,y=50)\nprint('hey are you')\n\n\nwindow.mainloop()\n","sub_path":"new.py","file_name":"new.py","file_ext":"py","file_size_in_byte":383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"32393741","text":"import os\nimport click\nimport json\nfrom json import JSONDecoder\nfrom functools import partial\nfrom jsonschema import validate\n\ntileset_arg = click.argument('tileset', required=True, type=str)\n\ndef absoluteFilePaths(directory):\n for dirpath,_,filenames in os.walk(directory):\n for f in filenames:\n yield os.path.abspath(os.path.join(dirpath, f))\n\n# takes a list of files or directories and converts\n# all directories into absolute file paths\ndef flatten(files):\n for f in files:\n if os.path.isdir(f):\n for dir_file in absoluteFilePaths(f):\n yield dir_file\n else:\n yield f\n\n\ndef print_response(text):\n try:\n j = json.loads(text)\n msg = json.dumps(j, indent=2, sort_keys=True)\n click.echo(msg)\n except:\n click.echo('Failure \\n' + text)\n\n\ndef validate_geojson(feature):\n schema = {\n \"definitions\": {},\n \"$schema\": \"http://json-schema.org/draft-07/schema#\",\n \"$id\": \"http://example.com/root.json\",\n \"type\": \"object\",\n \"title\": \"GeoJSON Schema\",\n \"required\": [\n \"type\",\n \"geometry\",\n \"properties\"\n ],\n \"properties\": {\n \"type\": {\n \"$id\": \"#/properties/type\",\n \"type\": \"string\",\n \"title\": \"The Type Schema\",\n \"default\": \"\",\n \"examples\": [\n \"Feature\"\n ],\n \"pattern\": \"^(.*)$\"\n },\n \"geometry\": {\n \"$id\": \"#/properties/geometry\",\n \"type\": \"object\",\n \"title\": \"The Geometry Schema\",\n \"required\": [\n \"type\",\n \"coordinates\"\n ],\n \"properties\": {\n \"type\": {\n \"$id\": \"#/properties/geometry/properties/type\",\n \"type\": \"string\",\n \"title\": \"The Type Schema\",\n \"default\": \"\",\n \"examples\": [\n \"Point\"\n ],\n \"pattern\": \"^(.*)$\"\n },\n \"coordinates\": {\n \"$id\": \"#/properties/geometry/properties/coordinates\",\n \"type\": \"array\",\n \"title\": \"The Coordinates Schema\"\n }\n }\n },\n \"properties\": {\n \"$id\": \"#/properties/properties\",\n \"type\": \"object\",\n \"title\": \"The Properties Schema\",\n }\n }\n }\n\n return validate(instance=feature, schema=schema)\n","sub_path":"tilesets/scripts/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"537835260","text":"\"\"\"\nYou are given an array of integers, max N elements, range -R to +R.\nRearrange the array such that\n - After rearrangement the array has all -ve numbers together, followed by all zeros, 
followed by all +ve numbers.\nPlease note that, you should not change the relative arrangement among the -ve and +ve numbers.\n\nInput: [9,-6,5,-3,0,10,-1,0]\n\nOutput: [-6, -3, -1, 0, 0, 9, 5, 10]\n\n\nNot that all 3 -ve numbers are to in the first 3 spots, and they are in the same order they are in the array.\n\"\"\"\n\ndef moveZeroesToEnd(array):\n i, j = 0, 1\n while i < len(array) and j < len(array):\n if array[i] != 0: i += 1\n elif array[j] == 0 or j <= i: j += 1\n else: array[i], array[j] = array[j], array[i]\n return array\n\ndef moveZeroesToMiddle(array, minus, zeroes, plus):\n for x in range(len(array) - 1, minus + zeroes - 1, -1):\n array[x] = array[x - zeroes]\n for x in range(minus, minus + zeroes):\n array[x] = 0\n return array\n\ndef rotate(array, i, j, k):\n # print(array[i:j], k)\n count = 0\n start = i\n while count < j - i:\n do = True\n current = start\n prev = array[start]\n while current != start or do:\n if do: do = False\n next = (current + k - i) % (j - i) + i\n temp = array[next]\n array[next] = prev\n prev = temp\n current = next\n count += 1\n start += 1\n # print(array)\n\ndef mergeSort(array, low, high):\n if high - low <= 1: return\n mid = (high + low) // 2\n mergeSort(array, low, mid)\n mergeSort(array, mid, high)\n i = low\n j = high - 1\n while array[i] < 0 and i < mid + 1: i += 1\n while array[j] > 0 and j >= mid: j -= 1\n rotate(array, i, j + 1, j - mid + 1)\n\ndef swapMinusPlus(array, minus, plus):\n mergeSort(array, 0, minus + plus)\n return array\n\ndef rearrange(array):\n zeroes = array.count(0)\n minus = sum(1 if x < 0 else 0 for x in array)\n plus = sum(1 if x > 0 else 0 for x in array)\n print(minus, zeroes, plus)\n print(array)\n array = moveZeroesToEnd(array)\n # print(array)\n array = swapMinusPlus(array, minus, plus)\n # print(array)\n array = moveZeroesToMiddle(array, minus, zeroes, plus)\n print(array)\n\nrearrange([9,-6,5,-3,0,10,-1,0])\nrearrange([-3,0,5,9,0,-1,10,-6,0])\nrearrange([0,0,0,1,1,1,-1,-1,-1])\nrearrange([0,0,0,-1,-1,-1,1,1,1])\nrearrange([-1,-1,-1,0,0,0,1,1,1])\nrearrange([-1,-1,-1,1,1,1,0,0,0])\nrearrange([1,1,1,-1,-1,-1,0,0,0])\nrearrange([1,1,1,0,0,0,-1,-1,-1])\n","sub_path":"plus_zero_minus/plus_zero_minus.py","file_name":"plus_zero_minus.py","file_ext":"py","file_size_in_byte":2517,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"386259559","text":"import matplotlib.pyplot as plt\nimport argparse\nimport os\nimport numpy as np\nfrom bin.common import *\n\nCOLORS = ['C{}'.format(i) for i in range(10)]\n\nparser = argparse.ArgumentParser(description='Compare network outputs for reference and for alternative sequences')\nparser.add_argument('--name', '--test_namespace', metavar='NAMES', nargs='+', default=['test'],\n help='Namespaces of test analyses, default: test')\nparser.add_argument('--name_pos', action='store', metavar='INT', type=int, default=None,\n help='Position of sequence name in the fasta header, by default created as CHR:POSITION')\nparser = basic_params(parser)\nargs = parser.parse_args()\npath, outdir, namespace, seed = parse_arguments(args, None, model_path=True)\n\n\ndef plot_ref_alt(name):\n name = name.replace('_', '-')\n outputs_file = os.path.join(path, '{}_{}_outputs.npy'.format(namespace, name))\n outputs = np.load(outputs_file, allow_pickle=True)\n print('Loaded network outputs from {}'.format(outputs_file))\n labels_file = os.path.join(path, '{}_{}_labels.npy'.format(namespace, name))\n labels = list(np.load(labels_file, allow_pickle=True))\n 
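# note: the branch below that handles a missing seq_file never sets min_nsnp/max_nsnp, which\n    # would raise NameError at the later plt.text call; default them here as a safeguard\n    min_nsnp = max_nsnp = 0\n    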
print('Loaded sequences labels from {}'.format(labels_file))\n ids_file = os.path.join(path, '{}_{}.txt'.format(namespace, name))\n seq_ids = open(ids_file, 'r').read().strip().split('\\n')\n print('Loaded sequences IDs from {}'.format(ids_file))\n num_seqs = len(seq_ids)\n seq_file = None\n with open(os.path.join(path, '{}_test_results.tsv'.format(namespace)), 'r') as f:\n f.readline()\n for line in f:\n line = line.strip().split('\\t')\n if line[1] == name:\n seq_file = line[0]\n break\n\n label_names = ['' for _ in range(num_seqs)]\n if os.path.isfile(seq_file):\n seqs = ['' for _ in range(num_seqs)]\n patients = ['' for _ in range(num_seqs)]\n ref_seq = None\n with open(seq_file, 'r') as f:\n for line in f:\n if line.startswith('>'):\n l = line.strip('>\\n ').split(' ')\n if args.name_pos is not None:\n pos = args.name_pos\n id = l[pos]\n else:\n id = '{}_{}'.format(l[0].lstrip('chr'), l[1])\n pos = seq_ids.index(id)\n label_names[pos] = '{} {}'.format(l[3], l[4])\n patients[pos] = id\n else:\n if l[-1] == 'REF':\n ref_seq = line.strip().upper()\n seqs[pos] = line.strip().upper()\n if ref_seq is not None:\n num_snp = [len([a for a, r in zip(seq, ref_seq) if a != r]) for seq in seqs]\n min_nsnp = min([el for el in num_snp if el != 0])\n max_nsnp = max([el for el in num_snp if el != 0])\n dots = [(el - min_nsnp + 1) * 30 if el != 0 else 12 for el in num_snp]\n else:\n min_nsnp, max_nsnp = 0, 0\n dots = [12 for _ in seqs]\n\n else:\n patients = seq_ids\n dots = [12 for _ in range(len(seq_ids))]\n print('Alternative and reference sequences read from {}'.format(seq_file))\n\n classes = get_classes_names(os.path.join(path, '{}_params.txt'.format(namespace)))\n xvalues = {'True class': [], 'False class': []}\n yvalues = {'True class': [], 'False class': []}\n sizes = {'True class': [], 'False class': []}\n\n correct_classified = 0\n for i, (label, n) in enumerate(zip(labels, label_names)):\n output = outputs[label]\n seq_pos = len([el for el in labels[:i] if el == label])\n xvalues['True class'].append(label * num_seqs + i + label + 1)\n correct_out = output[label][seq_pos]\n yvalues['True class'].append(correct_out)\n sizes['True class'].append(dots[i])\n classified = True\n for wrong_name in [el for el in classes if el != n]:\n wrong_label = classes.index(wrong_name)\n xvalues['False class'].append(wrong_label * num_seqs + i + wrong_label + 1)\n wrong_out = output[wrong_label][seq_pos]\n yvalues['False class'].append(wrong_out)\n if wrong_out >= correct_out:\n classified = False\n sizes['False class'].append(dots[i])\n if classified:\n correct_classified += 1\n print('Number of sequences: {}, number of classes: {}'.format(num_seqs, len(classes)))\n\n plt.figure(figsize=(20, 10))\n for legend_label, color, marker in zip(['True class', 'False class'], ['C2', 'C1'], ['*', 'o']):\n plt.scatter(xvalues[legend_label], yvalues[legend_label], s=sizes[legend_label], color=color, marker=marker,\n label=legend_label, alpha=0.8)\n xticks = [la for el in xvalues.values() for la in el]\n xticks.sort()\n plt.xticks(xticks, patients*len(classes), fontsize=10, rotation=90, ha='center')\n plt.xlabel((' ' * num_seqs).join(classes), fontsize=16)\n plt.ylabel('Output value', fontsize=16)\n plt.legend(fontsize=12, prop={'size': 16})\n plt.title('{} - {}'.format(namespace, name), fontsize=20)\n plt.ylim((0.45, 1.05))\n if min_nsnp == max_nsnp == 0:\n plt.text(0.0, 1.045, 'Correctly classified seqs: {}/{}'.\n format(correct_classified, num_seqs), fontsize=12, va='top')\n else:\n plt.text(0.0, 1.045, 'Correctly 
classified seqs: {}/{}\\nNumber of SNPs in alt seqs: {}-{}'.\n format(correct_classified, num_seqs, min_nsnp, max_nsnp), fontsize=12, va='top')\n plt.tight_layout()\n plot_file = os.path.join(outdir, '{}_{}_ref:alt.png'.format(namespace, name))\n plt.savefig(plot_file)\n plt.show()\n print('Plot saved to {}'.format(plot_file))\n\n\nfor name in args.name:\n print('\\nPlot for {}'.format(name))\n plot_ref_alt(name)\n","sub_path":"reference_vs_alternative.py","file_name":"reference_vs_alternative.py","file_ext":"py","file_size_in_byte":5847,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"45014526","text":"# get app, get failure message\n# look for next success message for same app\n# do time math\n# store and see if it's the longest\n# max_log_parse.py\n# 2018-08-21\n\nimport os\nimport re\nimport datetime\nfrom datetime import timedelta\nimport math\nmyFile = 'C:/CICD_run.log'\n\n#regex of log file, date, time, then words \nq = re.compile('((\\d{4})-(\\d{2})-(\\d{2}) (\\d{2}):(\\d{2}):(\\d{2}).(\\d{3}) - (\\w+.*) (\\w+.*))')\n\nmyList = []\n\n\n#open file\nwith open(myFile, 'r') as f:\n lines = f.read().splitlines()\n #print(lines)\n #print(' ' + lines[0])\n for line in lines:\n #print(line)\n prev = None\n # find all values in the file\n key_val = q.findall(line)\n for x in key_val:\n #print(key_val)\n #print(line)\n #i = 0\n #it = iter(key_val)\n #prev = next(it)\n #print(x)\n #print ()\n #a = x[:]\n # set variables\n YEAR = x[1]\n MONTH = x[2]\n DAY = x[3]\n HOUR = x[4]\n MINUTE = x[5]\n SEC = x[6]\n MS = x[7]\n APP = x[8]\n STATUS = x[9]\n #print(a[8] + ' ' + a[9])\n # look for failure\n if STATUS == 'failure':\n #print(APP + ' ' + STATUS)\t\t\n #print(i)\n #print('Found Test Failure: ',x[0])\n lookup = x[0]\n # find line # of failure\n line_no = lines.index(lookup)\n #print('Found on line: ', line_no+1)\n\n #print('Looking for next success of that ' + APP + ' Test: ')\n #print(lines)\n # look for successes of that APP\n suc_search = APP + ' ' + 'success'\n # enumerate through successes (line numbers)\n indices = [i for i, s in enumerate(lines) if suc_search in s]\n for i in indices:\n #print(i)\n # if the success is later in the file, aka higher line number, do more\n if i > line_no:\n #print('Success found at line: ', i+1)\n \n #print('That line is: ', lines[i])\n # split line into variables\n t = lines[i].split()\n #print(t[0:])\n DATE = t[0]\n TIME = t[1]\n #print(DATE + ' ' + TIME)\n date1 = YEAR + '-' + MONTH + '-' + DAY + ' ' + HOUR + ':' + MINUTE + ':' + SEC + '.' 
+ MS\n            date2 = DATE + ' ' + TIME\n            # calculate time difference\n            #print(date1)\n            #print(date2)\n            start = datetime.datetime.strptime(date1, '%Y-%m-%d %H:%M:%S.%f')\n            ends = datetime.datetime.strptime(date2, '%Y-%m-%d %H:%M:%S.%f')\n            diff = ends - start\n            #print(diff)\n            days = diff.days\n            seconds = int(round(diff.total_seconds()))\n            \n            mins, seconds = divmod(seconds, 60)\n            hours, mins = divmod(mins, 60)\n            \n            hours = int(diff.seconds // (60 * 60))\n            mins = int((diff.seconds // 60) % 60)\n\n            # add time difference to a list to compare it to the next iteration\n            #print(\"{:d}:{:02d}:{:02d}\".format(hours, mins, seconds))\n            myList.append(diff)\n            if myList[0] <= diff:  # <= so the very first measured outage also sets the report variables\n                myList[0] = diff\n                a = APP\n                d = days\n                h = hours\n                m = mins\n                s = seconds\n            #print(APP + ' was in failure state for: ', hours, 'hours, ', mins, 'mins, and ', seconds, ' seconds')\n            #print('New Max is ', myList[0]) \n            break\nprint('')\n# finally print the final top time\nprint(a + ': ', d, ' days, ', h, ' hours, ', m, ' minutes and ', s, ' seconds')\n#RealTimeTradingSystem: 2 days, 3 hours, 15 minutes and 41 seconds\nprint('')\n","sub_path":"max_log_parse.py","file_name":"max_log_parse.py","file_ext":"py","file_size_in_byte":4363,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"299528294","text":"#------------------------------------------\n#--- Author: Qi Pan\n#--- Date: 6/6/2020\n#--- Version: 1.0\n#--- Python Ver: 3.6.8\n#------------------------------------------\n\nimport time\nimport json\nimport paho.mqtt.client as mqtt\nimport pandas as pd\n\n# MQTT Settings \nMQTT_Broker = \"placeholder\"\nMQTT_Port = 1883\nKeep_Alive_Interval = 45\nMQTT_Topic = \"placeholder\"\n\nUserName = \"placeholder\"\nPassword = \"placeholder\"\n\n# file path to store data\nfile_path = 'train.csv'\n\n\ndata = []\ncount = 0\n\ndef add_data(topic,payload):\n\tnew_data = json.loads(payload)\n\tnew_data[\"MQTT_Topic\"] = topic\n\t\n\tgateways = new_data['metadata']['gateways'].copy()\n\tdel new_data['metadata']['gateways']\n\n\tnew_data.update(new_data[\"metadata\"])\n\tdel new_data['metadata']\n\n\tfor gateway in gateways:\n\t\tnew_data.update(gateway)\n\t\tdata.append(new_data.copy())\n\n\n#Subscribe to all Sensors at Base Topic\ndef on_connect(mqttc,mosq, obj, rc):\n\tprint(\"Connect success\")\n\tmqttc.subscribe(MQTT_Topic, 0)\n\n#Save Data into DB Table\ndef on_message(mqttc, obj, msg):\n\t\n\t# This is the Master Call for saving MQTT Data into DB\n\t# For details of \"sensor_Data_Handler\" function please refer \"sensor_data_to_db.py\"\n\tglobal count\n\tprint(\"MQTT Data Received...\", str(count))\n\tcount += 1\n\tadd_data(msg.topic, msg.payload)\n\n\ndef on_subscribe(mqttc, obj, mid, granted_qos):\n    print(\"Subscribe success\")\n\nclient_id = time.strftime('%Y%m%d%H%M%S', time.localtime(time.time()))\n\nmqttc = mqtt.Client(client_id)# the ClientId must be unique, so use the current time\n\nmqttc.username_pw_set(UserName,Password)# must be set, otherwise the broker returns \"Connected with result code 4\"\n\n\n# mqttc = mqtt.Client()\n\n# Assign event callbacks\nmqttc.on_message = on_message\nmqttc.on_connect = on_connect\nmqttc.on_subscribe = on_subscribe\n\nmqttc.connect(MQTT_Broker, int(MQTT_Port), int(Keep_Alive_Interval))\n\n\n\n# Continue the network loop\ntry:\n\tmqttc.loop_forever()\nexcept:\n\tprint(\"Connection stopped\")\n\tdata = pd.DataFrame(data)\n\tdata.to_csv(file_path)\n\tprint(\"data write to: \", 
file_path)\n","sub_path":"start_receive.py","file_name":"start_receive.py","file_ext":"py","file_size_in_byte":2006,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"262242906","text":"import csv #Import CSV to work with csv files\r\noutputfile = open('block.txt','r+') #Creating a new file which stores the output->All transaction id's included in the block\r\nweightTillNow = [0] # Initialization of A list which tracks the current weight of the Block\r\nblockIncludedTrans = []\r\n\r\nclass MempoolTransaction: #Class which verifies the weight and parent of each transaction\r\n \r\n def __init__(self, txid, fee, weight, parents):\r\n self.txid = txid\r\n self.fee = int(fee)\r\n self.weight = int(weight)\r\n self.parents = parents\r\n \r\n def verify_weight(self): #Method definition to check the weight constraint\r\n if weightTillNow[0]+self.weight<=4000000:\r\n weightTillNow[0]+=self.weight\r\n return True\r\n \r\n def verify_parent(self): #Method definition to check the parent constraint\r\n if self.parents:\r\n if self.parents in blockIncludedTrans :\r\n return True\r\n else:\r\n return True\r\n \r\n def isValidTrans(self): #Method definition for final evaluation of transaction to be included\r\n if self.verify_weight() and self.verify_parent():\r\n return self.txid\r\n else:\r\n return False\r\n \r\n \r\n \r\n \r\ndef parse_mempool_csv(): #Function definition to Read and Parse the mempool.csv file \r\n with open(\"mempool.csv\") as f:\r\n data = [line.strip().split(',') for line in f.readlines()] \r\n \r\n \r\n for j,i in enumerate(data): #sending each transaction details to validation\r\n \r\n if j!=0:\r\n objectTrans = MempoolTransaction(*data[j]) #Object Creation for each transaction \r\n taxid = objectTrans.isValidTrans() \r\n \r\n if taxid:\r\n outputfile.writelines(taxid+'\\n') #Writing the Validated transaction id into the output file\r\n blockIncludedTrans.append(taxid)\r\n \r\n\r\nparse_mempool_csv()\r\n \r\n\r\n#outputfile.seek(0)\r\n#outputfile.truncate()\r\n#outputfile.close()\r\n#print(len(blockIncludedTrans))\r\n","sub_path":"SourceCode.py","file_name":"SourceCode.py","file_ext":"py","file_size_in_byte":2521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"3179653","text":"# 2010-08-13 by Gnacik\r\n# Based on Freya PTS\r\nfrom l2server.gameserver.model.quest import State\r\nfrom l2server.gameserver.model.quest.jython import QuestJython as JQuest\r\n\r\nqn = \"10282_ToTheSeedOfAnnihilation\"\r\n\r\n# NPC\r\nKBALDIR = 32733\r\nKLEMIS = 32734\r\n\r\n# ITEMS\r\nSOA_ORDERS = 15512\r\n\r\n\r\nclass Quest(JQuest):\r\n def __init__(self, id, name, descr):\r\n JQuest.__init__(self, id, name, descr)\r\n\r\n def onAdvEvent(self, event, npc, player):\r\n htmltext = event\r\n st = player.getQuestState(qn)\r\n if not st: return\r\n\r\n if event == \"32733-07.htm\":\r\n st.setState(State.STARTED)\r\n st.set(\"cond\", \"1\")\r\n st.giveItems(SOA_ORDERS, 1)\r\n st.playSound(\"ItemSound.quest_accept\")\r\n elif event == \"32734-02.htm\":\r\n st.unset(\"cond\")\r\n st.addExpAndSp(1148480, 99110)\r\n st.takeItems(SOA_ORDERS, -1)\r\n st.exitQuest(False)\r\n return htmltext\r\n\r\n def onTalk(self, npc, player):\r\n htmltext = Quest.getNoQuestMsg(player)\r\n st = player.getQuestState(qn)\r\n if not st: return htmltext\r\n\r\n npcId = npc.getNpcId()\r\n id = st.getState()\r\n cond = st.getInt(\"cond\")\r\n\r\n if id == State.COMPLETED:\r\n if npcId == KBALDIR:\r\n htmltext = 
\"32733-09.htm\"\r\n elif npcId == KLEMIS:\r\n htmltext = \"32734-03.htm\"\r\n elif id == State.CREATED:\r\n if player.getLevel() >= 84:\r\n htmltext = \"32733-01.htm\"\r\n else:\r\n htmltext = \"32733-00.htm\"\r\n else:\r\n if cond == 1:\r\n if npcId == KBALDIR:\r\n htmltext = \"32733-08.htm\"\r\n elif npcId == KLEMIS:\r\n htmltext = \"32734-01.htm\"\r\n return htmltext\r\n\r\n\r\nQUEST = Quest(10282, qn, \"To the Seed of Annihilation\")\r\n\r\nQUEST.addStartNpc(KBALDIR)\r\nQUEST.addTalkId(KBALDIR)\r\nQUEST.addTalkId(KLEMIS)\r\n","sub_path":"data/scripts/quests/10282_ToTheSeedOfAnnihilation/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1969,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"643658688","text":"# uncompyle6 version 3.6.7\n# Python bytecode 3.6 (3379)\n# Decompiled from: Python 3.8.2 (tags/v3.8.2:7b3ab59, Feb 25 2020, 23:03:10) [MSC v.1916 64 bit (AMD64)]\n# Embedded file name: build/bdist.macosx-10.7-x86_64/egg/airflow/contrib/operators/gcs_list_operator.py\n# Compiled at: 2019-09-11 03:47:34\n# Size of source mod 2**32: 3703 bytes\nfrom typing import Iterable\nfrom airflow.contrib.hooks.gcs_hook import GoogleCloudStorageHook\nfrom airflow.models import BaseOperator\nfrom airflow.utils.decorators import apply_defaults\n\nclass GoogleCloudStorageListOperator(BaseOperator):\n \"\"\"GoogleCloudStorageListOperator\"\"\"\n template_fields = ('bucket', 'prefix', 'delimiter')\n ui_color = '#f0eee4'\n\n @apply_defaults\n def __init__(self, bucket, prefix=None, delimiter=None, google_cloud_storage_conn_id='google_cloud_default', delegate_to=None, *args, **kwargs):\n (super(GoogleCloudStorageListOperator, self).__init__)(*args, **kwargs)\n self.bucket = bucket\n self.prefix = prefix\n self.delimiter = delimiter\n self.google_cloud_storage_conn_id = google_cloud_storage_conn_id\n self.delegate_to = delegate_to\n\n def execute(self, context):\n hook = GoogleCloudStorageHook(google_cloud_storage_conn_id=(self.google_cloud_storage_conn_id),\n delegate_to=(self.delegate_to))\n self.log.info('Getting list of the files. 
Bucket: %s; Delimiter: %s; Prefix: %s', self.bucket, self.delimiter, self.prefix)\n return hook.list(bucket=(self.bucket), prefix=(self.prefix),\n delimiter=(self.delimiter))","sub_path":"pycfiles/apache_ariatosca-0.2.0-py2-none-any/gcs_list_operator.cpython-36.py","file_name":"gcs_list_operator.cpython-36.py","file_ext":"py","file_size_in_byte":1572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"527834159","text":"# CS121: Treemaps\n# Sample use of ColorKey\n#\n# This code is not explicitly part of the assignment, but rather a demo to\n# provide familiarity with the use of ColorKey\n\nfrom drawing import ChiCanvas, ColorKey\n\n\ndef go():\n # create a canvas\n c = ChiCanvas(10, 10)\n\n # create a color key\n ck = ColorKey(set([\"SR\", \"W\", \"A\"]))\n\n # draw the color key\n ck.draw_color_key(c, .8, 0, 1.0, .30,\n code_to_label={\"SR\": \"Streets/Roads\", \"W\": \"Water\",\n \"A\": \"Airports\"})\n\n # show it\n c.show()\n\nif __name__ == \"__main__\":\n go()\n","sub_path":"pas/pa7/sample_ck.py","file_name":"sample_ck.py","file_ext":"py","file_size_in_byte":597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"560828428","text":"from os import listdir\nfrom os.path import isfile, join\n\ndef fun(s):\n\tsplitted = s.split(\"_\")\n\tres = \"\"\n\ttwo = False\n\tfor x in splitted:\n\t\tif x == \"2\":\n\t\t\ttwo = True\n\t\tif x == \"rlb\" and two:\n\t\t\tres += \"rlb2 \"\n\t\tif x == \"rlb\" and not two:\n\t\t\tres += \"rlb1 \"\n\t\tif x == \"tts\":\n\t\t\tres += \"tts \"\n\t\tif x == \"ptk\":\n\t\t\tres += \"ptk \"\n\t\tif x == \"5\" or x == \"10\" or x == \"15\" or x == \"20\":\n\t\t\tres += x + \" \"\n\tif res == \"\":\n\t\tres = \"all other\"\n\treturn res\n\nmypath = \"res\"\n\nonlyfiles = [f for f in listdir(mypath) if isfile(join(mypath, f))]\n\npmcfg5files = []\npmcfg10files = []\npmcfg15files = []\npmcfg20files = []\n\nfor file in onlyfiles:\n\tif file.startswith(\"pmcfg-5\"):\n\t\tpmcfg5files.append(file)\n\telif file.startswith(\"pmcfg-10\"):\n\t\tpmcfg10files.append(file)\n\telif file.startswith(\"pmcfg-15\"):\n\t\tpmcfg15files.append(file)\n\telif file.startswith(\"pmcfg-20\"):\n\t\tpmcfg20files.append(file)\n\npmcfg5wordtimes = [[],[],[],[],[]]\n\nbench_mcfg = \"bench_mcfg\"\n\nl = [\"bench_mcfg_tts\",\n\t\t\"bench_mcfg_rlb\",\n\t\t\"2_bench_mcfg_rlb\",\n\t\t\"bench_mcfg_tts_rlb\",\n\t\t\"2_bench_mcfg_tts_rlb\",\n\t\t\"bench_mcfg_rlb_tts\",\n\t\t\"2_bench_mcfg_rlb_tts\",\n\t\t\"bench_mcfg_tts_ptk_20\",\n\t\t\"bench_mcfg_tts_ptk_15\",\n\t\t\"bench_mcfg_rlb_tts_ptk_20\",\n\t\t\"2_bench_mcfg_rlb_tts_ptk_20\",\n\t\t\"bench_mcfg_rlb_tts_ptk_15\",\n\t\t\"2_bench_mcfg_rlb_tts_ptk_15\",\n\t\t\"bench_mcfg_tts_rlb_ptk_20\",\n\t\t\"2_bench_mcfg_tts_rlb_ptk_20\",\n\t\t\"bench_mcfg_tts_rlb_ptk_15\",\n\t\t\"2_bench_mcfg_tts_rlb_ptk_15\",\n\t\t\"bench_mcfg_tts_ptk_20_rlb\",\n\t\t\"2_bench_mcfg_tts_ptk_20_rlb\",\n\t\t\"bench_mcfg_tts_ptk_15_rlb\",\n\t\t\"2_bench_mcfg_tts_ptk_15_rlb\"]\n\nabstand = 0.4\n\ntimes = {}\n\ntimes[bench_mcfg] = {}\nfor x in [5, 10, 15, 20]:\n\ttimes[bench_mcfg][x] = []\n\tf = open(\"res/pmcfg-\" + str(x) + \"_\" + bench_mcfg + \".txt\")\n\tlines = f.readlines()\n\tfor y in xrange(0,5):\n\t\tline = lines[1 + y*3]\n\t\tlinevec = line.split(\" \")\n\t\ttimes[bench_mcfg][x].append(float(linevec[1]))\n\nfile_aufrufe = open(\"diagrams/aufrufe.tex\", \"w+\")\n\n\nfor ctf in l:\n\tif ctf not in times:\n\t\ttimes[ctf] = {}\n\tfor x in [5, 10, 15, 
20]:\n\t\ttimes[ctf][x] = []\n\t\tf = open(\"res/pmcfg-\" + str(x) + \"_\" + ctf + \".txt\")\n\t\tlines = f.readlines()\n\t\tfor y in xrange(0,5):\n\t\t\tline = lines[1 + y*3]\n\t\t\tlinevec = line.split(\" \")\n\t\t\ttimes[ctf][x].append(float(linevec[1]))\n\n\tf = open(\"diagrams/dia\" + \"\".join(fun(ctf).split(\" \")) + \".tex\", \"w+\")\n\tf.write(\"\\\\begin{figure}\\n\")\n\tf.write(\"\\\\centering\\n\")\n\tf.write(\"\\\\begin{tikzpicture}\\n\")\n\n\tf.write(\"\\\\draw [->] (0,0) to (\" + str(abstand * 25) + \",0);\\n\")\n\tf.write(\"\\\\draw [->] (0,0) to (0,6.5);\\n\")\n\n\tf.write(\"\\\\draw (0,2) to (\" + str(abstand * 25) + \",2);\\n\")\n\n\tf.write(\"\\\\draw (-0.1,2) to (0.1,2);\\n\")\n\tf.write(\"\\\\node [anchor=east] at (-0.2,2) {1};\\n\")\n\tf.write(\"\\\\draw (-0.1,4) to (0.1,4);\\n\")\n\tf.write(\"\\\\node [anchor=east] at (-0.2,4) {2};\\n\")\n\tf.write(\"\\\\draw (-0.1,6) to (0.1,6);\\n\")\n\tf.write(\"\\\\node [anchor=east] at (-0.2,6) {$\\\\geq$ 3};\\n\")\n\n\tf.write(\"\\\\node [rotate=90,anchor=east] at (-0.85,5.5) {multiple of original runtime};\\n\")\n\tf.write(\"\\\\node at (\" + str(abstand*13) + \", -0.8) {size of corpus};\\n\")\n\n\tcurrentpos = 0.1\n\n\tfor x in [5, 10, 15, 20]:\n\t\tfor y in xrange(0,5):\n\t\t\tif y == 2:\n\t\t\t\tf.write(\"\\\\node [anchor = west] at (\" + str(currentpos) + \", -0.3) {\" + str(x) + \"};\\n\")\n\n\t\t\tt = times[ctf][x][y]\n\t\t\tnormaltime = times[bench_mcfg][x][y]\n\t\t\tp = t / normaltime\n\t\t\theight = 6\n\t\t\tif p < 3:\n\t\t\t\theight = p * 2\n\n\t\t\tgray = 60\n\t\t\tif p < 1:\n\t\t\t\tgray = 30\n\n\t\t\tf.write(\"\\\\node [minimum width=\" + str(height) + \"cm,\"\n\t\t\t\t+ \"anchor=north west,\"\n\t\t\t\t+ \"rotate=90,\"\n\t\t\t\t+ \"fill=gray!\" + str(gray) + \"] at (\" + str(currentpos) + \", 0) {};\\n\")\n\n\t\t\tif p > 3:\n\t\t\t\tp = round(p, 1)\n\t\t\t\tf.write(\"\\\\node [anchor=north east,rotate=90,inner sep = 0] at (\" + str(currentpos) + \", 6) {\\\\footnotesize\" + str(p) + \"};\\n\")\n\n\t\t\tcurrentpos += abstand\n\t\tcurrentpos += abstand\n\n\n\tf.write(\"\\\\end{tikzpicture}\\n\")\n\tf.write(\"\\\\caption{approximation sequence: \" + fun(ctf) + \"}\\n\")\n\tf.write(\"\\\\label{fig:\" + \"\".join(fun(ctf).split(\" \")) + \"}\\n\")\n\tf.write(\"\\\\end{figure}\\n\")\n\n\tfile_aufrufe.write(\"\\\\input{diagrams/dia\" + \"\".join(fun(ctf).split(\" \")) + \"}\\n\")\n\n\t","sub_path":"create_diagrams.py","file_name":"create_diagrams.py","file_ext":"py","file_size_in_byte":4029,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"471488212","text":"import fileinput, sys\n\ndef solve(k, c, s):\n ans = \"\"\n for i in range(0, k):\n ans += \"%s \" % (i + 1)\n return ans\n\nindex = 0\nfor line in fileinput.input():\n index += 1\n if index == 1:\n continue\n tokens = line.split(\" \")\n k = int(tokens[0])\n c = int(tokens[1])\n s = int(tokens[2])\n print(\"Case #%d: %s\" % (index - 1, solve(k, c, s)))\n","sub_path":"codes/CodeJamCrawler/16_0_4_neat/16_0_4_eratos_D-small.py","file_name":"16_0_4_eratos_D-small.py","file_ext":"py","file_size_in_byte":375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"409275786","text":"class Solution:\n def spiralOrder(self, matrix: List[List[int]]) -> List[int]:\n output = []\n\n rows_start = columns_start = 0\n rows_end, columns_end = len(matrix), len(matrix[0])\n size = rows_end * columns_end\n\n while True:\n for j in range(columns_start, columns_end):\n 
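# copy the current top row from left to right; rows_start then moves the top boundary down\n                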
output.append(matrix[rows_start][j])\n rows_start += 1\n if len(output) == size:\n break\n\n for i in range(rows_start, rows_end):\n output.append(matrix[i][columns_end - 1])\n columns_end -= 1\n if len(output) == size:\n break\n\n for j in range(columns_end-1, columns_start-1, -1):\n output.append(matrix[rows_end-1][j])\n rows_end -= 1\n if len(output) == size:\n break\n\n for i in range(rows_end-1, rows_start-1, -1):\n output.append(matrix[i][columns_start])\n columns_start += 1\n if len(output) == size:\n break\n\n return output\n","sub_path":"challenges/spiral_matrix_i/spiral_matrix_i.py","file_name":"spiral_matrix_i.py","file_ext":"py","file_size_in_byte":1080,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"362279164","text":"# -*- coding:utf-8 -*-\n\nfrom ctypes import *\n\nDLL_FILE = r'c:\\dna\\ezdnaapi.dll'\n\n\ndef trydecode(s):\n try:\n return s.decode('gbk')\n except:\n try:\n return s[:-1].decode('gbk')\n except:\n return s\n\n\nclass EDNA(object):\n api = windll.LoadLibrary(DLL_FILE)\n\n def getServiceList(self, service):\n nCount = c_ushort(service.nCount)\n szType = c_char_p(service.szType.encode('gbk'))\n szStartSvcName = c_char_p(service.szStartSvcName.encode('gbk'))\n\n szSvcName = (c_char_p * service.nCount)()\n nSvcName = c_short(service.nSvcName)\n\n szSvcDesc = (c_char_p * service.nCount)()\n nSvcDesc = c_ushort(service.nSvcDesc)\n\n szSvcType = (c_char_p * service.nCount)()\n nSvcType = c_ushort(service.nSvcType)\n\n szStatus = (c_char_p * service.nCount)()\n nStatus = c_ushort(service.nStatus)\n\n for i in range(service.nCount):\n szSvcName[i] = c_char_p(b'\\0' * service.nSvcName)\n szSvcDesc[i] = c_char_p(b'\\0' * service.nSvcDesc)\n szSvcType[i] = c_char_p(b'\\0' * service.nSvcType)\n szStatus[i] = c_char_p(b'\\0' * service.nStatus)\n\n res = self.api.DnaGetServiceList(nCount, szType, szStartSvcName, szSvcName, nSvcName,\n szSvcDesc, nSvcDesc, szSvcType, nSvcType, szStatus, nStatus)\n\n service.szSvcName = [trydecode(s.strip()) for s in szSvcName]\n service.szSvcDesc = [trydecode(s.strip()) for s in szSvcDesc]\n service.szSvcType = [trydecode(s.strip()) for s in szSvcType]\n service.szStatus = [trydecode(s.strip()) for s in szStatus]\n\n return res\n\n def getPointsFromService(self, servicePoint):\n nCount = c_ushort(servicePoint.nCount)\n szServiceName = c_char_p(servicePoint.szServiceName.encode('gbk'))\n nStarting = c_ushort(servicePoint.nStarting)\n\n dValue = (c_double * servicePoint.nCount)()\n\n szPointId = (c_char_p * servicePoint.nCount)()\n nPointIdLen = c_ushort(servicePoint.nPointIdLen)\n szTime = (c_char_p * servicePoint.nCount)()\n nTimeLen = c_ushort(servicePoint.nTimeLen)\n szStatus = (c_char_p * servicePoint.nCount)()\n nStatusLen = c_ushort(servicePoint.nStatusLen)\n szDesc = (c_char_p * servicePoint.nCount)()\n nDescLen = c_ushort(servicePoint.nDescLen)\n szUnits = (c_char_p * servicePoint.nCount)()\n nUnitsLen = c_ushort(servicePoint.nUnitsLen)\n\n for i in range(servicePoint.nCount):\n szPointId[i] = c_char_p(b'\\0' * servicePoint.nPointIdLen)\n szTime[i] = c_char_p(b'\\0' * servicePoint.nTimeLen)\n szStatus[i] = c_char_p(b'\\0' * servicePoint.nStatusLen)\n szDesc[i] = c_char_p(b'\\0' * servicePoint.nDescLen)\n szUnits[i] = c_char_p(b'\\0' * servicePoint.nUnitsLen)\n\n res = self.api.DnaGetPointList(nCount, szServiceName, nStarting, szPointId, nPointIdLen, dValue,\n szTime, nTimeLen, szStatus, nStatusLen, szDesc, nDescLen, szUnits, nUnitsLen)\n\n servicePoint.szPointId = 
[trydecode(s.strip()) for s in szPointId]\n servicePoint.szTime = [trydecode(s.strip()) for s in szTime]\n servicePoint.szStatus = [trydecode(s.strip()) for s in szStatus]\n servicePoint.szDesc = [trydecode(s.strip()) for s in szDesc]\n servicePoint.szUnits = [trydecode(s.strip()) for s in szUnits]\n servicePoint.dValue = dValue\n\n return res\n\n def getRTAll(self, point):\n '''\n Get the specified point Real-Time all information.\n '''\n szPoint = c_char_p(point.szPoint.encode('gbk'))\n pdValue = pointer(c_double(point.pdValue))\n szTime = c_char_p(point.szTime.encode('gbk'))\n nTime = c_ushort(point.nTime)\n szStatus = c_char_p(point.szStatus.encode('gbk'))\n nStatus = c_ushort(point.nStatus)\n szDesc = c_char_p(point.szDesc.encode('gbk'))\n nDesc = c_ushort(point.nDesc)\n szUnits = c_char_p(point.szUnits.encode('gbk'))\n nUnits = c_ushort(point.nUnits)\n\n res = self.api.DNAGetRTAll(szPoint, pdValue, szTime, nTime, szStatus, nStatus, szDesc, nDesc, szUnits, nUnits)\n\n point.pdValue = pdValue.contents.value\n point.szTime = trydecode(szTime.value)\n point.szStatus = trydecode(szStatus.value)\n point.szDesc = trydecode(szDesc.value)\n point.szUnits = trydecode(szUnits.value)\n\n return res\n\n\nclass Service(object):\n def __init__(self, nCount, szType, szStartSvcName, nSvcName, nSvcDesc, nSvcType, nStatus):\n self.nCount = nCount\n self.szType = szType\n self.szStartSvcName = szStartSvcName\n self.szSvcName = None\n self.nSvcName = nSvcName\n self.szSvcDesc = None\n self.nSvcDesc = nSvcDesc\n self.szSvcType = None\n self.nSvcType = nSvcType\n self.szStatus = None\n self.nStatus = nStatus\n\n\nclass ServicePoint(object):\n def __init__(self, nCount, szServiceName, nStarting, nPointIdLen, nTimeLen, nStatusLen, nDescLen, nUnitsLen):\n self.nCount = nCount\n self.szServiceName = szServiceName\n self.nStarting = nStarting\n self.nPointIdLen = nPointIdLen\n self.nTimeLen = nTimeLen\n self.nStatusLen = nStatusLen\n self.nDescLen = nDescLen\n self.nUnitsLen = nUnitsLen\n self.dValue = None\n self.szPointId = None\n self.szTime = None\n self.szStatus = None\n self.szDesc = None\n self.szUnits = None\n\n\nclass Point(object):\n def __init__(self, szPoint, pdValue, nTime, nStatus, nDesc, nUnits):\n '''\n param szPoint the Real-Time point name. (const char *szPoint)\n param pdValue return the point value. (double *pdValue)\n param szTime return the point time. (char *szTime)\n param nTime the szTime reserved length. (unsigned short nTime)\n param szStatus return the point status. (char *szStatus)\n param nStatus the szStatus reserved length. (unsigned short nStatus)\n param szDesc return the point description. (char *szDesc)\n param nDesc the szDesc reserved length. (unsigned short nDesc)\n param szUnits return the point. (char *szUnits)\n param nUnits the szUnits reserved length. 
(unsigned short nUnits)\n '''\n self.szPoint = szPoint\n self.pdValue = pdValue\n self.nTime = nTime\n self.nStatus = nStatus\n self.nDesc = nDesc\n self.nUnits = nUnits\n self.szTime = \"\\0\" * nTime\n self.szStatus = \"\\0\" * nStatus\n self.szDesc = \"\\0\" * nDesc\n self.szUnits = \"\\0\" * nUnits\n\n def __str__(self):\n msg = \"szPoint %s, pdValue %f, szTime %s, nTime %d \" \\\n \", szStatus %s, nStatus %d, szDesc %s, nDesc %d \" \\\n \", szUnits %s, nUnits %d \" \\\n % (self.szPoint, self.pdValue, self.szTime, self.nTime,\n self.szStatus, self.nStatus, self.szDesc, self.nDesc,\n self.szUnits, self.nUnits)\n return msg","sub_path":"edna/_edna.py","file_name":"_edna.py","file_ext":"py","file_size_in_byte":7075,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"120859373","text":"import smbus, time\n\nclass Bus:\n def __init__(self, bus_id=1):\n self.bus = smbus.SMBus(bus_id)\n self.lock = False\n\n def write_byte(self, addr, byte):\n while self.lock:\n time.sleep(0.1)\n self.lock = True\n try:\n self.bus.write_byte(addr, byte)\n self.lock = False\n except Exception as e:\n self.lock = False\n raise e\n\n def read_byte(self, addr):\n while self.lock:\n time.sleep(0.1)\n self.lock = True\n try:\n result = self.bus.read_byte(addr)\n except:\n result = None\n self.lock = False\n return result","sub_path":"libs/i2cbus.py","file_name":"i2cbus.py","file_ext":"py","file_size_in_byte":673,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"446830631","text":"__author__ = 'astronaut'\n\nimport os.path\n\n\nICONS_PATH = '../resources/icons'\n\nMAIN_ICON_PATH = os.path.join(ICONS_PATH, 'barcode-icon.png')\n\nSETTINGS_FILE_PATH = '../settings.cfg'\n\n\nAPPLICATION_NAME = 'Barney'\nVERSION = '1.0'\nAUTHORS = ['Maxim Yaskevich ']\n\nUSERS_MANUAL_PAGE = '../help/index.html'\n\nEMODE = {\n 'PLAIN': 1,\n 'SMS': 2,\n 'EMAIL': 3,\n 'EVENT': 4,\n 'DCALL': 5,\n 'URL': 6,\n}\n\ndef emode_to_readable(emode):\n return EMODE.keys()[EMODE.values().index(emode)]\n","sub_path":"barney/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"33522872","text":"import cv2\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\nimg = cv2.imread('hu.jpg',1)\n\nroi=img[149:194,460:608]\nimg[77:122,640:788]=roi\n\nimg1 = cv2.cvtColor(img,cv2.COLOR_BGR2RGB)\nplt.imshow(img1)\nplt.xticks([]), plt.yticks([])\nplt.show()\n\n# cv2.namedWindow('hu', cv2.WINDOW_NORMAL)\n# cv2.imshow('hu', img)\n# cv2.imwrite('hu2.jpg',img)\n# cv2.waitKey(0)\n# cv2.destroyAllWindows()","sub_path":"test/replaceonepiece.py","file_name":"replaceonepiece.py","file_ext":"py","file_size_in_byte":389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"637731628","text":"#!/usr/bin/env python3\nclass human:\n __age = 0\n __sex = ''\n __height = 0\n __weight = 0\n name = ''\n\n def __init__(self,age,sex,height,weight):\n self.__age = age\n self.__sex = sex\n self.__height = height\n self.__weight = weight\n\n def setname(self,name):\n self.name = name\n\n\n def show(self):\n print('name:' + self.name + '\\n' + 'age:' + str(self.__age) + '\\n' + 'sex:' + self.__sex)\n print('height:' + self.__height + '\\nweight:' + self.__weight)\n\nclass student(human):\n __classes = 0\n __grade = 0\n __num = 0\n\n def __init__(self,classes,grade,num,age,sex,height,weight):\n 
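# keep the student-specific fields here and pass the shared ones through to human.__init__ below\n        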
self.__classes = classes\n self.__grade = grade\n self.__num = num\n human.__init__(self,age,sex,height,weight)\n\n def show(self):\n human.show(self)\n print('class:' + str(self.__classes) + '\\ngrage:' + str(self.__grade) + '\\nnum:' + str(self.__num))\n\n\n\na = student(12,3,2018,19,'male','175','65')\na.setname('Tom')\na.show()\n","sub_path":"scripts/python/student.py","file_name":"student.py","file_ext":"py","file_size_in_byte":1018,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"311942540","text":"import dateutil.relativedelta as relativedelta\nimport os\nfrom datetime import datetime\nfrom jinja2 import Environment, FileSystemLoader\n\nOVERALL_SKILL_LABEL='Overall'\n\n\ndef render(highscores, min, top, mode, base, dir):\n top = __get_top_5(highscores, top)\n clans = __get_clan_info(highscores, min)\n basedir = os.path.dirname(os.path.abspath(__file__))\n env = Environment(loader=FileSystemLoader(basedir), trim_blocks=True)\n env.filters['duration'] = __duration_filter\n env.filters['xp'] = __xp_filter\n __create_dir(base, dir)\n env.get_template('state.jinja').stream(top=top, date=mode, clans=clans).dump(dir + '/index.html')\n\n\ndef __create_dir(base, dir):\n loc = os.path.abspath(base + '/' + dir)\n if not os.path.exists(loc):\n os.makedirs(loc)\n\n\ndef __duration_filter(mode):\n if mode == 'normal':\n date = datetime.now() - relativedelta.relativedelta(months=1)\n return date.strftime('%B %Y')\n elif mode == 'preview':\n date = datetime.now()\n return date.strftime('%B %Y')\n elif mode == 'year':\n date = datetime.now() - relativedelta.relativedelta(years=1)\n return date.strftime('%Y')\n else:\n return 'Double XP Weekend'\n\n\n\ndef __xp_filter(xp):\n return \"{:,}\".format(xp)\n\n\ndef __get_top_5(highscores, top):\n grouped = __get_grouped_skill(highscores)\n results = []\n for group in grouped:\n result = {'skill': group['skill'], 'highscores': []}\n sortedscore = sorted(group['highscores'], key=lambda highscore: highscore['xp'], reverse=True)\n result['highscores'] = sortedscore[:top]\n results.append(result)\n return results\n\n\ndef __get_clan_info(highscores, min):\n results = []\n for clanscore in highscores:\n result = {'clan': clanscore['clan'], 'averages': clanscore['averages'], 'active': 0, 'xp': 0}\n results.append(result)\n for skillscore in clanscore['highscores']:\n for highscore in skillscore['highscores']:\n if skillscore['skill'] == OVERALL_SKILL_LABEL and highscore['xp'] >= min:\n result['xp'] += highscore['xp']\n result['active'] += 1\n return sorted(results, key = lambda result: result['active'], reverse=True)\n\n\ndef __get_grouped_skill(highscores):\n results = []\n for clanscore in highscores:\n for skillscore in clanscore['highscores']:\n for highscore in skillscore['highscores']:\n found = False\n for result in results:\n if skillscore['skill'] == result['skill']:\n result['highscores'].append(highscore)\n found = True\n break\n if not found:\n newresult = {'skill':skillscore['skill'], 'highscores': []}\n newresult['highscores'].append(highscore)\n results.append(newresult)\n return results","sub_path":"template/state.py","file_name":"state.py","file_ext":"py","file_size_in_byte":2942,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"498897473","text":"# (C) Copyright 2014, 2015 Hewlett Packard Enterprise Development Company LP\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in 
compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n# implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom monascaclient.apiclient import base\n\nfrom six.moves.urllib import parse\nfrom six.moves.urllib_parse import unquote\n\n\nclass MonascaManager(base.BaseManager):\n\n def __init__(self, client, **kwargs):\n super(MonascaManager, self).__init__(client)\n\n def _parse_body(self, body):\n if type(body) is dict:\n self.next = None\n for link in body['links']:\n if link['rel'] == 'next':\n self.next = link['href']\n return body['elements']\n else:\n return body\n\n def _list(self, path, dim_key=None, **kwargs):\n \"\"\"Get a list of metrics.\"\"\"\n url_str = self.base_url + path\n if dim_key and dim_key in kwargs:\n dimstr = self.get_dimensions_url_string(kwargs[dim_key])\n kwargs[dim_key] = dimstr\n\n if kwargs:\n url_str += '?%s' % parse.urlencode(kwargs, True)\n resp, body = self.client.json_request(\n 'GET', url_str)\n return self._parse_body(body)\n\n def get_dimensions_url_string(self, dimdict):\n dim_list = list()\n for k, v in dimdict.items():\n # In case user specifies a dimension multiple times\n if isinstance(v, (list, tuple)):\n v = v[-1]\n if v:\n dim_str = k + ':' + v\n else:\n dim_str = k\n dim_list.append(dim_str)\n return ','.join(dim_list)\n\n def list_next(self):\n if hasattr(self, 'next') and self.next:\n self.next = unquote(self.next)\n path = self.next.split(self.base_url, 1)[-1]\n return self._list(path)\n return None\n","sub_path":"python-monascaclient-1.5.0/monascaclient/common/monasca_manager.py","file_name":"monasca_manager.py","file_ext":"py","file_size_in_byte":2299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"211247606","text":"# \"High-level\" networking interface\n\nimport os\ntry:\n import fcntl\nexcept ImportError:\n _fcntl = False\nelse:\n _fcntl = True\nimport errno\n\nimport trio\nfrom trio._util import ConflictDetector\nfrom trio.abc import SendStream, ReceiveStream, AsyncResource\n\n__all__ = [\"ReadFDStream\", \"WriteFDStream\"]\n\n_closed_stream_errnos = {\n # Unix\n errno.EBADF,\n # Windows\n errno.ENOTSOCK,\n}\n\n\nclass _FDStream(AsyncResource):\n def __init__(self, fd):\n if hasattr(fd, 'fileno'):\n # Unwrap Python's IO buffers.\n while hasattr(fd, 'detach'):\n fd = fd.detach()\n # _io.FileIO.closefd is not writeable and we don't want to hold\n # a reference to the original file descriptor, so take the easy\n # way out\n fd = os.dup(fd.fileno())\n if not isinstance(fd, int) or fd < 0:\n raise TypeError(\"ReadFDStream requires a file descriptor\")\n\n self._fd = fd\n if _fcntl:\n self._fdflags = fcntl.fcntl(fd, fcntl.F_GETFL, 0)\n fcntl.fcntl(fd, fcntl.F_SETFL, self._fdflags | os.O_NONBLOCK)\n\n self._send_conflict_detector = ConflictDetector(\n \"another task is currently sending data to this ReadFDStream\"\n )\n\n def fileno(self):\n return self._fd\n\n async def aclose(self):\n self.close()\n await trio.hazmat.checkpoint()\n\n def close(self):\n try:\n # fcntl.fcntl(self._fd, fcntl.F_SETFL, self._fdflags)\n os.close(self._fd)\n except OSError as err:\n if err.errno != errno.EBADF:\n raise\n\n\nclass ReadFDStream(_FDStream, 
ReceiveStream):\n    \"\"\"An implementation of the :class:`trio.abc.ReceiveStream`\n    interface based on a raw file descriptor.\n\n    Args:\n      fd: The file descriptor to wrap.\n\n    The wrapped file descriptor (or the duplicate made when a file object\n    is passed) *is* closed by this object's :meth:`close` and\n    :meth:`aclose` methods.\n    \"\"\"\n\n    async def receive_some(self, max_bytes):\n        if max_bytes < 1:\n            await trio.hazmat.checkpoint()\n            raise ValueError(\"max_bytes must be >= 1\")\n        await trio.hazmat.wait_readable(self._fd)\n        return os.read(self._fd, max_bytes)\n\n\nclass WriteFDStream(_FDStream, SendStream):\n    \"\"\"An implementation of the :class:`trio.abc.SendStream`\n    interface based on a raw file descriptor.\n\n    Args:\n      fd: The file descriptor to wrap.\n\n    The wrapped file descriptor (or the duplicate made when a file object\n    is passed) *is* closed by this object's :meth:`close` and\n    :meth:`aclose` methods.\n    \"\"\"\n\n    async def send_all(self, data):\n        with self._send_conflict_detector.sync:\n            with memoryview(data) as data:\n                if not data:\n                    await trio.hazmat.checkpoint()\n                    return\n                total_sent = 0\n                while total_sent < len(data):\n                    with data[total_sent:] as remaining:\n                        await trio.hazmat.wait_writable(self._fd)\n                        sent = os.write(self._fd, remaining)\n                        total_sent += sent\n\n    async def wait_send_all_might_not_block(self):\n        async with self._send_conflict_detector:\n            await trio.hazmat.wait_writable(self._fd)\n","sub_path":"trio_asyncio/fd_stream.py","file_name":"fd_stream.py","file_ext":"py","file_size_in_byte":3207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"367569361","text":"import os, time, stomp, pickle, requests, json, math \nfrom os import path\nfrom threading import Thread\n\npath_ml_model = os.environ.get(\"MLMODELPATH\",\".\")\n#////////////////////////////////////////////////////////////////////////////\nactivemq_username = os.getenv(\"ACTIVEMQ_USER\",\"aaa\") \nactivemq_password = os.getenv(\"ACTIVEMQ_PASSWORD\",\"111\") \nactivemq_hostname = os.getenv(\"ACTIVEMQ_HOST\",\"localhost\")\nactivemq_port = int(os.getenv(\"ACTIVEMQ_PORT\",\"61613\")) \npersistence_storage_queue = \"/queue/persistent_storage\"\nsubscription_topic = 'performance_model_evaluator'\nps_management_queue = os.environ.get(\"PS_MANAGEMENT_QUEUE\",\"persistent_storage\")\n#/////////////////////////////////////////////////////////////////////////////\ntolerated_error = float(os.environ.get(\"TOLERATED_COMPARISON_ERROR\",\"5\"))\nprediction_precision = int(os.environ.get(\"PREDICTION_PRECISION\",\"90\")) #90%\n#/////////////////////////////////////////////////////////////////////////////\nperformance_model_train_url = os.environ.get(\"PERFORMANCE_MODEL_URL\",\"http://localhost:8766/api/v1/train\")\n\n\nclass EvaluationCandidate():\n    def __init__(self, application, target, features, prediction,variant):\n        self.application = application\n        self.target = target\n        self.features = features\n        self.prediction = prediction\n        self.variant = variant \n        self.real_value = None \n        self.time = time.time()\n\n    def getApplication(self):\n        return self.application\n    def getTarget(self):\n        return self.target\n    def getFeatures(self):\n        return self.features\n    def getVariant(self):\n        return self.variant \n    def getPrediction(self):\n        return self.prediction\n    def computeError(self):\n        if self.real_value != None:\n            return (abs(self.real_value - self.prediction)/self.real_value)*100\n    def setRealValue(self,_value):\n        self.real_value = _value\n    def match(self,features):\n        for key, _value in features.items():\n            if int(_value) != int(self.features[key]):\n                return False \n        return True \n\nclass 
Listener(object):\n def __init__(self, conn,handler):\n self.conn = conn\n self.handler = handler \n\n def on_error(self, headers, message):\n print('received an error %s' % message)\n\n def on_message(self, headers, message):\n self.handler(message)\n\nclass Evaluation(Thread):\n def __init__(self):\n self.candidates = []\n self.stop = False\n self.subscriptions = []\n self.max_candidates_size = 200\n self.real_measurement = []\n self.mean_squared_error_map = {}\n self.evaluation_period = 60*10\n self.last_evaluation = time.time()\n self.tolerated_error = tolerated_error\n self.readCandidatesFile()\n super(Evaluation,self).__init__()\n\n def createSubscription(self, application):\n conn = stomp.Connection(host_and_ports = [(activemq_hostname, activemq_port)])\n conn.connect(login=activemq_username,passcode=activemq_password)\n data = {'request':'subscribe','application':application,'metrics':[],'queue': subscription_topic,'name': 'performance_model'}\n conn.send(body=json.dumps(data), destination=persistence_storage_queue, persistent='false')\n print(\"Subscription request sent for application {0}\".format(application))\n return True \n\n def stopEvaluator(self):\n self.stop = True \n self.saveCandidates()\n\n def handler(self, data):\n try:\n _json = json.loads(data)\n if type(_json) == type([]):\n for candidate in _json:\n self.addCandidate(candidate['application'],candidate['target'],candidate['features'],candidate['prediction'], candidate['variant'])\n print(\"{0} predictions have been added\".format(len(_json)))\n else:\n if \"metrics\" in _json:\n self.real_measurement.append(_json)\n\n if time.time() - self.last_evaluation > self.evaluation_period:\n self.evaluatePrecision()\n self.last_evaluation = time.time()\n except Exception as e:\n print(\"An error occured while handling data from queue\")\n print(e) \n\n def getFeaturesFromRealMeasurment(self,_json):\n features = _json['metrics']\n features.update(_json['labels'])\n return features\n\n def isClosed(self, _value1, _value2):\n return abs(float(_value1) - float(_value2)) <= self.tolerated_error\n\n def equalFeatues(self, real_features, prediction_features):\n for key, value in prediction_features.items():\n if not key in real_features:\n return False \n if not self.isClosed(real_features[key],value):\n return False \n return True \n def computeDistance(self,real_feature, predict):\n predict_feature = predict.getFeatures()\n real_prediction = real_feature[predict.getTarget()]\n prediction = predict.getPrediction()\n f_sum = 0\n for field, _value in real_feature.items():\n if not field in predict_feature:\n continue\n if type(predict_feature[field]) == type(\"\"):\n continue\n f_sum += (float(_value) - float(predict_feature[field]))**2\n d_f = math.sqrt(f_sum)\n d_precision = (abs(real_prediction - float(prediction))/real_prediction)*100\n return (d_f,d_precision)\n def selectByApplicationName(self,data,application, _type):\n result = []\n if _type == \"real\":\n for real in self.real_measurement:\n if real['labels']['application'] == application:\n result.append(real)\n else:\n for pred in self.candidates:\n if pred.getApplication() == application:\n result.append(pred)\n return result\n\n def evaluatePrecision(self):\n if len(self.real_measurement) == 0:\n if len(self.candidates) > 0:\n del self.subscriptions[:]\n for candidate in self.candidates:\n if not candidate.getApplication() in self.subscriptions:\n self.createSubscription(candidate.getApplication())\n self.subscriptions.append(candidate.getApplication())\n 
self.saveCandidates()\n            print(\"No real data found\")\n            return False \n        for application in self.subscriptions:\n            distance_map = {}\n            self.mean_squared_error_map[application] = []\n            list_real = self.selectByApplicationName(self.real_measurement,application,\"real\")\n            list_pred = self.selectByApplicationName(self.candidates,application,\"predict\")\n            for real in list_real:\n                real_features = self.getFeaturesFromRealMeasurment(real)\n                for predict in list_pred:\n                    d_f, error = self.computeDistance(real_features,predict)\n                    distance_map[d_f] = 100 - int(error) \n\n            distance_map = dict(sorted(distance_map.items()))\n            # select the closest candidate\n            print(\"Best candidate\")\n            k = list(distance_map.keys())[0]\n            print(\"Distance : {0}\".format(k))\n            print(\"Precision in percentage : {0}%\".format(distance_map[k]))\n            if distance_map[k] < prediction_precision:\n                #retrain request \n                features = list(list_pred[0].getFeatures().keys())\n                target = list_pred[0].getTarget()\n                variant = list_pred[0].getVariant()\n                application = list_pred[0].getApplication()\n                _post = {'url_file': \"\", 'application': application,'target':target,'features': features, 'variant': variant}\n                try:\n                    response = requests.post(performance_model_train_url, data=json.dumps(_post),headers={'Content-Type':'application/json'})\n                except Exception as e:\n                    print(\"An error occurred while sending retrain request\")\n                else:\n                    del self.real_measurement[:]\n                    del self.candidates[:]\n\n    def listen(self):\n        conn = None \n        status = False \n        try:\n            print('Subscribe to the topic {0}'.format(subscription_topic))\n            conn = stomp.Connection(host_and_ports = [(activemq_hostname, activemq_port)])\n            conn.connect(login=activemq_username,passcode=activemq_password)\n            conn.set_listener('', Listener(conn, self.handler))\n            conn.subscribe(destination=subscription_topic, id=1, ack='auto')\n            status = True \n        except Exception as e:\n            print(\"Could not subscribe\")\n            print(e)\n            status = False \n\n        if not status:\n            time.sleep(10)\n            self.listen()\n        \n        while not self.stop:\n            time.sleep(5)\n        conn.disconnect()\n        self.stop = True \n\n    def getStatus(self):\n        return not self.stop \n\n    def addCandidate(self,application, target, features, prediction, variant):\n        candidate = EvaluationCandidate(application,target,features,prediction,variant)\n        self.candidates.append(candidate)\n        if len(self.candidates) > self.max_candidates_size:\n            self.candidates.pop(0)\n        if not application in self.subscriptions:\n            self.createSubscription(application)\n            self.subscriptions.append(application)\n        self.saveCandidates()\n\n    def readCandidatesFile(self):\n        if path.exists(path_ml_model+\"/subscriptions.obj\"):\n            self.subscriptions = pickle.load(open(path_ml_model+\"/subscriptions.obj\", 'rb'))\n            for application in self.subscriptions:\n                self.createSubscription(application)\n\n    def saveCandidates(self):\n        pickle.dump(self.subscriptions, open(path_ml_model+\"/subscriptions.obj\",\"wb\"))\n        print(\"Candidates and subscriptions struct saved\")\n\n    def restart(self):\n        self.stopEvaluator()\n        print(\"Restart in 10s\")\n        time.sleep(10)\n        self.readCandidatesFile()\n        self.run()\n\n    def run(self):\n        print(\"Evaluator started ...\")\n        self.listen()\n\n\nevaluation = Evaluation()\nevaluation.start()","sub_path":"forecasting/morphemic/morphemic-performance-model/evaluator/src/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":10286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"100606477","text":"import textwrap\nimport unittest\n\nimport icontract_hypothesis\n\nimport 
aocdbc.day_8_handheld_halting\n\n\nclass TestDay8(unittest.TestCase):\n def test_case(self) -> None:\n text = textwrap.dedent(\n \"\"\"\\\n nop +0\n acc +1\n jmp +4\n acc +3\n jmp -3\n acc -99\n acc +1\n jmp -4\n acc +6\n \"\"\"\n )\n\n lines = text.splitlines()\n\n instructions = aocdbc.day_8_handheld_halting.parse(lines=lines)\n\n acc = aocdbc.day_8_handheld_halting.execute_instructions(\n instructions=instructions\n )\n\n self.assertEqual(5, acc)\n\n def test_parse_with_icontract_hypothesis(self) -> None:\n icontract_hypothesis.test_with_inferred_strategy(\n aocdbc.day_8_handheld_halting.parse\n )\n\n def test_execute_instructions_with_icontract_hypothesis(self) -> None:\n icontract_hypothesis.test_with_inferred_strategy(\n aocdbc.day_8_handheld_halting.execute_instructions\n )\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"tests/test_day_8_handheld_halting.py","file_name":"test_day_8_handheld_halting.py","file_ext":"py","file_size_in_byte":1106,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"273447523","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom math import pi\nimport sys\nsys.path.insert(1, './')\nfrom gen_matrix import matrix_gen, get_ICA\nfrom get_sample import get_sample, create_strings_for_dataset\nfrom fft import fft_for_sample\nfrom tqdm import tqdm\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.decomposition import PCA\nfrom sklearn.preprocessing import StandardScaler\nimport collections\nimport operator\nfrom sklearn import decomposition\n\n## CONSTANTS\n\nCHANALS = 128\nN_COMPONENTS_PCA = 60\nFREQ = 100\nTIME_SEC = 200\nNOISE = 0.5\n\nTIME_SIZE_SEC = 3\nSTEP_TIME_SEC = 1\n\nSAMPLE_SIZE = TIME_SIZE_SEC * FREQ\nSTEP_TIME = STEP_TIME_SEC * FREQ\n\nLINSPACE = 0, TIME_SEC, FREQ*TIME_SEC\n\n\n\n\ndef generate_simple_dataset(linspace, chanals, pandas=False):\n p1,p2,p3 = linspace\n v = np.linspace(p1, p2, p3)\n\n dataset = v\n for i in range(chanals-1):\n dataset = np.vstack((dataset, v))\n\n if pandas:\n return pd.DataFrame(dataset)\n\n return dataset\n\ndef func_for_1class(t, noise=0.5):\n return 2*np.cos(5*2*pi*t) + 5*np.cos(15*2*pi*t) + 3*np.cos(20*2*pi*t) + np.random.normal(0,1)\n\ndef func_for_2class(t, noise=0.5):\n return 3*np.cos(5*2*pi*t) + 2*np.cos(15*2*pi*t) + 3*np.cos(20*2*pi*t) + np.random.normal(0,1)\n\ndef func_for_3class(t, noise=0.5):\n return 4*np.cos(5*2*pi*t) + 10*np.cos(15*2*pi*t) + 3*np.cos(20*2*pi*t) + np.random.normal(0,1)\n\ndef func_general(t, noise=0.5):\n return 9*np.cos(5*2*pi*t) + 10*np.cos(15*2*pi*t) + 3*np.cos(20*2*pi*t) + np.random.normal(0,1)\n\ndef get_cosinus_matrix(chanals, linspace):\n data_simple = generate_simple_dataset(linspace, chanals)\n vec = data_simple[0]\n\n size = (chanals, linspace[2])\n class_ = size[1]//3\n class1 = [0,class_]\n class2 = [class_,class_*2]\n class3 = [class_*2, data_simple.shape[1]]\n\n\n vec[class1[0]:class1[1]] = func_for_1class(vec[class1[0]:class1[1]])\n vec[class2[0]:class2[1]] = func_for_2class(vec[class2[0]:class2[1]])\n vec[class3[0]:class3[1]] = func_for_3class(vec[class3[0]:class3[1]])\n\n\n data_simple = func_general(data_simple)\n data_simple[0] = data_simple[1]\n # data_simple[10] = vec #########!!!!!!!!!!!!!!!!\n data_simple[100] = vec #########!!!!!!!!!!!!!!!!\n # data_simple[65] = vec\n\n size = data_simple.shape\n class_ = size[1] //3\n\n return data_simple, size, class_\n\n\ndef get_i_(sample_calss1):\n i_ = 0\n for i in range(len(sample_calss1)):\n for j in 
range(sample_calss1[0].shape[0]):\n if sample_calss1[i][j].shape[0] != SAMPLE_SIZE:\n if i_ == 0:\n i_ = i\n return i_\n\n\ndef scoring_fi(feature_importances):\n above_zero = feature_importances['importance'][:np.sum(feature_importances['importance'] > 0)]\n mean_value = above_zero.mean()\n features_good = above_zero[above_zero > mean_value].index.tolist()\n features_normal = above_zero[above_zero <= mean_value].index.tolist()\n features_bad = [i for i in feature_importances.index.tolist() if i not in features_good and\n i not in features_normal]\n\n\n features_good = [i for i in features_good if i not in ['[', ']', ',']]\n features_normal = [i for i in features_normal if i not in ['[', ']', ',']]\n features_bad = [i for i in features_bad if i not in ['[', ']', ',']]\n\n features_good = list(map(lambda x: str(x), features_good))\n features_normal = list(map(lambda x: str(x), features_normal))\n features_bad = list(map(lambda x: str(x), features_bad))\n\n\n return features_good, features_normal, features_bad\n\ndef rf_fit(data_pca, labels):\n fg = []\n fn = []\n fb = []\n for _ in range(200):\n rf = RandomForestClassifier()\n rf.fit(data_pca, labels)\n feature_importances = pd.DataFrame(rf.feature_importances_,\n index = data_pca.columns,\n columns=['importance']).sort_values('importance',ascending=False)\n\n features_good, features_normal, features_bad = scoring_fi(feature_importances)\n fg.extend(features_good)\n fn.extend(features_normal)\n fb.extend(features_bad)\n\n features_good = list(map(lambda x: x[0],\n sorted(collections.Counter(fg).items(), key=operator.itemgetter(1), reverse=True)[:10]))\n features_normal = list(map(lambda x: x[0],\n sorted(collections.Counter(fn).items(), key=operator.itemgetter(1), reverse=True)))\n\n features_normal = list(set(features_normal) - set(features_good))\n\n features_bad = list(set(fb) - set(features_good) - set(features_normal))\n\n return features_good, features_normal, features_bad\n\n\ndef features_imp_pca(train_features, model_pca, X_pca, features_good, features_bad, features_normal, size, FIRST_N_FFT):\n\n global N_COMPONENTS_PCA\n\n reward_best = 50\n reward_max = 10\n reward_med = 5\n reward_min = 1\n\n fe_imp = {}\n for feature in range(0, size[1]):\n fe_imp['feature' + '_' + str(feature)] = 0\n\n component_max_list = [abs(pd.DataFrame(model_pca.components_).loc[i, :]).max() for i in range(N_COMPONENTS_PCA)]\n component_mean_list = [abs(pd.DataFrame(model_pca.components_).loc[i, :]).mean() for i in range(N_COMPONENTS_PCA)]\n\n for feature in tqdm(range(0, size[0]*FIRST_N_FFT)):\n reward = 0\n for component in range(0, N_COMPONENTS_PCA):\n feature_value =abs( model_pca.components_[component, feature])\n component_max = component_max_list[component]\n component_mean = component_mean_list[component]\n\n comparison_max = component_max - component_max / 10\n comparison_med = component_max - component_max / 20\n comparison_min = component_mean\n\n\n\n if feature_value >= comparison_min:\n if str(component) in features_bad:\n reward -= reward_min\n elif str(feature) in features_good or str(feature) in features_normal:\n reward += reward_min\n\n if feature_value >= comparison_med:\n if str(component) in features_bad:\n reward -= reward_med\n elif str(component) in features_normal:\n reward += reward_med\n elif str(component) in features_good:\n reward += reward_max\n\n if feature_value >= comparison_max:\n if str(component) in features_bad:\n reward -= reward_max\n elif str(component) in features_normal:\n reward += reward_max\n elif 
str(component) in features_good:\n                    reward += reward_best #best\n\n            if feature_value <= comparison_min:\n                if str(component) in features_bad:\n                    reward += reward_min\n                elif str(component) in features_good or str(feature) in features_normal:\n                    reward -= reward_min\n\n\n        fe_imp['feature' + '_' + str(feature)] = reward\n\n    return fe_imp\n\n\ndef table_recovery(train_features, FIRST_N_FFT, size):\n    global N_COMPONENTS_PCA, CHANALS\n    ### Restore the original shape of the table, namely 128x20x100 (102 in this example\n    old_table = []\n    for i in tqdm(range(train_features.shape[0])):\n        sample = pd.DataFrame(np.zeros((CHANALS, FIRST_N_FFT)))\n        string = train_features.iloc[i, :]\n\n        index_start = 0\n        index_end = size[0]\n\n\n        for s in range(FIRST_N_FFT):\n            sample.iloc[:, s] = string.iloc[index_start : index_end].values\n            index_start = index_end\n            index_end += CHANALS\n\n            if index_end > size[0]*FIRST_N_FFT:\n                break\n\n        old_table.append(sample.values)\n\n    return old_table\n\n\ndef search_important_features(old_table):\n    FE_items = []\n\n    for table_number, table in tqdm(enumerate(old_table)):\n        for column in range(table.shape[1]):\n            for idx in range(table.shape[0]):\n                if len(str(table[idx, column]).split('_')) > 1:\n                    FE_items.append((table_number, idx, column))\n\n    return FE_items\n\n\n\ndef run():\n    print('Start')\n    print('Make data')\n    matrix, size, class_ = get_cosinus_matrix(chanals=CHANALS, linspace=LINSPACE)\n\n    FastICA = decomposition.FastICA(n_components=CHANALS).fit(matrix.T)\n    ICA = FastICA.transform(matrix.T)\n    matrix = ICA.T\n\n    matrix_class1 = matrix[:,0:class_]\n    matrix_calss2 = matrix[:, class_:class_*2]\n    matrix_calss3 = matrix[:, class_*2:matrix.shape[1]]\n    # Get samples for each class\n    print('Get samples')\n    sample_calss1 = get_sample(matrix_class1, sample_size=SAMPLE_SIZE, step=STEP_TIME)\n    sample_calss2 = get_sample(matrix_calss2, sample_size=SAMPLE_SIZE, step=STEP_TIME)\n    sample_calss3 = get_sample(matrix_calss3, sample_size=SAMPLE_SIZE, step=STEP_TIME)\n    i_ = get_i_(sample_calss1)\n    sample_calss1 = sample_calss1[:i_]\n    sample_calss2 = sample_calss2[:i_]\n    sample_calss3 = sample_calss3[:i_]\n    print('Fourier transform')\n    samples_fft = list(fft_for_sample(sample_calss1 + sample_calss2 + sample_calss3, freq=FREQ))\n    len_class = len(sample_calss1)\n    sample_calss1_fft = samples_fft[:len_class]\n    sample_calss2_fft = samples_fft[len_class:len_class*2]\n    sample_calss3_fft = samples_fft[len_class*2:]\n    FIRST_N_FFT = len(sample_calss1_fft[0][0])\n\n    # Create strings for the dataset: from a CHANNELS*FIRST_N_FFT matrix -> into a vector\n    sample_calss1_fft_str = create_strings_for_dataset(sample_calss1_fft)\n    sample_calss2_fft_str = create_strings_for_dataset(sample_calss2_fft)\n    sample_calss3_fft_str = create_strings_for_dataset(sample_calss3_fft)\n\n    # Build the objects-by-features table\n\n    # Class 1\n    data_class_1 = pd.DataFrame(data=np.zeros((len_class, size[0] * FIRST_N_FFT)))\n    data_class_1['label'] = 1\n\n    data_class_1 = np.array(data_class_1)\n\n    for i in tqdm(range(len(sample_calss1_fft_str))):\n        data_class_1[i, :-1] = sample_calss1_fft_str[i]\n\n\n    # Class 2\n    data_class_2 = pd.DataFrame(data=np.zeros((len_class, size[0] * FIRST_N_FFT)))\n    data_class_2['label'] = 2\n\n    data_class_2 = np.array(data_class_2)\n\n    for i in tqdm(range(len(sample_calss2_fft_str))):\n        data_class_2[i, :-1] = sample_calss2_fft_str[i]\n\n\n    # Class 3\n    data_class_3 = pd.DataFrame(data=np.zeros((len_class, size[0] * FIRST_N_FFT)))\n    data_class_3['label'] = 3\n\n    data_class_3 = np.array(data_class_3)\n\n    for i in 
tqdm(range(len(sample_calss3_fft_str))):\n        data_class_3[i, :-1] = sample_calss3_fft_str[i]\n\n    data = np.vstack([data_class_1, data_class_2, data_class_3])\n    data = pd.DataFrame(data)\n    data.columns = [*data.columns[:-1], 'label']\n    print(data.shape)\n\n\n    ## Reduce the dimensionality to 60 components\n    from sklearn.decomposition import PCA\n    PCA = PCA(n_components=N_COMPONENTS_PCA, random_state=100)\n    data_standart = (data).iloc[:, :-1]\n    # Reduce the dimensionality\n    data_pca = PCA.fit_transform(data_standart)\n    data_pca = pd.DataFrame(data_pca)\n    labels = data['label'].values\n\n    features_good, features_normal, features_bad = rf_fit(data_pca, labels)\n\n    train_features = data_standart\n    from sklearn.decomposition import PCA\n    model = PCA(n_components=N_COMPONENTS_PCA, random_state=100).fit(train_features)\n    X_pc = model.transform(train_features)\n    d = features_imp_pca((train_features), model, X_pc, features_good, features_bad, features_normal, size, FIRST_N_FFT)\n    sorted_d = sorted(d.items(), key=operator.itemgetter(1), reverse=True)\n    best_features = [sorted_d[i][0] for i in range(10)]\n\n    ### The main features we will work with\n    train_features = pd.DataFrame(train_features)\n    for number_feature in list(best_features):\n        number_feature = int(number_feature.split('_')[1])\n        train_features.iloc[:, number_feature] = train_features.iloc[:, number_feature].apply(lambda x: str(x) +\n                                                                                              '_FE').values\n\n    old_table = table_recovery(train_features, FIRST_N_FFT, size)\n    FE_items = search_important_features(old_table)\n\n    best_feat = list(map(lambda x: x[0],\n                         sorted(collections.Counter(list(map(lambda x: x[1], FE_items))).items(),\n                                key=operator.itemgetter(1), reverse=True)))\n\n    ch = []\n    for i in best_feat:\n        ch.append(np.argmax(np.abs(FastICA.mixing_[:, i])))\n\n    print(ch)\n    return ch\n\nrun()\n","sub_path":"scripts/Third_step_addingICA.py","file_name":"Third_step_addingICA.py","file_ext":"py","file_size_in_byte":12723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"170634491","text":"from plotly import graph_objects as go\nimport matplotlib.pyplot as plt\n\nfig = go.Figure()\ndados = ['Barra', [1, 2, 3, 4], [200, 150, 12, 250], '#cccccc', 'y', 'name']\n\n\nclass Chart:\n    def __init__(self, tipo='plotly'):\n        self.manager = tipo\n        if tipo == 'plotly':\n            self.fig = go.Figure()\n        else:\n            self.fig = plt.Figure()\n\n\nclass DashChart:\n    def __init__(self, bgcolor='#484e53', pagebgcolor='#32383e', textcolor='#ffffff', height=300, width=400):\n        \"\"\"\n\n        :param bgcolor: inner background color\n        :type bgcolor: str\n        :param pagebgcolor: page background color\n        :type pagebgcolor: str\n        :param textcolor: text color in the chart\n        :type textcolor: str\n        :param height: height in px or None - pass None to fit the dcc.Graph (useful for rem units)\n        :type height: int|None\n        :param width: width in px or None - pass None to fit the dcc.Graph (useful for rem units)\n        :type width: int|None\n        \"\"\"\n        self.manager = 'plotly'\n        self.bgcolor = bgcolor\n        self.pagebgcolor = pagebgcolor\n        self.font = dict(\n            color=textcolor,\n            size=12\n        )\n        self.fig = go.Figure()\n        self.height = height\n        self.width = width\n        self.fig.update_layout(self.get_layout())\n\n    def adiciona_dados(self, dados):\n        tipo = dados[0]\n        cor = dados[3]\n        eixo_y = dados[4]\n        axis_name = dados[5]\n        if self.manager == 'plotly':\n            if tipo == 'Barra':\n                self.fig.add_bar(x=dados[1], y=dados[2], yaxis=eixo_y, marker={'color': cor}, name=axis_name)\n            elif tipo == 'Linha':\n                
self.fig.add_scatter(x=dados[1], y=dados[2], yaxis=eixo_y, marker={'color': cor}, name=axis_name)\n            elif tipo == 'Pontos':\n                self.fig.add_scatter(x=dados[1], y=dados[2], yaxis=eixo_y, marker={'color': cor}, mode='markers',\n                                     name=axis_name)\n            elif tipo == 'Pizza':\n                self.fig.add_pie(labels=dados[1], values=dados[2])\n        self.fig.update_layout(self.get_layout())\n        self.altera_limites_eixo(eixo_y, 0, max(dados[2]))\n        return self.fig\n\n    def altera_limites_eixo(self, eixo_y='y', ymin=None, ymax=None):\n        if self.manager == 'plotly':\n            self.fig.update_layout({eixo_y[:1] + 'axis' + eixo_y[1:]: {'range': (ymin, ymax)}})\n\n    def get_layout(self):\n        layout = {\n            'barmode': 'group',\n            'hovermode': 'x',\n            'plot_bgcolor': self.bgcolor,\n            'paper_bgcolor': self.pagebgcolor,\n            'height': self.height,\n            'width': self.width,\n            'font': self.font,\n            'margin': {'l': 10, 'b': 40, 't': 30, 'r': 10},\n            'legend': {'orientation': 'h'},\n            'yaxis2': {'anchor': 'x', 'overlaying': 'y', 'side': 'right'},\n            'yaxis3': {'anchor': 'x', 'overlaying': 'y', 'side': 'right'}\n        }\n        return layout\n\n    def set_title(self, title):\n        if self.manager == 'plotly':\n            self.fig.update_layout({'title': {'text': title,\n                                              # 'y':0.9,\n                                              'x':0.5,\n                                              'xanchor': 'center',\n                                              'yanchor': 'top'}})\n\n    def set_yaxis_title(self, eixo_y, title):\n        if self.manager == 'plotly':\n            if eixo_y[1:] == '':\n                self.fig.update_layout({'yaxis': {'title': title}})\n            else:\n                self.fig.update_layout({'yaxis' + eixo_y[1:]: {'title': title}})\n\n\nclass MplibChart:\n    def __init__(self):\n        self.manager = 'mplib'\n        self.fig = plt.Figure()\n\n    def adiciona_dados(self, dados):\n        tipo = dados[0]\n        cor = dados[3]\n        eixo_y = dados[4]\n        axis_name = dados[5]\n\n        if len(self.fig.axes) > 0:\n            if eixo_y[1:] == '':\n                ax = self.fig.axes[0]\n            elif int(eixo_y[1:]) + 1 > len(self.fig.axes):\n                ax = self.fig.axes[0].twinx()\n            else:\n                ax = self.fig.axes[int(eixo_y[1:])]\n        else:\n            ax = self.fig.subplots()\n        if tipo == 'Barra':\n            ax.bar(dados[1], dados[2], color=cor, label=axis_name)\n        elif tipo == 'Linha':\n            ax.plot(dados[1], dados[2], color=cor, label=axis_name)\n        elif tipo == 'Pontos':\n            ax.scatter(dados[1], dados[2], color=cor, label=axis_name)\n        elif tipo == 'Pizza':\n            ax.pie(dados[2], labels=dados[1])\n        return self.fig\n\n    def altera_limites_eixo(self, eixo_y='y', ymin=None, ymax=None):\n\n        if eixo_y[1:] == '':\n            ax = self.fig.axes[0]\n        else:\n            ax = self.fig.axes[int(eixo_y[1:])]\n        ax.set_ylim(ymin=ymin, ymax=ymax)\n\n    def set_title(self, title):\n        self.fig.suptitle(title)\n\n    def set_yaxis_title(self, eixo_y, title):\n        if eixo_y[1:] == '':\n            ax = self.fig.axes[0]\n        else:\n            ax = self.fig.axes[int(eixo_y[1:])]\n        ax.set_ylabel(title)\n\n","sub_path":"dashboard_lib/figures.py","file_name":"figures.py","file_ext":"py","file_size_in_byte":5046,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"64421185","text":"import nltk\nimport pandas as pd\nimport torch\nimport numpy as np\nfrom PIL import Image\nfrom torch.utils.data import Dataset\n\nclass HeritageDataset(Dataset):\n    def __init__(self, csv_file, image_dir, bounding_box_dir, vocab, transform, object_transform, train):\n        self.dataset = pd.read_csv(csv_file)\n        self.image_dir = image_dir\n        self.bounding_box_dir = bounding_box_dir\n        self.vocab = vocab\n        self.transform = transform\n        self.object_transform = object_transform\n        self.train = train\n    def __getitem__(self, index):\n        id = self.dataset.iloc[index, 0]\n        image = Image.open('{}/{}'.format(self.image_dir, self.dataset.iloc[index, 
3])).convert('RGB')\n\n caption = []\n if not self.train:\n caption = [[self.vocab.getIndex('')] for _ in range(5)]\n for i, element in enumerate(caption):\n text = nltk.word_tokenize(self.dataset.iloc[index, 7+i].lower())\n element.extend([self.vocab.getIndex(word) for word in text])\n element.append(self.vocab.getIndex(''))\n else:\n caption = [self.vocab.getIndex('')]\n caption.extend([self.vocab.getIndex(word) for word in nltk.word_tokenize(self.dataset.iloc[index, 7].lower())])\n caption.append(self.vocab.getIndex(''))\n\n with open('{}/{}'.format(self.bounding_box_dir, self.dataset.iloc[index, 3].replace('jpg', 'txt')), 'r') as f:\n lines = f.readlines()\n objects = []\n class_list = []\n for line in lines:\n box = line.split()\n class_list.append(int(box[0]))\n x0 = int((float(box[1]) - float(box[3])/2) * image.width)\n x1 = int((float(box[1]) + float(box[3])/2) * image.width)\n y0 = int((float(box[2]) - float(box[4])/2) * image.height)\n y1 = int((float(box[2]) + float(box[4])/2) * image.height)\n object = image.crop((x0,y0,x1,y1))\n object = self.object_transform(object)\n objects.append(object)\n objects = torch.stack(objects, 0)\n image = self.transform(image)\n return id, image, caption, objects, class_list\n def __len__(self):\n return self.dataset.shape[0]\n\ndef collate_fn(data):\n ids, images, captions, list_objects, class_lists = zip(*data)\n \n ids = list(ids)\n class_lists = list(class_lists)\n images = torch.stack(images, 0)\n\n lengths = []\n max_len = 0\n padded_captions = None\n if type(captions[0][0]) == list:\n lengths = [[len(element) for element in caption] for caption in captions]\n max_len = np.amax(lengths)\n lengths = torch.tensor(lengths)\n \n padded_captions = torch.zeros(lengths.size(0), 5, max_len).long()\n for i, caption in enumerate(captions):\n for j, element in enumerate(caption):\n length = lengths[i][j]\n padded_captions[i,j,:length] = torch.tensor(element)\n else:\n lengths = [len(caption) for caption in captions]\n max_len = max(lengths)\n lengths = torch.tensor(lengths).unsqueeze(1)\n\n padded_captions = torch.zeros(lengths.size(0), max_len).long()\n for i, caption in enumerate(captions):\n length = lengths[i][0]\n padded_captions[i,:length] = torch.tensor(caption)\n \n num_objects = [len(objects) for objects in list_objects]\n max_len = max(num_objects)\n num_objects = torch.tensor(num_objects).unsqueeze(1)\n\n padded_objects = torch.zeros(num_objects.size(0), max_len, list_objects[0].size(1), list_objects[0].size(2), list_objects[0].size(3))\n for i, objects in enumerate(list_objects):\n length = num_objects[i][0]\n padded_objects[i,:length] = objects\n \n return ids, images, padded_captions, lengths, padded_objects, num_objects, class_lists\n","sub_path":"dataset/HeritageDataset.py","file_name":"HeritageDataset.py","file_ext":"py","file_size_in_byte":3920,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"244166881","text":"#coding:utf8\n\n# Copyright 2019 longpeng2008. 
All Rights Reserved.\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# If you find any problem,please contact us\n#\n# longpeng2008to2012@gmail.com \n#\n# or create issues\n# =============================================================================\nimport torch\nfrom torch import nn\nclass simpleNet5(nn.Module):\n def __init__(self):\n super(simpleNet5, self).__init__()\n self.conv1 = nn.Sequential(\n nn.Conv2d(3, 32, kernel_size=3, stride=2, padding=1),\n nn.BatchNorm2d(32),\n nn.ReLU(True),\n )\n self.conv2 = nn.Sequential(\n nn.Conv2d(32, 64, kernel_size=3, stride=2, padding=1),\n nn.BatchNorm2d(64),\n nn.ReLU(True),\n )\n self.conv3 = nn.Sequential(\n nn.Conv2d(64, 128, kernel_size=3, stride=2, padding=1),\n nn.BatchNorm2d(128),\n nn.ReLU(True),\n )\n self.conv4 = nn.Sequential(\n nn.Conv2d(128, 256, kernel_size=3, stride=2, padding=1),\n nn.BatchNorm2d(256),\n nn.ReLU(True),\n )\n self.conv5 = nn.Sequential(\n nn.Conv2d(256, 512, kernel_size=3, stride=2, padding=1),\n nn.BatchNorm2d(512),\n nn.ReLU(True),\n )\n self.deconv1 = nn.Sequential(\n nn.ConvTranspose2d(512, 256, 3, 2, 1, 1),\n nn.BatchNorm2d(256),\n nn.ReLU(True)\n )\n self.deconv2 = nn.Sequential(\n nn.ConvTranspose2d(256, 128, 3, 2, 1, 1),\n nn.BatchNorm2d(128),\n nn.ReLU(True)\n )\n self.deconv3 = nn.Sequential(\n nn.ConvTranspose2d(128, 64, 3, 2, 1, 1),\n nn.BatchNorm2d(64),\n nn.ReLU(True)\n )\n self.deconv4 = nn.Sequential(\n nn.ConvTranspose2d(64, 32, 3, 2, 1, 1),\n nn.BatchNorm2d(32),\n nn.ReLU(True)\n )\n self.deconv5 = nn.Sequential(\n nn.ConvTranspose2d(32, 8, 3, 2, 1, 1),\n nn.BatchNorm2d(8),\n nn.ReLU(True)\n )\n self.classifier = nn.Conv2d(8, 3, kernel_size=1)\n\n def forward(self, x): \n out = self.conv1(x) \n out = self.conv2(out) \n out = self.conv3(out) \n out = self.conv4(out)\n out = self.conv5(out)\n out = self.deconv1(out) \n out = self.deconv2(out)\n out = self.deconv3(out)\n out = self.deconv4(out)\n out = self.deconv5(out)\n out = self.classifier(out)\n return out\n\nif __name__ == '__main__':\n img = torch.randn(2, 3, 224, 224)\n net = simpleNet5()\n sample = net(img)\n print(sample.shape)\n","sub_path":"computer_vision/projects/segmentation/pytorch/net.py","file_name":"net.py","file_ext":"py","file_size_in_byte":2709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"292152829","text":"import aiofiles\nimport logging\nimport os\nimport sys\nfrom aiohttp import web\n\nimport virtool.api.account\nimport virtool.api.analyses\nimport virtool.api.downloads\nimport virtool.api.files\nimport virtool.api.genbank\nimport virtool.api.groups\nimport virtool.api.history\nimport virtool.api.hmm\nimport virtool.api.indexes\nimport virtool.api.jobs\nimport virtool.api.otus\nimport virtool.api.processes\nimport virtool.api.references\nimport virtool.api.root\nimport virtool.api.samples\nimport virtool.api.settings\nimport virtool.api.software\nimport virtool.api.subtractions\nimport virtool.api.uploads\nimport virtool.api.users\nimport virtool.api.websocket\n\nimport virtool.utils\nimport virtool.http.login\n\nlogger = logging.getLogger(__name__)\n\n\nasync def client_path_error():\n async with aiofiles.open(os.path.join(sys.path[0], \"templates/client_path_error.html\"), \"r\") as f:\n body = await f.read()\n return web.Response(body=body, content_type=\"text/html\")\n\n\nasync def index_handler(req):\n if req.app[\"client_path\"] is None:\n try:\n client_path = await virtool.utils.get_client_path()\n except FileNotFoundError:\n 
return await client_path_error()\n\n req.app[\"client_path\"] = client_path\n req.app.router.add_static(\"/static\", client_path)\n\n try:\n static_hash = virtool.utils.get_static_hash(req.app[\"client_path\"])\n except FileNotFoundError:\n return await client_path_error()\n\n if not req[\"client\"].user_id:\n keys = virtool.http.login.generate_verification_keys()\n\n session_id = req[\"client\"].session_id\n\n await req.app[\"db\"].sessions.update_one({\"_id\": session_id}, {\n \"$set\": {\n \"keys\": keys\n }\n })\n\n html = virtool.http.login.get_login_template().render(\n key_1=keys[0],\n key_2=keys[1],\n key_3=keys[2],\n hash=static_hash,\n location=req.path\n )\n\n return web.Response(body=html, content_type=\"text/html\")\n\n with open(os.path.join(req.app[\"client_path\"], \"index.html\"), \"r\") as handle:\n return web.Response(body=handle.read(), content_type=\"text/html\")\n\n\ndef setup_routes(app):\n index_paths = [\n \"/\",\n r\"/account{suffix:.*}\",\n r\"/administration{suffix:.*}\",\n r\"/home{suffix:.*}\",\n r\"/hmm{suffix:.*}\",\n r\"/jobs{suffix:.*}\",\n r\"/otus{suffix:.*}\",\n r\"/refs{suffix:.*}\",\n r\"/samples{suffix:.*}\",\n r\"/subtraction{suffix:.*}\"\n ]\n\n for path in index_paths:\n app.router.add_get(path, index_handler)\n\n app.router.add_get(\"/ws\", virtool.api.websocket.root)\n app.router.add_post(\"/login\", virtool.http.login.login_handler)\n\n app.router.add_routes(virtool.api.account.routes)\n app.router.add_routes(virtool.api.analyses.routes)\n app.router.add_routes(virtool.api.downloads.routes)\n app.router.add_routes(virtool.api.files.routes)\n app.router.add_routes(virtool.api.genbank.routes)\n app.router.add_routes(virtool.api.groups.routes)\n app.router.add_routes(virtool.api.history.routes)\n app.router.add_routes(virtool.api.hmm.routes)\n app.router.add_routes(virtool.api.indexes.routes)\n app.router.add_routes(virtool.api.jobs.routes)\n app.router.add_routes(virtool.api.otus.routes)\n app.router.add_routes(virtool.api.processes.routes)\n app.router.add_routes(virtool.api.references.routes)\n app.router.add_routes(virtool.api.root.routes)\n app.router.add_routes(virtool.api.samples.routes)\n app.router.add_routes(virtool.api.settings.routes)\n app.router.add_routes(virtool.api.software.routes)\n app.router.add_routes(virtool.api.subtractions.routes)\n app.router.add_routes(virtool.api.uploads.routes)\n app.router.add_routes(virtool.api.users.routes)\n","sub_path":"virtool/app_routes.py","file_name":"app_routes.py","file_ext":"py","file_size_in_byte":3808,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"239986046","text":"# -*- coding: utf-8 -*-\n#! 
/usr/bin/env python\n\"\"\"\n\"\"\"\nfrom argparse import ArgumentParser\nfrom logging import getLogger\nimport logging\nimport sys\n\n# from word2embeddings.apps import use_theano_development_version\n# use_theano_development_version()\n\nfrom cis.deep.utils import logger_config\nfrom word2embeddings.nn.trainer import HingeSentimentMiniBatchTrainer, \\\n HingeSentiment2MiniBatchTrainer, HingeMiniBatchTrainer, \\\n SimpleVLblNceTrainer, SimpleVLblNceSentimentTrainer, \\\n VLblNceTrainer, VLblNceSentimentTrainer, VLblNceDistributionalTrainer, \\\n NlblNceTrainer, NvLblNceTrainer, SLmNceTrainer, LblNceTrainer\nfrom word2embeddings.tools.util import debug\n\nlog = getLogger(__name__)\nlogger_config(log)\n\nparser = ArgumentParser()\nparser.add_argument('train_file',\n help='Document for training that contains tokenized text')\n\nparser.add_argument('--hidden-layers', dest='hidden_layers',\n help='Width of each hidden layer, comma separated. E.g., ' +\n '\"28,64,32\". This option only has an effect for mlp models and ' +\n 'for slm, where only one hidden layer is allowed.')\n\nparser.add_argument('vocabulary',\n help='Vocabulary file that contains list of tokens.\\nCaution: Add ' +\n 'the special tokens , , , in this exact order at ' +\n 'the first positions in the vocabulary.')\n\n\nparser.add_argument('--sentiment-vocabulary', dest='sent_vocab',\n help='Vocabulary file that contains sentiment words')\n\nparser.add_argument('--predict-vocabulary', dest='pred_vocab',\n help='Vocabulary that contains the items that should be considered ' +\n 'during perplexity computation.\\n' +\n 'Caution: Make sure this includes .\\n' +\n 'Caution2: If this vocabulary does not contain a word that is seen ' +\n 'in prediction this word is not considered during perplexity ' +\n 'calculation.')\n\n\nparser.add_argument('--unigram', dest='unigram',\n help='file containing the unigram count (the probabilities are ' +\n 'calculated automatically given the counts\\n ' +\n 'Caution: Add the ' +\n 'special tokens , , , in this exact order at the ' +\n 'first positions in the vocabulary.')\nparser.add_argument('--noise-samples', dest='noise_samples', type=int,\n help='number of noise samples per data sample')\nparser.add_argument('--nce-seed', dest='nce_seed', type=int, default=2345,\n help='seed for the noise sample generation in NCE')\n\n\nparser.add_argument('--validation-file', dest='validation_file', nargs='+',\n help='Files for validation that contains tokenized text. Multiple ' +\n 'files are supported, with the first file being the main validation ' +\n 'file, i.e., if --dump-best is active, then the performance on the ' +\n 'first file is considered.\\n ' +\n 'Note: For all LBL based models the validation cost will be ' +\n 'different even if you provide the same validation file twice, ' +\n 'because the NCE cost computation involves a randomized process.')\n\nparser.add_argument('--perplexity', action='store_true',\n help='instead of calculating the error on the validation set, ' +\n 'additionally calculate the perplexity. Caution: does only work ' +\n 'for vLBL models. 
Note: using ppl in validation is slower.')\n\n\nparser.add_argument('--disable-padding', dest='disable_padding',\n action='store_true', default=False,\n help='Disable padding sentences while generating examples')\n\nparser.add_argument('--learn-eos', dest='learn_eos',\n action='store_true', default=False,\n help='Learn word embedding for the end-of-sentence token .')\n\n\nparser.add_argument('--load-model', dest='load_model',\n help='Proceed training with the given model file.')\n\nparser.add_argument('--model-type', dest='model_type',\n choices=['ColWes08', 'sent_1', 'sent_2', 'vlbl', 'nvlbl',\n 'vlbl_sent', 'simple_vlbl', 'simple_vlbl_sent', 'vlbl_dist',\n 'lbl', 'nlbl', 'slm'],\n default='ColWes08',\n help='Type of the model to use for training. All sentiment models ' +\n 'require a sentiment vocabulary.')\n\nparser.add_argument('--activation-func', dest='activation_func', default='rect',\n choices=['sigmoid', 'tanh', 'rect', 'softsign'],\n help='Activation function to use in non-linear models.')\n\n\nparser.add_argument('--left-context', dest='left_context', type=int,\n default=2,\n help='Left context window to be used measured from the current token')\n\nparser.add_argument('--right-context', dest='right_context', type=int,\n default=2,\n help='Right context window measured from the current token')\n\nparser.add_argument('--word-embedding-size', dest='word_embedding_size',\n type=int, default=64)\n\n\n# Argument for MiniBatchTrainer\nparser.add_argument('--epochs-limit', dest='epochs_limit', type=int, default=-1,\n help='maximal number of epochs to train (-1 for no limit)')\n\nparser.add_argument('--examples-limit', dest='examples_limit', type=int,\n default=-1,\n help='maximal number of examples to train (-1 for no limit)')\n\nparser.add_argument('--early-stopping', dest='early_stopping', type=int,\n default=-1,\n help='Stop the training when N consecutive validations resulted in ' + \\\n 'worse results than the validation before. -1 to deactivate this ' + \\\n 'feature.')\n\n\nparser.add_argument('--batch-size', dest='batch_size', type=int, default=16)\n\n\nparser.add_argument('--learning-rate', dest='learning_rate',\n default=0.1,\n help='Learning rate. If this parameter is a float value than the ' +\n 'learning rate is valid for all model parameters. Otherwise, it can ' +\n 'contain parameter specific learning rates in using the pattern ' +\n '\"param_name1:param_learning_rate1,param_name2:param_learning_rate2\\.' +\n 'You can also specify a learning rate for only some of your ' +\n 'parameters and assign the default learning rate for all other ' +\n 'parameters by specifying \"default:default_learning_rate\".')\n\nparser.add_argument('--lr-adaptation', dest='lr_adaptation_method',\n choices=['constant', 'linear', 'adagrad', 'MniTeh12'],\n default='constant',\n help='Sets the method that is used to reduce the learning rate. ' +\n 'Supports \"linear\" (linear reduction) and \"adagrad\" (AdaGrad ' +\n 'algorithm), and \"constant\" (no reduction), \"MniTeh12\" (halves the ' +\n 'learning rate whenever the validation perplexity (if \"--perplexity\" ' +\n 'is given) or error (otherwise) goes up; for details see [MniTeh12])')\n\nparser.add_argument('--learning-method', dest='learning_method',\n choices=['fan_in', 'global'], default='global',\n help='Determine the method that learning rate is calculated. Two ' +\n 'options are available: {fan_in, global}')\n\n\nparser.add_argument('--l1-weight', dest='l1_weight', type=float, default=0.0,\n help='Weight of L1 regularization term. 
0 to deactivate. ' +\n 'Only implemented for LBL models and SLM.')\nparser.add_argument('--l2-weight', dest='l2_weight', type=float, default=0.0,\n help='Weight of L2 regularization term. 0 to deactivate. ' +\n 'Only implemented for LBL models and SLM.')\n\nparser.add_argument('--dump-period', dest='dump_period', type=int, default=-1,\n help='A model will be dumped every x seconds/examples (-1 = no ' +\n 'dumping. Only the final model will be dumped.)')\n\nparser.add_argument('--load-params', dest='load_params', nargs=2,\n help='Load initial values from files. This parameter requires two ' +\n 'arguments: (i) and (ii) a comma separated list of ' +\n 'parameter names as specified by the individual model. Each parameter' +\n 'must be stored in csv file format in an own file. The single ' +\n 'parameter files are then expected to be named ' +\n '..\\n ' +\n 'Example usage: ~/my_model \"C,R\" will load ~/my_model.C and ' +\n '~/my_model.R.\\n ' +\n 'Gzip and bz2 files are supported.')\n\nparser.add_argument('--store-params', dest='store_params',\n help='Comma-separated list of parameter names that will be stored ' +\n 'each time the model is stored. The parameter names as specified by ' +\n 'the individual model. Each parameter is stored in a separate file, ' +\n 'e.g., paramter C is stored in .params.C.')\n\nparser.add_argument('--out-dir', dest='out_dir', default='.',\n help='directory where to store the output files')\n\nparser.add_argument('--dump-vocabulary', dest='dump_vocabulary',\n action='store_true',\n help='Dump the vocabulary after importing it to remove duplicates.')\n\nparser.add_argument('--dump-embeddings', dest='dump_embeddings',\n action='store_true',\n help='Dump the embeddings for every dumped model. Caution: might ' +\n 'be a big file.\\n ' +\n 'Caution: This parameter is deprecated. It\\'s not supported by the ' +\n 'new vLBL models. Use --store-params instead.')\n\nparser.add_argument('--validation-period', dest='validation_period',\n type=float, default=-1,\n help='A model will be evaluated every y seconds/examples. (-1 ' +\n 'for never). If a development file is given, the scores on the ' +\n 'training data and the validation data is computed, otherwise only ' +\n 'the former is computed.')\n\nparser.add_argument('--period-type', dest='period_type', default='examples',\n choices=['time', 'examples'],\n help='Set the period to be in seconds or number of examples ' +\n 'by setting the option to time or examples.')\n\nparser.add_argument('--dump-best', dest='dump_best', action='store_true',\n help='Save the best model every validation period. What \"best\" ' + \\\n 'means depends on the type of model. If \"--perplexity\" is given, ' + \\\n 'it\\'s the model with the lowest perplexity. 
If not, it\\'s the ' + \\\n 'model with the lowest training error.')\n\nparser.add_argument('--dump-each-epoch', dest='dump_each_epoch',\n action='store_true', help='Dump the model after each epoch')\n\nparser.add_argument('--dump-initial-model', dest='dump_initial_model',\n action='store_true',\n help='Dump the initial model before any training is done.')\n\n\nparser.add_argument('--error-function', dest='error_func',\n default='least_squares', choices=['cross_entropy', 'least_squares'],\n help='defines the used error function (default: least_squares); ' +\n 'This parameter is only valid for MLPs.')\n\nparser.add_argument('--count-examples', dest='count_examples',\n action='store_true',\n help='Only count the examples in the training file, don\\'t train a ' +\n 'model.')\n\n\nparser.add_argument('--debug-host', dest='debug_host',\n help='Allow remote debugging at the given host IP. Make sure you ' +\n 'follow the instructions at ' +\n 'http://pydev.org/manual_adv_remote_debugger.html. Especially, the ' +\n 'pydevd source must be in the PYTHONPATH and ' +\n 'PATHS_FROM_ECLIPSE_TO_PYTHON in pydevd_file_utils.py must be adapted.')\n\ndef main(argv=None):\n log.info('started application')\n\n log.warning('This script is obsolete. It will not be updated anymore and ' +\n 'will be deleted in the future. Use train_model.py instead.')\n\n if argv is None:\n argv = sys.argv[1:]\n\n args = parser.parse_args(argv)\n\n check_args(args)\n\n log.info('start parameters: ' + str(args))\n\n if args.debug_host:\n import pydevd\n pydevd.settrace(host=args.debug_host, stdoutToServer=True,\n stderrToServer=True)\n\n if log.level == logging.DEBUG:\n sys.excepthook = debug\n\n log.info('creating trainer')\n\n if args.model_type == 'ColWes08':\n log.info('Using ColWes08 trainer')\n trainer = HingeMiniBatchTrainer()\n elif args.model_type == 'sent_1':\n log.info('Using sent_1 trainer')\n trainer = HingeSentimentMiniBatchTrainer()\n elif args.model_type == 'sent_2':\n log.info('Using sent_2 trainer')\n trainer = HingeSentiment2MiniBatchTrainer()\n elif args.model_type == 'simple_vlbl':\n log.info('Using simple LBL trainer that uses noise-contrastive estimation')\n trainer = SimpleVLblNceTrainer()\n elif args.model_type == 'simple_vlbl_sent':\n log.info('Using simple LBL trainer that uses noise-contrastive estimation to create sentiment embeddings')\n trainer = SimpleVLblNceSentimentTrainer()\n elif args.model_type == 'vlbl':\n log.info('Using LBL trainer that uses noise-contrastive estimation')\n trainer = VLblNceTrainer()\n elif args.model_type == 'vlbl_sent':\n log.info('Using LBL trainer that uses noise-contrastive estimation to create sentiment embeddings')\n trainer = VLblNceSentimentTrainer()\n elif args.model_type == 'nvlbl':\n log.info('Using non-linear vLBL NCE trainer')\n trainer = NvLblNceTrainer()\n elif args.model_type == 'lbl':\n log.info('Using linear LBL trainer that uses noise-contrastive estimation')\n trainer = LblNceTrainer()\n elif args.model_type == 'nlbl':\n log.info('Using non-linear LBL trainer that uses noise-contrastive estimation')\n trainer = NlblNceTrainer()\n elif args.model_type == 'vlbl_dist':\n log.info('Using LBL trainer that uses distributional representation of input')\n trainer = VLblNceDistributionalTrainer()\n elif args.model_type == 'slm':\n log.info('Using shallow neural network lm with NCE')\n trainer = SLmNceTrainer()\n else:\n raise ValueError('Unknown model type. 
Abort')\n\n if args.count_examples is True:\n log.info('counting examples')\n trainer.configure(args)\n count = trainer.count_examples(args.train_file)\n log.info('examples: %d' % count)\n else:\n trainer.prepare_usage(args)\n log.info('training is about to begin')\n trainer.run()\n\n log.info('finished')\n\ndef check_args(args):\n\n\n\n# if args.epochs_limit == -1 and args.examples_limit == -1:\n# raise ValueError('Either epochs-limit or examples-limit must be given.')\n pass\n\nif __name__ == '__main__':\n sys.exit(main())\n","sub_path":"word2embeddings/apps/create_embeddings.py","file_name":"create_embeddings.py","file_ext":"py","file_size_in_byte":14433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"63242890","text":"# coding: utf-8\n\nimport flask\n\nimport auth\nimport forms\nimport model\nimport util\n\nfrom main import app\n\n\n@app.route('/admin/post/')\n@auth.admin_required\ndef post_list():\n post_dbs, cursors = model.Post.get_dbs(\n order='-created',\n is_visible=util.param('is_visible', bool),\n prev_cursor=True,\n )\n return flask.render_template(\n 'post/admin/list.html',\n html_class='post-list',\n title=u'Список новостей',\n post_dbs=post_dbs,\n next_url=util.generate_next_url(cursors['next']),\n prev_url=util.generate_next_url(cursors['prev']),\n api_url=flask.url_for('api.post.list'),\n )\n\n\n@app.route('/admin/post/new/')\n@auth.admin_required\ndef post_new():\n post = model.Post(title=u'Введите название')\n post.put()\n return flask.redirect(flask.url_for('post_edit', post_id=post.key.id()))\n\n\n@app.route('/admin/post/edit//', methods=['GET', 'POST'])\n@auth.admin_required\ndef post_edit(post_id):\n post_db = model.Post.get_by_id(post_id)\n if not post_db:\n return flask.redirect(flask.url_for('post_list'))\n form = forms.PostForm(obj=post_db)\n if form.validate_on_submit():\n form.populate_obj(post_db)\n post_db.put()\n return flask.redirect(flask.url_for('post_list'))\n return flask.render_template(\n 'post/admin/edit.html',\n html_class='post-edit',\n title=u'Редактирование новости',\n form=form,\n post_db=post_db,\n resource_dbs=post_db.resources,\n get_upload_url=flask.url_for(\n 'api.resource.upload_parent', parent_key=post_db.key.urlsafe()),\n back_url_for='post_list'\n )\n","sub_path":"main/control/post.py","file_name":"post.py","file_ext":"py","file_size_in_byte":1644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"311017482","text":"'''\nScript to classify the captured images. 
\nIt reads the images, displays them and saves them again with the selected value.\n'''\n\n# ----- Imports -------\nimport os\nimport shutil\nimport cv2\n#from matplotlib import pyplot as plt\n\n# -------- change to image folder and start classification ------\nos.chdir(\"/Users/c/Documents/\")\n\n\nfor f in os.listdir():\n\n    file_name, file_ext = os.path.splitext(f)\n    if file_ext == \".jpg\":            # only use jpg\n\n        img = cv2.imread(f, cv2.IMREAD_COLOR)        # read and display image\n        cv2.imshow(f, img)\n\n        key = cv2.waitKey(0)            # wait for a key\n        print(key)\n        if key == 48: # 0-key\n            key = 0\n        elif key == 49: # 1 -key\n            key = 1\n        elif key == 50: # 2 -key\n            key = 2\n        elif key == 51: # 3 -key\n            key = 3\n        elif key == 52: # 4 -key\n            key = 4\n        elif key == 53: # 5 -key\n            key = 5\n        elif key == 54: # 6 -key\n            key = 6\n        elif key == 55: # 7 -key\n            key = 7\n        elif key == 56: # 8 -key\n            key = 8\n        elif key == 57: # 9 -key\n            key = 9\n        elif key == 101: # e -key\n            key = \"e\"\n        elif key == 110: # n -key\n            continue\n        # break\n        # if ESC pressed, quit script\n        elif key == 27:\n            cv2.destroyAllWindows()            # close image\n            break\n        else:\n            key = \"e\" # classify as image error\n\n        shutil.move(                # move and rename file to new folder\n            \"/Users/c/Documents/\" + f,\n            \"/Users/c/Documents/class/\"\n            + str(key)\n            + \"_\"\n            + f,\n        )\n        cv2.destroyAllWindows()\n","sub_path":"classify.py","file_name":"classify.py","file_ext":"py","file_size_in_byte":1730,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"108459809","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Oct 6 16:31:20 2016\n\nThis script creates parameter files that have one of each economic indicator.\n\n@author: thasegawa\n\"\"\"\n\nimport os\nimport pandas as pd\n\nbasepath = r'C:\\Users\\thasegawa\\Documents\\53 Port Authority Toll\\06 Python Projects\\Regression Analysis\\data\\regress_para'\ngroup_list = ['pathmid',\n              'pathnj',\n              'pathnyc',\n              'pathtotal',\n              'pathwtc']\n \neconomic_list = list(pd.read_excel('data\\\\fields\\\\economicIndicators_Real.xlsx', header=None)[0])\nfuel_list = list(pd.read_excel('data\\\\fields\\\\fuel_binary.xlsx', header=None)[0]) + [None]\n \nfor group in group_list:\n    para = pd.read_excel(os.path.join(basepath, 'templates\\\\regresscols_%s_template_limited.xlsx' % group))\n    for fuel in fuel_list: \n        for economic in economic_list:\n            remove = [column for column in economic_list if column != economic]\n            remove += [column for column in fuel_list if column != fuel]\n            columns = [column for column in para['column'] if column not in remove]\n            df = para[para['column'].isin(columns)]\n            df.to_excel(os.path.join(basepath, 'regresscols_{0}_{1}_{2}.xlsx'.format(group, economic, fuel)), index = False)","sub_path":"createEconomicIndicatorParameters.py","file_name":"createEconomicIndicatorParameters.py","file_ext":"py","file_size_in_byte":1283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"454293616","text":"import tables\nimport os, sys\nfrom glob import glob\nimport PIL\nimport numpy as np\nimport cv2\nfrom sklearn.feature_extraction.image import extract_patches\nimport random\nimport copy\nfrom keras.utils import to_categorical\nfrom PIL import Image\nimport stain_normalization\n\n\ndef make_pytable(img_path, label_path, patch_size, stride_size, pad_size, split, num_classes, imgtype, labeltype):\n    img_dtype = {}\n    img_dtype['mask'] = tables.UInt8Atom()\n    img_dtype['img'] = 
tables.Float32Atom()\n\n train_file = glob(img_path+\"*.\"+imgtype)\n num_train = int(len(train_file)*(1-split))\n val_file = train_file[num_train:]\n train_file = train_file[:num_train]\n\n phases = {}\n phases['train'], phases['val'] = train_file, val_file\n\n block_shape = {}\n block_shape['img'] = np.array((patch_size, patch_size, 3))\n block_shape['mask'] = np.array((patch_size, patch_size, num_classes))\n\n filters = tables.Filters(complevel = 6, complib = 'zlib')\n\n storage = {}\n\n imgtypes = ['img', 'mask']\n\n for phase in phases.keys():\n print(phase)\n\n table_file = tables.open_file(f\"./table_{phase}.pytable\", mode = 'w')\n\n for type in imgtypes:\n storage[type] = table_file.create_earray(table_file.root, type, img_dtype[type],\n shape = np.append([0], block_shape[type]),\n chunkshape= np.append([1], block_shape[type]),\n filters = filters)\n\n for f in phases[phase]:\n print(f)\n\n for type in imgtypes:\n if type == \"img\":\n img = cv2.cvtColor(cv2.imread(f), cv2.COLOR_BGR2RGB)\n img = stain_normalization.normalizeStaining(img)\n img = img/255.0\n\n img = np.pad(img, [(pad_size, pad_size), (pad_size, pad_size), (0, 0)])\n img = extract_patches(img, (patch_size, patch_size, 3), stride_size)\n\n img = img.reshape(-1, patch_size, patch_size, 3)\n\n else:\n # masks are single channel, so read them directly as grayscale\n img = cv2.imread(label_path+f.replace(imgtype, labeltype), cv2.IMREAD_GRAYSCALE)\n\n if num_classes > 1:\n img = to_categorical(img, num_classes = num_classes)\n else:\n img = img.reshape(img.shape[0], img.shape[1], 1)\n img = padAndPatch(img, pad_size, patch_size, stride_size)\n\n storage[type].append(img)\n table_file.close()\n\ndef padAndPatch(img, pad_size, patch_size, strides):\n img = np.pad(img, [(pad_size, pad_size), (pad_size, pad_size), (0, 0)], mode = 'reflect')\n img = extract_patches(img, (patch_size, patch_size, img.shape[-1]), strides)\n img = img.reshape(-1, patch_size, patch_size, img.shape[-1])\n\n return img\n\n\n\n\n\n\n\n\n\n","sub_path":"make_pytable.py","file_name":"make_pytable.py","file_ext":"py","file_size_in_byte":2903,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"606211971","text":"\"\"\"Game of life functions.\"\"\"\nimport numpy as np\nfrom numba import jit, int32, int8\n\n\n@jit((int32[:, :], int32, int32, int32), nopython=True)\ndef update_neighbours(neighbours, i, j, grid_length):\n \"\"\"Update neighbours with nearest neighbours of site (i, j).\"\"\"\n neighbours[0, 0] = i\n neighbours[1, 0] = i\n neighbours[2, 0] = i + 1\n neighbours[3, 0] = i - 1\n neighbours[4, 0] = i - 1\n neighbours[5, 0] = i - 1\n neighbours[6, 0] = i + 1\n neighbours[7, 0] = i + 1\n\n neighbours[0, 1] = j + 1\n neighbours[1, 1] = j - 1\n neighbours[2, 1] = j\n neighbours[3, 1] = j\n neighbours[4, 1] = j + 1\n neighbours[5, 1] = j - 1\n neighbours[6, 1] = j + 1\n neighbours[7, 1] = j - 1\n\n neighbours %= grid_length\n\n\n@jit(int32(int8[:, :], int32[:, :]), nopython=True)\ndef count_neighbours(in_grid, neighbours):\n \"\"\"Count the number of live neighbours of the site.\"\"\"\n count = np.int32(0)\n for n_count in range(neighbours.shape[0]):\n if in_grid[neighbours[n_count, 0], neighbours[n_count, 1]] == 1:\n count += 1\n return count\n\n\n@jit((int32[:, :], int8[:, :], int8[:, :]), nopython=True)\ndef grid_sweep(neighbours, in_grid, out_grid):\n \"\"\"Sweep the grid once with game of life rules.\"\"\"\n for i in range(in_grid.shape[0]):\n for j in range(in_grid.shape[1]):\n\n update_neighbours(neighbours, i, j, in_grid.shape[0])\n n_count = 
count_neighbours(in_grid, neighbours)\n\n if in_grid[i, j] == 1:\n\n if n_count < 2:\n out_grid[i, j] = 0\n if n_count in (2, 3):\n out_grid[i, j] = 1\n if n_count > 3:\n out_grid[i, j] = 0\n\n if in_grid[i, j] == 0 and n_count == 3:\n out_grid[i, j] = 1\n\n\n@jit((int32, int32, int32[:, :], int8[:, :], int8[:, :, :]), nopython=True)\ndef game_of_life(n_frames, interval, neighbours, in_grid,\n solutions):\n \"\"\"Simulate the game of life.\"\"\"\n out_grid = np.zeros((in_grid.shape[0], in_grid.shape[1]), dtype=np.int8)\n out_grid[:, :] = in_grid\n solutions[:, :, 0] = out_grid[:, :]\n for sweeps in range(n_frames):\n\n for _ in range(interval):\n\n grid_sweep(neighbours, in_grid, out_grid)\n\n in_grid[:, :] = out_grid[:, :]\n solutions[:, :, sweeps + 1] = out_grid[:, :]\n return()\n\n\ndef make_movie(solutions, file_name, fps):\n \"\"\"Make a movie of the game of life.\"\"\"\n import matplotlib.pyplot as plt\n import matplotlib.animation as manimation\n plt.rc('text', usetex=True)\n plt.rc('font', family='serif')\n ffmpeg_writer = manimation.writers['ffmpeg']\n metadata = dict(title='Game of Life',\n artist='James Denholm',\n comment='Movie support!')\n writer = ffmpeg_writer(fps=fps, metadata=metadata)\n\n fig, axis = plt.subplots(1, figsize=(2, 2))\n fig.subplots_adjust(left=0.05, right=0.95, bottom=0.03, top=0.88)\n\n axis.set_xticks([])\n axis.set_yticks([])\n\n print(\"Movie progress\")\n\n with writer.saving(fig, file_name + '.mp4', dpi=500):\n\n for count in range(solutions.shape[2]):\n\n progress_bar(count + 1, solutions.shape[2], decimals=3)\n heat_map = axis.imshow(solutions[:, :, count], vmin=0, vmax=1,\n cmap=\"YlGnBu\")\n axis.set_title(\"t = %.3e\" % count, fontsize=10)\n\n writer.grab_frame()\n heat_map.remove()\n return()\n\n\ndef progress_bar(iteration, total, prefix=' ', suffix=' ', decimals=1,\n length=50, fill='█'):\n \"\"\"Call in a loop to create terminal progress bar.\n @params:\n iteration - Required : current iteration (Int)\n total - Required : total iterations (Int)\n prefix - Optional : prefix string (Str)\n suffix - Optional : suffix string (Str)\n decimals - Optional : positive number of decimals (Int)\n length - Optional : character length of bar (Int)\n fill - Optional : bar fill character (Str)\n \"https://stackoverflow.com/questions/3173320/text-progress-bar-in-the-console\"\n \"\"\"\n percent = (\"{0:.\" + str(decimals) + \"f}\").format(100 * (iteration /\n float(total)))\n filled_length = int(length * iteration // total)\n prog_bar = fill * filled_length + '-' * (length - filled_length)\n print('\\r%s |%s| %s%% %s' % (prefix, prog_bar, percent, suffix), end='\\r')\n # Print New Line on Complete\n if iteration == total:\n print()\n return()\n","sub_path":"functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":4753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"575528931","text":"\"\"\"\nThis module contains a number of pre-created transformations.\n\"\"\"\n\nimport tigger.cluda.dtypes as dtypes\nfrom tigger.core import *\nfrom tigger import Transformation\n\n\ndef identity():\n \"\"\"\n Returns an identity transformation (1 output, 1 input): ``output1 = input1``.\n \"\"\"\n return Transformation(\n inputs=1, outputs=1,\n code=\"${o1.store}(${i1.load});\")\n\n\ndef scale_param():\n \"\"\"\n Returns a scaling transformation with dynamic 
parameter (1 output, 1 input, 1 scalar):\n ``output1 = input1 * scalar1``.\n \"\"\"\n return Transformation(\n inputs=1, outputs=1, scalars=1,\n code=\"${o1.store}(${func.mul(i1.dtype, s1.dtype, out=o1.dtype)}(${i1.load}, ${s1}));\")\n\n\ndef scale_const(multiplier):\n \"\"\"\n Returns a scaling transformation with fixed parameter (1 output, 1 input):\n ``output1 = input1 * <multiplier>``.\n \"\"\"\n dtype = dtypes.detect_type(multiplier)\n return Transformation(\n inputs=1, outputs=1,\n code=\"${o1.store}(${func.mul(i1.dtype, numpy.\" + str(dtype) + \", out=o1.dtype)}(\" +\n \"${i1.load}, \" + dtypes.c_constant(multiplier, dtype=dtype) + \"));\")\n\n\ndef split_complex():\n \"\"\"\n Returns a transformation which splits complex input into two real outputs\n (2 outputs, 1 input): ``output1 = Re(input1), output2 = Im(input1)``.\n \"\"\"\n return Transformation(\n inputs=1, outputs=2,\n derive_i_from_os=lambda o1, o2: dtypes.complex_for(o1),\n code=\"\"\"\n ${o1.store}(${i1.load}.x);\n ${o2.store}(${i1.load}.y);\n \"\"\")\n\n\ndef combine_complex():\n \"\"\"\n Returns a transformation which joins two real inputs into complex output\n (1 output, 2 inputs): ``output = input1 + 1j * input2``.\n \"\"\"\n return Transformation(\n inputs=2, outputs=1,\n derive_o_from_is=lambda i1, i2: dtypes.complex_for(i1),\n code=\"${o1.store}(COMPLEX_CTR(${o1.ctype})(${i1.load}, ${i2.load}));\")\n","sub_path":"tigger/transformations.py","file_name":"transformations.py","file_ext":"py","file_size_in_byte":1943,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"624631785","text":"try:\r\n from tkinter import *\r\nexcept ImportError:\r\n from Tkinter import *\r\nfrom math import sqrt\r\nimport random\r\nrandom.seed()\r\nSCREEN_WIDTH = 600\r\nSCREEN_HEIGHT = 800\r\nRADIUS = 64\r\nFPS = int(1000/60)\r\nNORMAL_SPEED = 3000\r\nGRAVITY = 0.15\r\nSPEED_LIMIT = 2\r\nLIFES = 5\r\n\r\nNORMAL_BUTTON_COLOR = '#%02x%02x%02x' % (0, 200, 255)\r\nCLICKED_BUTTON_COLOR = '#%02x%02x%02x' % (0, 150, 255)\r\nNORMAL_BUTTON_COLOR_RED = '#%02x%02x%02x' % (255, 50, 25)\r\nCLICKED_BUTTON_COLOR_RED = '#%02x%02x%02x' % (255, 0, 0)\r\nBALL_COLORS = ['#%02x%02x%02x' % (255, 0, 0), '#%02x%02x%02x' % (0, 255, 0), '#%02x%02x%02x' % (0, 0, 255),\r\n '#%02x%02x%02x' % (255, 255, 0)]\r\n\r\nwindow = Tk()\r\nwindow.geometry('{}x{}+30+30'.format(SCREEN_WIDTH, SCREEN_HEIGHT))\r\ncanvas = Canvas(window, width=SCREEN_WIDTH, height=SCREEN_HEIGHT)\r\ncanvas.place(x=0, y=0)\r\nactive_widgets = []\r\nballs = []\r\n\r\n\r\nclass Ball():\r\n def __init__(self, clicks):\r\n self.x = random.randint(1, SCREEN_WIDTH-RADIUS*2-1)\r\n self.y = -2*RADIUS\r\n self.color = BALL_COLORS[random.randint(0, len(BALL_COLORS)-1)]\r\n self.y_change = 0\r\n self.clicks = clicks\r\n balls.append(self)\r\n\r\n def draw(self):\r\n canvas.create_oval(self.x, self.y, self.x + 2 * RADIUS, self.y + 2 * RADIUS,\r\n fill=self.color, outline=self.color)\r\n canvas.create_text(self.x+RADIUS, self.y+RADIUS, anchor='c', text=self.clicks, font=('TkDefaultFont', 20))\r\n\r\n\r\ndef new_ball():\r\n Ball(3*(int(time//(FPS*30))+1))\r\n window.after(NORMAL_SPEED, new_ball)\r\n\r\n\r\ndef draw_balls():\r\n global time\r\n canvas.delete(ALL)\r\n for i in balls:\r\n i.draw()\r\n canvas.create_text(1, 1, anchor='nw', text=\"Lifes: {}\".format(LIFES), font=('TkDefaultFont', 16))\r\n time += 1\r\n seconds = int(time / (1000 / FPS))\r\n canvas.create_text(SCREEN_WIDTH/2, 0, anchor='n', text='{}'.format(seconds), font=('TkDefaultFont', 16))\r\n 
canvas.create_text(1, 20, anchor='nw', text=\"Score: {}\".format(score), font=('TkDefaultFont', 16))\r\n canvas.create_text(1, 40, anchor='nw', text=\"Multiplier: x{}\".format(int(time//(FPS*30))+1), font=('TkDefaultFont', 16))\r\n window.after(FPS, draw_balls)\r\n\r\n\r\ndef gravity():\r\n global LIFES\r\n for i in balls[:]: # iterate over a copy, balls may be removed below\r\n if i.y_change < SPEED_LIMIT: # cap the fall speed\r\n i.y_change += GRAVITY\r\n i.y += i.y_change\r\n if i.y >= SCREEN_HEIGHT:\r\n LIFES -= 1\r\n balls.pop(balls.index(i))\r\n if LIFES <= 0:\r\n canvas.delete(\"all\")\r\n main()\r\n window.after(FPS, gravity)\r\n\r\n\r\ndef click(event):\r\n global score\r\n for i in balls[:]: # iterate over a copy, balls may be removed below\r\n if sqrt((i.x+RADIUS-event.x)**2+(i.y+RADIUS-event.y)**2) <= RADIUS and i.y_change >= 0:\r\n i.y_change *= -1\r\n i.clicks -= 1\r\n score += (int(time//(FPS*30))+1)\r\n if i.clicks <= 0:\r\n balls.pop(balls.index(i))\r\n score += (int(time//(FPS*30))+1)**2\r\n\r\n\r\ndef play(arg):\r\n global time, score\r\n for i in active_widgets:\r\n i.place_forget()\r\n time = 0\r\n score = 0\r\n new_ball()\r\n draw_balls()\r\n gravity()\r\n canvas.bind(\"<Button-1>\", click)\r\n canvas.update_idletasks()\r\n\r\n\r\ndef quiting(arg):\r\n quit()\r\n\r\n\r\ndef main():\r\n global active_widgets, window\r\n for i in active_widgets:\r\n i.place_forget()\r\n\r\n naslov = Label(window, text='Balls', font=('TkDefaultFont', 96))\r\n naslov.place(relx=.5, rely=.15, anchor='c')\r\n\r\n play_button = Button(window, text='Play')\r\n play_button.bind('