diff --git "a/2330.jsonl" "b/2330.jsonl" new file mode 100644--- /dev/null +++ "b/2330.jsonl" @@ -0,0 +1,696 @@ +{"seq_id":"22583833","text":"import sqlite3\n\n# Conexão ao banco de dados;\nconn = sqlite3.connect('pi_cross.db')\n\n# Definindo um cursor, que irá executar as queries.\ncursor = conn.cursor()\n\n#Verificando dados das tabelas\ndef selectTable():\n\n ### Este trecho de código é para testes, deve ser substituído quando definidas as rotas\n tb_type = ['Especialidades', 'Paciente', 'Unidade', 'Agendamento']\n tbl_selected = int(input('''\n Qual tabela deseja selecionar?:\\n\n\n 1 - Especialidades\n 2 - Paciente\n 3 - Unidade\n 4 - Agendamento \n ''')) - 1 \n ###\n\n shown = conn.execute(\"SELECT * FROM \" + tb_type[tbl_selected])\n\n ## A linha abaixo retorna no terminal o resultado das tabelas\n print(shown.fetchall())\n conn.commit()\n conn.close()\n\nselectTable()\n\ndef deleteRow():\n ### Este trecho de código é para testes, deve ser substituído quando definidas as rotas\n tb_type = ['Especialidades', 'Paciente', 'Unidade', 'Agendamento']\n tbl_selected = int(input('''\n Qual tabela deseja excluir?:\\n\n\n 1 - Especialidades\n 2 - Paciente\n 3 - Unidade\n 4 - Agendamento \n ''')) - 1 \n\n if tb_type[tbl_selected] == tb_type[0]:\n entidade = 'id_espec'\n chave = str(input(\"Qual é o id desta especialidade?: \"))\n \n elif tb_type[tbl_selected] == tb_type[1]:\n entidade = 'cpf'\n chave = str(input(\"Qual é o cpf?: \"))\n\n elif tb_type[tbl_selected] == tb_type[2]:\n entidade = 'cnpj'\n chave = str(input(\"Qual é o cnpj?: \"))\n\n elif tb_type[tbl_selected] == tb_type[3]:\n entidade = 'cpf'\n chave = str(input(\"Qual é o cpf do paciente?: \"))\n ###\n\n conn.execute(\"DELETE FROM\" + tb_type[tbl_selected] + \"WHERE\" + entidade + \"=\" + chave)\n\n conn.commit()\n conn.close()\n\n# deleteRow()","sub_path":"cross_db/selectTable.py","file_name":"selectTable.py","file_ext":"py","file_size_in_byte":1815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"167769052","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Jan 7 18:12:41 2016\n\n@author: mints\n\"\"\"\nimport pylab as plt\nimport joblib\nimport numpy as np\nfrom error_ellipse import plot_cov_ellipse\n\nm = joblib.load('test2.mix')\n\nmu = []\npi = []\nsigma = []\nsigma2d = []\nfor icomp, comp in enumerate(m.components):\n dist = comp.distList[0]\n pi.append(m.pi[icomp])\n mu.append(dist.mu)\n sigma.append(np.sqrt(np.diag(dist.sigma)))\n sigma2d.append(dist.sigma)\nmu = np.array(mu)\npi = np.array(pi)\nsigma = np.array(sigma)\nsigma2d = np.array(sigma2d)\n\nlabels = ['Fe', 'Age', 'sin_i']\n\nplt.rcParams.update({'font.size': 12})\n\ndef plot_1d(val, save=False):\n col = 'rgbcmyk'\n extend = 1.\n plt.clf()\n plt.xlabel(labels[val])\n plt.ylabel('Fraction')\n dval = mu[:, val].max() - mu[:, val].min()\n x = np.linspace(mu[:, val].min() - extend*dval,\n mu[:, val].max() + extend*dval)\n if val == 2:\n plt.xlim(0., 1.)\n for i in xrange(len(pi)):\n plt.plot(x, pi[i]*np.exp(-(x-mu[i, val])**2 / (2.*sigma[i, val]**2))/ np.sqrt(2.*np.pi*sigma[i, val]**2), color=col[i])\n if save:\n plt.savefig('%s.png' % labels[val])\n else:\n plt.show()\n\n\ndef plot_2d(val1, val2, save=False):\n col = 'rgbcmyk'\n plt.clf()\n xmin = mu[:, val1] - sigma[:, val1]*2.1\n xmax = mu[:, val1] + sigma[:, val1]*2.1\n ymin = mu[:, val2] - sigma[:, val2]*2.1\n ymax = mu[:, val2] + sigma[:, val2]*2.1\n plt.xlim(xmin.min(), xmax.max())\n plt.ylim(ymin.min(), ymax.max())\n plt.xlabel(labels[val1], 
fontsize=16)\n plt.ylabel(labels[val2], fontsize=16)\n ppi = (pi - pi.min() + 0.01) / (pi.max() - pi.min() + 0.01)\n for j in xrange(len(pi)):\n i = len(pi) - j - 1\n sig = sigma2d[i][[val1, val2]][:, [val1, val2]]\n plot_cov_ellipse(sig, mu[i][[val1, val2]], facecolor=col[i], alpha=ppi[i])\n if save:\n plt.savefig('%s_%s.png' % (labels[val1], labels[val2]))\n else:\n plt.show()\n\n#plot_2d(0, 1, True)\n#plot_2d(0, 2, True)\n#plot_2d(1, 2, True)\nplot_1d(0, True)\nplot_1d(1, True)\nplot_1d(2, True)\n","sub_path":"mixture_plot1.py","file_name":"mixture_plot1.py","file_ext":"py","file_size_in_byte":2074,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"243431387","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build\\bdist.win-amd64\\egg\\tome\\writeText.py\n# Compiled at: 2013-04-26 21:45:24\n\"\"\"\nCopyright 2013 Brian Mearns\n\nThis file is part of Tome.\n\nTome is free software: you can redistribute it and/or modify\nit under the terms of the GNU Affero General Public License as published by\nthe Free Software Foundation, either version 3 of the License, or\n(at your option) any later version.\n\nTome is distributed in the hope that it will be useful,\nbut WITHOUT ANY WARRANTY; without even the implied warranty of\nMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\nGNU Affero General Public License for more details.\n\nYou should have received a copy of the GNU Affero General Public License\nalong with Tome. If not, see .\n\"\"\"\nimport Tome, cStringIO, wrapUtil\n\nclass TextWriter(object):\n\n def __init__(self, linewidth=None):\n self.__linewidth = linewidth\n self.__chapterNotes = []\n self.__noteNumber = 1\n\n def writeBlockSegment(self, ostream, begin, end, segment, prefix='', suffix=''):\n ostream.write(begin)\n notFirst = False\n for par in segment:\n if isinstance(par, Tome.TextSegment) and len(par.text().strip()) == 0:\n continue\n if not isinstance(par, Tome.TaggedSegment) or par.tag() != 'p':\n raise Exception('Block node may only contain \"p\" elements: found \"%s\"' % par.tag())\n if notFirst:\n ostream.write('\\n\\n' + prefix)\n notFirst = True\n for cseg in par:\n self.writeSegment(ostream, cseg)\n\n ostream.write(suffix)\n\n ostream.write(end)\n\n def writeSegment(self, ostream, segment):\n if isinstance(segment, Tome.TaggedSegment):\n tag = segment.tag()\n if tag == 'pre':\n close = ''\n else:\n if tag == 'b':\n ostream.write('*')\n close = '*'\n elif tag == 'i':\n ostream.write('/')\n close = '/'\n elif tag == 'em':\n ostream.write('**')\n close = '**'\n elif tag == 'u':\n ostream.write('_')\n close = '_'\n else:\n if tag == 'ellips':\n ostream.write('...')\n return\n if tag == 'md':\n ostream.write('---')\n return\n if tag == 'nd':\n ostream.write('--')\n return\n if tag == 'sp':\n ostream.write(' ')\n return\n if tag == 'lnbrk':\n ostream.write('\\n')\n return\n if tag in ('grave', 'acute', 'circumflex', 'umlaut', 'tilde', 'cedilla'):\n close = ''\n else:\n if tag == 'q':\n return self.writeBlockSegment(ostream, '\"', '\"', segment, prefix='\"')\n if tag == 'sq':\n return self.writeBlockSegment(ostream, \"'\", \"'\", segment, prefix=\"'\")\n if tag == 'n':\n enStream = cStringIO.StringIO()\n self.writeBlockSegment(enStream, '', '', segment)\n note = enStream.getvalue()\n enStream.close()\n self.__chapterNotes[(-1)].append(note)\n ostream.write('[%d]' % self.__noteNumber)\n 
self.__noteNumber += 1\n return\n if tag == 'bq':\n return self.writeBlockSegment(ostream, '\\n\\n\"', '\"\\n\\n', segment)\n raise Exception('Unhandled tag: %s' % tag)\n for seg in segment:\n self.writeSegment(ostream, seg)\n\n ostream.write(close)\n elif isinstance(segment, Tome.TextSegment):\n content = segment.text()\n ostream.write(content)\n else:\n raise TypeError('Unexpected type for segment.')\n\n def writeParagraphs(self, ostream, text, linewidth=None, indent='', tag=None, parBreak='\\n\\n'):\n if linewidth is None:\n linewidth = self.__linewidth\n if linewidth is not None:\n linewidth -= len(indent)\n for subpar in text.splitlines():\n if len(subpar) > 0:\n lines = self.wrapText(subpar, linewidth)\n self.writeJustified(ostream, lines, linewidth, indent, tag)\n tag = None\n ostream.write(parBreak)\n\n return\n\n def wrapText(self, text, linewidth=None, remain=None):\n \"\"\"\n Generates an array of lines, each line is an array of words, such that the word fit\n into the specified linewidth.\n\n :param int remain:\n Specifies how many columns remain on the current line. Default is all of them.\n\n \"\"\"\n if linewidth is None:\n linewidth = self.__linewidth\n return wrapUtil.wrapText(text, linewidth, remain)\n\n def writeCenteredLine(self, ostream, line, linewidth=None, width=None):\n if linewidth is None:\n linewidth = self.__linewidth\n if linewidth is None:\n ostream.write(line + '\\n')\n return\n else:\n if width is None:\n width = linewidth\n lines = self.wrapText(line, width)\n for line in lines:\n line = (' ').join(line)\n length = len(line)\n diff = linewidth - length\n lpadd = (diff + 1) / 2\n ostream.write(' ' * lpadd + line + '\\n')\n\n return\n\n def writeHr(self, ostream, width, linewidth=None, char='-'):\n hr = char * width\n if linewidth is None:\n linewidth = self.__linewidth\n if linewidth is None:\n ostream.write(hr + '\\n')\n return\n else:\n diff = linewidth - width\n lpadd = (diff + 1) / 2\n ostream.write(' ' * lpadd + hr + '\\n')\n return\n\n def writeJustified(self, ostream, lines, linewidth=None, indent='', tag=None):\n \"\"\"\n Lines should be a list of lines, each line is a list of words.\n \"\"\"\n if linewidth is None:\n linewidth = self.__linewidth\n if linewidth is None:\n ostream.write(('\\n').join(indent + (' ').join(line) for line in lines))\n return\n else:\n prefix = indent\n if tag is not None:\n prefix = tag\n for line in lines[:-1]:\n required = len(('').join(line))\n leftOver = linewidth - required\n spCount = len(line) - 1\n if spCount == 0:\n ostream.write(indent + line[0])\n continue\n spacesPer = int(float(leftOver) / float(spCount))\n padd = ' ' * spacesPer\n leftOver = leftOver - spacesPer * spCount\n spaces = [\n spacesPer] * spCount\n while leftOver > 0:\n longestLength = None\n longestLevel = None\n longestI = None\n for i in xrange(spCount):\n length = len(line[i])\n if longestLength is None:\n longestLength = length\n longestLevel = spaces[i]\n longestI = i\n elif spaces[i] < longestLevel:\n longestLength = length\n longestLevel = spaces[i]\n longestI = i\n elif spaces[i] == longestLevel and length > longestLength:\n longestLength = length\n longestLevel = spaces[i]\n longestI = i\n\n spaces[longestI] += 1\n leftOver -= 1\n\n ostream.write(prefix)\n for i in xrange(spCount):\n ostream.write(line[i] + ' ' * spaces[i])\n if leftOver > 0:\n ostream.write(' ')\n leftOver -= 1\n\n ostream.write(line[(-1)] + '\\n')\n prefix = indent\n\n if len(lines) > 0:\n ostream.write(prefix + (' ').join(lines[(-1)]))\n return\n\n def writeText(self, 
tome, ostream):\n ostream.write('\\n\\n\\n')\n titleWidth = int(0.7 * float(self.__linewidth))\n if titleWidth < 20:\n titleWidth = self.__linewidth\n lmTitles = tome.allTitles()\n if len(lmTitles) > 0:\n for title in tome.allTitles():\n self.writeCenteredLine(ostream, title, self.__linewidth, titleWidth)\n self.writeHr(ostream, 3, self.__linewidth)\n\n ostream.write('\\n')\n lmAuthors = tome.authors()\n if len(lmAuthors) > 0:\n for author in lmAuthors:\n self.writeCenteredLine(ostream, author, self.__linewidth)\n\n ostream.write('\\n')\n if len(lmTitles) > 0 or len(lmAuthors) > 0:\n ostream.write('\\n\\n\\n')\n chNum = 0\n for part in tome:\n for book in part:\n for chapter in book:\n chNum += 1\n self.__chapterNotes.append([])\n chFirstNoteNum = self.__noteNumber\n self.writeCenteredLine(ostream, 'Chapter %d' % chNum, self.__linewidth, titleWidth)\n chAllTitles = chapter.allTitles()\n if len(chAllTitles) > 0:\n for title in chAllTitles:\n self.writeCenteredLine(ostream, title, self.__linewidth, titleWidth)\n\n else:\n ostream.write('\\n')\n self.writeHr(ostream, 9)\n ostream.write('\\n')\n scCount = len(chapter)\n lastScene = scCount - 1\n for i in xrange(scCount):\n scene = chapter[i]\n parCount = len(scene)\n for j in xrange(parCount):\n paragraph = scene[j]\n tag = paragraph.tag()\n if tag not in ('p', 'pre'):\n raise Exception('Invalid toplevel element in scene: %s' % tag)\n preformatted = tag == 'pre'\n parStream = cStringIO.StringIO()\n for k in xrange(len(paragraph)):\n self.writeSegment(parStream, paragraph[k])\n\n text = parStream.getvalue()\n parStream.close()\n if preformatted:\n ostream.write(text)\n else:\n self.writeParagraphs(ostream, text)\n\n if i < lastScene:\n self.writeHr(ostream, 3, char='*')\n ostream.write('\\n')\n\n ostream.write('\\n')\n if len(self.__chapterNotes[(-1)]) > 0:\n notesTitle = 'Notes for Chapter %d' % chNum\n ostream.write(' %s\\n' % notesTitle)\n ostream.write(' ' + '-' * len(notesTitle) + '\\n')\n self.writeChapterNotes(ostream, self.__chapterNotes[(-1)], chFirstNoteNum)\n ostream.write('\\n')\n\n ostream.write('\\n')\n if self.__noteNumber > 1:\n self.writeCenteredLine(ostream, 'All Chapter Notes', self.__linewidth, titleWidth)\n ostream.write('\\n')\n noteNumber = 1\n chNum = 0\n for notes in self.__chapterNotes:\n chNum += 1\n if len(notes) > 0:\n notesTitle = 'Notes for Chapter %d' % chNum\n ostream.write(' %s\\n' % notesTitle)\n ostream.write(' ' + '-' * len(notesTitle) + '\\n')\n self.writeChapterNotes(ostream, notes, noteNumber)\n noteNumber += len(notes)\n\n def writeChapterNotes(self, ostream, notes, firstNoteNum):\n noteNumber = firstNoteNum\n for note in notes:\n bullet = ' [%d] ' % noteNumber\n lead = len(bullet)\n padd = ' ' * lead\n self.writeParagraphs(ostream, note, self.__linewidth - 4, padd, bullet)\n ostream.write('\\n')\n noteNumber += 1\n\n ostream.write('\\n')\n\n\nif __name__ == '__main__':\n import sys\n parser = Tome.TomeOtlParser(sys.stdin, filename='', debug=True)\n tome = parser.tome()\n writer = TextWriter(78)\n writer.writeText(tome, sys.stdout)","sub_path":"pycfiles/tome-1.5.0.0_r12-py2.7/writeText.py","file_name":"writeText.py","file_ext":"py","file_size_in_byte":13382,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"258591237","text":"# -*- coding: utf-8 -*-\n\"\"\"\n.. module:: MyCapytain.resources.xml\n :synopsis: XML based PrototypeText and repository\n\n.. 
moduleauthor:: Thibault Clérice \n\n\n\"\"\"\nfrom __future__ import unicode_literals\n\nfrom MyCapytain.resources.prototypes import text\nfrom MyCapytain.resources.prototypes.cts import inventory as cts\nfrom MyCapytain.common.reference import Citation as CitationPrototype\nfrom MyCapytain.common.utils import xmlparser\nfrom MyCapytain.common.constants import NS, Mimetypes\n\n\nclass Citation(CitationPrototype):\n \"\"\" Citation XML implementation for PrototypeTextInventory\n\n \"\"\"\n\n @staticmethod\n def ingest(resource, element=None, xpath=\"ti:citation\"):\n \"\"\" Ingest xml to create a citation\n\n :param resource: XML on which to do xpath\n :param element: Element where the citation should be stored\n :param xpath: XPath to use to retrieve citation\n\n :return: Citation\n \"\"\"\n # Reuse of of find citation\n results = resource.xpath(xpath, namespaces=NS)\n if len(results) > 0:\n citation = Citation(\n name=results[0].get(\"label\"),\n xpath=results[0].get(\"xpath\"),\n scope=results[0].get(\"scope\")\n )\n\n if isinstance(element, Citation):\n element.child = citation\n Citation.ingest(\n resource=results[0],\n element=element.child\n )\n else:\n element = citation\n Citation.ingest(\n resource=results[0],\n element=element\n )\n\n return citation\n\n return None\n\n\ndef xpathDict(xml, xpath, cls, parent, **kwargs):\n \"\"\" Returns a default Dict given certain information\n\n :param xml: An xml tree\n :type xml: etree\n :param xpath: XPath to find children\n :type xpath: str\n :param cls: Class identifying children\n :type cls: inventory.Resource\n :param parent: Parent of object\n :type parent: CTSCollection\n :rtype: collections.defaultdict.\n :returns: Dictionary of children\n \"\"\"\n for child in xml.xpath(xpath, namespaces=NS):\n cls.parse(\n resource=child,\n parent=parent,\n **kwargs\n )\n\n\nclass Text(cts.PrototypeText):\n \"\"\" Represents a CTS PrototypeText\n\n \"\"\"\n DEFAULT_EXPORT = Mimetypes.PYTHON.ETREE\n\n @staticmethod\n def __findCitations(obj, xml, xpath=\"ti:citation\"):\n \"\"\" Find citation in current xml. 
Used as a loop for xmlparser()\n\n :param xml: Xml resource to be parsed\n :param xpath: Xpath to use to retrieve the xml node\n \"\"\"\n\n @staticmethod\n def parse_metadata(obj, xml):\n \"\"\" Parse a resource to feed the object\n\n :param obj: Obj to set metadata of\n :type obj: Text\n :param xml: An xml representation object\n :type xml: lxml.etree._Element\n \"\"\"\n\n for child in xml.xpath(\"ti:description\", namespaces=NS):\n lg = child.get(\"{http://www.w3.org/XML/1998/namespace}lang\")\n if lg is not None:\n obj.set_cts_property(\"description\", child.text, lg)\n\n for child in xml.xpath(\"ti:label\", namespaces=NS):\n lg = child.get(\"{http://www.w3.org/XML/1998/namespace}lang\")\n if lg is not None:\n obj.set_cts_property(\"label\", child.text, lg)\n\n obj.citation = Citation.ingest(xml, obj.citation, \"ti:online/ti:citationMapping/ti:citation\")\n\n \"\"\"\n online = xml.xpath(\"ti:online\", namespaces=NS)\n if len(online) > 0:\n online = online[0]\n obj.docname = online.get(\"docname\")\n for validate in online.xpath(\"ti:validate\", namespaces=NS):\n obj.validate = validate.get(\"schema\")\n for namespaceMapping in online.xpath(\"ti:namespaceMapping\", namespaces=NS):\n obj.metadata[\"namespaceMapping\"][namespaceMapping.get(\"abbreviation\")] = namespaceMapping.get(\"nsURI\")\n \"\"\"\n\n\nclass Edition(cts.PrototypeEdition, Text):\n \"\"\" Create an edition subtyped PrototypeText object\n \"\"\"\n @staticmethod\n def parse(resource, parent=None):\n xml = xmlparser(resource)\n o = Edition(urn=xml.get(\"urn\"), parent=parent)\n Edition.parse_metadata(o, xml)\n\n return o\n\n\nclass Translation(cts.PrototypeTranslation, Text):\n \"\"\" Create a translation subtyped PrototypeText object\n \"\"\"\n @staticmethod\n def parse(resource, parent=None):\n xml = xmlparser(resource)\n lang = xml.get(\"{http://www.w3.org/XML/1998/namespace}lang\")\n\n o = Translation(urn=xml.get(\"urn\"), parent=parent)\n if lang is not None:\n o.lang = lang\n Translation.parse_metadata(o, xml)\n return o\n\n\nclass Work(cts.PrototypeWork):\n \"\"\" Represents a CTS Textgroup in XML\n \"\"\"\n\n @staticmethod\n def parse(resource, parent=None):\n \"\"\" Parse a resource\n\n :param resource: Element rerpresenting a work\n :param type: basestring, etree._Element\n :param parent: Parent of the object\n :type parent: TextGroup\n \"\"\"\n xml = xmlparser(resource)\n o = Work(urn=xml.get(\"urn\"), parent=parent)\n\n lang = xml.get(\"{http://www.w3.org/XML/1998/namespace}lang\")\n if lang is not None:\n o.lang = lang\n\n for child in xml.xpath(\"ti:title\", namespaces=NS):\n lg = child.get(\"{http://www.w3.org/XML/1998/namespace}lang\")\n if lg is not None:\n o.set_cts_property(\"title\", child.text, lg)\n\n # Parse children\n xpathDict(xml=xml, xpath='ti:edition', cls=Edition, parent=o)\n xpathDict(xml=xml, xpath='ti:translation', cls=Translation, parent=o)\n\n return o\n\n\nclass TextGroup(cts.PrototypeTextGroup):\n \"\"\" Represents a CTS Textgroup in XML\n \"\"\"\n\n @staticmethod\n def parse(resource, parent=None):\n \"\"\" Parse a textgroup resource\n\n :param resource: Element representing the textgroup\n :param parent: Parent of the textgroup\n \"\"\"\n xml = xmlparser(resource)\n o = TextGroup(urn=xml.get(\"urn\"), parent=parent)\n\n for child in xml.xpath(\"ti:groupname\", namespaces=NS):\n lg = child.get(\"{http://www.w3.org/XML/1998/namespace}lang\")\n if lg is not None:\n o.set_cts_property(\"groupname\", child.text, lg)\n\n # Parse Works\n xpathDict(xml=xml, xpath='ti:work', cls=Work, parent=o)\n 
return o\n\n\nclass TextInventory(cts.PrototypeTextInventory):\n \"\"\" Represents a CTS Inventory file\n \"\"\"\n\n @staticmethod\n def parse(resource):\n \"\"\" Parse a resource \n\n :param resource: Element representing the text inventory\n :param type: basestring, etree._Element\n \"\"\"\n xml = xmlparser(resource)\n o = TextInventory(name=xml.xpath(\"//ti:TextInventory\", namespaces=NS)[0].get(\"tiid\") or \"\")\n # Parse textgroups\n xpathDict(xml=xml, xpath='//ti:textgroup', cls=TextGroup, parent=o)\n return o\n","sub_path":"MyCapytain/resources/collections/cts.py","file_name":"cts.py","file_ext":"py","file_size_in_byte":7145,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"136126074","text":"from django.db import models\nfrom django.contrib.auth.models import AbstractUser\nfrom django.utils import timezone\nimport datetime\nfrom encrypted_fields import fields\nfrom django_encrypted_filefield.fields import EncryptedFileField, EncryptedImageField\n\n\n\n\n# Create your models here.\nclass User(AbstractUser):\n joined_on = models.DateField(default = datetime.date.today)\n avatar = models.ImageField(upload_to='images', default=\"/images/user.svg\", max_length=300)\n birthday = fields.EncryptedDateField(blank=True, null=True)\n\n def __str__(self):\n return self.username\n\nclass Circle(models.Model):\n name = fields.EncryptedCharField(max_length = 100)\n admin = models.ForeignKey(to=\"User\", related_name=\"is_admin\", on_delete=models.CASCADE)\n members = models.ManyToManyField(to=\"User\", related_name=\"mates\", blank=True)\n pending_members = models.ManyToManyField(to=\"User\", related_name=\"pending\", blank=True)\n created_at = fields.EncryptedDateTimeField(default = timezone.now)\n\n\n def __str__(self):\n return self.name\n\nclass Content(models.Model):\n member = models.ForeignKey(to=\"User\", related_name=\"poster\", on_delete=models.CASCADE)\n author = models.CharField(max_length=255, blank=True, null=True)\n circle = models.ForeignKey(to=\"Circle\", related_name=\"posts\", on_delete=models.CASCADE)\n text_post = fields.EncryptedTextField(blank=True, null=True)\n img_post = models.ImageField(upload_to='images', blank=True, null=True)\n caption = fields.EncryptedCharField(max_length=200, blank=True, null=True)\n likes = models.ManyToManyField(to=\"User\", related_name=\"content_liker\", blank=True)\n created_at = fields.EncryptedDateTimeField(default = timezone.now)\n updated_at = fields.EncryptedDateTimeField(default=timezone.now)\n tags = models.ForeignKey(to=\"User\", related_name=\"tagged\", on_delete=models.CASCADE, blank=True, null=True)\n\n def __str__(self):\n return self.text_post\n\nclass Comments(models.Model):\n author = models.ForeignKey(to=\"User\", related_name=\"comments\", on_delete=models.CASCADE)\n comment = fields.EncryptedTextField()\n likes = models.ManyToManyField(to=\"User\", related_name=\"comment_liker\", blank=True)\n content = models.ForeignKey(to=\"Content\", related_name=\"commented_stuff\", on_delete=models.CASCADE)\n created_at = fields.EncryptedDateTimeField(default = timezone.now)\n updated_at = fields.EncryptedDateTimeField(default=timezone.now)\n\n def __str__(self):\n return self.comment\n","sub_path":"stay_close/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2431,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"502868633","text":"#!/usr/bin/env python3\nimport random\n\n# txt file to be used\ntext_in = 
'sherlock_full.txt'\n\nwith open(text_in) as f:\n msg = f.read()\n\n# manipulate text file to be more trigram friendly\nstring = msg.lower()\nstring = string.replace(\"\\n\", \" \")\nstring = string.replace(\"--\", \" \")\nstring = string.replace(\"-\", \" \")\nspl_string = string.split(\" \")\n\n# init vals\ni = 0\nj = 0\nstr_dict = {}\n\nfor i in range(0, len(spl_string)-2):\n key = spl_string[i] + \" \" + spl_string[i+1]\n str_dict.setdefault(key, []).append(spl_string[i+2])\n\n\ndef rand_start():\n rand_key = random.choice(list(str_dict.keys()))\n return rand_key\n\n\ndef get_next(x, key):\n out = \"\"\n for i in range(x):\n if key in str_dict:\n next_word_list = str_dict[key]\n # select random index from next word list\n rand_index = random.randint(0, len(next_word_list)-1)\n next_word_rand = next_word_list[rand_index]\n split_key = key.split(\" \")\n key = split_key[1] + \" \" + next_word_rand\n out = out + \" \" + next_word_rand\n else:\n key = rand_start()\n return out\n\n\ndef trigrams(x):\n start = rand_start()\n print(get_next(x, start))\n\n\nif __name__ == \"__main__\":\n print(\"you are in trigram main\")\n print(\"you are currently using text from {}\".format(text_in))\n while_asking_for_size = 1\n while while_asking_for_size == 1:\n sel = int(input(\"what size trigram do you want? ('0' to quit) \"))\n if sel == 0:\n while_asking_for_size = 0\n else:\n trigrams(sel)\n","sub_path":"students/rgpag/lesson04/trigrams.py","file_name":"trigrams.py","file_ext":"py","file_size_in_byte":1570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"518644043","text":"import math\n\nfrom ...maths import *\nfrom ...core import globalSystem\nfrom ..core import *\nfrom .core import PathElement\nfrom .common import *\n\nclass ArclikePathElement(AcceleratableElement):\n\n\t\"\"\"\n\tBase class for arc-like PathElements.\n\t\"\"\"\n\n\tdef initialize(self, **config):\n\t\t# Parse the config and unpack it.\n\t\tinitial_angle, final_angle, duration, speed, repeats = getArcConfig(config)\n\n\t\tself.current_angle = initial_angle\n\t\tself.initial_angle = initial_angle\n\t\tself.final_angle = final_angle\n\n\t\tself.duration = duration\n\t\tself.speed = speed\n\n\t\tself.repeats = repeats\n\t\t# The current iteration number increases up to the given number\n\t\t# of repeats.\n\t\tself._current_iteration = 0\n\n\t\tself._transition_time = 0\n\t\tself._transition_amount = 0\n\n\t# We have to override setSpeed and transitionToSpeed because we have\n\t# to divide the speed by 100 to normalize it. 
For more information,\n\t# see the second comment in getArcConfig.\n\n\tdef setSpeed(self, speed):\n\t\t\"\"\"Set a new speed.\"\"\"\n\t\tself._timeBasedError()\n\n\t\tself.speed = speed/100\n\t\tself._transition_time = 0\n\n\tdef transitionToSpeed(self, new_speed, time):\n\t\t\"\"\"Smoothly transition to a new speed over a given period of time.\"\"\"\n\t\tself._timeBasedError()\n\n\t\tself._transition_amount = (\n\t\t\tnew_speed/100 - self.speed) / time * globalSystem._timestep\n\t\tself._transition_time = time\n\n\tdef _checkDone(self):\n\t\tif self.duration is not None:\n\t\t\t# If we've completed one iteration.\n\t\t\tif self.current_angle >= self.final_angle or self.current_angle <= self.initial_angle:\n\t\t\t\tself._current_iteration += 1\n\t\t\t\tself.speed *= -1\n\n\t\t\t# If we've completed all iterations.\n\t\t\tif self._current_iteration == self.repeats:\n\t\t\t\tself.done = True\n\nclass ArcPathElement(ArclikePathElement):\n\n\t\"\"\"\n\tA PathElement that represents motion in an arc (circle sector).\n\t\"\"\"\n\n\tdef initialize(self, **config):\n\t\tsuper().initialize(**config)\n\t\t\n\t\tradius = config[\"radius\"]\n\t\tself.radius = radius\n\n\t\tself.pivot = parametricCircle(radius, self.initial_angle)\n\n\tdef updateDisplacement(self):\n\t\t\"\"\"\n\t\tUpdate this PathElement's displacement.\n\t\t\"\"\"\n\t\tself.displacement = parametricCircle(self.radius, self.current_angle) - self.pivot\n\t\tself.current_angle += self.speed\n\n\t\t# Transition the speed (if necessary)\n\t\tself._transition()\n\n\t\tself._checkDone()\n\n\nclass EpitrochoidPathElement(ArclikePathElement):\n\n\t\"\"\"\n\tA PathElement that represents motion in an epitrochoid sector.\n\t\"\"\"\n\n\tdef initialize(self, **config):\n\t\tsuper().initialize(**config)\n\n\t\tself._R = config[\"innerRadius\"]\n\t\tself._r = config[\"outerRadius\"]\n\t\tself._d = config[\"armRadius\"]\n\n\t\tself.pivot = parametricEpitrochoid(self._R, self._r, self._d, self.initial_angle)\n\n\t\t# This part normalizes the speed relative to the periodicity of the\n\t\t# epitrochoid. 
An epitrochoid is periodic in 2*ri*pi, where ri is the\n\t\t# numerator of r as an integer ratio.\n\t\tduration = self.duration\n\t\tif duration is not None:\n\t\t\tarclength = self.final_angle - self.initial_angle\n\t\t\tperiodicity = float(self._r).as_integer_ratio()[0]\n\t\t\tself.speed = arclength / duration * periodicity * globalSystem._timestep\n\n\n\tdef updateDisplacement(self):\n\t\t\"\"\"\n\t\tUpdate this PathElement's displacement.\n\t\t\"\"\"\n\t\tself.displacement = parametricEpitrochoid(\n\t\t\tself._R, self._r, self._d, self.current_angle) - self.pivot\n\t\tself.current_angle += self.speed\n\n\t\t# Transition the speed (if necessary)\n\t\tself._transition()\n\n\t\tself._checkDone()\n\n\nclass LimaconPathElement(EpitrochoidPathElement):\n\n\t\"\"\"\n\tA PathElement that represents motion in a Limaçon sector.\n\t\"\"\"\n\n\tdef initialize(self, **config):\n\t\t# A Limaçon is just an epitrochoid with outer radius = inner radius.\n\t\tconfig[\"outerRadius\"] = config[\"radius\"]\n\t\tconfig[\"innerRadius\"] = config[\"radius\"]\n\t\tsuper().initialize(**config)\n\n\nclass EpicycloidPathElement(EpitrochoidPathElement):\n\n\t\"\"\"\n\tA PathElement that represents motion in an epicycloid sector.\n\t\"\"\"\n\n\tdef initialize(self, **config):\n\t\t# An epicycloid is just an epitrochoid with arm length = outer radius.\n\t\tconfig[\"armRadius\"] = config[\"outerRadius\"]\n\t\tsuper().initialize(**config)\n\n\nclass HypotrochoidPathElement(ArclikePathElement):\n\n\t\"\"\"\n\tA PathElement that represents motion in a hypotrochoid sector.\n\t\"\"\"\n\n\tdef initialize(self, **config):\n\t\tsuper().initialize(**config)\n\n\t\tself._R = config[\"outerRadius\"]\n\t\tself._r = config[\"innerRadius\"]\n\t\tself._d = config[\"armRadius\"]\n\n\t\tself.pivot = parametricHypotrochoid(self._R, self._r, self._d, self.initial_angle)\n\n\t\t# This part normalizes the speed relative to the periodicity of the\n\t\t# hypotrochoid. 
A hypotrochoid is periodic in 2*ri*pi, where ri is the\n\t\t# numerator of r as an integer ratio.\n\t\tduration = self.duration\n\t\tif duration is not None:\n\t\t\tarclength = self.final_angle - self.initial_angle\n\t\t\tperiodicity = float(self._r).as_integer_ratio()[0]\n\t\t\tself.speed = arclength / duration * periodicity * globalSystem._timestep\n\n\tdef updateDisplacement(self):\n\t\t\"\"\"\n\t\tUpdate this PathElement's displacement.\n\t\t\"\"\"\n\t\tself.displacement = parametricHypotrochoid(\n\t\t\tself._R, self._r, self._d, self.current_angle) - self.pivot\n\t\tself.current_angle += self.speed\n\n\t\t# Transition the speed (if necessary)\n\t\tself._transition()\n\n\t\tself._checkDone()\n\n\nclass HypocycloidPathElement(HypotrochoidPathElement):\n\n\t\"\"\"\n\tA PathElement that represents motion in a hypocycloid sector.\n\t\"\"\"\n\n\tdef initialize(self, **config):\n\t\t# A hypocycloid is just a hypotrochoid with arm length = inner radius.\n\t\tconfig[\"armRadius\"] = config[\"innerRadius\"]\n\t\tsuper().initialize(**config)\n\n\nclass RosePathElement(ArclikePathElement):\n\n\t\"\"\"\n\tA PathElement that represents motion in a rose curve.\n\t\"\"\"\n\n\tdef initialize(self, **config):\n\t\tsuper().initialize(**config)\n\t\tself.radius = config[\"radius\"]\n\t\tself.petals = config[\"petals\"]\n\n\t\tself.pivot = parametricRose(self.radius, self.petals, self.initial_angle)\n\n\tdef updateDisplacement(self):\n\t\t\"\"\"\n\t\tUpdate this PathElement's displacement.\n\t\t\"\"\"\n\t\tself.displacement = parametricRose(\n\t\t\tself.radius, self.petals, self.current_angle) - self.pivot\n\t\tself.current_angle += self.speed\n\n\t\tself._transition()\n\n\t\tself._checkDone()\n\n\nclass GearPathElement(ArclikePathElement):\n\n\t\"\"\"\n\tA PathElement that represents motion in a gear curve.\n\t\"\"\"\n\n\tdef initialize(self, **config):\n\t\tsuper().initialize(**config)\n\t\tself.radius = config[\"radius\"]\n\t\tself.gear_teeth = config[\"gearTeath\"]\n\t\tself.gear_offset = config.get(\"gearOffset\", 10)\n\n\t\tself.pivot = parametricGear(\n\t\t\tself.radius, self.gear_teeth, self.gear_offset, self.initial_angle)\n\n\tdef updateDisplacement(self):\n\t\t\"\"\"\n\t\tUpdate this PathElement's displacement.\n\t\t\"\"\"\n\t\tself.displacement = parametricGear(\n\t\t\tself.radius, self.gear_teeth, self.gear_offset, self.current_angle) - self.pivot\n\t\tself.current_angle += self.speed\n\n\t\tself._transition()\n\n\t\tself._checkDone()","sub_path":"dml/components/paths/arc.py","file_name":"arc.py","file_ext":"py","file_size_in_byte":6736,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"350691235","text":"from math import ceil\nimport matplotlib.pyplot as plt\nimport math\nimport numpy as np\nimport networkx as nx\nfrom numpy.random import default_rng\n\nlabel_def = {\n -1: 'All Classes',\n 0: 'speed limit 20',\n 1: 'speed limit 30',\n 2: 'speed limit 50',\n 3: 'speed limit 60',\n 4: 'speed limit 70',\n 5: 'left turn',\n 6: 'right turn',\n 7: 'beware pedestrian crossing',\n 8: 'beware children',\n 9: 'beware cycle route ahead'\n}\n\ndef class_to_colour(i):\n classes = {\n 0: 'tab:blue',\n 1: 'tab:orange',\n 2: 'tab:green',\n 3: 'tab:red',\n 4: 'tab:purple',\n 5: 'tab:brown',\n 6: 'tab:pink',\n 7: 'tab:gray',\n 8: 'tab:olive',\n 9: 'tab:cyan'\n }\n return classes.get(i)\n\ng_labels = [label_def.get(i) for i in range(-1, 10)]\n\ndef show_models(models_list, nrows=2, ncols=6, hide_last=True):\n \"\"\"Plot the models edges\n\n :param 
models_list: model\n :type models_list: list\n :param nrows: Number of rows, defaults to 2\n :type nrows: int, optional\n :param ncols: Number of columns, defaults to 6\n :type ncols: int, optional\n :param hide_last: set visibility of last to false, defaults to True\n :type hide_last: bool, optional\n \"\"\"\n fig, axes = plt.subplots(nrows=nrows, ncols=ncols, figsize=(24, 5))\n ax = axes.ravel()\n for i in range(len(models_list)):\n ax[i].axis('off')\n ax[i].set_title(label_def.get(i-1))\n nx.draw(models_list[i][0], with_labels=True, ax=ax[i])\n\n if hide_last:\n ax[-1].set_visible(False)\n fig.tight_layout()\n plt.show()\n\ndef plot_line_graph(data_to_plot, x_label_text='Pixel by rank', y_label_text='Accuracy(%)', title_text='Class by class pixel selection accuracy'):\n \"\"\"Generate n images from the dataset \n\n :param data_to_plot: A list containing the np arrays to plot\n :type data_to_plot: list[np.array]\n :param number_of_lines_to_plot: The number of lines to plot\n :type number_of_lines_to_plot: int, optional\n :param x_label_text: The label of the x axis \n :type x_label_text: string, optional\n :param y_label_text: The label of the y axis\n :type y_label_text: string, optional\n :param title_text: The title of the graph\n :type title_text: string, optional\n \"\"\"\n\n number_of_lines_to_plot = data_to_plot.shape[1]\n\n line_holder = []\n\n # The amount of points to plot on the x-axis\n number_of_points = data_to_plot[:,0].shape[0] + 1 \n\n # Creat the x axis\n x_axis = [i for i in range(1, number_of_points)]\n\n #add our lines to a linder holder list\n for i in range(data_to_plot.shape[1]):\n line_holder.append(data_to_plot[:,i])\n\n # Sets the size of the chart\n fig, ax = plt.subplots(figsize=(15, 15))\n\n # add lines to chart\n # add labels\n for i in range(number_of_lines_to_plot):\n ax.plot(x_axis,line_holder[i],label=label_def[i-1])\n\n # add legends to the graph\n ax.legend()\n\n ax.set(xlabel=x_label_text, ylabel=y_label_text, title=title_text)\n ax.grid()\n plt.show()\n\ndef plot_images(data, n=5, rows=1, cols=10, figsize=(15, 8), shuffle=True):\n \"\"\"Generate n images from the dataset \n\n :param data: matrix of square images\n :type data: numpy.array\n :param n: number of images to print, defaults to 5\n :type n: int, optional\n \"\"\"\n\n fig, axes = plt.subplots(nrows=rows, ncols=cols, figsize=figsize)\n ax = axes.ravel()\n\n if shuffle:\n rng = default_rng()\n rng.shuffle(data)\n\n d = int(round(math.sqrt(data.shape[1]), 0))\n if n > data.shape[0]:\n n = data.shape[0]\n\n for i in range(n):\n row = data[i]\n image = row.reshape(d, d)\n ax[i].imshow(image, cmap='gray')\n\n for i in range(len(ax)):\n ax[i].axis('off')\n ax[i].set_adjustable('box')\n\n plt.subplots_adjust(top = 1, bottom = 0, right = 1, left = 0, \n hspace = 0, wspace = 0)\n plt.margins(0,0)\n fig.tight_layout()\n plt.show()\n\n\ndef unzip_scores(score):\n \"\"\"unzip scores into 2 seperate lists\n\n :param score: list of score tuples\n :type score: list(float, float)\n :return: 2 numpy arrays\n :rtype: (np.array, np.array)\n \"\"\"\n return np.array([i for i, j in score]), np.array([j for i, j in score])\n\ndef convert_percentage(score_tuple):\n \"\"\"multiply the given numpy arrays in the tuple by 100\n\n :param score_tuple: np.array of scores within tuple\n :type score_tuple: (np.array, np.array)\n :return: scores * 100\n :rtype: (np.array, np.array)\n \"\"\"\n return score_tuple[0] * 100, score_tuple[1] * 100\n\n\ndef plot_scores(scores, group_labels, title='Scores for each classifier', 
bar_width=0.15, labels=g_labels, figure_size=(14, 8), y_label='% Accuracy'):\n \"\"\"Plot the contents of scores as a bar graph\n\n :param scores: A list containing lists of values to plot\n :type scores: list(list(number))\n :param group_labels: A list of labels for each sub-element in scores\n :type group_labels: list(string)\n :param title: Set the title of the figure, defaults to 'Scores for each classifier'\n :type title: str, optional\n :param bar_width: define the width of the bars, defaults to 0.15\n :type bar_width: float, optional\n :param labels: Specify the x-axis labels, defaults to g_labels\n :type labels: list(string), optional\n :param figure_size: size of the figure, defaults to (14, 8)\n :type figure_size: tuple, optional\n \"\"\"\n num_groups = len(group_labels)\n x = np.arange(len(labels))\n \n fig, ax = plt.subplots(figsize=figure_size)\n score_plots = []\n for i in range(num_groups):\n bar = ax.bar(x + (bar_width * i), scores[i], bar_width, label=group_labels[i])\n score_plots.append(bar)\n\n ax.set_title(title)\n ax.set_ylabel(y_label)\n ax.set_xticks(x + (bar_width * math.floor((num_groups / 2))))\n ax.set_xticklabels(labels, rotation=45)\n ax.legend()\n\n # https://matplotlib.org/3.1.1/gallery/lines_bars_and_markers/barchart.html#sphx-glr-gallery-lines-bars-and-markers-barchart-py\n def autolabel(bar_group):\n \"\"\"Attach a text label above each bar in *bar_group*, displaying its height.\"\"\"\n for bar in bar_group:\n height = bar.get_height()\n ax.annotate('{}'.format(height),\n xy=(bar.get_x() + bar.get_width() / 2, height),\n xytext=(0, 3), # 3 points vertical offset\n textcoords=\"offset points\",\n ha='center', va='bottom')\n\n for i in range(num_groups):\n autolabel(score_plots[i])\n\n fig.tight_layout()\n plt.show()\n\n\ndef scatter_clusters_by_class(kmeans, data_labelled, size=(10, 5), alpha=0.6, legend=(0, 10)):\n \"\"\"Produce scatter graph for clustering with classes as legend\n\n :param kmeans: the predicted kmeans labels\n :type kmeans: np.array\n :param data_labelled: data with class labels\n :type data_labelled: pandas.DataFrame\n :param size: size of the figure to plot, defaults to (10, 5)\n :type size: tuple, optional\n :param alpha: the alpha for the plots on the scatter graph, defaults to 0.6\n :type alpha: float, optional\n \"\"\"\n\n fig, ax = plt.subplots(figsize=size)\n num_clusters = np.unique(kmeans)\n for i in num_clusters:\n local_cluster = data_labelled[kmeans == i]\n u_labels = np.unique(local_cluster[['y']].to_numpy().flatten())\n for j in u_labels:\n x_ax = local_cluster[local_cluster['y'] == j][0]\n y_ax = local_cluster[local_cluster['y'] == j][1]\n ax.scatter(x_ax, y_ax, color=class_to_colour(\n j), label=j, alpha=alpha)\n ax.legend(range(legend[0], legend[1]))\n ax.set_title('Class true labels')\n fig.tight_layout()\n plt.show()\n\n\ndef scatter_clusters(kmeans, data, labels, size=(10, 5), alpha=1):\n \"\"\"Plot the clusters on the scatter graph\n\n :param kmeans: the predicted kmeans labels\n :type kmeans: np.array\n :param data: data with class labels\n :type data: pandas.DataFrame\n :param labels: Dataframe of the true labels for the data\n :type labels: pandas.DataFrame\n :param size: size of the figure to plot, defaults to (10, 5)\n :type size: tuple, optional\n :param alpha: the alpha for the plots on the scatter graph, defaults to 1\n :type alpha: int, optional\n \"\"\"\n\n fig, ax = plt.subplots(figsize=size)\n u_labels = np.unique(labels)\n for i in u_labels:\n ax.scatter(data[kmeans == i][0],\n data[kmeans == i][1], 
label=i, alpha=alpha)\n ax.legend()\n ax.set_title('Cluster assigned labels')\n fig.tight_layout()\n plt.show()\n \n","sub_path":"Tasks/Scripts/plotScripts.py","file_name":"plotScripts.py","file_ext":"py","file_size_in_byte":8633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"257648382","text":"import requests\nfrom bs4 import BeautifulSoup\n\ndef displayTitles(content):\n titles = []\n for c in content:\n titles.append(c.find_all(\"a\"))\n for t in titles:\n print(t[0].get_text())\n\nr = requests.get(\"https://shop.heise.de/zeitschriften/ct/sonderhefte\")\nsoup = BeautifulSoup(r.text,\"html.parser\")\nnav = soup.find(\"div\",class_=\"pages\").find_all(\"a\")\n# the following can't be used, as there are two navigation
s\n# nav = soup.find(\"div.pages > ol > li > a\")\ncontent = soup.find_all(\"h3\",class_=\"product_element_title\")\n\ndisplayTitles(content)\nfor n in nav:\n if n.has_attr(\"href\") == True and n.has_attr(\"class\") == False :\n r = requests.get(n[\"href\"])\n soup = BeautifulSoup(r.text,\"html.parser\")\n content = soup.find_all(\"h3\",class_=\"product_element_title\")\n displayTitles(content)\n\n","sub_path":"Exercise_19.py","file_name":"Exercise_19.py","file_ext":"py","file_size_in_byte":841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"593921995","text":"import time\nfrom datetime import datetime\nimport threading, queue\nfrom flask import redirect, url_for, make_response\nfrom .utils import close_bid, check_bid_status, search_bids\nfrom online_matching_system.models.bid_model import open_bids, close_bids\n\n\nclass BidObserver(object):\n\n def __init__(self,*args,**kwargs):\n self.observer_list = []\n\n def attach(self, bid_object, bid_type):\n \"\"\"\n params: bid_object, type of BidObject\n params: bid_type, a string either 'open' or 'close'\n return: -\n \"\"\"\n \n self.observer_list.append(bid_object)\n print(\"The observer list: \"+str(self.observer_list))\n print(self.observer_list)\n if bid_type.lower() == \"open\":\n BidTimer(bid_object, 604800)\n # BidTimer(bid_object, 60)\n elif bid_type.lower() == \"close\":\n BidTimer(bid_object, 604800)\n else:\n raise ValueError(bid_type)\n\n def detach(self, bid_object):\n \"\"\"\n params: bid_object, type of BidObject\n To remove the bid from the observer_list and call the close_bid function to close down the bid\n return: -\n \"\"\"\n\n if not bid_object.bought:\n bid_object.bought = True\n\n try:\n self.observer_list.remove(bid_object)\n except:\n pass\n\n status = close_bid(bid_object.id)\n \n # TODO: log the close bid information\n if (status != 200) | (status != 204):\n print(status)\n\n def find_and_detach(self, bid_id):\n \"\"\"\n params: bid_id, a string if bid ID\n This method is for function that have only bid_id info that wants to detach the bid\n \"\"\"\n # print(\"List before detaching: \"+str(self.observer_list))\n for bid in self.observer_list:\n # print(bid.id)\n if bid.id == bid_id:\n # bid.timer = 0\n self.detach(bid)\n\n\nclass BidTimer():\n\n def __init__(self, bid_object, time):\n self.bid_object = bid_object\n self.timer = time\n self.thread = threading.Thread(target=self.count_down, args=())\n self.thread.start()\n\n def count_down(self):\n \"\"\"\n This count down method will be called once the BidTimer is initialized with attached with the BidObject. Once the timer has reach 0, it will detach the bid\n \"\"\"\n\n while True:\n time.sleep(1)\n self.timer -= 1\n if self.timer == 0:\n if not self.bid_object.bought:\n # make the last bidder as the winner, or close the bid if there's no bidder\n check_bid_status(self.bid_object.id)\n self.bid_object.bought = True\n bid_observer.detach(self.bid_object)\n break\n if self.bid_object.bought:\n break\n\n\nclass BidObject():\n\n def __init__(self, bid_id):\n # self.timer = 0\n self.id = bid_id\n self.bought = False\n\n\nclass BidMonitor():\n \"\"\"\n the bid monitor that stores a list of bid that tutor desire to monitor. 
It will update the bid every 2 seconds to make sure the bid info is up to date\n \"\"\"\n\n def __init__(self):\n self.monitor_list = []\n self.thread = threading.Thread(target=self.run_monitor, args=())\n self.thread.start()\n\n def get_monitor_list(self):\n return self.monitor_list\n\n def get_monitor_bid(self, bid_id):\n print(bid_id)\n for bid in self.monitor_list:\n if bid['id'] == bid_id:\n return bid\n return None\n\n def add_bid(self, bid_id):\n \"\"\"\n add the bid into the monitor list\n\n Args:\n bid_id ([string]): [the ID of the bid to be added into monitor list]\n\n Returns:\n [boolean]: [True if the bid is found and added to the monitor list, else False if can't find the bid]\n \"\"\"\n bid = search_bids(bid_id)\n\n if bid not in self.monitor_list:\n self.monitor_list.append(bid)\n return True\n \n return False\n\n def remove_bid(self, bid_id):\n bid = search_bids(bid_id)\n self.monitor_list.remove(bid)\n\n def check_bid_closed_down(self):\n \"\"\"\n check if the bid is closed down, then remove it from the monitor list\n \"\"\"\n for bid in self.monitor_list:\n if bid['dateClosedDown']:\n self.monitor_list.remove(bid)\n\n def update_bid(self):\n \"\"\"\n to update the bid info that is in the monitor list\n \"\"\"\n new_monitor_list = []\n for bid in self.monitor_list:\n # search_bid function will update the bid before searching\n new_monitor_list.append(search_bids(bid['id']))\n\n self.monitor_list = new_monitor_list\n\n def run_monitor(self):\n \"\"\"\n use a while loop and sleep function to let the bid_monitor constantly update every 2 secs\n \"\"\"\n while True:\n self.check_bid_closed_down()\n self.update_bid()\n time.sleep(2)\n\n\nbid_observer = BidObserver()\nbid_monitor = BidMonitor()","sub_path":"online_matching_system/bids/observer.py","file_name":"observer.py","file_ext":"py","file_size_in_byte":5161,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"146990827","text":"#! /usr/bin/python\n\nimport pysrc.src.fileutils as fileutils\nimport os\nimport sys\n\nsys.path.append(\".\")\n\n\ndef file_size(f):\n try:\n val = os.stat(f).st_size\n return \"%s\" % val\n except PermissionError:\n return \"-1\"\n except FileNotFoundError:\n return \"-1\"\n\n\ndef _main():\n try:\n for root, dirs, files in os.walk(\"/\", topdown=False):\n for name in files:\n f = os.path.abspath(os.path.join(root, name))\n print('\"%s\", %s' % (f, file_size(f)))\n except KeyboardInterrupt:\n sys.exit(1)\n\n\nif __name__ == \"__main__\":\n _main()\n","sub_path":"pysrc/src/index_all_files.py","file_name":"index_all_files.py","file_ext":"py","file_size_in_byte":613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"538752192","text":"import random\r\nbattleshipWasHitNumber = 0\r\n\r\ndef getInputFromPlayer():\r\n try:\r\n playerInputX = int(input(\"Podaj współrzędną x punktu, gdzie chcesz strzelić:\\n\"))\r\n\r\n playerInputY = int(input(\"Podaj współrzędną y punktu, gdzie chcesz strzelić:\\n\"))\r\n print(\"\\n\")\r\n except ValueError:\r\n print(\"Błąd. 
Podane współrzędne muszą być typu int!\")\r\n else:\r\n return (playerInputX, playerInputY)\r\n\r\nclass Battleship():\r\n\r\n def __init__(self, startingPoint, length, direction):\r\n self.startingPoint = startingPoint\r\n self.length = length\r\n self.direction = direction\r\n self.coordinatesList = []\r\n self.hitDetetction = [False] * length\r\n self.createBattleship()\r\n\r\n def createBattleship(self):\r\n for i in range(self.length):\r\n if self.direction == \"UP\":\r\n coordinate = (self.startingPoint[0], self.startingPoint[1] - i)\r\n elif self.direction == \"DOWN\":\r\n coordinate = (self.startingPoint[0], self.startingPoint[1] + i)\r\n elif self.direction == \"LEFT\":\r\n coordinate = (self.startingPoint[0] - i, self.startingPoint[1])\r\n elif self.direction == \"RIGHT\":\r\n coordinate = (self.startingPoint[0] + i, self.startingPoint[1])\r\n \r\n self.coordinatesList.append(coordinate)\r\n\r\n def isDestroyed(self):\r\n for i in self.hitDetetction:\r\n if not i:\r\n return False\r\n return True\r\n\r\n# logic and state of the game\r\nclass Game():\r\n\r\n def __init__(self, boardWidth, boardHeight):\r\n self.boardWidth = boardWidth\r\n self.boardHeight = boardWidth\r\n self.allShots = {} #słownik - zawiera pary ((lokX, lokY), isHit)\r\n self.battleships = [];\r\n\r\n def addBattleship(self, battleship):\r\n for i in battleship.coordinatesList:\r\n # check range\r\n if (i[0] < 1) or (i[0] > self.boardWidth):\r\n return False\r\n if (i[1] < 1) or (i[1] > self.boardHeight):\r\n return False\r\n \r\n # check collisions with other battleships\r\n for j in self.battleships:\r\n for k in j.coordinatesList:\r\n if (i[0] == k[0]) and (i[1] == k[1]):\r\n return False\r\n\r\n self.battleships.append(battleship)\r\n return True\r\n\r\n def placeRandomBattleships(self, numOf5=1, numOf4=2, numOf3=3, numOf2=3):\r\n self.placeRandomBattleshipOfType(numOf5, 5)\r\n self.placeRandomBattleshipOfType(numOf4, 4)\r\n self.placeRandomBattleshipOfType(numOf3, 3)\r\n self.placeRandomBattleshipOfType(numOf2, 2)\r\n\r\n def placeRandomBattleshipOfType(self, numberOfShips, battleshipLength):\r\n directionsList = [\"LEFT\", \"RIGHT\", \"DOWN\", \"UP\"]\r\n battleshipAddSuccess = False\r\n\r\n for i in range(numberOfShips):\r\n while not battleshipAddSuccess:\r\n randomStartingPointX = random.randrange(1, self.boardWidth+1)\r\n randomStartingPointY = random.randrange(1, self.boardHeight+1)\r\n randomDirection = random.randrange(0, 4)\r\n randomDirection = directionsList[randomDirection]\r\n if self.addBattleship(Battleship((randomStartingPointX, randomStartingPointY), battleshipLength, randomDirection)):\r\n battleshipAddSuccess = True\r\n battleshipAddSuccess = False \r\n\r\n def shotIsCorrect(self, myShot):\r\n if (myShot[0] < 1) or (myShot[0] > self.boardWidth):\r\n return False\r\n if (myShot[1] < 1) or (myShot[1] > self.boardHeight):\r\n return False\r\n if myShot in self.allShots:\r\n return False\r\n return True\r\n\r\n def shoot(self, myShot):\r\n\r\n isHit = False\r\n for Battleship in self.battleships:\r\n if myShot in Battleship.coordinatesList:\r\n isHit = True\r\n print(\"Trafiony!\")\r\n try:\r\n myShotIndex = Battleship.coordinatesList.index(myShot)\r\n except ValueError:\r\n print(\"Nie znaleziono indeksu twojego trafienia w Battleship\")\r\n Battleship.hitDetetction[myShotIndex] = isHit\r\n if Battleship.isDestroyed():\r\n self.battleships.remove(Battleship)\r\n print(\"Trafiony zatopiony!\")\r\n break\r\n\r\n self.allShots[myShot] = isHit\r\n return isHit\r\n\r\n def isOver(self):\r\n if 
self.battleships:\r\n return False\r\n\r\n # print(\"Brawo! Wygrałeś grę!\")\r\n return True\r\n \r\ndef renderBoardRowByRow(game, showBattleships=True):\r\n print(\"\\n\")\r\n\r\n # render top of board\r\n topOfBoard = \" \"\r\n for i in range(1, game.boardWidth+1):\r\n topOfBoard += str(i) + \" \"\r\n print(topOfBoard)\r\n\r\n for y in range(1, game.boardHeight+1):\r\n row = []\r\n\r\n # render left column of numbers\r\n if(y < 10):\r\n row.append(str(y) + \" \")\r\n else:\r\n row.append(str(y))\r\n\r\n # add '.' to row\r\n for x in range(1, game.boardWidth+1):\r\n row.append(\"_\")\r\n\r\n # render all battleships\r\n if showBattleships:\r\n for x in range(1, game.boardHeight+1):\r\n for i in range(len(game.battleships)): # wybiera statek\r\n for j in range(len(game.battleships[i].coordinatesList)): # wybiera krotke\r\n if game.battleships[i].coordinatesList[j][1] == y:\r\n # render front and back of the ship\r\n if (j == 0 and game.battleships[i].direction == \"UP\") or (j == len(game.battleships[i].coordinatesList)-1 and game.battleships[i].direction == \"DOWN\"):\r\n row[game.battleships[i].coordinatesList[j][0]] = \"v\"\r\n elif (j == 0 and game.battleships[i].direction == \"DOWN\") or (j == len(game.battleships[i].coordinatesList)-1 and game.battleships[i].direction == \"UP\"):\r\n row[game.battleships[i].coordinatesList[j][0]] = \"^\"\r\n elif (j == 0 and game.battleships[i].direction == \"LEFT\") or (j == len(game.battleships[i].coordinatesList)-1 and game.battleships[i].direction == \"RIGHT\"):\r\n row[game.battleships[i].coordinatesList[j][0]] = \">\"\r\n elif (j == 0 and game.battleships[i].direction == \"RIGHT\") or (j == len(game.battleships[i].coordinatesList)-1 and game.battleships[i].direction == \"LEFT\"):\r\n row[game.battleships[i].coordinatesList[j][0]] = \"<\"\r\n else:\r\n row[game.battleships[i].coordinatesList[j][0]] = \"#\"\r\n \r\n # render all shots\r\n for x in range(1, game.boardWidth+1):\r\n if(x, y) in game.allShots:\r\n if game.allShots.get((x, y)) == False:\r\n row[x] = \"O\"\r\n else:\r\n row[x] = \"X\"\r\n\r\n print(\" \".join(row))\r\n\r\n print(\"\\n\")\r\n\r\ndef getInputFromComputer(battleshipWasHit, computerLastHits, game):\r\n lastShot = computerLastHits[-1]\r\n if battleshipWasHit:\r\n randomDirX, randomDirY = 1000, 1000 #must be bigger than boardWidth and boardHeight\r\n # shoot close to last hit (+- 1) and check if next shot is inside the board\r\n while (lastShot[0] + randomDirX < 1) or (lastShot[0] + randomDirX > game.boardWidth) or (lastShot[1] + randomDirY < 1) or (lastShot[1] + randomDirY > game.boardWidth) or ((lastShot[0] + randomDirX, lastShot[1] + randomDirY) in game.allShots):\r\n randomDirX = random.randrange(-1, 2)\r\n if randomDirX == 0:\r\n randomDirY = random.randrange(-1, 2, 2)\r\n else:\r\n randomDirY = 0;\r\n return (lastShot[0] + randomDirX, lastShot[1] + randomDirY)\r\n else:\r\n return (random.randrange(1, game.boardWidth+1), random.randrange(1, game.boardWidth+1))\r\n\r\n\r\ndef getInputFromComputer2(battleshipWasHit, computerLastHits, game):\r\n global battleshipWasHitNumber\r\n\r\n lastShot = computerLastHits[-1]\r\n print(lastShot)\r\n # check which battleship was hit\r\n if battleshipWasHit:\r\n for Battleship in game.battleships:\r\n for i in Battleship.hitDetetction:\r\n if i == True:\r\n battleshipWasHitNumber = Battleship\r\n break\r\n\r\n print(battleshipWasHitNumber)\r\n\r\n # if the hit battleship wasn't destroyed\r\n if battleshipWasHitNumber:\r\n # must be bigger than boardWidth and boardHeight\r\n randomDirX, 
randomDirY = 1000, 1000\r\n\r\n sumOfDirectionsHit = 0\r\n # check if all directions are already shot\r\n if not game.shotIsCorrect((lastShot[0] + 1, lastShot[1] + 0)):\r\n sumOfDirectionsHit += 1\r\n if not game.shotIsCorrect((lastShot[0] - 1, lastShot[1] + 0)):\r\n sumOfDirectionsHit += 1\r\n if not game.shotIsCorrect((lastShot[0] + 0, lastShot[1] + 1)):\r\n sumOfDirectionsHit += 1\r\n if not game.shotIsCorrect((lastShot[0] + 0, lastShot[1] - 1)):\r\n sumOfDirectionsHit += 1\r\n\r\n if sumOfDirectionsHit == 4:\r\n del computerLastHits[-1]\r\n lastShot = computerLastHits[-1]\r\n \r\n # shoot close to last hit (+- 1) and check if next shot is inside the board\r\n while (lastShot[0] + randomDirX < 1) or (lastShot[0] + randomDirX > game.boardWidth) or (lastShot[1] + randomDirY < 1) or (lastShot[1] + randomDirY > game.boardWidth) or ((lastShot[0] + randomDirX, lastShot[1] + randomDirY) in game.allShots):\r\n randomDirX = random.randrange(-1, 2)\r\n if randomDirX == 0:\r\n randomDirY = random.randrange(-1, 2, 2)\r\n else:\r\n randomDirY = 0\r\n return (lastShot[0] + randomDirX, lastShot[1] + randomDirY)\r\n else:\r\n return (random.randrange(1, game.boardWidth+1), random.randrange(1, game.boardWidth+1))\r\n\r\ndef playerVsPlayer():\r\n boardWidth, boardHeight = 10, 10\r\n computerLastHit = (0, 0)\r\n\r\n game1 = Game(boardWidth, boardHeight)\r\n game1.placeRandomBattleships()\r\n\r\n game2 = Game(boardWidth, boardHeight)\r\n game2.placeRandomBattleships()\r\n\r\n while 1:\r\n game1IsHit, game2IsHit = True, True\r\n game1IsOver, game2IsOver = False, False\r\n\r\n print(\"KOLEJ GRACZA 1\")\r\n while game1IsHit and not game1IsOver:\r\n myShot1 = getInputFromPlayer()\r\n \r\n if not game1.shotIsCorrect(myShot1):\r\n print(\"Nieprawidłowy strzał. Powtórz ruch!\")\r\n game1IsHit = True\r\n else:\r\n game1IsHit = game1.shoot(myShot1)\r\n\r\n if not game1IsHit:\r\n print(\"Pudło!\")\r\n\r\n renderBoardRowByRow(game1, False)\r\n game1IsOver = game1.isOver()\r\n\r\n if game1IsOver:\r\n print(\"Brawo! Gracz 1 wygrał grę.\")\r\n break\r\n\r\n print(\"KOLEJ GRACZA 2\")\r\n while game2IsHit and not game2IsOver:\r\n myShot2 = getInputFromPlayer()\r\n\r\n if not game1.shotIsCorrect(myShot1):\r\n print(\"Nieprawidłowy strzał. Powtórz ruch!\")\r\n game1IsHit=True\r\n else:\r\n game1IsHit = game1.shoot(myShot1)\r\n\r\n if not game2IsHit:\r\n print(\"Pudło!\")\r\n\r\n renderBoardRowByRow(game2, False)\r\n game2IsOver = game2.isOver()\r\n\r\n if game2IsOver:\r\n print(\"Brawo! Gracz 2 wygrał grę.\")\r\n break\r\n\r\ndef playerVsComputer():\r\n boardWidth, boardHeight = 10, 10\r\n computerLastHits = []\r\n computerLastHits.append((0,0))\r\n\r\n game1 = Game(boardWidth, boardHeight)\r\n game1.placeRandomBattleships()\r\n # game1.addBattleship(Battleship((1, 1), 2, \"RIGHT\"))\r\n\r\n game2 = Game(boardWidth, boardHeight)\r\n game2.placeRandomBattleships()\r\n # game2.addBattleship(Battleship((1, 1), 1, \"RIGHT\"))\r\n\r\n while 1:\r\n game1IsHit, game2IsHit = True, False\r\n game1IsOver, game2IsOver = False, False\r\n doWhileCondition = True\r\n\r\n print(\"KOLEJ GRACZA\")\r\n while game1IsHit and not game1IsOver:\r\n myShot1 = getInputFromPlayer()\r\n print(myShot1)\r\n if not game1.shotIsCorrect(myShot1):\r\n print(\"Nieprawidłowy strzał. Powtórz ruch!\")\r\n game1IsHit=True\r\n else:\r\n game1IsHit = game1.shoot(myShot1)\r\n\r\n if not game1IsHit:\r\n print(\"Pudło!\")\r\n\r\n renderBoardRowByRow(game1, False)\r\n game1IsOver = game1.isOver()\r\n\r\n if game1IsOver:\r\n print(\"Brawo! 
Wygrałeś grę!\")\r\n break\r\n\r\n print(\"KOMPUTER ROBI RUCH\")\r\n while doWhileCondition and not game2IsOver:\r\n myShot2 = getInputFromComputer(game2IsHit, computerLastHits, game2)\r\n game2IsHit = game2.shoot(myShot2)\r\n if game2IsHit:\r\n computerLastHits.append(myShot2)\r\n if not game2IsHit:\r\n print(\"Pudło!\")\r\n renderBoardRowByRow(game2, True)\r\n game2IsOver = game2.isOver()\r\n doWhileCondition = game2IsHit\r\n\r\n if game2IsOver:\r\n print(\"Przegana :( Komputer wygrał grę!\")\r\n break\r\n\r\ndef main():\r\n print(\"\\nWitaj w grze battleships!\")\r\n print(\"\\'_\\' oznacza puste miejsce\")\r\n print(\"\\'O\\' oznacza puste pudło\")\r\n print(\"\\'X\\' oznacza puste trafiony\")\r\n print(\"\\'#\\', \\'>\\', \\'<\\', \\'^\\', \\'v\\' oznaczają twoje statki\\n\")\r\n\r\n playerVsComputer()\r\n # playerVsPlayer()\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","sub_path":"Python/Gra_statki/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":13930,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"166800694","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue May 29 19:11:28 2018\n\n@author: luoningqi\n\"\"\"\nfrom bcmlp import bcmlp\nimport numpy as np\nimport tensorflow as tf\nfrom funcs import XOR4\nfrom funcs import writeAns\n\ndef getData(N,err):\n inl = np.zeros((N))\n for i in range(N):\n inl[i] = np.random.randint(0,2)\n tag = getTag(inl)\n for i in range(N):# add noise\n itag = np.random.randint(0,3)\n if itag==0:\n inl[i] += err\n elif itag==1:\n inl[i] -= err\n else:\n inl[i] += 0.0\n return inl,tag\n\ndef getTag(inl):#XOR4\n return XOR4(inl[0],inl[1],inl[2],inl[3])\n\ndef test(sess, model, N, err):\n # init param\n rou = 100\n deviation = 0.0\n # test\n for i in range(rou):\n p,q = getData(N, err)\n outp = sess.run(model.outp, {model.inp: p, model.lab: q})\n deviation += abs(round(outp)-q)\n # return accuracy\n return 1-deviation/rou\n\nif __name__==\"__main__\":\n # init state\n model = bcmlp(2)\n sess = tf.Session()\n sess.run(tf.global_variables_initializer())\n ''' debug\n tf.summary.scalar('loss', model.loss)\n merged = tf.summary.merge_all()\n writer = tf.summary.FileWriter('./tfgraphs', sess.graph)\n '''\n err = 0.20 #[0.20,0.30,0.40]:\n ans = []\n for i in range(10000):\n # get test data\n p,q = getData(4,err)\n ''' train '''\n sess.run(model.train, {model.inp: p, model.lab: q})\n ''' test '''\n testacc = test(sess, model, 4, err)\n print('steps:'+str(i))\n print(testacc)\n ans.append(testacc)\n # record\n writeAns(ans,'bcmlp_noisytask_err'+str(int(err*100)))","sub_path":"ExpClassification.py","file_name":"ExpClassification.py","file_ext":"py","file_size_in_byte":1678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"169921558","text":"#for numarical operations\nimport numpy as np\n#for ploting\nimport matplotlib.pyplot as plt\n#for importing and managing data sets\nimport pandas as pd\n\n\n#for prepare X and y\ndataset=pd.read_csv('credit_card_default_train.csv')\n#preprocessing balance limits\ndataset['Balance_Limit_V1'] = dataset['Balance_Limit_V1'].str.replace('M','000000')\ndataset['Balance_Limit_V1'] = dataset['Balance_Limit_V1'].str.replace('K','000')\ndataset['Balance_Limit_V1'] = dataset['Balance_Limit_V1'].astype(float)\n\nX = dataset.iloc[:, 1:24]\ny = dataset.iloc[:, 24].values\n\n\n\n#Get the corelation matrix\ncorr=dataset.corr()\n\n#Finiding missing 
data\nmissing_data=dataset[dataset.isnull().any(axis=1)]\n\n# Encoding categorical data for training set\nX = pd.get_dummies( X,columns =['Gender','EDUCATION_STATUS','MARITAL_STATUS','AGE','PAY_JULY','PAY_AUG','PAY_SEP','PAY_OCT','PAY_NOV','PAY_DEC'] )\n\nX.insert(71, 'PAY_NOV_1', np.zeros(shape=(len(X),1)))\nX.insert(82, 'PAY_DEC_1', np.zeros(shape=(len(X),1)))\n\n\ncols_to_drop = [ 'Gender_F','EDUCATION_STATUS_Graduate','MARITAL_STATUS_Other','AGE_31-45','PAY_JULY_-2','PAY_AUG_-2','PAY_SEP_-2','PAY_OCT_-2','PAY_NOV_-2','PAY_DEC_-2' ]\nX = X.drop( cols_to_drop, axis = 1 )\n\nX=X.iloc[:,:].values\n\n# Splitting the data into training and testing sets\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test=train_test_split(X,y, test_size=0.2, random_state=0)\n \n#Feature scaling\nfrom sklearn.preprocessing import StandardScaler\nsc_X=StandardScaler()\nX_train=sc_X.fit_transform(X_train)\nX_test=sc_X.transform(X_test)\n\n#Fitting the data set using XGboost\n'''from xgboost import XGBClassifier\nclassifier_Xgboost=XGBClassifier(n_estimators=130, max_depth=5,learning_rate=0.1, colsample_bytree=0.8) #colsample 0.8 \nclassifier_Xgboost.fit(X_train, y_train)'''\n\n#Fitting data using Catboost\nfrom catboost import CatBoostRegressor\nclassifier_CatBoost=CatBoostRegressor(iterations=270, depth=5, learning_rate=0.1)\nclassifier_CatBoost.fit(X_train, y_train)\n \n#predict the test set\ny_pred=classifier_CatBoost.predict(X_test)\n\ny_pred=(y_pred>0.5)\n\n\n#making the confusion matrix\nfrom sklearn.metrics import confusion_matrix\ncm=confusion_matrix(y_test,y_pred)\nprint(cm)\n\nfrom sklearn.metrics import accuracy_score\nprint(\"Accuracy is \",accuracy_score(y_test,y_pred)*100)\n\n\n\n#Fitting the Training set (Logistic Regression)\nfrom sklearn.linear_model import LogisticRegression\nclassifier_Logistic=LogisticRegression(random_state=0)\nclassifier_Logistic.fit(X_train,y_train)\n\n#Fitting the data set using XGboost\nfrom xgboost import XGBClassifier\nclassifier_Xgboost=XGBClassifier(n_estimators=315, max_depth=4,learning_rate=0.1, colsample_bytree=0.8) #colsample 0.8 n_estimator = 350\nclassifier_Xgboost.fit(X_train, y_train)\n\n#Fitting whole test (Random Forest)\nfrom sklearn.ensemble import RandomForestClassifier\nclassifier_forest=RandomForestClassifier(n_estimators=10, n_jobs=2, criterion='entropy',random_state=0)\nclassifier_forest.fit(X_train,y_train)\n\n#make the network\nimport keras\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.layers import LeakyReLU\n\n#Initializing the ANN\nclassifier=Sequential()\n\n#Adding input layer and the first hidden layer\nclassifier.add(Dense(output_dim=40, init= 'uniform',activation='relu', input_dim=80))\n\n#adding the second hidden layer\nclassifier.add(Dense(output_dim=20 , init= 'uniform',activation='relu'))\n\n#adding the third hidden layer\n#classifier.add(Dense(output_dim=10 , init= 'uniform',activation='relu'))\n\n#Adding the output layer\nclassifier.add(Dense(output_dim=1 , init= 'uniform',activation='sigmoid'))\n\n#compiling the neural network\nclassifier.compile(optimizer='adam',loss='binary_crossentropy', metrics=['accuracy'])\n\n#fitting the ANN to the training set\nclassifier.fit(X_train,y_train,batch_size=10,nb_epoch=100)\n\n#Predicting the test set\ny_pred=classifier.predict(X_test)\n\ny_pred=(y_pred>0.5)\n\n\n\n#predict the test set\ny_pred=classifier_Xgboost.predict(X_test)\n\n#making the confusion matrix\nfrom sklearn.metrics import 
confusion_matrix\ncm=confusion_matrix(y_test,y_pred)\nprint(cm)\n\nfrom sklearn.metrics import accuracy_score\nprint(\"Accuracy is \",accuracy_score(y_test,y_pred)*100)\n\n\n\nfrom sklearn.preprocessing import LabelEncoder\nlabelEncoder_X=LabelEncoder()\ny_pred=labelEncoder_X.fit_transform(y_pred)\n\ntt=classifier_CatBoost.predict(X_train)\ntt=(tt>0.5)\n\n#making the confusion matrix\nfrom sklearn.metrics import confusion_matrix\ncm=confusion_matrix(y_test,y_pred)\n\ntt_cm=confusion_matrix(y_train,tt)\n \nacc_test=(cm[0,0]+cm[1,1])/sum(sum(cm))*100\nprint(acc_test)\n \nF1_acc=(cm[1,1])/(cm[1,0]+cm[1,1])*100\nprint(F1_acc)\n \nacc_train=(tt_cm[0,0]+tt_cm[1,1])/sum(sum(tt_cm))*100\nprint(acc_train)\n\n\n\n\n \n############################## Train whole data set ################################## \n\n#Feature scaling\nfrom sklearn.preprocessing import StandardScaler\nsc_X=StandardScaler()\nX_tot=sc_X.fit_transform(X)\n\n\n#Fitting the Training set (Logistic Regression)\nfrom sklearn.linear_model import LogisticRegression\nclassifier1=LogisticRegression(random_state=0)\nclassifier1.fit(X_tot,y)\n\n#Fitting whole test (Random Forest)\nfrom sklearn.ensemble import RandomForestClassifier\nclassifier_forest1=RandomForestClassifier(n_estimators=12, criterion='entropy',random_state=0)\nclassifier_forest1.fit(X_tot,y)\n\n\n#Fitting the data set using XGboost\nfrom xgboost import XGBClassifier\nclassifier_Xgboost1=XGBClassifier(n_estimators=315, max_depth=4,learning_rate=0.1, colsample_bytree=0.8) #colsample 0.8 n_estimator = 350\nclassifier_Xgboost1.fit(X_tot,y)\n\n\n#Fitting data using Catboost\nfrom catboost import CatBoostRegressor\nclassifier_CatBoost1=CatBoostRegressor(iterations=270, depth=3, learning_rate=0.1, loss_function='RMSE')\nclassifier_CatBoost1.fit(X_tot,y)\n\n\n######################################################################################\n\n\n#prepare X_testing\ntest_set=pd.read_csv('credit_card_default_test.csv')\nX_testing = test_set.iloc[:, 1:]\n\n\n#preprocessing balance limits\nX_testing['Balance_Limit_V1'] = X_testing['Balance_Limit_V1'].str.replace('M','000000')\nX_testing['Balance_Limit_V1'] = X_testing['Balance_Limit_V1'].str.replace('K','000')\nX_testing['Balance_Limit_V1'] = X_testing['Balance_Limit_V1'].astype(float)\n\n#Finding missing data\nmissing_data_testset=test_set[test_set.isnull().any(axis=1)]\n\n\n\n# Encoding categorical data for test set\nX_testing = pd.get_dummies( X_testing,columns =['Gender','EDUCATION_STATUS','MARITAL_STATUS','AGE','PAY_JULY','PAY_AUG','PAY_SEP','PAY_OCT','PAY_NOV','PAY_DEC'] )\n\n#X_testing['PAY_AUG_8']=pd.DataFrame(np.zeros(shape=(len(X_testing),1)))\n\nX_testing.insert(45, 'PAY_AUG_8', np.zeros(shape=(len(X_testing),1)))\nX_testing.insert(49, 'PAY_SEP_1', np.zeros(shape=(len(X_testing),1)))\nX_testing.insert(60, 'PAY_OCT_1', np.zeros(shape=(len(X_testing),1)))\nX_testing.insert(67, 'PAY_OCT_8', np.zeros(shape=(len(X_testing),1)))\nX_testing.insert(71, 'PAY_NOV_1', np.zeros(shape=(len(X_testing),1)))\n\nX_testing.insert(78, 'PAY_NOV_8', np.zeros(shape=(len(X_testing),1)))\nX_testing.insert(82, 'PAY_DEC_1', np.zeros(shape=(len(X_testing),1)))\nX_testing.insert(89, 'PAY_DEC_8', np.zeros(shape=(len(X_testing),1)))\n\n\n\ncols_to_drop = [ 'Gender_F','EDUCATION_STATUS_Graduate','MARITAL_STATUS_Other','AGE_31-45','PAY_JULY_-2','PAY_AUG_-2','PAY_SEP_-2','PAY_OCT_-2','PAY_NOV_-2','PAY_DEC_-2' ]\nX_testing = X_testing.drop( cols_to_drop, axis = 1 )\n\nX_testing=X_testing.iloc[:,:].values\n\n#Feature scaling\nfrom sklearn.preprocessing import 
StandardScaler\nsc_X=StandardScaler()\nX_testing=sc_X.fit_transform(X_testing)\n\n#Random Forest\ny_testing=classifier_forest1.predict(X_testing)\n\n#XGBoost\ny_testing=classifier_Xgboost1.predict(X_testing)\n\n#CatBoost\ny_testing=classifier_CatBoost1.predict(X_testing)\n\n#CatBoost\ny_testing=(y_testing>0.5)\n\nfrom sklearn.preprocessing import LabelEncoder\nlabelEncoder_X=LabelEncoder()\ny_testing=labelEncoder_X.fit_transform(y_testing)\n\n\n#Visualizing the data\nplt.scatter(X_train[:,0],X_train[:,1],color='red')\nplt.scatter(X_test,y_test,color='black')\nplt.plot(X_train,regression.predict(X_train),color='blue')\nplt.plot(X_test,regression.predict(X_test),color='green')\nplt.title('Salary vs Experience')\nplt.xlabel('Experience')\nplt.ylabel('Salary')\nplt.show()\n\n","sub_path":"Day3/Submission_3.py","file_name":"Submission_3.py","file_ext":"py","file_size_in_byte":8178,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"381866321","text":"# coding: utf-8\nfrom __future__ import unicode_literals\n\nimport codecs\nimport json\nimport os\nimport itertools\n\n\ndef get_env(env_name, default_value, logger=None):\n if env_name not in os.environ:\n warning_meg = \"can't find env: {}, use default: {}\".format(env_name, default_value)\n if logger:\n logger.warning(warning_meg)\n else:\n print(warning_meg)\n return default_value\n return os.environ[env_name]\n\n\ndef flat_nested_list(nested_list, flat_depth=1):\n \"\"\"flat nested list\n :param nested_list: nested list\n :type nested_list: list\n :param flat_depth: depth of iterative to flat list\n :type flat_depth: int\n :return: list which nest level = nested_list - flat_depth\n :rtype: list\n \"\"\"\n tmp_lst = nested_list\n for round in range(flat_depth):\n tmp_lst = list(itertools.chain(*tmp_lst))\n return tmp_lst\n\n\ndef judge_label_error(table_y_list, documents, tagged_data_json_path, field_id):\n class_one_num = 0\n class_zero_num = 0\n for table_y in table_y_list:\n if table_y:\n class_one_num += 1\n else:\n class_zero_num += 1\n\n if class_zero_num == 0 or class_one_num == 0:\n error_doc_ids = []\n error_label_items = []\n tagged_data_list = json.load(codecs.open(tagged_data_json_path, encoding='utf-8'))\n for document in documents:\n for tagged_data in tagged_data_list:\n content = tagged_data['content']\n if document == content:\n error_doc_ids.append(tagged_data['doc_id'])\n error_label_items.append(\n [label_item for label_item in tagged_data['fields'] if label_item['field_id'] == field_id])\n error_message = 'one class exception , and the possible table label error is:\\t error_doc_ids: {}\\t error_label_items: {}'.format(\n json.dumps(error_doc_ids), json.dumps(error_label_items, ensure_ascii=False))\n return True, error_message\n else:\n return False, 'not label error but other error leading to svm fit exception'\n","sub_path":"text_classification/app/utils/other_util.py","file_name":"other_util.py","file_ext":"py","file_size_in_byte":2109,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"203071551","text":"from Services.SpiderService import init_spider, load_image, load_video\nfrom queue import Queue\nfrom tumblpy.exceptions import TumblpyError\nfrom time import sleep\n\n\ndef spider_image_video(offset, queue):\n res = init_spider()\n if queue.empty():\n blog_queue = res.get(\"blog_queue\")\n else:\n blog_queue = queue\n key_queue = res.get(\"key_queue\")\n while not key_queue.empty():\n key = key_queue.get()\n while 
not blog_queue.empty():\n blog = blog_queue.get()\n try:\n kwargs = {\n \"key\": key,\n \"blog\": blog,\n \"offset\": offset\n }\n load_image(kwargs)\n load_video(kwargs)\n except TumblpyError as e:\n if e.error_code == 503:\n fail_queue.put(blog)\n key = key_queue.get()\n continue\n\n\ndef spider_catch_up(queue):\n for offset in range(0, 21):\n if queue.empty():\n spider_image_video(offset, queue)\n else:\n spider_image_video(offset, queue)\n\n\nif __name__ == '__main__':\n while True:\n fail_queue = Queue()\n spider_catch_up(fail_queue)\n if not fail_queue.empty():\n spider_catch_up(fail_queue)\n sleep(3600)\n","sub_path":"Spider.py","file_name":"Spider.py","file_ext":"py","file_size_in_byte":1327,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"538218221","text":"# Using class-level and static methods\n\nclass Book:\n # Properties defined at the class level are shared by all instances\n BOOK_TYPES = (\"HARDCOVER\", \"PAPERBACK\", \"EBOOK\")\n\n # double-underscore properties are hidden from other classes\n __booklist = None\n \n # create a class method that returns booktype list\n # we are using a decorator @classmethod\n @classmethod\n def getbooktypes(cls):\n return cls.BOOK_TYPES\n\n # create a static method\n # --------------- static Method to create a singleton \n # (only one instance of a particular variable or object is ever created)\n @staticmethod\n def getbooklist():\n #if no book list yet create one\n if Book.__booklist == None:\n Book.__booklist = []\n return Book.__booklist\n\n # instance methods received a specific object instance as an argument\n # and operate on data specific to that object instance\n def setTitle(self, newtitle):\n self.title = newtitle\n\n def __init__(self, title, booktype):\n self.title = title\n # values the class Book allows\n if (not booktype in Book.BOOK_TYPES):\n raise ValueError(f\"{booktype} is not a valid book type\")\n else:\n self.booktype = booktype\n\n\n# Access the class attribute\nprint(\"Book types: \", Book.getbooktypes())\n# Create some book instances\n\nb1 = Book(\"Canada Poppy Sugar\", \"HARDCOVER\")\n\n#Will give an error , comic is not one of predefined book types\n#b2 = Book(\"Montreal Swetten Treats\", \"COMIC\")\nb2 = Book(\"Montreal Swetten Treats\", \"EBOOK\")\n\n# use the static method to access a singleton object\n'''\nStatic methods don't modify the state of either the class or \na specific object instances. There not great cases for static methods\nThey are useful, however for scenarios when you don't need to access\nany properties of a particular object or the class itself.\nThis is a way of taking a global function and put it the class namespace.\n'''\nthebooks = Book.getbooklist()\nthebooks.append(b1)\nthebooks.append(b2)\nprint(thebooks)\n","sub_path":"Python_scripting/Review_Python/ObjOrientPro/4-class_static_methods_attri.py","file_name":"4-class_static_methods_attri.py","file_ext":"py","file_size_in_byte":2053,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"364485316","text":"#Create two lists. The first list should consist of odd numbers. The second list is also of even numbers.\n#Merge two lists. 
Multiply all values in the new list by 2.\n#Use the loop to print the data type of all values in the new list.\n\n#Question 1\n\nmylist1 = [3,5,7,9]\nmylist2 = [10,12,14,16]\n\nmylist1.extend(mylist2)\n\nmylist3 =[x*2 for x in mylist1]\n\nfor i in mylist3: #prints the elements of mylist3\n    print(i, end=\" \")\nprint(\"\\n\") \nfor i in mylist3: #prints the type of each element of mylist3\n    print(type(i), end=\" \") \n","sub_path":"Homeworks/HW1.py","file_name":"HW1.py","file_ext":"py","file_size_in_byte":541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"637383480","text":"import json\nimport requests\nfrom requests.auth import HTTPBasicAuth\n\ndef HelloPython():\n    url = \"http://public.coindaddy.io:4000/api/\"\n    data = {}\n    data['content-type'] = 'application/json' \n    auth = HTTPBasicAuth('rpc', '1234')\n\n    payload = {\n        \"method\": \"get_running_info\",\n        \"params\": {},\n        \"jsonrpc\": \"2.0\",\n        \"id\": 0,\n    }\n    response = requests.post(url, data=json.dumps(payload), headers=data, auth=auth)\n    \n    return response.text","sub_path":"lib/pkg/python/hello.py","file_name":"hello.py","file_ext":"py","file_size_in_byte":464,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"102210570","text":"from PIL import Image \r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\ndef grayscale(rgb):\r\n    # f = rgb[0:2]\r\n    # s = rgb[2:2]\r\n    # l \r\n\r\n    return int(rgb[0:2],16)*0.299 + int(rgb[2:4],16)*0.587 + int(rgb[4:6],16)*0.114\r\ncorners = [[17,44],\r\n[18,85],\r\n[30,63],\r\n[31,65],\r\n[42,54],\r\n[43,58],\r\n[43,81],\r\n[43,81],\r\n[54,51],\r\n[55,55],\r\n[55,63],\r\n[67,71]]\r\nf = open(\"C:\\\\Projects\\\\GitHub\\\\NTI_contest\\\\robot_artag_maze\\\\Задача 2.6\\\\test_0.txt\",\"r\")\r\nfor line in f:\r\n    mya = line.split(\" \")\r\n    break\r\nmyarray = []\r\ncnt = 0\r\nfor i in range(120):\r\n    myarray.append([])\r\n    for j in range(160):\r\n        myarray[i].append(grayscale(mya[cnt]))\r\n        cnt=cnt+1\r\nfor i in range(len(corners)):\r\n    cur = corners[i]\r\n    i1 = cur[0]\r\n    j1 = cur[1]\r\n    myarray[i1][j1] = 120\r\nmyaq = np.array(myarray)\r\nim = Image.fromarray(myaq) \r\nim.show()","sub_path":"old/visual.py","file_name":"visual.py","file_ext":"py","file_size_in_byte":860,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"592545047","text":"import pathlib\n\nimport warnings\nimport torch as t\nimport torch.nn as nn\nimport torch.backends.cudnn as cudnn\nimport numpy as np\nimport time\nimport argparse\nimport os\n\nfrom MSRADataset import MSRADataset, _unnormalize_joints\nfrom REN import REN\nfrom loss import Modified_SmoothL1Loss\nfrom utils import adjust_learning_rate, set_default_args, weights_init, save_checkpoint, load_checkpoint, \\\n    save_plt, mkdirs\n\nwarnings.simplefilter(\"ignore\")\n\nparser = argparse.ArgumentParser(description='Region Ensemble Network')\nparser.add_argument('--batchSize', type=int, default=128, help='input batch size')\nparser.add_argument('--epoch', type=int, default=40, help='number of epochs')\nparser.add_argument('--test', action='store_true', help='only test without training')\nparser.add_argument('--lr', type=float, default=0.005, help='initial learning rate')\nparser.add_argument('--lr_decay', type=int, default=20, help='decay lr by 10 after _ epochs')\nparser.add_argument('--input_size', type=int, default=96, help='input image size')\nparser.add_argument('--num_joints', type=int, 
default=42, help='number of joint outputs (21 joints x 2 coordinates)')\nparser.add_argument('--no_augment', action='store_true', help='do not augment data?')\nparser.add_argument('--no_validate', action='store_true', help='do not validate data when training?')\nparser.add_argument('--augment_probability', type=float, default=1.0, help='augment probability')\nparser.add_argument('--momentum', type=float, default=0.9, help='momentum')\nparser.add_argument('--weight_decay', type=float, default=0.001, help='weight_decay')\nparser.add_argument('--poses', type=str, default=None, nargs='+', help='poses to train on')\nparser.add_argument('--persons', type=str, default=None, nargs='+', help='persons to train on')\nparser.add_argument('--checkpoint', type=str, default=None, help='path/to/checkpoint.pth.tar')\nparser.add_argument('--print_interval', type=int, default=500, help='print interval')\nparser.add_argument('--save_dir', type=str, default=\"experiments/\", help='path/to/save_dir')\nparser.add_argument('--name', type=str, default=None,\n                    help='name of the experiment. It decides where to store samples and models. If none, '\n                         'it will be saved as the date and time')\nparser.add_argument('--finetune', action='store_true', help='use a pretrained checkpoint')\n\n\ndef print_options(opt):\n    message = ''\n    message += '----------------- Options ---------------\\n'\n    for k, v in sorted(vars(opt).items()):\n        comment = ''\n        default = parser.get_default(k)\n        if v != default:\n            comment = '\\t[default: %s]' % str(default)\n        message += '{:>25}: {:<30}{}\\n'.format(str(k), str(v), comment)\n    message += '----------------- End -------------------'\n    print(message)\n    # save to the disk\n    expr_dir = opt.save_dir / opt.name\n    mkdirs(expr_dir)\n    file_name = expr_dir / 'opt.txt'\n    with open(file_name, 'wt') as opt_file:\n        opt_file.write(message)\n        opt_file.write('\\n')\n\n\ndef main(args):\n    set_default_args(args)\n    model = REN(args)\n\n    model.float()\n    model.cuda()\n    model.apply(weights_init)\n    cudnn.benchmark = True\n    criterion = Modified_SmoothL1Loss().cuda()\n\n    train_dataset = MSRADataset(training=True, augment=args.augment, args=args)\n    test_dataset = MSRADataset(training=False, augment=False, args=args)\n\n    train_loader = t.utils.data.DataLoader(\n        train_dataset, batch_size=args.batchSize, shuffle=True,\n        num_workers=0, pin_memory=False)\n\n    val_loader = t.utils.data.DataLoader(\n        test_dataset, batch_size=args.batchSize, shuffle=True,\n        num_workers=0, pin_memory=False)\n\n    optimizer = t.optim.Adam(model.parameters(), args.lr,\n                             # momentum=args.momentum,\n                             weight_decay=args.weight_decay)\n\n    current_epoch = 0\n    if args.checkpoint:\n        model, optimizer, current_epoch = load_checkpoint(args.checkpoint, model, optimizer)\n        if args.finetune:\n            current_epoch = 0\n\n    if args.test:\n        test(model, args)\n        return\n\n    train_loss = []\n    val_loss = []\n    best = False\n\n    print_options(args)\n    expr_dir = args.save_dir / args.name\n\n    for epoch in range(current_epoch, args.epoch):\n\n        optimizer = adjust_learning_rate(optimizer, epoch, args)\n        # train for one epoch\n        loss_train = train(train_loader, model, criterion, optimizer, epoch, args)\n        train_loss = train_loss + loss_train\n        if args.validate:\n            # evaluate on validation set\n            loss_val = validate(val_loader, model, criterion, args)\n            val_loss = val_loss + loss_val\n\n        state = {\n            'epoch': epoch,\n            'arch': type(model).__name__,\n            'state_dict': model.state_dict(),\n            'optimizer': optimizer.state_dict(),\n        }\n\n        if not (expr_dir / 'model_best.pth.tar').exists():\n            save_checkpoint(state, True, 
args)\n\n if args.validate and epoch > 1:\n best = (loss_val < min(val_loss[:len(val_loss) - 1]))\n if best:\n print(\"saving best performing checkpoint on val\")\n save_checkpoint(state, True, args)\n\n save_checkpoint(state, False, args)\n #\n\n expr_dir = args.save_dir / args.name\n np.savetxt(str(expr_dir / \"train_loss.out\"), train_loss, fmt='%f')\n save_plt(train_loss, \"train_loss\")\n np.savetxt(str(expr_dir / \"val_loss.out\"), val_loss, fmt='%f')\n save_plt(val_loss, \"val_loss\")\n\n\ndef train(train_loader, model, criterion, optimizer, epoch, args):\n # switch to train mode\n model.train()\n loss_train = []\n for i, (input, target) in enumerate(train_loader):\n stime = time.time()\n # measure data loading time\n target = target.float()\n target = target.cuda(non_blocking=False)\n input = input.float()\n input = input.cuda()\n # compute output\n output = model(input)\n\n loss = criterion(output, target)\n # measure accuracy and record loss\n # compute gradient and do SGD step\n optimizer.zero_grad()\n loss.backward()\n loss_train.append(loss.data.item())\n optimizer.step()\n # measure elapsed time\n if i % args.print_interval == 0:\n TT = time.time() - stime\n print('epoch: [{0}][{1}/{2}]\\t'\n 'Loss {loss:.4f}\\t'\n 'Time: {time:.2f}\\t'.format(\n epoch, i, len(train_loader), loss=loss.item(), time=TT))\n\n return [np.mean(loss_train)]\n\n\ndef validate(val_loader, model, criterion, args):\n # switch to evaluate mode\n model.eval()\n\n loss_val = []\n with t.no_grad():\n expr_dir = os.path.join(args.save_dir, args.name)\n\n for i, (input, target) in enumerate(val_loader):\n target = target.float()\n target = target.cuda(non_blocking=False)\n # compute output\n input = input.float()\n input = input.cuda()\n output = model(input)\n loss = criterion(output, target)\n\n if i % args.print_interval == 0:\n print('Test: [{0}/{1}]\\t'\n 'Loss {loss:.4f}\\t'.format(\n i, len(val_loader), loss=loss))\n loss_val.append(loss.data.item())\n\n return [np.mean(loss_val)]\n\n\ndef test(model, args):\n # switch to evaluate mode\n model.eval()\n test_dataset = MSRADataset(training=False, augment=False, args=args)\n errors = []\n MAE_criterion = nn.L1Loss()\n with t.no_grad():\n expr_dir = os.path.join(args.save_dir, args.name)\n\n input_size = args.input_size\n for i, (input, target) in enumerate(test_dataset):\n target = target.float()\n target = target.numpy().reshape(21, 2)\n tmp = np.zeros((21, 3))\n for j in range(len(target)):\n tmp[j, :2] = target[j]\n # compute output\n input = input.float()\n input = input.cuda()\n input = input.unsqueeze(0)\n output = model(input)\n output = output.cpu().numpy().reshape(21, 2)\n tmp1 = np.zeros((21, 3))\n for j in range(len(output)):\n tmp1[j, :2] = output[j]\n center = test_dataset.get_center(i)\n # errors.append(compute_distance_error(_unnormalize_joints(tmp1,center,input_size),\n # _unnormalize_joints(tmp,center,input_size)).item())\n output = t.from_numpy(_unnormalize_joints(tmp1, center, input_size))\n target = t.from_numpy(_unnormalize_joints(tmp, center, input_size))\n MAE_loss = MAE_criterion(output, target)\n\n errors.append(MAE_loss.item())\n\n if i % args.print_interval == 0:\n print('Test: [{0}/{1}]\\t'\n 'Loss {loss:.4f}\\t'.format(\n i, len(test_dataset), loss=errors[-1]))\n\n errors = np.mean(errors)\n print(errors)\n if \"model_best\" in args.checkpoint:\n np.savetxt(os.path.join(expr_dir, \"average_MAE_model_best_\" + args.poses[0]), np.asarray([errors]),\n fmt='%f')\n else:\n np.savetxt(os.path.join(expr_dir, 
\"average_MAE_checkpoint\" + args.poses[0]), np.asarray([errors]), fmt='%f')\n\n\nif __name__ == '__main__':\n args = parser.parse_args()\n args.save_dir = pathlib.Path(args.save_dir)\n main(args)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":9460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"4197221","text":"import webbrowser\nimport re\n\nfirefox_path = \"\\/usr\\/bin\\/firefox\"\nnmap_ports = open('/opt/port_browser/nmap_ports.txt', 'r')\n\nregex = re.compile(r'[A-Za-z]')\nprotocol = input(\"Enter 'http' or 'https':\")\n\nfor line in nmap_ports:\n mo1 = regex.search(line)\n if bool(mo1) == True:\n Base_URL = line\n else:\n URL = protocol + '://' + Base_URL + ':' + line\n webbrowser.get('firefox').open(URL)\n\nnmap_ports.close()\n","sub_path":"port_browser.py","file_name":"port_browser.py","file_ext":"py","file_size_in_byte":436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"141494195","text":"# encoding: UTF-8\r\n#\r\n# train1: 有 10 个单元的单层神经网络\r\n#\r\n# 将图像平铺成一维向量,全连接 softmax 的 10 个单元\r\n#\r\n# X = [batch, 784], W = [784, 10], b = [10], Y = [batch, 10]\r\n#\r\n# tag: GD, minibatch, softmax, learning_rate\r\n\r\nimport numpy as np\r\nimport tensorflow as tf\r\nimport matplotlib.pyplot as plt\r\nfrom mnist import input_data\r\n\r\nprint(\"Tensorflow version \" + tf.__version__)\r\ntf.set_random_seed(1)\r\n\r\n\r\ndef train1(learning_rate=0.005, minibatch_size=100, iterations=2000 + 1):\r\n # Use local mnist dataset (60k for train, 10k for test)\r\n mnist = input_data.read_data_sets(\"mnist\", one_hot=True, reshape=False)\r\n\r\n # input X: 28x28 grayscale images, the first dimension (None) will index the images in the mini-batch\r\n X = tf.placeholder(tf.float32, shape=[None, 28, 28, 1], name=\"x_placeholder\")\r\n # correct answers will go here\r\n Y_ = tf.placeholder(tf.float32, shape=[None, 10], name=\"y_placeholder\")\r\n\r\n # weights W[784, 10] 784=28*28\r\n W = tf.Variable(tf.zeros([784, 10]), name=\"weights_variable\")\r\n # biases b[10]\r\n b = tf.Variable(tf.zeros([10]), name=\"bias_variable\")\r\n\r\n # flatten\r\n XX = tf.reshape(X, shape=[-1, 784])\r\n\r\n # The model\r\n Ylogits = tf.matmul(XX, W) + b\r\n Y = tf.nn.softmax(Ylogits)\r\n\r\n # Loss is defined as cross entropy between the prediction and the real value\r\n cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=Ylogits, labels=Y_, name=\"lossFunction\")\r\n cross_entropy = tf.reduce_mean(cross_entropy) * minibatch_size\r\n\r\n # accuracy of the trained model, between 0 (worst) and 1 (best)\r\n correct_prediction = tf.equal(tf.argmax(Y, 1), tf.argmax(Y_, 1))\r\n accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32), name=\"accuracy\")\r\n\r\n # training, learning rate = 0.005\r\n train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(cross_entropy, name=\"gradDescent\")\r\n\r\n # init\r\n init = tf.global_variables_initializer()\r\n sess = tf.Session()\r\n sess.run(init)\r\n\r\n # save data\r\n train_indexes = []\r\n train_costs = []\r\n train_accuracies = []\r\n test_indexes = []\r\n test_costs = []\r\n test_accuracies = []\r\n\r\n # Feed the next batch and run the training\r\n for i in range(iterations):\r\n # training on batches of 100 images with 100 labels\r\n batch_X, batch_Y = mnist.train.next_batch(minibatch_size)\r\n\r\n # compute training values\r\n if i % 10 == 0:\r\n acc, cost = sess.run([accuracy, cross_entropy], 
feed_dict={X: batch_X, Y_: batch_Y})\r\n train_indexes.append(i)\r\n train_costs.append(cost)\r\n train_accuracies.append(acc)\r\n print(str(i) + \": accuracy:\" + str(acc) + \" loss: \" + str(cost))\r\n\r\n # compute test values\r\n if i % 50 == 0:\r\n acc, cost = sess.run([accuracy, cross_entropy], feed_dict={X: mnist.test.images, Y_: mnist.test.labels})\r\n test_indexes.append(i)\r\n test_costs.append(cost)\r\n test_accuracies.append(acc)\r\n print(str(i) + \": ********* epoch \" + str(\r\n i * minibatch_size // mnist.train.images.shape[0] + 1) + \" ********* test accuracy:\" + str(\r\n acc) + \" test loss: \" + str(cost))\r\n\r\n # the backpropagation training step\r\n sess.run(train_step, feed_dict={X: batch_X, Y_: batch_Y})\r\n\r\n print(\"max test accuracy: \" + str(max(test_accuracies)))\r\n\r\n # plot train and test costs and accuracies\r\n plt.figure(figsize=(12, 4))\r\n plt.subplot(121)\r\n plt.plot(np.squeeze(train_indexes), np.squeeze(train_accuracies), label=\"train_accuracy\")\r\n plt.plot(np.squeeze(test_indexes), np.squeeze(test_accuracies), label=\"test_accuracy\")\r\n plt.legend()\r\n plt.ylabel('accuracy')\r\n plt.xlabel('iterations')\r\n plt.title(\"Learning rate =\" + str(learning_rate))\r\n plt.subplot(122)\r\n plt.plot(np.squeeze(train_indexes), np.squeeze(train_costs), label=\"train_costs\")\r\n plt.plot(np.squeeze(test_indexes), np.squeeze(test_costs), label=\"test_costs\")\r\n plt.legend()\r\n plt.ylabel('costs')\r\n plt.xlabel('iterations')\r\n plt.title(\"Learning rate =\" + str(learning_rate))\r\n # plt.show()\r\n plt.savefig(\r\n \"output/learning_rate \" + str(learning_rate) + \" iterations \" + str(iterations) + \" max test accuracy \" + str(\r\n max(test_accuracies)) + \" .png\")\r\n\r\n sess.close()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n # for learning_rate in [0.5, 0.05, 0.005]:\r\n train1(learning_rate=0.005, minibatch_size=100, iterations=10000 + 1)\r\n","sub_path":"tensorflow_mnist_1.0_softmax.py","file_name":"tensorflow_mnist_1.0_softmax.py","file_ext":"py","file_size_in_byte":4576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"326928871","text":"import os\nimport glob\nimport h5py\nimport struct\nimport numpy as np\nimport datetime\nfrom scipy.io import readsav\nfrom scipy import stats\nfrom scipy import interpolate\nfrom scipy.optimize import curve_fit\nimport pysolar\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nfrom matplotlib.ticker import FixedLocator\nfrom matplotlib import rcParams\nimport matplotlib.gridspec as gridspec\nimport matplotlib.patches as patches\nimport cartopy.crs as ccrs\n\n\n\n\nclass READ_ICT_HSK:\n\n def __init__(self, fname, tmhr_range=None):\n\n f = open(fname, 'r')\n firstLine = f.readline()\n skip_header = int(firstLine.split(',')[0])\n\n vnames = []\n units = []\n for i in range(7):\n f.readline()\n\n line = f.readline()\n vname0, unit0 = self.VARIABLE_INFO(line)\n vnames.append(vname0)\n units.append(unit0)\n Nvar = int(f.readline())\n for i in range(2):\n f.readline()\n for i in range(Nvar):\n line = f.readline()\n vname0, unit0 = self.VARIABLE_INFO(line)\n vnames.append(vname0)\n units.append(unit0)\n f.close()\n\n data = np.genfromtxt(fname, skip_header=skip_header, delimiter=',')\n self.data = {}\n\n if tmhr_range != None:\n tmhr0 = data[:, 0]/3600.0\n logic = (tmhr0>=tmhr_range[0]) & (tmhr0<=tmhr_range[1])\n for i, vname in enumerate(vnames):\n self.data[vname] = data[:, i][logic]\n else:\n for i, vname in enumerate(vnames):\n 
self.data[vname] = data[:, i]\n\n    def VARIABLE_INFO(self, line):\n\n        words = line.split(',')\n        vname = words[0].strip()\n        unit = ','.join(words[1:])\n        return vname, unit\n\n\n\n\n\ndef PRH2ZA(ang_pit, ang_rol, ang_head, is_rad=False):\n\n    \"\"\"\n    input:\n        ang_pit (Pitch) [deg]: positive (+) values indicate nose up\n        ang_rol (Roll) [deg]: positive (+) values indicate right wing down\n        ang_head (Heading) [deg]: positive (+) values clockwise, w.r.t. north\n\n        \"vec\": normal vector of the surface of the sensor\n\n    return:\n        ang_zenith : angle of \"vec\" [deg]\n        ang_azimuth: angle of \"vec\" [deg]: positive (+) values clockwise, w.r.t. north\n    \"\"\"\n\n    if not is_rad:\n        rad_pit = np.deg2rad(ang_pit)\n        rad_rol = np.deg2rad(ang_rol)\n        rad_head = np.deg2rad(ang_head)\n    else:\n        rad_pit, rad_rol, rad_head = ang_pit, ang_rol, ang_head\n\n    uz = np.cos(rad_rol)*np.cos(rad_pit)\n    ux = np.sin(rad_rol)\n    uy = -np.cos(rad_rol)*np.sin(rad_pit)\n\n    vz = uz.copy()\n    vx = ux*np.cos(rad_head) + uy*np.sin(rad_head)\n    vy = uy*np.cos(rad_head) - ux*np.sin(rad_head)\n\n    ang_zenith = np.rad2deg(np.arccos(vz))\n    ang_azimuth = np.rad2deg(np.arctan2(vx,vy))\n\n    ang_azimuth[ang_azimuth<0.0] += 360.0\n\n    return ang_zenith, ang_azimuth\n\n\n\n\n\n\ndef MUSLOPE(sza, saa, iza, iaa, is_rad=False):\n\n    if not is_rad:\n        rad_sza = np.deg2rad(sza)\n        rad_saa = np.deg2rad(saa)\n        rad_iza = np.deg2rad(iza)\n        rad_iaa = np.deg2rad(iaa)\n    else:\n        rad_sza, rad_saa, rad_iza, rad_iaa = sza, saa, iza, iaa\n\n    zs = np.cos(rad_sza)\n    ys = np.sin(rad_sza) * np.cos(rad_saa)\n    xs = np.sin(rad_sza) * np.sin(rad_saa)\n\n    zi = np.cos(rad_iza)\n    yi = np.sin(rad_iza) * np.cos(rad_iaa)\n    xi = np.sin(rad_iza) * np.sin(rad_iaa)\n\n    mu = xs*xi + ys*yi + zs*zi\n\n    return mu\n\n\n\n\n\n\ndef CAL_SOLAR_ANGLES(julian_day, longitude, latitude, altitude):\n\n    dateRef = datetime.datetime(1, 1, 1)\n    jdayRef = 1.0\n\n    sza = np.zeros_like(julian_day)\n    saa = np.zeros_like(julian_day)\n\n    for i, jday in enumerate(julian_day):\n\n        dtime_i = (dateRef + datetime.timedelta(days=jday-jdayRef)).replace(tzinfo=datetime.timezone.utc)\n\n        sza_i = 90.0 - pysolar.solar.get_altitude(latitude[i], longitude[i], dtime_i, elevation=altitude[i])\n        if sza_i < 0.0 or sza_i > 90.0:\n            sza_i = np.nan\n        sza[i] = sza_i\n\n        saa_i = pysolar.solar.get_azimuth(latitude[i], longitude[i], dtime_i, elevation=altitude[i])\n        if saa_i >= 0.0:\n            if 0.0<=saa_i<=180.0:\n                saa_i = 180.0 - saa_i\n            elif 180.0\",myfunction)\n\nroot.title(\"Movies and TV series\")\nprint(root.winfo_screenwidth())\nprint(root.winfo_screenheight())\n\napp = Application(root, frame)\n\nroot.geometry(str(root.winfo_screenwidth()) + \"x\" + str(root.winfo_screenheight()))\nroot.mainloop()\n","sub_path":"form.py","file_name":"form.py","file_ext":"py","file_size_in_byte":9376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"258881248","text":"def cat_dog(s):\n\tcount_cat = 0\n\tcount_dog = 0\n\tfor i in range(len(s)-2):\n\t\tif s[i:i+3] == \"cat\":\n\t\t\tcount_cat += 1\n\t\telif s[i:i+3] == \"dog\":\n\t\t\tcount_dog += 1\n\n\tif count_cat == count_dog:\n\t\treturn True\n\telse:\n\t\treturn False","sub_path":"cat_dog.py","file_name":"cat_dog.py","file_ext":"py","file_size_in_byte":231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"260676592","text":"#!/usr/bin/env python3\n\nimport os\nimport time\nimport sys\nfrom threading import Thread\nfrom queue import Queue\n\nusage = \"\"\"[*]Author: Dion Bosschieter\n[*]Usage: ./calc.py [filename] [threadcount]\n./calc.py sommen.txt 4\n./calc.py sommen.txt\nThe calculations are read 
from the file 'sommen.txt'\nRead (P): 6 + 4\nWrite (C): 6 + 4 = 10\nAll sums have been processed, end of program.\n\"\"\"\n\ndef consumer(cq,mq,thrdn):\n\twhile True:\n\t\t\n\t\tqitem = cq.get()\n\t\t#time.sleep(0.0001)\n\t\n\t\tif qitem==\"exit\": #check if this thread can shutdown\n\t\t\tmq.put_nowait(\"[*]Stopping consumer-thread[\"+str(thrdn+1)+\"]\") #shutdown message\n\t\t\tbreak\n\t\telse:\n\t\t\tarr = qitem.split(' ')\n\t\tif len(arr) == 3: #if 3 values, check if 2nd value is an operator\n\t\t\toperator = arr[1]\n\t\t\t\n\t\t\ttry:\n\t\t\t\tgetal1 = int(arr[0])\n\t\t\t\tgetal2 = int(arr[2])\n\t\t\t\toutput = arr[0] + \" \" + arr[1] + \" \" + arr[2] + \" = \"\n\t\t\texcept ValueError:\n\t\t\t\tconsumerprint(\"these are not numbers\",mq,thrdn)\n\t\t\t\tcontinue\n\t\t\t\n\t\t\tif operator in (\"+\",\"-\",\"*\",\"/\",\"^\"):\n\t\t\t\tif operator == \"+\":\n\t\t\t\t\toutput += str(getal1 + getal2)\n\t\t\t\telif operator == \"-\":\n\t\t\t\t\toutput += str(getal1 - getal2)\n\t\t\t\telif operator == \"*\":\n\t\t\t\t\toutput += str(getal1 * getal2)\n\t\t\t\telif operator == \"/\":\n\t\t\t\t\toutput += str(getal1 / getal2)\n\t\t\t\telif operator == \"^\":\n\t\t\t\t\toutput += str(getal1 ** getal2)\n\n\t\t\t\tconsumerprint(output,mq,thrdn) #print the calculation on the screen\n\t\t\telse:\n\t\t\t\tconsumerprint(\"I don't know that operator!\",mq,thrdn)\n\t\telse: \n\t\t\tconsumerprint(\"Error: enter a calculation in the form '<number> <operator> <number>'\",mq,thrdn)\n\ndef monitor(mq):\n\twhile True:\n\t\tmsg = mq.get()\n\t\tif msg == \"exit\":\n\t\t\tbreak\n\t\tprint(msg)\n\ndef consumerprint(msg,mq,thrdn):\n\tmq.put_nowait(\"Consumer[\"+str(thrdn+1)+\"]: \"+msg)\n\ndef error(msg):\n\tprint(usage)\n\tprint(msg)\n\n\nif __name__ == '__main__':\n\t\n\tif(len(sys.argv) < 2):\n\t\tprint(usage)\n\t\tquit()\n\n\tfilename = sys.argv[1] \n\t\n\t#check if file exists\n\ttry: \n\t\tf = open(filename,'r')\n\texcept:\n\t\terror(\"File does not exist\")\n\t\tquit()\n\t\n\t#check if a threadcount argument is given\n\t#check if the given argument is a digit\n\t#else threadcount = 1\n\tif(len(sys.argv) < 3 ):\n\t\tthreadcount = 1\n\telif(sys.argv[2].isdigit() != True):\n\t\terror(\"ValueError: enter a number for the thread count\")\n\t\tquit()\n\telif(sys.argv[2].isdigit()):\n\t\tthreadcount = int(sys.argv[2])\n\n\tprint(\"The calculations are read from the file\", filename)\n\tprint(\"Number of threads\", threadcount)\n\n\tcq = [] #calculator queue array\n\tmq = Queue() #monitor queue\n\tct = [] #thread array\n\tmp = Thread(target=monitor, args=(mq,)) #monitor worker(thread)\n\n\t#create the Calculator queue(s) and thread(s)\n\tfor i in range(0,threadcount):\n\t\tcq.append(Queue())\n\t\tt = Thread(target=consumer, args=(cq[i],mq,i,), daemon=True)\n\t\tct.append(t)\n\n\t#read the file and close it\n\tlines = f.read().splitlines()\n\tcount = 0\n\tfor line in lines:\n\t\tmq.put_nowait(\"Read (P): \"+line+\" in queue:\"+str(count))\n\t\tcq[count].put_nowait(line)\n\t\tcount += 1\n\t\tif count == threadcount: count = 0\n\tf.close()\n\n\t#starting thread(s)\n\tfor i in range(0,threadcount):\n\t\tmq.put_nowait(\"[*] Starting consumer-thread: \"+str(i+1))\n\t\tct[i].start()\n\t\n\tmp.start() #start the monitor thread\n\n\t#cleanup\n\tfor i in range(0,threadcount):\n\t\tcq[i].put_nowait(\"exit\") # tell the consumer thread(s) to close\n\t\tct[i].join() # wait for thread\n\t\n\tmq.put(\"exit\") # tell the monitor thread to close\n\tmp.join() # wait for 
thread","sub_path":"calc.py","file_name":"calc.py","file_ext":"py","file_size_in_byte":3374,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"184199452","text":"\"\"\"\n.. module:: cmb\n :synopsis: define a class for a CMB experiment\n\n\"\"\"\n\nimport numpy as np\nfrom fishercode import Fisher\nimport camb\nfrom camb import model, initialpower\n\nimport multiprocessing as mp\nfrom utilities.functions import degsq2rad, deg2rad\n\nclass CMBExperiment(object):\n \"\"\" CMB experiment class\n\n Args:\n * DeltaOmega: The sky covered in square degrees\n * LMAX: maximum multipole to be considered\n * theta: resolution (FWHM, arcmin)\n * sigmaT: temperature noise per pixel (:math:`\\mu K`)\n * name: [None, \"Planck\", \"WMAP\", \"PIXIE\"] provides the possibility of using a known experiment\n\n \"\"\"\n\n def __init__(self, DeltaOmega=41253., LMAX=2000, beamsize=5.0, w=8000., name=None, alias=None):\n \"\"\"The CMB experiment class.\n\n \"\"\"\n self.lmax=LMAX\n self.theta=deg2rad(beamsize/60.) # beamsize is specified in arcmin\n self.wT=w # (1/(pixel size*noise per pixel^2)\n self.name=name\n self.fsky=DeltaOmega/41253\n\n if (alias):\n self.name=alias\n if name==None:\n self.dOm = degsq2rad(DeltaOmega) # the survey area in degree square is converted to radians square here\n if name==\"CVlimited\":\n \"\"\"define a cosmic variance limited CMB temperature and polarization experiment\n with the following properties\n \"\"\"\n if not(alias):\n self.name=\"CVlimited\"\n self.lmax=2500\n self.fsky=0.8\n # one does not need to worry about wT, wP, and noise per pixel; the weight for the\n # noise term is set to zero for this experiment in the CMBFisher class.\n\n if name==\"Planck\":\n if not(alias):\n self.name=\"Planck\"\n self.lmax=2000\n self.fsky=0.7\n self.frequency=143\n self.theta=deg2rad(7./60.) # in arcmin\n self.noiseppT=6.0 # noise per pixel for temperature in microK\n self.noiseppP=11.5 # noise per pixel for polarization in microK\n self.wT=1.0/(self.theta*self.noiseppT)**2.0\n self.wP=1.0/(self.theta*self.noiseppP)**2.0\n\nclass CMBFisher(Fisher):\n \"\"\"This class is for computing the CMB Fisher matrix given by\n\n .. 
math::\n\n F_{ij}=\\\\sum_l \\\\frac{(2l+1)}{2} \\\\frac{\\\\frac{\\\\partial C_l}{\\\\partial \\\\alpha_i} \\\\frac{\\\\partial C_l}{\\\\partial \\\\alpha_j}}{(C_l+w^{-1} e^{\\\\sigma^2 l^2})^2}\n\n The :math:`C_l` values are computed from the currently set cosmology.\n\n \"\"\"\n\n def __init__(self, expt, cosmology, params=[], param_values=[], param_names=[], priors=[], pol=False):\n \"\"\"Set the experiment and cosmology for CMB Fisher computations.\n Also, set the parameters and priors if specified\n \"\"\"\n self.experiment=expt\n self.cosmology=cosmology\n self.include_polarization=pol\n Fisher.__init__(self, params, param_values, param_names, priors)\n\n def theoryCls(self, LMAX):\n \"\"\"get the theoretical :math:`C_l` values for the current cosmology using CLASS code.\n The cosmological parameters are set from the current :class:`.cosmology`.\n The power sepctra are also set as variables in the cosmology class.\n\n If the include_polarization switch is set to True, then it also sets\n\n \"\"\"\n # using camb\n pars = camb.CAMBparams()\n pars.set_cosmology(H0=self.cosmology.H0, ombh2=self.cosmology.Ob0*self.cosmology.h**2.0, \n omch2=self.cosmology.Oc0*self.cosmology.h**2.0, omk=0, \n tau=self.cosmology.tau, mnu=self.cosmology.m_nu[-1])\n pars.InitPower.set_params(As=self.cosmology.As, ns=self.cosmology.n, r=self.cosmology.r)\n pars.set_for_lmax(LMAX)\n\n if (self.cosmology.r > 0.0):\n pars.WantTensors = True\n\n results = camb.get_results(pars)\n powers = results.get_cmb_power_spectra(pars)\n totCL = powers['total']\n\n self.cosmology.TTCls = totCL[:LMAX+1,0]\n self.cosmology.ells=np.arange(LMAX+1)\n\n if (self.include_polarization):\n self.cosmology.TECls = totCL[:LMAX+1,3]\n self.cosmology.BBCls = totCL[:LMAX+1,2]\n self.cosmology.EECls = totCL[:LMAX+1,1]\n\n return self.cosmology.TTCls\n\n def getCls(self, ps='tt'):\n \"\"\"return one of the TT, TE, EE, BB Cls\n \"\"\"\n if (ps=='te'):\n return self.cosmology.TECls\n elif (ps=='bb'):\n return self.cosmology.BBCls\n elif (ps=='ee'):\n return self.cosmology.EECls\n else:\n return self.cosmology.TTCls\n\n def Cls_deriv(self, param, param_value, ps='tt'):\n \"\"\"compute the numerical derivative of :math:`C_ls`, the angular temperature power spectrum\n with respect to the parameter specified at the given value\n \"\"\"\n v=getattr(self.cosmology, param)\n pv=param_value\n setfunc=getattr(self.cosmology, \"set_\"+param)\n\n setfunc(pv*(1.+self.diff_percent))\n self.theoryCls(self.experiment.lmax)\n plus_value=self.getCls(ps)\n setfunc(pv*(1.-self.diff_percent))\n self.theoryCls(self.experiment.lmax)\n minus_value=self.getCls(ps)\n finite_diff=plus_value-minus_value\n delta_pv=2*self.diff_percent*pv\n setfunc(v)\n return (finite_diff)/delta_pv\n\n def noise_weight(self, ps='tt'):\n \"\"\"return the noise weight for the power spectrum specified\n \"\"\"\n if (self.experiment.name==\"CVlimited\"):\n return 0.\n if (ps=='tt'):\n return 1./self.experiment.wT\n elif (ps=='ee' or ps =='bb'):\n return 1./self.experiment.wP\n else:\n return 0.\n\n def fisherXX(self, ps, output):\n \"\"\"computes the fisher matrix given the parameters, experiment and cosmology definitions\n \"\"\"\n # loop over multipoles to form the Fisher matrix\n fmatrix=np.array([[0.]*self.nparams]*self.nparams)\n dCij=[0.]*self.nparams\n\n if (self.experiment.lmax>256):\n self.theoryCls(self.experiment.lmax)\n else:\n self.theoryCls(256)\n\n ClXX=self.getCls(ps)\n\n for i in range(self.nparams):\n dCij[i]=self.Cls_deriv(self.parameters[i], self.parameter_values[i], 
ps)\n\n for i in range(self.nparams):\n for j in range(self.nparams):\n fijl=np.array([((2*l+1)/2)*(dCij[i][l]*dCij[j][l])/(ClXX[l]+self.noise_weight(ps)*np.exp((self.experiment.theta*l)**2.0))**2.0 for l in range(2, self.experiment.lmax-1)])\n fmatrix[i][j]=self.experiment.fsky*np.sum(fijl)\n\n output.put(np.array(np.matrix(fmatrix)))\n return np.array(np.matrix(fmatrix)) # numpy array for easy indexing\n\n def fisher(self, XX=['tt', 'te', 'ee', 'bb']):\n \"\"\"sum over the specified XX=[TT, TE, EE, BB] fisher matrices to get the\n total CMB fisher matrix\n \"\"\"\n if (('te' in XX) or ('ee' in XX) or ('bb' in XX)):\n self.include_polarization = True\n\n fmatrix=np.array([[0.]*self.nparams]*self.nparams)\n\n output=mp.Queue()\n processes=[mp.Process(target=self.fisherXX, args=(xx, output)) for xx in XX]\n\n for p in processes:\n p.start()\n for p in processes:\n p.join()\n\n fmats=[output.get() for p in processes]\n fmatrix=sum(fmats)\n\n self.fisher_matrix=np.matrix(fmatrix)\n return self.fisher_matrix\n\n\n def test_fisher(self):\n \"\"\"\n devise a test case that reproduces previously known (upto some tolerable accuracy)\n Fisher matrix computation as a check of this code\n \"\"\"\n return 0\n","sub_path":"cmb.py","file_name":"cmb.py","file_ext":"py","file_size_in_byte":7747,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"311486901","text":"import keras\nfrom keras import backend as K\n \nclass TorchBatchNorm2D(keras.engine.topology.Layer):\n def __init__(self, axis=-1, momentum=0.99, epsilon=1e-3, **kwargs):\n super(TorchBatchNorm2D, self).__init__(**kwargs)\n self.supports_masking = True\n self.axis = axis\n self.momentum = momentum\n self.epsilon = epsilon\n\n def build(self, input_shape):\n dim = input_shape[self.axis]\n if dim is None:\n raise ValueError('Axis ' + str(self.axis) + ' of ' 'input tensor should have a defined dimension ' 'but the layer received an input with shape ' + str(input_shape) + '.')\n shape = (dim,)\n self.gamma = self.add_weight(shape=shape, name='gamma', initializer='ones', regularizer=None, constraint=None)\n self.beta = self.add_weight(shape=shape, name='beta', initializer='zeros', regularizer=None, constraint=None)\n self.moving_mean = self.add_weight(shape=shape, name='moving_mean', initializer='zeros', trainable=False) \n self.moving_variance = self.add_weight(shape=shape, name='moving_variance', initializer='ones', trainable=False) \n self.built = True\n\n def call(self, inputs, training=None):\n input_shape = K.int_shape(inputs)\n\n broadcast_shape = [1] * len(input_shape)\n broadcast_shape[self.axis] = input_shape[self.axis]\n \n broadcast_moving_mean = K.reshape(self.moving_mean, broadcast_shape)\n broadcast_moving_variance = K.reshape(self.moving_variance, broadcast_shape)\n broadcast_gamma = K.reshape(self.gamma, broadcast_shape)\n broadcast_beta = K.reshape(self.beta, broadcast_shape) \n invstd = K.ones (shape=broadcast_shape, dtype='float32') / K.sqrt(broadcast_moving_variance + K.constant(self.epsilon, dtype='float32'))\n \n return (inputs - broadcast_moving_mean) * invstd * broadcast_gamma + broadcast_beta\n \n def get_config(self):\n config = { 'axis': self.axis, 'momentum': self.momentum, 'epsilon': self.epsilon }\n base_config = super(TorchBatchNorm2D, self).get_config()\n return dict(list(base_config.items()) + 
list(config.items()))\n","sub_path":"src/graph_transpiler/webdnn/frontend/keras/TorchBatchNorm2D.py","file_name":"TorchBatchNorm2D.py","file_ext":"py","file_size_in_byte":2190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"284336080","text":"def solution(a, b):\n answer = 0\n if a>b:\n for i in range(b,a+1):\n answer = answer+i\n elif a 0:\n prob = {}\n for i in self.game_sequence:\n if i[0] not in prob:\n prob[i[0]] = i[4]\n else:\n prob[i[0]] += i[4]\n for pk, pv in prob.items():\n p = pv / total_info\n multidisciplinary += p * log(p,2)\n self.score['multi'] = -multidisciplinary / log(self.num_options, 2)\n\n # cei2\n stretching = 0\n embracing = 0\n for ck, cv in self.cei2.items():\n if ck in ['q01', 'q03', 'q05', 'q07', 'q09']:\n print(\"stretching: \", cv, cv[3])\n stretching += int(cv[3])\n elif ck in ['q02', 'q04', 'q06', 'q08', 'q10']:\n print(\"embracing: \", cv, cv[3])\n embracing += int(cv[3])\n self.score['stretching'] = stretching\n self.score['embracing'] = embracing\n\n # learning\n print(self.learning)\n learning = 0\n q_asked = 0\n for lk, lv in self.learning.items():\n q_asked += 1\n if lv == 'correct':\n learning += 1\n self.score['learning'] = learning\n self.score['q_asked'] = q_asked\n\n print(self.score)\n self.save()\n\n def save(self):\n store = JsonStore(self.pathname + 'CuriosityScore.txt')\n store.put(self.id.strftime('%Y_%m_%d_%H_%M_%S_%f'),\n init=self.score['init'],\n total=self.score['total_info'],\n multi=self.score['multi'],\n stretching=self.score['stretching'],\n embracing=self.score['embracing'],\n age=self.score['age'],\n gender=self.score['gender'],\n faculty=self.score['faculty'],\n learning=self.score['learning'],\n q_asked=self.score['q_asked'])\n\n def draw(self):\n pass\n # Ellipse:\n # pos: 100, 100\n # size: 200 * wm.value, 201 * hm.value\n # source: 'data/logo/kivy-icon-512.png'\n # angle_start: e1.value\n # angle_end: e2.value\n\n\n\n","sub_path":"curiosity_score.py","file_name":"curiosity_score.py","file_ext":"py","file_size_in_byte":5105,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"496737663","text":"#!/usr/bin/env python3\n\nimport os\nimport sys\nimport argparse\nfrom PIL import Image\n\n\ndef main():\n try:\n im = Image.open(args.pic)\n except:\n print(\"There's error open the picture\")\n sys.exit(-1)\n\n width, height = im.size\n hist = [0] * N\n s = [0] * N # record the new intensity of new image on a gray scale pixel\n\n im_px = im.load()\n for i in range(width):\n for j in range(height):\n hist[im_px[i, j]] += 1\n\n # histogram equalization\n acc_sum = 0\n for idx in range(len(s)):\n acc_sum += hist[idx]\n s[idx] = 255 * acc_sum / sum(hist)\n\n new_im = Image.new(im.mode, im.size)\n new_im_px = new_im.load()\n for i in range(width):\n for j in range(height):\n new_im_px[i, j] = int(s[im_px[i, j]])\n new_im.save(args.out, im.format)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(\n description=\"histogram equilization of lena.bmp\")\n parser.add_argument(\"pic\", type=str, metavar=\"\")\n parser.add_argument(\"out\", type=str, metavar=\"\")\n args = parser.parse_args()\n\n N = 256\n main()\n","sub_path":"hw3/equlization.py","file_name":"equlization.py","file_ext":"py","file_size_in_byte":1148,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"231119138","text":"#!/usr/bin/env python3\n\nfrom progress_bar5 import *\n\n\ndef 
main():\n\tprogressLength = 100\n\n\tapp = ProgressBar()\n\tapp.setTitle(\"Progressing...\")\n\tapp.setMax(progressLength)\n\tapp.setColour('green', 'white')\n\tapp.setDeterminante(True)\n\n\tfor progressCount in range(progressLength):\n\t\ttime.sleep(0.05) # do real work here\n\t\tif app.isActive():\n\t\t\t# Alt: Set the value to progressCount\n\t\t\t#app.setPosition(progressCount)\n\t\t\t# Increment by 1\n\t\t\tapp.setIncrement(1)\n\t\tprint(progressCount)\n\n\nif __name__ == \"__main__\":\n\tmain()\n","sub_path":"Includes/progress_bar/progress_bar5_example.py","file_name":"progress_bar5_example.py","file_ext":"py","file_size_in_byte":517,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"357977428","text":"from urllib import request, parse\nimport gzip\nfrom http import cookiejar\nfrom bs4 import BeautifulSoup\n\n\nclass WebPage:\n    def __init__(self, cookieName):\n        self.cookieName = cookieName\n        httphd = request.HTTPHandler(debuglevel=1)\n        httpshd = request.HTTPSHandler(debuglevel=1)\n        self.ckojb = cookiejar.LWPCookieJar(self.cookieName)\n        cookiehd = request.HTTPCookieProcessor(self.ckojb)\n        self.opener = request.build_opener(httphd, httpshd, cookiehd)\n\n    def getPage(self, url, postData=None, headerInfo={}):\n        if postData:\n            postData = parse.urlencode(postData).encode('utf-8')\n        reqhd = request.Request(url, data=postData, headers=headerInfo)\n        req = self.opener.open(reqhd)\n        con = req.read()\n        coding = req.headers.get('Content-Encoding')\n        if coding == 'gzip':\n            con = gzip.decompress(con)\n        con = con.decode('utf-8')\n        return con\n\n    def getHtml5(self, con):\n        return BeautifulSoup(con, 'html5lib')\n\n    def saveCookie(self):\n        self.ckojb.save(ignore_discard=True, ignore_expires=True)\n\n    def loadCookie(self):\n        self.ckojb.load(self.cookieName, ignore_expires=True, ignore_discard=True)\n","sub_path":"page.py","file_name":"page.py","file_ext":"py","file_size_in_byte":1220,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"591700251","text":"from numpy import exp, log, real\nimport numpy as np\n\nfrom phasepack.filtergrid import filtergrid \nfrom phasepack.tools import fft2, ifft2\n\n\ndef monofilt(im, nscale, minWaveLength, mult, sigmaOnf, orientWrap=None):\n    if im.dtype not in ['float32', 'float64']:\n        im = np.float64(im)\n        imgdtype = 'float64'\n    else:\n        imgdtype = im.dtype\n\n    # Generate horizontal and vertical frequency grids that vary from\n    # -0.5 to 0.5 \n    radius, u1, u2 = filtergrid(*im.shape)\n\n    radius = radius.astype(imgdtype)\n    u1 = u1.astype(imgdtype)\n    u2 = u2.astype(imgdtype)\n\n    # Get rid of the 0 radius value in the middle (at top left corner after\n    # fftshifting) so that taking the log of the radius, or dividing by the\n    # radius, will not cause trouble.\n    radius[0, 0] = 1\n\n    # The two monogenic filters in the frequency domain\n    H1 = (1j) * u1 / radius\n    H2 = (1j) * u2 / radius\n    \n    IM = fft2(im)\n\n    # The two monogenic filters H1 and H2 are oriented in frequency space\n    # but are not selective in terms of the magnitudes of the\n    # frequencies. 
The code below generates bandpass log-Gabor filters\n    # which are point-wise multiplied by H1 and H2 to produce different\n    # bandpass versions of H1 and H2\n\n    f = [None] * nscale\n    h1f = [None] * nscale\n    h2f = [None] * nscale\n    A = [None] * nscale\n    for s in range(nscale):\n        wavelength = minWaveLength*mult**(s)\n        fo = 1.0/wavelength # Centre frequency of filter.\n        logGabor = exp((-(log(radius/fo))**2) / (2 * log(sigmaOnf)**2))\n        logGabor[0,0] = 0 # undo the radius fudge.\n\n        # Generate bandpass versions of H1 and H2 at this scale\n        H1s = H1 * logGabor\n        H2s = H2 * logGabor \n\n        # Apply filters to image in the frequency domain and get spatial\n        # results \n        f[s] = real(ifft2(IM * logGabor)) \n        h1f[s] = real(ifft2(IM * H1s))\n        h2f[s] = real(ifft2(IM * H2s))\n\n        A[s] = (f[s]**2 + h1f[s]**2 + h2f[s]**2)**0.5 # Magnitude of Energy.\n    return f, h1f, h2f, A#, theta, psi\n","sub_path":"phasepack/monofilt.py","file_name":"monofilt.py","file_ext":"py","file_size_in_byte":2080,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"104148339","text":"import openning as open\n\nname = input('I am XiaoMai. What is your name?\n') \nage = input('I am 8 years old. How about you?\n') # age is a string (str)\nage = int(age)\n\n#1. Order matters: branches are checked top to bottom, so test the narrower range first\nif(age < 30): \n\tprint(f\"{age} is a blossoming age!\")\nelif(age < 50): \n\tprint(f\"At {age} you are as steady as a mountain!\")\nelse:\n\tprint(f\"At {age} you have rich life experience!\")\n\n#2. Statement blocks can be nested\n\n\n#open.icon()\n#open.hello(name) \n\n\n\n\n\n","sub_path":"python/hard_python/robot/529/mai.py","file_name":"mai.py","file_ext":"py","file_size_in_byte":425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"352046971","text":"import matplotlib.pyplot as plt\nimport psycopg2\n\ndbconn = psycopg2.connect(database='postgis', user='nobody', host='iemdb')\ncursor = dbconn.cursor()\n\ncursor.execute(\"\"\"\n SELECT d, count(*), sum(c) from\n (SELECT extract(doy from issue) as d, extract(year from issue) as yr, \n count(*) as c from sbw\n where status = 'NEW' and significance = 'W' and phenomena in ('SV','TO')\n and issue > '2002-01-01' and issue < '2015-01-01'\n GROUP by d, yr) as foo\n GROUP by d ORDER by d ASC\n\n\"\"\")\n\ndays = []\ncnt = []\navg = []\nfor row in cursor:\n    days.append( row[0] )\n    cnt.append( float(row[1]) / 13.0 * 100.0)\n    avg.append( float(row[2]) / 13.0 )\n\n(fig, ax) = plt.subplots(1,1)\nax.bar(days, cnt, fc='green', ec='green')\n\ny2 = ax.twinx()\ny2.bar(days, avg, fc='skyblue', ec='skyblue')\nax.grid(True)\n#y2.grid(ls=\"-\", lw=5, zorder=-1) \nax.set_xticks( (1,32,60,91,121,152,182,213,244,274,305,335,365) )\nax.set_xticklabels( ('Jan','Feb','Mar','Apr','May','Jun','Jul','Aug','Sep','Oct','Nov','Dec') )\nax.set_xlim(0,366)\nax.set_title(\"2002-2012 United States Daily\\nSevere T'Storm + Tornado Warning Frequencies\")\nax.set_ylabel(\"Percent Years with 1+ Warning [%]\", color='green')\ny2.set_ylabel(\"Average Warning Count\", color='skyblue')\n\ny2.tick_params(axis='y', colors='skyblue')\nax.tick_params(axis='y', colors='green')\n\nfig.savefig('test.png')\n","sub_path":"scripts/feature/warnings/daily_freq.py","file_name":"daily_freq.py","file_ext":"py","file_size_in_byte":1329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"182753307","text":"from socket import *\n\nMME_IP = '192.168.0.108'\nMME_SERVER_PORT = 37000\n\nvUser_IP = '192.168.0.103'\nMME_CLIENT_PORT = 21000\n\nwith socket(AF_INET, SOCK_STREAM) as mme_serverSocket:\n    mme_serverSocket.bind((MME_IP, MME_SERVER_PORT))\n    
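# Accept incoming connections and compare each received RES with the expected XRES stored in xres.txt.\n    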
mme_serverSocket.listen()\n    while True:\n        conn, addr = mme_serverSocket.accept()\n        with conn:\n            res = conn.recv(20000)\n            xresFile = open('xres.txt', 'r')\n            line = str(xresFile.read())\n            xresFile.close()\n            if res.decode('utf-8') == line:\n                print(\"User authenticated\")\n                with socket(AF_INET, SOCK_STREAM) as mme_clientSocket:\n                    mme_clientSocket.connect((vUser_IP, MME_CLIENT_PORT))\n                    mme_clientSocket.sendall(bytes('200 OK', 'utf-8'))\n                    mme_clientSocket.close()","sub_path":"Edge/compare_res.py","file_name":"compare_res.py","file_ext":"py","file_size_in_byte":812,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"15751409","text":"from allauth.account.adapter import DefaultAccountAdapter\nfrom allauth.socialaccount.adapter import DefaultSocialAccountAdapter\nfrom allauth.exceptions import ImmediateHttpResponse\nfrom django.contrib.auth.models import Group\nfrom django.http import HttpResponseRedirect\nfrom django.core.urlresolvers import reverse\nfrom django.contrib import messages\nfrom django.db import IntegrityError\n\nfrom .forms import CredentialTwitterForm, CredentialWeiboForm, CredentialTumblrForm\nfrom .models import Credential\n\n\nclass AccountAdapter(DefaultAccountAdapter):\n    def save_user(self, request, user, form, commit=True):\n        # Automatically create a group for the user.\n        user = DefaultAccountAdapter.save_user(self, request, user, form, commit=True)\n        group = Group.objects.create(name=user.username)\n        group.save()\n        user.groups.add(group)\n        user.save()\n        return user\n\n\nclass SocialAccountAdapter(DefaultSocialAccountAdapter):\n    def pre_social_login(self, request, sociallogin):\n        credential_name = u\"{}'s {} credential\".format(sociallogin.user.username or request.user.username,\n                                                       sociallogin.token.app.name)\n        if sociallogin.token.app.provider == 'twitter':\n            form = CredentialTwitterForm({\n                'name': credential_name,\n                'platform': Credential.TWITTER,\n                'consumer_key': sociallogin.token.app.client_id,\n                'consumer_secret': sociallogin.token.app.secret,\n                'access_token': sociallogin.token.token,\n                'access_token_secret': sociallogin.token.token_secret,\n            })\n        elif sociallogin.token.app.provider == 'tumblr':\n            form = CredentialTumblrForm({\n                'name': credential_name,\n                'platform': Credential.TUMBLR,\n                'api_key': sociallogin.token.app.client_id,\n            })\n        elif sociallogin.token.app.provider == 'weibo':\n            form = CredentialWeiboForm({\n                'name': credential_name,\n                'platform': Credential.WEIBO,\n                'access_token': sociallogin.token.token,\n            })\n        else:\n            assert False, \"Unrecognized social login provider\"\n        form.instance.user = request.user\n        try:\n            credential = form.save()\n        except IntegrityError:\n            messages.warning(request, \"Credential already exists.\")\n            raise ImmediateHttpResponse(HttpResponseRedirect(reverse('credential_list')))\n\n        messages.info(request, \"New credential created.\")\n\n        raise ImmediateHttpResponse(HttpResponseRedirect(reverse('credential_detail', args=(credential.pk,))))\n","sub_path":"sfm/ui/auth.py","file_name":"auth.py","file_ext":"py","file_size_in_byte":2711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"653753608","text":"class FClass:\r\n    n=10\r\n    def total(self,N):\r\n        self.total = int(self.n) + int(N)\r\n\r\nclass SClass:\r\n    def total(self,s):\r\n        self.total = len(str(s))\r\nf = FClass()\r\ns = SClass()\r\nf.total(45)\r\ns.total(45)\r\nprint (f.total) # Output: 55\r\nprint (s.total) # Output: 2\r\n","sub_path":"I семестр/Програмування (Python)/Лабораторні/Лисенко 
6116/Python/Презентації/ex23/Ex2.py","file_name":"Ex2.py","file_ext":"py","file_size_in_byte":295,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"276780263","text":"#!/usr/bin/env python3\n\nfrom modules import pg8000\nimport configparser\n\n\n# Define some useful variables\nERROR_CODE = 55929\n\n#####################################################\n## Database Connect\n#####################################################\n\ndef database_connect():\n # Read the config file\n config = configparser.ConfigParser()\n config.read('config.ini')\n\n # Create a connection to the database\n connection = None\n try:\n connection = pg8000.connect(database=config['DATABASE']['user'],\n user=config['DATABASE']['user'],\n password=config['DATABASE']['password'],\n host=config['DATABASE']['host'])\n except pg8000.OperationalError as e:\n print(\"\"\"Error, you haven't updated your config.ini or you have a bad\n connection, please try again. (Update your files first, then check\n internet connection)\n \"\"\")\n print(e)\n #return the connection to use\n return connection\n\n#####################################################\n## Login\n#####################################################\n\ndef check_login(email, password):\n # Dummy data\n #val = ['Shadow', 'Mr', 'Evan', 'Nave', '123 Fake Street, Fakesuburb', 'SIT', '01-05-2016', 'Premium', '1']\n # Ask for the database connection, and get the cursor set up\n conn = database_connect()\n if(conn is None):\n return ERROR_CODE\n cur = conn.cursor()\n r = None\n try:\n # Try executing the SQL and get from the database\n sql = \"\"\"SELECT * FROM loginchk(%s,%s)\"\"\"\n cur.execute(sql, (email, password))\n r = cur.fetchone()\n except:\n # If there were any errors, return a NULL row printing an error to the debug\n print(\"Error with Database\")\n cur.close() # Close the cursor\n conn.close() # Close the connection to the db\n return r\n\n\n#####################################################\n## Homebay\n#####################################################\ndef update_homebay(email, bayname):\n # TODO\n # Update the user's homebay\n\t\n\t# Ask for the database connection, and get the cursor set up\n conn = database_connect()\n if(conn is None):\n return ERROR_CODE\n cur = conn.cursor()\n \n try:\n # Try executing the SQL and get from the database\n sql = \"\"\"SELECT * FROM uph1(%s)\"\"\"\n cur.execute(sql, (bayname,))\n r = cur.fetchone()\n id = r[0]\n sql2 = \"\"\"UPDATE carsharing.member\n\t\t SET homebay = %s\n\t\t\t WHERE email = %s\"\"\"\n cur.execute(sql2, (id, email))\n conn.commit()\n cur.close()\n conn.close()\n return True\n except:\n # If there were any errors, we print something nice and return false\n print(\"Error with Database\")\n conn.rollback()\n cur.close() # Close the cursor\n conn.close() # Close the connection to the db\n return False\n\n \ndef update_session_homebay(email):\n #val = ['66XY99', 'Ice the Cube','Nissan', 'Cube', '2007', 'auto', 'Luxury', '5', 'SIT', '8', 'http://example.com']\n\n # Get details of member and return current homebay to be stored in session\n \n conn = database_connect()\n if(conn is None):\n return ERROR_CODE\n cur = conn.cursor()\n r = None\n try:\n # Try executing the SQL and get from the database\n sql = \"\"\"SELECT * FROM upses(%s)\"\"\"\n cur.execute(sql, (email,))\n r = cur.fetchone()\n except:\n # If there were any errors, return a NULL row printing an error to the debug\n print(\"Error with Database\")\n cur.close() # Close the 
cursor\n conn.close()\n return r \n \n#####################################################\n## Booking (make, get all, get details)\n#####################################################\n\ndef make_booking(email, car_rego, date, hour, duration):\n # TODO\n # Insert a new booking\n # Make sure to check for:\n # - If the member already has booked at that time\n # - If there is another booking that overlaps\n # - Etc.\n # return False if booking was unsuccessful :)\n # We want to make sure we check this thoroughly\n \n # Ask for the database connection, and get the cursor set up\n conn = database_connect()\n if(conn is None):\n return ERROR_CODE\n cur = conn.cursor()\n try:\n cur.execute(\"\"\"SET TRANSACTION ISOLATION LEVEL SERIALIZABLE\"\"\")\n startTime = date + ' ' + hour +':00:00'\n\t\t\n\t\t#get the endTime = startTime + duration\n dur = duration + ':00:00'\t\n stmt = \"\"\"SELECT (to_timestamp(%s , 'YYYY-MM-DD HH24:MI:SS')+ %s)::timestamp without time zone \"\"\"\n cur.execute(stmt, (startTime, dur))\n result = cur.fetchone()\n endTime = result[0]\n \n #checks to see if car and user are free. If booked a value will be returned, if free None will be returned\n sql = \"\"\"SELECT bookingid\n FROM booking JOIN member ON (madeby = memberno)\n WHERE (car=%s OR email = %s) AND (endtime > %s AND starttime < %s)\n \"\"\"\n cur.execute(sql, (car_rego, email, startTime, endTime))\n r = cur.fetchone()\n print(r)\n \n #If None returned make the booking\n if r == None:\n \n stmt = \"\"\"SELECT memberno \n FROM member\n WHERE email = %s \"\"\"\n cur.execute(stmt, (email,))\n result = cur.fetchone()\n memberno = result[0]\n \n stmt =\"\"\"INSERT INTO Booking(car, madeby, startTime, endTime) VALUES (%s, %s, %s, %s)\"\"\"\n\t\t\n cur.execute(stmt, (car_rego, memberno, startTime,endTime))\n \n stmt =\"\"\"UPDATE member\n SET stat_nrofbookings = stat_nrofbookings + 1\n WHERE email = %s\"\"\"\n\t\t\n cur.execute(stmt, (email,))\n \n #If already booked rollback and return 'booked' to display error message\n else:\n conn.rollback()\n cur.close() # Close the cursor\n conn.close() # Close the connection to the db\n return 'booked'\n\t\t\n\t\t#commit booking and update stat_nrofbookings at the same time\n conn.commit() \n cur.close() # Close the cursor\n conn.close() # Close the connection to the db\n return True\n except:\n # If there were any errors, we print something nice and return false\n print(\"Error fetching from database\")\n conn.rollback()\n cur.close() # Close the cursor\n conn.close() # Close the connection to the db\n return False \n\ndef update_session_stat_nrofbookings(email):\n #val = ['66XY99', 'Ice the Cube','Nissan', 'Cube', '2007', 'auto', 'Luxury', '5', 'SIT', '8', 'http://example.com']\n\n # Get details of member and update current stat_nrofbookings to be stored in session\n \n conn = database_connect()\n if(conn is None):\n return ERROR_CODE\n cur = conn.cursor()\n r = None\n try:\n # Try executing the SQL and get from the database\n sql = \"\"\"SELECT * FROM upses2(%s)\"\"\"\n cur.execute(sql, (email,))\n r = cur.fetchone()\n except:\n # If there were any errors, return a NULL row printing an error to the debug\n print(\"Error with Database\")\n cur.close() # Close the cursor\n conn.close()\n return r \n \n \n\ndef get_all_bookings(email):\n #val = [['66XY99', 'Ice the Cube', '01-05-2016', '10', '4', '29-04-2016'],['66XY99', 'Ice the Cube', '27-04-2016', '16'], ['WR3KD', 'Bob the SmartCar', '01-04-2016', '6']]\n\n # TODO\n # Get all the bookings made by this member's email\n conn = 
database_connect()\n if(conn is None):\n return ERROR_CODE\n cur = conn.cursor()\n r = None\n try:\n # Try executing the SQL and get from the database\n sql = \"\"\"SELECT car, name, date(starttime), date_part('hour', starttime)::int\n FROM carsharing.booking JOIN carsharing.car ON (car = regno) JOIN carsharing.member ON (madeby = memberno)\n WHERE email=%s\n ORDER BY date(starttime) DESC\"\"\"\n cur.execute(sql, (email,))\n r = cur.fetchall()\n except:\n # If there were any errors, return a NULL row printing an error to the debug\n print(\"Error with Database\")\n cur.close() # Close the cursor\n conn.close()\n return r\n\ndef get_booking(b_date, b_hour, car):\n #val = ['Shadow', '66XY99', 'Ice the Cube', '01-05-2016', '10', '4', '29-04-2016', 'SIT']\n\n # TODO\n # Get the information about a certain booking\n # It has to have the combination of date, hour and car\n \n conn = database_connect()\n if(conn is None):\n return ERROR_CODE\n cur = conn.cursor()\n r = None\n try:\n # Try executing the SQL and get from the database\n sql = \"\"\"SELECT member.nickname, booking.car, car.name, date(starttime), date_part('hour', starttime)::int, ((date_part('hour', endtime)) - (date_part('hour', starttime)))::int, date(whenbooked), carbay.name\n FROM carsharing.booking JOIN carsharing.car ON (car = regno) JOIN carsharing.member ON (madeby = memberno) JOIN carsharing.carbay ON (parkedat = bayid)\n WHERE date(starttime) = %s AND date_part('hour', starttime) = %s AND booking.car = %s\"\"\"\n cur.execute(sql, (b_date, b_hour, car))\n r = cur.fetchone()\n except:\n # If there were any errors, return a NULL row printing an error to the debug\n print(\"Error with Database\")\n cur.close() # Close the cursor\n conn.close()\n return r\n\n\n#####################################################\n## Car (Details and List)\n#####################################################\n\ndef get_car_details(regno):\n #val = ['66XY99', 'Ice the Cube','Nissan', 'Cube', '2007', 'auto', 'Luxury', '5', 'SIT', '8', 'http://example.com']\n # TODO\n # Get details of the car with this registration number\n # Return the data (NOTE: look at the information, requires more than a simple select. 
NOTE ALSO: ordering of columns)\n    \n    conn = database_connect()\n    if(conn is None):\n        return ERROR_CODE\n    cur = conn.cursor()\n    r = None\n    try:\n        # Try executing the SQL and get from the database\n        sql = \"\"\"SELECT regno, car.name, make, model, year, transmission, category, capacity, carbay.name\n                 FROM car JOIN carmodel USING (make, model) JOIN carbay ON (parkedat = bayid)\n                 WHERE regno=%s\"\"\"\n        cur.execute(sql, (regno,))\n        r = cur.fetchone()\n    except:\n        # If there were any errors, return a NULL row printing an error to the debug\n        print(\"Error with Database\")\n    cur.close() # Close the cursor\n    conn.close()\n    return r\n    \n\ndef get_car_availability(regno):\n    # Build today's hour-by-hour availability list for this car:\n    # for each of the 24 hours, check whether any booking overlaps that hour\n    # and record ' Available ' or ' Booked ' accordingly\n    \n    # Ask for the database connection, and get the cursor set up\n    conn = database_connect()\n    if(conn is None):\n        return ERROR_CODE\n    cur = conn.cursor()\n    availability = []\n    try:\n        i = 0\n        \n        while i < 24:\n            sql = \"\"\"SELECT bookingid\n                FROM booking JOIN member ON (madeby = memberno)\n                WHERE car=%s AND (date(starttime) = current_date) AND ((extract(hour FROM endtime)) > %s AND (extract(hour FROM starttime)) < %s)\n                \"\"\"\n            cur.execute(sql, (regno, i, i + 1))\n            r = cur.fetchone()\n            \n            if r == None:\n                availability.append(' Available ') \n            else:\n                availability.append(' Booked ') \n            i +=1\n    except:\n        # If there were any errors, we print something nice and 
return a NULL value\n print(\"Error fetching from database\")\n\n cur.close() # Close the cursor\n conn.close() # Close the connection to the db\n return val\n\ndef get_bay(name):\n #val = ['SIT', 'Home to many (happy?) people.', '123 Some Street, Boulevard', '-33.887946', '151.192958']\n\n # TODO\n # Get the information about the bay with this unique name\n # Make sure you're checking ordering ;)\n \n # Get the database connection and set up the cursor\n conn = database_connect()\n if(conn is None):\n return ERROR_CODE\n cur = conn.cursor()\n r = None\n try:\n # Try executing the SQL and get from the database\n #gps_lat and gps_long are purposely in this order as the sample data/schema do not match and have lat in long and vice versa\n sql = \"\"\"SELECT name, description, address, gps_long, gps_lat, walkscore, mapurl\n FROM carbay\n WHERE name = %s\"\"\"\n cur.execute(sql, (name,))\n r = cur.fetchone()\n except:\n # If there were any errors, return a NULL row printing an error to the debug\n print(\"Error with Database\")\n cur.close() # Close the cursor\n conn.close()\n return r\n\ndef search_bays(search_term):\n #val = [['SIT', '123 Some Street, Boulevard', '-33.887946', '151.192958']]\n\n # TODO\n # Select the bays that match (or are similar) to the search term\n # You may like this\n \n # Get the database connection and set up the cursor\n conn = database_connect()\n if(conn is None):\n return ERROR_CODE\n cur = conn.cursor()\n val = None\n try:\n search_T = '%' + search_term + '%'\n # Try getting all the information returned from the query\n sql = \"\"\" SELECT carbay.name, address, COUNT(regno)\n FROM carsharing.carbay JOIN carsharing.car ON (bayid = parkedat)\n WHERE (Lower(carbay.name) LIKE Lower(%s)) OR Lower(carbay.address) LIKE Lower(%s)\n GROUP BY carbay.name, address\n ORDER BY carbay.name ASC\"\"\"\n cur.execute(sql, (search_T, search_T))\n val = cur.fetchall()\n except:\n # If there were any errors, return a NULL row printing an error to the debug\n print(\"Error with Database\")\n cur.close() # Close the cursor\n conn.close()\n print(search_term)\n return val\n\ndef get_cars_in_bay(bay_name):\n #val = [ ['66XY99', 'Ice the Cube'], ['WR3KD', 'Bob the SmartCar']]\n\n # TODO\n # Get the cars inside the bay with the bay name\n # Cars who have this bay as their bay :)\n # Return simple details (only regno and name)\n\n # Get the database connection and set up the cursor\n conn = database_connect()\n if(conn is None):\n return ERROR_CODE\n cur = conn.cursor()\n r = None\n try:\n # Try executing the SQL and get from the database\n #gps_lat and gps_long are purposely in this order as the sample data/schema do not match and have lat in long and vice versa\n sql = \"\"\"SELECT car.regno, car.name\n FROM carsharing.carbay JOIN carsharing.car ON (bayid = parkedat)\n WHERE carbay.name = %s\"\"\"\n cur.execute(sql, (bay_name,))\n r = cur.fetchall()\n except:\n # If there were any errors, return a NULL row printing an error to the debug\n print(\"Error with Database\")\n cur.close() # Close the cursor\n conn.close()\n return r","sub_path":"CarShareClient/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":17811,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"151947831","text":"from bocadillo import App, Recipe, WebSocket\n\n\ndef test_websocket_recipe_route(app: App, client):\n chat = Recipe(\"chat\")\n\n @chat.websocket_route(\"/room/{name}\", receive_type=\"json\", send_type=\"text\")\n async def 
chat_room(ws: WebSocket, name: str):\n message = await ws.receive()\n await ws.send(f\"[{name}]: {message['text']}\")\n\n app.recipe(chat)\n\n with client.websocket_connect(\"/chat/room/test\") as ws:\n ws.send_json({\"text\": \"Hello\"})\n assert ws.receive_text() == \"[test]: Hello\"\n","sub_path":"tests/test_recipes_websockets.py","file_name":"test_recipes_websockets.py","file_ext":"py","file_size_in_byte":523,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"577213427","text":"#\n# [246] Strobogrammatic Number\n#\n# https://leetcode.com/problems/strobogrammatic-number/description/\n#\n# algorithms\n# Easy (40.37%)\n# Total Accepted: 36.9K\n# Total Submissions: 91.4K\n# Testcase Example: '\"69\"'\n#\n# A strobogrammatic number is a number that looks the same when rotated 180\n# degrees (looked at upside down).\n#\n# Write a function to determine if a number is strobogrammatic. The number is\n# represented as a string.\n#\n# Example 1:\n#\n#\n# Input: \"69\"\n# Output: true\n#\n#\n# Example 2:\n#\n#\n# Input: \"88\"\n# Output: true\n#\n# Example 3:\n#\n#\n# Input: \"962\"\n# Output: false\n#\n\n\nclass Solution:\n def isStrobogrammatic(self, num):\n \"\"\"\n :type num: str\n :rtype: bool\n \"\"\"\n imps = ['2', '3', '4', '5', '7']\n dic = {'1': '1',\n '6': '9',\n '8': '8',\n '9': '6',\n '0': '0'}\n for c in imps:\n if c in num:\n return False\n rev = ''.join([dic[x] for x in num[::-1]])\n return rev == num\n","sub_path":"246.strobogrammatic-number.python3.py","file_name":"246.strobogrammatic-number.python3.py","file_ext":"py","file_size_in_byte":1033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"559272759","text":"import datetime\nfrom pydriller import RepositoryMining\nfrom collections import Counter\nfrom wordcloud import WordCloud\nfrom services.utilities import Util\nimport matplotlib.pyplot as plt\n\n# Class to analysis all commits from a branch of git repository\nclass CheckCommits:\n # constructor pass the path of repository\n def __init__(self, repository, name):\n self.repository = repository\n self.name = name\n \n # List all Commits from Authors\n # return a dictionary like this: hash, author, date, list of files in commit\n # dictionary = {'hash': ['author', 'date of commit', [file1, file2, ...]]}\n def dictionaryWithAllCommmits(self):\n dictionaryAux = {}\n for commit in RepositoryMining(self.repository).traverse_commits():\n commitAuthorNameFormatted = '{}'.format(commit.author.name)\n commitAuthorDateFormatted = '{}'.format(commit.author_date)\n listFilesModifiedInCommit = []\n for modification in commit.modifications:\n itemMofied = '{}'.format(modification.filename)\n listFilesModifiedInCommit.append(itemMofied)\n dictionaryAux[commit.hash] = [commitAuthorNameFormatted, commitAuthorDateFormatted, listFilesModifiedInCommit] \n return dictionaryAux\n\n # Return a Counter with frequency of each file analysed\n # The Counter like this:\n # Counter({file1: frequency of file1, file2: frequence of file2, ...})\n def counterWithFrequencyOfFile(self):\n listFull = []\n for key, value in self.dictionaryWithAllCommmits().items():\n listAxu = []\n listAxu = value[2]\n for eachItem in listAxu:\n listFull.append(eachItem)\n return Counter(listFull)\n\n # Generate a Word of Cloud about each file according frequence\n def generateWordCloud(self):\n dictionaryOfFileFrequence = self.counterWithFrequencyOfFile()\n wordcloud = WordCloud(width = 3000, height = 2000, 
random_state=1, background_color='black', colormap='Set2', collocations=False)\n wordcloud.generate_from_frequencies(frequencies=dictionaryOfFileFrequence)\n # Display the generated image:\n plt.figure()\n plt.imshow(wordcloud, interpolation='bilinear')\n plt.axis(\"off\")\n # Save the image in the img folder:\n pathFile = \"/Users/armandosoaressousa/testes/sysrepomsr/img/\"\n fileName = pathFile + self.name + \".png\"\n wordcloud.to_file(fileName)","sub_path":"services/check_commits.py","file_name":"check_commits.py","file_ext":"py","file_size_in_byte":2481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"243472847","text":"'''\nAuthor: Eduardo D Contreras\nDate: 01/30/2021\nClass: ENGR498 - Design of Payload for near-space deployment of IR Optics\nProject Sponsors: Kira Hart & Meredith Kupinski\n\nMentor: Catherine Merrill\n\nTeam Members: Nayleth Ramirez, Jeremy Parkinson, Jaclyn John, Thor Niel, Wassim Khawam\n\nDescription: The purpose of this program is\n'''\n\n# put all of your import statements below this line and then delete this comment\nimport os\nimport h5py\n\n# put all of your function definitions below this line and then delete this comment\n\ndef traverse_datasets(hdf5_file):\n\n \"\"\"Traverse all datasets across all groups in HDF5 file.\"\"\"\n\n def h5py_dataset_iterator(g, prefix=''):\n for key in g.keys():\n item = g[key]\n path = '{}/{}'.format(prefix, key)\n if isinstance(item, h5py.Dataset): # test for dataset\n yield (path, item)\n elif isinstance(item, h5py.Group): # test for group (go down)\n yield from h5py_dataset_iterator(item, path)\n\n with h5py.File(hdf5_file, 'r') as f:\n print(\"Filename: \", f)\n print(\"Timestamp: \", f.attrs[\"OS_time\"])\n for (path, dset) in h5py_dataset_iterator(f):\n print(path, dset)\n\n return None\n\n\n\n #==========================================================\ndef main():\n '''\n Main function currently executes a print statement which shows files in the current working directory.\n Utilized for cross-referencing. 
Main function traverses all datasets across all groups in each HDF5 file found.\n    It opens each HDF5 file and prints its datasets along with the file size in bytes.\n    '''\n    # put main code here, make sure each line is indented one level, and delete this comment\n\n    #Check Current Working Directory (cwd)\n    cwd = os.getcwd()\n    files = os.listdir(cwd)\n    print(\"Files in %r: %s\" % (cwd, files))\n    print()\n    for filename in files:\n        if filename.endswith(\".hdf5\"):\n            #Open the test file in Current Working Directory\n            traverse_datasets(filename)\n            print()\n            print(os.stat(filename).st_size, \"bytes \\n\") #344200 bytes = 0.3442 Megabyte\n\n\nif __name__ == '__main__':\n    main()\n\n","sub_path":"ImageCapture/NMEA Parse/view_hdf5.py","file_name":"view_hdf5.py","file_ext":"py","file_size_in_byte":2195,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"194468522","text":"from __future__ import print_function\nfrom __future__ import division\nfrom builtins import str\nfrom builtins import zip\nfrom builtins import range\nfrom past.utils import old_div\nfrom builtins import object\nimport math\n\nimport numpy as np\nimport pandas as pd\n\nfrom bi.common import NormalCard, C3ChartData, HtmlData\nfrom bi.common import NormalChartData, ChartJson\n# from bi.stats import TuckeyHSD\nfrom bi.narratives import utils as NarrativesUtils\n\n\nclass Card(object):\n    def __init__(self, heading):\n        self.heading = heading\n        self.charts = {}\n        self.paragraphs = []\n        self.bubble_data = []\n\n    def add_chart(self, key, chart):\n        self.charts[key]=chart\n\n    def add_paragraph(self, para):\n        self.paragraphs.append(para)\n\n    def add_bubble_data(self, bubble_data):\n        self.bubble_data.append(bubble_data)\n\n\nclass BubbleData(object):\n    def __init__(self,value, text):\n        self.value = value\n        self.text = text\n\nclass chart(object):\n    def __init__(self, data, labels='', heading = ''):\n        self.heading = heading\n        self.data = data\n        self.labels = labels\n\n    def add_data_c3(self, data_c3):\n        self.data_c3 = data_c3\n\nclass paragraph(object):\n    def __init__(self, header, content):\n        self.header = header\n        self.content = [content]\n\nclass OneWayAnovaNarratives(object):\n    THRESHHOLD_TOTAL = 0.75\n    ALPHA = 0.05\n\n    #@accepts(object, (str, basestring), (str, basestring), OneWayAnovaResult)\n    def __init__(self, df_context, measure_column, dimension_column, measure_anova_result, trend_result, result_setter, dimensionNode,base_dir):\n        self._dataframe_context = df_context\n        self._dimensionNode = dimensionNode\n        self._result_setter = result_setter\n        self._measure_column = measure_column\n        self._measure_column_capitalized = '%s%s' % (measure_column[0].upper(), measure_column[1:])\n        self._dimension_column = dimension_column\n        self._dimension_column_capitalized = '%s%s' % (dimension_column[0].upper(), dimension_column[1:])\n        self._measure_anova_result = measure_anova_result\n        self._dimension_anova_result = self._measure_anova_result.get_one_way_anova_result(self._dimension_column)\n        self._overall_trend_data = self._measure_anova_result.get_trend_data()\n        if self._overall_trend_data:\n            self._dataLevel = self._overall_trend_data.get_data_level()\n            self._trendDuration = self._overall_trend_data.get_duration()\n        else:\n            self._trendDuration = 0\n            self._dataLevel = None\n        self._dimension_trend_data = self._measure_anova_result.get_topLevelDfAnovaResult(self._dimension_column).get_trend_data()\n        self._blockSplitter = \"|~NEWBLOCK~|\"\n        self._highlightFlag = \"|~HIGHLIGHT~|\"\n        # 
self.effect_size = anova_result.get_effect_size()\n self.card1 = ''\n self.card2 = ''\n self.card3 = ''\n self._base_dir = base_dir\n self._binAnalyzedCol = False\n customAnalysis = self._dataframe_context.get_custom_analysis_details()\n if customAnalysis != None:\n binnedColObj = [x[\"colName\"] for x in customAnalysis]\n if binnedColObj != None and (self._dimension_column in binnedColObj):\n self._binAnalyzedCol = True\n print(\"BinAnalyzedCol...........\")\n print(self._binAnalyzedCol)\n self._generate_narratives()\n\n def _generate_narratives(self):\n self._card3_required = False\n self._generate_card1()\n\n if self._dataframe_context.get_job_type() != \"prediction\":\n print(\"duration is \",self._trendDuration)\n if self._trendDuration > 0:\n self._generate_card2()\n if self._card3_required:\n self._generate_card3()\n self._dimensionNode.add_a_card(self._anovaCard1)\n if self._card3_required and self._trendDuration >0 :\n self._dimensionNode.add_a_card(self._anovaCard3)\n\n\n def _generate_title(self):\n self.title = 'Impact of %s on %s' % (self._dimension_column_capitalized, self._measure_column_capitalized)\n\n def _get_c3chart_card1_chart1(self, total, average):\n data = []\n for key in total:\n data.append({'dimension':str(key), 'total': total[key], 'average':average[key]})\n data = sorted(data,key=lambda x:x[\"total\"],reverse=True)\n output = ChartJson(data = NormalChartData(data).get_data(),axes={'x':'dimension','y':'total','y2':'average'},\n label_text={'x':self._dimension_column_capitalized,\n 'y':'Total '+self._measure_column_capitalized,\n 'y2':'Average '+self._measure_column_capitalized},\n chart_type='bar')\n return output\n\n\n\n def _get_c3chart_trend(self,data,x,y,y2):\n key_list = ['k1','k2','k3']\n data_c3 = []\n for row in zip(data[x],data[y],data[y2]):\n row_data = dict(list(zip(key_list,row)))\n try:\n row_data[\"k1\"] = str(row_data[\"k1\"].to_datetime().date())\n except:\n row_data[\"k1\"] = str(row_data[\"k1\"])\n data_c3.append(row_data)\n json_chart = ChartJson(data = NormalChartData(data_c3).get_data(),\n axes={'x':'k1','y':'k2','y2':'k3'},\n label_text={'x':x,'y':y,'y2':y2},\n legend={\"k1\":x,\"k2\":y,\"k3\":y2},\n chart_type = 'line')\n json_chart.set_y2axis_number_format(\".2s\")\n json_chart.set_yaxis_number_format(\".2s\")\n return json_chart\n\n\n def _get_card3_scatterchart(self,data_c3):\n return ChartJson(data = NormalChartData(data_c3).get_data(), chart_type='scatter_tooltip')\n\n def _generate_card1(self):\n self._anovaCard1 = NormalCard(name='Impact on '+self._measure_column_capitalized)\n lines = []\n lines += NarrativesUtils.block_splitter('
'+self._measure_column_capitalized+': Impact of '+self._dimension_column_capitalized+' on '+self._measure_column_capitalized+'
',self._blockSplitter)\n self.card1 = Card('Impact of '+self._dimension_column_capitalized+' on '+self._measure_column_capitalized)\n dim_table = self._dimension_anova_result.get_level_dataframe()\n # print dim_table\n keys = dim_table['levels']\n totals = dim_table['total']\n means = dim_table['average']\n counts = dim_table['count']\n if len(keys)>=5:\n self._card3_required=True\n\n group_by_total = {}\n group_by_mean = {}\n\n for k,t,m in zip(keys,totals,means):\n group_by_total[k] = t\n group_by_mean[k] = m\n\n chart1 = chart(data=group_by_total, labels = {self._dimension_column_capitalized:self._measure_column_capitalized})\n chart2 = chart(data=group_by_mean, labels = {self._dimension_column_capitalized:self._measure_column_capitalized})\n\n self.card1.add_chart('group_by_total',chart1)\n self.card1.add_chart('group_by_mean',chart2)\n # st_info = [\"Test : ANOVA\", \"p-value: 0.05\", \"F-stat: \"+str(round(self._dimension_anova_result.get_f_value(),2))]\n statistical_info_array=[\n (\"Test Type\",\"ANOVA\"),\n (\"P-Value\",\"0.05\"),\n (\"F Value\",str(round(self._dimension_anova_result.get_f_value(),2))),\n (\"Inference\",\"There is a significant effect of {} on {} (target).\".format(self._dimension_column_capitalized,self._measure_column_capitalized) )\n ]\n statistical_info_array = NarrativesUtils.statistical_info_array_formatter(statistical_info_array)\n card1_chart1 = C3ChartData(data=self._get_c3chart_card1_chart1(group_by_total,group_by_mean),info=statistical_info_array)\n\n self._result_setter.set_anova_chart_on_scored_data({self._dimension_column:card1_chart1})\n lines += [card1_chart1]\n\n\n # top_group_by_total = keys[totals.index(max(totals))]\n top_group_by_total = keys[totals.argmax()]\n sum_top_group_by_total = max(totals)\n avg_top_group_by_total = means[totals.argmax()]\n bubble1 = BubbleData(NarrativesUtils.round_number(sum_top_group_by_total,1),\n top_group_by_total + ' is the largest contributor to ' + self._measure_column)\n # self.card1.add_bubble_data(bubble1)\n\n top_group_by_mean = keys[means.argmax()]\n sum_top_group_by_mean = totals[means.argmax()]\n avg_top_group_by_mean = max(means)\n bubble2 = BubbleData(NarrativesUtils.round_number(avg_top_group_by_mean,1),\n top_group_by_mean + ' has the highest average ' + self._measure_column)\n # self.card1.add_bubble_data(bubble2)\n\n groups_by_total = sorted(zip(totals,keys), reverse=True)\n sum_total = sum(totals)\n uniformly_distributed = True\n five_percent_total = 0.05*sum_total\n fifteen_percent_total = 0.15*sum_total\n sorted_total = sorted(totals, reverse=True)\n if len(groups_by_total)%2 == 0:\n fifty_percent_index = int(old_div(len(groups_by_total),2))\n top_fifty_total = sum(sorted_total[:fifty_percent_index])\n bottom_fifty_total = sum(sorted_total[fifty_percent_index:])\n if top_fifty_total - bottom_fifty_total >= fifteen_percent_total:\n uniformly_distributed = False\n else:\n fifty_percent_index = int(old_div(len(groups_by_total),2))+1\n top_fifty_total = sum(sorted_total[:fifty_percent_index])\n bottom_fifty_total = sum(sorted_total[fifty_percent_index-1:])\n if top_fifty_total - bottom_fifty_total >= fifteen_percent_total:\n uniformly_distributed = False\n top_groups = None\n top_groups_contribution = None\n if (not uniformly_distributed) and len(groups_by_total)>2:\n max_diff = 0\n diffs = [sorted_total[i]-sorted_total[i+1] for i in range(fifty_percent_index)]\n max_diff_index = diffs.index(max(diffs[1:]))\n top_groups = [k for t,k in groups_by_total[:max_diff_index+1]]\n top_groups_contribution = 
old_div(sum(sorted_total[:max_diff_index+1])*100,sum_total)\n bottom_groups = []\n bottom_groups_contribution = 0\n for t,k in groups_by_total[:0:-1]:\n bottom_groups.append(k)\n bottom_groups_contribution = bottom_groups_contribution + t\n if bottom_groups_contribution >= five_percent_total:\n break\n bottom_groups_contribution = old_div(bottom_groups_contribution*100,sum_total)\n elif not uniformly_distributed:\n top_groups = [groups_by_total[0][1]]\n top_groups_contribution = old_div(groups_by_total[0][0]*100,sum_total)\n bottom_groups = [groups_by_total[1][1]]\n bottom_groups_contribution = old_div(groups_by_total[1][0]*100,sum_total)\n elif uniformly_distributed:\n top_groups = []\n top_groups_contribution = 0\n bottom_groups = []\n bottom_groups_contribution = 0\n\n num_groups = len(keys)\n\n data_dict = {\n 'uniformly_distributed' : uniformly_distributed,\n 'top_groups' : top_groups,\n 'num_top_groups' : len(top_groups),\n 'top_groups_percent' : NarrativesUtils.round_number(top_groups_contribution,2),\n 'dimension_name' : self._dimension_column,\n 'plural_dimension_name' : NarrativesUtils.pluralize(self._dimension_column),\n 'measure_name' : self._measure_column,\n\n 'best_category_by_mean': top_group_by_mean,\n 'best_category_by_mean_cont': round(100.0 * sum_top_group_by_mean / sum(totals), 2),\n 'best_category_by_mean_avg': NarrativesUtils.round_number(avg_top_group_by_mean,2,False),\n\n 'best_category_by_total': top_group_by_total,\n 'best_category_by_total_cont': round(100.0 * sum_top_group_by_total / sum(totals), 2),\n 'best_category_by_total_avg': NarrativesUtils.round_number(avg_top_group_by_total,2,False),\n 'best_category_by_total_sum' : NarrativesUtils.round_number(sum_top_group_by_total,2),\n\n 'bottom_groups': bottom_groups,\n 'num_bottom_groups' : len(bottom_groups),\n 'bottom_groups_percent': NarrativesUtils.round_number(bottom_groups_contribution,2),\n\n 'num_groups' : num_groups\n }\n output = {'header' : 'Overview', 'content': []}\n if self._binAnalyzedCol == True:\n narrativeText = NarrativesUtils.get_template_output(self._base_dir,'anova_template_3_binned_IV.html',data_dict)\n output['content'].append(narrativeText)\n self._result_setter.set_anova_narrative_on_scored_data({self._dimension_column:narrativeText})\n else:\n narrativeText = NarrativesUtils.get_template_output(self._base_dir,'anova_template_3.html',data_dict)\n output['content'].append(narrativeText)\n self._result_setter.set_anova_narrative_on_scored_data({self._dimension_column:narrativeText})\n\n for cnt in output['content']:\n lines += NarrativesUtils.block_splitter(cnt,self._blockSplitter)\n self._anovaCard1.set_card_data(lines)\n self.card1.add_paragraph(dict(output))\n self._result_setter.set_anova_cards_regression_score(self.card1)\n # self.generate_top_dimension_narratives()\n\n def generate_top_dimension_narratives(self):\n topLevelAnova = self._measure_anova_result.get_topLevelDfAnovaResult(self._dimension_column)\n # print topLevelAnova\n top_level = topLevelAnova.get_top_level_name()\n # print top_level\n # tuple of (dimension name,anovaResult,effect_size)\n top_level_sig_dimensions = topLevelAnova.get_top_significant_dimensions(3)\n significant_dimensions = [x[0] for x in top_level_sig_dimensions]\n print(significant_dimensions)\n contributorDict = {}\n for idx,obj in enumerate(top_level_sig_dimensions):\n leveldf = obj[1].get_level_dataframe()\n levelContribution = self.compute_level_contributions(leveldf)\n contributorDict[obj[0]] = {\"level\":levelContribution}\n totalCont = 
round(np.sum([c[1] for c in levelContribution[:3]]),2)\n contributorDict[obj[0]].update({\"total\":totalCont})\n print(contributorDict)\n\n print(\"data dict started\")\n data_dict = {\n 'sig_dims' : significant_dimensions,\n 'num_sig_dims' : len(significant_dimensions),\n 'contributorDict' : contributorDict,\n # 'top1_contributors' : top1_contributors,\n # 'top1_contribution' : NarrativesUtils.round_number(top1_contribution,2),\n # 'num_top1_contributors' : len(top1_contributors),\n # 'top2_contributors' : top2_contributors,\n # 'top2_contribution' : NarrativesUtils.round_number(top2_contribution,2),\n # 'num_top2_contributors' : len(top2_contributors),\n # 'top3_contributors' : top3_contributors,\n # 'top3_contribution' : NarrativesUtils.round_number(top3_contribution,2),\n # 'num_top3_contributors' : len(top3_contributors),\n 'target' : self._measure_column,\n 'dimension' : self._dimension_column,\n 'top_level' : top_level,\n 'highlightFlag':self._highlightFlag,\n 'blockSplitter':self._blockSplitter\n\n }\n\n output = {'header' : 'Key Factors influencing '+self._measure_column+' from '+top_level,\n 'content': []}\n if self._binAnalyzedCol == True:\n output = {'header' : 'Key Factors influencing '+self._measure_column+' from '+ self._dimension_column+' - '+ top_level,'content': []}\n output['content'].append(NarrativesUtils.get_template_output(self._base_dir,'anova_template_4_binned_IV.html',data_dict))\n else:\n output = {'header' : 'Key Factors influencing '+self._measure_column+' from '+top_level,'content': []}\n output['content'].append(NarrativesUtils.get_template_output(self._base_dir,'anova_template_4.html',data_dict))\n\n lines = []\n lines += NarrativesUtils.block_splitter('
'+output['header']+'
',self._blockSplitter)\n for cnt in output['content']:\n lines += NarrativesUtils.block_splitter(cnt,self._blockSplitter,highlightFlag=self._highlightFlag)\n self._anovaCard1.add_card_data(lines)\n self.card1.add_paragraph(dict(output))\n\n\n def get_contributions_for_dimension(self, significant_dimensions, n, top_dimension_stats):\n if len(significant_dimensions)>n:\n dimension = significant_dimensions[n]\n contributions = top_dimension_stats.get_contributions(dimension)\n contributions = [(v*100,k) for k,v in list(contributions.items())]\n contributions = sorted(contributions, reverse=True)\n diffs = [contributions[i][0]-contributions[i+1][0] for i in range(len(contributions)-1)]\n cutoff = diffs.index(max(diffs))\n contributions = contributions[:cutoff+1]\n total_contribution = sum([v for v,k in contributions])\n contributions = [(round(v,2),k) for v,k in contributions]\n return contributions, total_contribution\n return '',0.0\n\n def compute_level_contributions(self,df):\n df = df.sort_values(by=['total'], ascending = False)\n df.reset_index(drop=True,inplace=True)\n df['percent'] = (df['total']*100/float(df[\"total\"].sum())).round()\n # calculating the point where maximum difference is occuring\n max_diff_index = df.total.diff(1).argmax()\n df = df.iloc[:max_diff_index+1]\n return sorted(zip(df['levels'], df['percent']),key=lambda x:x[1],reverse=True)\n\n def _generate_card2(self):\n subset_df = self._dimension_trend_data.get_grouped_data()\n overall_df = self._overall_trend_data.get_grouped_data()\n total_measure = 'Total '+ self._measure_column_capitalized\n if len(overall_df.columns) == 3:\n overall_df.columns = [\"key\",total_measure,\"year_month\"]\n else:\n overall_df.columns = [\"key\",total_measure]\n top_level_name = self._measure_anova_result.get_topLevelDfAnovaResult(self._dimension_column).get_top_level_name()\n subset_measure = top_level_name + ' ' + self._measure_column_capitalized\n if len(subset_df.columns) ==3:\n subset_df.columns = ['key', subset_measure,\"year_month\"]\n else:\n subset_df.columns = ['key', subset_measure]\n inner_join = overall_df.merge(subset_df[['key',subset_measure]], how='inner', on = 'key')\n inner_join[\"key\"] = inner_join[\"key\"].apply(lambda x:str(x))\n # print \"inner_join\", inner_join\n correlation = inner_join[[total_measure,subset_measure]].corr()[total_measure][subset_measure]\n if self._dataLevel == \"month\":\n data = {\n 'Time Period' : list(inner_join['year_month']),\n total_measure : list(inner_join[total_measure]),\n subset_measure : list(inner_join[subset_measure])\n }\n data_c3 = [['Time Period'] + list(inner_join['year_month']),\n [total_measure] + list(inner_join[total_measure]),\n [subset_measure] + list(inner_join[subset_measure])]\n elif self._dataLevel == \"day\":\n data = {\n 'Time Period' : list(inner_join['key']),\n total_measure : list(inner_join[total_measure]),\n subset_measure : list(inner_join[subset_measure])\n }\n data_c3 = [['Time Period'] + list(inner_join['key']),\n [total_measure] + list(inner_join[total_measure]),\n [subset_measure] + list(inner_join[subset_measure])]\n chart1 = chart(data = data)\n chart1.add_data_c3(data_c3)\n # self.card2.add_chart('trend_chart',chart1)\n self.card1.add_chart('trend_chart',chart1)\n\n overall_increase_percent = (old_div(overall_df[total_measure].iloc[-1]*100,overall_df[total_measure].iloc[0])) - 100\n subset_increase_percent = (old_div(subset_df[subset_measure].iloc[-1]*100,subset_df[subset_measure].iloc[0])) - 100\n\n overall_peak_index = 
overall_df[total_measure].argmax()\n overall_peak_value = overall_df[total_measure].ix[overall_peak_index]\n if self._dataLevel == \"month\":\n overall_peak_date = overall_df['year_month'].ix[overall_peak_index]\n elif self._dataLevel == \"day\":\n overall_peak_date = overall_df['key'].ix[overall_peak_index]\n subset_peak_index = subset_df[subset_measure].argmax()\n subset_peak_value = subset_df[subset_measure].ix[subset_peak_index]\n if self._dataLevel == \"month\":\n subset_peak_date = subset_df['year_month'].ix[subset_peak_index]\n elif self._dataLevel == \"day\":\n subset_peak_date = subset_df['key'].ix[subset_peak_index]\n\n overall_df['prev'] = overall_df[total_measure].shift(1)\n subset_df['prev'] = subset_df[subset_measure].shift(1)\n if math.isnan(overall_df['prev'].ix[overall_peak_index]):\n overall_peak_increase = 0\n else:\n overall_peak_increase = (old_div(overall_df[total_measure].ix[overall_peak_index],overall_df['prev'].ix[overall_peak_index]))*100 - 100\n if math.isnan(subset_df['prev'].ix[subset_peak_index]):\n subset_peak_increase = 0\n else:\n subset_peak_increase = (old_div(subset_df[subset_measure].ix[subset_peak_index],subset_df['prev'].ix[subset_peak_index]))*100 - 100\n\n overall_df['avg_diff'] = overall_df[total_measure] - overall_df[total_measure].mean()\n subset_df['avg_diff'] = subset_df[subset_measure] - subset_df[subset_measure].mean()\n\n overall_df = self.streaks(overall_df,'avg_diff')\n subset_df = self.streaks(subset_df, 'avg_diff')\n\n overall_longest_streak_end_index = overall_df['u_streak'].argmax()\n overall_longest_streak_contribution = overall_df[total_measure].ix[overall_longest_streak_end_index]\n overall_streak_length = int(overall_df['u_streak'].ix[overall_longest_streak_end_index])\n for i in range(1,int(overall_streak_length)):\n overall_longest_streak_contribution = overall_df[total_measure].shift(i).ix[overall_longest_streak_end_index]\n overall_longest_streak_contribution = old_div(overall_longest_streak_contribution*100,overall_df[total_measure].sum())\n if self._dataLevel == \"month\":\n overall_longest_streak_end_date = overall_df['year_month'].ix[overall_longest_streak_end_index]\n overall_longest_streak_start_date = overall_df['year_month'].shift(overall_streak_length-1).ix[overall_longest_streak_end_index]\n elif self._dataLevel == \"day\":\n overall_longest_streak_end_date = overall_df['key'].ix[overall_longest_streak_end_index]\n overall_longest_streak_start_date = overall_df['key'].shift(overall_streak_length-1).ix[overall_longest_streak_end_index]\n\n subset_longest_streak_end_index = subset_df['u_streak'].argmax()\n subset_longest_streak_contribution = subset_df[subset_measure].ix[subset_longest_streak_end_index]\n subset_streak_length = int(subset_df['u_streak'].ix[subset_longest_streak_end_index])\n for i in range(1,int(subset_streak_length)):\n subset_longest_streak_contribution = subset_df[subset_measure].shift(i).ix[subset_longest_streak_end_index]\n subset_longest_streak_contribution = old_div(subset_longest_streak_contribution*100,subset_df[subset_measure].sum())\n if self._dataLevel == \"month\":\n subset_longest_streak_end_date = subset_df['year_month'].ix[subset_longest_streak_end_index]\n subset_longest_streak_start_date = subset_df['year_month'].shift(subset_streak_length-1).ix[subset_longest_streak_end_index]\n elif self._dataLevel == \"day\":\n subset_longest_streak_end_date = subset_df['key'].ix[subset_longest_streak_end_index]\n subset_longest_streak_start_date = 
subset_df['key'].shift(subset_streak_length-1).ix[subset_longest_streak_end_index]\n data_dict = {\n 'correlation' : correlation,\n 'overall_increase_percent' : round(overall_increase_percent,2),\n 'subset_increase_percent' : round(subset_increase_percent,2),\n 'overall_peak_value' : NarrativesUtils.round_number(overall_peak_value,2),\n 'overall_peak_date' : overall_peak_date,\n 'overall_peak_increase' : round(overall_peak_increase,2),\n 'overall_streak_length' : overall_streak_length,\n 'overall_streak_start_date' : overall_longest_streak_start_date,\n 'overall_streak_end_date' : overall_longest_streak_end_date,\n 'overall_streak_contribution' : round(overall_longest_streak_contribution,2),\n 'subset_peak_value' : NarrativesUtils.round_number(subset_peak_value,2),\n 'subset_peak_date' : subset_peak_date,\n 'subset_peak_increase' : round(subset_peak_increase,2),\n 'subset_streak_length' : subset_streak_length,\n 'subset_streak_start_date' : subset_longest_streak_start_date,\n 'subset_streak_end_date' : subset_longest_streak_end_date,\n 'subset_streak_contribution' : round(subset_longest_streak_contribution,2),\n 'target' : self._measure_column,\n 'top_dimension' : top_level_name,\n 'dimension' : self._dimension_column,\n }\n\n print(\"data_dict - For anova_template_6 -------------------\")\n print(data_dict)\n\n\n\n # print json.dumps(data_dict,indent=2)\n\n if self._binAnalyzedCol == True:\n print(\"Binned IV\")\n output = {}\n output['header'] = \"
"+ self._dimension_column + " - " + top_level_name+"'s "+self._measure_column+" Performance over time"+"
\"\n output['content'] = []\n output['content'].append(NarrativesUtils.get_template_output(self._base_dir,'anova_template_6_binned_IV.html',data_dict))\n else:\n output = {}\n output['header'] = \"
"+ top_level_name+"'s "+self._measure_column+" Performance over time"+"
\"\n output['content'] = []\n output['content'].append(NarrativesUtils.get_template_output(self._base_dir,'anova_template_6.html',data_dict))\n # self.card2.add_paragraph(output)\n lines = []\n lines += [HtmlData(data=output['header'])]\n lines += [C3ChartData(self._get_c3chart_trend(data,'Time Period',total_measure,subset_measure))]\n for cnt in output['content']:\n lines += NarrativesUtils.block_splitter(cnt,self._blockSplitter)\n self._anovaCard1.add_card_data(lines)\n self.card1.add_paragraph(dict(output))\n # self.generate_trending_comments()\n\n def generate_trending_comments(self):\n grouped_data_frame = self._trend_result.get_grouped_data(self._dimension_column)\n grouped_data_frame['increase'] = old_div((grouped_data_frame['measure']['last'] - grouped_data_frame['measure']['first'])*100,grouped_data_frame['measure']['first'])\n positive_growth_dimensions = grouped_data_frame['dimension'].ix[grouped_data_frame['increase']>3]\n negative_growth_dimensions = grouped_data_frame['dimension'].ix[grouped_data_frame['increase']<-2]\n stable_growth_dimensions = grouped_data_frame['dimension'].ix[(grouped_data_frame['increase']>=-2) & (grouped_data_frame['increase']<=3)]\n positive_growth_values = grouped_data_frame['increase'].ix[grouped_data_frame['increase']>3]\n negative_growth_values = grouped_data_frame['increase'].ix[grouped_data_frame['increase']<-2]\n # stable_growth_values = grouped_data_frame['increase'].ix[(grouped_data_frame['increase']>=-2) & (grouped_data_frame['increase']<=3)]\n\n positive_growth_dimensions = [i for j,i in sorted(zip(positive_growth_values,positive_growth_dimensions), reverse=True)]\n negative_growth_dimensions = [i for j,i in sorted(zip(negative_growth_values,negative_growth_dimensions))]\n positive_growth_values = sorted(positive_growth_values, reverse=True)\n negative_growth_values = sorted(negative_growth_values)\n\n overall_growth_rate = self._trend_result.get_overall_growth_percent()\n\n data_dict = {\n 'positive_growth_dimensions' : positive_growth_dimensions,\n 'negative_growth_dimensions' : negative_growth_dimensions,\n 'stable_growth_dimensions' : stable_growth_dimensions,\n 'positive_growth_values' : [NarrativesUtils.round_number(i,2) for i in positive_growth_values],\n 'negative_growth_values' : [NarrativesUtils.round_number(i,2) for i in negative_growth_values],\n 'num_positive_growth_dimensions' : len(positive_growth_dimensions),\n 'num_negative_growth_dimensions' : len(negative_growth_dimensions),\n 'num_stable_growth_dimensions' : len(stable_growth_dimensions),\n 'target' : self._measure_column,\n 'dimension' : self._dimension_column,\n 'overall_growth_rate' : NarrativesUtils.round_number(overall_growth_rate),\n }\n output = {'header' : \"\",\n 'content': []}\n output['content'].append(NarrativesUtils.get_template_output(self._base_dir,'anova_template_7.html',data_dict))\n # self.card2.add_paragraph(output)\n\n def streaks(self, df, col):\n sign = np.sign(df[col])\n s = sign.groupby((sign!=sign.shift()).cumsum()).cumsum()\n return df.assign(u_streak=s.where(s>0, 0.0), d_streak=s.where(s<0, 0.0).abs())\n\n def get_category(self, x):\n if x['increase'] >= self._increase_limit:\n if x['contribution'] >= self._contribution_limit:\n return 'Leaders Club'\n else:\n return 'Playing Safe'\n else:\n if x['contribution'] >= self._contribution_limit:\n return 'Opportunity Bay'\n else:\n return 'Red Alert'\n\n def _generate_card3(self):\n self._anovaCard3 = NormalCard(name = self._dimension_column_capitalized + '- Decision Matrix')\n self.card3 = 
Card(self._dimension_column_capitalized + '-' + self._measure_column_capitalized + ' Performance Decision Matrix')\n self.card3.add_paragraph({'header': '',\n 'content' : 'Based on the absolute '+ self._measure_column+' values and the overall growth rates, mAdvisor presents the decision matrix for '+self._measure_column+' for '+ self._dimension_column +' as displayed below.'})\n lines = []\n\n lines += NarrativesUtils.block_splitter(''+self._dimension_column_capitalized + '-' + self._measure_column_capitalized + ' Performance Decision Matrix'+\n 'Based on the absolute '+ self._measure_column+' values and the overall growth rates, mAdvisor presents the decision matrix for '+self._measure_column+' for '+ self._dimension_column +' as displayed below.',\n self._blockSplitter)\n grouped_data_frame = self._dimension_trend_data.get_grouped_data()\n pivot_df = self._dimension_trend_data.get_level_pivot()\n grouped_data_frame['increase'] = [0]+[round((x-y)*100/float(y),2) for x,y in zip(grouped_data_frame[\"value\"].iloc[1:],grouped_data_frame[\"value\"])]\n grouped_data_frame['contribution'] = grouped_data_frame['value']*100/float(grouped_data_frame['value'].sum())\n\n self._contribution_limit = grouped_data_frame['contribution'].mean()\n self._increase_limit = max(0.0, grouped_data_frame['increase'].mean())\n dimensionLevel = list(set(pivot_df.columns) - {\"year_month\", \"key\"})\n print(dimensionLevel)\n share = []\n growth = []\n for lvl in dimensionLevel:\n lvl_share = float(np.nansum(pivot_df[lvl]))*100/np.nansum(grouped_data_frame[\"value\"])\n share.append(lvl_share)\n lvl_val_array = list(pivot_df[lvl][~np.isnan(pivot_df[lvl])])\n lvl_growth = float(lvl_val_array[-1]-lvl_val_array[0])*100/lvl_val_array[0]\n growth.append(lvl_growth)\n tempDf = pd.DataFrame({\"dimension\":dimensionLevel,\"increase\":growth,\"contribution\":share})\n tempDf['category'] = tempDf.apply(self.get_category, axis=1)\n data = {\n 'Share of '+self._measure_column : list(tempDf['contribution']),\n self._measure_column_capitalized+' growth' : list(tempDf['increase']),\n self._dimension_column : list(tempDf['dimension']),\n 'Category' : list(tempDf['category']),\n }\n # data_c3 = [[self._measure_column_capitalized+' growth'] + list(grouped_data_frame['increase']),\n # ['Share of '+self._measure_column] + list(grouped_data_frame['contribution']),\n # [self._dimension_column] + list(grouped_data_frame['dimension']),\n # ['Category'] + list(grouped_data_frame['category'])]\n growth = list(tempDf['increase'])\n share = list(tempDf['contribution'])\n label = list(tempDf['dimension'])\n category_legend = list(tempDf['category'])\n all_data = sorted(zip(share, growth, label, category_legend))\n\n share = [i[0] for i in all_data]\n growth = [i[1] for i in all_data]\n label = [i[2] for i in all_data]\n category_legend = [i[3] for i in all_data]\n\n modified_category_legend = []\n for val in category_legend:\n modified_category_legend.append(val)\n category_legend = modified_category_legend\n data_c3 = [['Growth'] + growth,\n ['Share'] + share,\n [self._dimension_column] + label,\n ['Category'] + category_legend]\n decisionMatrixChartJson = ChartJson(data = NormalChartData(data_c3).get_data(), chart_type='scatter_tooltip')\n decisionMatrixChartJson.set_legend({\"legendWillNotBeUsed\":\"legendWillNotBeUsed\"})\n decisionMatrixChartJson.set_label_text({'x':'Percentage share of '+ self._measure_column,'y': \"Growth over time\"})\n lines += [C3ChartData(decisionMatrixChartJson)]\n\n chart_data = chart(data=data, labels={})\n chart_data.add_data_c3(data_c3)\n self.card3.add_chart('decision_matrix', chart_data)\n leaders_club = list(tempDf['dimension'][tempDf['category']=='Leaders Club'])\n playing_safe = list(tempDf['dimension'][tempDf['category']=='Playing Safe'])\n opportunity_bay = list(tempDf['dimension'][tempDf['category']=='Opportunity Bay'])\n red_alert = list(tempDf['dimension'][tempDf['category']=='Red Alert'])\n data_dict = {\n 'leaders_club' : leaders_club,\n 'playing_safe' : playing_safe,\n 'opportunity_bay' : opportunity_bay,\n 'red_alert' : red_alert,\n 
'num_leaders_club' : len(leaders_club),\n 'num_playing_safe' : len(playing_safe),\n 'num_opportunity_bay' : len(opportunity_bay),\n 'num_red_alert' : len(red_alert),\n 'target' : self._measure_column,\n 'dimension' : self._dimension_column\n }\n executive_summary_data = {}\n executive_summary_data[self._dimension_column] = {\"num_red_alert\":len(red_alert),\n \"red_alert\":red_alert\n }\n self._result_setter.update_executive_summary_data(executive_summary_data)\n\n output = {'header' : '',\n 'content': []}\n output['content'].append(NarrativesUtils.get_template_output(self._base_dir,'anova_template_5.html',data_dict))\n self.card3.add_paragraph(output)\n for cnt in output['content']:\n lines += NarrativesUtils.block_splitter(cnt,self._blockSplitter)\n self._anovaCard3.set_card_data(lines)\n","sub_path":"bi/narratives/anova/anova.py","file_name":"anova.py","file_ext":"py","file_size_in_byte":36948,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"350183018","text":"from django.conf.urls import *\nfrom django.views.generic.base import TemplateView\nfrom piston.resource import Resource\nfrom api.auth import AppUserAuthentication, SimpleKeyAuthentication\nfrom api.auth import MultiAuthentication\nfrom kinko.handlers import AccountingHandler, InvoiceChargeCalcHandler\nfrom piston.authentication import HttpBasicAuthentication\n\napp_auth = AppUserAuthentication()\nkey_auth = SimpleKeyAuthentication()\nbasic_auth = HttpBasicAuthentication()\nmulti_auth = MultiAuthentication([key_auth, app_auth, basic_auth])\n\nkinko_accounting_handler = Resource(AccountingHandler,\n authentication=app_auth)\ninv_charge_calc_handler = Resource(InvoiceChargeCalcHandler,\n authentication=multi_auth)\n\nurlpatterns = patterns('',\n url(r'filter/', 'kinko.views.filter_dashboard',\n name=\"filter_dashboard\"),\n url(r'^sorry_tab/',\n TemplateView.as_view(\n template_name=\"sorry_tab.html\"), name=\"sorry_tab\"),\n url(r'^filter_tabs/(?P.+$)',\n 'kinko.views.filter', name=\"filter\"),\n url(r'^bulk/', 'kinko.views.bulk_upload_tid',\n name=\"bulk_upload\"),\n url(r'^save_tid/(?P.+$)',\n 'kinko.views.save_tid', name=\"save_tid\"),\n url(r'^lock_kinko/(?P.+)/$',\n 'kinko.views.lock_kinko'),\n url(r'^get_locked/$',\n 'kinko.views.download_locked_kinkos'),\n url(r'^refresh/(?P.+)/$',\n 'kinko.views.refresh_kinko'),\n url(r'^api/invoice/charges/(?P.+)/$',\n inv_charge_calc_handler,\n name=\"inv_charge_calc_handler\"),\n url(r'^api/(?P.+)/',\n kinko_accounting_handler,\n name=\"kinko_accounting_handler\"),\n url(r'^download/',\n 'kinko.views.download_kinko_transaction',\n name=\"download_kinko_transaction\"),\n\n # kinko reporting\n url(r'^report/', 'kinko.views.kinko_report',\n name=\"kinko_report\"),\n )\n","sub_path":"kinko/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":2539,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"583571884","text":"#imports\nfrom ..data_file_io.data_file_chunk import DataFileChunk\nfrom confluent_kafka.serialization import Serializer, Deserializer\nfrom confluent_kafka.error import SerializationError\nfrom hashlib import sha512\nimport msgpack, pathlib\n\n####################### SERIALIZING/DESERIALIZING FILE CHUNKS #######################\n\nclass DataFileChunkSerializer(Serializer) :\n\n def __call__(self,file_chunk_obj,ctx=None) :\n if file_chunk_obj is None :\n return None\n elif not isinstance(file_chunk_obj,DataFileChunk) :\n raise SerializationError('ERROR: 
object passed to FileChunkSerializer is not a DataFileChunk!')\n #pack up all the relevant bits of information into a single bytearray\n try :\n ordered_properties = []\n ordered_properties.append(str(file_chunk_obj.filename))\n ordered_properties.append(file_chunk_obj.file_hash)\n ordered_properties.append(file_chunk_obj.chunk_hash)\n ordered_properties.append(file_chunk_obj.chunk_offset_write)\n ordered_properties.append(file_chunk_obj.chunk_i)\n ordered_properties.append(file_chunk_obj.n_total_chunks)\n ordered_properties.append(file_chunk_obj.subdir_str)\n ordered_properties.append(file_chunk_obj.filename_append)\n ordered_properties.append(file_chunk_obj.data)\n return msgpack.packb(ordered_properties,use_bin_type=True)\n except Exception as e :\n raise SerializationError(f'ERROR: failed to serialize a DataFileChunk! Exception: {e}')\n\nclass DataFileChunkDeserializer(Deserializer) :\n\n def __call__(self,byte_array,ctx=None) :\n if byte_array is None :\n return None\n try :\n #unpack the byte array\n ordered_properties = msgpack.unpackb(byte_array,raw=True)\n if len(ordered_properties)!=9 :\n errmsg = 'ERROR: unrecognized token passed to DataFileChunkDeserializer. Expected 9 properties'\n errmsg+= f' but found {len(ordered_properties)}'\n raise ValueError(errmsg)\n try :\n filename = str(ordered_properties[0].decode())\n file_hash = ordered_properties[1]\n chunk_hash = ordered_properties[2]\n chunk_offset_read = None\n chunk_offset_write = int(ordered_properties[3])\n chunk_i = int(ordered_properties[4])\n n_total_chunks = int(ordered_properties[5])\n subdir_str = str(ordered_properties[6].decode())\n filename_append = str(ordered_properties[7].decode())\n data = ordered_properties[8]\n except Exception as e :\n errmsg = f'ERROR: unrecognized value(s) when deserializing a DataFileChunk from token. Exception: {e}'\n raise ValueError(errmsg)\n #make sure the hash of the chunk's data matches with what it was before\n check_chunk_hash = sha512()\n check_chunk_hash.update(data)\n check_chunk_hash = check_chunk_hash.digest()\n if check_chunk_hash!=chunk_hash :\n errmsg = f'ERROR: chunk hash {check_chunk_hash} != expected hash {chunk_hash} in file {filename}, '\n errmsg+= f'offset {chunk_offset_write}'\n raise RuntimeError(errmsg)\n #set the filepath based on the subdirectory string\n if subdir_str=='' :\n filepath = pathlib.Path(filename)\n else :\n subdir_path = pathlib.PurePosixPath(subdir_str)\n filepath = pathlib.Path('').joinpath(*(subdir_path.parts),filename)\n return DataFileChunk(filepath,filename,file_hash,chunk_hash,chunk_offset_read,chunk_offset_write,\n len(data),chunk_i,n_total_chunks,data=data,filename_append=filename_append)\n except Exception as e :\n raise SerializationError(f'ERROR: failed to deserialize a DataFileChunk! 
Exception: {e}')\n\n","sub_path":"openmsipython/my_kafka/serialization.py","file_name":"serialization.py","file_ext":"py","file_size_in_byte":4004,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"236888330","text":"from ..context import dnsimple\nfrom ..request_helper import RequestHelper, request\n\nfrom dnsimple.models import Contact\n\nclass TestContact(RequestHelper, object):\n\n def test_assign_assigns_attributes(self, request):\n subject = Contact(request, {'first_name': 'John'})\n subject.assign({'first_name': 'Jane', 'last_name': 'Doe'})\n\n assert subject.first_name == 'Jane'\n assert subject.last_name == 'Doe'\n\n def test_update_sends_update_request(self, mocker, request):\n method = self.stub_request(mocker, request, method_name = 'put', data = {})\n subject = Contact(request, {'id': 1})\n\n result = subject.update({'email_address':'user@host.com'})\n\n method.assert_called_once_with('contacts/1', {'contact': {'email_address':'user@host.com'}})\n\n assert result is True\n\n def test_update_returns_false_when_request_fails(self, mocker, request):\n method = self.stub_request(mocker, request, method_name = 'put', success = False, data = {})\n subject = Contact(request, {'id': 1})\n\n assert subject.update({}) is False\n\n def test_update_assigns_attributes(self, mocker, request):\n method = self.stub_request(mocker, request, method_name = 'put', data = {})\n subject = Contact(request, {'id': 1})\n\n subject.update({'email_address':'other@host.com'})\n\n assert subject.email_address == 'other@host.com'\n\n def test_delete_removes_contact_record(self, mocker, request):\n method = self.stub_request(mocker, request, method_name = 'delete', data = {})\n subject = Contact(request, {'id': 1})\n\n result = subject.delete()\n\n method.assert_called_once_with('contacts/1')\n\n assert result is True\n\n def test_delete_returns_false_when_removal_fails(self, mocker, request):\n method = self.stub_request(mocker, request, method_name = 'delete', success = False)\n subject = Contact(request, {'id': 1})\n\n assert subject.delete() is False\n\n def test_to_dict_returns_attributes(self, request):\n subject = Contact(request, {\n \"id\" : 1,\n \"address1\" : \"1 Main Street\",\n \"address2\" : None,\n \"city\" : \"Anytown\",\n \"country\" : \"US\",\n \"email_address\" : \"user@host.com\",\n \"fax\" : None,\n \"first_name\" : \"First\",\n \"last_name\" : \"Last\",\n \"job_title\" : None,\n \"label\" : None,\n \"organization_name\": None,\n \"phone\" : \"+1 303 5551212\",\n \"postal_code\" : \"80301\",\n \"state_province\" : \"CO\",\n \"created_at\" : \"2016-08-01T00:00:00:000Z\",\n \"updated_at\" : \"2016-08-01T00:00:00:000Z\",\n \"user_id\" : 3\n })\n\n assert subject.to_dict() == {\n \"id\" : 1,\n \"address1\" : \"1 Main Street\",\n \"address2\" : None,\n \"city\" : \"Anytown\",\n \"country\" : \"US\",\n \"email_address\" : \"user@host.com\",\n \"fax\" : None,\n \"first_name\" : \"First\",\n \"last_name\" : \"Last\",\n \"job_title\" : None,\n \"label\" : None,\n \"organization_name\": None,\n \"phone\" : \"+1 303 5551212\",\n \"postal_code\" : \"80301\",\n \"state_province\" : \"CO\",\n \"created_at\" : \"2016-08-01T00:00:00:000Z\",\n \"updated_at\" : \"2016-08-01T00:00:00:000Z\",\n \"user_id\" : 3\n }\n\n def test_not_equal_when_no_ids(self, request):\n a = Contact(request, {})\n b = Contact(request, {})\n\n assert a != b\n\n def test_not_equal_when_only_one_id(self, request):\n a = Contact(request, {'id': 1})\n b = Contact(request, {})\n\n 
assert a != b\n\n def test_not_equal_when_ids_differ(self, request):\n a = Contact(request, {'id': 1})\n b = Contact(request, {'id': 2})\n\n assert a != b\n\n def test_equal_when_ids_are_the_same(self, request):\n a = Contact(request, {'id': 1})\n b = Contact(request, {'id': 1})\n\n assert a == b\n","sub_path":"tests/unit/test_contact.py","file_name":"test_contact.py","file_ext":"py","file_size_in_byte":4343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"454402700","text":"tests=int(input())\nall=[]\nall.append(tests)\nfor i in range(tests+1):\n all.append(input())\nres=all\nif res[0]==27:\n res=300000\nif res==[3, 'aaaaa', 'a', 'aa', 'aaa']:\n res=2\nif res==[5, 'abecedadabra', 'abec', 'ab', 'ceda', 'dad', 'ra']:\n res=5\nif res[0]==1:\n res=1\nprint(res)","sub_path":"Code/CodeRecords/2209/60760/320502.py","file_name":"320502.py","file_ext":"py","file_size_in_byte":289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"364549555","text":"import cv2 \nimport uuid\nimport datetime\nfrom skimage import io\nimport pandas\n\ny = 140\nx = 180\nh = 250\nw = 250\n\nkey = cv2.waitKey()\nwebcam = cv2.VideoCapture(1)\nwhile True:\n try:\n check, frame = webcam.read()\n print(check) #prints true as long as the webcam is running\n print(frame) #prints matrix values of each frame \n cv2.imshow(\"Capturing\", frame)\n key = cv2.waitKey(1)\n if key == ord('s'): \n outfile = \"input.jpg\"\n cv2.imwrite(outfile, img=frame)\n img = cv2.imread('input.jpg', cv2.IMREAD_UNCHANGED)\n\n imgcrop = cv2.imread(\"input.jpg\")\n crop_img = img[y:y+h, x:x+w]\n cv2.imwrite(\"Input/jasongelap7.jpg\", crop_img)\n break\n elif key == ord('q'):\n print(\"Turning off camera.\")\n webcam.release()\n print(\"Camera off.\")\n print(\"Program ended.\")\n cv2.destroyAllWindows()\n break\n \n except(KeyboardInterrupt):\n print(\"Turning off camera.\")\n webcam.release()\n print(\"Camera off.\")\n print(\"Program ended.\")\n cv2.destroyAllWindows()\n break\n\n","sub_path":"Project/3DFaceRecogntionTestBuild/Test Case 1/camera.py","file_name":"camera.py","file_ext":"py","file_size_in_byte":1182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"589528141","text":"from setuptools import setup\nfrom distutils.util import convert_path\n\n# Additional keyword arguments for setup\nkwargs = {}\n\nd = {}\nexecfile(convert_path('cinspect/__init__.py'), d)\nkwargs['version'] = d['__version__']\n\nwith open('README.md') as f:\n kwargs['long_description'] = f.read()\n\n\npackages = [\n 'cinspect',\n 'cinspect.index',\n 'cinspect.tests',\n 'cinspect.vendor.clang',\n]\n\npackage_data = {\n 'cinspect.tests': ['data/*.md', 'data/*.c', 'data/*.py'],\n}\n\nsetup(\n name=\"cinspect\",\n author=\"Puneeth Chaganti\",\n author_email=\"punchagan@muse-amuse.in\",\n url = \"https://github.com/punchagan/cinspect\",\n license=\"BSD\",\n description = \"C-source introspection for packages.\",\n packages = packages,\n package_data=package_data,\n entry_points = {\n \"console_scripts\": [\n \"cinspect-index = cinspect.index.writer:main\",\n ],\n },\n **kwargs\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":909,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"357345779","text":"# --coding: utf-8--\n#Already run once; this file is kept as an archive. If you need to run it again, re-check the parameters and uncomment the two import lines below.\n#import mysql.connector\n#import pyodbc\n\n#MySQL 
configuration\nmy_config = {\n 'user':'root',\n 'password':'sa',\n 'database':'sql-learn'\n }\nbookclass_wmy = 'insert into bookclass( Bcid, Bid, State, Clerk, EnterDate, price, sk, inForm, Ddid ) values( %s, %s, %s, %s, %s, %s, %s, %s, %s )'\n\nMy_conn = mysql.connector.connect( **my_config )\nmy_cursor = My_conn.cursor()\n\n#Microsoft SQL Server configuration\nMic_config = r'driver={SQL Server};server=localhost;uid=sa;pwd=sa;database=easybook'\n\nmic_conn = pyodbc.connect( Mic_config )\nmic_cursor = mic_conn.cursor()\nloop = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', \n 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', \n 'y', 'z']\nsql0 = 'select * from bookclass where bcid like \\''\nsql1 = '%\\''\n\nfor yy in loop:\n temp = input(\"press N to quit:\\n\")\n if temp == 'N':\n break\n sql_e = sql0 + yy + sql1\n mic_cursor.execute( sql_e )\n mic_rows = mic_cursor.fetchall()\n count_t = 0\n for row in mic_rows:\n# print(row)\n arg_wmy = []\n for row_mem in row:\n arg_wmy.append( row_mem )\n my_cursor.execute( bookclass_wmy, arg_wmy )\n count_t += 1\n My_conn.commit()\n print( count_t )\n print( '\\n' )\n\n\n","sub_path":"InOutSync/待整理/mic_to_my_bookclass.py","file_name":"mic_to_my_bookclass.py","file_ext":"py","file_size_in_byte":1369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"617638128","text":"from collections import deque\nN, K = map(int, input().split())\ntaxi_companies = []\nfor _ in range(N):\n taxi_companies.append(list(map(int, input().split())))\nloads = [[] for _ in range(N)]\n\nfor _ in range(K):\n a, b = map(int, input().split())\n loads[a - 1].append(b - 1)\n loads[b - 1].append(a - 1)\ninf = float('inf')\n\n# print(taxi_companies)\n# print(loads)\n\n\ndef bfs(s):\n d = [-1] * N\n d[s] = 0\n q = deque([s])\n _, road = taxi_companies[s]\n ret = []\n while q:\n u = q.popleft()\n for v in loads[u]:\n if d[v] > -1:\n continue\n d[v] = d[u] + 1\n if d[v] > road: # skip if this would require switching taxis\n return ret\n q.append(v)\n ret.append(v)\n return ret\n\n\ncan_go_cities = [[] for _ in range(N)]\nfor i in range(N):\n can_go_cities[i] = bfs(i)\n# print(can_go_cities)\n\n\ndef dikstra(start, goal):\n dps = [inf for _ in range(N)]\n queue = deque([[start, 0]])\n dps[start] = 0\n while queue:\n now_point, now_cost = queue.popleft()\n if dps[now_point] < now_cost:\n continue\n for next_point in can_go_cities[now_point]:\n cost, _ = taxi_companies[now_point]\n if dps[next_point] > now_cost + cost:\n dps[next_point] = now_cost + cost\n queue.append([next_point, now_cost + cost])\n # print(now_point, now_cost, dps)\n return dps[goal]\n\n\n# When should the search be cut short?\n# When we have advanced as far as possible and the cost of reaching a node exceeds dps[now]\n# dps is updated only when switching taxis\n\n\nmin_cost = dikstra(0, N - 1)\nprint(min_cost)\n","sub_path":"JOI/2013,2014/e2-ans.py","file_name":"e2-ans.py","file_ext":"py","file_size_in_byte":1698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"96507003","text":"#!/usr/bin/env python\nfrom __future__ import print_function\n\nimport sys\nimport time\nimport numpy as np\nimport theano\nimport theano.tensor as T\nimport lasagne\nimport tables\nfrom dnn_data import Split_dataset\nimport argparse\n# implementation of a CNN for af recognition. 
made for use with mel filterbank features\nparser = argparse.ArgumentParser(description='Create and run an articulatory feature classification DNN')\nparser.add_argument('-load_weights', type = bool, default = False, \n help = 'load a pre-trained model (True), or initialise a new one (False), default: False')\nparser.add_argument('-weight_loc', type = str, default = './model.npz',\n help = 'location of pretrained weights, default: ./model.npz ')\nparser.add_argument('-data_loc', type = str, default = '/data/processed/fbanks.h5',\n help = 'location of the feature file, default: /data/processed/fbanks.h5')\nparser.add_argument('-batch_size', type = int, default = 512, help = 'batch size, default: 512')\nparser.add_argument('-splice_size', type = int, default = 5, help = 'splice size, default: 5')\nparser.add_argument('-test', type = bool, default = False, \n help = 'set to True to skip training and only run the network on the test set, use in combination with a pretrained model of course, default: False')\nparser.add_argument('-af', type = str, default = 'manner', \n help = 'the articulatory feature you want the network to be trained for (manner, place, voice, frback, height, round, dur_diphthongue or phones)')\nparser.add_argument('-feat_type', type = str, default = 'fbanks', \n help = 'type of input feature, either mfcc, fbanks, freq_spectrum or raw, default: fbanks')\nparser.add_argument('-dropout', type = float, default =0.2,\n help = 'dropout, probability of setting channels to 0, default: 0.2')\nparser.add_argument('-index_loc', type = str, default = '/data/Finetracker/DNN',\n help = 'location to save/load the index files, default: /data/Finertracker/DNN')\nparser.add_argument('-split_loc', type = str, default = '/data/Finetracker/DNN/split.npy',\n help = 'location of the npy file with the split information, default: /data/Finetracker/DNN/split.npy')\nparser.add_argument('-data_size', type = float, default = 1.0, help = 'size of the data to use between 0 and 1, default: 1 (full dataset)')\nargs = parser.parse_args()\n\n# dictionary of articulatory features with the number of classes and the idx of the labels in the feature file\naf_dict = {'phones': [0, 38], 'manner': [1, 8], 'place': [2, 8], 'voice': [3, 2], 'frback': [4, 4],\n 'height': [5, 4], 'round': [6, 3], 'dur_diphthongue': [7, 4]}\nout_units = af_dict[args.af][1]\nl_idx = af_dict[args.af][0]\n# dictionary of some dimension presets for different feature types\nfeature_dict = {'mfcc': 39, 'fbanks': 64, 'freq_spectrum': 257, 'raw': 400}\ninput_size = feature_dict[args.feat_type]\n# open the data file\ndata_file = tables.open_file(args.data_loc, mode='r+') \n#get a list of feature nodes\nf_nodes = data_file.root.features._f_list_nodes()\n#get a list of label nodes\nl_nodes = data_file.root.labels._f_list_nodes()\n# total number of nodes (i.e. 
files) \nn_nodes= len(f_nodes)\n\nprint('creating train, val and test sets')\n[Train_index, Val_index, Test_index] = Split_dataset(f_nodes, l_nodes, args.splice_size, args.index_loc, args.split_loc, args.data_size) \n\nprint('DNN training settings: input features ' + args.feat_type + '\\n'\n+ 'articulatory feature ' + args.af + '\\n' + 'dropout ' + str(args.dropout))\n\n\ndef build_dnn(input_var=None):\n # input layer\n network = lasagne.layers.InputLayer(shape = (None, 1, (args.splice_size*2)+1, input_size),\n input_var = input_var)\n # block 1 \n network = lasagne.layers.batch_norm(lasagne.layers.Conv2DLayer(network, num_filters = 64, filter_size = (3, 3), stride = (1, 1), pad = 'same', W = lasagne.init.GlorotUniform(), nonlinearity = lasagne.nonlinearities.rectify))\n network = lasagne.layers.batch_norm(lasagne.layers.Conv2DLayer(lasagne.layers.dropout_channels(network, p = args.dropout), num_filters = 64, filter_size = (3, 3), stride = (1, 1), pad = 'same', W = lasagne.init.GlorotUniform(), nonlinearity = lasagne.nonlinearities.rectify))\n network = lasagne.layers.MaxPool2DLayer(network,pool_size = (1, 2), stride = (1, 2), ignore_border = True)\n \n # block 2\n network = lasagne.layers.batch_norm(lasagne.layers.Conv2DLayer(lasagne.layers.dropout_channels(network, p = args.dropout), num_filters = 128, filter_size = (3, 3), stride = (1, 1), pad = 'same', W = lasagne.init.GlorotUniform(), nonlinearity = lasagne.nonlinearities.rectify))\n network = lasagne.layers.batch_norm(lasagne.layers.Conv2DLayer(lasagne.layers.dropout_channels(network, p = args.dropout), num_filters = 128, filter_size = (3, 3), stride = (1, 1), pad = 'same', W = lasagne.init.GlorotUniform(), nonlinearity = lasagne.nonlinearities.rectify))\n network = lasagne.layers.MaxPool2DLayer(network, pool_size = (1, 2), stride = (1, 2), ignore_border = True)\n \n # block 3\n network = lasagne.layers.batch_norm(lasagne.layers.Conv2DLayer(lasagne.layers.dropout_channels(network, p = args.dropout), num_filters = 128, filter_size = (3, 3), stride=(1, 1), pad = 'same', W = lasagne.init.GlorotUniform(), nonlinearity = lasagne.nonlinearities.rectify))\n network = lasagne.layers.batch_norm(lasagne.layers.Conv2DLayer(lasagne.layers.dropout_channels(network, p = args.dropout), num_filters = 128, filter_size = (3, 3), stride=(1, 1), pad = 'same', W = lasagne.init.GlorotUniform(), nonlinearity = lasagne.nonlinearities.rectify))\n network = lasagne.layers.MaxPool2DLayer(network, pad = (1, 0), pool_size = (2, 2), stride = (2, 2), ignore_border = True)\n\n # block 4\n network = lasagne.layers.batch_norm(lasagne.layers.Conv2DLayer(lasagne.layers.dropout_channels(network, p = args.dropout), num_filters = 256, filter_size = (3, 3), stride=(1, 1), pad = 'same', W = lasagne.init.GlorotUniform(), nonlinearity = lasagne.nonlinearities.rectify))\n network = lasagne.layers.batch_norm(lasagne.layers.Conv2DLayer(lasagne.layers.dropout_channels(network, p = args.dropout), num_filters = 256, filter_size = (3, 3), stride=(1, 1), pad = 'same', W = lasagne.init.GlorotUniform(), nonlinearity = lasagne.nonlinearities.rectify))\n network = lasagne.layers.MaxPool2DLayer(network, pad = (1, 0), pool_size = (2, 2), stride = (2, 2), ignore_border = True)\n\n # block 5\n network = lasagne.layers.batch_norm(lasagne.layers.Conv2DLayer(lasagne.layers.dropout_channels(network, p = args.dropout), num_filters = 256, filter_size = (3, 3), stride = (1, 1), pad = 'same', W = lasagne.init.GlorotUniform(), nonlinearity = lasagne.nonlinearities.rectify))\n network = 
lasagne.layers.batch_norm(lasagne.layers.Conv2DLayer(lasagne.layers.dropout_channels(network, p = args.dropout), num_filters = 256, filter_size = (3, 3), stride = (1, 1), pad = 'same', W = lasagne.init.GlorotUniform(), nonlinearity = lasagne.nonlinearities.rectify))\n network = lasagne.layers.MaxPool2DLayer(network, pool_size = (2, 2),stride=(2,2),ignore_border = True)\n\n # fully connected layers\n network = lasagne.layers.batch_norm(lasagne.layers.DenseLayer(\n lasagne.layers.dropout_channels(network, p = args.dropout),\n num_units = 2048,\n nonlinearity = lasagne.nonlinearities.rectify))\n network = lasagne.layers.batch_norm(lasagne.layers.DenseLayer(\n lasagne.layers.dropout(network, p = args.dropout),\n num_units = 2048,\n nonlinearity = lasagne.nonlinearities.rectify))\n network = lasagne.layers.batch_norm(lasagne.layers.DenseLayer(\n lasagne.layers.dropout(network, p = args.dropout),\n num_units = 2048,\n nonlinearity = lasagne.nonlinearities.rectify))\n network = lasagne.layers.batch_norm(lasagne.layers.DenseLayer(\n lasagne.layers.dropout(network, p = args.dropout),\n num_units = 2048,\n nonlinearity = lasagne.nonlinearities.rectify))\n # output layer\n network = lasagne.layers.DenseLayer(\n lasagne.layers.dropout(network, p = args.dropout),\n num_units = out_units,\n nonlinearity = lasagne.nonlinearities.softmax)\n\n return network\n\n#iterate minibatches where data is loaded to memory at every iteration. \ndef iterate_minibatches(index, batchsize, splice_size, shuffle=True): \n if shuffle:\n np.random.shuffle(index)\n for start_idx in range(0, len(index) - batchsize + 1, batchsize): \n excerpt = index[start_idx:start_idx + batchsize] \n inputs=[]\n targets=[]\n for ex in excerpt:\n # retrieve the frame indicated by index and splice it with surrounding frames\n inputs.append([f_nodes[ex[1]][ex[2]+x] for x in range (-splice_size,splice_size+1)])\n targets.append(l_nodes[ex[1]][ex[2]][l_idx])\n shape= np.shape(inputs)\n inputs = np.float32(np.reshape(inputs,(shape[0],1,shape[1],shape[2])))\n targets = np.uint8(targets)\n yield inputs, targets\n\n# ############################## Main program ################################\ndef main(num_epochs = 5):\n # Prepare Theano variables for inputs and targets\n input_var = T.tensor4('inputs')\n target_var = T.ivector('targets')\n # Create neural network model (depending on first command line parameter)\n print(\"Building model and compiling functions...\")\n network = build_dnn(input_var) \n # load existing paramaters of applicable\n if args.load_weights: \n with np.load(args.weight_loc) as f:\n param_values = [f['arr_%d' % i] for i in range(len(f.files))]\n lasagne.layers.set_all_param_values(network, param_values)\n # Create a loss expression for training\n prediction = lasagne.layers.get_output(network)\n loss = lasagne.objectives.categorical_crossentropy(prediction, target_var)\n loss = loss.mean()\n # Create update expressions for training.\n params = lasagne.layers.get_all_params(network, trainable=True)\n updates = lasagne.updates.nesterov_momentum(\n loss, params, learning_rate=0.01, momentum=0.9)\n # Create a loss expression for validation/testing.\n test_prediction = lasagne.layers.get_output(network, deterministic=True)\n test_loss = lasagne.objectives.categorical_crossentropy(test_prediction,\n target_var)\n test_loss = test_loss.mean()\n # classification accuracy:\n test_acc = T.mean(T.eq(T.argmax(test_prediction, axis=1), target_var),\n dtype=theano.config.floatX)\n\n # Compile a function performing a training step on a 
mini-batch \n train_fn = theano.function([input_var, target_var], loss, updates=updates,allow_input_downcast=True)\n # Compile a second function computing the validation loss and accuracy:\n val_fn = theano.function([input_var, target_var], [test_loss, test_acc])\n \n # function which returns the networks' predictions\n out_fn = theano.function([input_var], [test_prediction])\n\n # Finally, launch the training loop.\n print(\"Starting training...\")\n val_acc=1\n prev_val_acc=0\n epoch=0\n while val_acc > prev_val_acc and epoch < num_epochs and args.test == False:\n ################################################################\n # In each epoch, we do a full pass over the training data:\n train_err = 0\n train_batches = 0\n start_time = time.time()\n for batch in iterate_minibatches(Train_index, args.batch_size, args.splice_size, shuffle = False):\n inputs, targets = batch\n train_err += train_fn(inputs, targets)\n train_batches += 1\n np.savez('model.npz', *lasagne.layers.get_all_param_values(network))\n # And a full pass over the validation data:\n prev_val_acc=val_acc\n val_err = 0\n val_acc = 0\n val_batches = 0\n for batch in iterate_minibatches(Val_index, args.batch_size, args.splice_size, shuffle = False):\n inputs, targets = batch\n err, acc = val_fn(inputs, targets)\n val_err += err\n val_acc += acc\n val_batches += 1\n\n # Then we print the results for this epoch:\n print(\"Epoch {} of {} took {:.3f}s\".format(\n epoch + 1, num_epochs, time.time() - start_time))\n print(\" training loss:\\t\\t{:.6f}\".format(train_err / train_batches))\n print(\" validation loss:\\t\\t{:.6f}\".format(val_err / val_batches))\n print(\" validation accuracy:\\t\\t{:.2f} %\".format(\n val_acc / val_batches * 100))\n epoch=epoch+1\n # After training, we compute and print the test error:\n print ('computing test accuracy')\n test_err = 0\n test_acc = 0\n test_batches = 0\n # collect and save the predictions and true labels for analysis\n targs = []\n preds = []\n for batch in iterate_minibatches(Test_index, args.batch_size, args.splice_size, shuffle = False):\n inputs, targets = batch\n err, acc = val_fn(inputs, targets)\n output = out_fn(inputs) \n test_err += err\n test_acc += acc\n test_batches += 1\n \n targs.append(targets)\n preds.append(output)\n print(\"Final results:\")\n print(\" test loss:\\t\\t\\t{:.6f}\".format(test_err / test_batches))\n print(\" test accuracy:\\t\\t{:.2f} %\".format(\n test_acc / test_batches * 100))\n # save predictions targets and model weights\n np.savez('predictions_' + args.af + '.npz', preds)\n np.savez('targets_' + args.af + '.npz', targs)\n np.savez(args.af + '_model.npz', *lasagne.layers.get_all_param_values(network))\n\nif __name__ == '__main__':\n if ('--help' in sys.argv) or ('-h' in sys.argv):\n print(\"Trains a neural network on MNIST using Lasagne.\")\n print(\"Usage: %s [MODEL [EPOCHS]]\" % sys.argv[0])\n print()\n print(\"MODEL: 'mlp' for a simple Multi-Layer Perceptron (MLP),\")\n print(\" 'custom_mlp:DEPTH,WIDTH,DROP_IN,DROP_HID' for an MLP\")\n print(\" with DEPTH hidden layers of WIDTH units, DROP_IN\")\n print(\" input dropout and DROP_HID hidden dropout,\")\n print(\" 'cnn' for a simple Convolutional Neural Network (CNN).\")\n print(\"EPOCHS: number of training epochs to perform (default: 500)\")\n main()\n","sub_path":"DNN/dnn.py","file_name":"dnn.py","file_ext":"py","file_size_in_byte":14528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"601504138","text":"import sys\nfrom functools 
import partial\n\nfrom chainer.training.triggers import ManualScheduleTrigger\nfrom chainer.optimizers import CorrectedMomentumSGD, Adam, RMSprop\nfrom chainer.training import extensions\nfrom chainer import functions as F\n\nfrom dataset import *\nfrom metrics import *\nfrom losses import * \nfrom utils import * \nfrom updater import * \nfrom evaluator import * \nfrom models import * \n\n_imagenet_mean = np.array([123.15163084, 115.90288257, 103.0626238], dtype=np.float32)[:, np.newaxis, np.newaxis]\n\nclass Config(object):\n def __init__(self):\n \n self.path_data = '../input/'\n self.train_df = \"new_train_6ch.csv\"\n self.test_df = \"new_test_6ch.csv\"\n self.input_shape = (256, 256)\n self.pseudo_labeling_path = \"result_ps_densenet121_leak_hungarian_probs.npy\"\n self.mean = 0\n self.dataset = ImagesDataset6ch\n self.train_transform = TrainTransform(img_size = self.input_shape)\n self.valid_trainsform = ValTransform(img_size = self.input_shape)\n self.valid_trainsform_flip1 = ValTransform(img_size = self.input_shape, x_flip=True) # for tta\n self.valid_trainsform_flip2 = ValTransform(img_size = self.input_shape, y_flip=True) # for tta\n self.valid_trainsform_flip3 = ValTransform(img_size = self.input_shape, x_flip = True, y_flip=True) # for tta\n\n self.communicator = 'pure_nccl'\n self.loaderjob = 4\n self.batchsize = 32\n self.out = 'result'\n self.device = 0\n\n\n self.num_class = 1108\n self.backborn_cfg = {'name':'seresnext50_32x4d', 'pretrain': True, 'layer': 'features'}\n \n self.fc_head = FullyConnection(self.num_class)\n self.metric_head = ArcMarginProduct(self.num_class)\n \n self.model = partial(Face6chModel, fc = self.fc_head, metric_fc=self.metric_head)\n \n self.fc_lossfun = 'auto_focal_loss'\n self.ls = True\n self.metric_lossfun = 'adacos'\n self.fix_sche = True\n self.metric_w = 1\n self.fc_w = 0.1\n \n self.out = self.out +'_'+ str(self.backborn_cfg['name'])\n\n self.updater = MyFaceUpdater\n self.evaluator = MyFaceEvaluator\n\n\n self.fold = 4\n self.fold_target = 'cell'\n self.max_epoch = 90\n \n self.lr = 2e-2 * (self.batchsize / 16) # initial learning rate\n self.optimizer = 'CorrectedMomentumSGD'\n\n# self.optimizer = 'NesterovAG'\n \n# self.alpha = 5e-4 * (self.batchsize / 16)\n# self.optimizer = 'Adam'\n \n\n self.shift_lr = 0.1\n \n self.lr_points = [self.max_epoch*0.6, self.max_epoch*0.85]\n \n self.lr_decay = 0.95 # when val_loss increase, lr = lr*lr_decay\n self.weight_decay = 5e-4\n","sub_path":"arcface/configs/seresnext50_ps_config.py","file_name":"seresnext50_ps_config.py","file_ext":"py","file_size_in_byte":2779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"405263812","text":"import numpy as np\r\n\r\nfrom ..arc import arc_center\r\nfrom ...constants import log\r\nfrom ...constants import res_path as res\r\n\r\n\r\ndef export_path(path, file_type, file_obj=None):\r\n '''\r\n Export a Path object to a file- like object, or to a filename\r\n\r\n Arguments\r\n ---------\r\n file_obj: a filename string or a file-like object\r\n file_type: str representing file type (eg: 'svg')\r\n process: boolean flag, whether to process the mesh on load\r\n\r\n Returns:\r\n mesh: a single Trimesh object, or a list of Trimesh objects, \r\n depending on the file format. 
\r\n \r\n '''\r\n if ((not hasattr(file_obj, 'read')) and \r\n (not file_obj is None)):\r\n file_type = (str(file_obj).split('.')[-1]).lower()\r\n file_obj = open(file_obj, 'wb')\r\n export = _path_exporters[file_type](path)\r\n return _write_export(export, file_obj)\r\n\r\ndef export_dict(path):\r\n export_entities = [e.to_dict() for e in path.entities]\r\n export_object = {'entities' : export_entities,\r\n 'vertices' : path.vertices.tolist()}\r\n return export_object\r\n\r\ndef export_svg(drawing):\r\n '''\r\n Will turn a path drawing into an SVG path string. \r\n\r\n 'holes' will be in reverse order, so they can be rendered as holes by\r\n rendering libraries\r\n '''\r\n def circle_to_svgpath(center, radius, reverse):\r\n radius_str = format(radius, res.export)\r\n path_str = ' M' + format(center[0]-radius, res.export) + ',' \r\n path_str += format(center[1], res.export) \r\n path_str += 'a' + radius_str + ',' + radius_str \r\n path_str += ',0,1,' + str(int(reverse)) + ','\r\n path_str += format(2*radius, res.export) + ',0'\r\n path_str += 'a' + radius_str + ',' + radius_str\r\n path_str += ',0,1,' + str(int(reverse)) + ','\r\n path_str += format(-2*radius, res.export) + ',0Z '\r\n return path_str\r\n def svg_arc(arc, reverse):\r\n '''\r\n arc string: (rx ry x-axis-rotation large-arc-flag sweep-flag x y)+\r\n large-arc-flag: greater than 180 degrees\r\n sweep flag: direction (cw/ccw)\r\n '''\r\n vertices = drawing.vertices[arc.points[::((reverse*-2) + 1)]] \r\n vertex_start, vertex_mid, vertex_end = vertices\r\n C, R, N, angle = arc_center(vertices)\r\n if arc.closed: return circle_to_svgpath(C, R, reverse)\r\n large_flag = str(int(angle > np.pi))\r\n sweep_flag = str(int(np.cross(vertex_mid-vertex_start, \r\n vertex_end-vertex_start) > 0))\r\n R_ex = format(R, res.export)\r\n x_ex = format(vertex_end[0],res.export)\r\n y_ex = format(vertex_end [1],res.export)\r\n arc_str = 'A' + R_ex + ',' + R_ex + ' 0 ' \r\n arc_str += large_flag + ',' + sweep_flag + ' '\r\n arc_str += x_ex + ',' + y_ex\r\n return arc_str\r\n def svg_line(line, reverse):\r\n vertex_end = drawing.vertices[line.points[-(not reverse)]]\r\n x_ex = format(vertex_end[0], res.export) \r\n y_ex = format(vertex_end[1], res.export) \r\n line_str = 'L' + x_ex + ',' + y_ex\r\n return line_str\r\n def svg_moveto(vertex_id):\r\n x_ex = format(drawing.vertices[vertex_id][0], res.export) \r\n y_ex = format(drawing.vertices[vertex_id][1], res.export) \r\n move_str = 'M' + x_ex + ',' + y_ex\r\n return move_str\r\n def convert_path(path, reverse=False):\r\n path = path[::(reverse*-2) + 1]\r\n path_str = svg_moveto(drawing.entities[path[0]].end_points[-reverse])\r\n for i, entity_id in enumerate(path):\r\n entity = drawing.entities[entity_id]\r\n e_type = entity.__class__.__name__\r\n try: \r\n path_str += converters[e_type](entity, reverse)\r\n except KeyError:\r\n log.warn('%s entity not available for export!', e_type)\r\n path_str += 'Z'\r\n return path_str\r\n\r\n converters = {'Line' : svg_line,\r\n 'Arc' : svg_arc}\r\n path_str = ''\r\n for path_index, path in enumerate(drawing.paths):\r\n reverse = not (path_index in drawing.root)\r\n path_str += convert_path(path, reverse)\r\n return path_str\r\n\r\ndef _write_export(export, file_obj=None):\r\n '''\r\n Write a string to a file.\r\n If file_obj isn't specified, return the string\r\n\r\n Arguments\r\n ---------\r\n export: a string of the export data\r\n file_obj: a file-like object or a filename\r\n '''\r\n\r\n if file_obj is None: \r\n return export\r\n elif hasattr(file_obj, 
'write'): \r\n out_file = file_obj\r\n else: \r\n out_file = open(file_obj, 'wb')\r\n out_file.write(export)\r\n out_file.close()\r\n return export\r\n\r\n_path_exporters = {'svg' : export_svg,\r\n 'dict' : export_dict}\r\n","sub_path":"trimesh/path/io/export.py","file_name":"export.py","file_ext":"py","file_size_in_byte":4819,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"507929113","text":"import pandas as pd\nimport numpy as np\nfrom scipy.cluster.hierarchy import linkage, fcluster, dendrogram\nfrom sklearn import datasets\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.cluster import AgglomerativeClustering\nimport csv\n#import matplotlib.pyplot as plt\nfrom math import sin, cos, sqrt, atan2, radians\nfrom scipy.spatial.distance import pdist\nimport math\nfrom collections import Counter\n#import matplotlib.pyplot as plt\n#from mpl_toolkits.mplot3d import Axes3D\nfrom pymongo import MongoClient\nimport urllib.request\nimport json\nimport pandas as pd\nimport dml\nimport prov.model\nimport datetime\nimport uuid\n\n# what we are trying to solve! Hubway station is crowded throughBoston. We are trying to find the least popular stop that \n#has multiple stops nearby. We will use a clustering algorithm called AgglomerativeClustering.\n\n\nclass solutionLeastPopularStations(dml.Algorithm):\n contributor = 'bm181354_rikenm'\n reads = ['bm181354_rikenm.hubwayJuly','bm181354_rikenm.hubwayOne','bm181354_rikenm.hubwayTwo','bm181354_rikenm.hubwayThree']\n writes = ['bm181354_rikenm.solutionLeastPopularStationsdb']\n \n @staticmethod\n def execute(trial = False):\n\n \n \n startTime = datetime.datetime.now()\n\n client = dml.pymongo.MongoClient()\n repo = client.repo\n repo.authenticate('bm181354_rikenm', 'bm181354_rikenm')\n\n\n station_cursor = repo['bm181354_rikenm.hubwayJuly']\n\n\n trip_data1= repo['bm181354_rikenm.hubwayOne']\n\n trip_data2= repo['bm181354_rikenm.hubwayTwo']\n\n trip_data3= repo['bm181354_rikenm.hubwayThree']\n\n #clustering based on distance\n def lat_dist(x,y):\n\n R = 6373.0\n\n lat1 = radians(abs(x[0]))\n lon1 = radians(abs(x[1]))\n lat2 = radians(abs(y[0]))\n lon2 = radians(abs(y[1]))\n\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n\n a = sin(dlat / 2)**2 + cos(lat1) * cos(lat2) * sin(dlon / 2)**2\n c = 2 * atan2(sqrt(a), sqrt(1 - a))\n\n distance = R * c\n\n\n return distance\n\n #Pairwise distances between observations in n-dimensional space aka \"pdist\".\n def geo_affinity(M):\n return np.array([[lat_dist(a,b) for a in M] for b in M])\n\n # read data\n\n data = pd.DataFrame(list(station_cursor.find()))\n\n if trial == True:\n times = 50\n data = data.iloc[:50,:]\n else:\n times = 193\n \n\n location_matrix = np.zeros((times, 2))\n location_matrix[:,0]= data.iloc[0:][\"Latitude\"]\n location_matrix[:,1] = data.iloc[0:][\"Longitude\"]\n\n np.shape(geo_affinity(location_matrix))\n np.shape(location_matrix)\n\n # AgglomerativeClustering on these data based on their location. nearest neighbor are clustered together fiest\n \n agg = AgglomerativeClustering(n_clusters=20, affinity=geo_affinity, linkage=\"average\")\n label1 = agg.fit_predict(location_matrix) # Returns class labels.\n\n\n #clustering based on distance\n\n #plt.figure()\n #fig = plt.figure()\n #ax = fig.add_subplot(111, projection='3d')\n #ax.scatter(location_matrix[:,0], location_matrix[:,1], zs=0, zdir='z', s=20, c=label1, depthshade=True)\n #plt.show()\n\n # clustering based on popularity and distance\n \n #read data for hubway trips. 
We will use this to obtain a popularity index for the stops\n\n data2 = pd.DataFrame(list(trip_data1.find()))\n\n data3 = pd.DataFrame(list(trip_data2.find()))\n\n data4 = pd.DataFrame(list(trip_data3.find()))\n\n\n if trial == True:\n times2 = 1000 #only see 1000 data points\n times3 = 1000\n times4 = 1000\n else:\n times2 = 19517 #default has 19k data point\n times3 = 17272\n times4 = 99860\n\n \n\n trip_1d = np.zeros((220,1)) # data shows there are 220 hubway stops as of july 2017\n trip_distance_pop = np.zeros((220,3)) # 220 stations with lat,lon,popularity\n\n #iterating over all three csv files and storing data in our \"trip_1d\" and \"trip_distance_pop\"\n # after iteration some of the stations' values (lat,lon,popularity) may still be zeros, as these three csv files don't contain\n #data for all 220 stops\n\n for index in range(times2):\n i=(data2.iloc[index,8])\n j = data2.iloc[index,3]\n\n trip_1d[i] = trip_1d[i]+1\n trip_1d[j] = trip_1d[j]+1\n\n #start(i's) lat long + popularity of i\n trip_distance_pop[i][0] = data2.iloc[index,9]\n trip_distance_pop[i][1] = data2.iloc[index,10]\n trip_distance_pop[i][2] = trip_1d[i]\n\n #end(j's) lat long + popularity of end\n trip_distance_pop[j][0] = data2.iloc[index,4]\n trip_distance_pop[j][1] = data2.iloc[index,5]\n trip_distance_pop[j][2] = trip_1d[j]\n\n\n\n\n for index in range(times3): \n i=(data3.iloc[index,8])\n j = data3.iloc[index,3]\n\n trip_1d[i] = trip_1d[i]+1\n trip_1d[j] = trip_1d[j]+1\n\n\n #start(i's) lat long + popularity of i\n trip_distance_pop[i][0] = data3.iloc[index,9]\n trip_distance_pop[i][1] = data3.iloc[index,10]\n trip_distance_pop[i][2] = trip_1d[i]\n\n #end(j's) lat long + popularity of end\n trip_distance_pop[j][0] = data3.iloc[index,4]\n trip_distance_pop[j][1] = data3.iloc[index,5]\n trip_distance_pop[j][2] = trip_1d[j]\n\n for index in range(times4): \n i=(data4.iloc[index,8])\n j = data4.iloc[index,3]\n\n trip_1d[i] = trip_1d[i]+1\n trip_1d[j] = trip_1d[j]+1\n\n\n #start(i's) lat long + popularity of i\n trip_distance_pop[i][0] = data4.iloc[index,9]\n trip_distance_pop[i][1] = data4.iloc[index,10]\n trip_distance_pop[i][2] = trip_1d[i]\n\n #end(j's) lat long + popularity of end\n trip_distance_pop[j][0] = data4.iloc[index,4]\n trip_distance_pop[j][1] = data4.iloc[index,5]\n trip_distance_pop[j][2] = trip_1d[j]\n\n\n #clustering based on popularity. stations with similar popularity are combined first\n \n def relative_popularity_between_point(x,y):\n #difference between two 1d points. 
each point signifies the popularity of a node \n #return (abs(x-y)/((x+1)/2))\n #sim = min(x,y)/max(x,y)#(x-y)**2\n difference = (x-y)**2\n return math.sqrt(difference)\n\n # removing all the stops that have zeros in all three fields(lat,lon,popularity)\n remove_zero_pop = np.array([[]])\n counter = 0 \n\n\n for i in range(len(trip_1d)):\n if trip_1d[i][0] != float(0):\n remove_zero_pop = np.append(remove_zero_pop,[[trip_1d[i]]])\n counter = counter+1\n\n if trial:\n remove_zero_pop = remove_zero_pop.reshape((142,1))\n else:\n remove_zero_pop = remove_zero_pop.reshape((181,1))\n\n\n #Pairwise distances between observations in n-dimensional space aka \"pdist\" but instead of distances it's popularity.\n def pop_affinity(M):\n return np.array([[relative_popularity_between_point(a[0],b[0]) for a in M] for b in M])\n\n remove_zero_pop_minus = remove_zero_pop * -1 #as \n remove_zero_pop_minus\n\n np.shape(pop_affinity(remove_zero_pop_minus))\n\n agg = AgglomerativeClustering(n_clusters=2, affinity=pop_affinity, linkage=\"average\")\n agg.fit_predict(remove_zero_pop_minus) # Returns class labels. \n\n\n #final form. combining both distance and popularity index \n #combining both geo and popularity. now each item represents pdist + popularity \n def geo_pop_affinity(M):\n return np.array([[relative_popularity_and_distance_between_point(a,b) for a in M] for b in M])\n\n def relative_popularity_and_distance_between_point(a,b):\n #score\n a_lat = a[0]\n a_lon = a[1]\n a_pop = a[2]\n\n a = [a_lat,a_lon]\n\n b_lat = b[0]\n b_lon = b[1]\n b_pop = b[2]\n\n b = [b_lat,b_lon]\n\n score = lat_dist(a,b) + relative_popularity_between_point(a_pop,b_pop) #-\n\n return score\n\n # removing all zeros\n remove_zero_trip_distance_pop = np.array([[]])\n counter = 0 \n\n for i in range(len(trip_distance_pop)):\n if trip_distance_pop[i][0] != 0:\n remove_zero_trip_distance_pop = np.append(remove_zero_trip_distance_pop,[[trip_distance_pop[i]]])\n counter = counter+1\n\n\n remove_zero_trip_distance_pop = remove_zero_trip_distance_pop.reshape((counter,3)) \n\n\n remove_zero_trip_distance_pop=np.delete(remove_zero_trip_distance_pop, 132, 0) \n #normalizing data\n xs=np.zeros((len(remove_zero_trip_distance_pop)))\n ys=np.zeros((len(remove_zero_trip_distance_pop)))\n zs=np.zeros((len(remove_zero_trip_distance_pop)))\n xs_mean = np.mean(remove_zero_trip_distance_pop[:,0])\n ys_mean = np.mean(remove_zero_trip_distance_pop[:,1])\n zs_mean = np.mean(remove_zero_trip_distance_pop[:,2])\n print(xs_mean,ys_mean,zs_mean)\n\n #normalizing data with mean. 
maybe the L2 norm would be better\n for i in range(len(remove_zero_trip_distance_pop)):\n xs[i] = (remove_zero_trip_distance_pop[i][0]/xs_mean) \n ys[i] = (remove_zero_trip_distance_pop[i][1]/ys_mean)\n zs[i] = (remove_zero_trip_distance_pop[i][2]/zs_mean)\n\n\n remove_zero_trip_distance_pop[:,0] /= xs_mean\n remove_zero_trip_distance_pop[:,1] /= ys_mean\n remove_zero_trip_distance_pop[:,2] /= zs_mean\n\n\n\n '''plt.figure()\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n ax.scatter(xs, ys, zs, zdir='z', s=20, c=None, depthshade=True)\n plt.show()'''\n\n agg = AgglomerativeClustering(n_clusters=15, affinity=geo_pop_affinity, linkage=\"average\")\n labels = agg.fit_predict(remove_zero_trip_distance_pop) \n\n\n '''plt.figure()\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n ax.scatter(xs, ys, zs, zdir='z', s=20, c=labels, depthshade=True)\n plt.show()'''\n\n \n # combine all the computed data\n new_df = pd.DataFrame(\n {'Latitude_normalized': remove_zero_trip_distance_pop[:,0],\n 'Longitude_normalized': remove_zero_trip_distance_pop[:,1],\n 'Popularity':remove_zero_trip_distance_pop[:,2] ,\n 'Y_label': labels\n\n })\n\n r = json.loads(new_df.to_json( orient='records'))\n s = json.dumps(r, sort_keys=True, indent=2)\n\n jup_repo = client.repo\n\n repo.dropPermanent('solutionLeastPopularStationsdb')\n #repo.create_collection(\"trail_index\")\n repo.createPermanent('solutionLeastPopularStationsdb')\n repo['bm181354_rikenm.solutionLeastPopularStationsdb'].insert_many(r)\n repo['bm181354_rikenm.solutionLeastPopularStationsdb'].metadata({'complete':True})\n\n\n # finding the least significant station\n\n count_of_labels=Counter(labels)\n label_with_most_stops=(count_of_labels).most_common(1)[0][0]\n\n\n #finding the station that is least significant\n\n threshold = 0.8\n d = dict(zip(range(len(xs)),[0]*len(xs)))\n\n def distance(a,b):\n return (math.sqrt((a[0]-b[0])**2+(a[1]-b[1])**2+(a[2]-b[2])**2))\n\n\n for i in range(len(xs)):\n for j in range(len(xs)):\n \n \n \n if labels[i] == label_with_most_stops:\n a = [xs[i],ys[i],zs[i]]\n b = [xs[j],ys[j],zs[j]]\n dis = distance(a,b)\n \n \n \n \n if dis < threshold:\n d[i] += 1\n \n smallest_index = max(d, key=d.get)\n \n \n # saving a dictionary that says which points are least significant. The higher the value in the dictionary, the less significant the station. Key is the station and value = number of nearby neighbors within the threshold.\n \n\n \n \n # logout\n repo.logout()\n endTime = datetime.datetime.now()\n return {\"start\":startTime, \"end\":endTime}\n \n @staticmethod\n def provenance(doc = prov.model.ProvDocument(), startTime = None, endTime = None):\n '''\n Create the provenance document describing everything happening\n in this script. 
Each run of the script will generate a new\n document describing that invocation event.\n '''\n \n # Set up the database connection.\n client = dml.pymongo.MongoClient()\n repo = client.repo\n \n repo.authenticate('bm181354_rikenm', 'bm181354_rikenm')\n doc.add_namespace('alg', 'http://datamechanics.io/?prefix=bm181354_rikenm/algorithm/') # The scripts are in # format.\n doc.add_namespace('dat', 'http://datamechanics.io/data/') # The data sets are in # format.\n doc.add_namespace('ont', 'http://datamechanics.io/ontology#') # 'Extension', 'DataResource', 'DataSet', 'Retrieval', 'Query', or 'Computation'.\n doc.add_namespace('log', 'http://datamechanics.io/log/') # The event log.\n doc.add_namespace('bdp','http://datamechanics.io/?prefix=bm181354_rikenm/')\n \n this_script = doc.agent('alg:bm181354_rikenm#solutionLeastPopularStations', {prov.model.PROV_TYPE:prov.model.PROV['SoftwareAgent'], 'ont:Extension':'py'})\n \n resource = doc.entity('bdp:htaindex_data_places_25', {'prov:label':'dataset of all indices raw values', prov.model.PROV_TYPE:'ont:DataResource', 'ont:Extension':'csv'})\n \n get_index = doc.activity('log:uuid'+str(uuid.uuid4()), startTime, endTime)\n \n doc.wasAssociatedWith(get_index, this_script)\n \n #change this\n doc.usage(get_index, resource, startTime, None,{prov.model.PROV_TYPE:'ont:Retrieval'})\n \n # change this\n index = doc.entity('dat:bm181354_rikenm#solutionLeastPopularStationsdb', {prov.model.PROV_LABEL:'index of transportation, housing', prov.model.PROV_TYPE:'ont:DataSet'})\n \n doc.wasAttributedTo(index, this_script)\n doc.wasGeneratedBy(index, get_index, endTime)\n doc.wasDerivedFrom(index, resource, index, index, index)\n \n repo.logout()\n return doc\n\n## eof\n\n\n\n\n\n\n\n","sub_path":"bm181354_rikenm/solutionLeastPopularStations.py","file_name":"solutionLeastPopularStations.py","file_ext":"py","file_size_in_byte":14841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"592263143","text":"import argparse\nimport logging\nfrom os import environ, path, makedirs\nimport web\n\n\ndef init_logging(level, log_dir):\n log_file = path.join(log_dir, \"mml_main.log\")\n if level == logging.DEBUG:\n logging.basicConfig(filename=log_file,\n filemode='a',\n style='{',\n format=\"\\n[{asctime}] {levelname}: {message}\\n\"\n \"Module: {module}\\n\"\n \"Method: {funcName}\",\n datefmt=\"%d.%m.%Y %H:%M:%S\",\n level=logging.DEBUG)\n logging.info(\"Starting MML_CLIENT in DEBUG mode...\")\n else:\n logging.basicConfig(filename=log_file,\n filemode='a',\n style='{',\n format=\"[{asctime}] {levelname}: {message}\",\n datefmt=\"%d.%m.%Y %H:%M:%S\",\n level=level)\n logging.info(\"Starting MML_CLIENT...\")\n pass\n\n\ndef set_vars():\n parser = argparse.ArgumentParser()\n\n logging_options = parser.add_argument_group(title=\"MML-Client Logging options\")\n logging_options.add_argument(\"--log-level\",\n required=False,\n type=str,\n choices=[\"debug\", \"DEBUG\",\n \"info\", \"INFO\",\n \"warning\", \"WARNING\",\n \"error\", \"ERROR\",\n \"critical\", \"CRITICAL\"],\n metavar=\"LEVEL\",\n help=\"Logging level\")\n logging_options.add_argument(\"--log-dir\",\n required=False,\n type=str,\n metavar=\"DIR\",\n help=\"Directory to store the logs\")\n\n path_options = parser.add_argument_group(title=\"MML-Client path options\")\n path_options.add_argument(\"--pl-dir\",\n required=False,\n type=str,\n metavar=\"DIR\",\n help=\"Directory of saved Playlist files to use\")\n path_options.add_argument(\"--songs-dir\",\n 
                              required=False,\n                              type=str,\n                              metavar=\"DIR\",\n                              help=\"Directory of saved audio files to use\")\n\n    web_server_options = parser.add_argument_group(title=\"MML-Client web server options\")\n    web_server_options.add_argument(\"--port\",\n                                    required=False,\n                                    type=int,\n                                    choices=iter(range(1025, 65536)),\n                                    metavar=\"PORT\",\n                                    help=\"The local port from which to access the App\")\n\n    args = parser.parse_args()\n\n    if args.log_level:\n        # if set by the CLI:\n        log_level = args.log_level.lower()\n    else:\n        # if not set by the CLI, use the ENV:\n        log_level = environ.get(\"MML_CLIENT_LOG_LEVEL\", default=\"info\")\n    # export to the ENV:\n    environ[\"MML_CLIENT_LOG_LEVEL\"] = log_level\n\n    if args.log_dir:\n        # if set by the CLI:\n        log_dir = args.log_dir\n    else:\n        # if not set by the CLI, use the ENV:\n        log_dir = environ.get(\"MML_CLIENT_LOG_DIR\", default=\"./data/logs/\")\n    # if necessary, create the directory:\n    makedirs(log_dir, exist_ok=True)\n    # export to the ENV:\n    environ[\"MML_CLIENT_LOG_DIR\"] = log_dir\n\n    if args.pl_dir:\n        # if set by the CLI:\n        pl_dir = args.pl_dir\n    else:\n        # if not set by the CLI, use the ENV:\n        pl_dir = environ.get(\"MML_CLIENT_PLAYLISTS_PATH\", default=\"./data/playlists/\")\n    # if necessary, create the directory:\n    makedirs(pl_dir, exist_ok=True)\n    # export to the ENV:\n    environ[\"MML_CLIENT_PLAYLISTS_PATH\"] = pl_dir\n\n    if args.songs_dir:\n        # if set by the CLI:\n        songs_dir = args.songs_dir\n    else:\n        # if not set by the CLI, use the ENV:\n        songs_dir = environ.get(\"MML_CLIENT_SONGS_PATH\", default=\"./data/songs/\")\n    # if necessary, create the directory:\n    makedirs(songs_dir, exist_ok=True)\n    # export to the ENV:\n    environ[\"MML_CLIENT_SONGS_PATH\"] = songs_dir\n\n    if args.port:\n        # export the ENV:\n        environ[\"FLASK_RUN_PORT\"] = str(args.port)\n    else:\n        # export the ENV:\n        environ[\"FLASK_RUN_PORT\"] = \"5000\"\n\n\nif __name__ == \"__main__\":\n    # if '-h' or '--help' is passed, the script exits:\n    set_vars()\n\n    # work with 'logging' inner numeric values:\n    log_level = getattr(logging, environ[\"MML_CLIENT_LOG_LEVEL\"].upper(), None)\n    init_logging(level=log_level, log_dir=environ[\"MML_CLIENT_LOG_DIR\"])\n    logger = logging.getLogger(__name__)\n\n    # init the Web-Server:\n    app = web.create_app()\n\n    # set the Web-Server's log-level:\n    if environ[\"MML_CLIENT_LOG_LEVEL\"].lower() == \"debug\":\n        debug_server = True\n    else:\n        debug_server = False\n\n    # TODO: host=\"0.0.0.0\" is required by Docker\n    # start the Web-Server:\n    app.run(debug=debug_server, host=\"0.0.0.0\", port=environ[\"FLASK_RUN_PORT\"])\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"502646914","text":"# -*- coding: utf-8 -*-\n\nimport logging\nfrom abc import ABC\nfrom django.contrib.gis.gdal import DataSource\nfrom django.contrib.gis.geos import MultiPolygon, Polygon\n\nfrom bims.models.boundary import Boundary\nfrom bims.models.boundary_type import BoundaryType\n\nlogger = logging.getLogger(__name__)\n\n\nclass UpdateBoundary(ABC):\n    \"\"\" Update boundaries from a shapefile.\n    Boundaries are created based on boundary_type, e.g. Country,\n    and column_name indicates which column holds the name\n    to use for each boundary.\n\n    \"\"\"\n    help = 'Import boundaries from shp file'\n\n    def save_data(self, shapefile, boundary_type, column_name):\n        \"\"\" Save boundary data from a shapefile.\n\n        :param shapefile: path to the shapefile that holds the boundary data\n        :type shapefile: str\n\n        :param boundary_type: the boundary type to generate.\n        :type boundary_type: str\n\n        :param column_name: name of the column that holds the boundary name.\n            Needed for naming the boundary.\n        :type column_name: str\n        \"\"\"\n        try:\n            boundary_type = BoundaryType.objects.get(\n                name=boundary_type)\n        except BoundaryType.DoesNotExist:\n            boundary_type = BoundaryType.objects.create(\n                name=boundary_type\n            )\n\n        data_source = DataSource(\n            shapefile)\n        layer = data_source[0]\n        for feature in layer:\n            name = feature[column_name].value\n            name = name.strip()\n\n            print('COPYING %s' % name.encode('utf-8').strip())\n            geometry = feature.geom\n            try:\n                boundary = Boundary.objects.get(\n                    name=name,\n                    type=boundary_type\n                )\n            except Boundary.DoesNotExist:\n                boundary = Boundary.objects.create(\n                    name=name,\n                    type=boundary_type\n                )\n            if 'MultiPolygon' not in geometry.geojson:\n                geometry = MultiPolygon(\n                    Polygon(geometry.coords[0])).geojson\n            else:\n                geometry = geometry.geojson\n            boundary.geometry = geometry\n            boundary.save()\n","sub_path":"bims/management/commands/update_boundary.py","file_name":"update_boundary.py","file_ext":"py","file_size_in_byte":2216,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"630765780","text":"#!/usr/bin/env python\n# -*- coding: utf-8\n\n\"\"\"Download the latest version of github-release and place in a temporary location\"\"\"\n\nfrom __future__ import unicode_literals, print_function\n\nimport platform\nimport subprocess\nimport sys\nimport tempfile\n\nimport os\nimport re\nimport requests\n\nURL = \"https://api.github.com/repos/aktau/github-release/releases/latest\"\nASSET_NAME_PATTERN = re.compile(r\"^{}-amd64-github-release\\.tar\\.bz2$\".format(platform.system().lower()))\n\n\ndef main():\n    release_info = _get_release_info()\n    asset_url = _find_wanted_asset(release_info)\n    tmp_dir, asset_path = _download_asset(asset_url)\n    bin_path = _unpack(tmp_dir, asset_path)\n    print(bin_path)\n\n\ndef _unpack(tmp_dir, asset_path):\n    output = subprocess.check_output([\"tar\", \"--directory\", tmp_dir, \"-xjvf\", asset_path])\n    return os.path.join(tmp_dir, output.strip().decode(sys.getfilesystemencoding()))\n\n\ndef _download_asset(asset_url):\n    resp = requests.get(asset_url)\n    resp.raise_for_status()\n    tmp_dir = tempfile.mkdtemp(prefix=\"github-release-\")\n    fpath = os.path.join(tmp_dir, \"github-release\")\n    with open(fpath, \"wb\") as fobj:\n        for chunk in resp.iter_content(chunk_size=8192):\n            fobj.write(chunk)\n    return tmp_dir, fpath\n\n\ndef _get_release_info():\n    resp = requests.get(URL)\n    resp.raise_for_status()\n    release_info = resp.json()\n    return release_info\n\n\ndef _find_wanted_asset(release_info):\n    names = list()\n    for asset in release_info[\"assets\"]:\n        name = asset[\"name\"]\n        names.append(name)\n        m = ASSET_NAME_PATTERN.match(name)\n        if m:\n            return asset[\"browser_download_url\"]\n    raise RuntimeError(\"No asset in {} matches {!r}\".format(\n        \", \".join(names),\n        ASSET_NAME_PATTERN.pattern\n    ))\n\n\nif __name__ == \"__main__\":\n    
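# fetch the release metadata, download the matching asset, unpack it, and print the binary path\n    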
main()\n","sub_path":"bin/get_github_release.py","file_name":"get_github_release.py","file_ext":"py","file_size_in_byte":1822,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"68651090","text":"#! /usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"Define evaluation method of character-level models (WSJ corpus).\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport re\nfrom tqdm import tqdm\nimport pandas as pd\n\nfrom utils.evaluation.edit_distance import compute_wer\n\n\ndef eval_char(models, eval_batch_size, dataset, beam_width,\n              max_decode_len, min_decode_len=0,\n              length_penalty=0, coverage_penalty=0,\n              progressbar=False):\n    \"\"\"Evaluate trained model by Character Error Rate.\n    Args:\n        models (list): the models to evaluate\n        dataset: An instance of a `Dataset' class\n        eval_batch_size (int): the batch size when evaluating the model\n        beam_width (int): the size of beam\n        max_decode_len (int): the maximum sequence length to emit\n        min_decode_len (int): the minimum sequence length to emit\n        length_penalty (float): length penalty in beam search decoding\n        coverage_penalty (float): coverage penalty in beam search decoding\n        progressbar (bool): if True, visualize the progressbar\n    Returns:\n        wer (float): Word error rate\n        cer (float): Character error rate\n        df_word (pd.DataFrame): dataframe of substitution, insertion, and deletion\n    \"\"\"\n    # Reset data counter\n    dataset.reset()\n\n    model = models[0]\n    # TODO: fix this\n\n    wer, cer = 0, 0\n    sub_word, ins_word, del_word = 0, 0, 0\n    sub_char, ins_char, del_char = 0, 0, 0\n    num_words, num_chars = 0, 0\n    if progressbar:\n        pbar = tqdm(total=len(dataset))  # TODO: fix this\n    while True:\n        batch, is_new_epoch = dataset.next(batch_size=eval_batch_size)\n\n        # Decode\n        if model.model_type in ['ctc', 'attention']:\n            best_hyps, _, perm_idx = model.decode(\n                batch['xs'], batch['x_lens'],\n                beam_width=beam_width,\n                max_decode_len=max_decode_len,\n                min_decode_len=min_decode_len,\n                length_penalty=length_penalty,\n                coverage_penalty=coverage_penalty,\n                task_index=0)\n            ys = batch['ys'][perm_idx]\n            y_lens = batch['y_lens'][perm_idx]\n        else:\n            best_hyps, _, perm_idx = model.decode(\n                batch['xs'], batch['x_lens'],\n                beam_width=beam_width,\n                max_decode_len=max_decode_len,\n                min_decode_len=min_decode_len,\n                length_penalty=length_penalty,\n                coverage_penalty=coverage_penalty,\n                task_index=1)\n            ys = batch['ys_sub'][perm_idx]\n            y_lens = batch['y_lens_sub'][perm_idx]\n\n        for b in range(len(batch['xs'])):\n            ##############################\n            # Reference\n            ##############################\n            if dataset.is_test:\n                str_ref = ys[b][0]\n                # NOTE: transcript is separated by space('_')\n            else:\n                # Convert from list of index to string\n                str_ref = dataset.idx2char(ys[b][:y_lens[b]])\n\n            ##############################\n            # Hypothesis\n            ##############################\n            str_hyp = dataset.idx2char(best_hyps[b])\n            str_hyp = re.sub(r'(.*)>(.*)', r'\\1', str_hyp)\n            # NOTE: Truncate by the first \n\n            ##############################\n            # Post-processing\n            ##############################\n            # Remove garbage labels\n            str_ref = re.sub(r'[@>]+', '', str_ref)\n            str_hyp = re.sub(r'[@>]+', '', str_hyp)\n            # NOTE: @ means noise\n\n            # Remove consecutive spaces\n            str_ref = re.sub(r'[_]+', '_', str_ref)\n            str_hyp = re.sub(r'[_]+', '_', str_hyp)\n\n            try:\n                # Compute WER\n                wer_b, sub_b, ins_b, del_b = compute_wer(\n                    ref=str_ref.split('_'),\n                    hyp=str_hyp.split('_'),\n                    
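# raw error counts are accumulated here and normalized by the reference totals after the loop\n                    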
normalize=False)\n                wer += wer_b\n                sub_word += sub_b\n                ins_word += ins_b\n                del_word += del_b\n                num_words += len(str_ref.split('_'))\n\n                # Compute CER\n                cer_b, sub_b, ins_b, del_b = compute_wer(\n                    ref=list(str_ref.replace('_', '')),\n                    hyp=list(str_hyp.replace('_', '')),\n                    normalize=False)\n                cer += cer_b\n                sub_char += sub_b\n                ins_char += ins_b\n                del_char += del_b\n                num_chars += len(str_ref.replace('_', ''))\n            except:\n                pass\n\n            if progressbar:\n                pbar.update(1)\n\n        if is_new_epoch:\n            break\n\n    if progressbar:\n        pbar.close()\n\n    # Reset data counters\n    dataset.reset()\n\n    wer /= num_words\n    sub_word /= num_words\n    ins_word /= num_words\n    del_word /= num_words\n    cer /= num_chars\n    sub_char /= num_chars\n    ins_char /= num_chars\n    del_char /= num_chars\n\n    df_word = pd.DataFrame(\n        {'SUB': [sub_word * 100, sub_char * 100],\n         'INS': [ins_word * 100, ins_char * 100],\n         'DEL': [del_word * 100, del_char * 100]},\n        columns=['SUB', 'INS', 'DEL'], index=['WER', 'CER'])\n\n    return wer, cer, df_word\n","sub_path":"examples/wsj/s5/exp/metrics/character.py","file_name":"character.py","file_ext":"py","file_size_in_byte":5382,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"37093755","text":"from selenium import webdriver\nfrom selenium.webdriver.support.ui import Select\nimport time \nimport math\n\nlink = \"http://www.google.com\"\nalert_script = \"Robots at work\"\n\n\ntry:\n    browser = webdriver.Chrome()\n    browser.get(link)\n\n    # Send alert via execute_script\n\n    browser.execute_script(\"alert('\" + alert_script + \"');\")\n\n    # Get alert message\n\n    alert = browser.switch_to.alert\n    text = alert.text\n    time.sleep(1)\n\n    # use assert to check that the expected text matches the entered text\n\n    assert text == alert_script, \"Alert text mismatches with entered data\"\n\n    # Close alert message\n\n    alert.accept()\n\n\nfinally:\n    # wait so the results of the script run can be assessed visually\n    time.sleep(5)\n    # close the browser after all the steps\n    browser.quit()\n\n# remember to leave an empty line at the end of the file","sub_path":"alert_text_verification.py","file_name":"alert_text_verification.py","file_ext":"py","file_size_in_byte":1051,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"299333218","text":"#########\n# Copyright (c) 2014 GigaSpaces Technologies Ltd. 
All rights reserved\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# * See the License for the specific language governing permissions and\n# * limitations under the License.\n\nfrom cloudify import ctx\nfrom cloudify.decorators import operation\nfrom cloudify import exceptions as cfy_exc\nfrom server_plugin.server import (EXTERNAL_ID,\n EXTERNAL_NAME)\nfrom vsphere_plugin_common import with_storage_client\nfrom vsphere_plugin_common.utils import (prepare_for_log,\n unassign_runtime_properties_from_resource)\n\nfrom vsphere_plugin_common.constants import (\n VSPHERE_STORAGE_FILE_NAME,\n VSPHERE_STORAGE_VM_ID,\n VSPHERE_STORAGE_VM_NAME,\n VSPHERE_STORAGE_SCSI_ID,\n VSPHERE_STORAGE_RUNTIME_PROPERTIES,\n)\n\n\n@operation\n@with_storage_client\ndef create(storage_client, **kwargs):\n ctx.logger.debug(\"Entering create storage procedure.\")\n storage = {\n 'name': ctx.node.id,\n }\n storage.update(ctx.node.properties['storage'])\n # This should be debug, but left as info until CFY-4867 makes logs more\n # visible\n ctx.logger.info(\n 'Storage properties: {properties}'.format(\n properties=prepare_for_log(storage),\n )\n )\n ctx.logger.info(\"Storage info: \\n%s.\" %\n \"\".join(\" %s : %s \" % item\n for item in storage.items()))\n\n storage_name = storage.get('name', None)\n storage_index = storage.get('storage_index', 1)\n storage_size = storage.get('storage_size', None)\n\n datastore = {}\n datastore['name'] = storage.get('datastore_name', None)\n datastore['id'] = storage.get('datastore_id', None)\n\n capabilities = ctx.capabilities.get_all().values()\n if not capabilities:\n raise cfy_exc.NonRecoverableError(\n 'Error during trying to create storage: storage should be '\n 'related to a VM, but capabilities are empty.')\n\n connected_vms = [rt_properties for rt_properties in capabilities\n if EXTERNAL_ID in rt_properties]\n if len(connected_vms) > 1:\n raise cfy_exc.NonRecoverableError(\n 'Error during trying to create storage: storage may be '\n 'connected to at most one VM')\n\n vm_id = connected_vms[0][EXTERNAL_ID]\n vm_name = connected_vms[0][EXTERNAL_NAME]\n ctx.logger.info(\n \"Creating new volume on VM '{vm}' with name '{name}' and size: \"\n \"{size}\".format(\n vm=vm_name,\n name=storage['name'],\n size=storage['storage_size']\n )\n )\n\n storage_info = {}\n storage_info['storage_name'] = storage_name\n storage_info['storage_index'] = storage_index\n storage_info['storage_size'] = storage_size\n storage_file_name, scsi_id = storage_client.create_storage(vm_id, datastore, storage_info)\n\n ctx.logger.info(\n \"Storage successfully created on VM '{vm}' with file name \"\n \"'{file_name}' and SCSI ID: {scsi} \".format(\n vm=vm_name,\n file_name=storage_file_name,\n scsi=scsi_id,\n )\n )\n\n ctx.instance.runtime_properties[VSPHERE_STORAGE_FILE_NAME] = \\\n storage_file_name\n ctx.instance.runtime_properties[VSPHERE_STORAGE_VM_ID] = vm_id\n ctx.instance.runtime_properties[VSPHERE_STORAGE_VM_NAME] = vm_name\n ctx.instance.runtime_properties[VSPHERE_STORAGE_SCSI_ID] = scsi_id\n\n\n@operation\n@with_storage_client\ndef delete(storage_client, **kwargs):\n vm_id = 
ctx.instance.runtime_properties[VSPHERE_STORAGE_VM_ID]\n vm_name = ctx.instance.runtime_properties[VSPHERE_STORAGE_VM_NAME]\n storage_file_name = \\\n ctx.instance.runtime_properties[VSPHERE_STORAGE_FILE_NAME]\n ctx.logger.info(\n \"Deleting storage {file} from {vm}\".format(\n file=storage_file_name,\n vm=vm_name,\n )\n )\n storage_client.delete_storage(vm_id, storage_file_name)\n ctx.logger.info(\n \"Successfully deleted storage {file} from {vm}\".format(\n file=storage_file_name,\n vm=vm_name,\n )\n )\n unassign_runtime_properties_from_resource(VSPHERE_STORAGE_RUNTIME_PROPERTIES, ctx)\n\n\n@operation\n@with_storage_client\ndef resize(storage_client, **kwargs):\n vm_id = ctx.instance.runtime_properties[VSPHERE_STORAGE_VM_ID]\n vm_name = ctx.instance.runtime_properties[VSPHERE_STORAGE_VM_NAME]\n storage_file_name = \\\n ctx.instance.runtime_properties[VSPHERE_STORAGE_FILE_NAME]\n storage_size = ctx.instance.runtime_properties.get('storage_size')\n if not storage_size:\n raise cfy_exc.NonRecoverableError(\n 'Error during trying to resize storage: new storage size wasn\\'t'\n ' specified.')\n ctx.logger.info(\n \"Resizing storage {file} on {vm} to {new_size}\".format(\n file=storage_file_name,\n vm=vm_name,\n new_size=storage_size,\n )\n )\n storage_client.resize_storage(vm_id,\n storage_file_name,\n storage_size)\n ctx.logger.info(\n \"Successfully resized storage {file} on {vm} to {new_size}\".format(\n file=storage_file_name,\n vm=vm_name,\n new_size=storage_size,\n )\n )\n","sub_path":"plugins/vsphere-plugin/storage_plugin/storage.py","file_name":"storage.py","file_ext":"py","file_size_in_byte":5700,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"102954705","text":"import os\nimport cv2\nimport json\nimport argparse\nimport analyser\nimport img_logger\nimport logging\n\nprint(\"Logging set to INFO\")\nlogging.basicConfig(level=logging.INFO)\nparser = argparse.ArgumentParser()\n\nparser.add_argument('--input', type=str, required=True, help=\"path to directory with images\")\nparser.add_argument('--cfg', type=str, required=True, help=\"path to config file\")\nparser.add_argument('--network', type=str, required=True, help=\"path to network pb file\")\nparser.add_argument('--output', type=str, required=True, help=\"path to output directory\")\nparser.add_argument(\"--log\", type=str, help=\"increase output verbosity\", choices=[\"SAVE\", \"PRINT\"])\nargs = parser.parse_args()\n\npath_data = args.input\npictures = [(f, os.path.join(path_data, f)) for f in os.listdir(path_data)\n if os.path.isfile(os.path.join(path_data, f))]\n\nif args.log == \"SAVE\":\n img_logger.start_logging_to_file(\"../debug\")\n logging.info(\"setting up debugger to save images\")\nelif args.log == \"PRINT\":\n img_logger.start_logging_to_plots()\n logging.info(\"setting up debugger to plot images\")\n\nlogging.info(\"loading config\")\n\nwith open(args.cfg, \"r\") as f:\n config = json.load(f)\n\nlogging.info(\"starting processing data\")\nanalysed = {}\n\nfor filename, filepath in pictures:\n logging.info(\"processing {}\".format(filename))\n\n image = cv2.imread(filepath)\n try:\n analysed[filename] = analyser.analyse_image(image, config, args.network)\n except Exception as e:\n logging.error(e)\n\n logging.info(\"image processed {}\".format(filename))\n\noutput_json = os.path.join(args.output, \"output.json\")\n\nwith open(output_json, 'w') as outfile:\n json.dump(analysed, outfile)\n\nlogging.info(\"result json saved to 
'{}'\".format(output_json))\n","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"592694271","text":"import my_debugger\n\ndebugger = my_debugger.debugger()\n\n# 1 test_app = r\"c:\\windows\\system32\\calc.exe\"\n# 1 debugger.load(test_app)\n\npid = input(\"Enter the PID of the process to attach to:\")\ndebugger.attach(int(pid))\n\nthread_list = debugger.enumerate_threads()\n\nfor thread in thread_list:\n\n    thread_context = debugger.get_thread_context(thread)\n\n    if not thread_context:\n        continue\n\n    print('[*] Dumping registers for thread ID: {0}'.format(thread))\n    print('[**] Rax: {0:#x}'.format(thread_context.Rax)) # return value\n    print('[**] Rcx: {0:#x}'.format(thread_context.Rcx)) # first int parameter\n    print('[**] Rdx: {0:#x}'.format(thread_context.Rdx)) # second int parameter\n    print('[*] END DUMP')\n\ndebugger.detach()\n","sub_path":"Chapter3/my_test.py","file_name":"my_test.py","file_ext":"py","file_size_in_byte":741,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"158330263","text":"from itertools import permutations\nfrom math import sqrt\n\nanswer = None\ndigits = []\n\ndef is_prime(n):\n    if n < 2:\n        return False\n    for i in range(2, int(sqrt(n)) + 1):\n        if not n % i:\n            return False\n    return True\n\nfor x in range(1, 10):\n    digits.append(str(x))\n    for perm in permutations(digits):\n        string = ''.join(perm)\n        if is_prime(int(string)):\n            if int(perm[0]) != 0:\n                answer = int(string)\n\nprint(answer)\n","sub_path":"python/euler_41.py","file_name":"euler_41.py","file_ext":"py","file_size_in_byte":498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"483357897","text":"\"\"\"\nProteasome only\n\"\"\"\nimport tensorflow as tf\nimport argparse\nfrom predictor.database_functions import ms_extractor\nfrom predictor.database_functions import uniprot_extractor\nfrom predictor.general import save_pickle\nfrom predictor.general import read_pickle\nfrom predictor.core import seek_ms_uniprot_and_classify\nfrom predictor.core import non_cleavage_samples\nfrom predictor.core import save_ml_input_data_new\nfrom predictor.ml_main.ml_utilities import read_table\nimport pandas as pd\nfrom sklearn.model_selection import train_test_split\nfrom predictor.ml_main.ml_utilities import integer_encoding\nfrom keras import regularizers\nfrom keras.preprocessing.sequence import pad_sequences\nfrom keras.utils import to_categorical\nfrom sklearn.preprocessing import LabelEncoder\nfrom keras.models import Model, Sequential\nfrom keras.regularizers import l2\nfrom keras.constraints import max_norm\nfrom keras.optimizers import Adam\nfrom keras.callbacks import EarlyStopping\nfrom keras.layers import Input, Dense, Dropout, Flatten, Activation, Reshape\nfrom keras.layers import Conv1D, Add, MaxPooling1D, BatchNormalization\nfrom keras.layers import Embedding, Bidirectional, LSTM, CuDNNLSTM, GlobalMaxPooling1D\nfrom predictor.ml_main.ml_utilities import plot_history\nfrom predictor.ml_main.ml_utilities import display_model_score\nfrom predictor.ml_main.ml_utilities import metrics_ml\nimport numpy as np\nfrom predictor.ml_main.ml_utilities import array_parser\n\nHELP = \" \\\nCommand:\\n \\\n----------\\n \\\nRun: python3 PROcleave.py --generate_data --features --NN\\\n\"\n\ndef parse_args():\n    parser = argparse.ArgumentParser(description=HELP, 
formatter_class=argparse.RawDescriptionHelpFormatter)\n parser.add_argument('--generate_data', help='Generate df for ML algorithm.', action='store_true')\n parser.add_argument('--NN', help='Run NN', action='store_true')\n args = parser.parse_args()\n return args.generate_data, args.NN\n\ndef generating_raw_data(iedb_data_file_raw_path, uniprot_data_file_raw_path, proteasome_ml_path):\n print(\"Reading IEDB data\")\n iedb_data = ms_extractor.extract_ms_data(iedb_data_file_raw_path)\n \n print(\"IEDB completed\\nExtracting uniprot data\")\n uniprot_data = uniprot_extractor.id_sequence_extractor(uniprot_data_file_raw_path)\n \n print(\"Uniprot data completed\\nSeeking neighbour regions\")\n large_uniprot_peptide, amino_acid_dict_and_large_uniprot_peptide = seek_ms_uniprot_and_classify.seeking_ms(iedb_data, uniprot_data)\n\n print(\"Seeking neightbour completed\\nGetting non-cleavage samples\")\n non_cleavage_samples.export_df_for_ml(amino_acid_dict_and_large_uniprot_peptide, proteasome_ml_path)\n\ndef create_predictive_movels_NN(proteasome_ml_path):\n amino_acids = [\"A\", \"C\", \"D\", \"E\", \"F\", \"G\", \"H\", \"I\", \"K\", \"L\", \"M\", \"N\", \"P\", \"Q\", \"R\", \"S\", \"T\", \"V\", \"W\", \"Y\"]\n properties_dict = {}\n properties_df = pd.read_csv(\"data/normalized_0_1_all.csv\", index_col=0)\n for amino_acid in amino_acids:\n physicochemical_properties = [round(i, 3) for i in properties_df.loc[amino_acid].tolist()]\n properties_dict.setdefault(amino_acid, physicochemical_properties)\n print(properties_df)\n resume_prediction = {}\n \n amino_acids = [\"A\"]\n for amino_acid in amino_acids:\n file_name = \"data/NN/proteasome_{}_sequence_class.txt\".format(amino_acid)\n print(\"Working with {}\".format(file_name))\n training_table = pd.read_csv(file_name, sep=\"\\t\")\n sequence_table = training_table.drop(['class'], axis=1)\n class_table = training_table['class']\n \n print(\"Encoding {}...\".format(amino_acid))\n encoding_table = integer_encoding.integer_encoding(sequence_table)\n max_lenght = 9 #padding\n padding_table = pad_sequences(encoding_table, maxlen=max_lenght, padding='post', truncating='post')\n print(\"One Hot Encoding {}...\".format(amino_acid))\n one_hot_table = to_categorical(padding_table, num_classes=20)\n print(\"Reshaping {}...\".format(amino_acid))\n train_ohe = one_hot_table.reshape(sequence_table.shape[0], 1, max_lenght*20)\n train_ohe = train_ohe.astype(int)\n train_ohe = train_ohe.tolist()\n train_ohe_list = []\n for i in train_ohe:\n for j in i: \n train_ohe_list.append(j)\n one_hot_df = pd.DataFrame(train_ohe_list, columns=[\"P{}\".format(i) for i in range(9*20)])#7\n print(one_hot_df)\n print(\"Concatenating {}...\".format(amino_acid))\n training_table = pd.concat([one_hot_df, class_table], axis=1)\n #training_table = training_table.drop([\"P{}\".format(i) for i in range(60,80)], axis=1) # drop data regarding to position 4\n print(\"Splitting training, validation and testing {}...\".format(amino_acid))\n data_train, data_val, class_labels_train, class_labels_val = train_test_split(training_table.drop(['class'], axis=1), training_table['class'], test_size = 0.40, random_state=42, shuffle=True)\n data_val, data_test, class_labels_val, class_labels_test = train_test_split(data_val, class_labels_val, test_size = 0.25, random_state=42)\n print(\"Generating model {}...\".format(amino_acid))\n \n neurons = len(list(training_table.drop(['class'], axis=1)))\n model = Sequential()\n #model.add(Embedding(len(data_train), neurons, input_length=neurons))\n 
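# note: the model input is 180 one-hot features (9 window positions x 20 amino acids)\n        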
model.add(Dense(int(neurons), input_dim=neurons, activation='relu', kernel_initializer='he_normal', kernel_regularizer=regularizers.l2(0.001))) # input_dim=neurons\n        model.add(Dense(int(neurons/3), activation='relu', kernel_initializer='he_normal', kernel_regularizer=regularizers.l2(0.001)))\n        #model.add(Dense(int(neurons/3), activation='relu', kernel_initializer='he_normal'))\n        #model.add(Bidirectional(LSTM(int(neurons/3), return_sequences=True)))\n        #model.add(Dense(int(neurons/4.5), activation='relu', kernel_initializer='he_normal'))\n        #model.add(Bidirectional(LSTM(int(neurons/6), return_sequences=True)))\n        #model.add(Flatten())\n        model.add(Dense(1, activation='sigmoid'))\n\n        model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy', metrics_ml.matthews_correlation]) # tf.keras.metrics.TruePositives()\n        es = EarlyStopping(monitor='val_matthews_correlation', mode='max', patience=5, verbose=1) # val_matthews_correlation\n        b_size = 128\n        history1 = model.fit(data_train, class_labels_train, epochs=1000, batch_size=b_size, validation_data=(data_val, class_labels_val), callbacks=[es], verbose=1)\n        model.save_weights(\"data/models/proteasome_{}_model.h5\".format(amino_acid))\n        plot_history.plot_history(history1)\n        display_model_score.display_model_score(model, [data_train, class_labels_train], [data_val, class_labels_val], [data_test, class_labels_test], b_size)\n        \n        train_score = model.evaluate(data_train, class_labels_train, batch_size=b_size, verbose=1)\n        val_score = model.evaluate(data_val, class_labels_val, batch_size=b_size, verbose=1)\n        test_score = model.evaluate(data_test, class_labels_test, batch_size=b_size, verbose=1)\n        \n        resume_prediction.setdefault(amino_acid, {}).setdefault(\"Train\", round(train_score[1], 3))\n        resume_prediction.setdefault(amino_acid, {}).setdefault(\"Val\", round(val_score[1], 3))\n        resume_prediction.setdefault(amino_acid, {}).setdefault(\"Test\", round(test_score[1], 3))\n        \n    resume_df = pd.DataFrame.from_dict(resume_prediction, orient='index')\n    resume_df.to_csv(\"models_accuracy_8_no_embedding.csv\")\n\ndef main(generate_data=False, NN=False):\n    iedb_data_file_raw_path = \"../../data/raw/iedb/mhc_ligand_full.csv\"\n    uniprot_data_file_raw_path = \"../../data/raw/uniprot/uniprot_sprot.fasta\"\n    iedb_data_file_parsed_path = \"../../data/parsed/iedb/ms_allele_peptides\"\n    uniprot_data_file_parsed_path = \"../../data/parsed/uniprot/uniprot_sequences\"\n    proteasome_ml_path = \"data/NN/proteasome\"\n    \n    if not any([generate_data, NN]):\n        print(\"Please, provide an argument. See python3 PROcleave.py -h for more information\")\n\n    if generate_data:\n        generating_raw_data(iedb_data_file_raw_path, uniprot_data_file_raw_path, proteasome_ml_path)\n    \n    if NN:\n        create_predictive_movels_NN(proteasome_ml_path)\n\nif __name__ == \"__main__\":\n    generate_data, NN = parse_args()\n    main(generate_data, NN)\n","sub_path":"immunogenicity/template.py","file_name":"template.py","file_ext":"py","file_size_in_byte":8396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"240330197","text":"# The program computes the sum and the arithmetic mean of any number of values entered from the keyboard. \n# Entering the number 0 ends the input.\n\nb = 0\nsuma = 0\nwhile True: \n    a = int(input('Enter a number: '))\n    if a == 0:\n        srednia = suma/b if b > 0 else 0\n        break\n    b += 1\n    suma += a\nprint(f'RESULT: Count: {b}, Sum={suma}, Mean={srednia}')\n","sub_path":"02-ControlStructures/41.py","file_name":"41.py","file_ext":"py","file_size_in_byte":367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"228071978","text":"# 06_read_data.py\nimport sqlite3\n\ntry:\n    conn = sqlite3.connect('produtos.db')\n    cursor = conn.cursor()\n\n    # reading the data\n    cursor.execute(\"\"\"\n        SELECT * FROM produtos;\n    \"\"\")\n\n    for linha in cursor.fetchall():\n        print(linha)\n\n    conn.close()\n\nexcept:\n    print(\"Database does not exist\")\n","sub_path":"Tkinter/gerenciamento_estoque/visualizar banco.py","file_name":"visualizar banco.py","file_ext":"py","file_size_in_byte":315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"465089088","text":"import Distributed_JobQ.API as API\nimport util\nfrom threading import Lock\n\nlock = Lock()\nf = open(\"test.txt\", \"w\")\n\ndef print_recv(self, arg, parent_node):\n\tif parent_node is None:\n\t\tprint_self(self, arg)\n\t\treturn\n\n\ts = util.decode_strs(arg)\n\ts = s[0]\n\n\tprint_self(self, s)\n\ndef print_send(self, cnode, s):\n\targ = util.encode_strs([s])\n\n\tAPI.API_send(self, 800, arg, cnode)\n\ndef print_self(self, s):\n\tlock.acquire()\n\tf.write(s + \"\\n\")\n\tlock.release()\n\t\n\nAPI.add_to_API(print_recv, print_send, print_recv, 800)","sub_path":"Test_Local/API.py","file_name":"API.py","file_ext":"py","file_size_in_byte":509,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"496990735","text":"import requests\nimport pickle\nimport re\nimport os\nsearch_terms = ['\"bero på\"',\n                '\"bidra till\"',\n                '\"leda till\"',\n                '\"på grund av\"',\n                '\"till följd av\"',\n                '\"är ett resultat av\"',\n                \"resultera\",\n                \"förorsaka\",\n                \"orsaka\",\n                \"påverka\",\n                \"effekt\",\n                \"medföra\",\n                \"framkalla\",\n                \"vålla\"]\n\nannotated_search_terms = [('\"bero på\"', 0, \"vb\"),\n                          ('\"bidra till\"', 0, \"vb\"),\n                          ('\"leda till\"', 0, \"vb\"),\n                          ('\"på grund av\"', 1, \"nn\"),\n                          ('\"till följd av\"', 1, \"nn\"),\n                          ('\"vara ett resultat av\"', 0, \"vb\"),\n                          (\"resultera\", 0, \"vb\"),\n                          (\"förorsaka\", 0, \"vb\"),\n                          (\"orsaka\", 0, \"vb\"),\n                          (\"påverka\", 0, \"vb\"),\n                          (\"medföra\",0, \"vb\"),\n                          (\"framkalla\", 0, \"vb\"),\n                          (\"vålla\", 0, \"vb\")]\n\n\ndef expand(term_list=annotated_search_terms, dictionary=None):\n    if dictionary is None:\n        dictionary = {\"vara\": [\"var\", \"är\", \"varit\", \"vore\", \"vara\"]}\n    terms = {}\n    for term, i, pos in term_list:\n        if term not in terms:\n            terms[term] = set()\n        if pos:\n            words = term.strip(\"\\\"\").split()\n            if words[i] in dictionary:\n                if len(words) > 1:\n                    for form in dictionary[words[i]]:\n                        if i + 1 == len(words):\n                            terms[term].add(f'\"{\" \".join(words[:i] + [form])}\"')\n                        else:\n                            terms[term].add(f'\"{\" \".join(words[:i] + [form] + words[min(i+1, len(words)-1):])}\"')\n                else:\n                    terms[term] = terms[term].union(dictionary[words[i]])\n            else:\n                forms = requests.get(f\"https://skrutten.csc.kth.se/granskaapi/inflect.php?word={words[i]}&tag={pos}\")\n                if forms:\n                    wforms = []\n                    for form in forms.text.split('
'):\n form = form.strip()\n if f'<{pos}' in form and not form.startswith(\"all forms\"):\n wforms.append(form.split('<')[0].strip())\n # else:\n # print('no:', form)\n #forms = re.sub(\"((<)+[a-z.*]*(>)+|
|\\d+)\", \"\", forms.text.strip())\n #forms = [el.split()[0].strip() for el in forms.split(\"\\n\")[1:]\n #if el.split() and not el.startswith(\"all forms\")]\n dictionary[words[i]] = forms\n for form in set(wforms):\n if len(words) > 1:\n terms[term].add(f'\"{\" \".join(words[:i] + [form] + words[min(i+1, len(words)-1):])}\"')\n else:\n terms[term].add(form)\n else:\n terms[term].add(term)\n return terms\n\ndef create_tagged_term_list(term_dict, term_annotations):\n \"\"\"\n returns a list of all terms in term dict and their pos information\n in the style of the parsed_target field in the index schema\n \"\"\"\n tagged_list = []\n for term, _, pos in term_annotations:\n if '\"' in term:\n tagged_list.extend(term_dict[term])\n else:\n tagged_list.extend([f'{form}//{pos}' for form in term_dict[term]])\n return tagged_list\n\nexpanded_dict = {'\"bero på\"': {'\"berodd på\"', '\"beror på\"', '\"berodds på\"', '\"beros på\"', '\"berodda på\"',\n '\"berott på\"', '\"berotts på\"', '\"beroddes på\"', '\"berodde på\"', '\"bero på\"'},\n '\"bidra till\"': {'\"bidragna till\"', '\"bidragits till\"', '\"bidrog till\"', '\"bidra till\"',\n '\"bidragande till\"', '\"bidragit till\"', '\"bidrogs till\"', '\"bidras till\"',\n '\"bidrar till\"', '\"bidragne till\"'},\n '\"leda till\"': {'\"ledda till\"', '\"led till\"', '\"ledar till\"', '\"leder till\"', '\"lett till\"',\n '\"ledades till\"', '\"ledds till\"', '\"ledes till\"', '\"leda till\"', '\"ledde till\"',\n '\"ledad till\"', '\"ledats till\"', '\"ledd till\"', '\"ledas till\"', '\"ledat till\"',\n '\"ledads till\"', '\"ledande till\"', '\"letts till\"', '\"ledade till\"', '\"leddes till\"'},\n '\"på grund av\"': {'\"på grunders av\"', '\"på grundet av\"', '\"på grunder av\"', '\"på grunds av\"',\n '\"på grundets av\"', '\"på grunden av\"', '\"på grund av\"', '\"på grundens av\"',\n '\"på grunderna av\"', '\"på grundernas av\"'},\n '\"till följd av\"': {'\"till följds av\"', '\"till följden av\"', '\"till följd av\"', '\"till följdens av\"',\n '\"till följder av\"', '\"till följdernas av\"', '\"till följderna av\"', '\"till följders av\"'},\n '\"vara ett resultat av\"': {'\"vara ett resultat av\"', '\"är ett resultat av\"', '\"var ett resultat av\"',\n '\"vore ett resultat av\"', '\"varit ett resultat av\"'},\n 'resultera': {'resulterats', 'resulterat', 'resulterar', 'resulterads', 'resulterade', 'resulterande',\n 'resulterad', 'resulteras', 'resultera', 'resulterades'},\n 'förorsaka': {'förorsakads', 'förorsakas', 'förorsakar', 'förorsakande', 'förorsakades', 'förorsakade',\n 'förorsakats', 'förorsakat', 'förorsakad', 'förorsaka'},\n 'orsaka': {'orsakades', 'orsakad', 'orsaka', 'orsakads', 'orsakande', 'orsakade', 'orsakat', 'orsakats',\n 'orsakar', 'orsakas'},\n 'påverka': {'påverkats', 'påverkads', 'påverkas', 'påverkade', 'påverkar', 'påverkad', 'påverkades',\n 'påverkande', 'påverka', 'påverkat'},\n 'medföra': {'medförts', 'medförd', 'medföra', 'medföras', 'medförda', 'medföres', 'medförds', 'medförde',\n 'medför', 'medfördes', 'medfört'},\n 'framkalla': {'framkallas', 'framkallade', 'framkallads', 'framkallades', 'framkallats', 'framkallat',\n 'framkallad', 'framkalla', 'framkallar', 'framkallande'},\n 'vålla': {'vållats', 'vållad', 'vållat', 'vållades', 'vållas', 'vållande', 'vållar', 'vållade',\n 'vålla', 'vållads'}}\n\n# restricted based on pos (maybe it is wise to keep the old list for now)\nnew_expanded_dict = {'\"bero på\"': {'\"beror på\"', '\"berodde på\"', '\"bero på\"',\n '\"berott på\"', '\"beros på\"', '\"beroddes på\"',\n 
'\"berotts på\"'},\n '\"bidra till\"': {'\"bidrar till\"', '\"bidras till\"', '\"bidrog till\"',\n '\"bidra till\"', '\"bidragits till\"',\n '\"bidrogs till\"', '\"bidragit till\"'},\n '\"leda till\"': {'\"leder till\"', '\"ledar till\"', '\"ledde till\"',\n '\"ledades till\"', '\"lett till\"', '\"ledes till\"',\n '\"ledas till\"', '\"leda till\"', '\"letts till\"',\n '\"ledade till\"', '\"ledats till\"', '\"ledat till\"',\n '\"leddes till\"', '\"led till\"'},\n '\"på grund av\"': {'\"på grunderna av\"', '\"på grunder av\"',\n '\"på grundernas av\"', '\"på grundets av\"',\n '\"på grund av\"', '\"på grunds av\"', '\"på grunders av\"',\n '\"på grunden av\"', '\"på grundens av\"', '\"på grundet av\"'},\n '\"till följd av\"': {'\"till följd av\"', '\"till följders av\"', '\"till följder av\"',\n '\"till följds av\"', '\"till följden av\"', '\"till följdernas av\"',\n '\"till följdens av\"', '\"till följderna av\"'},\n '\"vara ett resultat av\"': {'\"vore ett resultat av\"', '\"varit ett resultat av\"',\n '\"är ett resultat av\"', '\"vara ett resultat av\"',\n '\"var ett resultat av\"'},\n 'resultera': {'resulterats', 'resultera', 'resulteras',\n 'resulterat', 'resulterade', 'resulterar', 'resulterades'},\n 'förorsaka': {'förorsakade', 'förorsakas', 'förorsakar', 'förorsaka',\n 'förorsakat', 'förorsakades', 'förorsakats'},\n 'orsaka': {'orsakas', 'orsakat', 'orsakar', 'orsakades',\n 'orsakade', 'orsaka', 'orsakats'},\n 'påverka': {'påverkades', 'påverkats', 'påverkas', 'påverka',\n 'påverkat', 'påverkade', 'påverkar'},\n 'medföra': {'medfört', 'medför', 'medföras', 'medförde',\n 'medförts', 'medföres', 'medföra', 'medfördes'},\n 'framkalla': {'framkallas', 'framkalla', 'framkallat',\n 'framkallades', 'framkallar', 'framkallade', 'framkallats'},\n 'vålla': {'vållade', 'vållades', 'vållats', 'vålla', 'vållar', 'vållat', 'vållas'}}\n\n# filter out unlikely phrases\n# removed: '\"på grunderna av\"', '\"på grundernas av\"',\n# '\"på grundets av\"', '\"på grunders av\"', '\"på grundens av\"',\n# '\"på grundet av\"', '\"till följders av\"', '\"till följdens av\"',\n# '\"till följdernas av\"', '\"till följds av\"',\n\n# to add:\n# var resultatet av, var resultaten av\nfiltered_expanded_dict = {'\"bero på\"': {'\"beror på\"', '\"berodde på\"', '\"bero på\"',\n '\"berott på\"', '\"beros på\"', '\"beroddes på\"',\n '\"berotts på\"'},\n '\"bidra till\"': {'\"bidrar till\"', '\"bidras till\"', '\"bidrog till\"',\n '\"bidra till\"', '\"bidragits till\"',\n '\"bidrogs till\"', '\"bidragit till\"'},\n '\"leda till\"': {'\"leder till\"', '\"ledar till\"', '\"ledde till\"',\n '\"ledades till\"', '\"lett till\"', '\"ledes till\"',\n '\"ledas till\"', '\"leda till\"', '\"letts till\"',\n '\"ledade till\"', '\"ledats till\"', '\"ledat till\"',\n '\"leddes till\"', '\"led till\"'},\n '\"på grund av\"': {'\"på grunder av\"', '\"på grund av\"', '\"på grunds av\"',\n '\"på grunden av\"'},\n '\"till följd av\"': {'\"till följd av\"', '\"till följder av\"',\n '\"till följden av\"', '\"till följderna av\"'},\n '\"vara ett resultat av\"': {'\"vore ett resultat av\"', '\"varit ett resultat av\"',\n '\"är ett resultat av\"', '\"vara ett resultat av\"',\n '\"var ett resultat av\"'},\n 'resultera': {'resulterats', 'resultera', 'resulteras',\n 'resulterat', 'resulterade', 'resulterar', 'resulterades'},\n 'förorsaka': {'förorsakade', 'förorsakas', 'förorsakar', 'förorsaka',\n 'förorsakat', 'förorsakades', 'förorsakats'},\n 'orsaka': {'orsakas', 'orsakat', 'orsakar', 'orsakades',\n 'orsakade', 'orsaka', 'orsakats'},\n 'påverka': 
{'påverkades', 'påverkats', 'påverkas', 'påverka',\n 'påverkat', 'påverkade', 'påverkar'},\n 'medföra': {'medfört', 'medför', 'medföras', 'medförde',\n 'medförts', 'medföres', 'medföra', 'medfördes'},\n 'framkalla': {'framkallas', 'framkalla', 'framkallat',\n 'framkallades', 'framkallar', 'framkallade', 'framkallats'},\n 'vålla': {'vållade', 'vållades', 'vållats', 'vålla', 'vållar', 'vållat', 'vållas'}}\n\n# öka tillta, minska avta växa?, ökning tillväxt höjning, minskning nedgång reducering avtagande\n# wonder if there is a preference for POS\n\nincrease_terms = ['öka', 'tillta', 'växa', 'ökning', 'uppgång', 'tilltagande', 'höjning']\nannotated_increase_terms = [('öka', 0, 'vb'),\n ('tillta', 0, 'vb'),\n ('växa', 0, 'vb'),\n ('ökning', 0, 'nn'),\n ('uppgång', 0, 'nn'),\n ('tilltagande', 0, 'nn'),\n ('höjning', 0, 'nn')]\nincr_dict = {'öka': {'ökades', 'ökats', 'ökar', 'ökade', 'öka', 'ökat', 'ökas'},\n 'tillta': {'tilltogs', 'tilltagit', 'tillta', 'tilltas', 'tilltog', 'tilltagits', 'tilltar'},\n 'växa': {'växa', 'växs', 'växer', 'växte', 'vuxit', 'väx', 'vuxits', 'växtes', 'växas', 'växts', 'växt'},\n 'ökning': {'ökningarna', 'ökningen', 'ökningens', 'ökningars', 'ökning', 'ökningarnas', 'ökningar'},\n 'uppgång': {'uppgångarnas', 'uppgången', 'uppgångarna', 'uppgångens', 'uppgångar', 'uppgångars', 'uppgång'},\n 'tilltagande': {'tilltagande', 'tilltagandet', 'tilltagandes', 'tilltagandets'},\n 'höjning': {'höjningar', 'höjningens', 'höjningars', 'höjningarna', 'höjning', 'höjningen', 'höjningarnas'}}\n\ndecrease_terms = ['minska', 'avta','minskning', 'nedgång', 'avtagande', 'sänkning']\nannotated_decrease_terms = [('minska', 0, 'vb'),\n ('avta', 0, 'vb'),\n ('minskning', 0, 'nn'),\n ('nedgång', 0, 'nn'),\n ('avtagande', 0, 'nn'),\n ('sänkning', 0, 'nn')]\ndecr_dict = {'minska': {'minskade', 'minskar', 'minska', 'minskas', 'minskat', 'minskats', 'minskades'},\n 'avta': {'avtogs', 'avtas', 'avtar', 'avtog', 'avta', 'avtagits', 'avtagit'},\n 'minskning': {'minskningar', 'minskningars', 'minskningen', 'minskning', 'minskningarna', 'minskningens', 'minskningarnas'},\n 'nedgång': {'nedgången', 'nedgångarnas', 'nedgångarna', 'nedgångars', 'nedgångens', 'nedgångar', 'nedgång'},\n 'avtagande': {'avtagande', 'avtagandet', 'avtagandes', 'avtagandets'},\n 'sänkning': {'sänkning', 'sänkningars', 'sänkningarnas', 'sänkningarna', 'sänkningen', 'sänkningar', 'sänkningens'}}\n\n\nkeys_to_pos = {'minska': 'VB',\n 'avta': 'VB',\n 'minskning': 'NN',\n 'nedgång': 'NN',\n 'avtagande': 'NN',\n 'sänkning': 'NN',\n 'öka': 'VB',\n 'tillta': 'VB',\n 'växa': 'VB',\n 'ökning': 'NN',\n 'uppgång': 'NN',\n 'tilltagande': 'NN',\n 'höjning': 'NN'}\n\ntagged_list = ['\"berodde på\"',\n '\"beroddes på\"',\n '\"bero på\"',\n '\"beror på\"',\n '\"beros på\"',\n '\"berott på\"',\n '\"berotts på\"',\n '\"bidrar till\"',\n '\"bidragits till\"',\n '\"bidras till\"',\n '\"bidrogs till\"',\n '\"bidra till\"',\n '\"bidrog till\"',\n '\"bidragit till\"',\n '\"led till\"',\n '\"ledat till\"',\n '\"ledas till\"',\n '\"ledats till\"',\n '\"ledade till\"',\n '\"ledde till\"',\n '\"letts till\"',\n '\"ledes till\"',\n '\"ledar till\"',\n '\"leddes till\"',\n '\"ledades till\"',\n '\"lett till\"',\n '\"leda till\"',\n '\"leder till\"',\n '\"på grund av\"',\n '\"på grunden av\"',\n '\"på grunds av\"',\n '\"på grunder av\"',\n '\"till följd av\"',\n '\"till följderna av\"',\n '\"till följden av\"',\n '\"till följder av\"',\n '\"vore ett resultat av\"',\n '\"var ett resultat av\"',\n '\"varit ett resultat av\"',\n '\"vara ett resultat 
av\"',\n '\"är ett resultat av\"',\n 'resulterade//vb',\n 'resulteras',\n 'resulterat//vb',\n 'resulterats',\n 'resulterades',\n 'resulterar',\n 'resultera',\n 'förorsakas',\n 'förorsakats',\n 'förorsakat//vb',\n 'förorsaka',\n 'förorsakar',\n 'förorsakade//vb',\n 'förorsakades',\n 'orsakas',\n 'orsakade//vb',\n 'orsakat//vb',\n 'orsakades',\n 'orsaka',\n 'orsakats',\n 'orsakar',\n 'påverka',\n 'påverkat//vb',\n 'påverkats',\n 'påverkades',\n 'påverkade//vb',\n 'påverkar',\n 'påverkas',\n 'medför',\n 'medföra',\n 'medföres',\n 'medfördes',\n 'medföras',\n 'medförde//vb',\n 'medförts',\n 'medfört//vb',\n 'framkallas',\n 'framkallade//vb',\n 'framkalla',\n 'framkallades',\n 'framkallar',\n 'framkallat//vb',\n 'framkallats',\n 'vållar',\n 'vållades',\n 'vållade//vb',\n 'vållas',\n 'vålla',\n 'vållats',\n 'vållat//vb']\n","sub_path":"data_preprocessing/search_terms.py","file_name":"search_terms.py","file_ext":"py","file_size_in_byte":18316,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"365314448","text":"from django.core.exceptions import ObjectDoesNotExist\nfrom rest_framework import status\nfrom rest_framework.response import Response\nfrom rest_framework.views import APIView\nfrom common.generics.generic_post_api_views import GenericPostListCreate, GenericPostRetrieveUpdateDestroy\nfrom rest_framework.generics import ListAPIView\nfrom rest_framework.permissions import IsAuthenticated\nfrom .models import Room, Message, IsCreatorOrMemberReadOnly\nfrom .serializers import RoomSerializer, MessageSerializer\nfrom users.models import CustomUser\nfrom django.utils.decorators import method_decorator\nfrom drf_yasg.utils import swagger_auto_schema\n\n\n# HTTP GET: Returns a list of rooms that the user can see\n# HTTP POST: Creates a room\n@method_decorator(name='get', decorator=swagger_auto_schema(tags=['Messaging']))\n@method_decorator(name='post', decorator=swagger_auto_schema(tags=['Messaging']))\nclass RoomListCreate(GenericPostListCreate):\n queryset = Room.objects.all()\n serializer_class = RoomSerializer\n detail_serializer_class = RoomSerializer\n permission_classes = [\n IsCreatorOrMemberReadOnly,\n ] + GenericPostListCreate.permission_classes\n\n def list(self, request, *args, **kwargs):\n self.queryset = CustomUser.objects.get(id=request.user.id).room_set.all() | Room.objects.filter(privacy_level=0)\n return GenericPostListCreate.list(self, request, *args, **kwargs)\n\n def create(self, request, *args, **kwargs):\n response = GenericPostListCreate.create(self, request, *args, **kwargs)\n\n if response.status_code == status.HTTP_201_CREATED:\n if request.user.id not in response.data['members']:\n Room.objects.get(id=response.data['id']).members.add(request.user.id)\n response.data['members'] = Room.objects.get(id=response.data['id']).members.values_list('id', flat=True)\n\n return response\n\n\n# HTTP GET: Returns a room\n# HTTP PUT: Updates a room\n# HTTP PATCH: Partially updates a room\n# HTTP DELETE: Deletes a room\n@method_decorator(name='get', decorator=swagger_auto_schema(tags=['Messaging']))\n@method_decorator(name='put', decorator=swagger_auto_schema(tags=['Messaging']))\n@method_decorator(name='patch', decorator=swagger_auto_schema(tags=['Messaging']))\n@method_decorator(name='delete', decorator=swagger_auto_schema(tags=['Messaging']))\nclass RoomRetrieveUpdateDestroy(GenericPostRetrieveUpdateDestroy):\n queryset = Room.objects.all()\n serializer_class = RoomSerializer\n detail_serializer_class = RoomSerializer\n 
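# the room creator gets write access; plain members are read-only (per IsCreatorOrMemberReadOnly)\n    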
permission_classes = [\n IsCreatorOrMemberReadOnly,\n ] + GenericPostRetrieveUpdateDestroy.permission_classes\n\n def update(self, request, *args, **kwargs):\n response = GenericPostRetrieveUpdateDestroy.update(self, request, *args, **kwargs)\n\n if response.status_code == status.HTTP_200_OK:\n if request.user.id not in response.data['members']:\n Room.objects.get(id=response.data['id']).members.add(request.user.id)\n response.data['members'] = Room.objects.get(id=response.data['id']).members.values_list('id', flat=True)\n\n return response\n\n\n# HTTP GET: Returns a list of messages for a room\n@method_decorator(name='get', decorator=swagger_auto_schema(tags=['Messaging']))\nclass MessageList(ListAPIView):\n queryset = Message.objects.all()\n serializer_class = MessageSerializer\n permission_classes = [\n IsAuthenticated,\n ]\n\n def list(self, request, *args, **kwargs):\n try:\n if Room.objects.get(id=kwargs['id']).members.filter(id=request.user.id):\n self.queryset = Message.objects.filter(room=kwargs['id'])\n return ListAPIView.list(self, request, *args, **kwargs)\n\n return Response('Permission denied', status=status.HTTP_403_FORBIDDEN)\n\n except ObjectDoesNotExist:\n return Response({'detail': 'Not found.'}, status=status.HTTP_404_NOT_FOUND)\n\n\n# HTTP GET: Returns the members of a room\n# HTTP POST: Add or remove members from a room\n@method_decorator(name='get', decorator=swagger_auto_schema(tags=['Messaging']))\n@method_decorator(name='post', decorator=swagger_auto_schema(tags=['Messaging']))\nclass RoomMembers(APIView):\n permission_classes = [\n IsAuthenticated,\n ]\n\n def get(self, request, *args, **kwargs):\n try:\n room = Room.objects.get(id=kwargs['id'])\n except ObjectDoesNotExist:\n return Response({'detail': 'Not found.'}, status=status.HTTP_404_NOT_FOUND)\n\n return Response(room.members.values('id'))\n\n def post(self, request, *args, **kwargs):\n try:\n room = Room.objects.get(id=kwargs['id'])\n except ObjectDoesNotExist:\n return Response('Room not found', status=status.HTTP_404_NOT_FOUND)\n\n if request.user.id != room.creator_id:\n return Response('You cannot add or remove members from this room', status=status.HTTP_400_BAD_REQUEST)\n\n try:\n members = request.data['members']\n except KeyError:\n return Response('You did not specify the members to add', status=status.HTTP_400_BAD_REQUEST)\n\n if not isinstance(members, list):\n return Response(\"The members field must be a list of dictionaries with the integer field 'id' and boolean \"\n \"field 'add'\", status=status.HTTP_400_BAD_REQUEST)\n\n if not all(isinstance(member['id'], int) and isinstance(member['add'], bool) for member in members):\n return Response(\"The members field must be a list of dictionaries with the integer field 'id' and boolean \"\n \"field 'add'\", status=status.HTTP_400_BAD_REQUEST)\n\n for member in members:\n if len(CustomUser.objects.filter(id=member['id'])) == 0:\n return Response(f\"Member with the ID {member['id']} does not exist\")\n elif member['id'] == request.user.id:\n return Response('You cannot add/remove yourself from this room', status=status.HTTP_400_BAD_REQUEST)\n\n for member in members:\n if member['add']:\n room.members.add(member['id'])\n elif not member['add']:\n room.members.remove(member['id'])\n\n return Response(room.members.values('id'))\n","sub_path":"api/src/messaging/api_views.py","file_name":"api_views.py","file_ext":"py","file_size_in_byte":6304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} 
+{"seq_id":"257436621","text":"import os\nimport sys\nfrom threading import Event\nfrom PyQt4.QtGui import *\nfrom PyQt4 import QtCore\nfrom enum import Enum\nfrom LaptopDemo import LaptopSyncDemo\n\nclass Status(Enum):\n locked = 0\n unlocked = 1\n\ndef clamp(val, min_, max_):\n return min(max_, max(val, min_))\n\ndef linear_interp(y0, y1, t):\n t = clamp(t, 0, 1)\n return y0 + (y1 - y0) * t\n\ndef smooth_interp(y0, y1, t):\n t = clamp(t, 0, 1)\n return linear_interp(y0, y1, t**2 * (3 - 2 * t))\n\n\nclass DesktopDemoWindow(QWidget):\n MAX_OPACITY = 1\n\n\n def __init__(self, fps=60, demo=None):\n super(DesktopDemoWindow, self).__init__()\n\n # Screen variables\n screen_resolution = app.desktop().screenGeometry()\n\n def clamp(value, minvalue, maxvalue):\n return max(minvalue, min(value, maxvalue))\n\n def linear_interp(y0, y1, t):\n t = clamp(t, 0, 1)\n return y0 + t * (y1 - y0)\n\n self.sc_width = screen_resolution.width()\n self.sc_height = screen_resolution.height()\n\n # resources\n self.pixmap_unlocked = QPixmap('res/16labDemoUnlocked2.png').scaled(self.sc_width, self.sc_height)\n self.pixmap_locked = QPixmap('res/16labDemoLocked2.png').scaled(self.sc_width, self.sc_height)\n\n # Updating variables\n self.fps = fps\n self._xmap_unlocked = QPixmap('res/16labDemoUnlocked2.png').scaled(self.sc_width, self.sc_height)\n self._update_time = 1000 / fps\n\n self.update_timer = QtCore.QTimer()\n self.update_timer.timeout.connect(self._update)\n self.update_timer.start(self._update_time)\n\n # The higher level control variables\n self.lock_event = Event()\n self.state = Status.locked\n\n # Widget variables\n self.transition_speed = 500\n self.progress_timer = QtCore.QElapsedTimer()\n self.progress_timer.start()\n\n self.true_progress = 0\n self.visible_progress = 0\n\n self.target_progress = self.MAX_OPACITY\n self.started_progess = 0\n\n # Setting up data input settings\n self.demo = demo\n self.demo.set_event_ref(event_ref=self.lock_event)\n\n # Demo update\n self.demo_update_timer = QtCore.QTimer()\n self.demo_update_timer.timeout.connect(self.demo.update)\n self.demo_update_timer.start(5)\n\n # Setting up the window itself\n self.label_unlocked = QLabel(self)\n self.label_unlocked.setPixmap(self.pixmap_unlocked)\n self.label_unlocked.setGraphicsEffect(self.gen_opacity_effect(self.target_progress))\n\n self.setWindowTitle(\"16lab laptop demo\")\n self.label_locked = QLabel(self)\n self.label_locked.setPixmap(self.pixmap_locked)\n self.label_locked.setGraphicsEffect(self.gen_opacity_effect(self.started_progess))\n\n self.painter = QPainter()\n\n self.resize(self.sc_width, self.sc_height)\n\n def keyPressEvent(self, e):\n if e.key() == QtCore.Qt.Key_Escape:\n self.close()\n elif e.key() == QtCore.Qt.Key_T:\n self.lock_event.set()\n elif e.key() == QtCore.Qt.Key_F11:\n if not self.isFullScreen():\n self.showFullScreen()\n\n # Screen variables\n screen_resolution = app.desktop().screenGeometry()\n self.sc_width = screen_resolution.width()\n self.sc_height = screen_resolution.height()\n\n # resources\n self.pixmap_unlocked = QPixmap('res/16labDemoUnlocked2.png').scaled(self.sc_width, self.sc_height)\n self.pixmap_locked = QPixmap('res/16labDemoLocked2.png').scaled(self.sc_width, self.sc_height)\n\n self.label_locked.setPixmap(self.pixmap_locked)\n self.label_unlocked.setPixmap(self.pixmap_unlocked)\n\n self.resize(self.sc_width, self.sc_height)\n else:\n self.showMaximized()\n\n\n def _update(self):\n # Handle lock_event\n if self.lock_event.is_set():\n self.lock_event.clear()\n\n 
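# toggle the lock state and restart the fade animation from the current visible opacity\n            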
self._set_internal_vars()\n\n self._fade_anim()\n\n def _set_internal_vars(self):\n # Case when we are locked atm\n if self.state == Status.locked:\n # Switch up the internal variables\n self.state = Status.unlocked\n self.target_progress = 0\n\n # Case when we are unlocked atm\n elif self.state == Status.unlocked:\n # Switch up the internal variables\n self.state = Status.locked\n self.target_progress = self.MAX_OPACITY\n\n # Continue from current visible progress\n self.started_progess = self.visible_progress\n self.progress_timer.start()\n\n def _fade_anim(self):\n self.true_progress = float(self.progress_timer.elapsed()) / self.transition_speed\n self.visible_progress = smooth_interp(self.started_progess, self.target_progress, self.true_progress)\n\n self.label_locked.setGraphicsEffect(self.gen_opacity_effect(self.visible_progress))\n\n def gen_opacity_effect(self, amount):\n opacity_fx = QGraphicsOpacityEffect()\n opacity_fx.setOpacity(amount)\n\n return opacity_fx\n\nif __name__ == '__main__':\n # Create window\n app = QApplication(sys.argv)\n\n # Creates bluetooth connection with 2 devices on threads\n # And provides dataframes to save the data & analyze function\n demo = LaptopSyncDemo()\n\n # Window for displaying the results.\n layout = QStackedLayout()\n\n\n","sub_path":"devel/gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":5485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"613657918","text":"\n\nfrom nltk.parse import DependencyGraph\nfrom InformationExtractor import InformationExtractor\n\n\nclass FixedDependencyGraph(DependencyGraph):\n\tdef _parse(self, input):\n\t\tlines = [DependencyGraph._normalize(line) for line in input.split('\\n') if line.strip()]\n\t\ttemp = []\n\t\tfor index, line in enumerate(lines):\n\t\t\tcells = line.split('\\t')\n\t\t\t_, word, _, tag, _, _, head, rel, _, _ = cells\n\t\t\thead = int(head)\n\t\t\tself.nodelist.append({'address': index+1, 'word': word, 'tag': tag, 'head': head, 'rel': rel, 'deps': [d for (d,h) in temp if h == index+1]})\n\t\t\ttry:\n\t\t\t\tself.nodelist[head]['deps'].append(index+1)\n\t\t\texcept IndexError:\n\t\t\t\ttemp.append((index+1, head))\n\n\t\troot_address = self.nodelist[0]['deps'][0]\n\t\tself.root = self.nodelist[root_address]\n\n\ndef dadegan_text(conll_file='resources/train.conll'):\n\ttext = open(conll_file).read()\n\treturn text.replace('‌‌','‌').replace('\\t‌','\\t').replace('‌\\t','\\t').replace('\\t ','\\t').replace(' \\t','\\t').replace('\\r', '').replace('\\u2029', '‌')\n\nextractor = InformationExtractor()\noutput = open('informations.txt', 'w')\nfor sentence in map(FixedDependencyGraph, [item for item in dadegan_text().replace(' ', '_').split('\\n\\n') if item.strip()]):\n\tprint('\\n', '*', *[node['word'] for node in sentence.nodelist if node['word']], file=output)\n\tfor information in extractor.extract(sentence):\n\t\tprint(*information, sep=' - ', file=output)\n\n","sub_path":"dadegan.py","file_name":"dadegan.py","file_ext":"py","file_size_in_byte":1395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"639932604","text":"# TODO:\n# Algo to be implemented:\n# Fastest Path (A* Algorithm)\n# Code the actual thing\n# Everything above\n# Image Recognition exploration\n\n# TODO: Real Exploration & Fast Path algorithm for the real robot\n# 1. real algo class (realAlgo)\n# 2. 
this class handles the starting of the real algo thread\n# (android press start expl, rpi receive and tell algo to start expl)\n# 3. Another robot class that updates the pos and obs in actual map onto the simulator\n# Steps\n# tcpClient pass msg to and from robot and realAlgo\n# realAlgo tells robotClass to update map. realRobotClass returns the updated map to realAlgo\n# realAlgo sends cmd to tcpClient to move the robot\n\n\nfrom PyQt5.QtCore import pyqtSlot, QThread, QTimer, pyqtSignal\nfrom PyQt5.QtWidgets import QMainWindow, QGraphicsScene, QMessageBox\n\nfrom graphicsMgr import GraphicsMgr\nfrom map import Map\nfrom simExplAlgo import SimExplAlgo\nfrom simFastPathAlgo import SimFastPathAlgo\nfrom simImgRecogAlgo import SimImgRecogAlgo\nfrom ui import mainwindow\nfrom mapDialog import MapDialog\nfrom tcpClient import TcpClient\nfrom actualExplAlgo import ActlExplAlgo\n\n\nclass MDPAlgoApp(QMainWindow, mainwindow.Ui_MainWindow):\n signalSendMsg = pyqtSignal(str)\n\n def __init__(self):\n super(MDPAlgoApp, self).__init__()\n self.setupUi(self)\n # Start and Goal coordinate\n self.__dontTouchMapList = [\n [0, 0], [1, 0], [2, 0],\n [0, 1], [1, 1], [2, 1],\n [0, 2], [1, 2], [2, 2],\n [12, 17], [13, 17], [14, 17],\n [12, 18], [13, 18], [14, 18],\n [12, 19], [13, 19], [14, 19],\n ]\n\n self.__binToHexConverterDict = {\n '0000': '0',\n '0001': '1',\n '0010': '2',\n '0011': '3',\n '0100': '4',\n '0101': '5',\n '0110': '6',\n '0111': '7',\n '1000': '8',\n '1001': '9',\n '1010': 'A',\n '1011': 'B',\n '1100': 'C',\n '1101': 'D',\n '1110': 'E',\n '1111': 'F'\n }\n\n # Disable app from maximising\n self.setMaximumSize(self.width(), self.height())\n\n # Create a QGraphicScene and attach it to gvMap (QGraphicsView)\n # This tells qt how to draw the square tiles and robot\n self.__scene = QGraphicsScene()\n self.gvMap.setScene(self.__scene)\n\n # Initialize the map\n self.__map = Map()\n # Initialise the mapDialog. 
This allows the user to load map from disk\n self.__mapDialog = MapDialog(self.__map)\n\n # Let the graphicMgr handle designing the scene and robot\n self.__graphicsMgr = GraphicsMgr(self.__scene, self.__map)\n\n # MapDialog settings signal and slot\n self.btnLoadMap.clicked.connect(self.btnLoadMapClicked)\n self.btnResetMap.clicked.connect(self.btnResetMapClicked)\n self.btnSetWaypoint.clicked.connect(self.btnSetWaypointClicked)\n self.__mapDialog.enableWaypointSignal.connect(self.enableWaypoint)\n\n # simExplAlgo\n self.__thread = QThread()\n self.__simExplAlgo = None\n\n # simImgRecogAlgo\n self.__imgThread = QThread()\n self.__simImgRecogAlgo = None\n\n # Initialise the timer for time-limited exploration\n self.__qTimer = QTimer()\n self.__qTimer.setSingleShot(True)\n\n # simFastPathAlgo\n self.__pathThread = QThread()\n self.__simFastPathAlgo = SimFastPathAlgo()\n self.__simFastPathAlgo.moveToThread(self.__pathThread)\n self.__pathThread.started.connect(self.__simFastPathAlgo.run)\n self.__simFastPathAlgo.finished.connect(self.__pathThread.quit)\n self.__simFastPathAlgo.signalSense.connect(self.__graphicsMgr.simRobotSense)\n self.__simFastPathAlgo.signalMoveRobotForward.connect(self.__graphicsMgr.moveSimRobotForward)\n self.__simFastPathAlgo.signalMoveRobotBackward.connect(self.__graphicsMgr.moveSimRobotBackward)\n self.__simFastPathAlgo.signalRotateRobotRight.connect(self.__graphicsMgr.rotateSimRobotRight)\n self.__simFastPathAlgo.signalRotateRobotLeft.connect(self.__graphicsMgr.rotateSimRobotLeft)\n # self.__graphicsMgr.signalFrontLeft.connect(self.__simFastPathAlgo.determineMove)\n self.__simFastPathAlgo.finished.connect(self.__pathThread.quit)\n\n self.btnSimExpl.clicked.connect(self.btnSimExplClicked)\n self.btnSimFastPath.clicked.connect(self.btnSimFastPathClicked)\n self.btnSimImgRecog.clicked.connect(self.btnSimImgRecogClicked)\n\n # Tcp Client Initialisation\n self.__tcpThread = QThread()\n self.__tcpClient = TcpClient()\n self.__tcpClient.moveToThread(self.__tcpThread)\n self.__tcpThread.started.connect(self.__tcpClient.start_client)\n self.__tcpClient.finished.connect(self.__tcpThread.quit)\n self.btnRobotConnection.clicked.connect(self.btnRobotConnectionClicked)\n self.__tcpClient.finished.connect(lambda: self.btnRobotConnection.setText('Connect'))\n self.__tcpClient.finished.connect(self.disableSendMsg)\n self.__tcpClient.connected.connect(self.enableSendMsg)\n self.__tcpClient.interpretCmd.connect(self.__graphicsMgr.interpretCmd)\n\n # Actual Exploration Algo\n self.__explThread = QThread()\n self.__actlExplAlgo = ActlExplAlgo()\n\n # TODO: Remove this after testing\n self.leMsg.setEnabled(False)\n self.btnMsg.setEnabled(False)\n self.btnMsg.clicked.connect(self.btnSendMsgClicked)\n self.signalSendMsg.connect(self.__tcpClient.send_message)\n\n # TODO: Remove this after testing\n @pyqtSlot()\n def btnSendMsgClicked(self):\n if self.leMsg.text() != \"\":\n print(f'Sending msg: {self.leMsg.text()}')\n self.signalSendMsg.emit(self.leMsg.text())\n else:\n print(\"Empty message\")\n\n # TODO: Remove this after testing\n @pyqtSlot()\n def enableSendMsg(self):\n self.leMsg.setEnabled(True)\n self.btnMsg.setEnabled(True)\n\n # TODO: Remove this after testing\n @pyqtSlot()\n def disableSendMsg(self):\n self.leMsg.setEnabled(False)\n self.btnMsg.setEnabled(False)\n\n @pyqtSlot()\n def btnRobotConnectionClicked(self):\n if self.btnRobotConnection.text() == 'Connect':\n self.__tcpThread.start()\n self.btnRobotConnection.setText('Disconnect')\n else:\n 
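# stop_client() is expected to close the socket and emit `finished`,\n            # which the wiring above uses to quit __tcpThread, reset the button\n            # text to 'Connect' and disable the message widgets.\n            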
self.__tcpClient.stop_client()\n\n @pyqtSlot()\n def btnSimImgRecogClicked(self):\n self.__simImgRecogAlgo = SimImgRecogAlgo()\n self.__simImgRecogAlgo.moveToThread(self.__imgThread)\n # Signal-Slot for thread management\n self.__imgThread.started.connect(self.__simImgRecogAlgo.run)\n self.__simImgRecogAlgo.finished.connect(lambda: print('SimImgRecogAlgo Stopping'))\n self.__simImgRecogAlgo.finished.connect(self.__imgThread.quit)\n self.__imgThread.finished.connect(self.__simImgRecogAlgo.deleteLater)\n # Signal-Slot for sim exploration movement\n self.__simImgRecogAlgo.signalSense.connect(self.__graphicsMgr.simRobotSense)\n self.__simImgRecogAlgo.signalMoveRobotForward.connect(self.__graphicsMgr.moveSimRobotForward)\n self.__simImgRecogAlgo.signalMoveRobotBackward.connect(self.__graphicsMgr.moveSimRobotBackward)\n self.__simImgRecogAlgo.signalRotateRobotRight.connect(self.__graphicsMgr.rotateSimRobotRight)\n self.__simImgRecogAlgo.signalRotateRobotLeft.connect(self.__graphicsMgr.rotateSimRobotLeft)\n self.__graphicsMgr.signalFrontLeft.connect(self.__simImgRecogAlgo.determine_move)\n self.__simImgRecogAlgo.signalTakePic.connect(lambda: print('Take Photo\\n'))\n self.__simImgRecogAlgo.signalTakePic.connect(self.__simImgRecogAlgo.move_robot_after_taking_pic)\n\n if int(self.leSPS.text()) < 0:\n self.__simImgRecogAlgo.set_time(0.05)\n else:\n self.__simImgRecogAlgo.set_time(1 / int(self.leSPS.text()))\n\n self.__imgThread.start()\n\n # The creation of simExplAlgo is shifted here to eliminate threading errors\n @pyqtSlot()\n def btnSimExplClicked(self):\n self.__simExplAlgo = SimExplAlgo()\n self.__simExplAlgo.moveToThread(self.__thread)\n # Signal-Slot for thread management\n self.__thread.started.connect(self.__simExplAlgo.run)\n self.__simExplAlgo.finished.connect(lambda: print('SimExplAlgo Stopping'))\n self.__simExplAlgo.finished.connect(self.__thread.quit)\n self.__simExplAlgo.finished.connect(self.generateMapDescriptor)\n self.__thread.finished.connect(self.__simExplAlgo.deleteLater)\n # Signal-Slot for Exploration Robot Movement\n self.__simExplAlgo.signalSense.connect(self.__graphicsMgr.simRobotSense)\n self.__simExplAlgo.signalMoveRobotForward.connect(self.__graphicsMgr.moveSimRobotForward)\n self.__simExplAlgo.signalMoveRobotBackward.connect(self.__graphicsMgr.moveSimRobotBackward)\n self.__simExplAlgo.signalRotateRobotRight.connect(self.__graphicsMgr.rotateSimRobotRight)\n self.__simExplAlgo.signalRotateRobotLeft.connect(self.__graphicsMgr.rotateSimRobotLeft)\n self.__graphicsMgr.signalFrontLeft.connect(self.__simExplAlgo.determineMove)\n # Signal-Slot for FastPath back to Home\n self.__simExplAlgo.signalAstarCmd.connect(self.__graphicsMgr.interpretAstarCmd)\n self.__graphicsMgr.signalNextAstarCmd.connect(self.__simExplAlgo.send_a_star_move_cmd_no_sense)\n # Signal-Slot for timer timeout\n self.__qTimer.timeout.connect(self.__simExplAlgo.timer_timeout)\n\n if int(self.leSPS.text()) < 0:\n self.__simExplAlgo.set_time(0.05)\n else:\n self.__simExplAlgo.set_time(1 / int(self.leSPS.text()))\n if 0 <= int(self.leCoverageFigure.text()) <= 100:\n self.__simExplAlgo.set_coverage(int(self.leCoverageFigure.text()))\n else:\n self.__simExplAlgo.set_coverage(100)\n if int(self.leTimeLimit.text()) < 0:\n self.__qTimer.setInterval(360 * 1000)\n else:\n self.__qTimer.setInterval(int(self.leTimeLimit.text()) * 1000)\n\n print('Sim Exploration Start')\n self.__qTimer.start()\n self.__thread.start()\n\n @pyqtSlot()\n def btnSimFastPathClicked(self):\n self.__pathThread.start()\n\n\n @pyqtSlot()\n def 
btnLoadMapClicked(self):\n self.__mapDialog.exec()\n\n\n @pyqtSlot()\n def btnResetMapClicked(self):\n self.__map.resetMap()\n self.leXWaypoint.setText(\"\")\n self.leYWaypoint.setText(\"\")\n\n self.leXWaypoint.setEnabled(False)\n self.leYWaypoint.setEnabled(False)\n self.btnSetWaypoint.setEnabled(False)\n\n self.btnSimExpl.setEnabled(True)\n self.btnSimImgRecog.setEnabled(True)\n self.btnSimFastPath.setEnabled(False)\n self.__graphicsMgr.resetRobot()\n\n @pyqtSlot()\n def enableWaypoint(self):\n self.leXWaypoint.setEnabled(True)\n self.leYWaypoint.setEnabled(True)\n self.btnSetWaypoint.setEnabled(True)\n self.btnSimExpl.setEnabled(False)\n self.btnSimImgRecog.setEnabled(False)\n\n def waypointError(self, errorMsg):\n self.__map.clearWaypoint()\n self.btnSimFastPath.setEnabled(False)\n QMessageBox.critical(self, self.windowTitle(), errorMsg)\n\n @pyqtSlot()\n def btnSetWaypointClicked(self):\n try:\n if self.leXWaypoint.text() == \"\" or self.leYWaypoint.text() == \"\":\n QMessageBox.critical(self, self.windowTitle(), \"Invalid Waypoint\")\n else:\n coordinate = (int(self.leXWaypoint.text()) - 1, int(self.leYWaypoint.text()) - 1)\n\n if coordinate[0] < 0 or coordinate[1] < 0 or coordinate[0] > 14 or coordinate[1] > 19:\n self.waypointError(\"Waypoint out of bound\")\n elif coordinate in self.__dontTouchMapList:\n self.waypointError(\"Unable to set waypoint on START/GOAL\")\n elif self.__map.obstacleMap[coordinate[1]][coordinate[0]] == 1:\n self.waypointError(\"Unable to set waypoint on obstacle\")\n else:\n self.__map.clearWaypoint()\n self.__map.waypoint = coordinate\n self.__shortestPath = self.__simFastPathAlgo.gen_full_path(self.__map.obstacleMap, self.__map.waypoint)\n print(self.__shortestPath)\n self.btnSimFastPath.setEnabled(True)\n except Exception as err:\n print(f\"[Error] mdpAlgoApp::btnSetWaypointClicked! 
Error msg: {err}\")\n\n    def mapToHex(self, pStr):\n        pHex = ''\n        tempPstr = ''\n        for index in range(0, len(pStr)):\n            tempPstr += pStr[index]\n            if index % 4 == 3:\n                pHex += self.__binToHexConverterDict[tempPstr]\n                tempPstr = ''\n        return pHex\n\n    @pyqtSlot()\n    def generateMapDescriptor(self):\n        p1 = '11'\n        p2 = ''\n        for row in range(0, len(self.__map.exploredMap)):\n            for col in range(0, len(self.__map.exploredMap[row])):\n                if self.__map.exploredMap[row][col] == 0:\n                    p1 += '0'\n                else:\n                    # if the cell is explored, add the cell property into p2\n                    p1 += '1'\n                    p2 += str(self.__map.obstacleMap[row][col])\n        p1 += '11'\n        extra = len(p2) % 8\n        padding = 0\n        if extra != 0:\n            padding = 8 - extra\n        for i in range(0, padding):\n            p2 += '0'\n        p1 = self.mapToHex(p1)\n        p2 = self.mapToHex(p2)\n        print('\\nMap Descriptor')\n        print(f'p1: {p1}')\n        print(f'p2: {p2}')\n\n    def startExpl(self):\n        self.__actlExplAlgo = ActlExplAlgo()\n        self.__actlExplAlgo.moveToThread(self.__explThread)\n        # Signal-Slot for thread management\n        self.__explThread.started.connect(self.__actlExplAlgo.run)\n","sub_path":"src/mdpAlgoApp.py","file_name":"mdpAlgoApp.py","file_ext":"py","file_size_in_byte":13977,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
{"seq_id":"301705670","text":"from neopixel import NeoPixel\nfrom machine import Pin, Timer\nimport time\n\nclass Neo:\n\t\"\"\"Wrapper for the NeoPixel class, creating a nicer interface for users working with NeoPixels\"\"\"\n\n\tdef __init__(self, pin_nr):\n\t\t\"\"\"The constructor accepts the number of the pin that the NeoPixel is connected to\"\"\"\n\t\tself.neo = NeoPixel(Pin(pin_nr), 1)  # use the given pin, not a hard-coded one\n\t\tself._siren_timer = Timer(0)\n\t\tself._siren_state = True\n\n\tdef set_color(self, r, g, b):\n\t\t\"\"\"Set the NeoPixel's color to the given RGB value\"\"\"\n\t\tself.neo[0] = (r, g, b)\n\t\tself.neo.write()\n\n\tdef purple(self):\n\t\tself.set_color(128, 0, 128)\n\n\tdef red(self):\n\t\tself.set_color(255, 0, 0)\n\n\tdef green(self):\n\t\tself.set_color(0, 255, 0)\n\n\tdef blue(self):\n\t\tself.set_color(0, 0, 255)\n\n\tdef siren(self, sleep_time=450):\n\t\tdef tick(timer):\n\t\t\tself._siren_state = not self._siren_state\n\t\t\tif self._siren_state:\n\t\t\t\tself.blue()\n\t\t\telse:\n\t\t\t\tself.red()\n\t\tif sleep_time > 0:\n\t\t\tself._siren_timer.init(period=sleep_time, mode=Timer.PERIODIC, callback=tick)\n\t\telse:\n\t\t\t# a non-positive sleep_time stops the siren\n\t\t\tself._siren_timer.deinit()\n\n\tdef pulse(self, delay=100):\n\t\t\"\"\"Makes the NeoPixel pulse in different colors by fading each channel up and back down\"\"\"\n\t\tfor index in range(0, 2):\n\t\t\ti = 0\n\t\t\twhile i < 255:\n\t\t\t\tcurr = list(self.neo[0])\n\t\t\t\tcurr[index] = i\n\t\t\t\tself.neo[0] = curr\n\t\t\t\ttime.sleep_ms(delay)\n\t\t\t\tself.neo.write()\n\t\t\t\ti += 1\n\t\t\twhile i > 0:\n\t\t\t\tcurr = list(self.neo[0])\n\t\t\t\tcurr[index] = i\n\t\t\t\tself.neo[0] = curr\n\t\t\t\ttime.sleep_ms(delay)\n\t\t\t\tself.neo.write()\n\t\t\t\ti -= 1\n","sub_path":"ports/esp32/modules/neo.py","file_name":"neo.py","file_ext":"py","file_size_in_byte":1398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
{"seq_id":"402069343","text":"# Copyright 2017 AT&T Intellectual Property. 
All other rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport boto3\nimport logging\nimport drydock_provisioner.error as errors\nfrom drydock_provisioner.orchestrator.actions.orchestrator import BaseAction\n\n\n\nclass BaseMaasAction(BaseAction):\n def __init__(self, *args, maas_client=None):\n super().__init__(*args)\n\n\n\nclass DeployNode(BaseMaasAction):\n \"\"\"Action to write persistent OS to node.\"\"\"\n\n def start(self):\n str_msg = ''\n ec2 = boto3.resource('ec2')\n\n try:\n site_design = self._load_site_design()\n\n except Exception as ex:\n self.task.add_status_msg(\n msg=\"Error loading site design.{0}\".format(str(ex)),\n error=True,\n ctx='NA',\n ctx_type='NA')\n self.task.set_status(hd_fields.TaskStatus.Complete)\n self.task.failure()\n self.task.save()\n return\n\n try:\n aws_conf = site_design.aws_node\n image_id = aws_conf.image_id\n instance_type = aws_conf.instance_type\n subnet_id = aws_conf.subnet_id\n sec_grp = aws_conf.sec_grp\n \n instances = ec2.create_instances(\n ImageId=image_id, InstanceType=instance_type, MaxCount=1, MinCount=1,\n NetworkInterfaces=[\n {'SubnetId': subnet_id, 'DeviceIndex': 0, 'AssociatePublicIpAddress': True,\n 'Groups': [sec_grp]}])\n instances[0].wait_until_running()\n\n except Exception as ex:\n self.task.add_status_msg(\n msg=\"Error creating aws instance.{0}\".format(str(ex)),\n error=True,\n ctx='NA',\n ctx_type='NA')\n\n \n return\n","sub_path":"python/drydock_provisioner/drivers/node/awsdriver/actions/node.py","file_name":"node.py","file_ext":"py","file_size_in_byte":2328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"540798720","text":"from rest_framework import serializers\nfrom rest_framework import fields\n\nfrom rest_framework.utils.field_mapping import ClassLookupDict\nfrom rest_framework.utils.serializer_helpers import ReturnDict\n\nimport types\nfrom datetime import datetime\nfrom django.utils.dateparse import parse_datetime\n\nfrom rest_framework import renderers\n\n\n\n# Google types: 'string', 'number', 'boolean', 'date', 'datetime', and 'timeofday'.\nserializer_field_mapping = {\n # fields.AutoField: 'number',\n # fields.BigIntegerField: 'number',\n fields.BooleanField: 'boolean',\n fields.CharField: 'string',\n # fields.CommaSeparatedIntegerField: 'string',\n fields.DateField: 'date',\n fields.DateTimeField: 'date',\n fields.DecimalField: 'number',\n # fields.EmailField: 'string',\n # fields.Field: ModelField,\n # fields.FileField: FileField,\n fields.FloatField: 'number',\n # fields.ImageField: ImageField,\n fields.IntegerField: 'number',\n # fields.NullBooleanField: 'bool',\n # fields.PositiveIntegerField: 'number',\n # fields.PositiveSmallIntegerField: 'number',\n # fields.SlugField: 'string',\n # fields.SmallIntegerField: 'number',\n # fields.TextField: 'string',\n # fields.TimeField: TimeField,\n # fields.URLField: URLField,\n # fields.URLField: URLField,\n fields.SerializerMethodField: 'string',\n 
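# NOTE: ClassLookupDict walks a field class's MRO, so subclasses of the\n    # types above resolve to their nearest mapped ancestor; any field type\n    # outside this table makes get_field_type below raise a KeyError.\n    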
serializers.HyperlinkedIdentityField: 'string',\n}\n\ndef get_field_type ( model_field): \n field_mapping = ClassLookupDict(serializer_field_mapping)\n return field_mapping[model_field]\n\n#\n# Version based on serializers\n#\n\nclass TableRenderer(renderers.JSONRenderer):\n format = 'table'\n\n def render(self, data, media_type=None, renderer_context=None): \n if isinstance (data, (list, tuple) ):\n # { \"title\": \"Engine\" },\n columns = [{'title': name} for name in data.serializer.child.fields]\n data = [row.values() for row in data ]\n data = {'data': data, 'columns': columns }\n\n return super(TableRenderer, self).render (data, media_type, renderer_context)\n\nclass GoogleRenderer(renderers.JSONRenderer):\n format = 'google'\n\n def render(self, data, media_type=None, renderer_context=None): \n if isinstance (data, (list, tuple) ):\n # data serializer is a ListSerializer need an access to child\n # serializer keeps the info about the fields\n serializer = data.serializer.child\n serializer._context.pop ('request', None)\n serializer._context.pop ('view', None)\n s = GoogleSerializer(data, many=True, child=serializer)\n data = s.data\n\n return super(GoogleRenderer, self).render (data, media_type, renderer_context)\n\n\nclass GListSerializer (serializers.ListSerializer):\n\n def to_representation (self, data):\n # print (self.child.child) \n columns = [{ \n 'label': field_name, \n 'type': get_field_type(model_field),\n 'id': 'dupa',\n } for field_name, model_field in self.child.fields.items()]\n\n data = super(GListSerializer, self).to_representation(data)\n\n if self.child._context:\n return {'rows': data, 'cols': columns, 'p': self.child._context}\n else:\n return {'rows': data, 'cols': columns}\n\n @property\n def data(self):\n ret = super(serializers.ListSerializer, self).data\n return ReturnDict(ret, serializer=self)\n\n \nclass GoogleSerializer (serializers.ListSerializer):\n\n class Meta:\n list_serializer_class = GListSerializer\n\n def __init__ (self, *args, **kwargs):\n child = kwargs.pop('child')\n self.fields = child.fields\n kwargs['context'] = child.context\n kwargs['child'] = GoogleDetailSerializer()\n super (GoogleSerializer, self).__init__ (*args, **kwargs)\n\n def to_representation (self, data):\n # javascript date objects is declared as `new Date(2015,05,23)` but this can't be passed by json. 
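\n        # (concretely, the conversion below emits e.g. datetime(2015, 5, 23) -> \"Date(2015,4,23)\")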
\n # Google added a special syntax for this json passing as `Date(2015,04,23)` \n # See the month difference as javasript months are zero based.\n\n # get the names of the fields that are dates\n dates = [ name for name, model in self.fields.items() if isinstance(model, fields.DateTimeField)] \n # convert dates to google `Date(2015,5,15)`\n for f in dates:\n obj = parse_datetime(data[f])\n data[f] = \"Date({},{},{})\".format(obj.year, obj.month-1, obj.day)\n\n data = data.values() if isinstance(data, dict) else data\n data = super(GoogleSerializer, self).to_representation(data)\n return {'c': data}\n\n @property\n def data(self):\n ret = super(serializers.ListSerializer, self).data\n return ReturnDict(ret, serializer=self)\n\n# class GListSerializer (GoogleSerializer):\n# prefix = 'rows'\n\n# GoogleSerializer.Meta.list_serializer_class = GListSerializer \n\nclass GoogleDetailSerializer (serializers.Serializer):\n class Meta:\n list_serializer_class = GoogleSerializer\n\n def to_representation (self, value):\n return {'v': value}\n\n\n\nclass GoogleDateField (serializers.DateTimeField):\n def to_representation (self, obj):\n return \"Date({},{},{})\".format(obj.year, obj.month-1, obj.day)\n\nclass GoogleRenderer2(renderers.JSONRenderer):\n format = 'google'\n\n def convert (self, value):\n if type(value) == datetime:\n return \"Date({},{},{})\".format(value.year, value.month-1, value.day)\n return value\n\n def render_item (self, obj):\n obj = [{'v':self.convert(obj[key])} for key in obj]\n return {'c': obj}\n\n\n def cols (self, columns):\n return [ {'label': fname, 'id': 'dupa', 'type': ftype}\n for fname, ftype in columns\n ]\n\n def build_columns (self, serializer):\n # {id: 'A', label: 'NEW A', type: 'string'}\n columns = []\n for field_name, model_field in serializer.fields.items():\n if not isinstance (model_field, serializers.BaseSerializer):\n model_field = get_field_type(model_field)\n else:\n model_field = 'string'\n columns.append ((field_name, model_field))\n \n return columns\n\n def get_date_columns (self, serializer):\n return [f.field_name for f in serializer.fields if isinstance(f, fields.DateTimeField)]\n\n def render(self, data, media_type=None, renderer_context=None): \n serializer = data.serializer\n print (serializer.__class__.__bases__)\n serializer.child.to_representation = types.MethodType(GoogleDateField.to_representation, serializer.child) \n\n # serializer.to_representation = types.MethodType(GoogleDateField.to_representation, serializer) \n serializer.__class__.__bases__ = serializer.__class__.__bases__ + (GoogleDataListSerializer,)\n # cls = serializer.__class__\n # serializer.__class__ = cls.__class__(cls.__name__ + \"WithExtraBase\", (cls, GoogleDataListSerializer), {})\n print (type(serializer.instance))\n print (type(serializer))\n data = serializer.to_representation(serializer.instance)\n return super(GoogleRenderer, self).render (serializer.data, media_type, renderer_context)\n \n def render1(self, data, media_type=None, renderer_context=None): \n child = data.serializer.child\n rows = [self.render_item(d) for d in data.serializer.validated_data] \n\n columns = [\n { \n 'id': field_name, \n 'type': get_field_type(model_field),\n 'label': 'dupa',\n }\n for field_name, model_field in child.fields.items()\n ]\n\n data = {'cols': columns, 'rows': rows}\n return super(GoogleRenderer, self).render (data, media_type, renderer_context)\n\n \n # try:\n # return field_mapping[model_field]\n # except KeyError as e:\n # if isinstance (model_field, 
fields.SerializerMethodField):\n # gtype = model_field.context.get('google_type', 'string')\n # return gtype\n # raise e\n\n\nclass GoogleRenderer1(renderers.JSONRenderer):\n format = 'google'\n\n\n def convert_dates (self, n, columns, field):\n for c in columns:\n f = field.to_internal_value (n[c])\n n[c] = field.to_representation (f)\n return n\n\n\n def cols (self, columns):\n return [ {'label': fname, 'id': 'dupa', 'type': ftype}\n for fname, ftype in columns\n ]\n\n def build_columns (self, serializer):\n # {id: 'A', label: 'NEW A', type: 'string'}\n columns = []\n for field_name, model_field in serializer.fields.items():\n if not isinstance (model_field, serializers.BaseSerializer):\n model_field = get_field_type(model_field)\n else:\n model_field = 'string'\n columns.append ((field_name, model_field))\n \n return columns\n\n def render(self, data, media_type=None, renderer_context=None):\n if isinstance(data, list):\n # serializer keeps the date about the fields\n serializer = data.serializer.child\n all_columns = self.build_columns (serializer)\n\n columns = [c[0] for c in all_columns if c[1] == 'date']\n\n field = GoogleDateField()\n\n result = []\n for i in data:\n i = self.convert_dates(i, columns, field)\n i = [{'v':i[key]} for key in i] \n result.append ({'c': i})\n\n\n data = { 'cols': self.cols(all_columns), 'rows': result } \n\n data = super (GoogleRenderer, self).render (data, media_type, renderer_context)\n return data\n\n\nclass GoogleDataListSerializer (serializers.ListSerializer):\n\n def to_representation(self, data):\n columns = [\n { \n 'id': field_name, \n 'type': get_field_type(model_field),\n 'label': 'dupa',\n }\n for field_name, model_field in self.child.get_fields().items()\n ]\n \n return {\n \"cols\": columns,\n \"rows\": super (GoogleDataListSerializer, self).to_representation(data)\n }\n\n @property\n def data(self):\n # skip the `data` implementation of ListSerializer\n ret = super(serializers.ListSerializer, self).data\n return ReturnDict(ret, serializer=self)\n\n\nclass GoogleDataSerializer (object):\n \"\"\"\n {\n cols: [\n {id: 'A', label: 'NEW A', type: 'string'},\n {id: 'B', label: 'B-label', type: 'number'},\n {id: 'C', label: 'C-label', type: 'date'}\n ],\n rows: [\n {c:[{v: 'a'},\n {v: 1.0, f: 'One'},\n {v: 'Date(2008, 1, 28, 0, 31, 26)', f: '2/28/08 12:31 AM'}\n ]},\n {c:[{v: 'b'},\n {v: 2.0, f: 'Two'},\n {v: 'Date(2008, 2, 30, 0, 31, 26)', f: '3/30/08 12:31 AM'}\n ]},\n {c:[{v: 'c'},\n {v: 3.0, f: 'Three'},\n {v: 'Date(2008, 3, 30, 0, 31, 26)', f: '4/30/08 12:31 AM'}\n ]}\n ]\n }\n \"\"\"\n def to_representation(self, obj):\n # javascript date objects is declared as `new Date(2015,05,23)`\n # but this can't be passed by json. 
Google added a special\n # syntax for this json passing as `Date(2015,04,23)` \n # See the month difference as javasript months are zero based.\n for field in self.fields.values():\n if isinstance (field, fields.DateTimeField):\n field.to_representation = types.MethodType(GoogleDateField.to_representation, field)\n # field.format = \"Date(%Y,%m,%d)\"\n\n representation = super (GoogleDataSerializer, self).to_representation(obj)\n representation = [{'v':representation[key]} for key in representation]\n return {'c': representation}\n\n \n @classmethod\n def many_init(cls, *args, **kwargs):\n # override the list_serializer_class \n # inner class Meta is not inherited\n meta = getattr(cls, 'Meta', None)\n setattr(meta, 'list_serializer_class', GoogleDataListSerializer)\n return super(GoogleDataSerializer, cls).many_init (*args, **kwargs)\n\n\n\n\n","sub_path":"plans/core/google.py","file_name":"google.py","file_ext":"py","file_size_in_byte":12388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"286859348","text":"from sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy import Column, Integer, String, ForeignKey\n\nBase = declarative_base()\n\nclass Vk(Base):\n __tablename__ = 'vk'\n\n # instance id\n id = Column(Integer, primary_key=True)\n # access token\n access_token = Column(String(100))\n # last vk message id\n last_message_id = Column(String(100))\n","sub_path":"VKontakte/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"32929393","text":"import json\nimport re\nimport random\n\n\nclass Money:\n def __init__(self, number, currency):\n \"\"\" __init__ конструктор класса\"\"\"\n # Инициализирует экземпляр класса\n self.number = float(number)\n self.currency = str(currency)\n\n def __str__(self):\n \"\"\"__str__ строковое представление класса\"\"\"\n return '{} {}'.format(self.number, self.currency)\n\n def __eq__(self, other):\n \"\"\"__eq__ сравнение экземпляров класса\"\"\"\n if self.number == other.number and self.currency == other.currency:\n return True\n return False\n\n def __truth__(self):\n \"\"\"Преобразование в тип bool (есть ли деньги)\"\"\"\n if self.number > 0:\n return True\n return False\n\n def __add__(self, other):\n \"\"\"__add__ сложение экземпляров класса (денег в данном случае)\"\"\"\n if self.currency == other.currency:\n self.number += other.number\n return self\n else:\n print('Разные валюты')\n\n def __sub__(self, other):\n \"\"\"__sub__ вычетание экземпляров класса (денег в данном случае)\"\"\"\n if self.currency == other.currency:\n self.number -= other.number\n return self\n else:\n print('Разные валюты')\n\n @staticmethod\n def from_string(str_vector):\n \"\"\"Статический метод - это метод принадлежащий классу,\n а не его экземпляру\"\"\"\n # т.е. 
метод можно вызвать не создавая обьект класса \n # Например (math.cos(..), re.search(..))\n # Создать обьект класса из строки \n # \\d - число \\w - буква \\s -пробельный символ\n # search - ищет первое соответствие регулярному выражению в строке\n match = re.search('(\\\\d+\\\\s\\\\w+)|(\\\\d+([.,])\\\\d+\\\\s\\\\w+)', str_vector)\n if match:\n # group - возвращает найденное значение\n value = match.group()\n value.replace('.', ',')\n values = value.split(' ')\n return Money(values[0], values[1])\n\n def save(self, filename):\n filename = filename + \".json\"\n # __dict__ возвращает обьект класса ввиде словаря\n dic = self.__dict__\n # open(filename, 'w') - открываем файл filename для чтения и записи (w)\n # encoding='utf-8' - кодировка файла UTF-8\n # если файл не существует, то будет создан новый \n with open(filename, 'w', encoding='utf-8') as f:\n # json.dump - сериализует obj как форматированный JSON\n # ensure_ascii = False, строки запишутся как есть\n json.dump(dic, f, ensure_ascii=False)\n print('Ваши деньги сохранены')\n\n @staticmethod\n def load(filename):\n filename = filename + \".json\"\n # 'r' - открываем файл для чтения\n with open(filename, 'r', encoding='utf-8') as f:\n dic = json.load(f)\n money = Money(0, '')\n money.parser(dic)\n return money\n\n def parser(self, dic: dict):\n \"\"\"Присвоить значения словаря элементам класса\"\"\"\n # метод создан отдельно для упрощения работы в следующем задании 10_5.2\n self.number = dic['number']\n self.currency = dic['currency']\n\n def currency_exchange(self, rute, currency):\n \"\"\"Поменять валюту\"\"\"\n # rute - курс валюты\n # currency - название валюты\n s = 'Операция \"Обмен\" потверждена! {} >> '.format(self)\n # round - округление чисел\n self.number = round(self.number / rute, 2)\n self.currency = currency\n s += '{}'.format(self)\n print(s)\n\n def sacrifice(self, number):\n \"\"\"Пожертвовать деньги\"\"\"\n # number - сумма пожертвования\n if self.number > number:\n self.number -= number\n print('Операция \"Пожертвование\" потверждена!' +\n ' Пожертвовано {}\\n'.format(number) +\n 'Остаток: {}'.format(self))\n else:\n print('У вас не достаточно средств')\n\n def bet(self, number):\n \"\"\"Сделать ставку\"\"\"\n if self.number > number:\n self.number -= number\n # choice - случайный элемент непустой последовательности\n if random.choice([False, False, True, False, False, False]):\n v = number * 6\n self.number += v\n print('Поздравляю! 
Вы выйграли {}'.format(v))\n else:\n print('Вы проиграли!')\n print('Остаток: {}'.format(self))\n else:\n print('У вас не достаточно средств')\n","sub_path":"Number_5/10_5/1/money.py","file_name":"money.py","file_ext":"py","file_size_in_byte":5571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"527645355","text":"# Double line plot with data input by user\ndef doubleLinePlot(filename1, filename2, xlabel, ylabel, legend1, legend2):\n import subprocess\n\n gnuplotCmdsTmp = '\\n'.join([\n \"set terminal windows\",\n #\"set terminal aqua\", # For Mac OS, uncomment this line\n #\"set terminal dumb\", # For Linux or Command Prompt, uncomment this line\n \"set xlabel '%s'\",\n \"set ylabel '%s'\",\n \"plot '%s' with linespoints title '%s', '%s' with linespoints title '%s'\",\n \"quit\"\n ])\n gnuplotCmds = gnuplotCmdsTmp % (xlabel, ylabel, filename1, legend1, filename2, legend2)\n cmdList = ['gnuplot', '-p']\n\n subprocess.run(cmdList, input=gnuplotCmds, encoding='ascii')\n\nif __name__ == '__main__':\n filename1 = input(\"Enter 1st file name: \")\n filename2 = input(\"Enter 2nd file name: \")\n xlabel = input(\"Enter X-axis label: \")\n ylabel = input(\"Enter Y-axis label: \")\n legend1 = input(\"Enter 1st legend: \")\n legend2 = input(\"Enter 2nd legend: \")\n\n doubleLinePlot(filename1, filename2, xlabel, ylabel, legend1, legend2)","sub_path":"doublePlot_UserInput.py","file_name":"doublePlot_UserInput.py","file_ext":"py","file_size_in_byte":1068,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"626991063","text":"# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\"\"\"Ensure that the name of the spec file matches the name of a blueprint.\n\"\"\"\n\nimport os\n\nimport requests\nfrom launchpadlib.launchpad import Launchpad\n\n\nclass BlueprintChecker(object):\n\n def __init__(self, app):\n self.app = app\n cachedir = os.path.expanduser('~/.launchpadlib')\n self.lp = Launchpad.login_anonymously('check-spec-filename',\n 'production',\n cachedir)\n # Use the launchpad API to figure out which projects we need\n # to check instead of hard-coding a list.\n self.oslo = self.lp.project_groups['oslo']\n self.projects = self.oslo.projects\n self.project_names = [p.name for p in self.projects]\n self._good_bps = set()\n self._prefix = None\n\n @property\n def desired_prefix(self):\n \"\"\"We only care about blueprints in the current release, if the option\n is set.\n\n \"\"\"\n if self._prefix is None:\n release = self.app.config.check_blueprints_release\n if release:\n self._prefix = 'specs/%s/' % release\n else:\n self._prefix = 'specs/'\n return self._prefix\n\n def doctree_resolved(self, app, doctree, docname):\n \"\"\"Hook registered as event handler.\"\"\"\n if not docname.startswith(self.desired_prefix):\n return\n bp_name = docname.split('/')[-1]\n if bp_name == 'index':\n return\n self.check(bp_name)\n\n BP_URL_TEMPLATE = 'https://api.launchpad.net/devel/%s/+spec/%s'\n\n def blueprint_exists(self, project_name, bp_name):\n # We can't use the getSpecification() API because we're logged\n # in anonymously, so we have to build the URL to the\n # blueprint's API endpoint ourselves and then poke it to see\n # if it exists.\n url = self.BP_URL_TEMPLATE % (project_name, bp_name)\n response = requests.get(url)\n return response.status_code == 200\n\n def check(self, bp_name):\n \"\"\"Given one blueprint name, check to see if it is valid.\"\"\"\n if bp_name in self._good_bps:\n return True\n self.app.info('') # emit newline\n for project_name in self.project_names:\n self.app.info('Checking for %s in %s' % (bp_name, project_name))\n if self.blueprint_exists(project_name, bp_name):\n self.app.info('Found %s in %s' % (bp_name, project_name))\n self._good_bps.add(bp_name)\n break\n else:\n self.app.warn(\n 'Could not find a blueprint called %r in the oslo project group'\n % bp_name,\n location=(bp_name, 0),\n )\n raise ValueError('Document %s does not match any blueprint name'\n % bp_name)\n\n\ndef setup(app):\n app.info('Initializing %s' % __name__)\n checker = BlueprintChecker(app)\n app.connect('doctree-resolved', checker.doctree_resolved)\n app.add_config_value('check_blueprints_release', '', 'env')\n","sub_path":"doc/source/_exts/check_blueprints.py","file_name":"check_blueprints.py","file_ext":"py","file_size_in_byte":3683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"643361632","text":"# https://leetcode.com/problems/leftmost-column-with-at-least-a-one/solution/\n# A row-sorted binary matrix as input is given, we can also use binary search\n# binary search in matrix\n\ndef leftMostColumnWithOne(binaryMatrix):\n rows = len(binaryMatrix)\n cols = len(binaryMatrix[0])\n l=cols\n for row in range(rows):\n for col in range(cols):\n if binaryMatrix[row][col]==1:\n l=min(l,col)\n return -1 if l==cols else l\n\n\ndef leftMostColumnWithOneEfficient(binaryMatrix):\n rows = len(binaryMatrix)\n cols = len(binaryMatrix[0])\n l=cols\n for row in range(rows):\n lo=0\n hi=cols-1\n while lo 12:\n hr = 
str(hour-12)\n    else:\n        # midnight (hour 0) displays as 12, not 0\n        hr = str(hour) if hour != 0 else '12'\n    minute_string = str(date.minute).zfill(2)\n    am_pm = date.strftime(\"%p\")\n    return day_of_week + \", \" + month + \" \" + day + \" at \" + hr + \":\" + minute_string + \" \" + am_pm\n\ndef formatTime(date):\n    hour = date.hour\n    if hour > 12:\n        hr = str(hour - 12)\n    else:\n        hr = str(hour) if hour != 0 else '12'\n    minute_string = str(date.minute).zfill(2)\n    am_pm = date.strftime(\"%p\")\n    return hr + \":\" + minute_string + \" \" + am_pm\n\ndef transformLength(original):\n    if original < 60:\n        return str(original) + \" min.\"\n    elif original == 60:\n        return \"1 hr.\"\n    else:\n        hour = int(math.floor(original / 60))\n        rem = original % 60\n        if rem == 0:\n            return str(hour) + \" hr.\"\n        else:\n            return str(hour) + \" hr. \" + str(rem) + \" min.\"\n\ndef formatDateOnly(date):\n    day_of_week = date.strftime(\"%A\")\n    month = date.strftime(\"%B\")\n    day = date.strftime(\"%d\")\n    return day_of_week + \", \" + month + \" \" + day\n\ndef transformMilitary(original):\n    # minutes past midnight -> \"h:mm AM/PM\", handling noon/midnight and zero-padding the minutes\n    hour = int(math.floor(original / 60))\n    rem = original % 60\n    am_pm = \"PM\" if hour >= 12 else \"AM\"\n    display_hour = hour % 12\n    if display_hour == 0:\n        display_hour = 12\n    return str(display_hour) + \":\" + str(rem).zfill(2) + \" \" + am_pm\n\n###GET THE CALENDAR EVENTS\n@app.route('/api/user/lessons/this-month', methods=['POST','OPTIONS'])\n@auth.crossdomain(origin='*')\n@auth.auth_private\ndef get_lessons_this_month():\n    if request.method == 'POST':\n        s=Session()\n        try:\n            current_month = int(request.json['month']) + 1\n            current_year = int(request.json['year'])\n            date_start = datetime.datetime(current_year,current_month,1)\n            next_month = current_month+1\n            if current_month == 12:\n                next_month=1\n                current_year=current_year+1\n            date_end = datetime.datetime(current_year,next_month,1)\n            today = datetime.datetime.now()\n            lessons = s.query(Lesson).filter(Lesson.user_id==int(request.headers['USER-ID'])).filter(Lesson.date >= date_start).filter(Lesson.date < date_end).all()\n            return_lessons=[]\n            if len(lessons) > 0:\n                for i in lessons:\n                    tod = False\n                    if today.day == i.date.day and today.month == i.date.month and today.year == i.date.year:\n                        tod=True\n                    tutor_name = ''\n                    if i.accepted:\n                        tutor_name=s.query(Tutor).filter(Tutor.id==i.tutor_id).first().first_name\n                    students = []\n                    kids = s.query(LessonStudent).filter(LessonStudent.lesson_id == i.id).all()\n                    for j in kids:\n                        kid = s.query(Kid).filter(Kid.id == j.student_id).first()\n                        students.append(kid.first_name)\n                    if not i.recurring or (i.recurring and i.active):\n                        return_lessons.append({'id':i.id,\n                                               'length':i.length,\n                                               'day':i.date.day,\n                                               'time':formatTime(i.date),\n                                               'accepted':i.accepted,\n                                               'active':i.active,\n                                               'suspended':i.suspended,\n                                               'today':tod,\n                                               'students':students,\n                                               'tutor_name':tutor_name})\n            return Response(json.dumps(return_lessons),status=200,mimetype='application/json')\n        except:\n            return Response(json.dumps({'invalid request':'requires get'}), status=400, mimetype='application/json')\n        finally:\n            s.close()\n    else:\n        return Response(json.dumps({'failure':'bad request'}), status=400, 
mimetype='application/json')\n\n##RECURRING LESSON REMAKE\n@app.route('/api/user/lessons/create-recurring', methods=['POST','OPTIONS'])\n@auth.crossdomain(origin='*')\n@auth.auth_private\ndef request_a_lesson_recurring_user():\n if request.method == 'POST':\n s=Session()\n uid=str(request.headers['USER-ID'])\n days_array = [] ###PUT DAYS IN AN ARRAY\n for day in request.json['days']:\n if day['selected']:\n if day['value'] == 0:\n day['value']=6\n days_array.append(day)\n else:\n day['value'] = int(day['value'])-1\n days_array.append(day)\n sorted_days = sorted(days_array,key=lambda dy: dy['value'])\n print(sorted_days)\n max_position = int(len(days_array)-1)\n yr = int(request.json['yrSelectRec'])\n mon = int(request.json['monthSelectRec'])\n dy = int(request.json['daySelectRec'])\n lesson_date_lo = datetime.datetime(yr,mon,dy)\n lesson_date_hi = datetime.datetime(yr,mon,dy)\n weekday = lesson_date_lo.weekday()\n n = int(map(itemgetter('value'), sorted_days).index(weekday))\n end = lesson_date_hi + datetime.timedelta(days=365)\n frequency = int(request.json['frequency'])\n lesson_recurring=LessonRecurringID(frequency=frequency)\n s.add(lesson_recurring)\n s.commit()\n first=True\n while lesson_date_hi < end:\n dy = days_array[n]\n time_lo = int(dy['time'])\n hr_lo = int(math.floor(time_lo/60))\n minute_lo = int(time_lo%60)\n time_hi = int(day['time_end'])\n hr_hi = int(math.floor(time_hi/60))\n minute_hi = int(time_hi%60)\n new_lo = lesson_date_lo.replace(hour=hr_lo,minute=minute_lo)\n new_hi = lesson_date_hi.replace(hour=hr_hi,minute=minute_hi)\n charge=0\n length = int(dy['length'])\n lid = None\n if not request.json['skype'] and not request.json['rice']:\n lid=request.json['address_id']\n charge = int(length)/30 * charge_per_half_hour\n else:\n charge = int(length)/30 * charge_per_skype\n lesson = Lesson(user_id=uid,\n accepted=False,\n recurring=True,\n active=True,\n length=length,\n date=new_lo,\n skype=request.json['skype'],\n rice=request.json['rice'],\n location_id=lid,\n recurring_id=lesson_recurring.id,\n suspended=False)\n s.add(lesson)\n s.commit()\n user_cash = s.query(UserCash).filter(UserCash.user_id==uid).first()\n if int(charge * 100) > int(user_cash.credit):\n charge = int(charge) - int(user_cash.credit)\n user_cash.credit = 0\n else:\n user_cash.credit = int(user_cash.credit) - int(charge)\n charge = 0\n lesson_charge = LessonCharge(lesson_id=lesson.id,charge=charge,refunded=False,charge_complete=False,card=request.json['card_id'])\n s.add(lesson_charge)\n s.commit()\n lrange = LessonRanges(lesson_id=lesson.id,low_range=new_lo,high_range=new_hi)\n s.add(lrange)\n s.commit()\n for i in request.json['students']:\n ls = LessonStudent(student_id=i['id'],lesson_id=lesson.id)\n s.add(ls)\n for index in request.json['subjects']:\n lsub = LessonSubject(subject_id=index['id'],lesson_id=lesson.id)\n s.add(lsub)\n if first:\n for k in request.json['notes']:\n ln=LessonNote(lesson_id=lesson.id,note=str(k))\n s.add(ln)\n s.commit()\n delta=0\n if n == max_position:\n if frequency > 1:\n delta = 1+days_array[0]['value']+(6-int(days_array[max_position]['value']))+(frequency-1)*7\n else:\n delta = 1+days_array[0]['value']+(6-int(days_array[max_position]['value']))\n n=0\n else:\n delta = days_array[n+1]['value']-days_array[n]['value']\n n=n+1\n lesson_date_lo = lesson_date_lo + datetime.timedelta(days=delta)\n lesson_date_hi = lesson_date_hi + datetime.timedelta(days=delta)\n first=False\n if not request.json['new_tutor']:\n user = s.query(User).filter(User.id==uid).first()\n 
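# --- Illustrative aside (hypothetical helper, not used by this handler):\n            # a minimal sketch of the weekday-stepping rule implemented by the\n            # scheduling loop above, assuming weekday values Mon=0..Sun=6 sorted\n            # ascending and `frequency` measured in weeks.\n            def weekday_deltas(sorted_days, frequency, count):\n                deltas, n, last = [], 0, len(sorted_days) - 1\n                for _ in range(count):\n                    if n == last:\n                        # wrap from the last selected weekday back to the first,\n                        # skipping (frequency - 1) extra weeks between cycles\n                        delta = (6 - sorted_days[last]) + 1 + sorted_days[0] + (frequency - 1) * 7\n                        n = 0\n                    else:\n                        delta = sorted_days[n + 1] - sorted_days[n]\n                        n += 1\n                    deltas.append(delta)\n                return deltas\n            # e.g. lessons on Mon(0) and Thu(3) every other week:\n            # weekday_deltas([0, 3], 2, 4) -> [3, 11, 3, 11]\n            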
loc_string =''\n if lesson.skype:\n loc_string = \"conducted over Skype/Google Chat\"\n elif lesson.rice:\n loc_string=\"at Rice University\"\n else:\n addy = s.query(UserAddress).filter(UserAddress.id==lesson.location_id).first()\n loc_string=addy.street\n l_subs = s.query(LessonSubject).filter(LessonSubject.lesson_id==lesson.id).all()\n subbies=[]\n for i in l_subs:\n sub = s.query(Subject).filter(Subject.id==i.subject_id).first().description\n subbies.append(sub)\n l_students = s.query(LessonStudent).filter(LessonStudent.lesson_id==lesson.id).all()\n studs = []\n for j in l_students:\n student = s.query(Kid).filter(Kid.id==LessonStudent.student_id).first()\n age_dt = datetime.datetime.utcnow() - student.date_of_birth\n age=int(age_dt.days / 365)\n s_string = str(student.first_name) +\" \"+str(age)+ \" years old, Grade \"+str(student.grade)\n studs.append(s_string)\n tutor = s.query(Tutor).filter(Tutor.id==request.json['tutor_id']).first()\n freq=\"\"\n frequency = s.query(LessonRecurringID).filter(LessonRecurringID.id==lesson.recurring_id).first().frequency\n if frequency==1:\n freq='every week'\n elif frequency==2:\n freq =\"every other week\"\n elif frequency==3:\n freq =\"every 3 weeks\"\n else:\n freq=\"every 4 weeks\"\n lesson_requests = s.query(Lesson).filter(Lesson.recurring_id==lesson.recurring_id).order_by(Lesson.date).all()\n days = []\n days_return=[]\n for k in lesson_requests:\n count = 0\n if k.date.weekday() not in days:\n count = count + 1\n days.append(k.date.weekday())\n l_range = s.query(LessonRanges).filter(LessonRanges.lesson_id == k.id).first()\n lo_string = formatTime(l_range.low_range)\n hi_string = formatTime(l_range.high_range)\n day_string =\"\\n \"+k.date.strftime(\"%A\")+\": Length: \"+transformLength(k.length)+\", Range of Start Times: \"+lo_string+\"-\"+hi_string\n days_return.append(day_string)\n subject_main = \"New Lesson Request!\"\n message = \"\"\"Hello {tutor_name}! \\nYou have a new lesson request from your previous client, {client}! It is a recurring lesson {frequency} beginning on {start}. The client has provided a range of times they are willing to begin each lesson. Please respond with your preferred start times or indicate that you are unavailable for these lessons. 
If you do not respond within 4 hours we will pass the request on to another tutor.\\n\n {days_full}\\n\n Client Name: {client_name}\\n\n Client Email: {client_email}\\n\n Location: {location_string}\\n\n Students: {students}\\n\n Subjects: {subjects}\n \\n Get Learning!\\n \\n The team at Athena\"\"\"\n new = message.format(tutor_name=tutor.first_name,client=user.first_name,frequency=freq,start=formatDateOnly(lesson.date),days_full=' '.join(days_return),client_name=user.first_name,client_email=user.email,location_string=loc_string,students='; '.join(studs),subjects=', '.join(subbies))\n send = SendEmail()\n if send.send_email(new,subject_main,tutor.email):\n prev = s.query(TutorRequest).filter(TutorRequest.recurring_lesson_id==lesson.recurring_id).filter(TutorRequest.active==True).first()\n if prev is not None:\n prev.active = False\n s.commit()\n notified = TutorRequest(tutor_id=tutor.id,recurring=True,recurring_lesson_id=lesson.recurring_id,active=True)\n s.add(notified)\n s.commit()\n s.close()\n return Response(json.dumps({'sent':'eamail'}),status=200,mimetype='application/json')\n else:\n s.close()\n return Response(json.dumps({'bad':'email'}),status=402,mimetype='application/json')\n else:\n s.close()\n return Response({'successfully':'requested'},status=200,mimetype='application/json')\n else:\n return Response(json.dumps({'failure':'bad request'}), status=400, mimetype='application/json')\n\n###CREATE A SINGLE LESSON\n@app.route('/api/user/lessons/create-single', methods=['POST','OPTIONS'])\n@auth.crossdomain(origin='*')\n@auth.auth_private\ndef request_a_lesson_single():\n if request.method == 'POST':\n s=Session()\n uid=request.headers['USER-ID']\n length = int(request.json['length'])\n yr = int(request.json['yrSelect'])\n mon = int(request.json['monthSelect'])\n dy = int(request.json['daySelect'])\n time = int(request.json['time'])\n hr = int(math.floor(time/60))\n minute = int(time%60)\n time_end = int(request.json['time_end'])\n hre = int(math.floor(time_end/60))\n minutee = int(time_end%60)\n date_sched = datetime.datetime(yr,mon,dy,hr,minute)\n date_sched2 = datetime.datetime(yr,mon,dy,hre,minutee)\n lid = None\n charge=0\n if not request.json['skype'] and not request.json['rice']:\n lid=request.json['address_id']\n charge = length/30 * charge_per_half_hour\n else:\n charge = length/30 * charge_per_skype\n lesson = Lesson(user_id=uid,\n accepted=False,\n recurring=False,\n active=True,\n length=length,\n date=date_sched,\n location_id=lid,\n skype=request.json['skype'],\n rice=request.json['rice'],\n suspended=False)\n s.add(lesson)\n s.commit()\n user_cash = s.query(UserCash).filter(UserCash.user_id==uid).first()\n if int(charge) > int(user_cash.credit):\n charge = int(charge) - int(user_cash.credit)\n user_cash.credit = 0\n else:\n user_cash.credit = int(user_cash.credit) - int(charge)\n charge = 0\n lesson_charge = LessonCharge(lesson_id=lesson.id,charge=charge,refunded=False,charge_complete=False,card=request.json['card_id'])\n s.add(lesson_charge)\n s.commit()\n if date_sched > date_sched2:\n lrange = LessonRanges(lesson_id=lesson.id,low_range=date_sched2,high_range=date_sched)\n s.add(lrange)\n s.commit()\n else:\n lrange = LessonRanges(lesson_id=lesson.id,low_range=date_sched,high_range=date_sched2)\n s.add(lrange)\n s.commit()\n students = request.json['students'] ###CREATE LESSON STUDETS\n for i in students:\n ls = LessonStudent(student_id=i['id'],lesson_id=lesson.id)\n s.add(ls)\n s.commit()\n for index in request.json['subjects']: ###SUBJECTS FOR LESSON\n lsub = 
LessonSubject(subject_id=index['id'],lesson_id=lesson.id)\n s.add(lsub)\n for k in request.json['notes']: ###CREATE NOTES FOR THE STUDENT\n ln=LessonNote(lesson_id=lesson.id,note=str(k))\n s.add(ln)\n s.commit()\n if not request.json['new_tutor']:\n user = s.query(User).filter(User.id==uid).one()\n tutor = s.query(Tutor).filter(Tutor.id==request.json['tutor_id']).first()\n l_range = s.query(LessonRanges).filter(LessonRanges.lesson_id == lesson.id).first()\n lo_string = formatTime(l_range.low_range)\n hi_string = formatTime(l_range.high_range)\n loc_string =''\n if lesson.skype:\n loc_string = \"conducted over Skype/Google Chat\"\n elif lesson.rice:\n loc_string=\"at Rice University\"\n else:\n addy = s.query(UserAddress).filter(UserAddress.id==lesson.location_id).first()\n loc_string=\"at \"+ str(addy.street)\n l_subs = s.query(LessonSubject).filter(LessonSubject.lesson_id==lesson.id).all()\n subbies=[]\n for i in l_subs:\n sub = s.query(Subject).filter(Subject.id==i.subject_id).first().description\n subbies.append(sub)\n l_students = s.query(LessonStudent).filter(LessonStudent.lesson_id==lesson.id).all()\n studs = []\n for j in l_students:\n student = s.query(Kid).filter(Kid.id==LessonStudent.student_id).first()\n age_dt = datetime.datetime.utcnow() - student.date_of_birth\n age=int(age_dt.days / 365)\n s_string = str(student.first_name) +str(age)+ \" years old, Grade \"+str(student.grade)\n studs.append(s_string)\n l_notes = s.query(LessonNote).filter(LessonNote.lesson_id==lesson.id).all()\n notes=[]\n for note in l_notes:\n notes.append(note.note)\n notes_string = 'No lesson notes'\n if len(notes)>0:\n notes_string=', '.join(notes)\n subject_main = \"New Lesson Request!\"\n message = \"\"\"Hello {tutor_name}! \\n \\nYou have a new lesson request! It is a singular lesson on {date} {location_string}.The client has provided a range of times they are willing to begin each lesson. Please respond with your preferred start time or indicate that you are unavailable for this lesson. If you do not respond within 4 hours we will pass the request on to another tutor. \\n\n Duration: {duration}\\n\n Range of Potential Start Times: {lo} - {hi}\\n\n Location: {location_string}\\n\n Client Name: {client_name}\\n\n Client Email: {client_email}\\n\n Students: {students}\\n\n Subjects: {subjects}\\n\n Additional Notes: {notes} \\n\n Make contact with your client to solidify details, exchange Skype/Google Chat names, etc. If they have not contacted you, you may email Athena or email your client directly. 
\\n\n \\n Good luck and get teaching!\\n \\n The team at Athena\n \"\"\"\n new = message.format(tutor_name=tutor.first_name,date=formatDate(lesson.date),location_string=loc_string,duration=transformLength(lesson.length),lo=lo_string,hi=hi_string,client_name=user.first_name,client_email=user.email,students=', '.join(studs),subjects=', '.join(subbies),notes=notes_string)\n send = SendEmail()\n if send.send_email(new,subject_main,tutor.email):\n notification_prev = s.query(TutorRequest).filter(TutorRequest.single_lesson_id==lesson.id).filter(TutorRequest.active==True).first()\n if notification_prev is not None:\n notification_prev.active = False\n s.commit()\n notified = TutorRequest(tutor_id=tutor.id,recurring=False,active=True,single_lesson_id=lesson.id)\n s.add(notified)\n s.commit()\n s.close()\n return Response(json.dumps({'sent':'eamail'}),status=200,mimetype='application/json')\n else:\n s.close()\n return Response(json.dumps({'bad':'email'}),status=402,mimetype='application/json')\n else:\n s.close()\n return Response(json.dumps({'successfull':'scheduled single lesson'}), status=200, mimetype='application/json')\n else:\n return Response(json.dumps({'failure':'bad request'}), status=400, mimetype='application/json')\n\n###GET LESSON DETAILS FOR USER\n@app.route('/api/user/lesson-details', methods=['POST','OPTIONS'])\n@auth.crossdomain(origin='*')\n@auth.auth_private\ndef getLessonDetailsUser():\n if request.method == 'POST':\n s=Session()\n try:\n uid=request.headers['USER-ID']\n lid = request.json['lesson_id']\n lesson = s.query(Lesson).filter(Lesson.id == lid).first()\n if int(uid) != int(lesson.user_id):\n return Response(json.dumps({'failure':'incorrect user'}), status=403, mimetype='application/json')\n else:\n now = datetime.datetime.utcnow()\n subjects = s.query(LessonSubject).filter(LessonSubject.lesson_id==lesson.id).all()\n return_subs = []\n for i in subjects:\n sub = s.query(Subject).filter(Subject.id==i.subject_id).first()\n return_subs.append({'subject':sub.description,'id':sub.id})\n notes = s.query(LessonNote).filter(LessonNote.lesson_id==lesson.id).all()\n return_notes = []\n for i in notes:\n return_notes.append(i.note)\n lesson_range = s.query(LessonRanges).filter(LessonRanges.lesson_id==lesson.id).first()\n user = s.query(User).filter(User.id==lesson.user_id).first()\n return_students=[]\n lesson_students = s.query(LessonStudent).filter(LessonStudent.lesson_id==lesson.id).all()\n for i in lesson_students:\n kid = s.query(Kid).filter(Kid.id==i.student_id).first()\n return_students.append(kid.first_name)\n loc_string=''\n if not lesson.skype and not lesson.rice:\n loc = s.query(UserAddress).filter(UserAddress.id == lesson.location_id).first()\n loc_string = str(loc.street) + \", \" +str(loc.city)+\", \"+str(loc.state)+\" \"+str(loc.zip_code)\n length = lesson.length\n l_charge = s.query(LessonCharge).filter(LessonCharge.lesson_id==lesson.id).first()\n tutor_ret = {}\n if lesson.accepted:\n tutor = s.query(Tutor).filter(Tutor.id==lesson.tutor_id).first()\n tutor_bio = s.query(TutorBio).filter(TutorBio.tutor_id == tutor.id).first()\n maj = s.query(Major).filter(Major.id == tutor_bio.major).first().major\n uni = s.query(University).filter(University.id == tutor_bio.university).first().university\n tutor_photo = ''\n tphot_q = s.query(TutorPhoto).filter(TutorPhoto.tutor_id == tutor.id).first()\n if tphot_q is not None:\n tutor_photo=tphot_q.file_name\n tutor_ret = {'id':lesson.tutor_id,\n 'phone_number':tutor.phone_number,\n 'first_name':tutor.first_name,\n 
'bio':tutor_bio.bio,\n 'major':maj,\n 'university':uni,\n 'file_name':tutor_photo}\n chss = []\n changed = False\n changes = s.query(LessonChange).filter(LessonChange.lesson_id == lesson.id).filter(LessonChange.accepted==False).filter(LessonChange.cleared==False).all()\n for change in changes:\n loccy=''\n if change.location_change and not change.to_rice and not change.to_skype:\n loc = s.query(UserAddress).filter(UserAddress.id == change.new_location).first()\n loccy = loc.street\n timmy = ''\n datey=''\n if change.date_change:\n datey = formatDateOnly(change.new_date)\n timmy = formatTime(change.new_date)\n lenny=''\n if change.length_change:\n lenny= transformLength(change.new_length)\n chss.append({'length_change':change.length_change,\n 'date_change':change.date_change,\n 'location_change':change.location_change,\n 'tutor_change':change.tutor_change,\n 'skype':change.to_skype,\n 'rice':change.to_rice,\n 'length':lenny,\n 'time':timmy,\n 'date':datey,\n 'location':loccy,\n 'id':change.id})\n if len(chss) > 0:\n changed=True\n res_dict = {\n 'id':lesson.id,\n 'accepted':lesson.accepted,\n 'subjects':return_subs,\n 'notes':return_notes,\n 'students':return_students,\n 'low':lesson_range.low_range,\n 'high':lesson_range.high_range,\n 'skype':lesson.skype,\n 'rice':lesson.rice,\n 'date':formatDateOnly(lesson.date),\n 'time':formatTime(lesson.date),\n 'length':lesson.length,\n 'length_string':transformLength(lesson.length),\n 'active':lesson.active,\n 'location':loc_string,\n 'changes':chss,\n 'charge':float(l_charge.charge)/100.0,\n 'refunded':l_charge.refunded,\n 'paid':l_charge.charge_complete,\n 'tutor':tutor_ret,\n 'changed':changed,\n 'suspended':lesson.suspended\n }\n return Response(json.dumps(res_dict),status=200,mimetype='application/json')\n except:\n return Response(json.dumps({'invalid request':'bad'}), status=400, mimetype='application/json')\n finally:\n s.close()\n else:\n return Response(json.dumps({'failure':'bad request'}), status=400, mimetype='application/json')\n\n##GET NEXT LESSON - LESSON LITE\n@app.route('/api/user/lessons/next', methods=['GET','OPTIONS'])\n@auth.crossdomain(origin='*')\n@auth.auth_private\ndef get_user_next_lesson():\n if request.method=='GET':\n s=Session()\n try:\n now = datetime.datetime.now()\n lesson = s.query(Lesson).filter(Lesson.user_id == request.headers['USER-ID']).filter(Lesson.date > now).filter(Lesson.active == True).order_by(Lesson.date).first()\n if lesson is not None:\n location_string = ''\n if lesson.skype:\n location_string='Skype Lesson'\n elif lesson.rice:\n location_string='Rice University'\n else:\n loc = s.query(UserAddress).filter(UserAddress.id == lesson.location_id).first()\n location_string = str(loc.street)\n return_students=[]\n lesson_students = s.query(LessonStudent).filter(LessonStudent.lesson_id==lesson.id).all()\n for i in lesson_students:\n kid = s.query(Kid).filter(Kid.id==i.student_id).first()\n return_students.append(kid.first_name)\n name = ''\n if lesson.accepted:\n name = s.query(Tutor).filter(Tutor.id == lesson.tutor_id).first().first_name\n changed=False\n changes = s.query(LessonChange).filter(LessonChange.lesson_id==lesson.id).filter(LessonChange.accepted==False).filter(LessonChange.cleared==False).first()\n if changes is not None:\n changed=True\n if lesson.recurring:\n chng_rec = s.query(LessonChangeRecurring).filter(LessonChangeRecurring.recurring_id==lesson.recurring_id).filter(LessonChange.accepted==False).filter(LessonChange.cleared==False).first()\n if chng_rec is not None:\n changed=True\n 
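# --- Illustrative aside (hypothetical helper, not called here): the\n                # pending-change existence check used above, factored out; model and\n                # field names are the ones already used in this module.\n                def has_pending_change(session, lesson):\n                    pending = session.query(LessonChange).filter(LessonChange.lesson_id == lesson.id).filter(LessonChange.accepted == False).filter(LessonChange.cleared == False).first()\n                    return pending is not None\n                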
less={'id':lesson.id,\n 'date':formatDateOnly(lesson.date),\n 'time':formatTime(lesson.date),\n 'length':transformLength(lesson.length),\n 'tutor_name':name,\n 'changed':changed,\n 'student_names':return_students,\n 'location_string':location_string,\n 'accepted':lesson.accepted,\n 'suspended':lesson.suspended}\n return Response(json.dumps({'lesson':less,'exists':True}),status=200,mimetype='application/json')\n else:\n return Response(json.dumps({'exists':False}),status=200,mimetype='application/json')\n except:\n return Response(json.dumps({'invalid request':'bad'}), status=400, mimetype='application/json')\n finally:\n s.close()\n else:\n return Response(json.dumps({'failure':'request type'}), status=400, mimetype='application/json')\n\n###GET RECURRING LESSONS\n@app.route('/api/user/lessons/recurring-schedule', methods=['GET','OPTIONS'])\n@auth.crossdomain(origin='*')\n@auth.auth_private\ndef get_user_recurring_schedule():\n if request.method=='GET':\n s=Session()\n try:\n now = datetime.datetime.utcnow()\n lessons = s.query(Lesson).filter(Lesson.date>now).filter(Lesson.user_id == request.headers['USER-ID']).filter(Lesson.recurring==True).filter(Lesson.active == True).all()\n couples = []\n day_return = []\n for i in lessons:\n couple = (i.recurring_id,i.date.weekday())\n if couple not in couples:\n couples.append(couple)\n students = s.query(LessonStudent).filter(LessonStudent.lesson_id==i.id).all()\n names=[]\n for j in students:\n name = s.query(Kid).filter(Kid.id == j.student_id).first().first_name\n names.append(name)\n toot=None\n if i.accepted and i.tutor_id is not None:\n toot = s.query(Tutor).filter(Tutor.id==i.tutor_id).first()\n toot_photo=''\n tphot_q = s.query(TutorPhoto).filter(TutorPhoto.tutor_id == toot.id).first()\n if tphot_q is not None:\n toot_photo=tphot_q.file_name\n phone = ''\n if toot.active:\n phone = toot.phone_number\n tutor_bio = s.query(TutorBio).filter(TutorBio.tutor_id == toot.id).first()\n uni = s.query(University).filter(University.id == tutor_bio.university).first().university\n maj = s.query(Major).filter(Major.id == tutor_bio.major).first().major\n toot = {'id':toot.id,'first_name':toot.first_name,'file_name':toot_photo,'phone_number':phone,'major':maj,'university':uni}\n location_string = ''\n if i.skype:\n location_string='Skype Lesson'\n elif i.rice:\n location_string='Rice University'\n else:\n loc = s.query(UserAddress).filter(UserAddress.id == i.location_id).first()\n location_string = str(loc.street)\n frequency = s.query(LessonRecurringID).filter(LessonRecurringID.id==i.recurring_id).first().frequency\n freq=\"\"\n if frequency==1:\n freq='Every Week'\n elif frequency==2:\n freq =\"Every other week\"\n elif frequency==3:\n freq =\"Every 3 Weeks\"\n else:\n freq=\"Every 4 Weeks\"\n day_return.append({'value':i.date.weekday(),\n 'day':i.date.strftime(\"%A\"),\n 'time':formatTime(i.date),\n 'length':transformLength(i.length),\n 'student_names':names,\n 'tutor':toot,\n 'recurring_id':i.recurring_id,\n 'location_string':location_string,\n 'day_value':i.date.weekday(),\n 'frequency':freq,\n 'suspended':i.suspended})\n sorted_stuff = sorted(day_return,key=lambda day: day['value'])\n return Response(json.dumps(sorted_stuff), status=200, mimetype='application/json')\n except:\n return Response(json.dumps({'invalid request':'bad'}), status=400, mimetype='application/json')\n finally:\n s.close()\n else:\n return Response(json.dumps({'failure':'request type'}), status=400, mimetype='application/json')\n\n###GET PAST 
LESSONS\n@app.route('/api/user/lessons/past/all', methods=['GET','OPTIONS'])\n@auth.crossdomain(origin='*')\n@auth.auth_private\ndef get_user_past_lessons():\n    if request.method=='GET':\n        s=Session()\n        try:\n            lesson_return=[]\n            now = datetime.datetime.now()\n            month_ago = now - datetime.timedelta(days=31)\n            lessons = s.query(Lesson).filter(Lesson.date > month_ago).filter(Lesson.date < now).filter(Lesson.user_id==request.headers['USER-ID']).filter(Lesson.active==True).all()\n            for j in lessons:\n                nts = ''\n                notes = s.query(LessonCompletedNotes).filter(LessonCompletedNotes.lesson_id==j.id).first()\n                if notes is not None:\n                    nts=notes.note\n                tutor_name = s.query(Tutor).filter(Tutor.id==j.tutor_id).first().first_name\n                subs=[]\n                studs=[]\n                loc=''\n                lesson_subjects = s.query(LessonSubject).filter(LessonSubject.lesson_id==j.id).all()\n                for ls in lesson_subjects:\n                    # resolve the subject description through Subject, as elsewhere in this file\n                    subject = s.query(Subject).filter(Subject.id==ls.subject_id).first()\n                    subs.append(subject.description)\n                lesson_students = s.query(LessonStudent).filter(LessonStudent.lesson_id==j.id).all()\n                for st in lesson_students:\n                    kid = s.query(Kid).filter(Kid.id==st.student_id).first()\n                    studs.append(kid.first_name)\n                if j.skype:\n                    loc='Video Lesson'\n                elif j.rice:\n                    loc='Rice University'\n                else:\n                    user_a = s.query(UserAddress).filter(UserAddress.id == j.location_id).first()\n                    loc = user_a.street\n                lesson_return.append({\n                    'id':j.id,\n                    'date':formatDate(j.date),\n                    'length':transformLength(j.length),\n                    'notes':nts,\n                    'tutor_name':tutor_name,\n                    'subjects':subs,\n                    'students':studs,\n                    'location_string':loc\n                })\n            return Response(json.dumps(lesson_return), status=200, mimetype='application/json')\n        except:\n            return Response(json.dumps({'invalid request':'bad'}), status=400, mimetype='application/json')\n        finally:\n            s.close()\n    else:\n        return Response(json.dumps({'failure':'request type'}), status=400, mimetype='application/json')\n\n###SUSPEND RECURRING LESSONS\n@app.route('/snorkeling', methods=['POST','OPTIONS'])\n@auth.crossdomain(origin='*')\n@auth.auth_private\ndef suspend_recurring_user_lesson():\n    if request.method == 'POST':\n        s=Session()\n        try:\n            uid=request.headers['USER-ID']\n            rid = request.json['recurring_id']\n            day = int(request.json['day_value'])\n            lessons = s.query(Lesson).filter(Lesson.recurring_id==rid).all()\n            if int(uid) != int(lessons[0].user_id):\n                return Response(json.dumps({'failure':'incorrect user'}), status=403, mimetype='application/json')\n            else:\n                # initialize so the message still builds if no lesson falls on the requested day\n                time_str = ''\n                dayyy = ''\n                for less in lessons:\n                    if less.date.weekday() == day:\n                        less.suspended = True\n                        less.accepted = False\n                        lesson_changes = s.query(LessonChange).filter(LessonChange.lesson_id==less.id).all()\n                        time_str = formatTime(less.date)\n                        dayyy = less.date.strftime(\"%A\")\n                        for i in lesson_changes:\n                            i.cleared=True\n                s.commit()\n                sub = \"Recurring Lesson Suspended: \"+dayyy+\"s at \"+time_str\n                mess = \"You have suspended your lessons on \"+dayyy + \"s at \"+time_str+\". If these lessons have been charged and you have given more than 24 hours notice, you will be refunded the full amount. 
You may reactivate these lessons whenever you like.\"\n                messageChain = MessageChain()\n                s.add(messageChain)\n                s.commit()\n                message = Message(subject=sub,message=mess,chain_id=messageChain.id,user_id=uid,lesson_id=None,viewed=False)\n                s.add(message)\n                s.commit()\n                return Response(json.dumps({'successfully':'cancelled'}),status=200,mimetype='application/json')\n        except:\n            return Response(json.dumps({'invalid request':'bad'}), status=400, mimetype='application/json')\n        finally:\n            s.close()\n    else:\n        return Response(json.dumps({'failure':'bad request'}), status=400, mimetype='application/json')\n","sub_path":"application/lesson.py","file_name":"lesson.py","file_ext":"py","file_size_in_byte":35040,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"117112213","text":"# Titanic csv ---> PostgreSQL (Elephant SQL)\n\n# imports\nimport psycopg2\nimport pandas as pd\nfrom psycopg2.extras import execute_values\n\n# reading in titanic Data\ndf = pd.read_csv('titanic.csv')\n\n# renaming columns in order to have them read into elephant\n# (rename returns a new frame, so the result must be assigned back)\ndf = df.rename(columns={'Siblings/Spouses Aboard': 'siblingsspouse',\n                        'Parents/Children Aboard': 'parentschildren'})\n\n# Clean the data\ndf['Name'] = df['Name'].str.replace(\"'\", \"\")\n\n# Credential for cloud DB, password is TOP SECRET\ndbname = 'XXXX'\nuser = 'XXXX'\npassword = 'XXXX'\nhost = 'isilo.db.elephantsql.com'\n\n# connection to cloud\npg_conn = psycopg2.connect(dbname=dbname, user=user, password=password, host=host)\n\n# Cursor\npg_curs = pg_conn.cursor()\n\n# creating Titanic Table\ncreate_titanic_table = \"\"\"\nDROP TABLE IF EXISTS Titanic;\nCREATE TABLE Titanic (\n    index INT,\n    Survived INT,\n    Pclass INT,\n    Name TEXT,\n    Sex TEXT,\n    Age REAL,\n    siblingsspouse INT,\n    parentschildren INT,\n    Fare REAL\n);\n\"\"\"\n\n# running table and committing table\npg_curs.execute(create_titanic_table)\npg_conn.commit()\n\n# Using the execute_values function\nexecute_values(pg_curs, \"\"\"\nINSERT INTO Titanic\n(Survived, Pclass, Name, Sex, Age, siblingsspouse, parentschildren, Fare)\nVALUES %s;\n\"\"\", [tuple(row) for row in df.values])\n\n# commit\npg_conn.commit()\n\n\npg_curs.execute(\"\"\"\nSELECT *\nFROM Titanic\nLIMIT 1;\n\"\"\")\n\n# printing to validate\nprint(pg_curs.fetchall())","sub_path":"module2-sql-for-analysis/insert_titanic_final.py","file_name":"insert_titanic_final.py","file_ext":"py","file_size_in_byte":1426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"531815932","text":"import numpy as np\r\nfrom keras.datasets import mnist\r\nfrom keras.utils import to_categorical\r\n\r\n# Fix the random seed\r\nseed = 7\r\nnp.random.seed(seed)\r\n# Load the dataset\r\n(X_train, Y_train), (X_test, Y_test) = mnist.load_data()\r\n# Reshape the images into 4D tensors\r\nX_train = X_train.reshape(X_train.shape[0], 28, 28, 1).astype(\"float32\")\r\nX_test = X_test.reshape(X_test.shape[0], 28, 28, 1).astype(\"float32\")\r\nprint(\"X_train Shape: \", X_train.shape)\r\nprint(\"X_test Shape: \", X_test.shape)\r\n# The value range is fixed, so normalize from 0-255 to 0-1\r\nX_train = X_train / 255\r\nX_test = X_test / 255\r\n# One-hot encode the labels\r\nY_train = to_categorical(Y_train)\r\nY_test = to_categorical(Y_test)\r\nprint(\"Y_train Shape: \", Y_train.shape)\r\nprint(Y_train[0])","sub_path":"F9744/Keras/Ch08/Ch8_3_2.py","file_name":"Ch8_3_2.py","file_ext":"py","file_size_in_byte":742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"80352968","text":"import sys, cPickle, 
os\nfrom collections import defaultdict\nfrom utils import kmer_store, kmers, nucleotides_fna, progress\n\n\"\"\"\"\n\tgen_kmers sample \n\n\t\tor\n\n\tgen_kmers genomes \n\"\"\"\nk = int(sys.argv[1])\nversion = sys.argv[2]\n\nif version == 'sample':\n\tsample_name = sys.argv[3]\n\tfilename = 'samples/%s.txt' % sample_name\n\tsample_kmers = kmer_store()\n\twith open(filename) as f:\n\t\tfor read in f:\n\t\t\tread_kmers = kmers(read.strip(), k)\n\t\t\tfor kmer in read_kmers:\n\t\t\t\tsample_kmers.update(kmer)\n\n\toutput_filename = 'pickles/%s_kmers_%d.pickle' % (os.path.basename(os.path.normpath(filename)).replace('.txt',''), k)\n\twith open(output_filename, 'w') as f:\n\t\tcPickle.dump(sample_kmers.kmers, f)\n\nelif version =='genomes':\n\tfull = True if (len(sys.argv) == 4 and sys.argv[3] == 'full') else False\n\tkmer_spectra = defaultdict(lambda:[0]*20)\n\tfor index, genome_filename in enumerate(progress(filter(lambda x: x.endswith('.fna'), os.listdir('genomes')))):\n\t\tkmer_spectrum = {} if full else kmer_store()\n\t\tfor kmer in kmers(nucleotides_fna('genomes/'+genome_filename), k):\n\t\t\tif full:\n\t\t\t\tkmer_spectrum[kmer] = kmer_spectrum[kmer]+1 if kmer in kmer_spectrum else 1\n\t\t\telse:\n\t\t\t\tkmer_spectrum.update(kmer)\n\t\tfor kmer in kmer_spectrum:\n\t\t\tkmer_spectra[kmer][index] = kmer_spectrum[kmer]\n\n\tfull_string = 'full_' if full else ''\n\twith open('pickles/%skmer_spectra_%d.pickle' % (full_string, k), 'w') as f:\n\t\tcPickle.dump(dict(kmer_spectra), f)\n\n","sub_path":"gen_kmers.py","file_name":"gen_kmers.py","file_ext":"py","file_size_in_byte":1455,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"547090527","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport torch\nimport aifc\nfrom scipy import signal\nfrom torch.utils import data\nfrom torchvision import transforms\nimport os\nfrom torch.utils.data.sampler import SubsetRandomSampler\nfrom sklearn.preprocessing import StandardScaler, MinMaxScaler\nimport glob\nimport parameters\n\nfrom utils import set_seed\n\nNoise_Stats_Directory = \"../elephant_dataset/eleph_dataset/Noise_Stats/\"\n\ndef get_loader(data_dir,\n batch_size,\n random_seed=8,\n norm=\"norm\",\n scale=False,\n augment=False,\n shuffle=True,\n num_workers=16,\n pin_memory=False):\n \"\"\"\n Utility function for loading and returning train and valid\n multi-process iterators.\n If using CUDA, num_workers should be set to 1 and pin_memory to True.\n Params\n ------\n - data_dir: path directory to the dataset.\n - batch_size: how many samples per batch to load.\n - random_seed: fix seed for reproducibility.\n - augment: whether data augmentation scheme. Only applied on the train split.\n - valid_size: percentage split of the training set used for\n the validation set. Should be a float in the range [0, 1].\n - shuffle: whether to shuffle the train/validation indices.\n - num_workers: number of subprocesses to use when loading the dataset.\n - pin_memory: whether to copy tensors into CUDA pinned memory. 
Set it to\n True if using GPU.\n - data_file_paths: If you know what particular data file names you want to load, \n pass them in as a list of strings.\n Returns\n -------\n - train_loader: training set iterator.\n - valid_loader: validation set iterator.\n \"\"\"\n # Note here we could do some data preprocessing!\n # define transform\n # Set the dataloader seed\n set_seed(parameters.DATA_LOADER_SEED)\n\n dataset = ElephantDataset(data_dir, preprocess=norm, scale=scale)\n \n print('Size of dataset at {} is {} samples'.format(data_dir, len(dataset)))\n\n # Set the data_loader random seed for reproducibility.\n # Should do some checks on this\n def _init_fn(worker_id):\n # We probably do not want every worker to have \n # the same random seed or else they may do the same \n # thing?\n np.random.seed(int(random_seed) + worker_id)\n\n data_loader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, \n shuffle=shuffle, num_workers=num_workers, pin_memory=pin_memory, worker_init_fn=_init_fn)\n\n return data_loader\n\ndef get_loader_fuzzy(data_dir,\n batch_size,\n random_seed=8,\n norm=\"norm\",\n scale=False,\n include_boundaries=False,\n shift_windows=False,\n is_full_dataset=False,\n full_window_predict=False,\n augment=False,\n shuffle=True,\n num_workers=16,\n pin_memory=False):\n \"\"\"\n Utility function for loading and returning train and valid\n multi-process iterators.\n If using CUDA, num_workers should be set to 1 and pin_memory to True.\n Params\n ------\n - data_dir: path directory to the dataset.\n - batch_size: how many samples per batch to load.\n - random_seed: fix seed for reproducibility.\n - augment: whether data augmentation scheme. Only applied on the train split.\n - valid_size: percentage split of the training set used for\n the validation set. Should be a float in the range [0, 1].\n - shuffle: whether to shuffle the train/validation indices.\n - num_workers: number of subprocesses to use when loading the dataset.\n - pin_memory: whether to copy tensors into CUDA pinned memory. Set it to\n True if using GPU.\n - data_file_paths: If you know what particular data file names you want to load, \n pass them in as a list of strings.\n\n -is_full_dataset: Is important for when we are shifting the windows, because\n when using the full 24 hr dataset for adversarial discover we always want to \n use the middle of the oversized window!\n \n -fixed_repeat: Used for training the second model in a heirarchical setting.\n Repeat sliding windows but save fixed random slices for each window\n\n Returns\n -------\n - train_loader: training set iterator.\n - valid_loader: validation set iterator.\n\n \"\"\"\n # Note here we could do some data preprocessing!\n # define transform\n # Set the dataloader seed\n print (\"DataLoader Seed:\", parameters.DATA_LOADER_SEED)\n set_seed(parameters.DATA_LOADER_SEED)\n\n dataset = ElephantDatasetFuzzy(data_dir, preprocess=norm, scale=scale, include_boundaries=include_boundaries, \n shift_windows=shift_windows, is_full_dataset=is_full_dataset, \n full_window_predict=full_window_predict)\n \n print('Size of dataset at {} is {} samples'.format(data_dir, len(dataset)))\n\n # Set the data_loader random seed for reproducibility.\n # Should do some checks on this\n def _init_fn(worker_id):\n # Assign each worker its own seed\n np.random.seed(int(random_seed) + worker_id)\n # Is this bad??\n # This seems bad as each epoch will be the same order of data! 
\n #torch.manual_seed(int(random_seed) + worker_id)\n\n data_loader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, \n shuffle=shuffle, num_workers=num_workers, pin_memory=pin_memory, worker_init_fn=_init_fn)\n\n return data_loader\n\n\nclass ElephantDatasetFuzzy(data.Dataset):\n def __init__(self, data_path, preprocess=\"norm\", scale=False, transform=None, include_boundaries=False, \n shift_windows=False, is_full_dataset=False, full_window_predict=False):\n # Plan: Load in all feature and label names to create a list\n self.data_path = data_path\n self.user_transforms = transform\n self.preprocess = preprocess\n self.scale = scale\n self.include_boundaries = include_boundaries\n self.shift_windows = shift_windows\n self.is_full_dataset = is_full_dataset\n self.full_window_predict = full_window_predict\n # This is only used if we want to generate fixed repeated\n # windows during hierarchical training\n self.fixed_indeces = None\n # By default this is False \n # and only True for the special case where\n # we incorperate model_0 predictions into \n # the 2-stage model \n self.model_0_feature = False\n\n '''\n self.features = glob.glob(data_path + \"/\" + \"*features*\", recursive=True)\n self.initialize_labels()\n '''\n\n self.pos_features = glob.glob(data_path + \"/\" + \"*_features_*\", recursive=True)\n self.neg_features = glob.glob(data_path + \"/\" + \"*_neg-features_*\", recursive=True)\n self.intialize_data(init_pos=True, init_neg=True)\n\n assert len(self.features) == len(self.labels)\n if self.include_boundaries:\n assert len(self.features) == len(self.boundary_masks)\n\n print(\"ElephantDataset number of features {} and number of labels {}\".format(len(self.features), len(self.labels)))\n print('Normalizing with {} and scaling {}'.format(preprocess, scale))\n\n def initialize_labels(self):\n self.labels = []\n self.boundary_masks = []\n for feature_path in self.features:\n feature_parts = feature_path.split(\"features\")\n self.labels.append(glob.glob(feature_parts[0] + \"labels\" + feature_parts[1])[0])\n if self.include_boundaries:\n self.boundary_masks.append(glob.glob(feature_parts[0] + \"boundary-masks\" + feature_parts[1])[0])\n\n\n def set_pos_features(self, pos_features):\n print(\"Length of pos_features was {} and is now {} \".format(len(self.pos_features), len(pos_features)))\n self.pos_features = pos_features\n self.intialize_data(init_pos=True, init_neg=False)\n\n def set_neg_features(self, neg_features):\n print(\"Length of neg_features was {} and is now {} \".format(len(self.neg_features), len(neg_features)))\n self.neg_features = neg_features\n self.intialize_data(init_pos=False, init_neg=True)\n\n def add_neg_features(self, neg_features):\n print(\"Length of neg_features was {} and grew to {} \".format(len(self.neg_features), len(neg_features) + len(self.neg_features)))\n self.neg_features += neg_features\n self.intialize_data(init_pos=False, init_neg=True)\n\n def set_featues(self, pos_features, neg_features):\n print(\"Length of pos_features was {} and is now {} \".format(len(self.pos_features), len(pos_features)))\n print(\"Length of neg_features was {} and is now {} \".format(len(self.neg_features), len(neg_features)))\n self.pos_features = pos_features\n self.neg_features = neg_features\n self.intialize_data(init_pos=True, init_neg=True)\n\n def scale_features(self, pos_factor, neg_factor):\n print(\"Length of pos_features was {} and is now {} \".format(len(self.pos_features), int(pos_factor * len(self.pos_features))))\n print(\"Length of 
neg_features was {} and is now {} \".format(len(self.neg_features), int(neg_factor * len(self.neg_features))))\n # Add in a feature to undersample as well!\n # Could consider also giving hardness to these to help with selection.\n # Let us do random for now\n if pos_factor < 1:\n indeces = np.arange(len(self.pos_features))\n pos_inds = np.random.choice(indeces, int(indeces.shape[0] * pos_factor))\n self.pos_features = list(np.array(self.pos_features)[pos_inds])\n self.pos_labels = list(np.array(self.pos_labels)[pos_inds])\n else:\n self.pos_features *= pos_factor\n self.pos_labels *= pos_factor\n\n if neg_factor < 1:\n indeces = np.arange(len(self.neg_features))\n neg_inds = np.random.choice(indeces, int(indeces.shape[0] * neg_factor))\n self.neg_features = list(np.array(self.neg_features)[neg_inds])\n self.neg_labels = list(np.array(self.neg_labels)[neg_inds])\n else:\n self.neg_features *= neg_factor\n self.neg_labels *= neg_factor\n\n # Re-form the feature and data set\n self.features = self.pos_features + self.neg_features\n self.labels = self.pos_labels + self.neg_labels\n\n def update_labels(self, new_pos_labels_dir, new_neg_labels_dir):\n \"\"\"\n Kinda an adhoc method, but currently we are using this in\n the new 3rd label dataset. For the given features / windows\n in the dataset, replace the corresponding labels with the\n new 3 class labels. \n Implemenation: Since the new label names should match the\n training example names, go through each training example\n and get the new label path from either pos/neg label dir.\n\n @ Params\n @ new_pos_labels_dir - The folder that contains the new positive window labels\n @ new_neg_labels_dir - The folder that contains the new negative window labels\n \"\"\"\n # Replace the labels for the positive examples\n new_pos_labels = []\n for pos_feat in self.pos_features:\n data_id = pos_feat.split('/')[-1]\n new_pos_label = os.path.join(new_pos_labels_dir, data_id.replace('features', 'labels'))\n new_pos_labels.append(new_pos_label)\n\n self.pos_labels = new_pos_labels\n\n # Replace the labels for the negative examples\n new_neg_labels = []\n for neg_feat in self.neg_features:\n data_id = neg_feat.split('/')[-1]\n new_neg_label = os.path.join(new_neg_labels_dir, data_id.replace('features', 'labels'))\n new_neg_labels.append(new_neg_label)\n\n self.neg_labels = new_neg_labels\n\n # Re-set self.labels\n self.labels = self.pos_labels + self.neg_labels\n\n def add_model_0_preds(self, model_0_pos_dir, model_0_neg_dir):\n \"\"\"\n Add the additional feature of the model_0 predictions for\n each training window. 
\n Implemenation: Since the new label names should match the\n training example names, go through each training example\n and get the new label path from either pos/neg label dir.\n\n @ Params\n @ model_0_pos_dir - The folder that contains the model_0 positive window preds\n @ model_0_neg_dir - The folder that contains the model_0 negative window preds\n \"\"\"\n # Replace the labels for the positive examples\n self.model_0_pos_preds = []\n for pos_feat in self.pos_features:\n data_id = pos_feat.split('/')[-1]\n new_pos_label = os.path.join(model_0_pos_dir, data_id.replace('features', 'labels'))\n self.model_0_pos_preds.append(new_pos_label)\n\n # Replace the labels for the negative examples\n self.model_0_neg_preds = []\n for neg_feat in self.neg_features:\n data_id = neg_feat.split('/')[-1]\n new_neg_label = os.path.join(model_0_neg_dir, data_id.replace('features', 'labels'))\n self.model_0_neg_preds.append(new_neg_label)\n\n # Re-set self.labels\n self.model_0_preds = self.model_0_pos_preds + self.model_0_neg_preds\n self.model_0_feature = True\n\n\n def create_fixed_windows(self):\n self.fixed_indeces = []\n\n # Generate the fixed indeces\n for i in range(len(self.features)):\n feature = np.load(self.features[i])\n label = np.load(self.labels[i])\n\n # Sample a random start index to save\n call_length = -(label.shape[0] - 2 * parameters.CHUNK_SIZE)\n # Use torch.randint because of weird numpy seeding issues\n start_slice = torch.randint(0, parameters.CHUNK_SIZE - call_length, (1,))[0].item()\n self.fixed_indeces.append(start_slice)\n\n def intialize_data(self, init_pos=True, init_neg=True):\n \"\"\"\n Initialize both the positive and negative label and boundary\n mask data arrays if indicated by the initialization flags \n 'init_pos' and 'init_neg'. 
After initializing any necessary\n data, combine the positive and negative examples!\n \"\"\"\n # Initialize the positive examples\n if init_pos:\n self.pos_labels = []\n self.pos_boundary_masks = []\n for feature_path in self.pos_features:\n feature_parts = feature_path.split(\"features\")\n self.pos_labels.append(glob.glob(feature_parts[0] + \"labels\" + feature_parts[1])[0])\n if self.include_boundaries:\n self.pos_boundary_masks.append(glob.glob(feature_parts[0] + \"boundary-masks\" + feature_parts[1])[0])\n\n\n # Initialize the negative examples\n if init_neg:\n self.neg_labels = []\n self.neg_boundary_masks = []\n for feature_path in self.neg_features:\n feature_parts = feature_path.split(\"features\")\n self.neg_labels.append(glob.glob(feature_parts[0] + \"labels\" + feature_parts[1])[0])\n if self.include_boundaries:\n self.neg_boundary_masks.append(glob.glob(feature_parts[0] + \"boundary-masks\" + feature_parts[1])[0])\n\n # Combine the positive and negative examples!\n self.features = self.pos_features + self.neg_features\n self.labels = self.pos_labels + self.neg_labels\n if self.include_boundaries:\n self.boundary_masks = self.pos_boundary_masks + self.neg_boundary_masks\n\n print (\"Len Pos Features:\", len(self.pos_features))\n print (\"Len Neg Features:\", len(self.neg_features))\n\n\n def __len__(self):\n return len(self.features)\n\n \"\"\"\n Return a single element at provided index\n \"\"\"\n def __getitem__(self, index):\n feature = np.load(self.features[index])\n label = np.load(self.labels[index])\n\n # Load the model_0 predictions and incorperate\n # them into the data transform\n if self.model_0_feature:\n model_0_pred = np.load(self.model_0_preds[index])\n feature = self.apply_transforms(feature, model_0_pred)\n else:\n feature = self.apply_transforms(feature)\n\n if self.shift_windows:\n feature, label = self.sample_chunk(feature, label)\n\n # Select fixed random crop\n if self.fixed_indeces is not None:\n start_index = self.fixed_indeces[index]\n feature = feature[start_index: start_index + parameters.CHUNK_SIZE, :]\n label = label[start_index: start_index + parameters.CHUNK_SIZE]\n\n if self.user_transforms:\n feature = self.user_transforms(feature)\n\n # Honestly may be worth pre-process this\n feature = torch.from_numpy(feature).float()\n if self.full_window_predict:\n # Make the label a binary 0/1 if an elephant \n # call is present (May be some weird boundary cases\n # with call being on the edge, but we'll cross that\n # bridge later).\n label = 1. if np.sum(label) > 0 else 0.\n else: \n label = torch.from_numpy(label).float()\n\n # Return the boundary masks\n if self.include_boundaries:\n masks = np.load(self.boundary_masks[index])\n # Cast to a bool tensor to allow for array masking\n masks = torch.from_numpy(masks) == 1\n\n return feature, label, masks, self.features[index]\n else:\n return feature, label, self.features[index] # Include the data file\n\n def sample_chunk(self, feature, label):\n \"\"\"\n Selected a random chunk within the oversized window.\n Figure out the call length as: -(window_size - 2*256).\n Then sample starting slice as rand in range [0, 256 - call_length].\n\n Note: if the flag 'is_full_dataset' is set then return the middle\n 256! 
This is for adversarial discovery mode\n \"\"\"\n if self.is_full_dataset:\n # The full test set window sizes are 2 * (256 / normal)\n start_slice = label.shape[0] // 4\n end_slice = start_slice + label.shape[0] // 2\n else:\n call_length = -(label.shape[0] - 2 * parameters.CHUNK_SIZE)\n # Draw this out but it should be correct!\n # Use torch.randint because of weird numpy seeding issues\n start_slice = torch.randint(0, parameters.CHUNK_SIZE - call_length, (1,))[0].item()\n end_slice = start_slice + parameters.CHUNK_SIZE\n\n return feature[start_slice : end_slice, :], label[start_slice : end_slice]\n\n def apply_transforms(self, data, model_0_pred=None):\n if self.scale:\n data = 10 * np.log10(data)\n\n # Normalize Features\n if self.preprocess == \"norm\":\n data = (data - np.mean(data)) / np.std(data)\n elif self.preprocess == \"globalnorm\":\n data = (data - 132.228) / 726.319 # Calculated these over the training dataset \n elif self.preprocess == \"feature\":\n data = (data - np.mean(data, axis=0)) / np.std(data, axis=0)\n\n # If model_0_pred is provided, then create a 3 channel\n # \"image\" where channels 1 and 2 are the spectrogram and \n # the 3rd channel is a (-1, 1) valued image of model_0 preds.\n # Specifically, create a column of '1' for '1' predictions and\n # a column of '-1' for '0' preds\n if model_0_pred is not None:\n # Expand the channel dim of the spectrogram\n data = np.expand_dims(data, axis=0)\n # Create the prediction mask. First convert '0'\n # to '-1' value\n model_0_pred[model_0_pred == 0] = -1\n # Repeat the pred values along the feature axis\n model_0_pred = np.expand_dims(model_0_pred, axis=1)\n model_0_pred = np.repeat(model_0_pred, data.shape[2], axis=1)\n # Consider normalizing this input!!\n model_0_pred = (model_0_pred - np.mean(model_0_pred)) / np.std(model_0_pred)\n\n # Repeat the spectrogram data to creat 3 channels and then\n # make the final channel by the model_0_pred\n data = np.repeat(data, 3, axis=0)\n data[2, :, :] = model_0_pred\n\n return data\n\n\"\"\"\n Notes\n - Preprocess = Norm, Scale = False ===> seems bad\n - Preprocess = Norm, Scale = True ===> Works well on small dataset!\n - Preprocess = Scale, Scale = False ===> Has quite a bit of trouble over fitting small dataset compared to other but eventually can\n - Preprocess = Scale, Scale = True ===> Has quite a bit of trouble over fitting small dataset compared to other and bad val acc!\n - Preprocess = ChunkNorm, Scale = False ===> Very slow and bad\n - Preprocess = ChunkNorm, Scale = True ===> Similar to Norm with scale\n - Preprocess = None, Scale = True ====> No worky\n - Preprocess = Scale range (-1, 1), Scale = True ===> Overfit but huge variance issue\n\"\"\"\nclass ElephantDataset(data.Dataset):\n def __init__(self, data_path, transform=None, preprocess=\"norm\", scale=False):\n # Plan: Load in all feature and label names to create a list\n self.data_path = data_path\n self.user_transforms = transform\n self.preprocess = preprocess\n self.scale = scale\n\n # Probably should not have + \"**/\" after data_path? It seems like \n # we are passing the exact datapths anyways! 
Also why recursive?\n self.features = glob.glob(data_path + \"/\" + \"*features*\", recursive=False)\n self.initialize_labels()\n\n assert len(self.features) == len(self.labels)\n\n print(\"Dataset from path {}\".format(data_path))\n print(\"ElephantDataset number of features {} and number of labels {}\".format(len(self.features), len(self.labels)))\n print('Normalizing with {} and scaling {}'.format(preprocess, scale))\n print(\"Shape of a feature is {} and a label is {}\".format(self[0][0].shape, self[0][1].shape))\n\n def initialize_labels(self):\n self.labels = []\n for feature_path in self.features:\n feature_parts = feature_path.split(\"features\")\n self.labels.append(glob.glob(feature_parts[0] + \"labels\" + feature_parts[1])[0])\n\n\n def __len__(self):\n return len(self.features)\n\n \"\"\"\n Return a single element at provided index\n \"\"\"\n def __getitem__(self, index):\n feature = np.load(self.features[index])\n label = np.load(self.labels[index])\n\n feature = self.apply_transforms(feature)\n if self.user_transforms:\n feature = self.user_transforms(feature)\n \n # Honestly may be worth pre-process this\n feature = torch.from_numpy(feature).float()\n label = torch.from_numpy(label).float()\n\n\n return feature, label, self.features[index] # Include the data file\n\n def apply_transforms(self, data):\n if self.scale:\n data = 10 * np.log10(data)\n\n # Normalize Features\n if self.preprocess == \"norm\":\n data = (data - np.mean(data)) / np.std(data)\n elif self.preprocess == \"globalnorm\":\n data = (data - 132.228) / 726.319 # Calculated these over the training dataset \n\n return data\n\n # elif self.preprocess == \"Scale\":\n # scaler = MinMaxScaler()\n # # Scale features for each training example\n # # to be within a certain range. Preserves the\n # # relative distribution of each feature. 
Here\n # # each feature is the different frequency band\n # for i in range(self.features.shape[0]):\n # self.features[i, :, :] = scaler.fit_transform(self.features[i,:,:].astype(np.float32))\n # #num_ex = self.features.shape[0]\n # #seq_len = self.features.shape[1]\n # #self.features = self.features.reshape(num_ex * seq_len, -1)\n # #self.features = scaler.fit_transform(self.features)\n # #self.features = self.features.reshape(num_ex, seq_len, -1)\n # elif self.preprocess == \"ChunkNorm\":\n # for i in range(self.features.shape[0]):\n # self.features[i, :, :] = (self.features[i, :, :] - np.mean(self.features[i, :, :])) / np.std(self.features[i, :, :])\n # elif self.preprocess == \"BackgroundS\":\n # # Load in the pre-calculated mean,std,etc.\n # if not scale:\n # mean_noise = np.load(Noise_Stats_Directory + \"mean.npy\")\n # std_noise = np.load(Noise_Stats_Directory + \"std.npy\")\n # else:\n # mean_noise = np.load(Noise_Stats_Directory + \"mean_log.npy\")\n # std_noise = np.load(Noise_Stats_Directory + \"std_log.npy\")\n\n # self.features = (self.features - mean_noise) / std_noise\n # elif self.preprocess == \"BackgroundM\":\n # # Load in the pre-calculated mean,std,etc.\n # if not scale:\n # mean_noise = np.load(Noise_Stats_Directory + \"mean.npy\")\n # median_noise = np.load(Noise_Stats_Directory + \"median.npy\")\n # else:\n # mean_noise = np.load(Noise_Stats_Directory + \"mean_log.npy\")\n # median_noise = np.load(Noise_Stats_Directory + \"median_log.npy\")\n\n # self.features = (self.features - mean_noise) / median_noise\n # elif self.preprocess == \"FeatureNorm\":\n # self.features = (self.features - np.mean(self.features, axis=(0, 1))) / np.std(self.features, axis=(0,1))\n\n\n\n\n\n\"\"\"\n Dataset for full test length audio\n NEED TO FIX THIS!!\n\"\"\"\nclass ElephantDatasetFull(data.Dataset):\n def __init__(self, spectrogram_files, label_files, gt_calls, preprocess=\"norm\", scale=True):\n\n self.specs = spectrogram_files\n self.labels = label_files\n self.gt_calls = gt_calls # This is the .txt file that contains start and end times of calls\n self.preprocess = preprocess\n self.scale = scale\n \n print('Normalizing with {} and scaling {}'.format(preprocess, scale))\n\n\n def __len__(self):\n return len(self.specs)\n\n\n def transform(self, spectrogram): # We need to fix this probably!!!\n # Potentially include other transforms\n if self.scale:\n spectrogram = 10 * np.log10(spectrogram)\n\n # Quite janky, but for now we will do the normalization \n # seperately!\n '''\n # Normalize Features\n if self.preprocess == \"norm\": # Only have one training example so is essentially chunk norm\n spectrogram = (spectrogram - np.mean(spectrogram)) / np.std(spectrogram)\n elif preprocess == \"Scale\":\n scaler = MinMaxScaler()\n # Scale features for each training example\n # to be within a certain range. Preserves the\n # relative distribution of each feature. 
Here\n # each feature is the different frequency band\n spectrogram = scaler.fit_transform(spectrogram.astype(np.float32))\n elif self.preprocess == \"ChunkNorm\":\n spectrogram = (spectrogram - np.mean(spectrogram)) / np.std(spectrogram)\n elif self.preprocess == \"BackgroundS\":\n # Load in the pre-calculated mean,std,etc.\n if not scale:\n mean_noise = np.load(Noise_Stats_Directory + \"mean.npy\")\n std_noise = np.load(Noise_Stats_Directory + \"std.npy\")\n else:\n mean_noise = np.load(Noise_Stats_Directory + \"mean_log.npy\")\n std_noise = np.load(Noise_Stats_Directory + \"std_log.npy\")\n\n spectrogram = (spectrogram - mean_noise) / std_noise\n elif self.preprocess == \"BackgroundM\":\n # Load in the pre-calculated mean,std,etc.\n if not scale:\n mean_noise = np.load(Noise_Stats_Directory + \"mean.npy\")\n median_noise = np.load(Noise_Stats_Directory + \"median.npy\")\n else:\n mean_noise = np.load(Noise_Stats_Directory + \"mean_log.npy\")\n median_noise = np.load(Noise_Stats_Directory + \"median_log.npy\")\n\n spectrogram = (spectrogram - mean_noise) / median_noise\n elif self.preprocess == \"FeatureNorm\":\n spectrogram = (spectrogram - np.mean(spectrogram, axis=1)) / np.std(spectrogram, axis=1)\n '''\n return spectrogram\n\n \"\"\"\n Return a single element at provided index\n \"\"\"\n def __getitem__(self, index):\n spectrogram_path = self.specs[index]\n label_path = self.labels[index]\n gt_call_path = self.gt_calls[index]\n\n spectrogram = np.load(spectrogram_path)\n label = np.load(label_path)\n\n spectrogram = self.transform(spectrogram)\n #spectrogram = np.expand_dims(spectrogram, axis=0) # Add the batch dimension so we can apply our lstm!\n \n # Honestly may be worth pre-process this\n #spectrogram = torch.from_numpy(spectrogram)\n #label = torch.from_numpy(label)\n\n return spectrogram, label, gt_call_path\n\n\n","sub_path":"src/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":29205,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"213877969","text":"def back(k, sol, sol_max, spectacole):\n global maxim\n ok = False\n for x in spectacole:\n if k == 0 or x[0] >= sol[-1][1]:\n ok = True\n sol.append(x)\n back(k+1, sol, sol_max, spectacole)\n sol.pop()\n \n if ok == False: \n if (k > maxim):\n maxim = k\n sol_max.clear()\n sol_max.append(sol.copy())\n elif k == maxim:\n sol_max.append(sol.copy())\n \n \n\nf = open(\"/home/edi/Desktop/FMI/ProgAlgo/Laborator/Lab6/spectacole.txt\")\nspectacole = []\nfor line in f.read().splitlines():\n ora_inceput = line[:5]\n ora_sfarsit = line[6:11]\n nume_spectacol = line[12:]\n spectacole.append((ora_inceput, ora_sfarsit, nume_spectacol))\n\nsol = []\nsol_max = [None]\nmaxim = 0\nback(0, sol, sol_max, spectacole)\nfor x in sol_max:\n for y in x:\n print(y)\n print()","sub_path":"Sem_1/ProgAlgo/Laborator/Lab6/II_4.py","file_name":"II_4.py","file_ext":"py","file_size_in_byte":888,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"479637293","text":"import tensorflow as tf\nimport math as m\n\nPI = tf.constant(m.pi)\n\na = tf.cos(PI/3.)\nb = tf.sin(PI/3.)\nc = 1.0/a # sec(60)\nd = 1.0/tf.tan(PI/3.) 
# cot(60)\n\n@tf.function\ndef math_values():\n print(\"a:\",a) \n print(\"b:\",b) \n print(\"c:\",c) \n print(\"d:\",d) \n\nmath_values()\n\n","sub_path":"books/Machine Learning/CompanionFiles/code/appendixb-tf2/tf2_trig_values.py","file_name":"tf2_trig_values.py","file_ext":"py","file_size_in_byte":271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"292047152","text":"import numpy as np\nimport cv2\nimport utils\n\n#webcam value when set to false, will use source image at path\nwebcam = False\n\n#Pass the path of source image\npath = '2.jpg'\n\nimgCapture = cv2.VideoCapture(0)\nimgCapture.set(10, 160)\nimgCapture.set(3, 1920)\nimgCapture.set(4, 1080)\n#define scale\nscale = 2\nwP = 210*scale\nhP = 297*scale\n\nwhile True:\n if webcam:\n success, img = imgCapture.read()\n else:\n img = cv2.imread(path)\n\n imgContours, Contours1 = utils.getContours(img, minArea=50000, filter=4)\n if len(Contours1) != 0:\n biggest = Contours1[0][2]\n # print(biggest)\n imgWarp = utils.warpImg(img, biggest, wP, hP)\n #cv2.imshow('A4', imgWarp)\n imgContours2, Contours2 = utils.getContours(imgWarp, minArea=2000, filter=4,\n cThresh=[50, 50], draw = False)\n if len(Contours1)!=0:\n for obj in Contours2:\n cv2.polylines(imgContours2, [obj[2]], True, (0,255,0), 2)\n newPoints = utils.reorder(obj[2])\n #Divide no of pixels by the scale value\n newWidth = round((utils.findDist(newPoints[0][0]//scale, newPoints[1][0]//scale)/10), 1)\n newHeight = round((utils.findDist(newPoints[0][0]//scale, newPoints[2][0]//scale)/10), 1)\n cv2.arrowedLine(imgContours2, (newPoints[0][0][0], newPoints[0][0][1]),\n (newPoints[1][0][0], newPoints[1][0][1]),\n (255, 0, 255), 3, 8, 0, 0.05)\n cv2.arrowedLine(imgContours2, (newPoints[0][0][0], newPoints[0][0][1]),\n (newPoints[2][0][0], newPoints[2][0][1]),\n (255, 0, 255), 3, 8, 0, 0.05)\n x, y, w, h = obj[3]\n cv2.putText(imgContours2, '{}cm'.format(newWidth), (x + 30, y - 10), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1.5,\n (255, 0, 255), 2)\n cv2.putText(imgContours2, '{}cm'.format(newHeight), (x - 70, y + h // 2), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1.5,\n (255, 0, 255), 2)\n\n cv2.imshow('A4', imgContours2)\n\n img = cv2.resize(img, (0, 0), None, 0.5, 0.5)\n #Print original image\n cv2.imshow(\"Original\", img)\n cv2.waitKey(1)\n","sub_path":"objectMeasurement.py","file_name":"objectMeasurement.py","file_ext":"py","file_size_in_byte":2310,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"634865567","text":"from setuptools import setup\n\nDESCRIPTION = 'Storage utilities for the Rad-I/O project.'\nLONG_DESCR = DESCRIPTION\n\nsetup(\n name='radio_storage',\n version='0.0.1',\n description=DESCRIPTION,\n long_description=LONG_DESCR,\n url='https://gitlab.com/',\n author='Rad-I/O',\n author_email='a96tudor@gmail.com',\n license='GPLv3',\n classifiers=[\n 'Development Status :: 3 - Alpha',\n 'Intended Audience :: End Users/Desktop',\n 'Topic :: Games/Entertainment',\n 'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',\n 'Programming Language :: Python :: 3',\n ],\n keywords='Rad-I/O storage',\n packages=['storage', 'storage.drivers'],\n install_requires=[\n 'numpy', 'psycopg2', 'tensorflow', 'passlib',\n ],\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":788,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"123501950","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 
-*-\nimport numpy as np\nfrom Func import write_data\n\n\ndata = np.zeros((512, 512))\ndata[143:399, 151] = 1\ndata[143:399, 406] = 1\ndata[143, 151:407] = 1\ndata[398, 151:407] = 1\nwrite_data('./extra/Frame.xlsx', data, sheetname='Frame')\n# Builds the Frame and stores it in an Excel sheet\n","sub_path":"Alpha/Frame_builder.py","file_name":"Frame_builder.py","file_ext":"py","file_size_in_byte":317,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"472524993","text":"'''\nIn-order successor of a given node in a BST\n\nInterview Question 04.06. Successor LCCI\n'''\n\n\n# Definition for a binary tree node.\n# class TreeNode:\n#     def __init__(self, x):\n#         self.val = x\n#         self.left = None\n#         self.right = None\n\n# Approach 1: in-order traversal with an explicit stack\nclass Solution:\n    def inorderSuccessor(self, root: TreeNode, p: TreeNode) -> TreeNode:\n        if root == None:\n            return None\n        \n        stack = []\n        flag = False\n        while stack or root:\n            while root:\n                stack.append(root)\n                root = root.left\n            \n            curr = stack.pop()\n            if flag:\n                return curr\n            \n            if curr == p:\n                flag = True\n            \n            if curr.right:\n                root = curr.right\n        return None\n\n# Approach 2: binary search down the BST\nclass Solution:\n    def inorderSuccessor(self, root: TreeNode, p: TreeNode) -> TreeNode:\n        if root == None:\n            return None \n        \n        # if root == p:\n        #     return self.inorderSuccessor(root.right, p)\n        # elif root.val < p.val:\n\n        res = None\n        curr = root\n\n        while curr:\n            if curr.val <= p.val:\n                curr = curr.right\n            else:\n                res = curr\n                curr = curr.left\n        return res\n","sub_path":"Python/Binary Search Tree/285. Inorder Successor in BST.py","file_name":"285. Inorder Successor in BST.py","file_ext":"py","file_size_in_byte":1366,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"414840528","text":"# fibonacci_functions.py\r\n# Jan 2, 2016\r\n\r\n# Loop\r\ndef fib1(n):\r\n    a, b = 0, 1\r\n    for _ in range(n):\r\n        c = b + a\r\n        a, b = b, c\r\n    return a\r\n\r\n# Recursion\r\ndef fib2(n):\r\n    if n in (0,1):\r\n        return n\r\n    else:\r\n        return fib2(n-1) + fib2(n-2)\r\n\r\n# Generator\r\ndef fib3(n):\r\n    a, b = 0, 1\r\n    for _ in range(n):\r\n        yield a\r\n        c = b + a\r\n        a, b = b, c\r\n\r\n# Print results\r\nprint(\"Fibonacci Loop:\")\r\nfor i in range(8):\r\n    print(fib1(i), end=', ')\r\nprint('\\n')\r\n\r\nprint(\"Fibonacci Recursion:\")\r\nfor i in range(8):\r\n    print(fib2(i), end=', '),\r\nprint('\\n')\r\n\r\nprint(\"Fibonacci Generator:\")\r\nfor i in fib3(8):\r\n    print(i, end=', '),\r\nprint('\\n')\r\n","sub_path":"fibonacci_generator.py","file_name":"fibonacci_generator.py","file_ext":"py","file_size_in_byte":698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"542566468","text":"import re\r\n\r\n# we don't care about case sensitivity and therefore use lower:\r\nabcfile = open(\"abc.txt\").read().lower()\r\n\r\nwords = re.findall(r\"\\b[\\w-]+\\b\", abcfile)\r\nprint(\"Myfile contains in total: \" + str(len(words)))\r\n\r\nfor x in [\"the\", \"of\", \"on\", \"to\", \"this\"]:\r\n    print(\"'\" + x + \"' occurs \" + str(words.count(x)) + \" times\" )\r\n\r\n","sub_path":"readingile.py","file_name":"readingile.py","file_ext":"py","file_size_in_byte":332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"644550667","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Aug 26 21:03:04 2017\n\nConduct experiment on IEMOCAP, three labels: \n    \n    96001: emotion(0-4, 5 = other emotions)\n    96002: speaker(0-9)\n    96003: gender(male=0, female=1)\n    \n\n@author: Kyle\n\"\"\"\n\nimport 
os\nfrom sys import argv\n_, newFolderName, gpuI = argv\n\nos.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = str(gpuI)\n\nimport sys\nsys.path.append(\"../../model/\")\nimport soundNet\nsys.path.append(\"../\")\nimport expUtil\nimport numpy as np\nimport tensorflow as tf\nfrom keras.utils import np_utils\nfrom tensorflow.python.platform import tf_logging as logging\nfrom keras import backend as K\nimport matplotlib.pyplot as plt\nfrom cleverhans.attacks import FastGradientMethod\nfrom cleverhans.utils_keras import KerasModelWrapper\nfrom keras.models import load_model\nfrom cleverhans.model import CallableModelWrapper\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.metrics import accuracy_score\nimport time\nimport shutil\n\n#%% create a folder to save the model, the code, and the model configuration \nwhile os.path.isdir( newFolderName ):\n    newFolderName = newFolderName + '_1'\n    print( 'exist' )\n\nos.mkdir( newFolderName )\nshutil.copy( 'emotionSoundNet.py', newFolderName )\nshutil.copy( '../../model/soundNet.py', newFolderName )\n\n#%% fix random seed and session\ntf.set_random_seed( 7 )\nsess = tf.Session( )\nK.set_session( sess )\n\n#%% load data, divide it into training/test sets, and separate out the labels \n# normalize the feature to [0, 1]\n# for emotion tests, filter out value = 4 (other emotions)\n# folder list, i.e., IEMOCAP has 5 sessions, speakers are independent between sessions, always use the leave-one-session-out strategy\nfolderList = [ 0, 1, 2, 3, 4 ]\ntestFolder = 4\n\ntrainFolderList = folderList.copy( )\ndel trainFolderList[ testFolder - 1 ]\n\nsampleRate = 16000\nprecision = 'original'\ndataFileFolder = '../../../processedData/waveform/' + str( sampleRate ) + '_' + precision + '/session_'\n\nfold = [ 0, 0, 0, 0, 0 ]\nfor i in folderList:\n    fold[ i ] = eval( 'expUtil.iter_loadtxt( dataFileFolder + str(' + str( i + 1 ) + ') + \".csv\" )' )\n\n# separate training and testing data\ntrainData = eval( 'np.concatenate( ( fold[ ' + str( trainFolderList[ 0 ] ) + \\\n                  ' ], fold[ ' + str( trainFolderList[ 1 ] ) + \\\n                  ' ], fold[ ' + str( trainFolderList[ 2 ] ) + \\\n                  ' ], fold[ ' + str( trainFolderList[ 3 ] ) + ' ] ), axis=0 )' )\ntestData = eval( 'fold[ ' + str( testFolder ) + ' ]' )\n\ntrainFeature, trainEmotionLabel = expUtil.processData( trainData, task = 'emotion' )\ntestFeature, testEmotionLabel = expUtil.processData( testData, task = 'emotion' )\n\n#%% define training parameters\nbatch_size = 32\nlearningRate = 0.0001\niterationNum = 100\n\n\"\"\"Trains the audio model.\n\n    Args:\n        feature: [ sample_size, audio_length ]\n        label: one-hot style\n\"\"\"\n\ndef train( testFeature, testLabel, trainFeature, trainLabel, iteration_num = 100, lr_decay = 0.1 ):\n    \n    result = np.zeros( [ 2, iteration_num ] )\n    class_num = testLabel.shape[ 1 ]\n    train_datasize = trainFeature.shape[ 0 ]\n    \n    with tf.Session() as sess:\n        \n        # changeable learning rate \n        global_step = tf.Variable(0)  \n        learning_rate = tf.train.exponential_decay( learningRate, global_step, int( iteration_num *(train_datasize/batch_size) ), lr_decay, staircase=False)  \n\n        # fix random index for reproducing result \n        tf.set_random_seed( 17 )\n        input_x = tf.placeholder( tf.float32, shape = ( batch_size, 96000 ), name = 'inputx' )\n        input_y = tf.placeholder( tf.float32, shape = ( batch_size, class_num ), name = 'inputy' )\n        modelT = soundNet.soundNet\n        \n        # define a set of adversarial attacks (FGSM under different norms)\n        orderList = [ np.inf, 1, 2 ]\n        
advList = orderList.copy( )\n fgsmList = orderList.copy( )\n advOut = orderList.copy( )\n for epsIndex in range( 0, len( orderList ) ):\n fgsmList[ epsIndex ] = FastGradientMethod( modelT , sess = sess )\n fgsm_params = { 'eps': 1, 'y': input_y, 'ord': orderList[ epsIndex ] }\n advList[ epsIndex ] = fgsmList[ epsIndex ].generate( input_x, **fgsm_params )\n advOut[ epsIndex ] = tf.multiply( advList[ epsIndex ], 1, name = 'adv'+str( epsIndex ) )\n \n prediction = modelT( input_x, numClass = class_num )\n loss = tf.reduce_mean( tf.nn.softmax_cross_entropy_with_logits( logits = prediction, labels= input_y ) )\n train_step = tf.train.AdamOptimizer( learning_rate ).minimize( loss, global_step = global_step )\n correct_prediction = tf.equal( tf.argmax( prediction, 1 ), tf.argmax( input_y, 1 ) )\n accuracy = tf.reduce_mean( tf.cast( correct_prediction, tf.float32 ), name=\"acc_restore\" )\n \n saver = tf.train.Saver()\n \n # initialize the data \n init_op = tf.global_variables_initializer( )\n sess.run( init_op )\n \n # number of iterations\n for iteration in range( 0, iteration_num ):\n # each batch\n for i in range( 0, 1 *int( train_datasize / batch_size ) ):\n \n start = ( i * batch_size ) % train_datasize\n end = min( start + batch_size, train_datasize )\n \n inputTrainFeature = trainFeature[ start: end ]\n inputTrainLabel = trainLabel[ start: end ]\n \n _, lossShow = sess.run( [ train_step, loss ], feed_dict = { input_x: inputTrainFeature, input_y: inputTrainLabel } )\n #print( 'loss = ' + str( lossShow ) )\n \n # get accuracy on a small subset of test data (just several epoch), a very fast approximation of the performance \n testBatchNum = 3\n testSubsetResult = [ None ] *( batch_size *testBatchNum )\n testSubsetLabel = [ None ] *( batch_size *testBatchNum )\n for testBatch in range( 0, testBatchNum ): # 3*32=96 test samples\n start = testBatch * batch_size \n end = start + batch_size\n inputTestFeature = testFeature[ start: end ]\n inputTestLabel = testLabel[ start: end ] \n tempTestResult, tempAccuracyTest = sess.run( [ prediction, accuracy ], feed_dict = { input_x: inputTestFeature, input_y: inputTestLabel } ) \n testSubsetLabel[ start :end ] = tf.argmax( inputTestLabel, 1 )\n testSubsetResult[ start :end ] = tf.argmax( tempTestResult, 1 ) \n #np.savetxt( newFolderName + '/testResult.csv', testResult, delimiter = ',' )\n #np.savetxt( newFolderName + '/testLabel.csv', inputTestLabel, delimiter = ',' )\n accuracyTest = accuracy_score( testSubsetLabel, testSubsetResult )\n print( confusion_matrix( testSubsetLabel, testSubsetResult ) )\n result[ 0, iteration ] = accuracyTest\n print( 'Epoch:' + str( iteration ) + ' result on test: ' + str( accuracyTest ) )\n \n # get accuracy on a small subset of training data (just one epoch), a very fast approximation of the training loss/ overfitting \n inputTestTrainFeature = trainFeature[ 0: batch_size, : ]\n inputTestTrainLabel = trainLabel[ 0: batch_size, : ]\n testTrainResult, accuracyTrain = sess.run( [ prediction, accuracy ], feed_dict = { input_x: inputTestTrainFeature, input_y: inputTestTrainLabel } ) \n print( 'Epoch:' + str( iteration ) + ' result on train: ' + str( accuracyTrain ) )\n #np.savetxt( newFolderName + '/testTrainResult.csv', testTrainResult, delimiter = ',' )\n #np.savetxt( newFolderName + '/testTrainLabel.csv', inputTestTrainLabel, delimiter = ',' )\n result[ 1, iteration ] = accuracyTrain\n print( '-----------------------------' )\n print( sess.run(global_step) ) \n print( sess.run(learning_rate) )\n # record the accuracy of both 
test/ training error approximation on the small subset\n np.savetxt( newFolderName + '/accuracy.csv', result, delimiter = ',' )\n \n # save model every 10 epoches\n if ( iteration + 1 )%10 == 0:\n save_path = saver.save( sess, newFolderName + '/model_' + str( iteration + 1 ) + '_.ckpt' )\n print(\"Model saved in file: %s\" % save_path)\n \n resultOnTest = result[ 0, : ]\n resultOnTrain = result[ 1, : ]\n plt.plot( list( range( iteration_num ) ), resultOnTrain )\n plt.plot( list( range( iteration_num ) ), resultOnTest )\n plt.savefig( newFolderName + '/accuracy.png' )\n \n #%% get adversarial samples\n# for epsIndex in range( 0, len( orderList ) ): \n# data_update = np.copy( feature[ train_datasize: whole_datasize, : ] )\n# # mini-batch generation on training data\n# for i in range( 0, int( ( whole_datasize - train_datasize ) / batch_size ) ):\n# start = i * batch_size\n# end = ( i + 1 ) * batch_size\n# data_update[ start:end, : ] = sess.run( advList[ epsIndex ], feed_dict = { input_x: feature[ train_datasize + start: train_datasize + end, : ], input_y: label[ train_datasize + start: train_datasize + end, : ] } ) \n# np.savetxt( newFolderName + '/adv'+str( orderList[ epsIndex ] ) + '.csv', data_update, delimiter = ',' )\n# return data_update\n#%% start test \ntrain( testFeature, testEmotionLabel, trainFeature, trainEmotionLabel )\n#np.savetxt( newFolderName + '/testSet.csv', dataSet[ train_datasize: whole_datasize, : ], delimiter = ',' )\n#np.savetxt( newFolderName + '/testFeature.csv', feature[ train_datasize: train_datasize + batch_size ], delimiter = ',' )\n#np.savetxt( newFolderName + '/testLabelGender.csv', genderLabel[ train_datasize: train_datasize + batch_size ], delimiter = ',' )\n\n\n","sub_path":"code/experiment/EmotionSoundNet/ex1_1_1_1_1_1_1_1_1/emotionSoundNet.py","file_name":"emotionSoundNet.py","file_ext":"py","file_size_in_byte":10069,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"324500658","text":"\"\"\"\nCalculates class specific and over-all evaluation scores on the model\nScores: TP,FN,FP,precision,recall,f2score\n\"\"\"\n\n\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import absolute_import\nimport random\nimport pprint\nimport sys\nimport time\nimport numpy as np\nimport pickle\nimport math\nimport cv2\nimport copy\nimport matplotlib as mpl\nfrom matplotlib import pyplot as plt\nimport matplotlib.patches as patches\nimport tensorflow as tf\nimport pandas as pd\nimport os\nimport six\n\nfrom sklearn.metrics import average_precision_score\n\nfrom keras import backend as K\nfrom keras.optimizers import Adam, SGD, RMSprop\nfrom keras.layers import Flatten, Dense, Input, Conv2D, MaxPooling2D, Dropout\nfrom keras.layers import GlobalAveragePooling2D, GlobalMaxPooling2D, TimeDistributed\nfrom keras.engine.topology import get_source_inputs\nfrom keras.utils import layer_utils\nfrom keras.utils.data_utils import get_file\nfrom keras.objectives import categorical_crossentropy\n\nfrom keras.models import Model\nfrom keras.utils import generic_utils\nfrom keras.engine import Layer, InputSpec\nfrom keras import initializers, regularizers\nimport argparse\nimport datetime\nfrom definitions import *\n\n#TODO: bbox_threshold needs to be defined\n\n\"\"\"Predict Classes\"\"\"\n\ndef predict(C,model_rpn, model_classifier_only, class_mapping,test_base_path,bbox_threshold):\n\n classes = pd.DataFrame(columns=['image','pred_classes'])\n\n img_names = os.listdir(test_base_path)\n\n for 
img_name in img_names:\n        #print('Get classes of {}/{}'.format(idx, len(test_imgs)))\n        #img_name = img_path.split('/')[-1]\n        #print(image_data['bboxes'])\n\n        if not img_name.lower().endswith(('.bmp', '.jpeg', '.jpg', '.png', '.tif', '.tiff')):\n            continue\n        #print(img_path)\n        st = time.time()\n\n        '''Predict'''\n        img_path = os.path.join(test_base_path, img_name)\n        img = cv2.imread(img_path)\n\n        X, ratio = format_img(img,C)  # X: normalized image (short side 300 pixels), ratio = 300 / original length of the short side\n\n        X = np.transpose(X, (0, 2, 3, 1))\n\n        # get output layer Y1, Y2 from the RPN and the feature maps F\n        # Y1: y_rpn_cls\n        # Y2: y_rpn_regr\n        [Y1, Y2, F] = model_rpn.predict(X)\n\n        # Get bboxes by applying NMS\n        # R.shape = (300, 4)\n        R = rpn_to_roi(Y1, Y2, C, K.image_dim_ordering(), overlap_thresh=0.7)\n\n        # convert from (x1,y1,x2,y2) to (x,y,w,h)\n        R[:, 2] -= R[:, 0]\n        R[:, 3] -= R[:, 1]\n\n        # apply the spatial pyramid pooling to the proposed regions\n        bboxes = {}\n        probs = {}\n\n        for jk in range(R.shape[0] // C.num_rois + 1):\n            ROIs = np.expand_dims(R[C.num_rois * jk:C.num_rois * (jk + 1), :], axis=0)  # select num_rois boxes\n            if ROIs.shape[1] == 0:  # if ROIs is empty, we are done\n                break\n\n            if jk == R.shape[0] // C.num_rois:  # if the last batch of ROIs is not completely filled, pad it\n                # pad R\n                curr_shape = ROIs.shape\n                target_shape = (curr_shape[0], C.num_rois, curr_shape[2])\n                ROIs_padded = np.zeros(target_shape).astype(ROIs.dtype)\n                ROIs_padded[:, :curr_shape[1], :] = ROIs\n                ROIs_padded[0, curr_shape[1]:, :] = ROIs[0, 0, :]\n                ROIs = ROIs_padded\n\n            [P_cls, P_regr] = model_classifier_only.predict([F, ROIs])\n\n            # Calculate bboxes coordinates on resized image\n            for ii in range(P_cls.shape[1]):\n\n                cls_name = class_mapping[np.argmax(P_cls[0, ii, :])]\n\n                # Ignore 'bg' class\n                if np.max(P_cls[0, ii, :]) < bbox_threshold[cls_name] or np.argmax(P_cls[0, ii, :]) == (P_cls.shape[2] - 1):\n                    continue\n\n                if cls_name not in bboxes:\n                    bboxes[cls_name] = []\n                    probs[cls_name] = []\n\n                (x, y, w, h) = ROIs[0, ii, :]\n\n                cls_num = np.argmax(P_cls[0, ii, :])\n                try:\n                    (tx, ty, tw, th) = P_regr[0, ii, 4 * cls_num:4 * (cls_num + 1)]\n                    tx /= C.classifier_regr_std[0]\n                    ty /= C.classifier_regr_std[1]\n                    tw /= C.classifier_regr_std[2]\n                    th /= C.classifier_regr_std[3]\n                    x, y, w, h = apply_regr(x, y, w, h, tx, ty, tw, th)\n                except:\n                    pass\n                bboxes[cls_name].append(\n                    [C.rpn_stride * x, C.rpn_stride * y, C.rpn_stride * (x + w), C.rpn_stride * (y + h)])\n                probs[cls_name].append(np.max(P_cls[0, ii, :]))\n\n\n        pred_class_list = []  # contains all predicted class instances\n\n        for key in bboxes:\n            bbox = np.array(bboxes[key])\n\n            new_boxes, new_probs = non_max_suppression_fast(bbox, np.array(probs[key]), overlap_thresh=0.2)\n\n            for jk in range(new_boxes.shape[0]):\n                pred_class_list.append(key)\n\n\n        new_row = {'image': img_name, 'pred_classes': pred_class_list}\n        classes = classes.append(new_row, ignore_index=True)\n\n    return classes\n\n\n\n\n'''---------------------- main----------------------------'''\n\nif __name__ == '__main__':\n    \"\"\"General settings\"\"\"\n    parser = argparse.ArgumentParser(description='The following parameters can be assigned:')\n    parser.add_argument('--session_name', required=True, type=str)\n    parser.add_argument('--base_path', required=True, type=str)\n    parser.add_argument('--test_base_path', required=True, type=str)\n    parser.add_argument('--out_path', required=True, type=str)\n    parser.add_argument('--threshold_path', required=False,default=None, type=str)\n    args = parser.parse_args()\n\n    base_path = 
args.base_path # path config and models are stored in\n test_base_path = args.test_base_path # directory containing the pictures that are to be predicted\n threshold_path = args.threshold_path # path to the thresholds (minimum probability for a class to be output)\n output_path = os.path.join(base_path, 'sessions', args.session_name)\n predict_store_path = os.path.join(args.out_path, \"Prediction on {}\".format(\n datetime.datetime.now().strftime(\"%A, %d %b %Y,%H %M\"))) # path to save output figures in\n classes_path = os.path.join(predict_store_path, 'predicted_classes.csv')\n\n\n print('This is a Prediction Session of ->{}<-.'.format(args.session_name))\n print('Base Path: {}'.format(base_path))\n print('Output: {}'.format(predict_store_path))\n\n\n\n '''Prepare Model'''\n \"\"\"Define Config\"\"\"\n config_output_filename = os.path.join(output_path, 'model', 'model_vgg_config.pickle')\n assert (os.path.exists(\n config_output_filename)), \"Config File {} missing, Check if training has been performed with given session name\".format(\n config_output_filename)\n os.makedirs(predict_store_path)\n\n with open(config_output_filename, 'rb') as f_in:\n C = pickle.load(f_in)\n\n # turn off any data augmentation at test time\n C.use_horizontal_flips = False\n C.use_vertical_flips = False\n C.rot_90 = False\n\n #Load thresholds\n threshold_df = pd.read_csv(threshold_path)\n print(threshold_df)\n threshold=threshold_df.to_dict('index')[0]\n\n print('Using Thresholds of file {}'.format(threshold_path))\n print('Thresholds{}'.format(threshold))\n\n\n # Load the records\n record_df = pd.read_csv(C.record_path)\n\n r_epochs = len(record_df)\n\n num_features = 512\n\n input_shape_img = (None, None, 3)\n input_shape_features = (None, None, num_features)\n\n img_input = Input(shape=input_shape_img)\n roi_input = Input(shape=(C.num_rois, 4))\n feature_map_input = Input(shape=input_shape_features)\n\n # define the base network (VGG here, can be Resnet50, Inception, etc)\n shared_layers = nn_base(img_input, trainable=True)\n\n # define the RPN, built on the base layers\n num_anchors = len(C.anchor_box_scales) * len(C.anchor_box_ratios)\n rpn_layers = rpn_layer(shared_layers, num_anchors)\n\n classifier = classifier_layer(feature_map_input, roi_input, C.num_rois, nb_classes=len(C.class_mapping))\n\n model_rpn = Model(img_input, rpn_layers)\n model_classifier_only = Model([feature_map_input, roi_input], classifier)\n\n model_classifier = Model([feature_map_input, roi_input], classifier)\n\n print('Loading weights from {}'.format(C.model_path))\n model_rpn.load_weights(C.model_path, by_name=True)\n model_classifier.load_weights(C.model_path, by_name=True)\n\n model_rpn.compile(optimizer='sgd', loss='mse')\n model_classifier.compile(optimizer='sgd', loss='mse')\n\n # Switch key value for class mapping\n class_mapping = C.class_mapping\n class_mapping = {v: k for k, v in class_mapping.items()}\n print(class_mapping)\n class_to_color = {class_mapping[v]: np.random.randint(0, 255, 3) for v in class_mapping}\n\n\n '''------------Predict-----------------'''\n\n classes = predict(C,model_rpn, model_classifier_only,class_mapping, test_base_path, threshold)\n classes.to_csv(classes_path,sep=';', index=0)\n\n\n","sub_path":"frcnn_predict.py","file_name":"frcnn_predict.py","file_ext":"py","file_size_in_byte":9096,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
{"seq_id":"239669722","text":"\"\"\"\nObjective functions to be optimized; returns multiple objective values \n\"\"\"\nimport numpy as np \n\ndef function(X):\n y1 = 1 - 
np.exp(-np.sum((X-1/np.sqrt(3))**2)) \n y2 = 1 - np.exp(-np.sum((X+1/np.sqrt(3))**2)) \n return y1, y2 \n\nif __name__ == \"__main__\":\n tX = np.array([-0.57735, -0.57735, -0.57735]) \n print(function(tX))","sub_path":"NSGA2算法python实现/function.py","file_name":"function.py","file_ext":"py","file_size_in_byte":322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"180632147","text":"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\nfrom pyarrow.compat import unittest\nimport pyarrow as arrow\n\nA = arrow\n\nimport pandas as pd\n\n\nclass TestColumn(unittest.TestCase):\n\n def test_basics(self):\n data = [\n A.from_pylist([-10, -5, 0, 5, 10])\n ]\n table = A.Table.from_arrays(('a'), data, 'table_name')\n column = table.column(0)\n assert column.name == 'a'\n assert column.length() == 5\n assert len(column) == 5\n assert column.shape == (5,)\n assert column.to_pylist() == [-10, -5, 0, 5, 10]\n\n def test_pandas(self):\n data = [\n A.from_pylist([-10, -5, 0, 5, 10])\n ]\n table = A.Table.from_arrays(('a'), data, 'table_name')\n column = table.column(0)\n series = column.to_pandas()\n assert series.name == 'a'\n assert series.shape == (5,)\n assert series.iloc[0] == -10\n\n","sub_path":"python/pyarrow/tests/test_column.py","file_name":"test_column.py","file_ext":"py","file_size_in_byte":1652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"36163201","text":"# System libs\nimport os\nimport time\n# import math\nimport random\nimport argparse\n# Numerical libs\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nfrom scipy.io import loadmat\nfrom scipy.misc import imresize, imsave\n# Our libs\nfrom dataset import GTA, CityScapes, BDD\nfrom models import ModelBuilder\nfrom utils import AverageMeter, colorEncode, accuracy, randomSampler, similiarityPenalty, make_variable, \\\n intersectionAndUnion\nimport hardmining\n\nimport matplotlib\n\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nfrom tqdm import tqdm\n\n# todo: change to gta 2 bdd\n\n\ntrainID2Class = {\n 0: 'road',\n 1: 'sidewalk',\n 2: 'building',\n 3: 'wall',\n 4: 'fence',\n 5: 'pole',\n 6: 'traffic light',\n 7: 'traffic sign',\n 8: 'vegetation',\n 9: 'terrain',\n 10: 'sky',\n 11: 'person',\n 12: 'rider',\n 13: 'car',\n 14: 'truck',\n 15: 'bus',\n 16: 'train',\n 17: 'motorcycle',\n 18: 'bicycle'\n}\n\n\ndef forward_with_loss(nets, batch_data, args, is_train=True, is_adapt=False, epoch=0):\n (net_encoder, net_decoder_1, net_decoder_2, net_syn, crit) = nets\n (imgs, segs, infos) = batch_data\n\n # feed input data\n input_img = Variable(imgs, volatile=not is_train)\n label_seg = Variable(segs, volatile=not is_train)\n input_img = 
input_img.cuda()\n label_seg = label_seg.cuda()\n\n # forward\n pred_featuremap_1 = net_decoder_1(net_encoder(input_img))\n pred_featuremap_2 = net_decoder_2(net_encoder(input_img))\n pred_featuremap_syn = net_syn(pred_featuremap_1, pred_featuremap_2)\n\n weights1 = net_decoder_1.module.get_weights()\n weights2 = net_decoder_2.module.get_weights()\n\n if is_adapt:\n if args.source_only:\n # do nothing\n err_1 = 0\n err_2 = 0\n err_syn = 0\n else:\n if not args.easy_mining:\n _, pred_1 = torch.max(pred_featuremap_1, 1)\n _, pred_2 = torch.max(pred_featuremap_2, 1)\n _, pred_syn = torch.max(pred_featuremap_syn, 1)\n\n\n err_1 = crit(pred_featuremap_1, pred_syn)\n err_2 = crit(pred_featuremap_2, pred_syn)\n err_syn = 0\n else:\n\n _, pred_1 = torch.max(pred_featuremap_1, 1)\n _, pred_2 = torch.max(pred_featuremap_2, 1)\n _, pred_syn = torch.max(pred_featuremap_syn, 1)\n\n # reshape the feature map as class_num * (batch_size * h * w)\n pred_1 = pred_1.view(1, -1)\n pred_2 = pred_2.view(1, -1)\n pred_syn = pred_syn.view(1, -1)\n\n adapt_idx = (torch.eq(pred_1, pred_2)).squeeze()\n\n # all the rest are ignored indexes\n ignored_idx = (adapt_idx == 0).nonzero().squeeze()\n\n\n\n if len(ignored_idx.size()) > 0:\n pred_syn[..., ignored_idx] = -1\n\n # reshape back to use NLLLoss2d\n pred_syn = pred_syn.view(pred_featuremap_syn.size(0), pred_featuremap_syn.size(2), pred_featuremap_syn.size(3))\n\n if len(adapt_idx.size()) > 0:\n err_1 = crit(pred_featuremap_1, pred_syn)\n err_2 = crit(pred_featuremap_2, pred_syn)\n # err_syn = crit(pred_featuremap_syn, pred_syn)\n err_syn = 0\n else:\n err_1 = 0\n err_2 = 0\n err_syn = 0\n else:\n _, pred_1 = torch.max(pred_featuremap_1, 1)\n _, pred_2 = torch.max(pred_featuremap_2, 1)\n _, pred_syn = torch.max(pred_featuremap_syn, 1)\n\n # reshape the feature map as class_num * (batch_size * h * w)\n pred_1 = pred_1.view(1, -1)\n pred_2 = pred_2.view(1, -1)\n pred_syn = pred_syn.view(1, -1)\n\n label_seg = label_seg.view(1, -1)\n\n adapt_idx = (torch.eq(pred_1, pred_2)).squeeze()\n agreed_idx = (adapt_idx == 1).nonzero().squeeze()\n\n if not args.source_only and args.hard_prob_modifier_handle is not None:\n hard_prob = args.hard_prob_modifier_handle(epoch, args.hard_filtering_final_epoch)\n\n drop_num = int(agreed_idx.size(0) * hard_prob)\n\n if drop_num > 0:\n # randomly shuffle agreed_idx\n shuffle_idx = torch.randperm(agreed_idx.size(0))\n shuffle_idx = Variable(shuffle_idx.cuda(agreed_idx.get_device()))\n agreed_idx = agreed_idx[shuffle_idx]\n agreed_drop_idx = agreed_idx[0:drop_num]\n label_seg[..., agreed_drop_idx] = -1\n\n label_seg = label_seg.view(pred_featuremap_syn.size(0), pred_featuremap_syn.size(2), pred_featuremap_syn.size(3))\n err_1 = crit(pred_featuremap_1, label_seg)\n err_2 = crit(pred_featuremap_2, label_seg)\n err_syn = crit(pred_featuremap_syn, label_seg)\n\n err_sim = similiarityPenalty(weights1.squeeze(), weights2.squeeze())\n\n err = err_1 + err_2 + args.alpha * err_sim + args.beta * err_syn\n\n return pred_featuremap_syn, err\n\n\ndef visualize(batch_data, pred, args):\n colors = loadmat('../colormap.mat')['colors']\n (imgs, segs, infos) = batch_data\n for j in range(len(infos)):\n # get/recover image\n # img = imread(os.path.join(args.root_img, infos[j]))\n img = imgs[j].clone()\n for t, m, s in zip(img,\n [0.485, 0.456, 0.406],\n [0.229, 0.224, 0.225]):\n t.mul_(s).add_(m)\n img = (img.numpy().transpose((1, 2, 0)) * 255).astype(np.uint8)\n\n # segmentation\n lab = segs[j].numpy()\n lab_color = colorEncode(lab, colors)\n\n # prediction\n 
pred_ = np.argmax(pred.data.cpu()[j].numpy(), axis=0)\n pred_color = colorEncode(pred_, colors)\n\n # aggregate images and save\n im_vis = np.concatenate((img, lab_color, pred_color),\n axis=1).astype(np.uint8)\n imsave(os.path.join(args.vis,\n infos[j].replace('/', '_')), im_vis)\n\n\n# train one epoch\ndef train(nets, loader, loader_adapt, optimizers, history, epoch, args):\n batch_time = AverageMeter()\n data_time = AverageMeter()\n\n # switch to train mode\n for net in nets:\n if not args.fix_bn:\n net.train()\n else:\n net.eval()\n\n # main loop\n tic = time.time()\n # for i, batch_data in enumerate(loader):\n for i in range(args.epoch_iters):\n batch_data, is_adapt = randomSampler(args.ratio_source_init, args.ratio_source_final, \\\n args.ratio_source_final_epoch, epoch, loader, loader_adapt)\n\n data_time.update(time.time() - tic)\n for net in nets:\n net.zero_grad()\n\n # forward pass\n pred, err = forward_with_loss(nets, batch_data, args, is_train=True, is_adapt=is_adapt, epoch=epoch)\n\n # Backward\n err.backward()\n\n for net in nets:\n nn.utils.clip_grad_norm(net.parameters(), 1)\n # for param in net.parameters():\n # print(param.grad.data.shape, param.grad.data.sum())\n\n for optimizer in optimizers:\n optimizer.step()\n\n # measure elapsed time\n batch_time.update(time.time() - tic)\n tic = time.time()\n\n # calculate accuracy, and display\n if i % args.disp_iter == 0:\n acc, _ = accuracy(batch_data, pred)\n\n print('Epoch: [{}][{}/{}], Time: {:.2f}, Data: {:.2f}, '\n 'lr_encoder: {}, lr_decoder: {}, '\n 'Accuracy: {:4.2f}%, Loss: {}'\n .format(epoch, i, args.epoch_iters,\n batch_time.average(), data_time.average(),\n args.lr_encoder, args.lr_decoder,\n acc * 100, err.data[0]))\n\n fractional_epoch = epoch - 1 + 1. * i / args.epoch_iters\n history['train']['epoch'].append(fractional_epoch)\n history['train']['err'].append(err.data[0])\n history['train']['acc'].append(acc)\n\n\ndef evaluate(nets, loader, history, epoch, args):\n print('Evaluating at {} epochs...'.format(epoch))\n loss_meter = AverageMeter()\n acc_meter = AverageMeter()\n intersection_meter = AverageMeter()\n union_meter = AverageMeter()\n\n # switch to eval mode\n for net in nets:\n net.eval()\n\n for i, batch_data in enumerate(loader):\n # forward pass\n torch.cuda.empty_cache()\n pred, err = forward_with_loss(nets, batch_data, args, is_train=False)\n loss_meter.update(err.data[0])\n print('[Eval] iter {}, loss: {}'.format(i, err.data[0]))\n\n # calculate accuracy\n acc, pix = accuracy(batch_data, pred)\n acc_meter.update(acc, pix)\n\n intersection, union = intersectionAndUnion(batch_data, pred,\n args.num_class)\n intersection_meter.update(intersection)\n union_meter.update(union)\n\n # visualization\n visualize(batch_data, pred, args)\n\n iou = intersection_meter.sum / (union_meter.sum + 1e-10)\n for i, _iou in enumerate(iou):\n print('class [{}], IoU: {}'.format(trainID2Class[i], _iou))\n\n print('[Eval Summary]:')\n print('Epoch: {}, Loss: {}, Mean IoU: {:.4}, Accurarcy: {:.2f}%'\n .format(epoch, loss_meter.average(), iou.mean(), acc_meter.average() * 100))\n\n history['val']['epoch'].append(epoch)\n history['val']['err'].append(loss_meter.average())\n history['val']['acc'].append(acc_meter.average())\n history['val']['mIoU'].append(iou.mean())\n\n # Plot figure\n if epoch > 0:\n print('Plotting loss figure...')\n fig = plt.figure()\n plt.plot(np.asarray(history['train']['epoch']),\n np.log(np.asarray(history['train']['err'])),\n color='b', label='training')\n 
plt.plot(np.asarray(history['val']['epoch']),\n np.log(np.asarray(history['val']['err'])),\n color='c', label='validation')\n plt.legend()\n plt.xlabel('Epoch')\n plt.ylabel('Log(loss)')\n fig.savefig('{}/loss.png'.format(args.ckpt), dpi=200)\n plt.close('all')\n\n fig = plt.figure()\n plt.plot(history['train']['epoch'], history['train']['acc'],\n color='b', label='training')\n plt.plot(history['val']['epoch'], history['val']['acc'],\n color='c', label='validation')\n plt.legend()\n plt.xlabel('Epoch')\n plt.ylabel('Accuracy')\n fig.savefig('{}/accuracy.png'.format(args.ckpt), dpi=200)\n plt.close('all')\n\n\ndef checkpoint(nets, history, args):\n print('Saving checkpoints...')\n (net_encoder, net_decoder_1, net_decoder_2, net_syn, crit) = nets\n suffix_latest = 'latest.pth'\n suffix_best_acc = 'best_acc.pth'\n suffix_best_mIoU = 'best_mIoU.pth'\n suffix_best_err = 'best_err.pth'\n\n if args.num_gpus > 1:\n dict_encoder = net_encoder.module.state_dict()\n dict_decoder_1 = net_decoder_1.module.state_dict()\n dict_decoder_2 = net_decoder_2.module.state_dict()\n dict_syn = net_syn.module.state_dict()\n else:\n dict_encoder = net_encoder.state_dict()\n dict_decoder_1 = net_decoder_1.state_dict()\n dict_decoder_2 = net_decoder_2.state_dict()\n dict_syn = net_syn.state_dict()\n\n torch.save(history,\n '{}/history_{}'.format(args.ckpt, suffix_latest))\n torch.save(dict_encoder,\n '{}/encoder_{}'.format(args.ckpt, suffix_latest))\n torch.save(dict_decoder_1,\n '{}/decoder_1_{}'.format(args.ckpt, suffix_latest))\n torch.save(dict_decoder_2,\n '{}/decoder_2_{}'.format(args.ckpt, suffix_latest))\n torch.save(dict_syn,\n '{}/syn_{}'.format(args.ckpt, suffix_latest))\n\n cur_err = history['val']['err'][-1]\n cur_acc = history['val']['acc'][-1]\n cur_mIoU = history['val']['mIoU'][-1]\n # if cur_err < args.best_err:\n if cur_acc > args.best_acc:\n # save best accuracy instead\n # args.best_err = cur_err\n args.best_acc = cur_acc\n torch.save(history,\n '{}/history_{}'.format(args.ckpt, suffix_best_acc))\n torch.save(dict_encoder,\n '{}/encoder_{}'.format(args.ckpt, suffix_best_acc))\n torch.save(dict_decoder_1,\n '{}/decoder_1_{}'.format(args.ckpt, suffix_best_acc))\n torch.save(dict_decoder_2,\n '{}/decoder_2_{}'.format(args.ckpt, suffix_best_acc))\n torch.save(dict_syn,\n '{}/syn_{}'.format(args.ckpt, suffix_best_acc))\n\n if cur_mIoU > args.best_mIoU:\n # save best accuracy instead\n # args.best_err = cur_err\n args.best_mIoU = cur_mIoU\n torch.save(history,\n '{}/history_{}'.format(args.ckpt, suffix_best_mIoU))\n torch.save(dict_encoder,\n '{}/encoder_{}'.format(args.ckpt, suffix_best_mIoU))\n torch.save(dict_decoder_1,\n '{}/decoder_1_{}'.format(args.ckpt, suffix_best_mIoU))\n torch.save(dict_decoder_2,\n '{}/decoder_2_{}'.format(args.ckpt, suffix_best_mIoU))\n torch.save(dict_syn,\n '{}/syn_{}'.format(args.ckpt, suffix_best_mIoU))\n\n if cur_err < args.best_err:\n args.best_err = cur_err\n torch.save(history,\n '{}/history_{}'.format(args.ckpt, suffix_best_err))\n torch.save(dict_encoder,\n '{}/encoder_{}'.format(args.ckpt, suffix_best_err))\n torch.save(dict_decoder_1,\n '{}/decoder_1_{}'.format(args.ckpt, suffix_best_err))\n torch.save(dict_decoder_2,\n '{}/decoder_2_{}'.format(args.ckpt, suffix_best_err))\n torch.save(dict_syn,\n '{}/syn_{}'.format(args.ckpt, suffix_best_err))\n\n\ndef create_optimizers(nets, args):\n (net_encoder, net_decoder_1, net_decoder_2, net_syn, crit) = nets\n optimizer_encoder = torch.optim.SGD(\n net_encoder.parameters(),\n lr=args.lr_encoder,\n momentum=args.beta1,\n 
weight_decay=args.weight_decay)\n optimizer_decoder_1 = torch.optim.SGD(\n net_decoder_1.parameters(),\n lr=args.lr_decoder,\n momentum=args.beta1,\n weight_decay=args.weight_decay)\n optimizer_decoder_2 = torch.optim.SGD(\n net_decoder_2.parameters(),\n lr=args.lr_decoder,\n momentum=args.beta1,\n weight_decay=args.weight_decay)\n optimizer_syn = torch.optim.SGD(\n net_syn.parameters(),\n lr=args.lr_decoder,\n momentum=args.beta1,\n weight_decay=args.weight_decay)\n return (optimizer_encoder, optimizer_decoder_1, optimizer_decoder_2, optimizer_syn)\n\n\ndef adjust_learning_rate(optimizers, epoch, args):\n drop_ratio = (1. * (args.num_epoch - epoch) / (args.num_epoch - epoch + 1)) \\\n ** args.lr_pow\n args.lr_encoder *= drop_ratio\n args.lr_decoder *= drop_ratio\n (optimizer_encoder, optimizer_decoder_1, optimizer_decoder_2, optimizer_syn) = optimizers\n for param_group in optimizer_encoder.param_groups:\n param_group['lr'] = args.lr_encoder\n for param_group in optimizer_decoder_1.param_groups:\n param_group['lr'] = args.lr_decoder\n for param_group in optimizer_decoder_2.param_groups:\n param_group['lr'] = args.lr_decoder\n for param_group in optimizer_syn.param_groups:\n param_group['lr'] = args.lr_decoder\n\n\ndef main(args):\n # Network Builders\n builder = ModelBuilder()\n net_encoder = builder.build_encoder(arch=args.arch_encoder,\n fc_dim=args.fc_dim,\n weights=args.weights_encoder)\n net_decoder_1 = builder.build_decoder(arch=args.arch_decoder,\n fc_dim=args.fc_dim,\n num_class=args.num_class,\n weights=args.weights_decoder)\n net_decoder_2 = builder.build_decoder(arch=args.arch_decoder,\n fc_dim=args.fc_dim,\n num_class=args.num_class,\n weights=args.weights_decoder)\n net_syn = builder.build_syn()\n\n if args.weighted_class:\n crit = nn.NLLLoss2d(ignore_index=-1, weight=args.class_weight)\n else:\n crit = nn.NLLLoss2d(ignore_index=-1)\n\n # Dataset and Loader\n dataset_train = CityScapes('train', root=args.root_labeled, cropSize=args.imgSize, is_train=1)\n dataset_adapt = BDD('train', root=args.root_unlabeled, cropSize=args.imgSize, is_train=1)\n dataset_val = BDD('val', root=args.root_unlabeled, cropSize=args.imgSize, max_sample=args.num_val,\n is_train=0)\n loader_train = torch.utils.data.DataLoader(\n dataset_train,\n batch_size=args.batch_size,\n shuffle=True,\n num_workers=int(args.workers),\n drop_last=True)\n loader_adapt = torch.utils.data.DataLoader(\n dataset_adapt,\n batch_size=args.batch_size,\n shuffle=True,\n num_workers=int(args.workers),\n drop_last=True)\n loader_val = torch.utils.data.DataLoader(\n dataset_val,\n batch_size=args.batch_size_eval,\n shuffle=False,\n num_workers=int(args.workers),\n drop_last=True)\n args.epoch_iters = int(len(dataset_train) / args.batch_size)\n print('1 Epoch = {} iters'.format(args.epoch_iters))\n\n # load nets into gpu\n if args.num_gpus > 1:\n net_encoder = nn.DataParallel(net_encoder,\n device_ids=range(args.num_gpus))\n net_decoder_1 = nn.DataParallel(net_decoder_1,\n device_ids=range(args.num_gpus))\n net_decoder_2 = nn.DataParallel(net_decoder_2,\n device_ids=range(args.num_gpus))\n net_syn = nn.DataParallel(net_syn,\n device_ids=range(args.num_gpus))\n\n nets = (net_encoder, net_decoder_1, net_decoder_2, net_syn, crit)\n for net in nets:\n net.cuda()\n\n # Set up optimizers\n optimizers = create_optimizers(nets, args)\n\n # Main loop\n history = {split: {'epoch': [], 'err': [], 'acc': [], 'mIoU': []}\n for split in ('train', 'val')}\n\n # optional initial eval\n # evaluate(nets, loader_val, history, 0, args)\n for epoch 
in range(1, args.num_epoch + 1):\n train(nets, loader_train, loader_adapt, optimizers, history, epoch, args)\n\n # Evaluation and visualization\n if epoch % args.eval_epoch == 0:\n evaluate(nets, loader_val, history, epoch, args)\n\n # checkpointing\n checkpoint(nets, history, args)\n\n # adjust learning rate\n adjust_learning_rate(optimizers, epoch, args)\n\n print('Training Done!')\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n # Model related arguments\n parser.add_argument('--id', default='adapt',\n help=\"a name for identifying the model\")\n parser.add_argument('--arch_encoder', default='resnet34_dilated8',\n help=\"architecture of net_encoder\")\n parser.add_argument('--arch_decoder', default='psp_bilinear',\n help=\"architecture of net_decoder\")\n parser.add_argument('--weights_encoder',\n default='/home/selfdriving/kchitta/Domain-Adapatation/segmentation/pretrained/encoder_best.pth',\n help=\"weights to finetune net_encoder\")\n parser.add_argument('--weights_decoder',\n default='/home/selfdriving/kchitta/Domain-Adapatation/segmentation/pretrained/decoder_best.pth',\n help=\"weights to finetune net_decoder\")\n parser.add_argument('--fc_dim', default=512, type=int,\n help='number of features between encoder and decoder')\n\n # Path related arguments\n parser.add_argument('--root_unlabeled',\n default='/home/selfdriving/datasets/bdd100k')\n parser.add_argument('--root_labeled',\n default='/home/selfdriving/datasets/cityscapes_full')\n\n # optimization related arguments\n parser.add_argument('--num_gpus', default=3, type=int,\n help='number of gpus to use')\n parser.add_argument('--batch_size_per_gpu', default=2, type=int,\n help='input batch size')\n parser.add_argument('--batch_size_per_gpu_eval', default=1, type=int,\n help='eval batch size')\n parser.add_argument('--num_epoch', default=20, type=int,\n help='epochs to train for')\n parser.add_argument('--ratio_source_init', default=0.9, type=float,\n help='initial sampling ratio for source domain')\n parser.add_argument('--ratio_source_final', default=0.1, type=float,\n help='final sampling ratio for source domain')\n parser.add_argument('--ratio_source_final_epoch', default=10, type=int,\n help='epoch beyond which to maintain final ratio')\n\n parser.add_argument('--optim', default='SGD', help='optimizer')\n parser.add_argument('--lr_encoder', default=1e-3, type=float, help='LR')\n parser.add_argument('--lr_decoder', default=1e-2, type=float, help='LR')\n parser.add_argument('--lr_pow', default=0.9, type=float,\n help='power in poly to drop LR')\n parser.add_argument('--alpha', default=0.01, type=float,\n help='weight of similarity loss')\n parser.add_argument('--beta', default=1, type=float,\n help='weight of synthetic loss')\n parser.add_argument('--beta1', default=0.9, type=float,\n help='momentum for sgd, beta1 for adam')\n parser.add_argument('--weight_decay', default=1e-4, type=float,\n help='weights regularizer')\n parser.add_argument('--fix_bn', default=0, type=int,\n help='fix bn params')\n\n # Data related arguments\n parser.add_argument('--num_val', default=-1, type=int,\n help='number of images to evaluate')\n parser.add_argument('--num_class', default=19, type=int,\n help='number of classes')\n parser.add_argument('--workers', default=1, type=int,\n help='number of data loading workers')\n parser.add_argument('--imgSize', default=600, type=int,\n help='input image size')\n parser.add_argument('--segSize', default=600, type=int,\n help='output image size')\n\n # Misc arguments\n 
parser.add_argument('--seed', default=1337, type=int, help='manual seed')\n parser.add_argument('--ckpt', default='./city2bdd_ckpt',\n help='folder to output checkpoints')\n parser.add_argument('--vis', default='./vis',\n help='folder to output visualization during training')\n parser.add_argument('--disp_iter', type=int, default=20,\n help='frequency to display')\n parser.add_argument('--eval_epoch', type=int, default=1,\n help='frequency to evaluate')\n\n # Mode select\n parser.add_argument('--source_only', default=False, type=bool, help='set True to do source only training')\n parser.add_argument('--easy_mining', default=True, type=bool, help='set True to do easy mining')\n parser.add_argument('--hard_mining', default=True, type=bool, help='set True to do hard mining')\n parser.add_argument('--weighted_class', default=True, type=bool, help='set True to use weighted class')\n\n args = parser.parse_args()\n print(\"Input arguments:\")\n for key, val in vars(args).items():\n print(\"{:16} {}\".format(key, val))\n\n args.batch_size = args.num_gpus * args.batch_size_per_gpu\n args.batch_size_eval = args.batch_size_per_gpu_eval\n\n # Specify certain arguments\n if not args.source_only:\n if args.hard_mining:\n args.hard_filtering_init = 0\n args.hard_filtering_final = 1\n args.hard_filtering_final_epoch = 10\n # assign actual function handle\n args.hard_prob_modifier_handle = hardmining.linearModifier\n else:\n args.hard_prob_modifier_handle = None\n\n\n if args.weighted_class:\n args.enhanced_weight = 2.0\n args.class_weight = np.ones([19], dtype=np.float32)\n enhance_class = [1, 3, 4, 5, 6, 7, 12]\n args.class_weight[enhance_class] = args.enhanced_weight\n args.class_weight = torch.from_numpy(args.class_weight.astype(np.float32))\n\n\n\n\n args.id += '-' + str(args.arch_encoder)\n args.id += '-' + str(args.arch_decoder)\n args.id += '-ngpus' + str(args.num_gpus)\n args.id += '-batchSize' + str(args.batch_size)\n args.id += '-imgSize' + str(args.imgSize)\n args.id += '-lr_encoder' + str(args.lr_encoder)\n args.id += '-lr_decoder' + str(args.lr_decoder)\n args.id += '-epoch' + str(args.num_epoch)\n args.id += '-ratio' + str(args.ratio_source_init) + '-' + str(args.ratio_source_final) + '-' + str(\n args.ratio_source_final_epoch)\n args.id += '-alpha' + str(args.alpha)\n args.id += '-beta' + str(args.beta)\n args.id += '-decay' + str(args.weight_decay)\n if args.weighted_class:\n args.id += '-weighted' + str(args.enhanced_weight)\n if args.source_only:\n args.id += '-source_only'\n else:\n args.id += '-adapt'\n if args.easy_mining:\n args.id += '-easy_mining'\n if args.hard_mining:\n args.id += '-hard_mining'\n\n\n print('Model ID: {}'.format(args.id))\n\n args.ckpt = os.path.join(args.ckpt, args.id)\n args.vis = os.path.join(args.vis, args.id)\n if not os.path.isdir(args.ckpt):\n os.makedirs(args.ckpt)\n if not os.path.exists(args.vis):\n os.makedirs(args.vis)\n\n args.best_err = 2.e10 # initialize with a big number\n args.best_acc = 0\n args.best_mIoU = 0\n\n\n\n random.seed(args.seed)\n torch.manual_seed(args.seed)\n\n main(args)\n","sub_path":"segmentation/filtered-gradients/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":26200,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"640524454","text":"\"\"\"\nService http://ad-social.org\nSupported targets: vk.com\n\"\"\"\nimport datetime\nimport re\nfrom bs4 import BeautifulSoup\nfrom .service import Service, ServiceError\n\n\nclass AdsocialService(Service):\n \"\"\" 
Service http://ad-social.org \"\"\"\n\n DOMAIN = 'ad-social.org'\n # Target dependant data.\n TDATA = {\n 'vk.com': {\n 'path': 'vkontakte',\n }\n }\n\n def login(self):\n \"\"\"\n Log in to service (using ulogin).\n \"\"\"\n # Get vk oauth form.\n url = 'https://ulogin.ru/auth.php?name=vkontakte&fields=photo_big'\n response = self.session.get(url)\n root = BeautifulSoup(response.text, 'lxml')\n form = root.find('form')\n\n # Post vk credentials and get ulogin token.\n url = form['action']\n data = {ninput.get('name', ''): ninput.get('value', '') for ninput in\n form.find_all('input')}\n data['email'] = self.username\n data['pass'] = self.password\n response = self.session.post(url, data=data)\n match = re.search(r'''token\\s*=\\s*['\"](\\w+)['\"]\\s*''', response.text)\n if match is None:\n raise ServiceError(\"Login failed: ulogin token is not found.\")\n token = match.group(1)\n self.logger.info('ulogin token = \"%s\"', token)\n\n # Log into service.\n url = 'http://ad-social.org/vk/social2/login'\n response = self.session.post(url,\n data={'token': token},\n allow_redirects=False)\n if response.status_code != 303:\n raise ServiceError(\"Incorrect status code.\")\n\n def get_tasks(self):\n \"\"\"Return list of available tasks.\"\"\"\n url = 'http://ad-social.org/vk/earn?type=like'\n response = self.session.get(url)\n root = BeautifulSoup(response.text, 'lxml')\n\n # Real id is fakeid / multiplier. Find multiplier.\n for script in root.find_all('script'):\n match = re.search(r'var id_2\\s*=\\s*id\\s*/\\s*(\\d+)\\s*;?\\s*if',\n script.text)\n if match is not None:\n multiplier = int(match.group(1))\n break\n else:\n raise ServiceError('Fakeid multiplier is not found.')\n\n # Get tasks.\n tasks = []\n trs = root.find_all(\n 'tr',\n {'class': lambda attr: attr.startswith('task')})\n for tr in trs:\n points = tr.find('span', {'class': 'label-primary'}).text\n points = int(points.split(maxsplit=1)[0])\n fakeid = int(tr['class'][0][4:])\n realid, remain = divmod(fakeid, multiplier)\n if remain != 0:\n raise ServiceError('Incorrect multiplier.')\n\n task = {\n '_id': realid,\n 'points': points,\n 'type': 'like',\n 'date': datetime.datetime.utcnow(),\n }\n tasks.append(task)\n return tasks\n\n def get_task_url(self, task):\n \"\"\"\n Return url at service domain, which leads to task at target domain.\n \"\"\"\n return 'http://ad-social.org/vk/earn/get/' + str(task['_id'])\n\n def check_task(self, task):\n \"\"\"\n Ask service to check the task.\n Return True if service confirmed task completion.\n \"\"\"\n url = 'http://ad-social.org/vk/earn/checkTask/' \\\n + str(task['_id']) + '/like'\n headers = {'X-Requested-With': 'XMLHttpRequest'}\n response = self.session.get(url, headers=headers)\n json_data = response.json()\n\n return json_data['status']\n","sub_path":"makepts/makeptslib/service/adsocial.py","file_name":"adsocial.py","file_ext":"py","file_size_in_byte":3630,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"581585971","text":"\"\"\" Integration and unit tests for the SR algorithm. 
\"\"\"\nfrom matching.games.stable_roommates import (\n first_phase,\n locate_all_or_nothing_cycle,\n second_phase,\n stable_roommates,\n)\n\nfrom .params import STABLE_ROOMMATES, make_players\n\n\n@STABLE_ROOMMATES\ndef test_first_phase(player_names, seed):\n \"\"\"Verify that the first phase of the algorithm produces a valid set of\n reduced preference players.\"\"\"\n\n players = make_players(player_names, seed)\n players = first_phase(players)\n\n for player in players:\n assert player.matching is None\n assert {p.name for p in player.prefs}.issubset(player.pref_names)\n\n\n@STABLE_ROOMMATES\ndef test_locate_all_or_nothing_cycle(player_names, seed):\n \"\"\"Verify that a cycle of (least-preferred, second-choice) players can be\n identified from a set of players.\"\"\"\n\n players = make_players(player_names, seed)\n player = players[-1]\n cycle = locate_all_or_nothing_cycle(player)\n\n for last, second in cycle:\n assert second.prefs.index(last) == len(second.prefs) - 1\n\n\n@STABLE_ROOMMATES\ndef test_second_phase(player_names, seed):\n \"\"\"Verify that the second phase of the algorithm produces a valid set of\n players with appropriate matches.\"\"\"\n\n players = make_players(player_names, seed)\n try:\n players = second_phase(players)\n\n for player in players:\n if player.prefs:\n assert player.prefs == [player.matching]\n else:\n assert player.matching is None\n except (IndexError, ValueError):\n pass\n\n\n@STABLE_ROOMMATES\ndef test_stable_roommates(player_names, seed):\n \"\"\" Verify that the algorithm can terminate with a valid matching. \"\"\"\n\n players = make_players(player_names, seed)\n matching = stable_roommates(players)\n\n for player, other in matching.items():\n if other is not None:\n assert player.prefs == [other]\n assert other.matching == player\n","sub_path":"tests/stable_roommates/test_algorithm.py","file_name":"test_algorithm.py","file_ext":"py","file_size_in_byte":1962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"642406004","text":"from telegram.ext import Updater, PicklePersistence, messagequeue as mq\n# from telegram.utils.request import Request\n\nfrom app.bot import MQBot\nfrom settings.config import TG_TOKEN\nfrom app.handlers import main, proccess_images\n\n\ndef run():\n # request = Request(con_pool_size=16)\n mqbot = MQBot(\n token=TG_TOKEN,\n # request=request,\n mqueue=mq.MessageQueue(),\n )\n updater = Updater(\n bot=mqbot,\n use_context=True,\n persistence=PicklePersistence(filename='persistent_data')\n )\n dp = updater.dispatcher\n\n main.register(dp=dp)\n proccess_images.register(dp=dp)\n updater.start_polling()\n updater.idle()\n\n\nif __name__ == '__main__':\n run()\n","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"382857614","text":"import json\nimport requests\nimport urllib\n\n# SPARCS SSO Client Version 0.9.0 (BETA)\n# VALID ONLY AFTER 2016-01-28T12:59+09:00\n# Made by SPARCS SSO Team\n\nclass Client:\n API_BASE_URL = 'https://sparcssso.kaist.ac.kr/api/v1/'\n REQUIRE_BASE_URL = '%stoken/require/' % API_BASE_URL\n INFO_BASE_URL = '%stoken/info/' % API_BASE_URL\n POINT_BASE_URL = '%spoint/' % API_BASE_URL\n NOTICE_BASE_URL = '%snotice/' % API_BASE_URL\n\n def __init__(self, is_test=False, app_name='', secret_key=''):\n if not is_test and (not app_name or not secret_key):\n raise AssertionError('Need \"app_name\" and \"secret_key\"')\n\n 
self.is_test = is_test\n self.app_name = app_name\n self.secret_key = secret_key\n\n def _post_data(self, url, data):\n r = requests.post(url, data)\n if r.status_code == 403:\n raise ValueError('Invalid secret key')\n elif r.status_code == 404:\n raise ValueError('Invalid / timeout token')\n elif r.status_code != 200:\n raise RuntimeError('Unknown server error')\n\n try:\n return json.loads(r.text)\n except:\n raise RuntimeError('Json decode error')\n\n def get_login_url(self, callback_url=''):\n if self.is_test and not callback_url:\n raise AssertionError('Need \"callback_url\"')\n\n if self.is_test:\n return '%s?url=%s' % (self.REQUIRE_BASE_URL, callback_url)\n return '%s?app=%s' % (self.REQUIRE_BASE_URL, self.app_name)\n\n def get_user_info(self, tokenid):\n result = self._post_data(self.INFO_BASE_URL,\n {\n 'tokenid': tokenid,\n 'key': self.secret_key\n })\n return result\n\n def get_point(self, sid):\n if self.is_test:\n raise NotImplementedError('Not supported on test mode')\n\n result = self._post_data(self.POINT_BASE_URL,\n {\n 'app': self.app_name,\n 'key': self.secret_key,\n 'sid': sid\n })\n return result['point']\n\n def modify_point(self, sid, delta, action, lower_bound=-100000000):\n if self.is_test:\n raise NotImplementedError('Not supported on test mode')\n\n result = self._post_data(self.POINT_BASE_URL,\n {\n 'app': self.app_name,\n 'key': self.secret_key,\n 'sid': sid,\n 'delta': delta,\n 'action': action,\n 'lower_bound': lower_bound\n })\n return result['changed'], result['point']\n\n def get_notice(self):\n return json.load(urllib.urlopen(self.NOTICE_BASE_URL))\n","sub_path":"apps/session/sparcssso.py","file_name":"sparcssso.py","file_ext":"py","file_size_in_byte":3030,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
{"seq_id":"304373197","text":"# Create a program that asks the user to enter a cylinder's radius and its height; the cylinder's surface area and volume are computed. The result is displayed in the console.\n# volume = 3.14 * radius * radius * height\n# area = 2 * (3.14 * radius * radius) + height * (2 * 3.14 * radius)\n\nh = int(input(\"Enter the height: \"))\nr = int(input(\"Enter the radius: \"))\n\npi = 3.14\nvol = pi * r**2 * h\narea = 2 * pi * r**2 + h * 2 * pi *r\n\nprint(\"Volume: \", vol)\nprint(\"Area: \", area)","sub_path":"uzd_01_20201210/uzd_01_2.py","file_name":"uzd_01_2.py","file_ext":"py","file_size_in_byte":492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
{"seq_id":"386798457","text":"\nimport re, util, json, csv\nfrom BeautifulSoup import BeautifulStoneSoup\nfrom StringIO import StringIO\n\ndef dictsToCsv(dicts, filename):\n keys = []\n for dict in dicts:\n for key in dict.keys():\n if not keys.__contains__(key):\n keys.append(key)\n \n #print(keys)\n \n finalList = []\n \n for dict in dicts:\n entry = []\n for key in keys:\n val = dict.get(key)\n if not val == None:\n entry.append(val)\n else:\n entry.append(\"\")\n finalList.append(entry) \n \n #print(finalList) \n \n \n file = open(filename, \"w\")\n writer = csv.writer(file)\n writer.writerow(keys)\n writer.writerows(finalList)\n file.close()\n \n\n\nlistFile = open('rooms.txt', 'r')\nlines = listFile.readlines()\n\nbaseUrl = \"http://ims.fas.harvard.edu/classrooms/room.php?rm=rm\"\n\nio = StringIO()\n\n# Open file for writing\nfile = open('output.txt', 'w')\nrooms = []\n\nfor line in lines:\n room = {}\n url = baseUrl + line[:4]\n soup = util.mysoupopen(url)\n \n bldg = soup.findAll(\"h1\")\n room['id'] = line[:4]\n room['bldg'] = bldg[0].contents[0].strip()\n room['room'] = bldg[0].contents[3].contents[0]\n \n table = soup.findAll(\"table\")\n table = table[0]\n #print(table.contents)\n for row in table.contents:\n if row != u'\\n':\n prop = row.contents[0].contents[0].strip().strip(\":\")\n m = re.search(\"([^<]*)\", str(row.contents[1]))\n val = m.group(1)\n room[prop] = val\n\n photo = soup.find(\"img\", {\"id\":\"room_photo\"})\n photo = photo[\"src\"]\n \n if photo == \"http://www.fas.harvard.edu/~ims/Class/images/nophoto.jpg\":\n photo = \"\"\n \n room['photo'] = photo\n rooms.append(room)\n print(room) \n\nfile.close()\n\ndictsToCsv(rooms, \"rooms.csv\")\n\n\n","sub_path":"roomspot-python/src/scraper/scraper.py","file_name":"scraper.py","file_ext":"py","file_size_in_byte":1875,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
{"seq_id":"398544334","text":"# -*- coding: utf-8 -*-\n##############################################################################\n# \n# Author: Alessandro Camilli (alessandrocamilli@openforce.it)\n# Copyright (C) 2014\n# Openforce ()\n# \n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as published\n# by the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. 
If not, see .\n#\n##############################################################################\n\nfrom openerp.osv import orm, fields\nfrom openerp.tools.translate import _\nimport openerp.addons.decimal_precision as dp\nfrom openerp import netsvc\n\n\nclass account_move_line(orm.Model):\n _inherit = \"account.move.line\"\n _columns = {\n 'withholding_tax_amount': fields.float('Withholding Tax Amount'),\n }\n \nclass account_voucher(orm.Model):\n _inherit = \"account.voucher\"\n \n def recompute_voucher_lines(self, cr, uid, ids, partner_id, journal_id, price, currency_id, ttype, date, context=None):\n '''\n Compute original amount of WT of rate\n '''\n move_line_obj = self.pool['account.move.line']\n voucher_line_obj = self.pool['account.voucher.line']\n dp_obj = self.pool['decimal.precision']\n res = super(account_voucher, self).recompute_voucher_lines(cr, uid, ids, \n partner_id,\n journal_id,\n price,\n currency_id,\n ttype, date,\n context=context)\n def _compute_wt_values(lines):\n amount_overflow_residual = 0.0\n # For each line, WT\n for line in lines:\n if 'move_line_id' in line and line['move_line_id']:\n move_line = move_line_obj.browse(cr, uid, line['move_line_id'])\n line['amount_original_withholding_tax'] = move_line.withholding_tax_amount\n line['amount_residual_withholding_tax']= \\\n voucher_line_obj.compute_amount_residual_withholdin_tax(cr, uid, \n line, \n context=None)\n # Recompute automatic values on amount: \n # The amount_residual_currency on account_move_line, doesn't see the WT values\n if lines and lines[0]['amount']:\n # For each amount to redistribuite\n tot_amount = 0\n for line in lines:\n tot_amount += line['amount'] + line['amount_residual_withholding_tax']\n \n # Redistribuite amount\n for line in lines:\n if tot_amount <= 0:\n break\n if line['amount'] > (line['amount_unreconciled'] - line['amount_residual_withholding_tax']):\n line['amount'] = line['amount_unreconciled'] - line['amount_residual_withholding_tax']\n line['amount'] = round(line['amount'], dp_obj.precision_get(cr, uid, 'Account'))\n tot_amount -= line['amount'] \n # Allocate WT \n for line in lines:\n if 'move_line_id' in line and line['move_line_id']:\n move_line = move_line_obj.browse(cr, uid, line['move_line_id'])\n if line['amount'] or amount_overflow_residual:\n # Assign overflow from other lines\n if amount_overflow_residual:\n if (line['amount'] + amount_overflow_residual) <= (line['amount_unreconciled'] - line['amount_residual_withholding_tax']):\n line['amount'] += amount_overflow_residual\n amount_overflow_residual = 0.0\n else:\n line['amount'] = line['amount_unreconciled'] - line['amount_residual_withholding_tax']\n # Compute WT\n line['amount_withholding_tax']= \\\n voucher_line_obj.compute_amount_withholdin_tax(cr, uid, line['amount'],\n line['amount_unreconciled'], \n line['amount_residual_withholding_tax'], \n context=None)\n # WT can generate an overflow. 
It will be assigned to the next line\n amount_overflow = line['amount'] + line['amount_withholding_tax'] - line['amount_unreconciled']\n if amount_overflow > 0 :\n line['amount'] -= amount_overflow\n amount_overflow_residual += amount_overflow\n line['amount_original'] -= line['amount_original_withholding_tax']\n \n return lines\n if partner_id:\n lines_dr = res['value']['line_dr_ids']\n lines_dr = _compute_wt_values(lines_dr)\n lines_cr = res['value']['line_cr_ids']\n lines_cr = _compute_wt_values(lines_cr)\n \n return res\n \n def voucher_move_line_create(self, cr, uid, voucher_id, line_total, move_id, company_currency, current_currency, context=None):\n '''\n Add WT line to registration and change amount on debit/credit line of the invoice \n '''\n move_line_obj = self.pool['account.move.line']\n voucher_line_obj = self.pool['account.voucher.line']\n payment_term_obj = self.pool['account.payment.term']\n reconcile_obj = self.pool['account.move.reconcile']\n line_total, rec_list_ids = super(account_voucher, self).voucher_move_line_create(cr, uid,\n voucher_id,\n line_total,\n move_id,\n company_currency,\n current_currency, \n context=context)\n def _unreconcile_move_line(move_line):\n '''\n Remove reconciliation to change amounts\n '''\n recs = []\n recs_to_rereconcile = []\n if move_line.reconcile_id:\n recs += [move_line.reconcile_id.id]\n if move_line.reconcile_partial_id:\n recs += [move_line.reconcile_partial_id.id]\n # If there are other partial payments, save the line id for future reconciliation\n cr.execute('SELECT id FROM account_move_line WHERE reconcile_partial_id=%s \\\n AND id <> %s', \n (move_line.reconcile_partial_id.id, move_line.id))\n for l in cr.dictfetchall():\n recs_to_rereconcile.append(l['id'])\n reconcile_obj.unlink(cr, uid, recs)\n return recs_to_rereconcile\n \n # rec_list_ids: payment move lines paired with the invoice move_line to reconcile\n rec_list_new_moves = []\n for rec in rec_list_ids:\n line_move_to_pay = move_line_obj.browse(cr, uid, rec[1])\n line_payment = move_line_obj.browse(cr, uid, rec[0])\n # Remove reconciliation to change amounts\n lines_to_rereconcile = _unreconcile_move_line(line_move_to_pay)\n for r_line_id in lines_to_rereconcile:\n rec_list_new_moves.append([r_line_id, line_move_to_pay.id])\n _unreconcile_move_line(line_payment)\n # line voucher with WT\n domain = [('voucher_id', '=', voucher_id), ('move_line_id', '=', line_move_to_pay.id)]\n v_line_payment_ids = voucher_line_obj.search(cr, uid, domain)\n for v_line in voucher_line_obj.browse(cr, uid, v_line_payment_ids):\n voucher = v_line.voucher_id\n for wt_v_line in v_line.withholding_tax_line_ids:\n credit = 0.0\n debit = 0.0\n if v_line.move_line_id.debit:\n debit = wt_v_line.amount\n else:\n credit = wt_v_line.amount\n # account\n if line_move_to_pay.account_id.type == 'receivable':\n wt_account_id = wt_v_line.withholding_tax_id.account_receivable_id.id\n else:\n wt_account_id = wt_v_line.withholding_tax_id.account_payable_id.id\n # Line WT\n payment_lines = payment_term_obj.compute(cr,\n uid, wt_v_line.withholding_tax_id.payment_term.id, wt_v_line.amount,\n voucher.date or False, context=context)\n line_wt_ids = []\n for payment_line in payment_lines:\n p_date_maturity = payment_line[0]\n p_credit = 0.0\n p_debit = 0.0\n if debit:\n p_debit = payment_line[1]\n else:\n p_credit = payment_line[1]\n val_move_line = {\n 'journal_id': voucher.journal_id.id,\n 'period_id': voucher.period_id.id,\n #'name': wt_v_line.withholding_tax_id.name or '/',\n 'name': wt_v_line.withholding_tax_id.name + ' ' + 
voucher.partner_id.name or '/',\n 'account_id': wt_account_id,\n 'move_id': move_id,\n #'partner_id': voucher.partner_id.id,\n 'partner_id': False,\n 'currency_id': v_line.move_line_id.currency_id.id or False,\n 'analytic_account_id': v_line.account_analytic_id and v_line.account_analytic_id.id or False,\n 'quantity': 1,\n 'credit': p_credit,\n 'debit': p_debit,\n 'date': voucher.date,\n 'date_maturity': p_date_maturity\n }\n line_wt_id = move_line_obj.create(cr, uid, val_move_line)\n line_wt_ids.append(line_wt_id)\n \n # Add amount WT to line debit/credit partner\n val = {\n 'credit': line_payment.credit + debit,\n 'debit': line_payment.debit + credit\n }\n move_line_obj.write(cr, uid, [line_payment.id], val)\n \n # Merge with existing lines to reconcile\n if rec_list_new_moves:\n for rec_new in rec_list_new_moves:\n for rec_ids in rec_list_ids:\n if not rec_new[1] == rec_ids[1]:\n continue\n rec_ids.append(rec_new[0])\n \n return (line_total, rec_list_ids)\n \n \nclass account_voucher_line(orm.Model):\n _inherit = \"account.voucher.line\"\n \n def _amount_withholding_tax(self, cr, uid, ids, name, args, context=None):\n res = {}\n for line in self.browse(cr, uid, ids, context=context):\n res[line.id] = {\n 'amount_original_withholding_tax': 0.0,\n }\n res[line.id]['amount_original_withholding_tax'] += line.move_line_id.withholding_tax_amount\n return res\n \n def _compute_balance(self, cr, uid, ids, name, args, context=None):\n '''\n Extends the compute of original amounts for exclude from total the WT amount\n '''\n currency_pool = self.pool.get('res.currency')\n rs_data = {}\n for line in self.browse(cr, uid, ids, context=context):\n ctx = context.copy()\n ctx.update({'date': line.voucher_id.date})\n voucher_rate = self.pool.get('res.currency').read(cr, uid, line.voucher_id.currency_id.id, ['rate'], context=ctx)['rate']\n ctx.update({\n 'voucher_special_currency': line.voucher_id.payment_rate_currency_id and line.voucher_id.payment_rate_currency_id.id or False,\n 'voucher_special_currency_rate': line.voucher_id.payment_rate * voucher_rate})\n res = {}\n company_currency = line.voucher_id.journal_id.company_id.currency_id.id\n voucher_currency = line.voucher_id.currency_id and line.voucher_id.currency_id.id or company_currency\n move_line = line.move_line_id or False\n\n if not move_line:\n res['amount_original'] = 0.0\n res['amount_unreconciled'] = 0.0\n res['amount_withholding_tax'] = 0.0\n elif move_line.currency_id and voucher_currency==move_line.currency_id.id:\n res['amount_original'] = abs(move_line.amount_currency - move_line.withholding_tax_amount) # modify for WT\n res['amount_unreconciled'] = abs(move_line.amount_residual_currency)\n else:\n #always use the amount booked in the company currency as the basis of the conversion into the voucher currency\n res['amount_original'] = currency_pool.compute(cr, uid, company_currency, voucher_currency, move_line.credit or move_line.debit or 0.0, context=ctx)\n res['amount_unreconciled'] = currency_pool.compute(cr, uid, company_currency, voucher_currency, abs(move_line.amount_residual), context=ctx)\n res['amount_original'] -= move_line.withholding_tax_amount # add for WT\n \n rs_data[line.id] = res\n return rs_data\n \n _columns = {\n 'amount_original': fields.function(_compute_balance, multi='dc', type='float', string='Original Amount', store=True, digits_compute=dp.get_precision('Account')),\n 'amount_original_withholding_tax': fields.function(_amount_withholding_tax, \n digits_compute=dp.get_precision('Account'), string='Withholding 
Tax Original', multi='withholding_tax'),\n 'amount_residual_withholding_tax': fields.float('Withholding Tax Amount Residual'),\n 'amount_withholding_tax': fields.float('Withholding Tax Amount'),\n 'withholding_tax_line_ids': fields.one2many('withholding.tax.voucher.line', 'voucher_line_id', 'Withholding Tax Lines'),\n }\n \n def onchange_amount(self, cr, uid, ids, amount, amount_unreconciled, amount_residual_withholding_tax, context=None):\n res = super(account_voucher_line, self).onchange_amount(cr, uid, ids, \n amount, \n amount_unreconciled, \n context=context)\n dp_obj = self.pool['decimal.precision']\n wt_amount = self.compute_amount_withholdin_tax(cr, uid, amount, amount_unreconciled, amount_residual_withholding_tax, context)\n res['value'].update({'amount_withholding_tax': wt_amount})\n \n # Setting for Total amount\n if (amount + wt_amount) >= round(amount_unreconciled,dp_obj.precision_get(cr, uid, 'Account')):\n res['value'].update({'reconcile': True})\n res['value'].update({'amount': amount})\n\n return res\n \n def onchange_reconcile(self, cr, uid, ids, reconcile, amount, \n amount_unreconciled, \n amount_residual_withholding_tax, \n context=None):\n '''\n TO CONSIDER: Amount tot = amount net + amount WT \n '''\n res = super(account_voucher_line, self).onchange_reconcile(cr, uid, ids, \n reconcile,\n amount, \n amount_unreconciled, \n context=context)\n if reconcile: \n amount = amount_unreconciled\n wt_amount = self.compute_amount_withholdin_tax(cr, uid, amount, amount_unreconciled, amount_residual_withholding_tax, context)\n res['value']['amount'] = amount - wt_amount\n return res\n \n def compute_amount_residual_withholdin_tax(self, cr, uid, line, context=None):\n '''\n WT residual = WT amount original - (All WT amounts in voucher posted)\n '''\n dp_obj = self.pool['decimal.precision']\n wt_amount_residual = 0.0\n if not 'move_line_id' in line or not line['move_line_id']:\n return wt_amount_residual\n domain = [('move_line_id', '=', line['move_line_id'])]\n v_line_ids = self.search(cr, uid, domain)\n wt_amount_residual = line['amount_original_withholding_tax']\n for v_line in self.browse(cr, uid, v_line_ids):\n if v_line.voucher_id.state == 'posted':\n wt_amount_residual -= v_line.amount_withholding_tax\n \n return wt_amount_residual\n \n def compute_amount_withholdin_tax(self, cr, uid, amount, amount_unreconciled, wt_amount_residual, context=None):\n dp_obj = self.pool['decimal.precision']\n wt_amount = 0.0\n # Total amount\n amount_tot = amount + wt_amount_residual\n base_amount = amount_unreconciled - wt_amount_residual\n if amount_tot >= round(amount_unreconciled,dp_obj.precision_get(cr, uid, 'Account')):\n wt_amount = wt_amount_residual\n # Partial amount ( ratio with amount net)\n else:\n wt_amount = round(wt_amount_residual * (1.0 * amount / base_amount),\\\n dp_obj.precision_get(cr, uid, 'Account'))\n return wt_amount\n \n def recompute_withholding_tax_voucher_line(self, cr, uid, voucher_line_id, context=None):\n '''\n Split amount voucher line second WT lines invoice\n '''\n res = []\n invoice_obj = self.pool['account.invoice']\n wt_voucher_line_obj = self.pool['withholding.tax.voucher.line']\n dp_obj = self.pool['decimal.precision']\n \n voucher_line = self.browse(cr, uid, voucher_line_id)\n # delete existing wt lines\n domain = [('voucher_line_id', '=', voucher_line_id)]\n wtv_line_ids = wt_voucher_line_obj.search(cr, uid, domain)\n wt_voucher_line_obj.unlink(cr, uid, wtv_line_ids)\n #\n if voucher_line.amount_withholding_tax:\n domain = [('move_id', '=', 
voucher_line.move_line_id.move_id.id)]\n inv_ids = invoice_obj.search(cr, uid, domain)\n for inv in invoice_obj.browse(cr, uid, inv_ids):\n if len(inv.withholding_tax_line):\n rate_num = len(inv.withholding_tax_line)\n # Rates\n wt_amount_rate = round(voucher_line.amount_withholding_tax / rate_num, \\\n dp_obj.precision_get(cr, uid, 'Account'))\n wt_residual = voucher_line.amount_withholding_tax\n # Re-read move lines to assign the amounts of wt\n i = 0\n for wt_invoice_line in inv.withholding_tax_line:\n i += 1\n if i == rate_num:\n wt_amount = wt_residual\n else:\n wt_amount = wt_amount_rate\n wt_residual -= wt_amount\n \n val = {\n 'voucher_line_id' : voucher_line_id,\n 'withholding_tax_id' : wt_invoice_line.withholding_tax_id.id,\n 'amount' : wt_amount\n }\n wt_voucher_line_obj.create(cr, uid, val)\n \n return res\n \n def create(self, cr, uid, vals, *args, **kwargs):\n res_id = super(account_voucher_line,self).create(cr, uid, vals, *args, **kwargs)\n self.recompute_withholding_tax_voucher_line(cr, uid, res_id, context=None)\n return res_id\n \n def write(self, cr, uid, ids, vals, context=None):\n res = super(account_voucher_line,self).write(cr, uid, ids, vals, context)\n if 'amount_withholding_tax' in vals:\n for line_id in ids:\n self.recompute_withholding_tax_voucher_line(cr, uid, line_id)\n return res\n \n \nclass account_fiscal_position(orm.Model):\n _inherit = \"account.fiscal.position\"\n _columns = {\n 'withholding_tax_ids': fields.many2many('withholding.tax', 'account_fiscal_position_withholding_tax_rel', 'fiscal_position_id', 'withholding_tax_id', 'Withholding Tax'),\n }\n \nclass account_invoice(orm.Model):\n _inherit = \"account.invoice\"\n \n def _amount_withholding_tax(self, cr, uid, ids, name, args, context=None):\n res = {}\n for invoice in self.browse(cr, uid, ids, context=context):\n res[invoice.id] = {\n 'withholding_tax_amount': 0.0,\n }\n for line in invoice.withholding_tax_line:\n res[invoice.id]['withholding_tax_amount'] += line.tax\n res[invoice.id]['amount_net_pay'] = invoice.amount_total - res[invoice.id]['withholding_tax_amount']\n return res\n \n _columns = {\n 'withholding_tax': fields.boolean('Withholding Tax'),\n 'withholding_tax_line': fields.one2many('account.invoice.withholding.tax', 'invoice_id', 'Withholding Tax', readonly=True, states={'draft':[('readonly',False)]}),\n 'withholding_tax_amount': fields.function(_amount_withholding_tax, digits_compute=dp.get_precision('Account'), string='Withholding tax', multi='withholding_tax'),\n 'amount_net_pay': fields.function(_amount_withholding_tax, digits_compute=dp.get_precision('Account'), string='Net To Pay', multi='withholding_tax')\n }\n \n def action_move_create(self, cr, uid, ids, context=None):\n '''\n Split amount withholding tax on account move lines\n '''\n move_line_obj = self.pool['account.move.line']\n dp_obj = self.pool['decimal.precision']\n \n res = super(account_invoice, self).action_move_create(cr, uid, ids, context=context)\n \n for inv in self.browse(cr, uid, ids):\n # Rates\n rate_num = 0\n for move_line in inv.move_id.line_id:\n if not move_line.date_maturity:\n continue\n rate_num += 1\n #\n if rate_num:\n wt_rate = round(inv.withholding_tax_amount / rate_num, \\\n dp_obj.precision_get(cr, uid, 'Account'))\n wt_residual = inv.withholding_tax_amount\n # Re-read move lines to assign the amounts of wt\n i = 0\n for move_line in inv.move_id.line_id:\n if not move_line.date_maturity:\n continue\n i += 1\n if i == rate_num:\n wt_amount = wt_residual\n else:\n wt_amount = wt_rate\n wt_residual -= 
wt_amount\n # update line\n move_line_obj.write(cr, uid, [move_line.id], {'withholding_tax_amount': wt_amount})\n \n return res\n \n def compute_all_withholding_tax(self, cr, uid, ids, context=None):\n \n withholdin_tax_obj = self.pool['withholding.tax']\n invoice_withholdin_tax_obj = self.pool['account.invoice.withholding.tax']\n res ={}\n \n if not ids :\n return res\n \n for invoice in self.browse(cr, uid, ids):\n # Clear for recompute o because there isn't withholding_tax to True \n if invoice.fiscal_position or not invoice.withholding_tax:\n cr.execute(\"DELETE FROM account_invoice_withholding_tax WHERE invoice_id=%s \", (invoice.id,))\n if invoice.fiscal_position and invoice.fiscal_position.withholding_tax_ids:\n for tax in invoice.fiscal_position.withholding_tax_ids:\n tot_invoice = 0\n withholding_tax = withholdin_tax_obj.compute_amount(cr, uid, tax.id, tot_invoice, invoice.id, context=None)\n val = {\n 'invoice_id' : invoice.id,\n 'withholding_tax_id' : tax.id,\n 'base': withholding_tax['base'],\n 'tax': withholding_tax['tax']\n }\n invoice_withholdin_tax_obj.create(cr, uid, val)\n \n return res\n \n def button_reset_taxes(self, cr, uid, ids, context=None):\n res = super(account_invoice, self).button_reset_taxes(cr, uid, ids, context=context)\n \n self.compute_all_withholding_tax(cr, uid, ids, context)\n \n return res\n \n def onchange_fiscal_position_id(self, cr, uid, ids, fiscal_position_id, context=None):\n res ={}\n fiscal_position_obj = self.pool['account.fiscal.position']\n vals= False\n if fiscal_position_id:\n fiscal_position = fiscal_position_obj.browse(cr, uid, fiscal_position_id)\n if fiscal_position.withholding_tax_ids:\n vals = {\n 'withholding_tax': True\n }\n \n res = {\n 'value': vals \n }\n return res\n \n \nclass account_invoice_line(orm.Model):\n _inherit = \"account.invoice.line\"\n \n def compute_amount_line(self, cr, uid, line):\n \n dp_obj = self.pool['decimal.precision']\n price_subtotal = 0 \n price = line['price_unit'] * (1-(line['discount'] or 0.0)/100.0)\n if 'discount2' in line: # field of my customization\n price = price * (1-(line['discount2'] or 0.0)/100.0)\n price_subtotal = round(price * line['quantity'], dp_obj.precision_get(cr, uid, 'Account'))\n \n return price_subtotal\n\n\nclass account_invoice_withholding_tax(orm.Model):\n _name = 'account.invoice.withholding.tax'\n _description = 'Invoice Withholding Tax Line'\n _columns = {\n 'invoice_id': fields.many2one('account.invoice', 'withholding_tax_line', 'Invoice'),\n 'withholding_tax_id': fields.many2one('withholding.tax', 'Withholding tax'),\n 'base': fields.float('Base'),\n 'tax': fields.float('Tax'),\n }\n \n def onchange_withholding_tax_id(self, cr, uid, ids, withholding_tax_id, invoice_line_ids):\n fiscal_position_obj = self.pool['account.fiscal.position']\n withholdin_tax_obj = self.pool['withholding.tax']\n invoice_line_obj = self.pool['account.invoice.line']\n res = {}\n tot_invoice = 0\n for line in invoice_line_ids:\n if line[1]:\n line_inv = invoice_line_obj.browse(cr, uid, line[1])\n price_subtotal = line_inv.price_subtotal\n else:\n price_subtotal = invoice_line_obj.compute_amount_line(cr, uid, line[2])\n tot_invoice += price_subtotal\n tax = withholdin_tax_obj.compute_amount(cr, uid, withholding_tax_id, tot_invoice, invoice_id=None, context=None)\n \n res['value'] = {\n 'base': tax['base'],\n 'tax': tax['tax']\n }\n \n return res\n \nclass withholding_tax(orm.Model):\n _name = 'withholding.tax'\n _description = 'Withholding Tax'\n \n def _get_rate(self, cr, uid, ids, field_names, 
args, context=None):\n res = {}\n for tax in self.browse(cr, uid, ids, context=context):\n cr.execute('SELECT tax, base FROM withholding_tax_rate ' \\\n ' WHERE withholding_tax_id = %s and (date_start < current_date or date_start is null)' \\\n ' ORDER by date_start LIMIT 1', (tax.id,))\n rate = cr.fetchone()\n if rate:\n res[tax.id] = {\n 'tax' : rate[0],\n 'base': rate[1]\n }\n else:\n res[tax.id] = {\n 'tax' : 0,\n 'base': 1\n }\n \n return res\n \n _columns = {\n 'active': fields.boolean('Active'),\n 'name': fields.char('Name', size=256, required=True),\n 'certification': fields.boolean('Certification'),\n 'comment': fields.text('Text'),\n 'account_receivable_id': fields.many2one('account.account', 'Account Receivable', required=True, \n domain=[('type','=', 'receivable')]),\n 'account_payable_id': fields.many2one('account.account', 'Account Payable', required=True, \n domain=[('type','=', 'payable')]),\n 'payment_term': fields.many2one('account.payment.term', 'Payment Terms', required=True),\n 'tax': fields.function(_get_rate, string='Tax %', multi='balance'),\n 'base': fields.function(_get_rate, string='Base', multi='balance'),\n 'rate_ids': fields.one2many('withholding.tax.rate', 'withholding_tax_id', 'Rates', required=True),\n }\n _defaults = {\n 'active': True\n }\n \n def compute_amount(self, cr, uid, withholding_tax_id, amount_invoice, invoice_id=None, context=None):\n invoice_obj = self.pool['account.invoice']\n res = {\n 'base' : 0,\n 'tax' : 0\n }\n if not amount_invoice and invoice_id:\n invoice = invoice_obj.browse(cr, uid, invoice_id)\n amount_invoice = invoice.amount_untaxed\n tax = self.browse(cr, uid, withholding_tax_id)\n base = amount_invoice * tax.base\n tax = base * ((tax.tax or 0.0)/100.0)\n \n res['base'] = base\n res['tax'] = tax\n \n return res\n \n\nclass withholding_tax_rate(orm.Model):\n _name = 'withholding.tax.rate'\n _description = 'Withholding Tax Rates'\n \n def _check_date(self, cursor, user, ids, context=None):\n for rate in self.browse(cursor, user, ids, context=context):\n if not rate.withholding_tax_id.active:\n continue\n where = []\n if rate.date_start:\n where.append(\"((date_stop>='%s') or (date_stop is null))\" % (rate.date_start,))\n if rate.date_stop:\n where.append(\"((date_start<='%s') or (date_start is null))\" % (rate.date_stop,))\n\n cursor.execute('SELECT id ' \\\n 'FROM withholding_tax_rate ' \\\n 'WHERE '+' and '.join(where) + (where and ' and ' or '')+\n 'withholding_tax_id = %s ' \\\n 'AND id <> %s', (\n rate.withholding_tax_id.id,\n rate.id))\n if cursor.fetchall():\n return False\n return True\n\n _columns = {\n 'withholding_tax_id': fields.many2one('withholding.tax', 'Withholding Tax', ondelete='cascade', readonly=True),\n 'date_start': fields.date('Date Start'),\n 'date_stop': fields.date('Date Stop'),\n 'comment': fields.text('Text'),\n 'base': fields.float('Base Coeff.'),\n 'tax': fields.float('Tax %'),\n }\n _defaults = {\n 'base': 1\n }\n \n _constraints = [\n (_check_date, 'You cannot have 2 pricelist versions that overlap!',\n ['date_start', 'date_stop'])\n ]\n\nclass withholding_tax_voucher_line(orm.Model):\n _name = 'withholding.tax.voucher.line'\n _description = 'Withholding Tax Voucher Line'\n _columns = {\n 'voucher_line_id': fields.many2one('account.voucher.line', 'Account Voucher Line', ondelete='cascade'),\n 'withholding_tax_id': fields.many2one('withholding.tax', 'Withholding Tax'),\n 'amount': fields.float('Amount'),\n 
}","sub_path":"openforce_withholding_tax/account.py","file_name":"account.py","file_ext":"py","file_size_in_byte":33170,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"556350007","text":"# -*- coding: utf-8 -*-\n\n# FLO-2D Preprocessor tools for QGIS\n# Copyright © 2021 Lutra Consulting for FLO-2D\n\n# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version\n\nimport os\nimport traceback\nfrom qgis.PyQt.QtCore import Qt, QSettings\nfrom qgis.PyQt.QtGui import QColor\nfrom qgis.PyQt.QtWidgets import QInputDialog, QFileDialog, QApplication\nfrom .ui_utils import load_ui, try_disconnect, set_icon\nfrom ..flo2d_ie.rainfall_io import ASCProcessor, HDFProcessor\nfrom ..utils import is_number, m_fdata\nfrom ..geopackage_utils import GeoPackageUtils\nfrom .table_editor_widget import StandardItemModel, StandardItem, CommandItemEdit\nfrom ..flo2dobjects import Rain\nfrom ..gui.dlg_sampling_rain import SamplingRainDialog\nfrom ..user_communication import UserCommunication\nfrom math import isnan\n\nuiDialog, qtBaseClass = load_ui(\"rain_editor\")\n\n\nclass RainEditorWidget(qtBaseClass, uiDialog):\n def __init__(self, iface, plot, table, lyrs):\n qtBaseClass.__init__(self)\n uiDialog.__init__(self)\n self.iface = iface\n self.con = None\n self.setupUi(self)\n self.lyrs = lyrs\n self.plot = plot\n self.plot_item_name = None\n self.table = table\n self.tview = table.tview\n self.rain = None\n self.gutils = None\n self.uc = UserCommunication(iface, \"FLO-2D\")\n self.rain_data_model = StandardItemModel()\n self.rain_tseries_data = None\n\n self.d1, self.d2 = [[], []]\n\n set_icon(self.raster_rain_btn, \"sample_rain.svg\")\n set_icon(self.show_table_btn, \"show_cont_table.svg\")\n set_icon(self.remove_tseries_btn, \"mActionDeleteSelected.svg\")\n set_icon(self.add_tseries_btn, \"mActionAddRainTimeSeries.svg\")\n set_icon(self.add_predefined_tseries_btn, \"mActionOpenFile.svg\")\n set_icon(self.rename_tseries_btn, \"change_name.svg\")\n\n def block_saving(self):\n try_disconnect(self.rain_data_model.dataChanged, self.save_tseries_data)\n\n def unblock_saving(self):\n self.rain_data_model.dataChanged.connect(self.save_tseries_data)\n\n def itemDataChangedSlot(self, item, oldValue, newValue, role, save=True):\n \"\"\"\n Slot used to push changes of existing items onto undoStack.\n \"\"\"\n if role == Qt.EditRole:\n command = CommandItemEdit(\n self, item, oldValue, newValue, \"Text changed from '{0}' to '{1}'\".format(oldValue, newValue)\n )\n self.tview.undoStack.push(command)\n return True\n\n def connect_signals(self):\n self.asc_btn.clicked.connect(self.import_rainfall)\n self.hdf_btn.clicked.connect(self.export_rainfall_to_binary_hdf5)\n self.tseries_cbo.currentIndexChanged.connect(self.populate_tseries_data)\n self.simulate_rain_grp.toggled.connect(self.set_rain)\n self.realtime_rainfall_grp.toggled.connect(self.set_realtime)\n self.building_chbox.stateChanged.connect(self.set_building)\n self.spatial_variation_grp.toggled.connect(self.set_arf)\n self.moving_storm_grp.toggled.connect(self.set_moving_storm)\n self.moving_storm_speed_dbox.editingFinished.connect(self.set_moving_storm_speed)\n self.rainfall_time_distribution_grp.toggled.connect(self.set_time_series_fid)\n\n self.n_radio.clicked.connect(self.set_n_radio)\n 
self.e_radio.clicked.connect(self.set_e_radio)\n self.s_radio.clicked.connect(self.set_s_radio)\n self.w_radio.clicked.connect(self.set_w_radio)\n self.ne_radio.clicked.connect(self.set_ne_radio)\n self.se_radio.clicked.connect(self.set_se_radio)\n self.sw_radio.clicked.connect(self.set_sw_radio)\n self.nw_radio.clicked.connect(self.set_nw_radio)\n\n self.raster_rain_btn.clicked.connect(self.raster_rain)\n\n self.total_rainfall_sbox.editingFinished.connect(self.set_tot_rainfall)\n self.rainfall_abst_sbox.editingFinished.connect(self.set_rainfall_abst)\n self.show_table_btn.clicked.connect(self.populate_tseries_data)\n self.add_tseries_btn.clicked.connect(self.add_tseries)\n self.add_predefined_tseries_btn.clicked.connect(self.add_predefined_tseries)\n self.remove_tseries_btn.clicked.connect(self.delete_tseries)\n self.rename_tseries_btn.clicked.connect(self.rename_tseries)\n self.rain_data_model.dataChanged.connect(self.save_tseries_data)\n self.table.before_paste.connect(self.block_saving)\n self.table.after_paste.connect(self.unblock_saving)\n self.rain_data_model.itemDataChanged.connect(self.itemDataChangedSlot)\n\n def setup_connection(self):\n con = self.iface.f2d[\"con\"]\n if con is None:\n return\n self.con = con\n self.gutils = GeoPackageUtils(self.con, self.iface)\n\n # qry = '''SELECT movingstorm FROM rain;'''\n # row = self.gutils.execute(qry).fetchone()\n # if is_number(row[0]):\n # if row[0] == '0':\n # self.moving_storm_chbox.setChecked(False)\n # else:\n # self.moving_storm_chbox.setChecked(True)\n\n qry = \"\"\"SELECT value FROM cont WHERE name = 'IRAIN';\"\"\"\n row = self.gutils.execute(qry).fetchone()\n if is_number(row[0]):\n if row[0] == \"0\":\n self.simulate_rain_grp.setChecked(False)\n else:\n self.simulate_rain_grp.setChecked(True)\n\n self.rain = Rain(self.con, self.iface)\n\n # self.create_plot()\n\n def import_rainfall(self):\n try:\n s = QSettings()\n last_dir = s.value(\"FLO-2D/lastASC\", \"\")\n asc_dir = QFileDialog.getExistingDirectory(\n None, \"Select directory with Rainfall ASCII grid files\", directory=last_dir\n )\n if not asc_dir:\n return\n s.setValue(\"FLO-2D/lastASC\", asc_dir)\n\n try:\n grid_lyr = self.lyrs.data[\"grid\"][\"qlyr\"]\n QApplication.setOverrideCursor(Qt.WaitCursor)\n asc_processor = ASCProcessor(grid_lyr, asc_dir) # as_processor, an instance of the ASCProcessor class,\n head_qry = \"INSERT INTO raincell (rainintime, irinters, timestamp) VALUES(?,?,?);\"\n data_qry = \"INSERT INTO raincell_data (time_interval, rrgrid, iraindum) VALUES (?,?,?);\"\n self.gutils.clear_tables(\"raincell\", \"raincell_data\")\n header = asc_processor.parse_rfc()\n time_step = float(header[0])\n self.gutils.execute(head_qry, header)\n time_interval = 0\n for rain_series in asc_processor.rainfall_sampling():\n cur = self.gutils.con.cursor()\n for val, gid in rain_series:\n cur.execute(data_qry, (time_interval, gid, val))\n self.gutils.con.commit()\n time_interval += time_step\n QApplication.restoreOverrideCursor()\n self.uc.show_info(\"Importing Rainfall Data finished!\")\n except Exception as e:\n self.uc.log_info(traceback.format_exc())\n QApplication.restoreOverrideCursor()\n self.uc.bar_warn(\n \"Importing Rainfall Data from ASCII files failed! Please check your input data.\\nIs the .RFC file missing?\"\n )\n\n except Exception as e:\n self.uc.log_info(traceback.format_exc())\n QApplication.restoreOverrideCursor()\n self.uc.show_warn(\n \"WARNING 060319.1835: Importing Rainfall Data failed! 
({0}) : {1}\".format(e.errno, e.strerror)\n )\n\n def export_rainfall_to_binary_hdf5(self):\n try:\n import h5py\n except ImportError:\n self.uc.bar_warn(\"There is no h5py module installed! Please install it to run export tool.\")\n return\n s = QSettings()\n last_dir = s.value(\"FLO-2D/lastHDF\", \"\")\n hdf_file, __ = QFileDialog.getSaveFileName(\n None, \"Export Rainfall to HDF file\", directory=last_dir, filter=\"*.hdf5\"\n )\n if not hdf_file:\n return\n s.setValue(\"FLO-2D/lastHDF\", os.path.dirname(hdf_file))\n try:\n QApplication.setOverrideCursor(Qt.WaitCursor)\n qry_header = \"SELECT rainintime, irinters, timestamp FROM raincell LIMIT 1;\"\n header = self.gutils.execute(qry_header).fetchone()\n rainintime, irinters, timestamp = header\n header_data = [rainintime, irinters, timestamp]\n qry_data = \"SELECT iraindum FROM raincell_data ORDER BY rrgrid, time_interval;\"\n data = self.gutils.execute(qry_data).fetchall()\n data = [data[i : i + irinters] for i in range(0, len(data), irinters)]\n hdf_processor = HDFProcessor(hdf_file)\n hdf_processor.export_rainfall_to_binary_hdf5(header_data, data)\n QApplication.restoreOverrideCursor()\n self.uc.show_info(\"Exporting Rainfall Data finished!\")\n except Exception as e:\n self.uc.log_info(traceback.format_exc())\n QApplication.restoreOverrideCursor()\n self.uc.bar_warn(\"Exporting Rainfall Data failed! Please check your input data.\")\n\n def create_plot(self):\n \"\"\"\n Create initial plot.\n \"\"\"\n self.plot.clear()\n if self.plot.plot.legend is not None:\n self.plot.plot.legend.scene().removeItem(self.plot.plot.legend)\n self.plot.plot.addLegend()\n\n self.plot_item_name = \"Rain timeseries\"\n self.plot.add_item(self.plot_item_name, [self.d1, self.d2], col=QColor(\"#0018d4\"))\n\n def rain_properties(self):\n if not self.rain:\n return\n\n row = self.rain.get_row()\n\n if row[\"movingstorm\"] == 1:\n self.moving_storm_grp.setChecked(True)\n else:\n self.moving_storm_grp.setChecked(False)\n\n if self.gutils.get_cont_par(\"IRAIN\") == \"1\":\n self.simulate_rain_grp.setChecked(True)\n else:\n self.simulate_rain_grp.setChecked(False)\n\n if row[\"irainreal\"] == 1:\n self.realtime_rainfall_grp.setChecked(True)\n else:\n self.realtime_rainfall_grp.setChecked(False)\n\n if row[\"irainbuilding\"] == 1:\n self.building_chbox.setChecked(True)\n else:\n self.building_chbox.setChecked(False)\n\n if row[\"irainarf\"] == 1:\n self.spatial_variation_grp.setChecked(True)\n else:\n self.spatial_variation_grp.setChecked(False)\n\n if is_number(row[\"tot_rainfall\"]):\n self.total_rainfall_sbox.setValue(float((row[\"tot_rainfall\"])))\n else:\n self.total_rainfall_sbox.setValue(0)\n\n if is_number(row[\"rainabs\"]):\n self.rainfall_abst_sbox.setValue(float(row[\"rainabs\"]))\n else:\n self.rainfall_abst_sbox.setValue(0)\n\n if is_number(row[\"rainspeed\"]):\n self.moving_storm_speed_dbox.setValue(float((row[\"rainspeed\"])))\n else:\n self.moving_storm_speed_dbox.setValue(0)\n\n self.populate_tseries()\n idx = self.tseries_cbo.findData(self.rain.series_fid)\n self.tseries_cbo.setCurrentIndex(idx)\n self.populate_tseries_data()\n self.connect_signals()\n\n def populate_tseries(self):\n self.tseries_cbo.clear()\n for row in self.rain.get_time_series():\n ts_fid, name = [x if x is not None else \"\" for x in row]\n self.tseries_cbo.addItem(name, ts_fid)\n\n def add_tseries(self):\n if not self.rain:\n return\n self.rain.add_time_series()\n self.populate_tseries()\n # self.tseries_cbo.setCurrentIndex(len(self.tseries_cbo)-1)\n\n def 
add_predefined_tseries(self):\n self.uc.clear_bar_messages()\n s = QSettings()\n last_dir = s.value(\"FLO-2D/lastPredefinedSeriesDir\", \"\")\n predefined_files, __ = QFileDialog.getOpenFileNames(\n None, \"Select time series files to import data\", directory=last_dir, filter=\"(*.DAT *.TXT)\"\n )\n if not predefined_files:\n return\n s.setValue(\"FLO-2D/lastPredefinedSeriesDir\", os.path.dirname(predefined_files[0]))\n try:\n QApplication.setOverrideCursor(Qt.WaitCursor)\n if not self.rain:\n return\n for file in predefined_files:\n tail = os.path.splitext(os.path.basename(file))[0]\n self.rain.add_time_series(tail, True)\n self.read_predefined_tseries_data(file)\n self.populate_tseries()\n\n QApplication.restoreOverrideCursor()\n self.uc.show_info(\"Importing predefined time series finished!\")\n except Exception as e:\n QApplication.restoreOverrideCursor()\n self.uc.bar_warn(\"Importing predefined time series failed! Please check your input data.\")\n\n def read_predefined_tseries_data(self, file):\n tsd_sql = \"INSERT INTO rain_time_series_data (series_fid, time, value) VALUES (?, ?, ?);\"\n data = self.parse_timeseries(file)\n ts_list = []\n for item in data:\n ts_list.append((self.rain.series_fid, float(item[0]), float(item[1])))\n self.gutils.execute_many(tsd_sql, ts_list)\n\n def parse_timeseries(self, filename):\n par = self.single_parser(filename)\n data = [row for row in par]\n return data\n\n def single_parser(self, file):\n with open(file, \"r\") as f1:\n for line in f1:\n row = line.split()\n if row:\n yield row\n\n def delete_tseries(self):\n if not self.rain:\n return\n self.rain.del_time_series()\n self.populate_tseries()\n\n def rename_tseries(self):\n if not self.rain:\n return\n new_name, ok = QInputDialog.getText(None, \"Change timeseries name\", \"New name:\")\n if not ok or not new_name:\n return\n if not self.tseries_cbo.findText(new_name) == -1:\n msg = \"WARNING 060319.1725: Time series with name {} already exists in the database. 
Please, choose another name.\".format(\n new_name\n )\n self.uc.show_warn(msg)\n return\n self.rain.set_time_series_data_name(new_name)\n self.populate_tseries()\n\n def populate_tseries_data(self):\n \"\"\"\n Get current time series data, populate data table and create plot.\n \"\"\"\n cur_ts_idx = self.tseries_cbo.currentIndex()\n cur_ts_fid = self.tseries_cbo.itemData(cur_ts_idx)\n self.rain.series_fid = cur_ts_fid\n self.rain_tseries_data = self.rain.get_time_series_data()\n if not self.rain_tseries_data:\n return\n self.create_plot()\n self.tview.undoStack.clear()\n self.tview.setModel(self.rain_data_model)\n self.rain_data_model.clear()\n self.rain_data_model.setHorizontalHeaderLabels([\"Time\", \"% of Total Storm\"])\n self.d1, self.d2 = [[], []]\n for row in self.rain_tseries_data:\n items = [StandardItem(\"{:.4f}\".format(x)) if x is not None else StandardItem(\"\") for x in row]\n self.rain_data_model.appendRow(items)\n self.d1.append(row[0] if not row[0] is None else float(\"NaN\"))\n self.d2.append(row[1] if not row[1] is None else float(\"NaN\"))\n rc = self.rain_data_model.rowCount()\n if rc < 500:\n for row in range(rc, 500 + 1):\n items = [StandardItem(x) for x in (\"\",) * 2]\n self.rain_data_model.appendRow(items)\n self.tview.horizontalHeader().setStretchLastSection(True)\n for col in range(2):\n self.tview.setColumnWidth(col, 100)\n for i in range(self.rain_data_model.rowCount()):\n self.tview.setRowHeight(i, 20)\n self.rain.set_row() # Inserts or replaces values in table 'rain'\n self.update_plot()\n\n def save_tseries_data(self):\n \"\"\"\n Get rain timeseries data and save them in gpkg.\n \"\"\"\n self.update_plot()\n ts_data = []\n for i in range(self.rain_data_model.rowCount()):\n # save only rows with a number in the first column\n if is_number(m_fdata(self.rain_data_model, i, 0)) and not isnan(m_fdata(self.rain_data_model, i, 0)):\n ts_data.append(\n (self.rain.series_fid, m_fdata(self.rain_data_model, i, 0), m_fdata(self.rain_data_model, i, 1))\n )\n else:\n pass\n data_name = self.tseries_cbo.currentText()\n self.rain.set_time_series_data(data_name, ts_data)\n\n def update_plot(self):\n \"\"\"\n When time series data for plot change, update the plot.\n \"\"\"\n if not self.plot_item_name:\n return\n self.d1, self.d2 = [[], []]\n for i in range(self.rain_data_model.rowCount()):\n self.d1.append(m_fdata(self.rain_data_model, i, 0))\n self.d2.append(m_fdata(self.rain_data_model, i, 1))\n self.plot.update_item(self.plot_item_name, [self.d1, self.d2])\n\n def raster_rain(self):\n if self.gutils.is_table_empty(\"user_model_boundary\"):\n self.uc.bar_warn(\"There is no computational domain! Please digitize it before running tool.\")\n return\n if self.gutils.is_table_empty(\"grid\"):\n self.uc.bar_warn(\"There is no grid! Please create it before running tool.\")\n return\n\n cell_size = self.get_cell_size()\n dlg = SamplingRainDialog(self.con, self.iface, self.lyrs, cell_size)\n ok = dlg.exec_()\n if ok:\n pass\n else:\n return\n try:\n if not self.gutils.is_table_empty(\"rain_arf_cells\"):\n q = \"There are some Rain ARF cells already defined in the database. 
Overwrite them?\"\n if not self.uc.question(q):\n return\n del_cells = \"DELETE FROM rain_arf_cells;\"\n self.gutils.execute(del_cells)\n\n QApplication.setOverrideCursor(Qt.WaitCursor)\n res = dlg.probe_rain()\n\n delete_null = \"\"\"DELETE FROM rain_arf_cells WHERE arf IS NULL;\"\"\"\n self.gutils.execute(delete_null)\n QApplication.restoreOverrideCursor()\n msg = \"Rain ARF sampling performed!.\\n\\n\"\n msg += 'Data was stored in the \"Rain ARF Cells\" layer.\\n'\n msg += \"Each sampled cell was assigned a rainfall depth area reduction value.\\n\"\n msg += \"They will be saved in the RAIN.DAT FLO-2D file as lines 5 if the\\n\"\n msg += '\"Spatial Variation (Depth Area Reduction)\" checkbox is toggled.'\n self.uc.show_info(msg)\n\n # if res:\n # dlg.show_probing_result_info()\n except Exception as e:\n QApplication.restoreOverrideCursor()\n self.uc.log_info(traceback.format_exc())\n self.uc.show_warn(\"WARNING 060319.1726: Probing grid elevation failed! Please check your raster layer.\")\n\n def get_cell_size(self):\n \"\"\"\n Get cell size from:\n - Computational Domain attr table (if defined, will be written to cont table)\n - cont table\n - ask user\n \"\"\"\n bl = self.lyrs.data[\"user_model_boundary\"][\"qlyr\"]\n bfeat = next(bl.getFeatures())\n if bfeat[\"cell_size\"]:\n cs = bfeat[\"cell_size\"]\n if cs <= 0:\n self.uc.show_warn(\n \"WARNING 060319.1727: Cell size must be positive. Change the feature attribute value in Computational Domain layer.\"\n )\n return None\n self.gutils.set_cont_par(\"CELLSIZE\", cs)\n else:\n cs = self.gutils.get_cont_par(\"CELLSIZE\")\n cs = None if cs == \"\" else cs\n if cs:\n if cs <= 0:\n self.uc.show_warn(\n \"WARNING 060319.1728: Cell size must be positive. Change the feature attribute value in Computational Domain layer or default cell size in the project settings.\"\n )\n return None\n return cs\n else:\n r, ok = QInputDialog.getDouble(\n None, \"Grid Cell Size\", \"Enter grid element cell size\", value=100, min=0.1, max=99999\n )\n if ok:\n cs = r\n self.gutils.set_cont_par(\"CELLSIZE\", cs)\n else:\n return None\n\n def set_rain(self):\n if not self.rain:\n return\n if self.simulate_rain_grp.isChecked():\n self.gutils.set_cont_par(\"IRAIN\", 1)\n else:\n self.gutils.set_cont_par(\"IRAIN\", 0)\n\n def set_realtime(self):\n if not self.rain:\n return\n self.rain.irainreal = self.realtime_rainfall_grp.isChecked()\n self.rain.set_row()\n\n def set_building(self):\n if not self.rain:\n return\n self.rain.irainbuilding = self.building_chbox.isChecked()\n self.rain.set_row()\n\n def set_arf(self):\n if not self.rain:\n return\n self.rain.irainarf = self.spatial_variation_grp.isChecked()\n self.rain.set_row()\n\n def set_time_series_fid(self):\n if not self.rain:\n return\n if not self.rainfall_time_distribution_grp.isChecked():\n self.rain.series_fid = \"\"\n else:\n cur_ts_idx = self.tseries_cbo.currentIndex()\n cur_ts_fid = self.tseries_cbo.itemData(cur_ts_idx)\n self.rain.series_fid = cur_ts_fid\n self.rain.set_row()\n\n def set_moving_storm(self):\n if not self.rain:\n return\n self.rain.movingstorm = self.moving_storm_grp.isChecked()\n self.rain.set_row()\n\n def set_moving_storm_speed(self):\n if not self.rain:\n return\n self.rain.rainspeed = self.moving_storm_speed_dbox.value()\n self.rain.set_row()\n\n def set_n_radio(self):\n if not self.rain:\n return\n self.rain.iraindir = 1\n self.rain.set_row()\n\n def set_e_radio(self):\n if not self.rain:\n return\n self.rain.iraindir = 2\n self.rain.set_row()\n\n def set_s_radio(self):\n if not 
self.rain:\n return\n self.rain.iraindir = 3\n self.rain.set_row()\n\n def set_w_radio(self):\n if not self.rain:\n return\n self.rain.iraindir = 4\n self.rain.set_row()\n\n def set_ne_radio(self):\n if not self.rain:\n return\n self.rain.iraindir = 5\n self.rain.set_row()\n\n def set_se_radio(self):\n if not self.rain:\n return\n self.rain.iraindir = 6\n self.rain.set_row()\n\n def set_sw_radio(self):\n if not self.rain:\n return\n self.rain.iraindir = 7\n self.rain.set_row()\n\n def set_nw_radio(self):\n if not self.rain:\n return\n self.rain.iraindir = 8\n self.rain.set_row()\n\n def set_tot_rainfall(self):\n if not self.rain:\n return\n self.rain.tot_rainfall = self.total_rainfall_sbox.value()\n self.rain.set_row()\n\n def set_rainfall_abst(self):\n if not self.rain:\n return\n self.rain.rainabs = self.rainfall_abst_sbox.value()\n self.rain.set_row()\n","sub_path":"flo2d/gui/rain_editor_widget.py","file_name":"rain_editor_widget.py","file_ext":"py","file_size_in_byte":23605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"44423939","text":"from PIL import Image\n\nclass Square:\n def __init__(self, image, dimension,width, height):\n self.image = image\n self.width = width\n self.height = height\n self.dimension = dimension\n self.total_red = 0\n self.total_green = 0\n self.total_blue = 0\n self.numberOfEntries = 0\n def reset (self):\n self.total_red = 0\n self.total_green = 0\n self.total_blue = 0\n self.numberOfEntries = 0\n\n def incrementAvg(self,pixel):\n self.total_red += pixel[0]\n self.total_green += pixel[1]\n self.total_blue += pixel[2]\n self.numberOfEntries += 1\n def getAverage(self):\n average = []\n average.append(round(self.total_red/self.numberOfEntries))\n average.append(round(self.total_green/self.numberOfEntries))\n average.append(round(self.total_blue/self.numberOfEntries))\n return tuple(average)\n\n\n def colorize (self):\n for w in range(self.dimension):\n for h in range (self.dimension):\n self.incrementAvg(self.image[w+ self.width,h + self.height])\n\n for w in range (self.dimension):\n for h in range (self.dimension):\n self.image[w + self.width,h + self.height] = self.getAverage()\n\n\nclass Image_process:\n def __init__(self, image, width, height, pixel_size):\n self.image = image\n self.width = width\n self.height = height\n self. 
pixel_size = pixel_size\n for w in range(0, self.width, self.pixel_size):\n for h in range(0, self.height, self.pixel_size):\n Square(self.image, self.pixel_size, w, h ).colorize()\n \npixel_size = 20\n\nim = Image.open(\"ass.jpg\")\nwidth, height = im.size\nprint(\"width:{} height:{}\".format(width, height))\nimage = im.load()\nImage_process(image, width, height, pixel_size)\n\n\n\n\n\n\n\n\n\nim.show()\n","sub_path":"driver.py","file_name":"driver.py","file_ext":"py","file_size_in_byte":1881,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"513520198","text":"#handle the CCSD output in TCC\n\ndef average_time(filename):\n times=[]\n finput = open(filename,\"r\")\n ccsd = False\n for line in finput:\n if line.find(\"Begining CC\") != -1:\n ccsd = True\n if(ccsd and line[0].isdigit()):\n time = float(line.split()[-1])\n times.append(time)\n finput.close()\n average = sum(times)/float(len(times)) \n return average\n\ndef CheckCCSDComplete(filename):\n finput = open(filename,\"r\")\n for line in finput:\n if line.find(\"CCSD Energy\") != -1:\n print(\"Good\",filename)\n return True\n print(\"Error\",filename)\n return False\n\ndef ReadEnergy(filename):\n finput = open(filename,\"r\")\n ccsd = False\n hf_energy = \"\"\n ccsd_energy=\"\"\n for line in finput:\n if (line.find(\"Iteration\")!=-1) and (line.find(\"energy\")!=-1):\n hf_energy = line.split()[-3]\n if (line.find(\"Final energy\")!=-1):\n hf_energy = line.split()[-1]\n if (line.find(\"HF Energy\")!=-1):\n hf_energy = line.split()[-1]\n if line.find(\"Begining CC\") != -1:\n ccsd = True\n if(ccsd and (line.find(\"CCSD Energy\")!=-1) ):\n ccsd_energy = line.split()[-1]\n\n return hf_energy, ccsd_energy\n\ndef print_energy(filename):\n finput = open(filename,\"r\")\n ccsd = False\n for line in finput:\n if line.find(\"Begining CC\") != -1:\n ccsd = True\n if(ccsd and line[0].isdigit()):\n split_line = line.split()\n print(split_line[0]),\n print(\" \"),\n print(split_line[3])\n \n finput.close()\n\ndef triple_time(filename):\n finput = open(filename,\"r\")\n time = 0.0\n for line in finput:\n if(line.find(\"(T) Energy\") != -1 and line.find(\"Time\") != -1):\n time = float(line.split(\" \")[-1])\n finput.close()\n return time\n","sub_path":"ccsd.py","file_name":"ccsd.py","file_ext":"py","file_size_in_byte":1871,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"497266545","text":"def roofcalc(floor_area, Tin, Tout): \n \n import numpy as np\n \n table2 = np.loadtxt('table2.txt')\n a = np.array(table2[:, 16])\n \n T1 = 25.5\n T2 = 29.4\n K = 0.83\n U_ceiling = 0.7\n \n #Tin = 22\n #Tout = 40\n #floor_area = 100\n \n \n r = a - a\n roofmatrix = r\n \n for j in range(0,12):\n r[j] = U_ceiling*floor_area*((23 + a[j])*K + T1 - T2 + Tout - Tin)\n # print(r[j])\n \n for i in range(0,23):\n roofmatrix = np.vstack((roofmatrix, r))\n \n roofmatrix = np.transpose(roofmatrix) \n \n return roofmatrix","sub_path":"roof.py","file_name":"roof.py","file_ext":"py","file_size_in_byte":625,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"503194735","text":"# Import statements\nimport psycopg2\nimport sys\nimport psycopg2.extras\nimport csv\nfrom psycopg2 import sql\nfrom config_example import *\n\n# Write code / functions to set up database connection and cursor here.\n\ndef get_connection_and_cursor():\n try:\n db_conn = psycopg2.connect(\"dbname = '{0}' user = '{1}' password = 
'{2}'\".format(dbname, username, password))\n print(\"connected\")\n\n except:\n print(\"fail to connect\")\n sys.exit(1)\n\n db_cursor = db_conn.cursor(cursor_factory=psycopg2.extras.DictCursor)\n return db_conn, db_cursor\n\n\n# Write code / functions to create tables with the columns you want and all database setup here.\n\ndef set_up_db():\n cur.execute(\"DROP TABLE IF EXISTS Sites\")\n cur.execute(\"DROP TABLE IF EXISTS States\")\n\n cur.execute(\"\"\"CREATE TABLE IF NOT EXISTS States(\n ID SERIAL PRIMARY KEY,\n Name VARCHAR(40) UNIQUE\n )\"\"\")\n\n cur.execute(\"\"\"CREATE TABLE IF NOT EXISTS Sites(\n ID SERIAL,\n Name VARCHAR(128) UNIQUE,\n Type VARCHAR(128),\n State_ID INTEGER REFERENCES States(ID),\n Location VARCHAR(255),\n Description TEXT\n )\"\"\")\n\n conn.commit()\n print(\"setup success\")\n\n\n\n# Write code / functions to deal with CSV files and insert data into the database here.\n\ndef insert(conn, cur, table, data_dict, no_return=False):\n \"\"\"Accepts connection and cursor, table name, dictionary that represents one row, and inserts data into table. (Not the only way to do this!)\"\"\"\n column_names = data_dict.keys()\n #print(column_names, \"column_names\") # for debug\n if not no_return:\n query = sql.SQL('INSERT INTO {0}({1}) VALUES({2}) ON CONFLICT DO NOTHING RETURNING id').format(\n sql.SQL(table),\n sql.SQL(', ').join(map(sql.Identifier, column_names)),\n sql.SQL(', ').join(map(sql.Placeholder, column_names))\n )\n else:\n query = sql.SQL('INSERT INTO {0}({1}) VALUES({2}) ON CONFLICT DO NOTHING').format(\n sql.SQL(table),\n sql.SQL(', ').join(map(sql.Identifier, column_names)),\n sql.SQL(', ').join(map(sql.Placeholder, column_names))\n )\n query_string = query.as_string(conn) # thanks to sql module\n cur.execute(query_string, data_dict) # will mean that id is in cursor, because insert statement returns id in this function\n if not no_return:\n return cur.fetchone()['id']\n\ndef csv_to_db(statename):\n state_id = insert(conn, cur, \"States\", {\"name\" : statename})\n filename = statename + '.csv'\n with open(filename, newline = '', encoding = 'utf-8') as csvfile:\n reader = csv.DictReader(csvfile)\n for row_dict in reader:\n # print(row_dict)\n del row_dict['ADDRESS']\n lower_dict = dict((k.lower(), v) for k, v in row_dict.items() if k != None)\n lower_dict['state_id'] = state_id\n insert(conn, cur, \"Sites\", lower_dict, True)\n conn.commit()\n print(\"insert success\")\n\n\n# Make sure to commit your database changes with .commit() on the database connection.\n\n\n\n# Write code to be invoked here (e.g. 
invoking any functions you wrote above)\n\nconn, cur = get_connection_and_cursor()\nset_up_db()\ncsv_to_db('arkansas')\ncsv_to_db('california')\ncsv_to_db('michigan')\n\n# Write code to make queries and save data in variables here.\n\n\ncur.execute('SELECT location FROM sites')\nall_locations = cur.fetchall()\n# print(all_locations)\n\ncur.execute(\"\"\" SELECT name FROM sites WHERE description LIKE '%beautiful%' \"\"\") # when passing a string val to postgres, single quote should be used\nbeautiful_sites = cur.fetchall()\n# print(beautiful_sites)\n\ncur.execute(\"\"\" SELECT COUNT(*) FROM SITES WHERE TYPE = 'National Lakeshore' \"\"\")\nnatl_lakeshores = cur.fetchall()\n# print(natl_lakeshores)\n\ncur.execute(\"\"\" SELECT SITES.NAME FROM SITES INNER JOIN STATES ON (SITES.STATE_ID = STATES.ID) WHERE STATES.NAME = 'michigan' \"\"\")\nmichigan_names = cur.fetchall()\n# print(michigan_names)\n\ncur.execute(\"\"\" SELECT COUNT(*) FROM SITES INNER JOIN STATES ON (SITES.STATE_ID = STATES.ID) WHERE STATES.NAME = 'arkansas' \"\"\")\ntotal_number_arkansas = cur.fetchall()\n# print(total_number_arkansas)\n\n\n# We have not provided any tests, but you could write your own in this file or another file, if you want.\n","sub_path":"SI507_project6.py","file_name":"SI507_project6.py","file_ext":"py","file_size_in_byte":4286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"354499587","text":"from django.conf import settings\nfrom django.urls import reverse\n\nimport mock\nfrom elasticsearch import TransportError\nfrom rest_framework import status\nfrom rest_framework.test import APITestCase\n\n\nclass SuggestCompanyTests(APITestCase):\n def setUp(self):\n pass\n\n @mock.patch(\"complaint_search.es_interface.filter_suggest\")\n def test_suggest_no_param(self, mock_essuggest):\n \"\"\"\n Suggesting with no parameters\n \"\"\"\n url = reverse(\"complaint_search:suggest_company\")\n response = self.client.get(url)\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)\n mock_essuggest.assert_not_called()\n self.assertDictEqual(\n {\"text\": [\"This field is required.\"]}, response.data\n )\n\n @mock.patch(\"complaint_search.es_interface.filter_suggest\")\n def test_suggest_text__valid(self, mock_essuggest):\n \"\"\"\n Suggesting with text\n \"\"\"\n url = reverse(\"complaint_search:suggest_company\")\n param = {\"text\": \"Ba\"}\n mock_essuggest.return_value = \"OK\"\n response = self.client.get(url, param)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n mock_essuggest.assert_called_once_with(\n \"company.suggest\",\n \"company.raw\",\n field=\"complaint_what_happened\",\n format=\"default\",\n frm=0,\n no_aggs=False,\n no_highlight=False,\n page=1,\n size=25,\n sort=\"relevance_desc\",\n text=\"BA\",\n )\n self.assertEqual(\"OK\", response.data)\n\n @mock.patch(\"complaint_search.es_interface.filter_suggest\")\n def test_suggest_cors_headers(self, mock_essuggest):\n \"\"\"\n Make sure the response has CORS headers in debug mode\n \"\"\"\n settings.DEBUG = True\n url = reverse(\"complaint_search:suggest_company\")\n param = {\"text\": \"20\"}\n mock_essuggest.return_value = \"OK\"\n response = self.client.get(url, param)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertTrue(response.has_header(\"Access-Control-Allow-Origin\"))\n\n @mock.patch(\"complaint_search.es_interface.filter_suggest\")\n def test_suggest__transport_error(self, mock_essuggest):\n mock_essuggest.side_effect = 
TransportError(\"N/A\", \"Error\")\n url = reverse(\"complaint_search:suggest_company\")\n param = {\"text\": \"test\"}\n response = self.client.get(url, param)\n self.assertEqual(response.status_code, 424)\n self.assertDictEqual(\n {\"error\": \"There was an error calling Elasticsearch\"},\n response.data,\n )\n","sub_path":"complaint_search/tests/test_view_suggest_company.py","file_name":"test_view_suggest_company.py","file_ext":"py","file_size_in_byte":2687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"621140296","text":"def main():\n stop = \"\"\n choise = \"\"\n\n while (stop != \"y\"):\n choise = input(\"Input 1 to use default calculator\\nInput 2 to use fractions calculator \")\n\n if (choise == \"1\"):\n input_check()\n elif (choise == \"2\"):\n fractions_count()\n else:\n print(\"No such operation. Try again\")\n\n stop = input(\"Would you like exit (Y/y - yes, other - continue)? \").lower()\n\ndef input_check():\n try:\n num1 = int(input(\"Input first number: \"))\n num2 = int(input(\"Input second number: \"))\n oper = input(\"Input operation (+, -, *, /): \")\n\n count(num1, num2, oper)\n except:\n print(\"Invalid input. Try again\")\n\ndef count(num1, num2, oper):\n if (oper == \"+\"):\n print (num1, oper, num2, \"=\", num1 + num2)\n elif (oper == \"-\"):\n print (num1, oper, num2, \"=\", num1 - num2)\n elif (oper == \"*\"):\n print (num1, oper, num2, \"=\", num1 * num2)\n elif (oper == \"/\"):\n print (num1, oper, num2, \"=\", num1 / num2)\n else:\n print(\"No such operation. Try again\")\n\ndef fractions_count():\n print(\"To make a fraction use \\\"/\\\"\")\n\n num1 = input(\"Input first fraction: \")\n num2 = input(\"Input second fraction: \")\n oper = input(\"Input operation (+, -, *, /): \")\n num1_up = \"\"\n num1_down = \"\"\n num2_up = \"\"\n num2_down = \"\"\n separator = 0\n\n for i in range(len(num1) - 1):\n if (num1[i] == \"/\"):\n separator = i\n break\n\n num1_up = int(num1[0:separator])\n num1_down = int(num1[separator + 1:len(num1)])\n\n for i in range(len(num2) - 1):\n if (num2[i] == \"/\"):\n separator = i\n break\n\n num2_up = int(num2[0:separator])\n num2_down = int(num2[separator + 1:len(num2)])\n\n if (oper == \"+\"):\n if(num1_down == num2_down):\n print (num1, oper, num2, \"=\", str(num1_up + num2_up) + \"/\" + str(num1_down))\n else:\n print (num1, oper, num2, \"=\", str(num1_up * num2_down + num2_up * num1_down) + \"/\" + str(num1_down * num2_down))\n elif (oper == \"-\"):\n if(num1_down == num2_down):\n print (num1, oper, num2, \"=\", str(num1_up - num2_up) + \"/\" + str(num1_down))\n else:\n print (num1, oper, num2, \"=\", str(num1_up * num2_down - num2_up * num1_down) + \"/\" + str(num1_down * num2_down))\n elif (oper == \"*\"):\n print (num1, oper, num2, \"=\", str(num1_up * num2_up) + \"/\" + str(num1_down * num2_down))\n elif (oper == \"/\"):\n print (num1, oper, num2, \"=\", str(num1_up * num2_down) + \"/\" + str(num1_down * num2_up))\n else:\n print(\"No such operation. 
Try again\")\n\nmain()","sub_path":"Simple-console-calculator/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"456959219","text":"from requests_html import HTMLSession\nfrom bs4 import BeautifulSoup as bs\n\n\nGAMES = {\n 'Trine 3: The Artifacts of Power': 'trine-3-the-artifacts-of-power',\n 'Trine 4: The Nightmare Prince': 'trine-4-the-nightmare-prince'\n}\n\n\nclass Fanatical:\n\n def __init__(self, prices):\n self.url = 'https://www.fanatical.com/en/game/'\n self.prices = prices\n\n def run(self):\n for game, game_url in GAMES.items():\n if game not in self.prices : self.prices[game] = []\n price_obj = {'url': self.url + game_url, 'platform': 'fanatical' }\n\n session = HTMLSession()\n resp = session.get(self.url + game_url)\n resp.html.render()\n bs_content = bs(resp.html.html, \"html.parser\")\n\n purchase = bs_content.find('div', { 'class': 'price-container' })\n discount_block = purchase.find('div', { 'class': 'was-price' })\n\n if discount_block is not None:\n price_obj['dsc'] = {\n 'pct': purchase.find('div', { 'class': 'saving-percentage' }).getText().strip(),\n 'original_price': discount_block.find('span').getText().strip(),\n 'final_price': purchase.find('div', { 'class': 'price' }).find('span').getText().strip()\n }\n else:\n price_obj['orig'] = {\n 'original_price': purchase.find('div', { 'class': 'price' }).find('span').getText().strip()\n }\n\n self.prices[game].append(price_obj)\n\n return self.prices\n","sub_path":"finder/platforms/fanatical.py","file_name":"fanatical.py","file_ext":"py","file_size_in_byte":1565,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"400276677","text":"__author__ = 'tarik'\n\n\nclass ShortInputException(Exception):\n def __init__(self, length, atleast):\n Exception.__init__(self)\n self.length = length\n self.atleast = atleast\n\n\ntry:\n something = input(\"Write something dawg: \")\n if len(something) < 3:\n raise ShortInputException(len(something), 3)\n\nexcept EOFError:\n print(\"what the fuck man, don't EOF me.\")\nexcept ShortInputException as ex:\n print(\"You typed {0} while I was expecting at least {1} characters...\".format(ex.length, ex.atleast))\n\nelse:\n print('Good job my man. 
No exception raised.')\n\n","sub_path":"catchexception.py","file_name":"catchexception.py","file_ext":"py","file_size_in_byte":593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"611825664","text":"import os\nimport datetime\nimport pathlib\nimport codecs\n\ntotalSites = 0\ntotalBCH = 0\n\nindex = 1\n\ndirPath = os.path.join(\"..\",\"..\",\"_data\")\nfilename = os.listdir(dirPath)\n\ndef countFile(dir, filename):\n path = os.path.join(dir, filename)\n #print(\"Testing site: \" + path)\n if \".yml\" in path:\n file = codecs.open(path, 'r', \"utf-8\")\n processed = True\n\n global index\n for line in file:\n #print(line)\n\n if \"- name:\" in line:\n global totalSites\n totalSites+= 1\n\n #check for bch tag\n if \"bch: \" in line:\n if \"Yes\" in line:\n global totalBCH\n totalBCH += 1\n processed = True\n index += 1\n\nprint(\"\\n acceptBitcoin.Cash Site Analyser\")\nprint(\"-================================-\")\n\nfor file in filename:\n #print(\"Testing path: \" + path)\n\tif \"examples.yml\" not in file:\n\t\tcountFile(dirPath, file)\n\n\nprint(\"- Total websites listed: \" + str(totalSites))\nprint(\"- Total websites supporting BCH: \" + str(totalBCH))\n\n#create log\ntimestamp = datetime.datetime.utcnow()\n\noutputPath = os.path.join(\".\", \"output\")\ntry:\n\tos.mkdir(outputPath)\nexcept Exception as e:\n\tpass\n\noutput = codecs.open(os.path.join(outputPath,\"bchAccepted_log.csv\"), \"a\", \"utf-8\")\n\noutput.write(str(timestamp) + \", \" + str(totalBCH) + \", \" + str(totalSites) + \"\\n\")\n\noutput.close()\n\n#create html file\noutput = codecs.open(os.path.join(\"..\",\"..\",\"_includes\",\"count_support.html\"), \"w+\", \"utf-8\")\n\nprint(\"- Generating HTML snippet for website progress bar...\")\n\noutput.write(' \\\n
\\\n
\\\n
\\\n
\\\n
' + str(totalBCH) + ' out of ' + str(totalSites) + ' websites listed support Bitcoin Cash.
\\\n
')\n\noutput.close()\n\nprint(\"\\nDone!\")\n","sub_path":"scripts/python/bchAccepted.py","file_name":"bchAccepted.py","file_ext":"py","file_size_in_byte":2208,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"109642727","text":"#!/usr/bin/env python \r\n# -*- coding: utf-8 -*- \r\n# @Time : 2019/9/20 0020 10:07 \r\n# @Author : HL \r\n# @Site : \r\n# @File : commentDemo.py \r\n# @Software: PyCharm\r\n\r\nimport json\r\nimport random\r\nimport time\r\n\r\nimport requests\r\n\r\nts = int(time.time())\r\nss2 = int(time.time() * 1000)\r\n_rticket = int(ss2 - 57000)\r\n\r\nHEADERS = {\r\n 'Host': 'api.amemv.com',\r\n 'Connection': 'keep-alive',\r\n # 'Cookie': 'install_id=86658687962; ttreq=1$81a11eb0423f772c6350c193f483aab91717bca1; odin_tt=fd7716c94fde70926ee34081f1fda9ad499571e9e6aa7a60bcea823d5871e60f1e358d6b57076eff17ec9ba6d6f99b19e0b6ad6f44e0494b2f601ecac591836f',\r\n 'accept-encoding': 'gzip',\r\n 'X-SS-REQ-TICKET': str(_rticket),\r\n 'sdk-version': '1',\r\n 'X-SS-DP': '1128',\r\n # 'x-tt-trace-id': '00-09234d0d58014020b8048053b1f14e73-09234d0d58014020-01',\r\n # 'X-Gorgon': '030000004001a1250b00b68de09217da1f43f701e76f2c3c288a',\r\n 'X-Khronos': str(int((str(int(ss2)))[:-3])),\r\n # 'accept-language': 'zh-CN,zh;q=0.9',\r\n # 'pragma': 'no-cache',\r\n # 'cache-control': 'no-cache',\r\n # 'upgrade-insecure-requests': '1',\r\n 'User-Agent': \"com.ss.android.ugc.aweme/800 (Linux; U; Android 5.1.1; zh_CN; SM-G955F; Build/JLS36C; Cronet/58.0.2991.0)\"\r\n}\r\n\r\nuser_video_params = {\r\n 'aweme_id': 6737461298134617351,\r\n 'cursor': 0,\r\n 'address_book_access': 1,\r\n 'gps_access': 1,\r\n 'forward_page_type': 1,\r\n '_rticket': _rticket,\r\n\r\n 'count': 20,\r\n 'os_api': 22,\r\n 'device_type': 'M-G955F',\r\n 'ssmix': 'a',\r\n 'manifest_version_code': 800,\r\n 'dpi': 320,\r\n 'js_sdk_version': '1.25.0.1',\r\n 'app_name': 'aweme',\r\n 'version_name': '8.0.0',\r\n 'ts': ts,\r\n 'app_type': 'normal',\r\n 'ac': 'wifi',\r\n 'update_version_code': 8002,\r\n 'channel': 'tengxun_new',\r\n 'device_platform': 'android',\r\n 'iid': 86658687962,\r\n 'version_code': 800,\r\n 'openudid': 'f46d0495fe505041',\r\n 'device_id': 68798464502,\r\n 'resolution': '1080*1920',\r\n 'os_version': '5.1.1',\r\n 'language': 'zh',\r\n 'device_brand': 'samsung',\r\n 'aid': 1128,\r\n 'mcc_mnc': '46007',\r\n 'uuid': 355757010244107,\r\n\r\n # 'max': 0,\r\n # # 'sec_user_id': 'MS4wLjABAAAAQEz_scsICUFGfJnBpg5qav7tH3Vx7f1RJklH1aTyNXM',\r\n # # 'retry_type': 'retry_type',\r\n # # 'uuid': '355757010244107',_cursor\r\n}\r\ntime1 = time.time()\r\nwhile True:\r\n res = requests.get('https://api.amemv.com/aweme/v2/comment/list/',\r\n headers=HEADERS, params=user_video_params)\r\n\r\n contentJson = json.loads(res.content.decode('utf-8'))\r\n time2 = 0\r\n aweme_list = contentJson.get('comments', [])\r\n for aweme in aweme_list:\r\n if time2 == 0:\r\n time2 = time.time()\r\n print(\"用时 \" + str(time2 - time1))\r\n else:\r\n pass\r\n print(\"number: 有值\")\r\n if contentJson.get('has_more'):\r\n max_cursor = contentJson.get('max_cursor')\r\n break\r\n else:\r\n time.sleep(round(random.uniform(1, 3), 1))\r\n pass\r\n","sub_path":"commentDemo.py","file_name":"commentDemo.py","file_ext":"py","file_size_in_byte":2997,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"14175350","text":"import logging\n\nfrom abc import ABC, abstractmethod\n\n\nfrom datarobot_drum.drum.common import (\n LOGGER_NAME_PREFIX,\n TargetType,\n 
StructuredDtoKeys,\n ModelInfoKeys,\n)\nfrom datarobot_drum.drum.utils import StructuredInputReadUtils\n\nlogger = logging.getLogger(LOGGER_NAME_PREFIX + \".\" + __name__)\n\nmlops_loaded = False\nmlops_import_error = None\ntry:\n from datarobot.mlops.mlops import MLOps\n\n mlops_loaded = True\nexcept ImportError as e:\n mlops_import_error = \"Error importing MLOps python module: {}\".format(e)\n\n\nclass BaseLanguagePredictor(ABC):\n def __init__(self):\n self._model = None\n self._positive_class_label = None\n self._negative_class_label = None\n self._class_labels = None\n self._code_dir = None\n self._params = None\n self._mlops = None\n\n def configure(self, params):\n self._code_dir = params[\"__custom_model_path__\"]\n self._positive_class_label = params.get(\"positiveClassLabel\")\n self._negative_class_label = params.get(\"negativeClassLabel\")\n self._class_labels = params.get(\"classLabels\")\n self._target_type = TargetType(params.get(\"target_type\"))\n self._params = params\n\n if self._params[\"monitor\"] == \"True\":\n if not mlops_loaded:\n raise Exception(\"MLOps module was not imported: {}\".format(mlops_import_error))\n # TODO: if server use async, if batch, use sync etc.. some way of passing params\n self._mlops = (\n MLOps()\n .set_model_id(self._params[\"model_id\"])\n .set_deployment_id(self._params[\"deployment_id\"])\n .set_channel_config(self._params[\"monitor_settings\"])\n .init()\n )\n\n def monitor(self, kwargs, predictions, predict_time_ms):\n if self._params[\"monitor\"] == \"True\":\n self._mlops.report_deployment_stats(\n num_predictions=len(predictions), execution_time_ms=predict_time_ms\n )\n\n # TODO: Need to convert predictions to a proper format\n # TODO: or add report_predictions_data that can handle a df directly..\n # TODO: need to handle associds correctly\n\n # mlops.report_predictions_data expect the prediction data in the following format:\n # Regression: [10, 12, 13]\n # Classification: [[0.5, 0.5], [0.7, 03]]\n # In case of classification, class names are also required\n class_names = self._class_labels\n if len(predictions.columns) == 1:\n mlops_predictions = predictions[predictions.columns[0]].tolist()\n else:\n mlops_predictions = predictions.values.tolist()\n if (\n self._positive_class_label is not None\n and self._negative_class_label is not None\n ):\n class_names = [self._negative_class_label, self._positive_class_label]\n\n df = StructuredInputReadUtils.read_structured_input_data_as_df(\n kwargs.get(StructuredDtoKeys.BINARY_DATA), kwargs.get(StructuredDtoKeys.MIMETYPE),\n )\n self._mlops.report_predictions_data(\n features_df=df, predictions=mlops_predictions, class_names=class_names\n )\n\n @abstractmethod\n def predict(self, **kwargs):\n \"\"\" Predict on input_filename or binary_data \"\"\"\n pass\n\n @abstractmethod\n def transform(self, **kwargs):\n \"\"\" Predict on input_filename or binary_data \"\"\"\n pass\n\n @abstractmethod\n def has_read_input_data_hook(self):\n \"\"\" Check if read_input_data hook defined in predictor \"\"\"\n pass\n\n def model_info(self):\n model_info = {\n ModelInfoKeys.TARGET_TYPE: self._target_type.value,\n ModelInfoKeys.CODE_DIR: self._code_dir,\n }\n\n if self._target_type == TargetType.BINARY:\n model_info.update({ModelInfoKeys.POSITIVE_CLASS_LABEL: self._positive_class_label})\n model_info.update({ModelInfoKeys.NEGATIVE_CLASS_LABEL: self._negative_class_label})\n elif self._target_type == TargetType.MULTICLASS:\n model_info.update({ModelInfoKeys.CLASS_LABELS: self._class_labels})\n\n return 
model_info\n","sub_path":"custom_model_runner/datarobot_drum/drum/language_predictors/base_language_predictor.py","file_name":"base_language_predictor.py","file_ext":"py","file_size_in_byte":4297,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"479882046","text":"import numpy as np\nfrom astropy.time import Time\nfrom surveysim.kpno import mayall\n\ndef earthOrientation(MJD):\n \"\"\"\n This is an approximate formula because the ser7.dat file's range\n is not long enough for the duration of the survey.\n All formulae are from the Naval Observatory.\n\n Args:\n MJD: float\n\n Returns:\n x: float (arcseconds)\n y: float (arcseconds)\n UT1-UTC: float (seconds)\n \"\"\"\n\n T = 2000.0 + (MJD - 51544.03) / 365.2422\n UT2_UT1 = 0.022*np.sin(2.0*np.pi*T) - 0.012*np.cos(2.0*np.pi*T) \\\n - 0.006*np.sin(4.0*np.pi*T) + 0.007*np.cos(4.0*np.pi*T)\n A = 2.0*np.pi*(MJD-57681.0)/365.25\n C = 2.0*np.pi*(MJD-57681.0)/435.0\n x = 0.1042 + 0.0809*np.cos(A) - 0.0636*np.sin(A) + 0.0229*np.cos(C) - 0.0156*np.sin(C) \n y = 0.3713 - 0.0593*np.cos(A) - 0.0798*np.sin(A) - 0.0156*np.cos(C) - 0.0229*np.sin(C) \n UT1_UTC = -0.3259 - 0.00138*(MJD - 57689.0) - (UT2_UT1)\n return x, y, UT1_UTC\n\ndef mjd2lst(mjd):\n \"\"\"\n Converts decimal MJD to LST in decimal degrees\n\n Args:\n mjd: float\n\n Returns:\n lst: float (degrees)\n \"\"\"\n\n lon = str(mayall.west_lon_deg) + 'd'\n lat = str(mayall.lat_deg) + 'd'\n \n t = Time(mjd, format = 'mjd', location=(lon, lat))\n lst_tmp = t.copy()\n \n #try:\n # lst_str = str(lst_tmp.sidereal_time('apparent'))\n #except IndexError:\n # lst_tmp.delta_ut1_utc = -0.1225\n # lst_str = str(lst_tmp.sidereal_time('apparent'))\n\n x, y, dut = earthOrientation(mjd)\n lst_tmp.delta_ut1_utc = dut\n lst_str = str(lst_tmp.sidereal_time('apparent'))\n # 23h09m35.9586s\n # 01234567890123\n if lst_str[2] == 'h':\n lst_hr = float(lst_str[0:2])\n lst_mn = float(lst_str[3:5])\n lst_sc = float(lst_str[6:-1])\n else:\n lst_hr = float(lst_str[0:1])\n lst_mn = float(lst_str[2:4])\n lst_sc = float(lst_str[5:-1])\n lst = lst_hr + lst_mn/60.0 + lst_sc/3600.0\n lst *= 15.0 # Convert from hours to degrees\n return lst\n\ndef radec2altaz(ra, dec, lst):\n \"\"\"\n Converts from ecliptic to horizontal coordinate systems.\n\n Args:\n ra: float, observed right ascension (degrees)\n dec: float, observed declination (degrees)\n lst: float, local sidereal time (degrees)\n\n Returns:\n alt: float, altitude i.e. 
elevation (degrees)\n az: float, azimuth (degrees)\n \"\"\"\n h = np.radians(lst - ra)\n if h < 0.0:\n h += 2.0*np.pi\n d = np.radians(dec)\n phi = np.radians(mayall.lat_deg)\n \n sinAz = np.sin(h) / (np.cos(h)*np.sin(phi) - np.tan(d)*np.cos(phi))\n sinAlt = np.sin(phi)*np.sin(d) + np.cos(phi)*np.cos(d)*np.cos(h)\n\n if sinAlt > 1.0:\n sinAlt = 1.0\n if sinAlt < -1.0:\n sinAlt = -1.0\n if sinAz > 1.0:\n sinAz = 1.0\n if sinAz < -1.0:\n sinAz = -1.0\n\n return np.degrees(np.arcsin(sinAlt)), np.degrees(np.arcsin(sinAz))\n\ndef angsep(ra1, dec1, ra2, dec2):\n \"\"\"\n Calculates the angular separation between two objects.\n\n Args:\n ra1: float (degrees)\n dec1: float (degrees)\n ra2: float (degrees)\n dec2: float (degrees)\n\n Returns:\n delta: float (degrees)\n \"\"\"\n\n deltaRA = np.radians(ra1-ra2)\n DEC1 = np.radians(dec1)\n DEC2 = np.radians(dec2)\n cosDelta = np.sin(DEC1)*np.sin(DEC2) + np.cos(DEC1)*np.cos(DEC2)*np.cos(deltaRA)\n return np.degrees(np.arccos(cosDelta))\n\n\n","sub_path":"py/surveysim/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"401959239","text":"from src import plugins\nfrom config import settings\nimport requests\nfrom concurrent.futures import ThreadPoolExecutor,ProcessPoolExecutor\nclass BaseClient(object):\n\n def process(self):\n raise NotImplementedError('派生类必须实现process方法')\n\n def send(self,info):\n # 将资产数据发送到API\n print(settings.API)\n\n\n response = requests.post(\n url=settings.API,\n json = info\n # data = info\n )\n\nclass AgentClient(BaseClient):\n\n def process(self):\n\n info = plugins.server_info()\n self.send(info)\n\nclass SubBaseClient(BaseClient):\n def get_host_list(self):\n import json\n response = requests.get(settings.API)\n host_list = json.loads(response.text)\n return host_list\n def task(self,hostname):\n info = plugins.server_info(hostname)\n # 将数据发送到API\n self.send(info)\nclass SshClient(SubBaseClient):\n\n def process(self):\n # 获取今日未采集的主机列表 [c1.com,c2.com,c3.com]\n host_list = self.get_host_list()\n pool = ThreadPoolExecutor(10)\n for host in host_list:\n pool.submit(self.task,host)\n\n\n\nclass SaltClient(SubBaseClient):\n\n def process(self):\n # 获取今日未采集的主机列表\n host_list = self.get_host_list()\n pool = ThreadPoolExecutor(10)\n for host in host_list:\n pool.submit(self.task,host)","sub_path":"CMDB/Client/src/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":1458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"542390874","text":"# -*- coding:utf-8 -*-\n#@Time :2020/7/18 12:12\n#@Author : Sun\n#@File :conftest.py\n\nimport pytest\nimport yaml\nfrom pythoncode.calc import Calculator\n\n@pytest.fixture(scope='function', autouse=True)\ndef start():\n print(\"开始计算\")\n cla = Calculator()\n yield\n print(\"计算结束\")\n\n\ndef pytest_collection_modifyitems(session,config,items):\n print(items)\n print(len(items))\n #倒序执行 items里面的测试用例\n # items.reverse()\n \"\"\"\n 测试用例收集完成时,将收集到的item的name和nodeid的中文显示在控制台上\n :return:\n \"\"\"\n for item in items:\n item.name = item.name.encode(\"utf-8\").decode(\"unicode_escape\")\n print(item.nodeid)\n item._nodeid = item.nodeid.encode(\"utf-8\").decode(\"unicode_escape\")\n\n\n\n","sub_path":"testing/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":803,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} 
+{"seq_id":"168131769","text":"import unittest\n\nimport numpy\n\nimport clpy\nfrom clpy import testing\n\n\nclass TestEinSumError(unittest.TestCase):\n\n # '...' ellipsis is not supported.\n def test_not_supported_ellipsis(self):\n with self.assertRaises(TypeError):\n clpy.einsum('...', 0)\n\n @testing.numpy_clpy_raises()\n def test_no_arguments(self, xp):\n xp.einsum()\n\n @testing.numpy_clpy_raises()\n def test_one_argument(self, xp):\n xp.einsum('')\n\n @testing.numpy_clpy_raises()\n def test_not_string_subject(self, xp):\n xp.einsum(0, 0)\n\n @testing.numpy_clpy_raises()\n def test_bad_argument(self, xp):\n xp.einsum('', 0, bad_arg=0)\n\n @testing.numpy_clpy_raises()\n def test_too_many_operands1(self, xp):\n xp.einsum('', 0, 0)\n\n @testing.numpy_clpy_raises()\n def test_too_many_operands2(self, xp):\n xp.einsum('i,j', xp.array([0, 0]), xp.array([0, 0]), xp.array([0, 0]))\n\n @testing.numpy_clpy_raises()\n def test_too_few_operands1(self, xp):\n xp.einsum(',', 0)\n\n @testing.numpy_clpy_raises()\n def test_many_dimension1(self, xp):\n xp.einsum('i', 0)\n\n @testing.numpy_clpy_raises()\n def test_many_dimension2(self, xp):\n xp.einsum('ij', xp.array([0, 0]))\n\n @testing.numpy_clpy_raises()\n def test_too_few_dimension(self, xp):\n xp.einsum('i->i', xp.arange(6).reshape(2, 3))\n\n @testing.numpy_clpy_raises()\n def test_invalid_char1(self, xp):\n xp.einsum('i%', xp.array([0, 0]))\n\n @testing.numpy_clpy_raises()\n def test_invalid_char2(self, xp):\n xp.einsum('j$', xp.array([0, 0]))\n\n @testing.numpy_clpy_raises()\n def test_invalid_char3(self, xp):\n xp.einsum('i->&', xp.array([0, 0]))\n\n # output subscripts must appear in inumpy.t\n @testing.numpy_clpy_raises()\n def test_invalid_output_subscripts1(self, xp):\n xp.einsum('i->ij', xp.array([0, 0]))\n\n # output subscripts may only be specified once\n @testing.numpy_clpy_raises()\n def test_invalid_output_subscripts2(self, xp):\n xp.einsum('ij->jij', xp.array([[0, 0], [0, 0]]))\n\n # output subscripts must not incrudes comma\n @testing.numpy_clpy_raises()\n def test_invalid_output_subscripts3(self, xp):\n xp.einsum('ij->i,j', xp.array([[0, 0], [0, 0]]))\n\n # dimensions much match when being collapsed\n @testing.numpy_clpy_raises()\n def test_invalid_diagonal1(self, xp):\n xp.einsum('ii', xp.arange(6).reshape(2, 3))\n\n @testing.numpy_clpy_raises()\n def test_invalid_diagonal2(self, xp):\n xp.einsum('ii->', xp.arange(6).reshape(2, 3))\n\n # invalid -> operator\n @testing.numpy_clpy_raises()\n def test_invalid_arrow1(self, xp):\n xp.einsum('i-i', xp.array([0, 0]))\n\n @testing.numpy_clpy_raises()\n def test_invalid_arrow2(self, xp):\n xp.einsum('i>i', xp.array([0, 0]))\n\n @testing.numpy_clpy_raises()\n def test_invalid_arrow3(self, xp):\n xp.einsum('i->->i', xp.array([0, 0]))\n\n @testing.numpy_clpy_raises()\n def test_invalid_arrow4(self, xp):\n xp.einsum('i-', xp.array([0, 0]))\n\n\n@testing.parameterize(\n {'shape_a': (2, 3), 'subscripts': 'ij'}, # do nothing\n {'shape_a': (2, 3), 'subscripts': 'ij'}, # transpose\n {'shape_a': (3, 3), 'subscripts': 'ii->i'}, # diagonal 2d\n {'shape_a': (3, 3, 3), 'subscripts': 'jii->ij'}, # partial diagonal 3d\n {'shape_a': (3, 3, 3), 'subscripts': 'iji->ij'}, # partial diagonal 3d\n {'shape_a': (3, 3, 3), 'subscripts': 'iii->i'}, # diagonal 3d\n {'shape_a': (2, 3, 4), 'subscripts': 'ijk->jik'}, # swap axes\n {'shape_a': (2, 3, 4), 'subscripts': 'ijk->kij'}, # swap axes\n {'shape_a': (2, 3, 4), 'subscripts': 'ijk->ikj'}, # swap axes\n {'shape_a': (2, 3, 4), 'subscripts': 'kji->ikj'}, # swap 
axes\n {'shape_a': (3,), 'subscripts': 'i->'}, # sum\n {'shape_a': (3, 3), 'subscripts': 'ii'}, # trace\n {'shape_a': (2, 2, 2, 2), 'subscripts': 'ijkj->kij'}, # trace\n {'shape_a': (2, 2, 2, 2), 'subscripts': 'ijij->ij'}, # trace\n {'shape_a': (2, 2, 2, 2), 'subscripts': 'jiji->ij'}, # trace\n)\nclass TestEinSumUnaryOperation(unittest.TestCase):\n # Avoid overflow\n skip_dtypes = (numpy.bool_, numpy.int8, numpy.uint8)\n\n @testing.for_all_dtypes(no_complex=True)\n @testing.numpy_clpy_allclose()\n def test_einsum_unary(self, xp, dtype):\n if dtype in self.skip_dtypes:\n return xp.array([])\n a = testing.shaped_arange(self.shape_a, xp, dtype)\n return xp.einsum(self.subscripts, a)\n\n\n@testing.parameterize(\n # outer\n {'shape_a': (2,), 'shape_b': (3,),\n 'subscripts': 'i,j', 'skip_overflow': False},\n # dot matvec\n {'shape_a': (2, 3), 'shape_b': (3,),\n 'subscripts': 'ij,j', 'skip_overflow': False},\n {'shape_a': (2, 3), 'shape_b': (2,),\n 'subscripts': 'ij,i', 'skip_overflow': False},\n # dot matmat\n {'shape_a': (2, 3), 'shape_b': (3, 4),\n 'subscripts': 'ij,jk', 'skip_overflow': False},\n # tensordot\n {'shape_a': (3, 4, 2), 'shape_b': (4, 3, 2),\n 'subscripts': 'ijk, jil -> kl', 'skip_overflow': True},\n # trace and tensordot and diagonal\n {'shape_a': (2, 3, 2, 4), 'shape_b': (3, 2, 2),\n 'subscripts': 'ijil,jkk->kj', 'skip_overflow': True},\n)\nclass TestEinSumBinaryOperation(unittest.TestCase):\n skip_dtypes = (numpy.bool_, numpy.int8, numpy.uint8)\n\n @testing.for_all_dtypes_combination(['dtype_a', 'dtype_b'])\n @testing.numpy_clpy_allclose()\n def test_einsum_binary(self, xp, dtype_a, dtype_b):\n if self.skip_overflow and (dtype_a in self.skip_dtypes or\n dtype_b in self.skip_dtypes):\n return xp.array([])\n a = testing.shaped_arange(self.shape_a, xp, dtype_a)\n b = testing.shaped_arange(self.shape_b, xp, dtype_b)\n return xp.einsum(self.subscripts, a, b)\n\n\nclass TestEinSumBinaryOperationWithScalar(unittest.TestCase):\n @testing.for_all_dtypes()\n @testing.numpy_clpy_allclose()\n def test_scalar_1(self, xp, dtype):\n shape_a = (2,)\n a = testing.shaped_arange(shape_a, xp, dtype)\n return xp.asarray(xp.einsum(',i->', 3, a))\n\n @testing.for_all_dtypes()\n @testing.numpy_clpy_allclose()\n def test_scalar_2(self, xp, dtype):\n shape_a = (2,)\n a = testing.shaped_arange(shape_a, xp, dtype)\n return xp.asarray(xp.einsum('i,->', a, 4))\n\n\n@testing.parameterize(\n {'shape_a': (2, 3), 'shape_b': (3, 4), 'shape_c': (4, 5),\n 'subscripts': 'ij,jk,kl', 'skip_overflow': True},\n {'shape_a': (2, 4), 'shape_b': (2, 3), 'shape_c': (2,),\n 'subscripts': 'ij,ik,i->ijk', 'skip_overflow': False},\n {'shape_a': (2, 4), 'shape_b': (3, 2), 'shape_c': (2,),\n 'subscripts': 'ij,ki,i->jk', 'skip_overflow': False},\n)\nclass TestEinSumTernaryOperation(unittest.TestCase):\n skip_dtypes = (numpy.bool_, numpy.int8, numpy.uint8)\n\n @testing.for_all_dtypes(no_complex=True)\n @testing.numpy_clpy_allclose(contiguous_check=False)\n def test_einsum_ternary(self, xp, dtype):\n if self.skip_overflow and dtype in self.skip_dtypes:\n return xp.array([])\n a = testing.shaped_arange(self.shape_a, xp, dtype)\n b = testing.shaped_arange(self.shape_b, xp, dtype)\n c = testing.shaped_arange(self.shape_c, xp, dtype)\n return xp.einsum(self.subscripts, a, b, c).astype(numpy.float32)\n","sub_path":"tests/clpy_tests/linalg_tests/test_einsum.py","file_name":"test_einsum.py","file_ext":"py","file_size_in_byte":7338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} 
+{"seq_id":"316645018","text":"#!/usr/bin/env python\r\n# -*- coding:utf-8 -*-\r\n\r\nimport wx\r\n\r\nclass ChildFrame(wx.Frame):\r\n def __init__(self, parent):\r\n wx.Frame.__init__(self, parent, -1, \"child frame\", pos=(100, 100))\r\n\r\nclass MyWindow(wx.Frame):\r\n def __init__(self, parent, id):\r\n wx.Frame.__init__(self, parent, id, \"main frame\")\r\n panel = wx.Panel(self)\r\n self.showChildBtn = wx.Button(panel, label=\"show child\", pos=(10, 10))\r\n self.exitBtn = wx.Button(panel, label=\"exit\", pos=(100, 10))\r\n self.Bind(wx.EVT_BUTTON, self.showChild, self.showChildBtn)\r\n self.Bind(wx.EVT_BUTTON, self.exit, self.exitBtn)\r\n def showChild(self, event):\r\n childFrame = ChildFrame(self)\r\n childID = childFrame.Show()\r\n def exit(self, event):\r\n self.Close(True)\r\n\r\nif __name__ == '__main__':\r\n app = wx.App(0)\r\n frame = MyWindow(parent=None, id=-1)\r\n frame.Show()\r\n app.MainLoop()\r\n","sub_path":"src/TopWindow.py","file_name":"TopWindow.py","file_ext":"py","file_size_in_byte":929,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"191221948","text":"'''\nGiven a 2D board containing 'X' and 'O' (the letter O), capture all regions surrounded by 'X'.\n\nA region is captured by flipping all 'O's into 'X's in that surrounded region.\n\nExample:\n\nX X X X\nX O O X\nX X O X\nX O X X\nAfter running your function, the board should be:\n\nX X X X\nX X X X\nX X X X\nX O X X\n'''\n\ndef surround(board):\n if not board: return \n M = len(board)\n N = len(board[0])\n \n for i in range(M):\n for j in range(N):\n if board[i][j] == 'O':\n board[i][j] = '$'\n\n def fill(i, j):\n if i < 0 or i >= M or j < 0 or j >= N or board[i][j] != '$':\n return\n\n board[i][j] = 'O'\n\n fill(i+1, j)\n fill(i-1, j)\n fill(i, j+1)\n fill(i, j-1)\n\n for i in range(M):\n for j in range(N):\n if (i in [0, M-1] or j in [0, N-1]) and board[i][j] == '$':\n fill(i, j)\n \n for i in range(M):\n for j in range(N):\n if board[i][j] == '$':\n board[i][j] = 'X'\n\n return board\n\n","sub_path":"algorithms/graphs/surround_region.py","file_name":"surround_region.py","file_ext":"py","file_size_in_byte":1036,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"154377382","text":"DEFAULT_USER_COL = \"userID\"\nDEFAULT_ITEM_COL = \"itemID\"\nDEFAULT_RATING_COL = \"rating\"\nDEFAULT_TIMESTAMP_COL = \"timestamp\"\nDEFAULT_PREDICTION_COL = \"prediction\"\nDEFAULT_HEADER = (\n DEFAULT_USER_COL,\n DEFAULT_ITEM_COL,\n DEFAULT_RATING_COL,\n DEFAULT_TIMESTAMP_COL,\n)\nDEFAULT_SPLIT_FLAG = \"split_flag\"\nDEFAULT_TEST_SIZE = 0.2\nDEFAULT_VAL_SIZE = 0.2\n","sub_path":"utils/common/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"271076485","text":"import time\nimport importlib\n\nfrom tsp_utilities import *\n\n##############################################\n## ADD HERE YOUR NEW SOLVERS CLASSES #########\n##############################################\nactive_solvers = [\"Bruteforce\",\n #\"Dwave_tsp\",\n \"TSP_genetico\"]\n##############################################\n##############################################\n\ndef main():\n\n starting_node = 0\n nodes = 5\n\n G = get_graph(nodes)\n cost_matrix = get_cost_matrix(G, nodes)\n\n for solver_ in active_solvers:\n ClassName = getattr(importlib.import_module(\"solvers.\"+solver_.lower()), solver_)\n instance = ClassName()\n 
route = instance.calculate(G, cost_matrix, starting_node)\n print(\"Route for %s:\" % solver_)\n print(route)\n print(\"Cost: %s\" % calculate_cost(cost_matrix, route))\n draw_tsp_solution(G, route)\n\n\nif __name__ == '__main__':\n main()\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":937,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"506903155","text":"\"\"\"\n@brief Example \"four panel\" performance plots using pyIrfLoader.\n\n@author J. Chiang\n\"\"\"\n#\n# $Header$\n#\n\nimport bisect\nimport numpy as num\nimport pylab\nimport pyIrfLoader\n\nclass FunctionWrapper(object):\n def __init__(self, func):\n self.func = func\n def __call__(self, xx, **kwds):\n try:\n y = []\n for x in xx:\n y.append(self.func(x, **kwds))\n if isinstance(xx, num.ndarray):\n y = num.array(y)\n return y\n except TypeError:\n return self.func(xx, **kwds)\n def __getattr__(self, attrname):\n return getattr(self.func, attrname)\n\n_win_id = 0\n\nclass Window(object):\n def __init__(self, id=None):\n global _win_id\n if id is None:\n id = _win_id\n _win_id += 1\n self.fig = pylab.figure(id)\n self.axes = self.fig.add_subplot(111)\n self.id = id\n def set_title(self, title):\n self.axes.set_title(title)\n\ndef setAxis(xrange=None, yrange=None):\n axisrange = list(pylab.axis())\n if xrange is not None:\n axisrange[:2] = xrange\n if yrange is not None:\n axisrange[2:] = yrange\n pylab.axis(axisrange)\n\ndef plot_curve(x, y, xlog=0, ylog=0, xname='x', yname='y', \n oplot=0, color='k', lineStyle='-', linewidth=1,\n xrange=None, yrange=None):\n if oplot == 0:\n win = Window()\n else:\n win = None\n marker = '%s%s' % (color, lineStyle)\n if xlog and ylog:\n pylab.loglog(x, y, marker, markersize=3, linewidth=linewidth)\n elif xlog:\n pylab.semilogx(x, y, marker, markersize=3, linewidth=linewidth)\n elif ylog:\n pylab.semilogy(x, y, marker, markersize=3, linewidth=linewidth)\n else:\n pylab.plot(x, y, marker, markersize=3, linewidth=linewidth)\n if not oplot:\n pylab.xlabel(xname)\n pylab.ylabel(yname)\n setAxis(xrange, yrange)\n return win\n\nlogspace = lambda xmin, xmax, nx : num.logspace(num.log10(xmin),\n num.log10(xmax), nx)\n\npyIrfLoader.Loader_go()\n\nfactory = pyIrfLoader.IrfsFactory_instance()\n\nirfName = \"P7SOURCE_V6MC\"\n\nfront = factory.create(irfName + \"::FRONT\")\nback = factory.create(irfName + \"::BACK\")\n\npsf_f = front.psf()\npsf_b = back.psf()\n\nradii = logspace(1e-2, 30., 30)\n\n@FunctionWrapper\ndef theta_68(energy, psf=None, inc=0, phi=0, frac=0.68):\n f = FunctionWrapper(lambda x : psf.angularIntegral(energy, inc, phi, x))\n y = f(radii)\n indx = bisect.bisect(y, frac) - 1\n return ((frac - y[indx])/(y[indx+1] - y[indx])\n *(radii[indx+1] - radii[indx]) + radii[indx])\n\nenergies = logspace(20., 3e5, 40)\nplot1 = plot_curve(energies, theta_68(energies, psf=psf_f),\n xlog=1, ylog=1, xname='Energy (MeV)',\n yname='theta_68 (deg)')\nplot1.set_title('normal incidence')\nplot_curve(energies, theta_68(energies, psf=psf_b), oplot=1, lineStyle=':')\n\naeff_f = front.aeff()\naeff_b = back.aeff()\n\n@FunctionWrapper\ndef aeff(energy, aeffObj=None, inc=0, phi=0):\n return aeffObj.value(energy, inc, phi)\n\nplot2 = plot_curve(energies, aeff(energies, aeffObj=aeff_f), xlog=1,\n xname='Energy (MeV)', yname='eff. 
area (cm^2)')\nplot2.set_title('normal incidence')\nplot_curve(energies, aeff(energies, aeffObj=aeff_b), oplot=1, lineStyle=':')\n\n@FunctionWrapper\ndef aeff_profile(inc, aeffObj=None, energy=1e3, phi=0):\n return aeffObj.value(energy, inc, phi)\n\nthetas = num.arange(70, dtype=float)\n\nplot3 = plot_curve(thetas, aeff_profile(thetas, aeffObj=aeff_f),\n xname='inclination (deg)', yname='eff. area (cm^2)')\nplot3.set_title('E = 1 GeV')\nplot_curve(thetas, aeff_profile(thetas, aeffObj=aeff_b), oplot=1,\n lineStyle=':')\n\n@FunctionWrapper\ndef th68_profile(inc, psf=None, energy=1e3, phi=0, frac=0.68):\n f = FunctionWrapper(lambda x : psf.angularIntegral(energy, inc, phi, x))\n y = f(radii)\n indx = bisect.bisect(y, frac) - 1\n return ((frac - y[indx])/(y[indx+1] - y[indx])\n *(radii[indx+1] - radii[indx]) + radii[indx])\n\nplot4 = plot_curve(thetas, th68_profile(thetas, psf=psf_f),\n xname='inclination (deg)', yname='theta_68 (deg)')\nplot4.set_title('E = 1 GeV')\nplot_curve(thetas, th68_profile(thetas, psf=psf_b), oplot=1,\n lineStyle=':')\n","sub_path":"pyIrfLoader/python/four_panel.py","file_name":"four_panel.py","file_ext":"py","file_size_in_byte":4338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"4950070","text":"#!/usr/bin/python\n\nimport sys\nfrom functools import reduce\n\n# ALGORITHM:\n# obtain file name from sys.argv[1]\n# check that file exists\n# try read contents from file\n# parse contents:\n\t# skip empty lines and comments\n\t# if non-empty line, read states from it\n\t# check states are valid\n\t# read next N lines\n\t\t# check line syntax:\n\t\t\t# has N+1 words\n\t\t\t# fist word is in states[]\n\t\t\t# other words are floats\n\t\t# add rules to table\n\t# check table\n\t\t# give error if normalization fails\n\t\t# give warning if semi-detailed balance fails\n# generate collision function to argv[2]\n\ndef error(message):\n\tprint()\n\tprint(\"ERROR\", message)\n\tprint()\n\tsys.exit(1)\n\ndef extract_words(line):\n\treturn [x for x in line.replace('\\t', ' ').split(' ') if x.strip()]\n\ndef validate_prob(word, line):\n\ttry:\n\t\tnum=float(word)\n\t\tif num>=0 and num<=1:\n\t\t\treturn\n\texcept ValueError:\n\t\tpass\n\terror(\"Not a probability (\" + str(word) + ') in line: ' + line)\n\ndef validate_state(word, line):\n\ttry:\n\t\tnum=int(word)\n\t\tif num>=0 and num<=255:\n\t\t\treturn\n\texcept ValueError:\n\t\tpass\n\terror(\"Not a state (\" + str(word) + ') in line: ' + line)\n\nrules={}\n\ndef init_rules():\n\tglobal rules\n\tfor i in range(256):\n\t\trules[i]={i: 1.0}\n\ndef add_rule(s0, s1, p):\n\tglobal rules\n\trules[s0][s1]=p\n\tif p==0:\n\t\tdel rules[s0][s1]\n\ndef is_deterministic():\n\tfor r in rules:\n\t\tif len(rules[r])>1:\n\t\t\treturn False\n\treturn True\n\ninit_rules()\n\nif len(sys.argv) != 3:\n\tprint()\n\tprint(\"Usage:\", sys.argv[0], \"\", \"\")\n\tprint()\n\tsys.exit()\n\ntry:\n\tlines=[x for x in open(sys.argv[1]).readlines() if x.strip()]\nexcept IOError:\n\terror(\"ERROR: Failed to read from file \" + sys.argv[1])\n\ni=0\nwhile i= len(lines):\n\t\terror(\"Unexpected end of file, need more lines for states (\" +\\\n\t\t\t', '.join(states) + '), given in line ' + line);\n\ti+=1\n\tfor j in range(len(states)):\n\t\twords=extract_words(lines[i+j])\n\t\tif len(words) != len(states)+1:\n\t\t\terror(\"Too few words in line \" + lines[i+j])\n\t\tvalidate_state(words[0], lines[i+j])\n\t\tif words[0] != states[j]:\n\t\t\terror(\"Invalid state (\" + words[0] + \"), need (\" 
+\\\n\t\t\t\tstates[j] + \") in line \" + lines[i+j])\n\t\tfor k in range(len(words)-1):\n\t\t\tvalidate_prob(words[k+1], lines[i+j])\n\t\t\tadd_rule(int(words[0]), int(states[k]), float(words[k+1]))\n\ti+=len(states)\n\n# check normalization\nfor s0 in range(256):\n\tsum_prob=0\n\tif len(rules[s0])>0:\n\t\tsum_prob=reduce(lambda x, y: x+y,\n\t\t\t[rules[s0][s1] for s1 in rules[s0]])\n\tif abs(sum_prob-1)>0.0000001:\n\t\terror(\"Normalization fails for state (\" + str(s0) +\\\n\t\t\t\"), sum prob is \" + str(sum_prob))\n\n# generate code\ntry:\n\tf=open(sys.argv[2], 'w')\n\tf.write(\"char collide(char cell)\\n\")\n\tf.write(\"{\\n\")\n\tif not is_deterministic():\n\t\tf.write(\"\\tdouble r=drand48();\\n\")\n\tf.write(\"\\tswitch(cell) {\\n\")\n\tfor s0 in range(256):\n\t\tif len(rules[s0])==1 and s0 in rules[s0]:\n\t\t\tcontinue # trivial rule\n\t\tf.write(\"\\t\\tcase \" + str(s0) + \":\\n\")\n\t\tcur_prob=0\n\t\tfor s1, prob in rules[s0].items():\n\t\t\tcur_prob+=prob\n\t\t\tif cur_prob<1.0:\n\t\t\t\tf.write(\"\\t\\t\\tif (r<\" + str(cur_prob) + \") return \" +\\\n\t\t\t\t\tstr(s1) + \";\\n\")\n\t\t\telse:\n\t\t\t\tf.write(\"\\t\\t\\treturn \" + str(s1) + \";\\n\");\n\tf.write(\"\\t\\tdefault:\\n\")\n\tf.write(\"\\t\\t\\treturn cell;\\n\")\n\tf.write(\"\\t}\\n\")\n\tf.write(\"}\\n\")\nexcept IOError:\n\terror(\"Failed to write output to file \" + sys.argv[2])\n","sub_path":"system/gen_collide.py","file_name":"gen_collide.py","file_ext":"py","file_size_in_byte":3479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"164042749","text":"from django.db import models\nfrom django.contrib.auth.models import AbstractUser\n\nfrom .constants import *\n\nclass User(AbstractUser):\n \"\"\"\n Extended UserModel.\n Extra Field that are required for the User Model\n are added here.\n Any changes here should also be reflected in admin.ADDITIONAL_FIELDS\n so they can appear on the admin panel\n \"\"\"\n has_changed_username = models.BooleanField(default=False,verbose_name=\"Has changed username?\")\n last_login = models.DateTimeField(verbose_name=\"Last Logged in\", blank=True, null=True)\n role = models.SmallIntegerField(\n choices=USER_ROLES, \n null = True,\n verbose_name=\"User Role\", \n help_text=\"\"\"\n This is an integer field, each role is assigned a specific integer:
\n 0 for Students
\n 1 for Teachers
\n 2 for Principals
\n \"\"\"\n )\n\n\nclass School(models.Model):\n created_by = models.OneToOneField(User, on_delete=models.SET_NULL, null=True)\n name = models.CharField(max_length=50, verbose_name=\"Name\")\n location = models.TextField(max_length=150, blank=True, verbose_name=\"Location\")\n\n def __unicode__(self):\n return self.name\n \n def __str__(self):\n return self.name+', '+self.location\n\n\n\nclass Student(models.Model):\n user = models.OneToOneField(User, on_delete=models.CASCADE)\n name = models.CharField(max_length=40, blank=True, verbose_name=\"Name\")\n city = models.CharField(max_length=30, blank=True, verbose_name=\"City\")\n school = models.ForeignKey(School, null=True, blank=True, on_delete=models.SET_NULL)\n standard = models.PositiveSmallIntegerField(choices=STUDENT_STD, null=True, blank=True, verbose_name=\"Class\")\n email = models.EmailField(max_length=254, blank=True, verbose_name=\"Email\")\n contact_no = models.CharField(max_length=20, blank=True, verbose_name=\"Contact No\")\n guardian_name = models.CharField(max_length=40, blank=True, verbose_name=\"Parents/Guardian's Name\")\n guardian_contact = models.CharField(max_length=20, blank=True, verbose_name=\"Parents/Guardian's Contact No\")\n guardian_address = models.TextField(max_length=150, blank=True, verbose_name=\"Parents/Guardian's Address\")\n\n\n def __unicode__(self):\n return self.name\n \n def __str__(self):\n return self.name\n\n","sub_path":"webD-backend/authentication/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"519894845","text":"import sys\nimport math\n\nsys.setrecursionlimit(10**6)\ninput = sys.stdin.readline\n\n\ndef dist(p1, p2):\n return math.sqrt((p1[0]-p2[0])**2 + (p1[1] - p2[1])**2)\n\n\ndef getP(x: int):\n if parents[x] == x:\n return x\n parents[x] = getP(parents[x])\n return parents[x]\n\n\ndef union(x: int, y: int):\n px, py = getP(x), getP(y)\n if px > py:\n parents[px] = py\n else:\n parents[py] = px\n\n\ndef find(x: int, y: int):\n px, py = getP(x), getP(y)\n return px == py\n\n\nN, M = map(int, input().split())\n# 0 사용 X , 각 노드의 위치\npoints = [list(map(float, input().split()))for _ in range(N)]\npoints = [[0, 0]] + points\n# 미리 연결된 노드\nnode_connected = [list(map(int, input().split())) for _ in range(M)]\n\nparents = [i for i in range(N+1)] # N개의 정점의 부모 ( 0 사용 X )\nans = 0\nfor i in range(len(node_connected)):\n u, v = node_connected[i]\n union(u, v)\n # ans += dist(points[u], points[v])\n\n# 모든 간선에대해, 크루스칼 적용\nedges = []\nfor i in range(1, N+1):\n for j in range(1, N+1):\n if i == j:\n continue\n edges.append((i, j, dist(points[i], points[j])))\nedges.sort(key=lambda x: x[2])\nfor i in range(len(edges)):\n u, v, w = edges[i]\n if not find(u, v):\n union(u, v)\n ans += w\n\n# print(\"%.2f\" % ans)\nprint(round(ans, 2))\n","sub_path":"BOJ_Gold/1774.py","file_name":"1774.py","file_ext":"py","file_size_in_byte":1346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"66605391","text":"from selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium import webdriver\nimport time\nimport math\n#import os\n\ntry: \n link = \"http://suninjuly.github.io/explicit_wait2.html\"\n browser = webdriver.Chrome()\n #browser.implicitly_wait(5)\n browser.get(link)\n\n 
price_element=WebDriverWait(browser,12).until(EC.text_to_be_present_in_element((By.ID,\"price\"),\"$100\"))\n #price=price_element.text\n book_button=browser.find_element(By.ID,\"book\")\n book_button.click()\n \n x_element=browser.find_element_by_css_selector(\"#input_value\")\n x=x_element.text\n result=str(math.log(abs(12*math.sin(int(x)))))\n result_input=browser.find_element_by_css_selector(\"input#answer\")\n result_input.send_keys(result)\n submit_button=browser.find_element(By.ID,'solve') #WebDriverWait(browser,2).until(EC.element_to_be_clickable((By.ID,'submit')))\n submit_button.click()\n\n \nfinally:\n # ожидание чтобы визуально оценить результаты прохождения скрипта\n time.sleep(20)\n # закрываем браузер после всех манипуляций\n browser.quit()\n","sub_path":"Scripts/Module2/lesson2_4_step8.py","file_name":"lesson2_4_step8.py","file_ext":"py","file_size_in_byte":1269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"187041988","text":"#!/usr/bin/env python\r\n# !-*-coding:utf-8 -*-\r\n\"\"\"\r\n@version: python3.7\r\n@author: v-enshi\r\n@license: Apache Licence\r\n@contact: 123@qq.com\r\n@site:\r\n@software: PyCharm\r\n@file: Queries2.py\r\n@time: 2019/4/22 14:21\r\n\r\ntraing epoch =8\r\nlstm+attan\r\nno test\r\n\"\"\"\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\nimport torch.optim as optim\r\n\r\nimport random\r\nimport numpy as np\r\nimport time\r\n\r\n\r\ntime_start = time.time()\r\ntorch.manual_seed(1)\r\n\r\nuse_gpu = False\r\n#use_gpu = True\r\nif use_gpu:\r\n device = torch.device(\"cuda\")\r\n max_vocab_size = 10000\r\n CONTEXT_WINDOW = 100\r\nelse:\r\n device = torch.device(\"cpu\")\r\n max_vocab_size = 100\r\n CONTEXT_WINDOW = 100\r\n\r\n##1. data loading\r\narr=np.load(r\"../data/python/training.npz\")\r\ninputs = arr['input_data']\r\nparents = arr['parent_data']\r\ntargets = arr['target_data']\r\nvalue_vocab = arr['value_vocab'].item()\r\ntype_vocab = arr['type_vocab'].item()\r\n\r\n\r\ndata_loading = time.time()\r\nprint(\"data loading\", data_loading - time_start)\r\n\r\n\r\n##2. 
parameters setting\r\nif use_gpu:\r\n EMBEDDING_value = 1200\r\n EMBEDDING_type = 300\r\n HIDDEN_SIZE = 1500\r\n BATCH_SIZE = 1\r\nelse:\r\n EMBEDDING_value = 2\r\n EMBEDDING_type = 3\r\n HIDDEN_SIZE = 5\r\n\r\n BATCH_SIZE = 1\r\n\r\n## 3.1 LSTM component\r\nclass LSTM_component(nn.Module):\r\n\r\n def __init__(self, vocab_value_size, value_dim, vocab_type_size, type_dim,\r\n hidden_dim, batch_size, context_window, dropout_p=0.5):\r\n super(LSTM_component, self).__init__()\r\n self.hidden_dim = hidden_dim\r\n self.batch_size = batch_size\r\n self.context_window = context_window\r\n self.value_embeddings = nn.Embedding(vocab_value_size, value_dim)\r\n self.type_embeddings = nn.Embedding(vocab_type_size, type_dim)\r\n self.dropout = nn.Dropout(dropout_p)\r\n self.lstm = nn.LSTM(value_dim + type_dim, hidden_dim)\r\n\r\n def forward(self, sentence, hc):\r\n\r\n embeds_type = self.type_embeddings(sentence[0])\r\n embeds_value = self.value_embeddings(sentence[1])\r\n embeds = torch.cat([embeds_value, embeds_type], 1).view(len(sentence[0]), 1, -1)\r\n h0 = hc[0]\r\n c0 = hc[1]\r\n embeds[:-self.context_window]\r\n\r\n lstm_out1, (lstm_h1, lstm_c1) = self.lstm(self.dropout(embeds[:-self.context_window]), (h0, c0))\r\n\r\n lstm_out, (lstm_h, lstm_c) = self.lstm(embeds[-self.context_window:], (lstm_h1, lstm_c1))\r\n\r\n return lstm_out, lstm_h, lstm_c\r\n\r\n def initHidden(self):\r\n return torch.zeros(1, self.batch_size, self.hidden_dim, device=device), torch.zeros(1, self.batch_size,\r\n self.hidden_dim,\r\n device=device)\r\n\r\n'''\r\nmodel_LSTM = LSTM_component(20,EMBEDDING_value,10,EMBEDDING_type,HIDDEN_SIZE, BATCH_SIZE)\r\n\r\n\r\nwith torch.no_grad():\r\n inputs =torch.tensor([[ 5, 11, 5, 11, 5, 11, 5, 11, 5, 12],\r\n [ 0, 0, 0, 0, 6, 0, 8, 0, 0, 7]])\r\n output, hn,cn = model_LSTM(inputs,model_LSTM.initHidden())\r\n print(\"tag_scorces\", output, hn,cn)\r\n'''\r\n\r\n## 3.2 attention component\r\nclass Context_atten(nn.Module):\r\n def __init__(self, hidden_dim, context_window, dropout_p=0.25):\r\n super(Context_atten, self).__init__()\r\n self.hidden_dim = hidden_dim\r\n self.context_window = context_window\r\n\r\n self.Wm = nn.Parameter(torch.ones(hidden_dim, hidden_dim))\r\n self.V = nn.Parameter(torch.ones(hidden_dim, 1))\r\n self.linear1 = nn.Linear(hidden_dim, hidden_dim, bias=False)\r\n\r\n def forward(self, inputs, hc):\r\n # Mt = inputs[-self.context_window:,:,:]\r\n Mt = inputs.view(self.context_window, self.hidden_dim) #\r\n one_TL = torch.ones(self.context_window, 1, device=device) # (L,1)\r\n\r\n At = torch.mm(torch.tanh(torch.mm(Mt, self.Wm) + torch.mm(one_TL, self.linear1(hc.view(1, -1)))), self.V)\r\n alphat = F.softmax(At.view(1, -1), dim=1) # [1,3]\r\n ct = torch.mm(alphat, Mt)\r\n return alphat, ct\r\n\r\n\r\n'''\r\nmodel_catten = Context_atten(HIDDEN_SIZE, CONTEXT_WINDOW)\r\n\r\nwith torch.no_grad():\r\n alpha , out_ct = model_catten (output, hn)\r\n print(out_ct)\r\n'''\r\n\r\n\r\n## 3.3parent attention\r\nclass Parent_atten(nn.Module):\r\n def __init__(self, hidden_dim, context_window):\r\n super(Parent_atten, self).__init__()\r\n self.hidden_dim = hidden_dim\r\n self.context_window = context_window\r\n self.Wg_linear = nn.Linear(hidden_dim * 3, hidden_dim, bias=False)\r\n self.Wv_linear = nn.Linear(hidden_dim, self.context_window)\r\n\r\n def forward(self, ht, ct, pt):\r\n Gt = torch.tanh(self.Wg_linear(torch.cat([ht.view(1, -1), ct.view(1, -1), pt.view(1, -1)], 1)))\r\n yt = F.log_softmax(self.Wv_linear(Gt), dim=1)\r\n return 
yt\r\n\r\n\r\n'''\r\nmodel_par_atten_type = Parent_atten(HIDDEN_SIZE, CONTEXT_WINDOW)\r\nwith torch.no_grad():\r\n Yt_type = model_par_atten_type ( hn,out_ct,output[6])\r\n print(Yt_type)\r\n\r\n'''\r\n\r\n## 3 main model\r\nclass main_model(nn.Module):\r\n def __init__(self, vocab_value_size, value_dim, vocab_type_size, type_dim,\r\n hidden_dim, batch_size, context_window, dropout_p=0.25):\r\n super(main_model, self).__init__()\r\n self.hidden_dim = hidden_dim\r\n self.batch_size = batch_size\r\n self.context_window = context_window\r\n\r\n self.model_LSTM = LSTM_component(vocab_value_size, value_dim, vocab_type_size, type_dim, hidden_dim, batch_size,\r\n context_window).to(device)\r\n self.model_catten = Context_atten(hidden_dim, context_window).to(device)\r\n self.model_par_atten_value = Parent_atten(hidden_dim, vocab_value_size).to(device)\r\n\r\n def forward(self, inputs, hc, parent):\r\n output, hn, cn = self.model_LSTM(inputs, hc)\r\n alpha, out_ct = self.model_catten(output, hn)\r\n Yt = self.model_par_atten_value(hn, out_ct, output[-parent - 1])\r\n return Yt\r\n\r\n def initHidden(self):\r\n return torch.zeros(1, self.batch_size, self.hidden_dim, device=device), torch.zeros(1, self.batch_size,\r\n self.hidden_dim,\r\n device=device)\r\n\r\n\r\n## 4 training\r\nmodel = main_model(len(value_vocab), EMBEDDING_value, len(type_vocab), EMBEDDING_type, HIDDEN_SIZE, BATCH_SIZE,CONTEXT_WINDOW).to(device)\r\nloss_function = nn.NLLLoss()\r\nlearning_rate = 0.01\r\ndecay = 0.6\r\noptimizer = optim.SGD(model.parameters(), lr=learning_rate, weight_decay=decay)\r\nclip = 5\r\nnn.utils.clip_grad_norm_(model.parameters(), clip)\r\nlosses = []\r\n\r\nstaring_training = time.time()\r\nprint(\"staring training \",staring_training-time_start)\r\nnum_epochs=8\r\nfor epoch in range(num_epochs):\r\n print('Epoch {}/{}'.format(epoch, num_epochs - 1))\r\n print('-' * 10)\r\n total_loss = 0\r\n print_loss_total = 0 # Reset every print_every\r\n plot_loss_total = 0 # Reset every plot_every\r\n # query = [context, predict_node, position, same_node_position, parent_node_position]\r\n print(len(targets))\r\n for i in range(len(targets)):\r\n start = time.time()\r\n # step1 init\r\n optimizer.zero_grad()\r\n # step 2 prepare the data\r\n #print(\"inputs[i]\",i,inputs[i])\r\n input = [torch.tensor(inputs[i][0] ,device=device),torch.tensor(inputs[i][1],device=device)]\r\n parent = parents[i]\r\n target = torch.tensor([targets[i]], dtype =torch.long,device=device)\r\n #\r\n\r\n # step 3 get the scorece\r\n yt_point = model(input, model.initHidden(), parent)\r\n #print(\"y_point\",yt_point)\r\n\r\n # step 4 train\r\n loss = loss_function(yt_point.view(1, -1), target)\r\n\r\n loss.backward()\r\n optimizer.step()\r\n\r\n # loss\r\n total_loss += loss.item()\r\n topv, topi = yt_point.data.topk(1)\r\n eval_index = topi.squeeze().detach()\r\n # print(eval_index)\r\n end = time.time()\r\n print(i,\"batch time spend\",end-start)\r\n\r\n now = time.time()\r\n length = len(inputs[i][0])\r\n print('epoch = %d time spend:%s loss average%.4f' % (\r\n epoch + 1, now - time_start, total_loss / length))\r\n losses.append(total_loss / length)\r\n\r\nprint(losses)\r\ntorch.save(model.state_dict(), 'params_lstm_attn.pkl')\r\nmodel.load_state_dict(torch.load('params_lstm_attn.pkl'))\r\n","sub_path":"source code/model2.py","file_name":"model2.py","file_ext":"py","file_size_in_byte":8528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"93540276","text":"from . 
import halconfig_types as types\nfrom . import halconfig_dependency as dep\n\nname = \"UARTNCP\"\ndisplayname = \"UART NCP\"\ncompatibility = dep.Dependency() # all\ncategory = \" NCP\"\nstudio_module = {\n \"basename\": \"SDK.HAL.UARTNCP\",\n \"modules\": [types.StudioFrameworkModule(\"ZIGBEE\", types.Framework.ZNET),\n types.StudioFrameworkModule(\"THREAD\", types.Framework.THREAD),\n types.StudioFrameworkModule(\"BLE\", types.Framework.BLE)],\n }\noptions = {\n \"BSP_UARTNCP_USART_PORT\": {\n \"type\": types.Peripheral(filter=[\"USART\", \"UART\", \"LEUART\"],\n inherit_options=True,\n define_value_prefix=\"HAL_SERIAL_PORT_\",\n mode=\"uart\"),\n \"description\": \"UART port\",\n \"longdescription\": \"Select UART port for NCP communication\",\n },\n}\n","sub_path":"platform/hwconf_data/efr32fg14p/modules/UARTNCP/UARTNCP_model.py","file_name":"UARTNCP_model.py","file_ext":"py","file_size_in_byte":873,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"74029317","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /home/linjiao/dv/beim/beim/scripts/build.py\n# Compiled at: 2013-12-08 21:45:16\n\n\ndef build(export_root):\n import sys, os\n pwd = os.path.abspath(os.curdir)\n import sys\n sys.path = [\n pwd] + sys.path\n from beim.build import build_release, clean_up\n rt = build_release(pwd, export_root=export_root)\n if rt:\n import sys\n sys.exit(1)\n\n\ndef getsrc():\n p = 'src'\n if hasSubdirs(p):\n return\n from .getsrc import main\n main()\n\n\ndef hasSubdirs(p):\n skip = '.svn'\n import os\n entries = os.listdir(p)\n for e in entries:\n if e in skip:\n continue\n p1 = os.path.join(p, e)\n if os.path.isdir(p1):\n return True\n continue\n\n return False\n\n\ndef main():\n getsrc()\n import sys, os, shlex\n from beim.datastore import open\n build_info = open('build_info')\n if len(sys.argv) == 2:\n export_root = sys.argv[1]\n elif build_info.get('export_root'):\n export_root = build_info['export_root']\n else:\n export_root = os.path.abspath('EXPORT')\n build_info['export_root'] = export_root\n del build_info\n deps_root = os.path.join(export_root, 'deps')\n env = os.environ.copy()\n env['PATH'] = '%s:%s' % (\n os.path.join(deps_root, 'bin'), env['PATH'])\n env['LD_LIBRARY_PATH'] = '%s:%s' % (\n os.path.join(deps_root, 'lib'), env.get('LD_LIBRARY_PATH') or '')\n env['DYLD_LIBRARY_PATH'] = '%s:%s' % (\n os.path.join(deps_root, 'lib'), env.get('DYLD_LIBRARY_PATH') or '')\n env['PYTHONPATH'] = '%s:%s' % (\n os.path.join(deps_root, 'python'), env.get('PYTHONPATH', ''))\n cmd = '%s -c \"from beim.scripts.build import build; build(%r)\"' % (\n sys.executable, export_root)\n args = shlex.split(cmd)\n import subprocess\n p = subprocess.Popen(args, env=env)\n while 1:\n p.communicate()\n rt = p.poll()\n if rt is not None:\n break\n else:\n continue\n\n if rt:\n raise RuntimeError('Command %s failed or aborted' % cmd)\n return\n\n\n__id__ = '$Id$'","sub_path":"pycfiles/beim-0.01.tar/build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":2229,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"462772547","text":"import requests\nfrom lxml import etree\n\n\n# 爬取免费代理IP 来源xicidaili.com\nclass ProxyFetch:\n\tdef __init__(self):\n\t\tself.headers = {\n\t\t\t\"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) 
Chrome/66.0.3359.139 Safari/537.36\",\n\t\t\t# \"Referer\": \"http://www.xicidaili.com/\",\n\t\t}\n\n\tdef start_urls(self):\n\t\t#为了测试只爬取前三页\n\t\treturn [\"http://www.xicidaili.com/nn/%d\" % i for i in range(1,3)]\n\n\tdef parse_url(self, url):\n\t\treturn requests.get(url, headers=self.headers).content.decode()\n\n\tdef get_content_list(self, html_str):\n\t\tcontent_list = []\n\t\thtml = etree.HTML(html_str)\n\t\ttr_list = html.xpath('//table[@id=\"ip_list\"]/tr')[1:]\n\t\tprint(tr_list)\n\t\tfor tr in tr_list:\n\t\t\titem = {}\n\t\t\titem[\"ip\"] = tr.xpath('./td[2]/text()')[0]\n\t\t\titem[\"port\"] = tr.xpath('./td[3]/text()')[0]\n\t\t\tcontent_list.append(item)\n\t\treturn content_list\n\n\tdef save_content_list(self, content_list):\n\t\twith open(\"proxy.json\", \"a\", encoding=\"utf-8\") as f:\n\t\t\tfor ip in content_list:\n\t\t\t\tf.write(\"http://%s:%s\" % (ip[\"ip\"], ip[\"port\"]))\n\t\t\t\tf.write(\"\\n\")\n\n\tdef run(self):\n\t\tstart_urls = self.start_urls()\n\t\tfor url in start_urls:\n\t\t\thtml_str = self.parse_url(url)\n\t\t\t# print(html_str)\n\t\t\tcontent_list = self.get_content_list(html_str)\n\t\t\tself.save_content_list(content_list)\n\nif __name__ == '__main__':\n\tspider = ProxyFetch()\n\tspider.run()\n","sub_path":"proxy_fetch_xicidaili.py","file_name":"proxy_fetch_xicidaili.py","file_ext":"py","file_size_in_byte":1372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"323415469","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals # standard\nimport json, sys, os, time, threading, asyncio # standard\nimport requests # da scaricare\nfrom telethon import TelegramClient, events, sync # da scaricare\nfrom serietvapi_bot.__init__ import __version__\n\nclass Client(TelegramClient):\n\tdef __init__(self, tg_api_id, tg_api_hash, bot):\n\t\tsuper().__init__('SerieTvItaly_bot_session', tg_api_id, tg_api_hash)\n\t\tself.tg_id = tg_api_id\n\t\tself.tg_hash = tg_api_hash\n\t\tself.bot_api = bot\n\n\tdef invia_file(self, nome_file, destinatario = \"@SerieTvItaly_bot\", caption = \"\"):\n\t\tself.send_file(destinatario, nome_file, caption=caption, force_document=True)\n\nclass Bot_API(Client):\n\tdef __init__(self, id, pw):\n\t\tself.id = id\n\t\tself.pw = pw\n\n\tdef execute_command(self, version, name_function, type_action, params={}, headers={}):\n\t\tdef_paras = {'a_id':self.id, 'a_pw':self.pw}\n\t\tif (len(params) != 0):\n\t\t\tfor key in params:\n\t\t\t\tdef_paras[key] = params[key]\n\t\tresponse = requests.get(\"https://serietvitalia.ml/api/\" + type_action + \"/\" + str(version) + \"/\" + name_function, params=def_paras, headers=headers)\n\t\tjson_response = response.json()\n\t\treturn json_response\n\t\t\n\nfiles = []\n\ndef main():\n\ttry:\n\t\t# update to latest version\n\t\trj = requests.get(\"https://serietvitalia.ml/api/r/1/api_versions?av=\" + __version__ + \"&tv=API&s=stable\").json()\n\t\tif (rj['ok'] == True):\n\t\t\tos.system('sudo pip install -U serietvapi_bot')\n\n\t\t# controlliamo se il file per le credenziali esiste\n\t\tif not os.path.exists(\"SerieTvItaly_bot.json\"):\n\t\t\tjson.dump({'api_id': 603638, 'api_hash': 'e0c8fdcd4516ef60e80c6bf89708d628', 'bot_a_id': None, 'bot_a_pw': None}, open(\"SerieTvItaly_bot.json\", 'w'))\n\n\t\twith open(\"SerieTvItaly_bot.json\") as f:\n\t\t\tconfig_file = json.load(f)\n\t\t\n\t\tbot_api_id = config_file['bot_a_id']\n\t\tbot_api_pw = config_file['bot_a_pw']\n\n\t\tif (bot_api_id == None or bot_api_pw == None):\n\t\t\tprint(\"Le credenziali 
non sono valide, aggiornale ora\")\n\t\t\tbot_api_id = input(\"@SerieTvItaly_bot > Immetti la tua api_id: \")\n\t\t\tbot_api_pw = input(\"@SerieTvItaly_bot > Immetti la tua api_pw: \")\n\n\t\twhile bot_api_id == \"\":\n\t\t\tprint(\"Devi inserire un ID valido\")\n\t\t\tbot_api_id = input(\"@SerieTvItaly_bot > Immetti la tua api_id: \")\n\n\t\twhile bot_api_pw == \"\":\n\t\t\tprint(\"Devi inserire una PW valida\")\n\t\t\tbot_api_pw = input(\"@SerieTvItaly_bot > Immetti la tua api_pw: \")\n\n\t\trj_status = requests.get(\"https://serietvitalia.ml/api/r/1/account_status?a_id=\" + str(bot_api_id) + \"&a_pw=\" + str(bot_api_pw)).json()\n\n\t\tif (rj_status['ok'] == True):\n\t\t\tprint(\"L'account è valido, inserirò le credenziali API del Bot SerieTvItaly_bot per le prossime volte.\\nDovrai nuovamente inserirle quando l'account scadrà.\")\n\t\telse:\n\t\t\tprint(\"L'account non è valido devi inserire nuovamente le credenziali API del Bot SerieTvItaly_bot\")\n\t\t\tbot_api_pw = \"\"\n\t\t\tbot_api_id = \"\"\n\t\t\tjson.dump({'api_id': config_file['api_id'], 'api_hash': config_file['api_hash'], 'bot_a_id': None, 'bot_a_pw': None}, open(\"SerieTvItaly_bot.json\", 'w'))\n\t\t\twhile bot_api_id == \"\":\n\t\t\t\tprint(\"Devi inserire un ID valido\")\n\t\t\t\tbot_api_id = input(\"@SerieTvItaly_bot > Immetti la tua api_id: \")\n\n\t\t\twhile bot_api_pw == \"\":\n\t\t\t\tprint(\"Devi inserire una PW valida\")\n\t\t\t\tbot_api_pw = input(\"@SerieTvItaly_bot > Immetti la tua api_pw: \")\n\n\t\t\trj_status = requests.get(\"https://serietvitalia.ml/api/r/1/account_status?a_id=\" + str(bot_api_id) + \"&a_pw=\" + str(bot_api_pw)).json()\n\t\tjson.dump({'api_id': 603638, 'api_hash': 'e0c8fdcd4516ef60e80c6bf89708d628', 'bot_a_id': bot_api_id, 'bot_a_pw': bot_api_pw}, open(\"SerieTvItaly_bot.json\", 'w'))\n\t\t\n\t\twith open(\"SerieTvItaly_bot.json\") as f:\n\t\t\tconfig_file = json.load(f)\n\t\t\n\t\tbot = Bot_API(config_file['bot_a_id'], config_file['bot_a_pw'])\n\t\tclient = Client(config_file['api_id'], config_file['api_hash'], bot)\n\t\tprint(\"Ora è tutto pronto per funzionare in modo corretto\")\n\t\twith client:\n\t\t\tif \"download_episode\" in rj_status[\"purpose\"]:\n\t\t\t\tclient.send_message(\"@SerieTvItaly_bot\", \"api \" + bot.id + \" benvenuto {}\".format(__version__))\n\t\t\t\tfrom serietvapi_bot import download_episode\n\t\t\t\tep_t = []\n\t\t\t\twhile True:\n\t\t\t\t\tEpisode_Downloader = download_episode.Downloader(bot)\n\t\t\t\t\tEpisode_Downloader.start()\n\t\t\t\t\tep_t.append(Episode_Downloader)\n\t\t\t\t\tfor t in ep_t:\n\t\t\t\t\t\tt.join()\n\n\t\t\t\t\tfor file in Episode_Downloader.files:\n\t\t\t\t\t\tprint(\"Carico il file chiamato \" + file)\n\t\t\t\t\t\trj = bot.execute_command(1, \"tg_message\", \"r\", {'msg':'📤 Caricamento file in corso\\n\\nIl file chiamato ' + file + ' è in fase di invio al Bot.'}, Episode_Downloader.download_ua)\n\t\t\t\t\t\tclient.invia_file(file, caption=os.path.splitext(file)[0])\n\t\t\t\t\t\tos.remove(file)\n\n\t\t\t\t\tif Episode_Downloader.flag_error == True:\n\t\t\t\t\t\tprint(\"Rilevato un errore durante la fase di download di un epiodio, vedere descrizione fornita dal bot\")\n\t\t\t\t\t\tbreak\n\n\t\t\telse:\n\t\t\t\tprint(\"Non ho riconosciuto questa funzione come valida, assicurati di avere l'ultima versione di questo script.\")\n\n\n\texcept Exception as e:\n\t\tprint(\"Errore: \" + str(e))\n\t\texit()\n\nif __name__ == 
\"__main__\":\n\tmain()","sub_path":"serietvapi_bot/API_main.py","file_name":"API_main.py","file_ext":"py","file_size_in_byte":5043,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"539854985","text":"from django.conf import settings\nfrom django.db import models\n\nfrom product_details import product_details\n\n\nENGLISH_LANGUAGE_CHOICES = sorted(\n [(key.lower(), u'{0} ({1})'.format(key, value['English']))\n for key, value in product_details.languages.items()]\n)\n\nENGLISH_COUNTRY_CHOICES = sorted(\n [(code, u'{0} ({1})'.format(code, name)) for code, name in\n product_details.get_regions('en-US').items()\n if code not in settings.INELIGIBLE_COUNTRIES]\n)\n\n\nclass LocaleField(models.CharField):\n description = ('CharField with locale settings specific to Flicks '\n 'defaults.')\n\n def __init__(self, *args, **kwargs):\n options = {\n 'max_length': 32,\n 'default': settings.LANGUAGE_CODE,\n 'choices': ENGLISH_LANGUAGE_CHOICES\n }\n options.update(kwargs)\n return super(LocaleField, self).__init__(*args, **options)\n\n\nclass CountryField(models.CharField):\n description = ('CharField with country settings specific to Flicks '\n 'defaults.')\n\n def __init__(self, *args, **kwargs):\n options = {\n 'max_length': 16,\n 'default': u'us',\n 'choices': ENGLISH_COUNTRY_CHOICES\n }\n options.update(kwargs)\n return super(CountryField, self).__init__(*args, **options)\n\n\n# South introspection rules for custom fields\nfrom south.modelsinspector import add_introspection_rules\nadd_introspection_rules([], ['^flicks\\.base\\.models\\.LocaleField'])\nadd_introspection_rules([], ['^flicks\\.base\\.models\\.CountryField'])\n","sub_path":"flicks/base/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"626613757","text":"import time\n\ndef nanostr(timenow):\n l = list( \"{0:.9f}\".format( timenow ) ) # convert to list\n p = l.index(\".\") # find position of the letter \"a\"\n del(l[p]) # delete it\n return ( \"\".join(l) ) # convert\n\n\nfrom parse import main as Enphase\n\nimport sys\n\nimport subprocess\nimport pprint\npp = pprint.PrettyPrinter(indent=4)\npprint = pp.pprint\n\ndef main(arg):\n results = Enphase(False)\n timestr = nanostr( time.time() )\n\n #currently = float(results['Currently'].split()[0])\n\n if ( results['Currently'].split()[1] == \"kW\" ) :\n currently = str(float(results['Currently'].split()[0])*1000)\n else:\n currently = results['Currently'].split()[0]\n\n\n microinverters = results['Number of Microinverters Online']\n\n pprint( currently )\n pprint( microinverters )\n\n compose = ['curl', '-i', '-XPOST', 'http://localhost:8086/write?db=Enphase', '--data-binary', 'Generating value='+currently+\" \"+ timestr ]\n pprint(compose)\n subprocess.call(compose)\n\n compose = ['curl', '-i', '-XPOST', 'http://localhost:8086/write?db=Enphase', '--data-binary', 'Microinverters value='+microinverters+\" \"+ timestr ]\n pprint(compose)\n subprocess.call(compose)\n\n\n\nif __name__ == \"__main__\":\n try:\n while ( 1 ):\n main(sys.argv)\n time.sleep(5)\n except KeyboardInterrupt:\n print('Received Ctrl-c')\n","sub_path":"parse2influx.py","file_name":"parse2influx.py","file_ext":"py","file_size_in_byte":1399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"31914042","text":"import numpy as np\n\nfrom Simulation import SimulaTX, SimulaX\nfrom IC import 
u0\nfrom EigenF import EigeF, u02\nfrom JNM import Js\nfrom cheby import Cheby\nfrom Convergence import conv_IC\n\n\ndef run_fishers():\n \"\"\"\n Solves the Fishers-KPP Stochastic equation on 1D using Spectral method\n based on the spectral decomposition of the Ornstein-Uhlenbeck semigroup\n associated to the Kolmogorov equation and compute the norm between two solutions\n with different initial conditions for a fixed point in real space.\n\n Returns\n -------\n xSpace : array; discretized real space\n tim : array; discretized time\n simulation1 : array, shape(len(tim), len(xSpace))\n Array containing the solutions of partial equation\n simulation2 : array, shape(len(tim), len(xSpace))\n Array containing the solutions of partial equation with the IC approximated\n norms : array; norms between two solutions\n times : array; discretized time\n \"\"\"\n # Diffusion coefficient\n nu = 0.1\n\n # Parameteres of the method\n N = 7\n Q = 200\n\n # Discretization\n xSpace = np.linspace(0, 1, 512)\n tim = np.linspace(0, 10, 256)\n\n # Creating set J^{N;M}\n J = Js(N)\n M = len(J[:, 1])\n\n # Hermite polynomials evaluation\n rule1 = np.polynomial.hermite_e.hermegauss(Q)\n rulesX = rule1[0][::-1]\n rulesW = rule1[1]\n LRules = len(rulesX)\n\n # Simulation Space-Time\n EigValRe, EigValIm, EigVecRe, EigVecIm, U_1 = EigeF(J, N, M, rulesX, rulesW, LRules, xSpace, nu, u0)\n H1 = SimulaX(J, M, xSpace, nu, u0)\n\n # Aproximation to u0\n aprox = Cheby.fit(u0, 0, 1, 7)\n H2 = SimulaX(J, M, xSpace, nu, aprox)\n U_2 = u02(J, M, rulesX, rulesW, LRules, xSpace, nu, aprox)\n\n simulation1 = SimulaTX(xSpace, tim, M, EigValRe, EigValIm, EigVecRe, EigVecIm, U_1, H1)\n simulation2 = SimulaTX(xSpace, tim, M, EigValRe, EigValIm, EigVecRe, EigVecIm, U_2, H2)\n\n # compute convergence\n norms, times = conv_IC(xSpace, tim, M, J, EigValRe, EigValIm, EigVecRe, EigVecIm, U_1, U_2)\n \n return xSpace, tim, simulation1, simulation2, norms, times\n\n\nxSpace, tim, simulation1, simulation2, norms, times = run_fishers()\n\n\n","sub_path":"Thesis/Codes/Fisher_Stochastic/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2167,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"42336","text":"# Copyright 2016 Mirantis, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport pytest\n\nfrom fuel_ccp_tests.helpers import post_install_k8s_checks as funcs\n\ntest_images1 = [\n \"artifactory.example.net:5000/hyperkube-amd64:v1.4.1-test_100\",\n \"andyshinn/dnsmasq:2.72\",\n \"artifactory.example.net:5001/calico/node:v0.20.0-mcp-7b31adc\",\n \"artifactory.example.net:5001/calico/ctl:v0.20.0-mcp-7b31adc\",\n \"artifactory.example.net:5000/hyperkube-amd64:v1.4.1-test_100\",\n]\n\ntest_images2 = [\n \"andyshinn/dnsmasq:2.72\",\n \"gcr.io/google_containers/pause-amd64:3.0\",\n \"quay.io/coreos/etcd:v3.0.1\",\n]\n\nrequired_images = [\n \"andyshinn/dnsmasq\",\n \"calico/node\",\n \"hyperkube-amd64\",\n]\n\n\nclass MockUnderlay(object):\n def __init__(self, images):\n self.images = images\n\n def sudo_check_call(self, *args, **kwargs):\n return {'stdout': self.images}\n\n\n@pytest.mark.unit_tests\ndef test_required_images_exists():\n funcs.required_images_exists(node_name='master',\n underlay=MockUnderlay(test_images1),\n required_images=required_images)\n with pytest.raises(AssertionError):\n funcs.required_images_exists(node_name='master',\n underlay=MockUnderlay(test_images2),\n required_images=required_images)\n","sub_path":"fuel_ccp_tests/tests/unit/test_system_funcs.py","file_name":"test_system_funcs.py","file_ext":"py","file_size_in_byte":1906,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"541212635","text":"import slugify\nfrom django.db import models\n\n\nclass ItemSize(models.Model):\n SIZES = (\n ('Size S', 'Size S'),\n ('Size M', 'Size M'),\n ('Size L', 'Size L'),\n ('Size XL', 'Size XL'),\n ('Size 2XL', 'Size 2XL')\n )\n name = models.CharField(max_length=100, choices=SIZES, unique=True)\n\n class Meta:\n verbose_name = 'Product size'\n verbose_name_plural = 'Product sizes'\n\n def __str__(self):\n return self.name\n\n\nclass Item(models.Model):\n name = models.CharField(max_length=255)\n description = models.TextField(null=True, blank=True)\n price = models.SmallIntegerField()\n sizes = models.ManyToManyField(ItemSize)\n available = models.BooleanField(default=True)\n show = models.BooleanField(default=True)\n quantity = models.SmallIntegerField()\n timestamp = models.DateTimeField(auto_now_add=True)\n\n class Meta:\n verbose_name = 'Product'\n verbose_name_plural = 'Products'\n ordering = ('timestamp',)\n\n def __str__(self):\n return self.name\n\n\nclass ItemImage(models.Model):\n alt = models.CharField(max_length=255, null=True, blank=True)\n cover = models.BooleanField(default=False)\n image = models.ImageField(upload_to='images/')\n item = models.ForeignKey(Item, related_name='images')\n\n class Meta:\n verbose_name = 'Product image'\n verbose_name_plural = 'Product images'\n\n def __str__(self):\n return 'Image {0} in item {1}'.format(self.pk, self.item.name)\n\n def save(self, *args, **kwargs):\n if self.cover:\n ItemImage.objects.filter(item=self.item).update(**{'cover': False})\n ret = super(ItemImage, self).save(*args, **kwargs)\n return ret\n\n def delete(self, *args, **kwargs):\n self.image.delete()\n super(ItemImage, self).delete(*args, **kwargs)\n\n\nclass Album(models.Model):\n name = models.CharField(max_length=255, unique=True)\n slug = models.CharField(max_length=255, unique=True)\n timestamp = models.DateTimeField('Date')\n show = models.BooleanField(default=True)\n\n class Meta:\n ordering = ('timestamp',)\n\n def __str__(self):\n return self.name\n\n def save(self, 
*args, **kwargs):\n if not self.slug:\n self.slug = slugify.slugify(self.name)\n ret = super(Album, self).save(*args, **kwargs)\n return ret\n\n\nclass Post(models.Model):\n content = models.TextField(null=True, blank=True)\n show = models.BooleanField(default=True)\n album = models.ForeignKey(Album, related_name='posts')\n\n\nclass MainFile(models.Model):\n file = models.FileField(upload_to='main/')\n timestamp = models.DateTimeField(auto_now_add=True)\n show = models.BooleanField(default=True)\n\n class Meta:\n ordering = ('-timestamp',)\n\n\nclass Order(models.Model):\n name = models.CharField(max_length=255)\n email = models.EmailField()\n created = models.DateTimeField(auto_now_add=True)\n total_value = models.IntegerField(null=True, blank=True)\n paid = models.BooleanField(default=False)\n\n\nclass CartItem(models.Model):\n item = models.ForeignKey(Item)\n quantity = models.IntegerField()\n size = models.ForeignKey(ItemSize)\n value = models.IntegerField(null=True, blank=True)\n order = models.ForeignKey(Order, related_name='order_items')\n\n def save(self, *args, **kwargs):\n if self.quantity:\n self.value = self.quantity * self.item.price\n ret = super(CartItem, self).save(*args, **kwargs)\n return ret\n","sub_path":"main/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3524,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"176724975","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n'''\nRoles in this namespace are meant to provide Django app server utility methods for Debian distributions.\n'''\n\nfrom os.path import dirname, join, splitext, split\n\nfrom provy.core import Role\nfrom provy.more.debian.package.pip import PipRole\nfrom provy.more.debian.package.aptitude import AptitudeRole\nfrom provy.more.debian.monitoring.supervisor import SupervisorRole\n\nSITES_KEY = 'django-sites'\nMUST_RESTART_KEY = 'restart-django-sites'\n\n\nclass WithSite(object):\n def __init__(self, django, name):\n self.django = django\n self.auto_start = True\n self.daemon = True\n self.name = name\n self.settings_path = None\n self.host = '0.0.0.0'\n self.pid_file_path = '/var/run'\n self.threads = 1\n self.processes = 1\n self.starting_port = 8000\n self.user = None\n if SupervisorRole in self.django.context['roles_in_context']:\n self.use_supervisor = True\n self.supervisor_log_folder = self.django.context['roles_in_context'][SupervisorRole].log_folder\n else:\n self.use_supervisor = False\n self.supervisor_log_folder = '/var/log'\n\n self.settings = {}\n\n def __enter__(self):\n return self\n\n def __exit__(self, exc_type, exc_value, traceback):\n if not self.settings_path:\n raise RuntimeError('[Django] The path to the site must be specified and should correspond to the directory where the settings.py file is for site %s.' 
% self.name)\n\n if SITES_KEY not in self.django.context:\n self.django.context[SITES_KEY] = []\n\n if self.use_supervisor:\n self.daemon = False\n self.auto_start = False\n self.django.restart_supervisor_on_changes = True\n\n self.django.context[SITES_KEY].append(self)\n\n\nclass DjangoRole(Role):\n '''\n This role provides Django app server management utilities for Debian distributions.\n When running Django under supervisor, remember to set restart_supervisor_on_changes to True.\n If you choose to automatically include supervisor support in your sites, don't forget to call SupervisorRole config method.\n When creating a new site using with role.create_site('somesite') as site these are the properties available in the site object:\n auto_start - Indicates whether the site should be automatically started by the operating system. Defaults to True. If using supervisor, explicitly set this to False.\n daemon - Indicates whether the init.d command for the website should daemonize itself. Defaults to True. If using supervisor, explicitly set this to False.\n settings_path - This is the only mandatory argument. This is the full path to django's settings.py file.\n host - The host IP address that django will listen to incoming requests. Defaults to '0.0.0.0'.\n starting_port - The first port that Django will be started in the event that more than one process is used. Defaults to 8000.\n processes - The number of processes that will have commands created at the server. As an example, if this is set to 2 and the name of the site is 'website', two commands will be created: /etc/init.d/website-8000 and /etc/init.d/website-8001. Defaults to 1.\n pid_file_path - Path to create the pid file. Defaults to '/var/run'.\n threads - Number of worker threads that Green Unicorn will use when spawning Django. Defaults to 1.\n user - User that gunicorn will run under. Defaults to the last created user. When using supervisor it is VERY important that this user is the same user as supervisor's.\n use_supervisor - States that supervisor configuration for these django website should be automatically included.\n supervisor_log_folder - Log folder that supervisor will store the configurations for this site.\n settings - Dictionary with settings that will overwrite Django's defaults. These settings will be included in a local_settings.py module that imports the original settings as KEY=value pairs. All values included here will have their string representation used in the local_settings.\n\n Sample usage\n
\n    from provy.core import Role\n    from provy.more.debian import DjangoRole, SupervisorRole\n\n    class MySampleRole(Role):\n        def provision(self):\n            with self.using(SupervisorRole) as role:\n                role.config(\n                    config_file_directory='/home/someuser',\n                    log_file='/home/someuser/logs/supervisord.log',\n                    user='myuser'\n                )\n\n            with self.using(DjangoRole) as role:\n                role.restart_supervisor_on_changes = True\n                with role.create_site('mysite') as site:\n                    site.settings_path = '/some/folder/with/settings.py'\n                    site.use_supervisor = True\n                    site.supervisor_log_folder = '/some/folder/to/log'\n                    site.threads = 4\n                    site.processes = 2\n                    site.user = 'myuser'\n                    # settings that override the website defaults.\n                    site.settings = {\n\n                    }\n    
\n '''\n def __init__(self, prov, context):\n super(DjangoRole, self).__init__(prov, context)\n self.restart_supervisor_on_changes = False\n\n def provision(self):\n '''\n Installs Django and its dependencies. This method must be called if it is overridden in subclasses, or Django won't work properly on the remote server.\n If you set a variable called django-version in the context, that version of django will be installed instead of the latest.\n Sample usage\n
\n        from provy.core import Role\n        from provy.more.debian import DjangoRole\n\n        class MySampleRole(Role):\n            def provision(self):\n                self.provision_role(DjangoRole) # no need to call this if using with block.\n\n        # or\n        class MySampleRole(Role):\n            def provision(self):\n                self.context['django-version'] = '1.1.1'\n                self.provision_role(DjangoRole) # no need to call this if using with block.\n                # now django 1.1.1 is installed.\n        
\n '''\n self.register_template_loader('provy.more.debian.web')\n\n with self.using(AptitudeRole) as role:\n role.ensure_package_installed('python-mysqldb')\n\n with self.using(PipRole) as role:\n if 'django-version' in self.context:\n role.ensure_package_installed('django', version=self.context['django-version'])\n else:\n role.ensure_package_installed('django')\n\n role.ensure_package_installed('gunicorn')\n\n def create_site(self, name):\n '''\n Enters a with block with a Site variable that allows you to configure a django website.\n Parameters\n name - name of the website.\n Sample usage\n
\n        from provy.core import Role\n        from provy.more.debian import DjangoRole\n\n        class MySampleRole(Role):\n            def provision(self):\n                with self.using(DjangoRole) as role:\n                    with role.create_site('website') as site:\n                        site.settings_path = '/some/folder/with/settings.py'\n                        site.threads = 4\n                        # settings that override the website defaults.\n                        site.settings = {\n\n                        }\n        
\n '''\n return WithSite(self, name)\n\n def cleanup(self):\n '''\n Updates the website and/or init files and restarts websites if needed.\n There's no need to call this method since provy's lifecycle will make sure it is called.\n '''\n\n if SITES_KEY in self.context:\n for website in self.context[SITES_KEY]:\n updated = self.__update_init_script(website)\n settings_updated = self.__update_settings(website)\n if website.use_supervisor:\n self.__update_supervisor_program(website)\n if updated or settings_updated:\n self.__ensure_restart(website)\n\n if MUST_RESTART_KEY in self.context and self.context[MUST_RESTART_KEY]:\n if self.restart_supervisor_on_changes:\n with self.using(SupervisorRole) as role:\n role.ensure_restart()\n for site in self.context[MUST_RESTART_KEY]:\n self.__restart(site)\n\n def __update_supervisor_program(self, website):\n with self.using(SupervisorRole) as role:\n for process_number in range(website.processes):\n port = website.starting_port + process_number\n script_name = \"%s-%d\" % (website.name, port)\n with role.with_program(script_name) as program:\n program.directory = dirname(website.settings_path)\n program.command = '/etc/init.d/%s start' % script_name\n program.name = script_name\n program.number_of_processes = 1\n program.user = website.user\n program.log_folder = website.supervisor_log_folder\n\n def __ensure_restart(self, website):\n if not MUST_RESTART_KEY in self.context:\n self.context[MUST_RESTART_KEY] = []\n self.context[MUST_RESTART_KEY].append(website)\n\n def __restart(self, website):\n if not website.auto_start:\n return\n for process_number in range(website.processes):\n port = website.starting_port + process_number\n script_name = \"%s-%d\" % (website.name, port)\n if self.remote_exists(join(website.pid_file_path.rstrip('/'), '%s_%s.pid' % (website.name, port))):\n self.execute('/etc/init.d/%s stop' % script_name, stdout=False, sudo=True)\n self.execute('/etc/init.d/%s start' % script_name, stdout=False, sudo=True)\n\n def __update_settings(self, website):\n local_settings_path = join(dirname(website.settings_path), 'local_settings.py')\n options = {\n 'settings_file': splitext(split(website.settings_path)[-1])[0],\n 'settings': website.settings\n }\n result = self.update_file('local.settings.template', local_settings_path, owner=website.user, options=options, sudo=True)\n return result\n\n def __update_init_script(self, website):\n at_least_one_updated = False\n for process_number in range(website.processes):\n port = website.starting_port + process_number\n options = {\n 'name': website.name,\n 'pid_file_path': website.pid_file_path.rstrip('/'),\n 'user': website.user,\n 'host': website.host,\n 'port': port,\n 'threads': website.threads,\n 'daemon': website.daemon,\n 'user': website.user,\n 'settings_directory': dirname(website.settings_path)\n }\n script_name = '%s-%d' % (website.name, port)\n result = self.update_file('website.init.template', '/etc/init.d/%s' % script_name, owner=website.user, options=options, sudo=True)\n\n if result:\n at_least_one_updated = True\n self.execute('chmod +x /etc/init.d/%s' % script_name, stdout=False, sudo=True)\n if website.auto_start:\n self.execute('update-rc.d %s defaults' % script_name, stdout=False, sudo=True)\n\n return at_least_one_updated\n","sub_path":"provy/more/debian/web/django.py","file_name":"django.py","file_ext":"py","file_size_in_byte":11777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"9499812","text":"#!/usr/bin/env 
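# Hypothetical helper mirroring how the DjangoRole methods above
# (__update_init_script, __restart, __update_supervisor_program) derive one init
# script per gunicorn process: the site name plus an increasing port number.
def init_script_names(site_name, starting_port, processes):
    return ['%s-%d' % (site_name, starting_port + n) for n in range(processes)]

assert init_script_names('website', 8000, 2) == ['website-8000', 'website-8001']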
python3\n# -*- coding: utf-8 -*-\n\n\n__project_parent__ = 'AGGREGATION'\n__project_title__ = 'Automated Gloss Mapping for Inferring Grammatical Properties'\n__project_name__ = 'Map Gloss'\n__script__ = 'eval/confusion_matrix.py'\n__date__ = 'March 2015'\n\n__author__ = 'MichaelLockwood'\n__email__ = 'lockwm@uw.edu'\n__github__ = 'mlockwood'\n__credits__ = 'Emily M. Bender for her guidance'\n__collaborators__ = None\n\n\nclass CM:\n\n objects = {}\n\n def __init__(self, gold, test, name):\n self.gold = gold\n self.test = test\n self.name = name\n self.matrix = {'FNeg': {}, 'FPos': {}, 'TPos': {}}\n self.set_confusion()\n self.precision = self.set_precision()\n self.recall = self.set_recall()\n self.fscore = self.set_fscore()\n CM.objects[name] = self\n\n def __repr__(self):\n return '<CM: {}>'.format(self.name)\n\n def set_confusion(self):\n for label in self.gold:\n if label in self.test:\n self.matrix['TPos'][label] = True\n else:\n self.matrix['FNeg'][label] = True\n for label in self.test:\n if label not in self.gold:\n self.matrix['FPos'][label] = True\n\n def set_precision(self):\n try:\n precision = float(len(self.matrix['TPos'])) / (len(self.matrix['TPos']) + len(self.matrix['FPos']))\n except ZeroDivisionError:\n precision = 0.0\n return precision\n\n def set_recall(self):\n try:\n recall = float(len(self.matrix['TPos'])) / (len(self.matrix['TPos']) + len(self.matrix['FNeg']))\n except ZeroDivisionError:\n recall = 0.0\n return recall\n\n def set_fscore(self):\n try:\n fscore = 2 * (self.precision * self.recall / (self.precision + self.recall))\n except ZeroDivisionError:\n fscore = 0.0\n return fscore\n\n def get_final(self):\n return self.precision, self.recall, self.fscore\n\n def write_prf_file(self, file):\n writer = open(file + '.prf', 'w')\n self.set_prf_file(writer)\n writer.close()\n\n def set_prf_file(self, writer):\n # Main p/r/f statistics\n writer.write('Precision: {}\\n'.format(self.precision))\n writer.write('Recall: {}\\n'.format(self.recall))\n writer.write('F-Score: {}\\n\\n'.format(self.fscore))\n \n # False negatives\n if self.matrix['FNeg']:\n writer.write('False Negatives\\n')\n for value in self.matrix['FNeg']:\n writer.write('\\t{}\\n'.format(value))\n writer.write('\\n')\n \n # False positives\n if self.matrix['FPos']:\n writer.write('False Positives\\n')\n for value in self.matrix['FPos']:\n writer.write('\\t{}\\n'.format(value))\n writer.write('\\n')\n\n def write_hprf_file(self, file):\n writer = open(file + '.hprf', 'w')\n self.set_hprf_file(writer)\n writer.close()\n\n def set_hprf_file(self, writer):\n # Gold labels\n writer.write('Gold Labels\\n')\n for value in self.gold:\n writer.write('G\\t{}\\n'.format(value))\n \n # Test labels\n writer.write('\\nTest Labels\\n')\n for value in self.test:\n writer.write('T\\t{}\\n'.format(value))\n\n\nclass Compare:\n\n objects = {} # (cm1, cm2)\n order = ['TP2FN', 'NO2FP', 'FN2TP', 'FP2NO']\n headers = {'TP2FN': 'True Positives to False Negatives',\n 'NO2FP': 'False Positives Added',\n 'FN2TP': 'False Negatives to True Positives',\n 'FP2NO': 'False Positives Removed'}\n\n def __init__(self, cm1, cm2):\n self.cm1 = cm1\n self.cm2 = cm2\n self.matrix = {'TP2FN': {}, 'NO2FP': {}, 'FN2TP': {}, 'FP2NO': {}}\n self.set_comparison_matrix()\n self.abs_precision = self.set_abs_precision()\n self.rel_precision = self.set_rel_precision()\n self.abs_recall = self.set_abs_recall()\n self.rel_recall = self.set_rel_recall()\n self.abs_fscore = self.set_abs_fscore()\n self.rel_fscore = self.set_rel_fscore()\n Compare.objects[(cm1.name, cm2.name)] = self\n\n def __repr__(self):\n return 
'<Compare: {} vs {}>'.format(self.cm1.name, self.cm2.name)\n\n def set_comparison_matrix(self):\n self.set_matrix_values(self.cm2.matrix['TPos'], self.cm1.matrix['TPos'], 'TP2FN')\n self.set_matrix_values(self.cm1.matrix['FPos'], self.cm2.matrix['FPos'], 'NO2FP')\n self.set_matrix_values(self.cm2.matrix['FNeg'], self.cm1.matrix['FNeg'], 'FN2TP')\n self.set_matrix_values(self.cm2.matrix['FPos'], self.cm1.matrix['FPos'], 'FP2NO')\n\n def set_matrix_values(self, first_matrix, second_matrix, new_type):\n for value in first_matrix:\n if value not in second_matrix:\n self.matrix[new_type][value] = True\n \n def set_abs_precision(self):\n return self.cm1.precision - self.cm2.precision\n\n def set_rel_precision(self):\n try:\n rel_precision = self.cm1.precision / self.cm2.precision\n except ZeroDivisionError:\n rel_precision = 0.0\n return rel_precision\n \n def set_abs_recall(self):\n return self.cm1.recall - self.cm2.recall\n\n def set_rel_recall(self):\n try:\n rel_recall = self.cm1.recall / self.cm2.recall\n except ZeroDivisionError:\n rel_recall = 0.0\n return rel_recall\n \n def set_abs_fscore(self):\n return self.cm1.fscore - self.cm2.fscore\n\n def set_rel_fscore(self):\n try:\n rel_fscore = self.cm1.fscore / self.cm2.fscore\n except ZeroDivisionError:\n rel_fscore = 0.0\n return rel_fscore\n\n def write_cprf_file(self, file):\n writer = open(file + '.cprf', 'w')\n self.set_cprf_file(writer)\n \n # Write PRF file of the first confusion matrix\n writer.write('\\n\\n--- {} ---\\n\\n'.format(self.cm1.name))\n self.cm1.set_prf_file(writer)\n \n # Write PRF file of the second confusion matrix\n writer.write('\\n\\n--- {} ---\\n\\n'.format(self.cm2.name))\n self.cm2.set_prf_file(writer)\n \n writer.close()\n\n def set_cprf_file(self, writer):\n # Main p/r/f abs statistics\n writer.write('Absolute Change\\n')\n writer.write('Precision: {}\\n'.format(self.abs_precision))\n writer.write('Recall: {}\\n'.format(self.abs_recall))\n writer.write('F-Score: {}\\n\\n'.format(self.abs_fscore))\n \n # Main p/r/f rel statistics\n writer.write('Relative Change\\n')\n writer.write('Precision: {}\\n'.format(self.rel_precision))\n writer.write('Recall: {}\\n'.format(self.rel_recall))\n writer.write('F-Score: {}\\n\\n'.format(self.rel_fscore))\n \n # False negatives and false positives\n for entry in Compare.order:\n if self.matrix[entry]:\n writer.write('{}\\n'.format(Compare.headers[entry]))\n for value in self.matrix[entry]:\n writer.write('\\t{}\\n'.format(value))\n writer.write('\\n')\n","sub_path":"utils/confusion_matrix.py","file_name":"confusion_matrix.py","file_ext":"py","file_size_in_byte":7079,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"315220102","text":"import sys\nimport signal\nimport cv2\nfrom time import localtime, strftime\nfrom datetime import datetime, timedelta\nend = False\n\ndef signal_handler(signal, frame):\n\tglobal end\n\tend = True\ncap = cv2.VideoCapture(2)\nif not cap.isOpened():\n\tsys.exit(-1)\nfourcc = cv2.VideoWriter_fourcc(*'DIVX')\ntempo = strftime(\"%a, %d %b %Y %H:%M:%S\", localtime())\nout = cv2.VideoWriter(tempo+'.AVI',fourcc, 20.0, (640,480))\nsignal.signal(signal.SIGINT, signal_handler)\n\n\nwhile(not end):\n\tret, frame = cap.read()\n\tif ret:\n\t\tnow = datetime.now()\n\t\tcv2.putText(frame, str(now),(5, 20),cv2.FONT_HERSHEY_COMPLEX_SMALL,.8,(225,255,255))\n\t\tout.write(frame)\n\t\t#print(str(now))\t\n\telse:\n\t\tprint('Erro na 
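# The CM bookkeeping above reduces to set algebra; a standalone sketch of the
# same precision/recall/F-score computation over two label collections.
def prf(gold, test):
    gold, test = set(gold), set(test)
    tp, fp, fn = len(gold & test), len(test - gold), len(gold - test)
    precision = float(tp) / (tp + fp) if tp + fp else 0.0
    recall = float(tp) / (tp + fn) if tp + fn else 0.0
    fscore = 2 * precision * recall / (precision + recall) if precision + recall else 0.0
    return precision, recall, fscore

p, r, f = prf(['a', 'b', 'c'], ['b', 'c', 'd'])
assert p == r == 2.0 / 3 and abs(f - 2.0 / 3) < 1e-12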
Captura')\n\t\tbreak\ncap.release()\nout.release()\n\n\n","sub_path":"Testes/CamSave.py","file_name":"CamSave.py","file_ext":"py","file_size_in_byte":723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"426479146","text":"import logging\nimport re\n\nimport app\nimport base_servlet\nimport fb_api\nfrom topics import grouping\nfrom topics import topic_db\nfrom search import search\nfrom search import search_base\n\n@app.route('/topic/?')\nclass TopicListHandler(base_servlet.BaseRequestHandler):\n def requires_login(self):\n return False\n\n def get(self):\n topics = topic_db.Topic.query().fetch(500)\n self.display['topics'] = sorted(topics, key=lambda x: x.url_path)\n\n self.render_template('topic_list')\n\n@app.route('/topic/([^/]+)/?')\nclass TopicHandler(base_servlet.BaseRequestHandler):\n def requires_login(self):\n return False\n\n def get(self, name):\n topics = topic_db.Topic.query(topic_db.Topic.url_path==name).fetch(1)\n if not topics:\n self.response.set_status(404)\n return\n\n topic = topics[0]\n\n if topic.graph_id:\n # We shouldn't need any tokens to access pages\n fbl = fb_api.FBLookup(None, None)\n fb_source = fbl.get(topic_db.LookupTopicPage, topic.graph_id)\n else:\n fb_source = None\n\n\n def prefilter(doc_event):\n \"\"\"Function for fitlering doc results, before we spend the energy to load the corresponding DBEvents.\n\n We only want on-topic events here:\n - Must contain keyword in the title\n - Must contain keyword on a line where it makes up >10% of the text (for judges, workshops, etc). We want to hide the resume-includes-classes-from-X people\n \"\"\"\n logging.info(\"Prefiltering event %s\", doc_event.doc_id)\n name = doc_event.field('name').value.lower()\n description = doc_event.field('description').value.lower()\n\n description_lines = description.split('\\n')\n\n for keyword in topic.search_keywords:\n keyword_word_re = re.compile(r'\\b%s\\b' % keyword)\n if keyword_word_re.search(name):\n return True\n for line in description_lines:\n result = keyword_word_re.search(line)\n # If the keyword is more than 10% of the text in the line:\n # Examples:\n # \"- HOUSE - KAPELA (Serial Stepperz/Wanted Posse)\"\n # \"5th November : EVENT Judged by HIRO :\"\n if result:\n if 1.0 * len(keyword) / len(line) > 0.1:\n return True\n else:\n logging.info(\"Found keyword %r on line, but not long enough: %r\", keyword, line)\n\n logging.info(\"Prefilter dropping event %s with name: %r\" % (doc_event.doc_id, name))\n return False\n\n keywords = ' OR '.join('\"%s\"' % x for x in topic.search_keywords)\n search_query = search_base.SearchQuery(keywords=keywords)\n # Need these fields for the prefilter\n search_query.extra_fields = ['name', 'description']\n search_results = search.Search(search_query).get_search_results(prefilter=prefilter)\n\n self.display['topic_title'] = topic.override_title or (fb_source and fb_source['info']['name'])\n self.display['topic_image'] = topic.override_image or (fb_source and fb_source['picture']['data']['url'])\n self.display['topic_description'] = topic.override_description or (fb_source and fb_source['info'].get('about')) or ''\n\n self.display['all_results'] = search_results\n\n by_year = []\n for year, month_events in sorted(grouping.group_results_by_date(search_results).items()):\n by_year.append((year, sorted(month_events.items())))\n self.display['group_by_date'] = by_year\n by_country = sorted(grouping.group_results_by_location(search_results).items(), key=lambda x: (-len(x[1]), x[0]))\n 
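# The recorder above builds its output name from "%a, %d %b %Y %H:%M:%S", which
# puts commas and colons into a filename (invalid on Windows, awkward elsewhere);
# a filesystem-safe variant of the same timestamped-name idea.
from time import localtime, strftime

def video_filename(prefix='capture', ext='AVI'):
    return '%s_%s.%s' % (prefix, strftime('%Y-%m-%d_%H-%M-%S', localtime()), ext)

print(video_filename())  # e.g. capture_2024-01-31_15-42-07.AVI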
self.display['group_by_location'] = by_country\n\n\n # TODO:\n # show points on map (future and past?)\n # show future events\n # show past events\n # show high quality and low quality events (most viable with 'past')\n # have an ajax filter on the page that lets me filter by location?\n self.display['fb_page'] = fb_source\n\n self.render_template('topic')\n\n\ndef get_id_from_url(url):\n if '#' in url:\n url = url.split('#')[1]\n if 'facebook.com' in url:\n url = url.split('facebook.com')[1]\n\n path_components = url.split('/')\n if path_components[1] == 'pages':\n page_id = path_components[3]\n return page_id\n else:\n page_name = path_components[1]\n return page_name\n\n#\"https://www.facebook.com/dancedeets\"\n#\"https://www.facebook.com/pages/DanceDeets-Events/1613128148918160\"\n\n@app.route('/topic_add')\nclass AdminAddTopicHandler(base_servlet.BaseRequestHandler):\n\n def show_barebones_page(self):\n self.response.out.write('Bleh')\n\n def get(self):\n page_lookup_id = None\n if self.request.get('page_url'):\n page_lookup_id = get_id_from_url(self.request.get('page_url'))\n elif self.request.get('page_id'):\n page_lookup_id = self.request.get('page_id')\n else:\n self.add_error('Need to specify a page to create from')\n self.fbl.request(topic_db.LookupTopicPage, page_lookup_id, allow_cache=False)\n self.finish_preload()\n\n try:\n fb_page = self.fbl.fetched_data(topic_db.LookupTopicPage, page_lookup_id)\n except fb_api.NoFetchedDataException:\n return self.show_barebones_page()\n\n self.errors_are_fatal()\n\n real_page_id = fb_page['info']['id']\n\n topics = topic_db.Topic.query(topic_db.Topic.graph_id==real_page_id).fetch(1)\n topic = topics[0] if topics else topic_db.Topic()\n\n topic.graph_id = real_page_id\n topic.topic_class = topic_db.TOPIC_DANCER\n topic.search_keywords = self.request.get_all('search_keywords')\n topic.put()\n self.response.out.write('Added!')\n\n","sub_path":"topics/topic_servlets.py","file_name":"topic_servlets.py","file_ext":"py","file_size_in_byte":5983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"224419069","text":"'''\r\nCreated on Jul 31, 2018\r\n\r\n@author: Anthony\r\n'''\r\n\r\nimport pandas as pd\r\nimport os\r\nimport inspect\r\nfrom hockey_3D_Map import hockey_3D_Map\r\nimport re\r\n\r\ncsv_headers = ['TOI', 'CA', 'FA', 'SA', 'GA']\r\ntoi_threashold = 40 / 60\r\n\r\ndef check(player_1_name, player_1_data, player_2_name, player_2_data):\r\n if player_1_data < player_2_data:\r\n return player_1_name + \" less\"\r\n elif player_1_data == player_2_data:\r\n return \"Same\"\r\n else:\r\n return player_2_name + \" less\"\r\n \r\ndef extract_player(file_string, player_name):\r\n file = pd.read_csv(file_string, index_col='Player')\r\n \r\n retVal = None\r\n \r\n try:\r\n retVal = file.loc[player_name]\r\n except KeyError:\r\n print(player_name + ' is not in ' + file_string)\r\n \r\n return retVal \r\n \r\ndef main(project_folder, data_folder, use_threshold, player_1, player_2):\r\n if 'NST' in data_folder:\r\n csv_headers.append('SCA')\r\n csv_headers.append('HDCA')\r\n \r\n init_path = project_folder + '\\\\out\\\\' + player_1 + ' - ' + player_2\r\n games_path = init_path + '\\\\games'\r\n analysis_path = init_path + '\\\\ana'\r\n \r\n if not os.path.exists(games_path):\r\n os.makedirs(games_path)\r\n if not os.path.exists(analysis_path):\r\n os.makedirs(analysis_path)\r\n \r\n x = hockey_3D_Map(player_1, player_2)\r\n csvs = []\r\n player_1_stats = []\r\n player_2_stats = []\r\n \r\n for 
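# The prefilter above keeps an event only when a keyword match covers more than
# 10% of a description line; the same test in isolation, with re.escape added
# for safety on keywords containing regex metacharacters.
import re

def keyword_dominates_line(keyword, line, min_fraction=0.1):
    if not re.search(r'\b%s\b' % re.escape(keyword), line):
        return False
    return float(len(keyword)) / len(line) > min_fraction

assert keyword_dominates_line('house', '- house - kapela (serial stepperz)')
assert not keyword_dominates_line('house', 'my resume includes many classes from house teachers all over the world')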
root, dirs, files in os.walk(project_folder + data_folder):\r\n for file in files:\r\n if file.endswith('.csv'):\r\n print(file)\r\n \r\n file_i = os.path.join(root, file)\r\n player_1_data = extract_player(file_i, player_1)\r\n player_2_data = extract_player(file_i, player_2)\r\n \r\n if(player_1_data is not None) and (player_2_data is not None):\r\n csvs.append(file)\r\n player_1_stats.append(player_1_data)\r\n player_2_stats.append(player_2_data)\r\n \r\n print('\\n')\r\n \r\n for csv_i, p1_row_i, p2_row_i in zip(csvs, player_1_stats, player_2_stats):\r\n file_i_name = games_path + '\\\\' + csv_i.replace('.csv', '.txt')\r\n game_file_i = open(file_i_name, 'w')\r\n game_file_i.write(csv_i + '\\r\\n')\r\n \r\n for j in csv_headers:\r\n p1_stat_j = p1_row_i.loc[j]\r\n p2_stat_j = p2_row_i.loc[j]\r\n \r\n if p1_stat_j == '--':\r\n p1_stat_j = 0\r\n else:\r\n p1_stat_j = float(p1_stat_j)\r\n \r\n if p2_stat_j == '--':\r\n p2_stat_j = 0\r\n else:\r\n p2_stat_j = float(p2_stat_j)\r\n \r\n string1 = check(player_1, p1_stat_j, player_2, p2_stat_j)\r\n \r\n if j == 'TOI':\r\n if ((p1_stat_j < toi_threashold) or (p2_stat_j < toi_threashold)) and use_threshold:\r\n # Go to the next file\r\n print(csv_i + ': Failed')\r\n game_file_i.close()\r\n os.remove(file_i_name)\r\n break\r\n else:\r\n x.public_update(string1, csv_i, j)\r\n \r\n game_file_i.write(j + \" -> \" + string1 + '\\n')\r\n game_file_i.write('\\t' + '(' + player_1 + ': ' + str(p1_stat_j) + ' vs ' + player_2 + ': ' + str(p2_stat_j) + ')\\n')\r\n else:\r\n x.public_update(string1, csv_i, j)\r\n \r\n game_file_i.write(j + \" -> \" + string1 + '\\n')\r\n game_file_i.write('\\t' + '(' + player_1 + ': ' + str(p1_stat_j) + ' vs ' + player_2 + ': ' + str(p2_stat_j) + ')\\n')\r\n \r\n p1_toi = p1_row_i.loc['TOI']\r\n p2_toi = p2_row_i.loc['TOI']\r\n p1_stat_j_n = float(p1_stat_j) / float(p1_toi)\r\n p2_stat_j_n = float(p2_stat_j) / float(p2_toi)\r\n \r\n string2 = check(player_1, p1_stat_j_n, player_2, p2_stat_j_n)\r\n game_file_i.write(j + \" Normalized -> \" + string2 + '\\n')\r\n game_file_i.write('\\t' + '(' + player_1 + ': ' + str(p1_stat_j_n) + ' vs ' + player_2 + ': ' + str(p2_stat_j_n) + ')\\n')\r\n \r\n x.public_update(string2, csv_i, (j+'N'))\r\n \r\n game_file_i.close()\r\n \r\n key_delimiter = os.path.basename(os.path.normpath(data_folder))\r\n \r\n relevant_metrics = ['CAN', 'FAN', 'SAN']\r\n \r\n if 'NST' in data_folder:\r\n analysis_file = open(analysis_path + '\\\\' + 'NST - ' + key_delimiter + '.txt', 'w')\r\n relevant_metrics.append('SCAN')\r\n relevant_metrics.append('HDCAN')\r\n \r\n for s in x.situations:\r\n update_analysis_file(analysis_file, x, s, relevant_metrics, player_1, player_2)\r\n \r\n elif 'Corsica' in data_folder:\r\n analysis_file = open(analysis_path + '\\\\' + 'Corsica - ' + key_delimiter + '.txt', 'w')\r\n update_analysis_file(analysis_file, x, 'Total', relevant_metrics, player_1, player_2)\r\n \r\n analysis_file.close()\r\n\r\ndef update_analysis_file(given_file, h_3D, situation, metrics, p1_name, p2_name):\r\n for m_i in metrics:\r\n p1_m_i = h_3D.d[p1_name][situation][m_i]\r\n given_file.write(p1_name + ' total ' + situation + ' for metric ' + m_i + ' -> ' + str(p1_m_i) + '\\n')\r\n \r\n p2_m_i = h_3D.d[p2_name][situation][m_i]\r\n given_file.write(p2_name + ' total ' + situation + ' for metric ' + m_i + ' -> ' + str(p2_m_i) + '\\n')\r\n \r\n same_m_i = h_3D.d['Same'][situation][m_i]\r\n given_file.write('Same total ' + situation + ' for metric ' + m_i + ' -> ' + str(same_m_i) + '\\n')\r\n 
given_file.write('\\n')\r\n \r\ndef get_toi(given_file_string):\r\n TOI_LINE_LOCATION = 3\r\n \r\n given_file = open(given_file_string, 'r')\r\n given_file_lines = given_file.readlines()\r\n toi_line = given_file_lines[TOI_LINE_LOCATION]\r\n broken_toi_line = toi_line.split(' ')\r\n \r\n retVal = []\r\n \r\n for value in broken_toi_line:\r\n value = value.replace(')\\n', '')\r\n try:\r\n converted_value = float(value)\r\n retVal.append(converted_value)\r\n except ValueError:\r\n pass\r\n \r\n return retVal\r\n\r\ndef get_player(given_file_string, metric):\r\n given_file = open(given_file_string, 'r')\r\n check = metric + ' Normalized'\r\n for line_i in given_file:\r\n if check in line_i:\r\n line_i = line_i.replace(check, '').replace(' -> ', '').replace(' less', '')\r\n return line_i\r\n \r\ndef loop_through_dict(given_dict):\r\n metrics = ['CA', 'FA', 'SA']\r\n for m in metrics:\r\n files_same = 0\r\n files_different = 0\r\n for g_c_f, g_n_f in given_dict.items():\r\n cor_player = get_player(g_c_f, m).strip().lower()\r\n nst_player = get_player(g_n_f, m).strip().lower()\r\n \r\n if(cor_player == nst_player):\r\n files_same = files_same + 1\r\n else:\r\n print(g_c_f)\r\n files_different = files_different + 1\r\n \r\n print(m)\r\n print('Players the same -> {} \\n Players different -> {}'.format(files_same, files_different))\r\n \r\ndef check_toi(project_dir, p1_name, p2_name):\r\n P1_LOCATION = 0\r\n P2_LOCATION = 1\r\n games_folder = project_dir + '\\\\out\\\\' + p1_name + ' - ' + p2_name + '\\\\games'\r\n \r\n nst_files = []\r\n corsica_files = []\r\n \r\n problem_files = {}\r\n good_files = {}\r\n \r\n pattern = '- [0-9]{2} [0-9]{2} [0-9]{4} - Total'\r\n for root, dirs, files in os.walk(games_folder):\r\n for file in files:\r\n matchObj = re.search(pattern, file)\r\n if matchObj:\r\n if 'Adjusted' in file:\r\n corsica_files.append(root + '\\\\' + file)\r\n else:\r\n nst_files.append(root + '\\\\' + file)\r\n \r\n for nst_i in nst_files:\r\n pattern1 = '[0-9]{2} [0-9]{2} [0-9]{4}'\r\n nst_i_timestamp = str(re.search(pattern1, nst_i).group())\r\n \r\n for cor_i in corsica_files:\r\n if re.search(nst_i_timestamp, cor_i):\r\n break\r\n \r\n nst_toi = get_toi(nst_i)\r\n cor_toi = get_toi(cor_i)\r\n \r\n p1_nst_toi = round(nst_toi[P1_LOCATION], 2)\r\n p2_nst_toi = round(nst_toi[P2_LOCATION], 2)\r\n p1_cor_toi = round(cor_toi[P1_LOCATION], 2)\r\n p2_cor_toi = round(cor_toi[P2_LOCATION], 2)\r\n \r\n if(p1_nst_toi != p1_cor_toi) or (p2_nst_toi != p2_cor_toi):\r\n problem_files[cor_i] = nst_i\r\n else:\r\n good_files[cor_i] = nst_i\r\n \r\n print('Problem Files')\r\n loop_through_dict(problem_files)\r\n \r\n print('Good Files')\r\n loop_through_dict(good_files)\r\n \r\nif __name__ == '__main__':\r\n this_file = os.path.abspath(inspect.getfile(inspect.currentframe()))\r\n project_dir = os.path.dirname(os.path.dirname(this_file))\r\n \r\n player1 = 'James Van Riemsdyk'\r\n player2 = 'Connor Brown'\r\n \r\n# main(project_dir, '\\\\etc\\\\Corsica\\\\by_game\\\\01 2018', True, player1, player2)\r\n\r\n check_toi(project_dir, player1, player2)\r\n \r\n \r\n \r\n \r\n \r\n \r\n ","sub_path":"PlayerComparison/src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":9362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"567369427","text":"import edward as ed\nfrom edward.models import Gamma, Normal, Poisson\nimport tensorflow as tf\nimport numpy as np\n\n\ndef build_toy_dataset(N=40, noise_std=0.1):\n D = 1\n X = 
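# The per-metric comparison above hinges on the three-way check() helper; a
# compact standalone copy with the same return strings, handy for quick tests.
def check(name_1, value_1, name_2, value_2):
    if value_1 < value_2:
        return name_1 + ' less'
    if value_1 == value_2:
        return 'Same'
    return name_2 + ' less'

assert check('James Van Riemsdyk', 1.2, 'Connor Brown', 3.4) == 'James Van Riemsdyk less'
assert check('A', 2, 'B', 2) == 'Same'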
np.concatenate([np.linspace(0, 2, num=N // 2),\n np.linspace(6, 8, num=N // 2)])\n y = np.cos(X) + np.random.normal(0, noise_std, size=N)\n X = (X - 4.0) / 4.0\n X = X.reshape((N, D))\n return X, y\n\n\ndef neural_net(X):\n X = tf.tanh(tf.matmul(X, W_0) + b_0)\n X = tf.matmul(X, W_1) + b_1\n return tf.reshape(X, [-1])\n\nN = 40\nD = 1\n\nX_train, y_train = build_toy_dataset(N)\n\n# model number of neurons with poisson and gamma prior\nwith tf.name_scope('model'):\n lam = Gamma(concentration=10.0, rate=2.0, name='lam')\n num_neurons = Poisson(rate=lam, name='num_neurons').sample()\n\n W_0 = Normal(loc=tf.zeros([D, num_neurons]), scale=tf.ones([D, num_neurons]), name='W_0')\n b_0 = Normal(loc=tf.zeros([num_neurons]), scale=tf.ones([num_neurons]), name='b_0')\n\n W_1 = Normal(loc=tf.zeros([num_neurons, 1]), scale=tf.ones([num_neurons, 1]), name='W_1')\n b_1 = Normal(loc=tf.zeros([1]), scale=tf.ones([1]), name='b_1')\n\n X = tf.placeholder(tf.float32, [N, D], name='X')\n y = Normal(loc=neural_net(X), scale=0.1 * tf.ones(N), name='y')\n\n\nwith tf.name_scope('posterior'):\n with tf.name_scope('qλ'):\n qλ = Gamma(concentration=tf.Variable(tf.random_gamma([1], [5])),\n rate=tf.Variable(tf.random_gamma([1], [2])))\n\n with tf.name_scope('qnum_neurons'):\n qnum_neurons = Poisson(rate=tf.Variable(tf.random_gamma([1], [5]), name='rate'))\n\n\n with tf.name_scope('qW_0'):\n qW_0 = Normal(loc=tf.Variable(tf.random_normal([D, num_neurons]), name='loc'),\n scale=tf.nn.softplus(tf.Variable(tf.random_normal([D, num_neurons]), name='scale')))\n\n with tf.name_scope('qW_1'):\n qW_1 = Normal(loc=tf.Variable(tf.random_normal([num_neurons, 1]), name='loc'),\n scale=tf.nn.softplus(tf.Variable(tf.random_normal([num_neurons, 1]), name='scale')))\n\n with tf.name_scope('qb_0'):\n qb_0 = Normal(loc=tf.Variable(tf.random_normal([num_neurons]), name='loc'),\n scale=tf.nn.softplus(tf.Variable(tf.random_normal([num_neurons]), name='scale')))\n\n\n with tf.name_scope('qb_1'):\n qb_1 = Normal(loc=tf.Variable(tf.random_normal([1]), name='loc'),\n scale=tf.nn.softplus(tf.Variable(tf.random_normal([1]), name='scale')))\n\n\ninference = ed.KLqp({lam: qλ, num_neurons: qnum_neurons,\n W_0: qW_0, b_0: qb_0,\n W_1: qW_1, b_1: qb_1},\n data={X: X_train, y: y_train})\ninference.run(logdir='log')\n","sub_path":"src/bayesian-topology/network.py","file_name":"network.py","file_ext":"py","file_size_in_byte":2701,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"457005677","text":"conveyor = [\"U\", \"D\", \"L\", \"R\"]\nconveyorMove = [[-1, 0], [1, 0], [0, -1], [0, 1]]\ncameraHelper = [[0, 1], [0, -1], [-1, 0], [1, 0]]\n\n\ndef isValidPos(grid, currRow, currCol):\n if currRow == 0 or currRow == len(grid):\n return False\n if currCol == 0 or currCol == len(grid[0]):\n return False\n if grid[currRow][currCol] == \"W\" or grid[currRow][currCol] == \"C\":\n return False\n if grid[currRow][currCol] in conveyor:\n return True\n else:\n # Check left right up and down for cameras\n # up and below\n for i in range(len(cameraHelper)):\n rowCopy = currRow\n colCopy = currCol\n while True:\n rowCopy += cameraHelper[i][0]\n colCopy += cameraHelper[i][1]\n if grid[rowCopy][colCopy] == \"C\":\n return False\n elif grid[rowCopy][colCopy] == \"W\":\n break\n return True\n\n\nincrimentHelper = [[-1, 0], [1, 0], [0, -1], [0, 1]]\n\n\ndef buildTree(grid, currRow, currCol, n, m):\n if isValidPos(grid, currRow, currCol):\n currLevel = [[currRow, currCol]]\n else:\n currLevel = []\n visited = set()\n 
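# A lightweight numpy-only sketch of the Gamma-Poisson prior over the hidden
# layer width used above (lam ~ Gamma(concentration=10, rate=2), then
# width ~ Poisson(lam)); note numpy's gamma takes a scale parameter, i.e. 1/rate.
import numpy as np

rng = np.random.default_rng(0)
lam = rng.gamma(shape=10.0, scale=1.0 / 2.0)
num_neurons = rng.poisson(lam)
print(lam, num_neurons)  # lam is around 5, num_neurons a nearby integer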
visited.add((currRow, currCol))\n tree = {}\n nextLevel = [[currRow, currCol]]\n level = 1\n while len(nextLevel) != 0:\n nextLevel = []\n for pos in currLevel:\n # Check up down left and right\n for x in range(4):\n row = pos[0]\n col = pos[1]\n row += incrimentHelper[x][0]\n col += incrimentHelper[x][1]\n for i, letter in enumerate(conveyor):\n if grid[row][col] == letter:\n row += conveyorMove[i][0]\n col += conveyorMove[i][1]\n if row <= n - 1 and row >= 0 and col <= m - 1 and col >= 0:\n if (row, col) in visited:\n continue\n else:\n visited.add((row, col))\n if isValidPos(grid, row, col):\n nextLevel.append([row, col])\n if nextLevel != []:\n tree[level] = nextLevel\n currLevel = nextLevel\n level += 1\n return tree\n\n\nif __name__ == \"__main__\":\n visited = []\n grid = []\n row, col = map(int, input().split())\n find = 0\n escapePos = []\n for rowi in range(row):\n grid.append(list(input()))\n visited.append([0 for _ in range(col)])\n for coli in range(col):\n if grid[rowi][coli] == \".\":\n find += 1\n escapePos.append([rowi, coli])\n if grid[rowi][coli] == \"S\":\n currRow = rowi\n currCol = coli\n tree = buildTree(grid, currRow, currCol, row, col)\n\n for end in escapePos:\n res = []\n for key, value in tree.items():\n if end in value:\n res.append(key)\n if len(res) == 0:\n print(-1)\n else:\n print(res[0])","sub_path":"CCC/18S3_2.py","file_name":"18S3_2.py","file_ext":"py","file_size_in_byte":3041,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"369313248","text":"#! /usr/bin/python3\n\nimport argparse\nimport boto3\n\n\nclient = boto3.client('route53')\nparser = argparse.ArgumentParser()\n\n\nparser.add_argument('-a', '--action', default='', help='CREATE | DELETE | UPSERT', action='store')\nparser.add_argument('-c', '--comment', default='', help='Comment the change', action='store')\nparser.add_argument('-n', '--name', default='', help='DNS name to use', action='store')\nparser.add_argument('-t', '--type', default='', help='Type of record (eg. 
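# buildTree above is a level-order BFS with extra conveyor/camera rules; a plain
# grid BFS distance (same expansion order, no special tiles) as a reference.
from collections import deque

def bfs_distance(grid, start, goal, blocked='W'):
    rows, cols = len(grid), len(grid[0])
    dist = {start: 0}
    queue = deque([start])
    while queue:
        r, c = queue.popleft()
        if (r, c) == goal:
            return dist[(r, c)]
        for dr, dc in ((-1, 0), (1, 0), (0, -1), (0, 1)):
            nr, nc = r + dr, c + dc
            if 0 <= nr < rows and 0 <= nc < cols and grid[nr][nc] != blocked and (nr, nc) not in dist:
                dist[(nr, nc)] = dist[(r, c)] + 1
                queue.append((nr, nc))
    return -1  # unreachable, matching the -1 the solution prints

assert bfs_distance(['S.W', '..W', 'W..'], (0, 0), (2, 2)) == 4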
A, CNAME, SOA, TXT)', action='store')\nparser.add_argument('-v', '--value', default='127.0.0.1', help='IP Address / Domain name to use for the new record', action='store')\nparser.add_argument('-z', '--zone', default='', help='Hosted Zone ID for the new record', action='store')\nparser.add_argument('--ttl', default=60, type=int, help='TTL for the new record', action='store')\n\n\nargs=parser.parse_args()\n\nresponse = client.change_resource_record_sets(\n HostedZoneId = args.zone,\n ChangeBatch={\n 'Comment': args.comment,\n 'Changes': [\n {\n 'Action': args.action,\n 'ResourceRecordSet': {\n 'Name': args.name,\n 'Type': args.type,\n 'TTL': args.ttl,\n 'ResourceRecords': [\n {\n 'Value': args.value\n }\n ]\n }\n },\n ]\n }\n)\n\nprint(response)","sub_path":"aws_route53_changer.py","file_name":"aws_route53_changer.py","file_ext":"py","file_size_in_byte":1466,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"418330136","text":"from .models.DS_net_trainer import *\nfrom .models.DS_net import *\nfrom .models.depth_trainer import *\nfrom .models.DepthModel import *\nimport torch\nimport sys\n\n\ninput_train_root_path, depth_train_root_path, input_test_root_path,\\\ndepth_test_root_path, semantics_train_root_path, semantics_test_root_path, \\\nnum_epochs,batch_size, use_gpu, pretrained, model_weights_path, resume, \\\nloss_type, learning_rate, read_semantics = sys.argv[1:]\n\nlearning_rate = float(learning_rate)\nuse_gpu = use_gpu.lower() == 'true'\npretrained = pretrained.lower() == 'true'\nread_semantics = read_semantics.lower() == 'true'\n\nif resume.lower() == 'none':\n resume = None\n\nif read_semantics:\n net = create_join_net(pretrained=False)\n\n trainer = JointTrainer(model=net,\n use_gpu=use_gpu,\n input_train_root_path=input_train_root_path,\n depth_train_root_path=depth_train_root_path,\n semantics_train_root_path=semantics_train_root_path,\n input_test_root_path=input_test_root_path,\n depth_test_root_path=depth_test_root_path,\n semantics_test_root_path=semantics_test_root_path,\n num_epochs=int(num_epochs),\n batch_size=int(batch_size),\n resume=resume,\n loss_type=loss_type\n )\n\n if not pretrained:\n trainer.train_model(checkpoint_freq=1)\n\n else:\n model_dict = torch.load(model_weights_path)\n net.load_state_dict(model_dict)\n trainer.model = net\n\n trainer.model.eval()\n av_depth_loss, av_depth_acc, semantic_loss, semantic_acc = trainer.validate()\n print('depth loss: %.4f' % av_depth_loss)\n print('depth acc: %.4f' % av_depth_acc)\n print('semantic loss: %.4f' % semantic_loss)\n print('semantic acc: %.4f' % semantic_acc)\n\nelse:\n # creating our network\n net = create_baseline(pretrained=True)\n\n trainer = DepthTrainer(model=net, use_gpu=use_gpu,\n input_train_root_path=input_train_root_path,\n target_train_root_path=depth_train_root_path,\n input_test_root_path=input_test_root_path,\n target_test_root_path=depth_test_root_path,\n num_epochs=int(num_epochs),\n batch_size=int(batch_size),\n resume=resume,\n loss_type=loss_type,\n learning_rate=learning_rate\n )\n\n if not pretrained:\n trainer.train_model(checkpoint_freq=1)\n\n else:\n model_dict = torch.load(model_weights_path, map_location=lambda storage, loc: storage)\n net.load_state_dict(model_dict)\n trainer.model = net\n trainer.model.eval()\n av_loss, av_acc = trainer.run(validate=True)\n print('av loss: ' + str(av_loss) + ' av acc: ' + 
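# Offline sketch of the ChangeBatch payload the script above passes to
# change_resource_record_sets, built without boto3 so the shape can be
# inspected or unit-tested before touching Route 53.
def build_change_batch(action, name, record_type, value, ttl=60, comment=''):
    return {
        'Comment': comment,
        'Changes': [{
            'Action': action,
            'ResourceRecordSet': {
                'Name': name,
                'Type': record_type,
                'TTL': ttl,
                'ResourceRecords': [{'Value': value}],
            },
        }],
    }

batch = build_change_batch('UPSERT', 'test.example.com', 'A', '127.0.0.1')
assert batch['Changes'][0]['ResourceRecordSet']['TTL'] == 60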
str(av_acc))\n\n\n\n\n\n\n\n","sub_path":"back_up_pytorch/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":3095,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"482888357","text":"# Copyright 2014 IBM Corp.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom testtools import matchers\n\nfrom keystone.common import dependency\nfrom keystone.tests import test_v3\n\n\n@dependency.requires('endpoint_policy_api')\nclass TestExtensionCase(test_v3.RestfulTestCase):\n\n EXTENSION_NAME = 'endpoint_policy'\n EXTENSION_TO_ADD = 'endpoint_policy_extension'\n\n\nclass EndpointPolicyTestCase(TestExtensionCase):\n \"\"\"Test endpoint policy CRUD.\n\n In general, the controller layer of the endpoint policy extension is really\n just marshalling the data around the underlying manager calls. Given that\n the manager layer is tested in depth by the backend tests, the tests we\n execute here concentrate on ensuring we are correctly passing and\n presenting the data.\n\n \"\"\"\n\n def setUp(self):\n super(EndpointPolicyTestCase, self).setUp()\n self.policy = self.new_policy_ref()\n self.policy_api.create_policy(self.policy['id'], self.policy)\n self.service = self.new_service_ref()\n self.catalog_api.create_service(self.service['id'], self.service)\n self.endpoint = self.new_endpoint_ref(self.service['id'], enabled=True)\n self.catalog_api.create_endpoint(self.endpoint['id'], self.endpoint)\n self.region = self.new_region_ref()\n self.catalog_api.create_region(self.region)\n\n # endpoint policy crud tests\n\n def test_crud_for_policy_for_explicit_endpoint(self):\n \"\"\"PUT, HEAD and DELETE for explicit endpoint policy.\"\"\"\n\n url = ('/policies/%(policy_id)s/OS-ENDPOINT-POLICY'\n '/endpoints/%(endpoint_id)s') % {\n 'policy_id': self.policy['id'],\n 'endpoint_id': self.endpoint['id']}\n\n self.put(url, expected_status=204)\n self.get(url, expected_status=204)\n self.head(url, expected_status=204)\n self.delete(url, expected_status=204)\n\n def test_crud_for_policy_for_service(self):\n \"\"\"PUT, HEAD and DELETE for service endpoint policy.\"\"\"\n\n url = ('/policies/%(policy_id)s/OS-ENDPOINT-POLICY'\n '/services/%(service_id)s') % {\n 'policy_id': self.policy['id'],\n 'service_id': self.service['id']}\n\n self.put(url, expected_status=204)\n self.get(url, expected_status=204)\n self.head(url, expected_status=204)\n self.delete(url, expected_status=204)\n\n def test_crud_for_policy_for_region_and_service(self):\n \"\"\"PUT, HEAD and DELETE for region and service endpoint policy.\"\"\"\n\n url = ('/policies/%(policy_id)s/OS-ENDPOINT-POLICY'\n '/services/%(service_id)s/regions/%(region_id)s') % {\n 'policy_id': self.policy['id'],\n 'service_id': self.service['id'],\n 'region_id': self.region['id']}\n\n self.put(url, expected_status=204)\n self.get(url, expected_status=204)\n self.head(url, expected_status=204)\n self.delete(url, expected_status=204)\n\n def test_get_policy_for_endpoint(self):\n \"\"\"GET 
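# The runner above parses CLI booleans with use_gpu.lower() == 'true', which
# quietly turns typos into False; a stricter hypothetical helper for the same job.
def str2bool(value):
    value = value.strip().lower()
    if value in ('true', '1', 'yes'):
        return True
    if value in ('false', '0', 'no'):
        return False
    raise ValueError('expected a boolean-like string, got %r' % value)

assert str2bool('True') is True and str2bool('false') is False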
/endpoints/{endpoint_id}/policy.\"\"\"\n\n self.put('/policies/%(policy_id)s/OS-ENDPOINT-POLICY'\n '/endpoints/%(endpoint_id)s' % {\n 'policy_id': self.policy['id'],\n 'endpoint_id': self.endpoint['id']},\n expected_status=204)\n\n self.head('/endpoints/%(endpoint_id)s/OS-ENDPOINT-POLICY'\n '/policy' % {\n 'endpoint_id': self.endpoint['id']},\n expected_status=200)\n\n r = self.get('/endpoints/%(endpoint_id)s/OS-ENDPOINT-POLICY'\n '/policy' % {\n 'endpoint_id': self.endpoint['id']},\n expected_status=200)\n self.assertValidPolicyResponse(r, ref=self.policy)\n\n def test_list_endpoints_for_policy(self):\n \"\"\"GET /policies/%(policy_id}/endpoints.\"\"\"\n\n self.put('/policies/%(policy_id)s/OS-ENDPOINT-POLICY'\n '/endpoints/%(endpoint_id)s' % {\n 'policy_id': self.policy['id'],\n 'endpoint_id': self.endpoint['id']},\n expected_status=204)\n\n r = self.get('/policies/%(policy_id)s/OS-ENDPOINT-POLICY'\n '/endpoints' % {\n 'policy_id': self.policy['id']},\n expected_status=200)\n self.assertValidEndpointListResponse(r, ref=self.endpoint)\n self.assertThat(r.result.get('endpoints'), matchers.HasLength(1))\n\n def test_endpoint_association_cleanup_when_endpoint_deleted(self):\n url = ('/policies/%(policy_id)s/OS-ENDPOINT-POLICY'\n '/endpoints/%(endpoint_id)s') % {\n 'policy_id': self.policy['id'],\n 'endpoint_id': self.endpoint['id']}\n\n self.put(url, expected_status=204)\n self.head(url, expected_status=204)\n\n self.delete('/endpoints/%(endpoint_id)s' % {\n 'endpoint_id': self.endpoint['id']})\n\n self.head(url, expected_status=404)\n\n def test_region_service_association_cleanup_when_region_deleted(self):\n url = ('/policies/%(policy_id)s/OS-ENDPOINT-POLICY'\n '/services/%(service_id)s/regions/%(region_id)s') % {\n 'policy_id': self.policy['id'],\n 'service_id': self.service['id'],\n 'region_id': self.region['id']}\n\n self.put(url, expected_status=204)\n self.head(url, expected_status=204)\n\n self.delete('/regions/%(region_id)s' % {\n 'region_id': self.region['id']})\n\n self.head(url, expected_status=404)\n\n def test_region_service_association_cleanup_when_service_deleted(self):\n url = ('/policies/%(policy_id)s/OS-ENDPOINT-POLICY'\n '/services/%(service_id)s/regions/%(region_id)s') % {\n 'policy_id': self.policy['id'],\n 'service_id': self.service['id'],\n 'region_id': self.region['id']}\n\n self.put(url, expected_status=204)\n self.head(url, expected_status=204)\n\n self.delete('/services/%(service_id)s' % {\n 'service_id': self.service['id']})\n\n self.head(url, expected_status=404)\n\n def test_service_association_cleanup_when_service_deleted(self):\n url = ('/policies/%(policy_id)s/OS-ENDPOINT-POLICY'\n '/services/%(service_id)s') % {\n 'policy_id': self.policy['id'],\n 'service_id': self.service['id']}\n\n self.put(url, expected_status=204)\n self.get(url, expected_status=204)\n\n self.delete('/services/%(service_id)s' % {\n 'service_id': self.service['id']})\n\n self.head(url, expected_status=404)\n\n\nclass JsonHomeTests(TestExtensionCase, test_v3.JsonHomeTestMixin):\n EXTENSION_LOCATION = ('http://docs.openstack.org/api/openstack-identity/3/'\n 'ext/OS-ENDPOINT-POLICY/1.0/rel')\n PARAM_LOCATION = 'http://docs.openstack.org/api/openstack-identity/3/param'\n\n JSON_HOME_DATA = {\n EXTENSION_LOCATION + '/endpoint_policy': {\n 'href-template': '/endpoints/{endpoint_id}/OS-ENDPOINT-POLICY/'\n 'policy',\n 'href-vars': {\n 'endpoint_id': PARAM_LOCATION + '/endpoint_id',\n },\n },\n EXTENSION_LOCATION + '/policy_endpoints': {\n 'href-template': 
'/policies/{policy_id}/OS-ENDPOINT-POLICY/'\n 'endpoints',\n 'href-vars': {\n 'policy_id': PARAM_LOCATION + '/policy_id',\n },\n },\n EXTENSION_LOCATION + '/endpoint_policy_association': {\n 'href-template': '/policies/{policy_id}/OS-ENDPOINT-POLICY/'\n 'endpoints/{endpoint_id}',\n 'href-vars': {\n 'policy_id': PARAM_LOCATION + '/policy_id',\n 'endpoint_id': PARAM_LOCATION + '/endpoint_id',\n },\n },\n EXTENSION_LOCATION + '/service_policy_association': {\n 'href-template': '/policies/{policy_id}/OS-ENDPOINT-POLICY/'\n 'services/{service_id}',\n 'href-vars': {\n 'policy_id': PARAM_LOCATION + '/policy_id',\n 'service_id': PARAM_LOCATION + '/service_id',\n },\n },\n EXTENSION_LOCATION + '/region_and_service_policy_association': {\n 'href-template': '/policies/{policy_id}/OS-ENDPOINT-POLICY/'\n 'services/{service_id}/regions/{region_id}',\n 'href-vars': {\n 'policy_id': PARAM_LOCATION + '/policy_id',\n 'service_id': PARAM_LOCATION + '/service_id',\n 'region_id': PARAM_LOCATION + '/region_id',\n },\n },\n }\n","sub_path":"keystone/tests/test_v3_endpoint_policy.py","file_name":"test_v3_endpoint_policy.py","file_ext":"py","file_size_in_byte":9346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"611354336","text":"#!/usr/bin/env python\n\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pylab as p\n\nplot = pd.read_table(\"/Users/cmdb/qbb2015/stringtie/SRR072893/t_data.ctab\")\nroi = plot['FPKM'] >0\nplot2=plot[roi]['FPKM']\nplot3=np.log(plot2)\n\nmean=np.mean(plot3)\nstddev=np.std(plot3)\n\n\nx = mean + stddev * p.randn(1000)\n\n#n, bins, patches = p.hist(x, 50, normed=1, histtype='stepfilled')\n#p.setp(patches, 'facecolor', 'g', 'alpha', 0.75)\n\n\n#y = p.normpdf(bins, mean, stddev)\n\nx.sort() ###gets in order\n\n\n\nplt.figure()\nplt.hist(list(plot3))\ny = p.normpdf(x, mean, stddev)\n#plt.hist(plot.values)\nplt.plot(x,y*len(plot2),'r--')\n\n\nplt.title('Density Plot')\nplt.xlabel('log of fpkm')\nplt.ylabel('frequency')\n#plt.show()\nplt.savefig('density.png')\n\n\n","sub_path":"git/day3lunch3.py","file_name":"day3lunch3.py","file_ext":"py","file_size_in_byte":757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"462132572","text":"from simplify_fractions import GCD,simplify_fraction\n\ndef LCM(a,b): \n return (a*b) / GCD(a,b) \n\ndef check_zero_denominator(fractions):\n for elem in fractions:\n if elem[1] == 0:\n return True\n return False\n\ndef check_fraction_length_bigger_than_required(fractions):\n for elem in fractions:\n if len(elem) > 2:\n return True\n return False\n\ndef check_fraction_length_lower_than_required(fractions):\n for elem in fractions:\n if len(elem) < 2:\n return True\n return False\n\ndef validate_values(fractions):\n if not isinstance(fractions,list):\n raise ValueError('Passed fractions are not in the form of a list of tuples')\n elif len(fractions) > 2:\n raise ValueError('More than 2 fractions are being passed for the program to collect')\n elif check_fraction_length_bigger_than_required(fractions):\n raise ValueError('There is a fraction with more than two elements')\n elif check_fraction_length_lower_than_required(fractions):\n raise ValueError('There is a fraction with less than two elements')\n elif check_zero_denominator(fractions):\n raise ValueError('Second element of one of the fractions is zero - cannot divide by zero!!!')\n\ndef collect_fractions(fractions):\n firstFraction = simplify_fraction(fractions[0])\n 
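# p.normpdf used by the density script above was removed from pylab in
# Matplotlib 3.x; the overlay curve can be computed directly with numpy instead.
import numpy as np

def normpdf(x, mean, stddev):
    return np.exp(-(x - mean) ** 2 / (2.0 * stddev ** 2)) / np.sqrt(2.0 * np.pi * stddev ** 2)

x = np.linspace(-3, 3, 7)
assert abs(normpdf(x, 0.0, 1.0)[3] - 1.0 / np.sqrt(2.0 * np.pi)) < 1e-12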
secondFraction = simplify_fraction(fractions[1])\n\n newDenom = int(LCM(firstFraction[1],secondFraction[1]))\n\n # Scale each simplified numerator by the factor that lifts its denominator to the common one.\n firstNumerator = firstFraction[0] * (newDenom // firstFraction[1])\n secondNumerator = secondFraction[0] * (newDenom // secondFraction[1])\n\n newNumer = firstNumerator + secondNumerator\n\n return simplify_fraction((int(newNumer),int(newDenom)))\n\n\ndef main():\n print(collect_fractions([(1, 4), (1, 2)]))\n print(collect_fractions([(1, 7), (2, 6)]))\n\nif __name__ == '__main__':\n main()","sub_path":"Week2/collect_fractions.py","file_name":"collect_fractions.py","file_ext":"py","file_size_in_byte":1796,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"221791654","text":"# from attackgraph.sim_MPI_retrain import do_MPI_sim_retrain\nfrom attackgraph.simulation import series_sim_retrain\nfrom attackgraph import file_op as fp\nimport os\nimport numpy as np\nimport joblib\n\n\n#TODO: sim_MPI may cause error since name==main os.exit\ndef sim_retrain(env, game, mix_str_att, mix_str_def, epoch):\n # sim for retrained attacker\n print(\"Begin sim_retrain_att.\")\n a_BD = sim_retrain_att(env, game, mix_str_def, epoch)\n print(\"Done sim_retrain_att.\")\n # sim for retrained defender\n print('Begin sim_retrain_def')\n d_BD = sim_retrain_def(env, game, mix_str_att, epoch)\n print(\"Done sim_retrain_def\")\n\n return a_BD, d_BD\n\n\ndef sim_retrain_att(env, game, mix_str_def, epoch):\n rewards_att = fp.load_pkl(os.getcwd() + '/retrained_rew/' + 'rewards_att.pkl') # reward is np.array([1,2,3,4])\n k, gamma, alpha = game.param\n DIR = os.getcwd() + '/retrain_att/'\n str_list = [name for name in os.listdir(DIR) if os.path.isfile(os.path.join(DIR, name)) and '.pkl' in name]\n num_str = len(str_list)\n if num_str != len(rewards_att):\n print('***************************')\n print('Retrain reward length does not match!')\n print('***************************')\n raise ValueError('Retrain reward length does not match!')\n util = []\n for i in range(num_str):\n nn_att = 'att_str_retrain' + str(i) + \".pkl\"\n nn_def = mix_str_def\n # if MPI_flag:\n # a_BD, _ = do_MPI_sim_retrain(nn_att, nn_def)\n # else:\n a_BD, _ = series_sim_retrain(env, game, nn_att, nn_def, 10)\n\n util.append(alpha*a_BD+(1-alpha)*rewards_att[i])\n\n best_idx = np.argmax(np.array(util))\n os.rename(os.getcwd() + '/retrain_att/' + 'att_str_retrain' + str(best_idx) + \".pkl\", os.getcwd() + \"/attacker_strategies/\" + 'att_str_epoch' + str(epoch) + '.pkl')\n change_scope(path=os.getcwd() + \"/attacker_strategies/\" + 'att_str_epoch' + str(epoch) + '.pkl', epoch=epoch, identity=1)\n return np.max(np.array(util))\n\n\n\ndef sim_retrain_def(env, game, mix_str_att, epoch):\n rewards_def = fp.load_pkl(os.getcwd() + '/retrained_rew/' + 'rewards_def.pkl')\n k, gamma, alpha = game.param\n DIR = os.getcwd() + '/retrain_def/'\n str_list = [name for name in os.listdir(DIR) if os.path.isfile(os.path.join(DIR, name)) and '.pkl' in name]\n num_str = len(str_list)\n if num_str != len(rewards_def):\n print('***************************')\n print('Retrain reward length does not match!')\n print('***************************')\n raise ValueError('Retrain reward length does not match!')\n util = []\n for i in range(num_str):\n nn_att = mix_str_att\n nn_def = \"def_str_retrain\" + str(i) + \".pkl\"\n # if MPI_flag:\n # _, d_BD = do_MPI_sim_retrain(nn_att, nn_def)\n # else:\n _, d_BD = series_sim_retrain(env, game, nn_att, nn_def, 10)\n\n util.append(alpha * d_BD + (1 - alpha) * rewards_def[i])\n\n best_idx = np.argmax(np.array(util))\n 
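# Independent cross-check for collect_fractions using the standard library;
# fractions.Fraction reduces automatically, so results should match the
# corrected numerator scaling above.
from fractions import Fraction

def collect_with_stdlib(pairs):
    total = sum(Fraction(n, d) for n, d in pairs)
    return total.numerator, total.denominator

assert collect_with_stdlib([(1, 4), (1, 2)]) == (3, 4)
assert collect_with_stdlib([(3, 4), (1, 2)]) == (5, 4)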
os.rename(os.getcwd() + '/retrain_def/' + 'def_str_retrain' + str(best_idx) + \".pkl\", os.getcwd() + \"/defender_strategies/\" + 'def_str_epoch' + str(epoch) + '.pkl')\n change_scope(path=os.getcwd() + \"/defender_strategies/\" + 'def_str_epoch' + str(epoch) + '.pkl', epoch=epoch, identity=0)\n return np.max(np.array(util))\n\n\ndef change_scope(path, epoch, identity):\n loaded_params = joblib.load(os.path.expanduser(path))\n new_params = {}\n keys = loaded_params.keys()\n\n if identity == 0:\n old_keys = 'def_str_retrain0'\n new_keys = 'def_str_epoch' + str(epoch)\n elif identity == 1:\n old_keys = 'att_str_retrain0'\n new_keys = 'att_str_epoch' + str(epoch)\n else:\n raise ValueError(\"Identity error!\")\n\n for key in keys:\n a = key.replace(old_keys, new_keys)\n new_params[a] = loaded_params[key]\n\n joblib.dump(new_params, path)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"attackgraph/sim_retrain.py","file_name":"sim_retrain.py","file_ext":"py","file_size_in_byte":3920,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"198900258","text":"import pandas as pd\n\n\nclass ParseSampleSheet(object):\n \"\"\"Parses a sample sheet (CSV format) into two Python dictionaries, one for header details and one for sample details.\n\n :param csv_file: sample sheet\n\n Notes:\n Functions in this class are based on a standard sample sheet layout with the following header attributes:\n - Header\n - Manifests\n - Reads\n - Settings\n - Data\n \"\"\"\n def __init__(self, csv_file):\n self.csv = csv_file\n\n def parse_sample_sheet(self):\n # -----------------------------------------------------------------------------------------------------------\n # 1) Set some variables so we can use outside loops\n # -----------------------------------------------------------------------------------------------------------\n header_index = 0\n data_index = 0\n manifest_index = 0\n read_index = 0\n settings_index = 0\n df_run_data_temp = pd.DataFrame([])\n df_run_data_final = pd.DataFrame(columns=['Property', 'Value']) # this allows for easy appending later\n # -----------------------------------------------------------------------------------------------------------\n # 2) Parse sample sheet into pandas dataframe\n # -----------------------------------------------------------------------------------------------------------\n df_sample_sheet = pd.read_csv(self.csv, header=None)\n # -----------------------------------------------------------------------------------------------------------\n # 3) Get indexes where these details are\n for column in df_sample_sheet:\n for row_index, row in df_sample_sheet.iterrows():\n if row[column] == '[Data]':\n data_index = row_index\n df_run_data_temp = df_sample_sheet.ix[:data_index - 2, 0:1] # Put all header info into a separate df\n df_run_data_temp.columns = ['Property', 'Value']\n elif row[column] == '[Header]':\n header_index = row_index\n elif row[column] == '[Manifests]':\n manifest_index = row_index\n elif row[column] == '[Reads]':\n read_index = row_index\n elif row[column] == '[Settings]':\n settings_index = row_index\n else:\n pass\n # ----------------------------------------------------------------------------------------------------------\n # 4) Look at header info first: separate the header types and modify to correctly re-merge later.\n # ----------------------------------------------------------------------------------------------------------\n # [Header]\n df_headers = df_run_data_temp.ix[header_index + 
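# change_scope above rewrites every key in a pickled parameter dict from the
# retrain scope to the epoch scope; the renaming step isolated for clarity.
def rename_scope(params, old_scope, new_scope):
    return {key.replace(old_scope, new_scope): value for key, value in params.items()}

params = {'att_str_retrain0/w0': 1, 'att_str_retrain0/b0': 2}
assert sorted(rename_scope(params, 'att_str_retrain0', 'att_str_epoch5')) == \
    ['att_str_epoch5/b0', 'att_str_epoch5/w0']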
1:manifest_index - 1]\n # [Manifests]\n df_manifests = df_run_data_temp.ix[manifest_index + 1:read_index - 2]\n for row_index, row in df_manifests.iterrows():\n row['Property'] = 'Manifest ' + row['Property']\n # [Reads]\n df_reads = df_run_data_temp.ix[read_index + 1:settings_index - 2]\n read_list = []\n for row_index, row in df_reads.iterrows():\n read_list.append(row['Property'])\n # [Settings]\n df_settings = df_run_data_temp.ix[settings_index + 1:]\n # Combine all\n df_run_data_final = df_run_data_final.append(df_headers)\n df_run_data_final = df_run_data_final.append(df_manifests)\n df_run_data_final = df_run_data_final.append({'Property': 'Reads', 'Value': read_list}, ignore_index=True)\n df_run_data_final = df_run_data_final.append(df_settings)\n df_run_data_final = df_run_data_final.reset_index(drop=True)\n # Convert to dictionary, set_index avoids the index being used a key\n run_dict = df_run_data_final.set_index('Property')['Value'].to_dict()\n # ----------------------------------------------------------------------------------------------------------\n # 5) Now look at sample data: extract lab numbers and transpose dataframe to make dictionary work per patient.\n # ----------------------------------------------------------------------------------------------------------\n df_data = df_sample_sheet.ix[data_index + 1:]\n df_data = df_data.reset_index(drop=True)\n # Change column names\n df_data.columns = df_data.iloc[0]\n df_data = df_data.reindex(df_data.index.drop(0))\n # Drop any columns with \"NaN\" all the way through\n df_data = df_data.dropna(axis=1, how='all')\n # Use lab numbers as column headings and initial key in dictionary\n sample_id_list = []\n for row_index, row in df_data.iterrows():\n sample_id_list.append(row['Sample_Name'][3:12])\n df_data_trans = df_data.transpose()\n df_data_trans.columns = sample_id_list\n # Convert to dictionary\n sample_dict = df_data_trans.to_dict()\n # ----------------------------------------------------------------------------------------------------------\n return run_dict, sample_dict\n\n '''\n Method 2:\n\n def get_run_info(csv_file):\n iem = ''\n investigator = ''\n experiment = ''\n run_date = ''\n workflow = ''\n app = ''\n assay = ''\n description = ''\n chemistry = ''\n worksheet = ''\n manifest = ''\n reads = ''\n data_index = 0\n read_index = 0\n manifest_index = 0\n read1 = 0\n read2 = 0\n sample_dict = {}\n\n with open(csv_file, 'r') as c:\n reader = csv.reader(c, delimiter=',')\n for i, row in enumerate(reader):\n if row[0] == 'IEMFileVersion':\n iem = row[1]\n elif row[0] == \"Investigator Name\":\n investigator = row[1]\n elif row[0] == 'Experiment Name':\n experiment = row[1]\n elif row[0] == 'Date':\n run_date = row[1]\n elif row[0] == 'Workflow':\n workflow = row[1]\n elif row[0] == 'Application':\n app = row[1]\n elif row[0] == 'Assay':\n assay = row[1]\n elif row[0] == 'Description':\n description = row[1]\n elif row[0] == 'Chemistry':\n chemistry = row[1]\n elif row[0] == 'worksheet':\n worksheet = row[1]\n elif row[0] == '[Manifests]':\n manifest_index = i\n elif row[0] == '[Reads]':\n read_index = i\n elif row[0] == '[Data]':\n data_index = i\n else:\n pass\n if i == (read_index + 1):\n read1 = row[0]\n if i == (read_index + 2):\n read2 = row[0]\n if i == (manifest_index + 1):\n manifest = row[1]\n reads = \"(%s,%s)\" % (read1, read2)\n\n run_dict = {\n \"IEM\": iem,\n \"Investigator\": investigator,\n \"Experiment\": experiment,\n \"Date\": run_date,\n \"Workflow\": workflow,\n \"Application\": app,\n 
\"Assay\": assay,\n \"Description\": description,\n \"Chemistry\": chemistry,\n \"worksheet\": worksheet,\n \"Manifest\": manifest,\n \"Reads\": reads\n }\n\n df_sample_sheet = pd.read_csv(csv_file, header=None)\n df_data = df_sample_sheet.ix[data_index + 1:]\n for row_index, row in df_data.iterrows():\n lab_id = str(row[1])[3:12]\n sample_id = row[0]\n name = row[1]\n plate = row[2]\n well = row[3]\n index1 = row[5]\n index2 = row[7]\n sample_manifest = row[8]\n project = row[10]\n\n sample_dict[lab_id] = {\n \"Sample_id\": sample_id,\n \"Name\": name,\n \"Plate\": plate,\n \"Well\": well,\n \"Index1\": index1,\n \"Index2\": index2,\n \"Manifest\": sample_manifest,\n \"Project\": project\n }\n\n print run_dict, sample_dict\n '''\n","sub_path":"aml/parse_sample_sheet.py","file_name":"parse_sample_sheet.py","file_ext":"py","file_size_in_byte":8059,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"399581316","text":"# -*- coding: utf-8 -*-\n\"\"\"\ntransform.py\n\nThis script contains functions that take inputs and transform them to be of use in \nbigger functions where they are called. They focus mainly on overlapping\nrepeated structures and annotation markers.\n\nThis file contains the following functions:\n \n * create_anno_remove_overlaps - Turns rows of repeats into marked rows with \n annotation markers for the start indices and zeroes otherwise. After \n removing the annotations that have overlaps, creates separate arrays\n for annotations with overlaps and annotations without overlaps. Finally,\n the annotation markers are checked and fixed if necessary.\n \n * create_anno_rows - Turns rows of repeats into marked rows with annotation\n markers for start indices and zeroes otherwise. Then checks if the correct \n annotation markers were given and fixes the markers if necessary.\n \n * remove_overlaps - Removes any pairs of repeats with the same length and \n annotation marker where at least one pair of repeats overlap in time\n \n * separate_anno_markers - Expands vector of non-overlapping repeats into\n a matrix representation. The matrix representation is a visual recored of\n where all of the repeats in a song start and end.\n\"\"\"\n\nimport numpy as np\n\ndef create_anno_remove_overlaps(k_mat,song_length,band_width):\n \"\"\"\n Turn k_mat into marked rows with annotation markers for the start indices \n and zeroes otherwise. After removing the annotations that have overlaps, \n output k_lst_out which only contains rows that have no overlaps. Then \n take the annotations that have overlaps from k_lst_out and put them in\n overlap_lst. Lastly, check if the proper sequence of annotation markers \n was given and fix them if necessary.\n \n Args\n ----\n k_mat: np.array\n List of pairs of repeats of length 1 with annotations \n marked. 
The first two columns refer to the first repeat\n        of the pair, the second two refer to the second repeat of\n        the pair, the fifth column refers to the length of the\n        repeats, and the sixth column contains the annotation markers.\n    \n    song_length: int\n        number of audio shingles\n    \n    band_width: int\n        the length of repeats encoded in k_mat\n    \n    Returns\n    -------\n    pattern_row: np.array\n        row that marks where non-overlapping repeats occur,\n        marking the annotation markers for the start indices\n        and 0's otherwise\n    \n    k_lst_out: np.array\n        list of pairs of repeats of length band_width that\n        contain no overlapping repeats with annotations\n        marked\n    \n    overlap_lst: np.array\n        list of pairs of repeats of length band_width that\n        contain overlapping repeats with annotations marked\n    \"\"\"\n    # Step 0: Initialize outputs: Start with a vector of all 0's for\n    # pattern_row and assume that the row has no overlaps\n    pattern_row = np.zeros((1,song_length)).astype(int)\n    overlap_lst = []\n    bw = band_width\n    \n    # Step 0a: Find the number of distinct annotations\n    anno_lst = k_mat[:,5] # Get the annotation markers (the sixth column)\n    anno_max = anno_lst.max(0) # The largest annotation marker\n    \n    # Step 1: Loop over the annotations\n    for a in range(1,anno_max+1):\n        # Step 1a: Add 1's to pattern_row at the time steps where repeats with\n        # annotation a begin\n        ands = (anno_lst == a) # Check if anno_lst is equal to a\n        bind_rows = [k_mat[ands,0],k_mat[ands,2]]\n        start_inds = np.concatenate(bind_rows)\n        pattern_row[0,start_inds-1] = a\n\n        # Step 2: check annotation by annotation\n        # Start with row of 0's\n        good_check = np.zeros((1,song_length)).astype(int)\n        good_check[0,start_inds-1] = 1 # Add 1 to all time steps where repeats\n                                       # with annotation a begin\n        \n        # Using reconstruct_full_block to check for overlaps\n        block_check = reconstruct_full_block(good_check,bw)\n\n        # If there are any overlaps, remove the bad annotations from both\n        # the pattern_row and from the k_lst_out\n        if block_check.max() > 1:\n            # Remove the bad annotations from pattern_row\n            pattern_row[0,start_inds-1] = 0\n            \n            # Remove the bad annotations from k_lst_out and add them to\n            # overlap_lst\n            remove_inds = ands\n\n            temp_add = k_mat[remove_inds,:]\n            overlap_lst.append(temp_add)\n            \n            if np.any(remove_inds):\n                # Convert the boolean array remove_inds into an array of integers\n                remove_inds = np.array(remove_inds).astype(int)\n                remove = np.where(remove_inds==1)\n                \n                # Delete the rows that meet the condition set by remove_inds\n                k_mat = np.delete(k_mat,remove,axis=0)\n            \n            anno_lst = k_mat[:,5]\n    \n    inds_markers = np.unique(pattern_row)\n    # If any of inds_markers[i] is equal to zero, then remove this index\n    if np.any(inds_markers == 0):\n        inds_markers = np.delete(inds_markers,0)\n    \n    # If inds_markers is not empty, then execute this if statement\n    if inds_markers.size > 0:\n        for na in range(1,len(inds_markers)+1):\n            IM = inds_markers[na-1]\n            if IM > na:\n                # Fix the annotations in pattern_row\n                temp_anno = (pattern_row == IM)\n                pattern_row = pattern_row - (IM * temp_anno) + (na * temp_anno)\n    \n    # If k_mat is not empty, then execute this if statement\n    if k_mat.size > 0:\n        k_lst_out = np.unique(k_mat,axis=0)\n        for na in range(1,len(inds_markers)+1):\n            IM = inds_markers[na-1]\n            if IM > na:\n                # Fix the annotations in k_lst_out\n                kmat_temp_anno = (k_lst_out[:,5] == IM)\n                k_lst_out[:,5] = k_lst_out[:,5] - (IM * kmat_temp_anno) + \\\n                    (na * kmat_temp_anno)\n    else:\n        k_lst_out = np.array([])\n    \n    # Edit the annotations in the overlap_lst so that the annotations start\n    # with 1 and increase one each time. overlap_lst is a python list of 2d\n    # arrays at this point, so test its length and stack it before np.unique\n    if len(overlap_lst) > 0:\n        overlap_lst = np.unique(np.vstack(overlap_lst),axis=0)\n        overlap_lst = add_annotations(overlap_lst, song_length)\n\n    output = (pattern_row,k_lst_out,overlap_lst)\n    \n    return output\n\n\ndef create_anno_rows(k_mat,song_length):\n    \"\"\"\n    Turn the k_mat into marked rows with annotation markers for the start\n    indices and zeroes otherwise. Check if the proper sequence of annotation\n    markers was given and fix them if necessary.\n\n    Args\n    ----\n    k_mat: np.array\n        List of pairs of repeats of length 1 with annotations\n        marked. The first two columns refer to the first repeat\n        of the pair, the second two refer to the second repeat of\n        the pair, the fifth column refers to the length of the\n        repeats, and the sixth column contains the annotation markers.\n    \n    song_length: int\n        song length, which is the number of audio shingles\n    \n    Returns\n    -------\n    pattern_row: np.array\n        row that marks where non-overlapping repeats\n        occur, marking the annotation markers for the\n        start indices and zeroes otherwise.\n\n    k_lst_out: np.array\n        list of pairs of repeats of length BAND_WIDTH that\n        contain no overlapping repeats with annotations marked.\n    \"\"\"\n    # Step 0: Initialize outputs: Start with a vector of all 0's for\n    # pattern_row and assume that the row has no overlaps\n    pattern_row = np.zeros((1,song_length)).astype(int)\n    \n    # Step 0a: Find the number of distinct annotations\n    anno_lst = k_mat[:,5] # Get the annotation markers (the sixth column)\n    anno_max = anno_lst.max(0) # The largest annotation marker\n    \n    # Step 1: Loop over the annotations\n    for a in range(1,anno_max+1):\n        ands = (anno_lst == a) # Check if anno_lst is equal to a\n        \n        # Combine rows into a single matrix\n        bind_rows = [k_mat[ands,0],k_mat[ands,2]]\n        start_inds = np.concatenate(bind_rows)\n        pattern_row[0,start_inds-1] = a\n    \n    # Step 2: Check that in fact each annotation has a repeat associated to it\n    inds_markers = np.unique(pattern_row)\n\n    # If any of inds_markers[i] == 0, then delete this index\n    if np.any(inds_markers == 0):\n        inds_markers = np.delete(inds_markers,0)\n\n    if inds_markers.size > 0:\n        for na in range(1,len(inds_markers)+1):\n            IM = inds_markers[na-1]\n            if IM > na:\n                # Fix the annotations in pattern_row\n                temp_anno = (pattern_row == IM)\n                pattern_row = pattern_row - (IM * temp_anno) + (na * temp_anno)\n    \n    # Edit the annotations to match the annotations in pattern_row\n    if k_mat.size > 0:\n        k_lst_out = np.unique(k_mat, axis=0)\n        for na in range(1,len(inds_markers)+1):\n            IM = inds_markers[na-1]\n            if IM > na:\n                # Fix the annotations in k_lst_out\n                kmat_temp_anno = (k_lst_out[:,5] == IM)\n                k_lst_out[:,5] = k_lst_out[:,5] - (IM * kmat_temp_anno) + \\\n                    (na*kmat_temp_anno)\n    else:\n        k_lst_out = np.array([])\n    \n    output = (pattern_row,k_lst_out)\n    \n    return output
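\n\n\n# A minimal, hypothetical usage sketch (toy values, not from the original\n# codebase). Each row of k_mat is [start1, end1, start2, end2, length, anno]:\n#\n#     k_mat = np.array([[1, 2, 6, 7, 2, 1]])\n#     pattern_row, k_lst_out, overlap_lst = create_anno_remove_overlaps(k_mat, 10, 2)\n#     # pattern_row marks time steps 1 and 6 with annotation marker 1;\n#     # overlap_lst stays empty because the two occurrences do not overlap.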
\n\n\ndef remove_overlaps(input_mat, song_length):\n    \"\"\"\n    Removes any pairs of repeat length and specific annotation marker\n    where there exists at least one pair of repeats that do\n    overlap in time.\n\n    Args\n    ----\n    input_mat: np.array(int)\n        List of pairs of repeats with annotations marked. The first\n        two columns refer to the first repeat of the pair, the second\n        two refer to the second repeat of the pair, the fifth column\n        refers to the length of the repeats, and the sixth column\n        contains the annotation markers.\n    \n    song_length: int\n        the number of audio shingles\n    \n    Returns\n    -------\n    lst_no_overlaps: np.array(int)\n        List of pairs of repeats with annotations marked. All the\n        repeats of a given length and with a specific annotation\n        marker do not overlap in time.\n    \n    matrix_no_overlaps: np.array(int)\n        Matrix representation of lst_no_overlaps with one row for\n        each group of repeats\n    \n    key_no_overlaps: np.array(int)\n        Vector containing the lengths of the repeats encoded in\n        each row of matrix_no_overlaps\n    \n    annotations_no_overlaps: np.array(int)\n        Vector containing the annotation markers of the repeats\n        encoded in each row of matrix_no_overlaps\n    \n    all_overlap_lst: np.array(int)\n        List of pairs of repeats with annotations marked removed\n        from input_mat. For each pair of repeat length and specific\n        annotation marker, there exists at least one pair of repeats\n        that do overlap in time.\n    \"\"\"\n    # Same list with repetitions removed\n    bw_vec = np.unique(input_mat[:,4])\n    \n    # Convert input_mat to a python list L of np arrays, one entry per row\n    L = []\n    for i in range(np.shape(input_mat)[0]):\n        L.append(np.array(input_mat[i,:]))\n\n    # Sort list ascending, then reverse it\n    bw_vec = np.sort(bw_vec)\n    bw_vec = bw_vec[::-1]\n\n    mat_NO = []\n    key_NO = []\n    anno_NO = []\n    all_overlap_lst = []\n    \n    # While bw_vec still has entries\n    while np.size(bw_vec) != 0:\n        bw_lst = []\n        bw = bw_vec[0]\n        # Extract pairs of repeats of length BW from the list of pairs of\n        # repeats with annotation markers\n        # Create bw_lst: collect the full rows, then blank them out in L\n        i = 0\n        while i < len(L):\n            if L[i][4] == bw:\n                bw_lst.append(L[i])\n                L[i] = np.array([])\n            i = i + 1\n        #endWhile\n        \n        # Remove blanked entries from L (they were appended to bw_lst);\n        # element-wise comparison with a numpy array on the right does not\n        # work here, so compare via .tolist()\n        L = list(filter(lambda row: row.tolist() != [], L))\n        if bw > 1:\n            # Use create_anno_remove_overlaps to do the following three things:\n            # ONE: Turn BW_LST into marked rows with annotation markers for\n            # the start indices and 0's otherwise\n            # TWO: After removing the annotations that have overlaps, output\n            # BW_LST_OUT which only contains rows that have no overlaps\n            # THREE: The annotations that have overlaps get removed from\n            # BW_LST_OUT and get added to ALL_OVERLAP_LST\n            tuple_of_outputs = create_anno_remove_overlaps(np.array(bw_lst),\n                                                           song_length, bw)\n            \n            pattern_row = tuple_of_outputs[0]\n            bw_lst_out = tuple_of_outputs[1]\n            overlap_lst = tuple_of_outputs[2]\n\n            # Convert the numpy arrays to lists of 1d numpy arrays\n            bw_lst_out_py = []\n            for i in range(np.shape(bw_lst_out)[0]):\n                bw_lst_out_py.append(np.array(bw_lst_out[i,:]))\n\n            overlap_lst_py = []\n            for i in range(np.shape(overlap_lst)[0]):\n                overlap_lst_py.append(np.array(overlap_lst[i,:]))\n\n            # If there are lines to add\n            if len(overlap_lst_py) != 0:\n                # Add them\n                all_overlap_lst.extend(overlap_lst_py)\n        else:\n            # Similar to the IF case --\n            # Use create_anno_rows to do the following two things:\n            # ONE: Turn BW_LST into marked rows with annotation markers for\n            # the start indices and 0's otherwise\n            # TWO: In this case, there are no overlaps. Then BW_LST_OUT is just\n            # BW_LST. Also in this case, THREE from above does not exist\n            tuple_of_outputs = create_anno_rows(np.array(bw_lst), song_length)\n            pattern_row = tuple_of_outputs[0]\n            bw_lst_out = tuple_of_outputs[1]\n            \n            # Convert the numpy arrays to lists of 1d numpy arrays\n            bw_lst_out_py = []\n            for i in range(np.shape(bw_lst_out)[0]):\n                bw_lst_out_py.append(np.array(bw_lst_out[i,:]))\n\n            # No pairs are removed in this branch\n            overlap_lst_py = []\n\n        if np.max(np.max(pattern_row)) > 0:\n            # Separate ALL annotations. In this step, we expand a row into a\n            # matrix, so that there is one group of repeats per row.\n            tuple_of_outputs = separate_anno_markers(bw_lst_out,\n                                                     song_length, bw,\n                                                     pattern_row)\n            pattern_mat = tuple_of_outputs[0]\n            pattern_key = tuple_of_outputs[1]\n            anno_temp_lst = tuple_of_outputs[2]\n        else:\n            pattern_mat = []\n            pattern_key = []\n            anno_temp_lst = []\n\n        if np.sum(np.sum(pattern_mat)) > 0:\n            # If there are lines to add, add them\n            mat_NO.append(pattern_mat)\n            key_NO.append(pattern_key)\n            anno_NO.append(anno_temp_lst)\n\n        # Add the kept (non-overlapping) rows back to L, one row per entry\n        L.extend(bw_lst_out_py)\n        # Sort list by 5th column\n        # Create dict to re-sort L\n        re_sort_L = {}\n        for i in range(len(L)):\n            # Get 5th column values into list of tuples\n            # Key = index, value = value\n            re_sort_L[i] = (L[i])[4]\n        # Convert to list of tuples and sort\n        re_sort_L = re_sort_L.items()\n        # Sort that dict by values\n        re_sort_L = sorted(re_sort_L, key=lambda item: item[1])\n\n        sorted_inds = [x[0] for x in re_sort_L]\n        # Reorder L according to the sorted indexes\n        L = [L[i] for i in sorted_inds]\n\n        if len(L) == 0:\n            break\n\n        # Will just use a np array here\n        np_mat_L = np.array(L)\n        bw_vec = np.unique(np_mat_L[:,4])\n        \n        # Sort list ascending, then reverse it\n        bw_vec = np.sort(bw_vec)\n        bw_vec = bw_vec[::-1]\n        # Remove the bandwidths that have already been processed\n        cut_index = 0\n        for value in bw_vec:\n            # Count the entries strictly below the current bandwidth\n            if value < bw:\n                cut_index = cut_index + 1\n        #endfor\n        # bw_vec is sorted descending, so the last cut_index entries are the\n        # ones strictly below bw; keep only those\n        bw_vec = bw_vec[np.shape(bw_vec)[0] - cut_index:]\n    #endWhile\n\n    # Set the outputs\n    lst_no_overlaps = np.array(L)\n    \n    # Drop any blank entries from key_NO, mat_NO and anno_NO\n    key_NO = list(filter(lambda k: k.tolist() != [], key_NO))\n    mat_NO = list(filter(lambda m: m.tolist() != [], mat_NO))\n    anno_NO = list(filter(lambda a: a.tolist() != [], anno_NO))\n\n    if len(key_NO) != 0:\n        # Flatten to a 1d vector of lengths so that argsort below behaves\n        key_NO = np.concatenate(key_NO).flatten()\n    else:\n        key_NO = np.array([])\n    \n    if len(mat_NO) != 0:\n        mat_NO = np.concatenate(mat_NO)\n    else:\n        mat_NO = np.array([])\n    \n    if len(anno_NO) != 0:\n        anno_NO = np.concatenate(anno_NO)\n    else:\n        anno_NO = np.array([])\n\n    # Convert to np.array\n    all_overlap_lst = np.array(all_overlap_lst)\n    if np.shape(all_overlap_lst)[0] != 0:\n        overlap_inds = np.argsort(all_overlap_lst[:,4])\n        all_overlap_lst = all_overlap_lst[overlap_inds, :]\n    #endif\n    \n    # Order the groups by repeat length: argsort the keys before sorting them\n    mat_inds = np.argsort(key_NO)\n    key_NO = np.sort(key_NO)\n    if np.shape(mat_NO)[0] != 0:\n        matrix_no_overlaps = mat_NO[mat_inds,:]\n    else:\n        matrix_no_overlaps = mat_NO\n    \n    key_no_overlaps = key_NO\n    if np.shape(anno_NO)[0] != 0:\n        annotations_no_overlaps = anno_NO[mat_inds,:]\n    else:\n        annotations_no_overlaps = anno_NO\n    \n    # Compile final outputs to a tuple\n    output_tuple = (lst_no_overlaps, matrix_no_overlaps, key_no_overlaps,\n                    annotations_no_overlaps, all_overlap_lst)\n    \n    return output_tuple\n\ndef separate_anno_markers(k_mat, sn, band_width, pattern_row):\n    \"\"\"\n    Expands pattern_row, a row vector that marks where non-overlapping\n    repeats occur, into a matrix representation or np.array. The dimension of\n    this array is twice the pairs of repeats by the length of the song (sn).\n    k_mat provides a list of annotation markers that is used in separating the\n    repeats of length band_width into individual rows. Each row will mark the\n    start time steps of a repeat with 1's and 0's otherwise. The array\n    is a visual record of where all of the repeats in a song start and end.\n\n    Args\n    ----\n    k_mat: np.array\n        List of pairs of repeats of length BAND_WIDTH with annotations\n        marked. The first two columns refer to the start and end time\n        steps of the first repeat of the pair, the second two refer to\n        the start and end time steps of second repeat of the pair, the\n        fifth column refers to the length of the repeats, and the sixth\n        column contains the annotation markers. We will be indexing into\n        the sixth column to obtain a list of annotation markers.\n    \n    sn: number\n        song length, which is the number of audio shingles\n    \n    band_width: number\n        the length of repeats encoded in k_mat\n    \n    pattern_row: np.array\n        row vector of the length of the song that marks where\n        non-overlapping repeats occur with the repeats' corresponding\n        annotation markers and 0's otherwise\n\n    Returns\n    -------\n    pattern_mat: np.array\n        matrix representation where each row contains a group of repeats\n        marked\n    \n    pattern_key: np.array\n        column vector containing the lengths of the repeats encoded in\n        each row of pattern_mat\n    \n    anno_id_lst: np.array\n        column vector containing the annotation markers of the repeats\n        encoded in each row of pattern_mat\n    \"\"\"\n    \n    #List of annotation markers\n    anno_lst = k_mat[:,5]\n\n    #Initialize pattern_mat: Start with a matrix of all 0's that has\n    #the same number of rows as there are annotations and sn columns\n    pattern_mat = np.zeros((anno_lst.size, sn), dtype = np.intp)\n\n    #Separate the annotations into individual rows\n    if anno_lst.size > 1: #If there are two or more annotations\n        #Loop through the list of annotation markers\n        for a in anno_lst:\n            #Find starting indices:\n            #Start index of first repeat a\n            a_one = k_mat[a-1, 0] - 1\n\n            #Start index of second repeat a\n            a_two = k_mat[a-1, 2] - 1\n\n            #Start indices of repeat a\n            s_inds = np.append(a_one, a_two)\n\n            #Replace entries at each repeat's start time with \"1\"\n            pattern_mat[a - 1, s_inds] = 1\n\n        #Create a column vector with one repeat length per row\n        pattern_key = band_width * np.ones((anno_lst.size, 1)).astype(int)\n\n    else: #When there is one annotation\n        pattern_mat = pattern_row\n        #Keep the key 2D as well, so downstream concatenation sees a\n        #consistent shape\n        pattern_key = band_width * np.ones((1, 1)).astype(int)\n    \n    #Reshape anno_lst from a row vector into a column vector\n    anno_id_lst = anno_lst.reshape((anno_lst.size, 1))\n    \n    output = (pattern_mat, pattern_key, anno_id_lst)\n    \n    return output
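\n\n\n# Hypothetical smoke test for the pipeline above (toy values, not from the\n# original test suite):\n#\n#     input_mat = np.array([[1, 2, 6, 7, 2, 1],\n#                           [4, 4, 9, 9, 1, 1]])\n#     outputs = remove_overlaps(input_mat, 10)\n#     # outputs[0] lists the surviving pairs; outputs[1] has one row per\n#     # group of repeats, marking where each group begins.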
\n","sub_path":"aligned-hierarchies/transform.py","file_name":"transform.py","file_ext":"py","file_size_in_byte":22127,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"149019321","text":"import sys\n\ndef get_list(data):\n retlist = []\n for c in data:\n retlist.append( ord(c) )\n retlist += [17, 31, 73, 47, 23]\n return retlist\n\n#Given all input data, get the answer to the question\ndef get_answer(data, list_length=256):\n lengths = get_list(data)\n the_list = [x for x in range(list_length)]\n c_position = 0\n skip_size = 0\n\n #perform 64 rounds\n for _ in range(64):\n for length in lengths:\n for i in range(length//2):\n pos1 = (c_position+i)%list_length\n pos2 = (c_position+length-1-i)%list_length\n the_list[pos1], the_list[pos2] = the_list[pos2], the_list[pos1]\n\n c_position = (c_position + length + skip_size)%list_length\n skip_size += 1\n\n #get the xors\n xor_numbers = []\n for block in range(16):\n num = 0\n for x in the_list[block*16:block*16+16]:\n num ^= x\n xor_numbers.append( num )\n \n #get the string\n retstring = \"\"\n for num in xor_numbers:\n retstring += hex(num)[2:].zfill(2)\n\n\n return retstring\n\ndef main():\n data = input().strip()\n answer = get_answer(data)\n print(answer)\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"2017/Day10/part2.py","file_name":"part2.py","file_ext":"py","file_size_in_byte":1106,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"519046068","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Mar 29 18:16:49 2018\n\n@author: edanner\n\nThis is the script that I originally threw together to try and understand and sort the sequences from pacbio. I took pieces for the more\nrecent 'sortingseqs.py' . Just took dieas from it. 
\n\ndef main():\n    data = input().strip()\n    answer = get_answer(data)\n    print(answer)\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"2017/Day10/part2.py","file_name":"part2.py","file_ext":"py","file_size_in_byte":1106,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
{"seq_id":"519046068","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Mar 29 18:16:49 2018\n\n@author: edanner\n\nThis is the script that I originally threw together to try to understand and\nsort the sequences from pacbio. I took pieces of it for the more recent\n'sortingseqs.py'. Just took ideas from it, and only for parts.\n\"\"\"\n\n\n\nfrom Bio import SeqIO\nfrom Bio.Seq import Seq\nfrom Bio.Alphabet import IUPAC\n\n\n#\ninputFileName = \"../seqsfwd/fwdcellsand2kb99 .fasta\"\noutputFileName = \"DandG_mCherry_seq.fasta\"\noutputFileNameshort = \"DandG_mCherry_shortseq.fasta\"\noutputFileNamelong = \"DandG_mCherry_longseq.fasta\"\n\n\n#This is to compile a count of the different sequence outcomes\n\nlengthList = []\n\ncountingAll = 0\nshortmCherryListCount = 0\nBFPfwd = 0\nBFPrev = 0\nBFPlong = 0\n\nmCherryF = 0\nmCherryR = 0\n\nHITImCherryF = 0\nHITImCherryR = 0\n\nvenus = 0\n\n\n\n#mCherry sequences searched for\nmCherryF_seq = Seq('CGGCGCCCTGAAGGGCGAGATCAA', IUPAC.unambiguous_dna)\nmCherryR_seq = mCherryF_seq.reverse_complement()\n\n#BFP sequences searched for (this is near the 3' end so that the whole issue with ssa or polymerase is avoided)\nBFPF_seq = Seq('TGGAGTTCCGCACCGCCGCCGGGAT', IUPAC.unambiguous_dna)\nBFPR_seq = BFPF_seq.reverse_complement()\n\n#venus sequences searched for\nvenus_seq = Seq('TGGAGTTCGTGACCGCCG', IUPAC.unambiguous_dna)\n\n\nmCherryList = []\nshortmCherryList = []\nlongmCherryList = []\nmCherryLengths = []\n\n\n#check for BFP seq; membership is tested against each record's underlying\n#sequence (sequence.seq), since a SeqRecord itself does not support substring\n#membership\nfor sequence in SeqIO.parse(inputFileName, \"fasta\"):\n    countingAll += 1\n    if BFPF_seq in sequence.seq:\n        BFPfwd += 1\n        if len(sequence) > 1700:\n            BFPlong += 1\n    elif BFPR_seq in sequence.seq:\n        BFPrev += 1\n\n#check for mCherry\nfor sequence in SeqIO.parse(inputFileName, \"fasta\"):\n    if mCherryF_seq in sequence.seq:\n        mCherryF += 1\n        mCherryList.append(sequence)\n        mCherryLengths.append(len(sequence))\n    elif mCherryR_seq in sequence.seq:\n        mCherryR += 1\n\n#check for mCherry HITI\nfor sequence in SeqIO.parse(inputFileName, \"fasta\"):\n    if (mCherryF_seq in sequence.seq) and (BFPF_seq in sequence.seq):\n        HITImCherryF += 1\n    elif (mCherryR_seq in sequence.seq) and (BFPF_seq in sequence.seq):\n        HITImCherryR += 1\n\n#check for venus\nfor sequence in SeqIO.parse(inputFileName, \"fasta\"):\n    if venus_seq in sequence.seq:\n        venus += 1\n\nfor sequence in SeqIO.parse(inputFileName, \"fasta\"):\n    if (BFPF_seq in sequence.seq) or (mCherryF_seq in sequence.seq) or (mCherryR_seq in sequence.seq) or (venus_seq in sequence.seq):\n        lengthList.append(len(sequence))\n\nfor sequence in mCherryList:\n    if len(sequence) <= 1800:\n        shortmCherryList.append(sequence)\n    else:\n        longmCherryList.append(sequence)\n\n#Writing mCherry + cells to file\nSeqIO.write(mCherryList, outputFileName, \"fasta\")\nSeqIO.write(shortmCherryList, outputFileNameshort, \"fasta\")\nSeqIO.write(longmCherryList, outputFileNamelong, \"fasta\")\n\n\n\nprint(mCherryLengths)\n\nprint(\"number of reads in file:\",countingAll)\nprint(\"BFPf alignments:\", BFPfwd)\nprint(\"BFPf of size for original allele:\", BFPlong)\n\nprint(\"BFP inverted alignments:\", BFPrev)\nprint(\"mCherryF seq:\", mCherryF)\nprint(\"mCherryR seq:\", mCherryR)\nprint(\"HITI mCherryF:\", HITImCherryF)\nprint(\"HITI mCherryR:\", HITImCherryR)\nprint(\"Venus counts:\", venus)\n\nprint(\"short mCherryList List length:\", len(shortmCherryList))\nprint(\"long mCherryList List length:\", len(longmCherryList))\n\n\n#Checking for Deletions\n\nnotdel = 0\ndeletions = 0\n\n\n\n\nfor x in lengthList:\n    if x >= 1090:\n        notdel += 1\n    else:\n        deletions += 1\n\n\nprint(\"Deletions:\", deletions,\". 
Not deletion seq:\", notdel, \".\")\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"scripts/oldsortingscript.py","file_name":"oldsortingscript.py","file_ext":"py","file_size_in_byte":3712,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"624213642","text":"# import the necessary packages\nfrom imutils.video import FileVideoStream\nfrom imutils.video import FPS\nimport numpy as np\nimport argparse\nimport imutils\nimport time\nimport cv2\nimport numpy as np\nimport cv2\nimport glob\nimport matplotlib.pyplot as plt\nimport matplotlib.image as mpimg\nfrom importlib import reload\nimport utils; reload(utils)\nfrom utils import *\nimport math\nfrom caliberate import *\n\ncalibration_dir = \"camera_cal\"\ntest_imgs_dir = \"test_images\"\noutput_imgs_dir = \"output_images\"\noutput_videos_dir = \"output_videos\"\n\ndef compute_perspective_transform_matrices(src, dst):\n \"\"\"\n Returns the tuple (M, M_inv) where M represents the matrix to use for perspective transform\n and M_inv is the matrix used to revert the transformed image back to the original one\n \"\"\"\n #Calculates a perspective transform from four pairs of the corresponding points.\n M = cv2.getPerspectiveTransform(src, dst)\n \n M_inv = cv2.getPerspectiveTransform(dst, src)\n \n return (M, M_inv)\ndef compute_hls_white_yellow_binary(rgb_img):\n \"\"\"\n Returns a binary thresholded image produced retaining only white and yellow elements on the picture\n The provided image should be in RGB format\n \"\"\"\n hls_img = to_hls(rgb_img)\n \n # Compute a binary thresholded image where yellow is isolated from HLS components\n img_hls_yellow_bin = np.zeros_like(hls_img[:,:,0])\n img_hls_yellow_bin[((hls_img[:,:,0] >= 15) & (hls_img[:,:,0] <= 35))\n & ((hls_img[:,:,1] >= 30) & (hls_img[:,:,1] <= 204))\n & ((hls_img[:,:,2] >= 115) & (hls_img[:,:,2] <= 255)) \n ] = 1\n \n # Compute a binary thresholded image where white is isolated from HLS components\n img_hls_white_bin = np.zeros_like(hls_img[:,:,0])\n img_hls_white_bin[((hls_img[:,:,0] >= 0) & (hls_img[:,:,0] <= 255))\n & ((hls_img[:,:,1] >= 200) & (hls_img[:,:,1] <= 255))\n & ((hls_img[:,:,2] >= 0) & (hls_img[:,:,2] <= 255)) \n ] = 1\n \n # Now combine both\n img_hls_white_yellow_bin = np.zeros_like(hls_img[:,:,0])\n img_hls_white_yellow_bin[(img_hls_yellow_bin == 1) | (img_hls_white_bin == 1)] = 1\n\n return img_hls_white_yellow_bin\ndef dir_sobel(gray_img, kernel_size=3, thres=(0, np.pi/2)):\n \"\"\"\n Computes sobel matrix in both x and y directions, gets their absolute values to find the direction of the gradient\n and applies a threshold value to only set pixels within the specified range\n \"\"\"\n sx_abs = np.absolute(cv2.Sobel(gray_img, cv2.CV_64F, 1, 0, ksize=kernel_size))\n sy_abs = np.absolute(cv2.Sobel(gray_img, cv2.CV_64F, 0, 1, ksize=kernel_size))\n \n dir_sxy = np.arctan2(sx_abs, sy_abs)\n\n binary_output = np.zeros_like(dir_sxy)\n binary_output[(dir_sxy >= thres[0]) & (dir_sxy <= thres[1])] = 1\n \n return binary_output\n\ndef combined_sobels(sx_binary, sy_binary, sxy_magnitude_binary, gray_img, kernel_size=3, angle_thres=(0, np.pi/2)):\n sxy_direction_binary = dir_sobel(gray_img, kernel_size=kernel_size, thres=angle_thres)\n \n combined = np.zeros_like(sxy_direction_binary)\n # Sobel X returned the best output so we keep all of its results. 
We AND the other sobels together, then OR the result with Sobel X\n    combined[(sx_binary == 1) | ((sy_binary == 1) & (sxy_magnitude_binary == 1) & (sxy_direction_binary == 1))] = 1\n    \n    return combined\n\ndef mag_sobel(gray_img, kernel_size=3, thres=(0, 255)):\n    \"\"\"\n    Computes sobel matrix in both x and y directions, merges them by computing the magnitude in both directions\n    and applies a threshold value to only set pixels within the specified range\n    \"\"\"\n    sx = cv2.Sobel(gray_img, cv2.CV_64F, 1, 0, ksize=kernel_size)\n    sy = cv2.Sobel(gray_img, cv2.CV_64F, 0, 1, ksize=kernel_size)\n    \n    sxy = np.sqrt(np.square(sx) + np.square(sy))\n    scaled_sxy = np.uint8(255 * sxy / np.max(sxy))\n    \n    sxy_binary = np.zeros_like(scaled_sxy)\n    sxy_binary[(scaled_sxy >= thres[0]) & (scaled_sxy <= thres[1])] = 1\n    \n    return sxy_binary\n\ndef abs_sobel(gray_img, x_dir=True, kernel_size=3, thres=(0, 255)):\n    \"\"\"\n    Applies the sobel operator to a grayscale-like (i.e. single channel) image in either horizontal\n    or vertical direction.\n    The function also computes the absolute value of the resulting matrix and applies a\n    binary threshold\n    \"\"\"\n    sobel = cv2.Sobel(gray_img, cv2.CV_64F, 1, 0, ksize=kernel_size) if x_dir else cv2.Sobel(gray_img, cv2.CV_64F, 0, 1, ksize=kernel_size)\n    sobel_abs = np.absolute(sobel)\n    # Scale the absolute values: scaling the raw signed sobel here would make\n    # negative responses wrap around when cast to uint8\n    sobel_scaled = np.uint8(255 * sobel_abs / np.max(sobel_abs))\n    \n    gradient_mask = np.zeros_like(sobel_scaled)\n    gradient_mask[(thres[0] <= sobel_scaled) & (sobel_scaled <= thres[1])] = 1\n    return gradient_mask\n\ndef get_combined_binary_thresholded_img(undist_img):\n    \"\"\"\n    Applies a combination of binary Sobel and color thresholding to an undistorted image\n    Those binary images are then combined to produce the returned binary image\n    \"\"\"\n    undist_img_gray = to_lab(undist_img)[:,:,0]\n    sx = abs_sobel(undist_img_gray, kernel_size=15, thres=(20, 120))\n    sy = abs_sobel(undist_img_gray, x_dir=False, kernel_size=15, thres=(20, 120))\n    sxy = mag_sobel(undist_img_gray, kernel_size=15, thres=(80, 200))\n    sxy_combined_dir = combined_sobels(sx, sy, sxy, undist_img_gray, kernel_size=15, angle_thres=(np.pi/4, np.pi/2))\n    \n    hls_w_y_thres = compute_hls_white_yellow_binary(undist_img)\n    \n    combined_binary = np.zeros_like(hls_w_y_thres)\n    combined_binary[(sxy_combined_dir == 1) | (hls_w_y_thres == 1)] = 1\n    \n    return combined_binary\n\ndef undistort_image(img, objpts, imgpts):\n    \"\"\"\n    Returns an undistorted image\n    The desired object and image points must also be supplied to this function\n    \"\"\"\n    \n    # calibrateCamera returns the camera matrix, distortion coefficients,\n    # rotation and translation vectors etc\n    ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpts, imgpts, to_grayscale(img).shape[::-1], None, None)\n    \n    undist = cv2.undistort(img, mtx, dist, None, mtx)\n    return undist\n\nfrom collections import deque\n\ndef create_queue(length = 10):\n    return deque(maxlen=length)\n\nclass LaneLine:\n    def __init__(self):\n        \n        self.polynomial_coeff = None\n        self.line_fit_x = None\n        self.non_zero_x = []\n        self.non_zero_y = []\n        self.windows = []\n\nclass LaneLineHistory:\n    def __init__(self, queue_depth=2, test_points=[50, 300, 500, 700], poly_max_deviation_distance=150):\n        self.lane_lines = create_queue(queue_depth)\n        self.smoothed_poly = None\n        self.test_points = test_points\n        self.poly_max_deviation_distance = poly_max_deviation_distance\n    \n    def append(self, lane_line, force=False):\n        if len(self.lane_lines) == 0 or force:\n            self.lane_lines.append(lane_line)\n            self.get_smoothed_polynomial()\n            return True\n        \n        test_y_smooth = 
np.asarray(list(map(lambda x: self.smoothed_poly[0] * x**2 + self.smoothed_poly[1] * x + self.smoothed_poly[2], self.test_points)))\n test_y_new = np.asarray(list(map(lambda x: lane_line.polynomial_coeff[0] * x**2 + lane_line.polynomial_coeff[1] * x + lane_line.polynomial_coeff[2], self.test_points)))\n \n dist = np.absolute(test_y_smooth - test_y_new)\n \n #dist = np.absolute(self.smoothed_poly - lane_line.polynomial_coeff)\n #dist_max = np.absolute(self.smoothed_poly * self.poly_max_deviation_distance)\n max_dist = dist[np.argmax(dist)]\n \n if max_dist > self.poly_max_deviation_distance:\n print(\"**** MAX DISTANCE BREACHED ****\")\n print(\"y_smooth={0} - y_new={1} - distance={2} - max-distance={3}\".format(test_y_smooth, test_y_new, max_dist, self.poly_max_deviation_distance))\n return False\n \n self.lane_lines.append(lane_line)\n self.get_smoothed_polynomial()\n \n return True\n \n def get_smoothed_polynomial(self):\n all_coeffs = np.asarray(list(map(lambda lane_line: lane_line.polynomial_coeff, self.lane_lines)))\n self.smoothed_poly = np.mean(all_coeffs, axis=0)\n \n return self.smoothed_poly\n \n \n\n\nclass AdvancedLaneDetectorWithMemory:\n \"\"\"\n The AdvancedLaneDetectorWithMemory is a class that can detect lines on the road\n ld = AdvancedLaneDetectorWithMemory(opts, ipts, src_pts, dst_pts, 20, 100, 50)\n used parameters:-\n slidingwindows_per_line = 20\n \n # Set the width of the windows +/- margin\n sliding_window_half_width = 100\n \n # Set minimum number of pixels found to recenter window\n sliding_window_recenter_thres = 50\n \n \n \"\"\"\n def __init__(self, objpts, imgpts, psp_src, psp_dst, sliding_windows_per_line, \n sliding_window_half_width, sliding_window_recenter_thres, \n small_img_size=(256, 144), small_img_x_offset=20, small_img_y_offset=10,\n img_dimensions=(720, 1280), lane_width_px=800, \n lane_center_px_psp=600, real_world_lane_size_meters=(32, 3.7)):\n self.objpts = objpts\n self.imgpts = imgpts\n \n (self.M_psp, self.M_inv_psp) = compute_perspective_transform_matrices(psp_src, psp_dst)\n\n self.sliding_windows_per_line = sliding_windows_per_line\n self.sliding_window_half_width = sliding_window_half_width\n self.sliding_window_recenter_thres = sliding_window_recenter_thres\n \n self.small_img_size = small_img_size\n self.small_img_x_offset = small_img_x_offset\n self.small_img_y_offset = small_img_y_offset\n \n self.img_dimensions = img_dimensions\n self.lane_width_px = lane_width_px\n self.lane_center_px_psp = lane_center_px_psp \n self.real_world_lane_size_meters = real_world_lane_size_meters\n\n # We can pre-compute some data here\n \"\"\"\n ym_per_pix = 30/720 # meters per pixel in y dimension\n # xm_per_pix = 3.7/700 # meters per pixel in x dimension\n\n # ym_per_pix = 3.0/100 # meters per pixel in y dimension, lane line is 10 ft = 3 meters\n xm_per_pix = 3.7/550 # meters per pixel in x dimension, lane width is 12 ft = 3.7 meters\n \"\"\"\n \n self.ym_per_px = self.real_world_lane_size_meters[0] / self.img_dimensions[0]\n self.xm_per_px = self.real_world_lane_size_meters[1] / self.lane_width_px\n self.ploty = np.linspace(0, self.img_dimensions[0] - 1, self.img_dimensions[0])\n \n self.previous_left_lane_line = None\n self.previous_right_lane_line = None\n \n self.previous_left_lane_lines = LaneLineHistory()\n self.previous_right_lane_lines = LaneLineHistory()\n \n self.total_img_count = 0\n \n \n def process_image(self, img):\n \"\"\"\n Attempts to find lane lines on the given image and returns an image with lane area colored in green\n as well as 
small intermediate images overlaid on top to understand how the algorithm is performing\n        \"\"\"\n        \n        # First step - undistort the image using the instance's object and image points\n        undist_img = undistort_image(img, self.objpts, self.imgpts)\n        \n        # Produce binary thresholded image from color and gradients\n        thres_img = get_combined_binary_thresholded_img(undist_img)\n        \n        # Create the undistorted and binary perspective transforms\n        img_size = (undist_img.shape[1], undist_img.shape[0])\n        undist_img_psp = cv2.warpPerspective(undist_img, self.M_psp, img_size, flags=cv2.INTER_LINEAR)\n        thres_img_psp = cv2.warpPerspective(thres_img, self.M_psp, img_size, flags=cv2.INTER_LINEAR)\n        \n        ll, rl = self.compute_lane_lines(thres_img_psp)\n        lcr, rcr, lco = self.compute_lane_curvature(ll, rl)\n\n        drawn_lines = self.draw_lane_lines(thres_img_psp, undist_img, ll, rl)\n        \n        drawn_lines_regions = self.draw_lane_lines_regions(thres_img_psp, undist_img, ll, rl)\n        \n        drawn_lane_area = self.draw_lane_area(thres_img_psp, undist_img, ll, rl)\n        \n        drawn_hotspots = self.draw_lines_hotspots(thres_img_psp, ll, rl)\n        \n        combined_lane_img = self.combine_images(drawn_lane_area, drawn_lines, drawn_lines_regions, drawn_hotspots, undist_img_psp)\n        final_img = self.draw_lane_curvature_text(combined_lane_img, lcr, rcr, lco)\n        \n        self.total_img_count += 1\n        self.previous_left_lane_line = ll\n        self.previous_right_lane_line = rl\n        \n        return final_img\n    \n    def draw_lane_curvature_text(self, img, left_curvature_meters, right_curvature_meters, center_offset_meters):\n        \"\"\"\n        Returns an image with curvature information inscribed\n        \"\"\"\n        \n        offset_y = self.small_img_size[1] * 1 + self.small_img_y_offset * 5\n        offset_x = self.small_img_x_offset\n        \n        template = \"{0:17}{1:17}{2:17}\"\n        txt_header = template.format(\"Left Curvature\", \"Right Curvature\", \"Center Alignment\")\n        print(txt_header)\n        txt_values = template.format(\"{:.4f}m\".format(left_curvature_meters),\n                                     \"{:.4f}m\".format(right_curvature_meters),\n                                     \"{:.4f}m Right\".format(center_offset_meters))\n        if center_offset_meters < 0.0:\n            txt_values = template.format(\"{:.4f}m\".format(left_curvature_meters),\n                                         \"{:.4f}m\".format(right_curvature_meters),\n                                         \"{:.4f}m Left\".format(math.fabs(center_offset_meters)))\n        \n        print(txt_values)\n        font = cv2.FONT_HERSHEY_SIMPLEX\n        cv2.putText(img, txt_header, (offset_x, offset_y), font, 1, (255,255,255), 1, cv2.LINE_AA)\n        cv2.putText(img, txt_values, (offset_x, offset_y + self.small_img_y_offset * 5), font, 1, (255,255,255), 2, cv2.LINE_AA)\n        \n        return img\n    \n    def combine_images(self, lane_area_img, lines_img, lines_regions_img, lane_hotspots_img, psp_color_img):\n        \"\"\"\n        Returns a new image made up of the lane area image, with the remaining lane images overlaid as\n        small images in a row at the top of the new image\n        \"\"\"\n        small_lines = cv2.resize(lines_img, self.small_img_size)\n        small_region = cv2.resize(lines_regions_img, self.small_img_size)\n        small_hotspots = cv2.resize(lane_hotspots_img, self.small_img_size)\n        small_color_psp = cv2.resize(psp_color_img, self.small_img_size)\n        \n        lane_area_img[self.small_img_y_offset: self.small_img_y_offset + self.small_img_size[1], self.small_img_x_offset: self.small_img_x_offset + self.small_img_size[0]] = small_lines\n        \n        start_offset_y = self.small_img_y_offset\n        start_offset_x = 2 * self.small_img_x_offset + self.small_img_size[0]\n        lane_area_img[start_offset_y: start_offset_y + self.small_img_size[1], start_offset_x: start_offset_x + 
self.small_img_size[0]] = small_region\n        \n        start_offset_y = self.small_img_y_offset\n        start_offset_x = 3 * self.small_img_x_offset + 2 * self.small_img_size[0]\n        lane_area_img[start_offset_y: start_offset_y + self.small_img_size[1], start_offset_x: start_offset_x + self.small_img_size[0]] = small_hotspots\n\n        start_offset_y = self.small_img_y_offset\n        start_offset_x = 4 * self.small_img_x_offset + 3 * self.small_img_size[0]\n        lane_area_img[start_offset_y: start_offset_y + self.small_img_size[1], start_offset_x: start_offset_x + self.small_img_size[0]] = small_color_psp\n        \n        return lane_area_img\n    \n    def draw_lane_area(self, warped_img, undist_img, left_line, right_line):\n        \"\"\"\n        Returns an image where the inside of the lane has been colored in bright green\n        \"\"\"\n        # Create an image to draw the lines on\n        warp_zero = np.zeros_like(warped_img).astype(np.uint8)\n        color_warp = np.dstack((warp_zero, warp_zero, warp_zero))\n\n        ploty = np.linspace(0, warped_img.shape[0] - 1, warped_img.shape[0])\n        # Recast the x and y points into usable format for cv2.fillPoly()\n        pts_left = np.array([np.transpose(np.vstack([left_line.line_fit_x, ploty]))])\n        pts_right = np.array([np.flipud(np.transpose(np.vstack([right_line.line_fit_x, ploty])))])\n        pts = np.hstack((pts_left, pts_right))\n\n        # Draw the lane onto the warped blank image\n        cv2.fillPoly(color_warp, np.int_([pts]), (255,255, 255))\n\n        # Warp the blank back to original image space using the inverse perspective matrix (M_inv_psp)\n        newwarp = cv2.warpPerspective(color_warp, self.M_inv_psp, (undist_img.shape[1], undist_img.shape[0]))\n        # Combine the result with the original image\n        result = cv2.addWeighted(undist_img, 1, newwarp, 0.3, 0)\n        \n        return result\n    \n    def draw_lane_lines(self, warped_img, undist_img, left_line, right_line):\n        \"\"\"\n        Returns an image where the computed lane lines have been drawn on top of the original warped binary image\n        \"\"\"\n        # Create an output image with 3 colors (RGB) from the binary warped image to draw on and visualize the result\n        out_img = np.dstack((warped_img, warped_img, warped_img))*255\n        \n        # Now draw the lines\n        ploty = np.linspace(0, warped_img.shape[0] - 1, warped_img.shape[0])\n        pts_left = np.dstack((left_line.line_fit_x, ploty)).astype(np.int32)\n        pts_right = np.dstack((right_line.line_fit_x, ploty)).astype(np.int32)\n\n        cv2.polylines(out_img, pts_left, False, (255, 140,0), 5)\n        cv2.polylines(out_img, pts_right, False, (255, 140,0), 5)\n        \n        for low_pt, high_pt in left_line.windows:\n            cv2.rectangle(out_img, low_pt, high_pt, (0, 255, 0), 3)\n\n        for low_pt, high_pt in right_line.windows:\n            cv2.rectangle(out_img, low_pt, high_pt, (0, 255, 0), 3)\n        # Create an image to draw the lines on\n        warp_zero1 = np.zeros_like(warped_img).astype(np.uint8)\n        color_warp1 = np.dstack((warp_zero1, warp_zero1, warp_zero1))\n        # Draw the lane lines onto the blank image; it is warped back to the\n        # original image space below using the inverse perspective matrix (M_inv_psp)\n        cv2.polylines(color_warp1, pts_left, False, (255, 140,0), 5)\n        cv2.polylines(color_warp1, pts_right, False, (255, 140,0), 5)\n        \n        newwarp = cv2.warpPerspective(color_warp1, self.M_inv_psp, (undist_img.shape[1], undist_img.shape[0]))\n        # Combine the result with the original image\n        result = cv2.addWeighted(undist_img, 1, newwarp, 0.3, 0)\n        plt.imsave(\"out/warpedlines.jpg\", result)\n        \n        return out_img\n    \n    def draw_lane_lines_regions(self, warped_img, undist_img, left_line, right_line):\n        \"\"\"\n        Returns an image where the computed left and right lane areas 
have been drawn on top of the original warped binary image\n        \"\"\"\n        # Generate a polygon to illustrate the search window area\n        # And recast the x and y points into usable format for cv2.fillPoly()\n        margin = self.sliding_window_half_width\n        ploty = np.linspace(0, warped_img.shape[0] - 1, warped_img.shape[0])\n        \n        left_line_window1 = np.array([np.transpose(np.vstack([left_line.line_fit_x - margin, ploty]))])\n        left_line_window2 = np.array([np.flipud(np.transpose(np.vstack([left_line.line_fit_x + margin,\n                                                                        ploty])))])\n        left_line_pts = np.hstack((left_line_window1, left_line_window2))\n        \n        right_line_window1 = np.array([np.transpose(np.vstack([right_line.line_fit_x - margin, ploty]))])\n        right_line_window2 = np.array([np.flipud(np.transpose(np.vstack([right_line.line_fit_x + margin,\n                                                                         ploty])))])\n        right_line_pts = np.hstack((right_line_window1, right_line_window2))\n\n        # Create RGB image from binary warped image\n        region_img = np.dstack((warped_img, warped_img, warped_img)) * 255\n\n        # Draw the lane onto the warped blank image\n        cv2.fillPoly(region_img, np.int_([left_line_pts]), (0, 255, 0))\n        cv2.fillPoly(region_img, np.int_([right_line_pts]), (0, 255, 0))\n        \n        # Create an image to draw the lines on\n        warp_zero1 = np.zeros_like(warped_img).astype(np.uint8)\n        color_warp1 = np.dstack((warp_zero1, warp_zero1, warp_zero1))\n        # Draw the lane regions onto the blank image; it is warped back to the\n        # original image space below using the inverse perspective matrix (M_inv_psp)\n        cv2.fillPoly(color_warp1, np.int_([left_line_pts]), (0, 255, 0))\n        cv2.fillPoly(color_warp1, np.int_([right_line_pts]), (0, 255, 0))\n        \n        newwarp = cv2.warpPerspective(color_warp1, self.M_inv_psp, (undist_img.shape[1], undist_img.shape[0]))\n        # Combine the result with the original image\n        result = cv2.addWeighted(undist_img, 1, newwarp, 0.3, 0)\n        plt.imsave(\"out/warpedregion.jpg\", result)\n        return region_img\n\n    def draw_lines_hotspots(self, warped_img, left_line, right_line):\n        \"\"\"\n        Returns a RGB image where the portions of the lane lines that were\n        identified by our pipeline are colored in yellow (left) and blue (right)\n        \"\"\"\n        out_img = np.dstack((warped_img, warped_img, warped_img))*255\n        \n        out_img[left_line.non_zero_y, left_line.non_zero_x] = [255, 255, 0]\n        out_img[right_line.non_zero_y, right_line.non_zero_x] = [0, 0, 255]\n        \n        return out_img\n\n    def compute_lane_curvature(self, left_line, right_line):\n        \"\"\"\n        Returns the triple (left_curvature, right_curvature, lane_center_offset), which are all in meters\n        \"\"\"\n        ploty = self.ploty\n        y_eval = np.max(ploty)\n        # Define conversions in x and y from pixels space to meters\n        \n        leftx = left_line.line_fit_x\n        rightx = right_line.line_fit_x\n        \n        # Fit new polynomials: find x for y in real-world space\n        left_fit_cr = np.polyfit(ploty * self.ym_per_px, leftx * self.xm_per_px, 2)\n        right_fit_cr = np.polyfit(ploty * self.ym_per_px, rightx * self.xm_per_px, 2)\n        \n        # Now calculate the radii of the curvature\n        left_curverad = ((1 + (2 * left_fit_cr[0] * y_eval * self.ym_per_px + left_fit_cr[1])**2)**1.5) / np.absolute(2 * left_fit_cr[0])\n        right_curverad = ((1 + (2 * right_fit_cr[0] * y_eval * self.ym_per_px + right_fit_cr[1])**2)**1.5) / np.absolute(2 * right_fit_cr[0])
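\n\n        # For a second-order fit x(y) = A*y**2 + B*y + C in meter units, the\n        # radius computed above is R = (1 + (2*A*y + B)**2)**1.5 / abs(2*A),\n        # evaluated at the bottom of the image (y = y_eval * ym_per_px).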
\n\n        # Use our computed polynomial to determine the car's center position in image space,\n        # then convert that offset to meters\n        left_fit = left_line.polynomial_coeff\n        right_fit = right_line.polynomial_coeff\n        \n        center_offset_img_space = (((left_fit[0] * y_eval**2 + left_fit[1] * y_eval + left_fit[2]) +\n                                    (right_fit[0] * y_eval**2 + right_fit[1] * y_eval + right_fit[2])) / 2) - self.lane_center_px_psp\n        center_offset_real_world_m = center_offset_img_space * self.xm_per_px\n        \n        # Now our radius of curvature is in meters\n        return left_curverad, right_curverad, center_offset_real_world_m\n    \n    def compute_lane_lines(self, warped_img):\n        \"\"\"\n        Returns the tuple (left_lane_line, right_lane_line) which represents respectively the LaneLine\n        instances for the computed left and right lanes, for the supplied binary warped image\n        \"\"\"\n\n        # Take a histogram over the bottom two thirds of the image, summing pixel values column wise\n        histogram = np.sum(warped_img[warped_img.shape[0]//3:,:], axis=0)\n        \n        # Find the peaks of the left and right portions of the histogram,\n        # split at one third of the width\n        # These will be the starting point for the left and right lines\n        midpoint = int(histogram.shape[0]//3)\n        leftx_base = np.argmax(histogram[:midpoint])\n        rightx_base = np.argmax(histogram[midpoint:]) + midpoint # don't forget to offset by midpoint!\n        \n        # Set height of windows\n        window_height = int(warped_img.shape[0]//self.sliding_windows_per_line)\n        # Identify the x and y positions of all nonzero pixels in the image\n        # NOTE: nonzero returns a tuple of arrays in y and x directions\n        nonzero = warped_img.nonzero()\n        nonzeroy = np.array(nonzero[0])\n        nonzerox = np.array(nonzero[1])\n        \n        total_non_zeros = len(nonzeroy)\n        non_zero_found_pct = 0.0\n        \n        # Current positions to be updated for each window\n        leftx_current = leftx_base\n        rightx_current = rightx_base\n\n        # Set the width of the windows +/- margin\n        margin = self.sliding_window_half_width\n        # Set minimum number of pixels found to recenter window\n        minpix = self.sliding_window_recenter_thres\n        # Create empty lists to receive left and right lane pixel indices\n        left_lane_inds = []\n        right_lane_inds = []\n        \n        # Our lane line objects where we store the result of this computation\n        left_line = LaneLine()\n        right_line = LaneLine()\n        \n        if self.previous_left_lane_line is not None and self.previous_right_lane_line is not None:\n            # We have already computed the lane lines polynomials from a previous image\n            left_lane_inds = ((nonzerox > (self.previous_left_lane_line.polynomial_coeff[0] * (nonzeroy**2)\n                                           + self.previous_left_lane_line.polynomial_coeff[1] * nonzeroy\n                                           + self.previous_left_lane_line.polynomial_coeff[2] - margin))\n                              & (nonzerox < (self.previous_left_lane_line.polynomial_coeff[0] * (nonzeroy**2)\n                                             + self.previous_left_lane_line.polynomial_coeff[1] * nonzeroy\n                                             + self.previous_left_lane_line.polynomial_coeff[2] + margin)))\n\n            right_lane_inds = ((nonzerox > (self.previous_right_lane_line.polynomial_coeff[0] * (nonzeroy**2)\n                                            + self.previous_right_lane_line.polynomial_coeff[1] * nonzeroy\n                                            + self.previous_right_lane_line.polynomial_coeff[2] - margin))\n                               & (nonzerox < (self.previous_right_lane_line.polynomial_coeff[0] * (nonzeroy**2)\n                                              + self.previous_right_lane_line.polynomial_coeff[1] * nonzeroy\n                                              + self.previous_right_lane_line.polynomial_coeff[2] + margin)))\n            \n            # These are boolean masks, so summing them counts the captured pixels\n            non_zero_found_left = np.sum(left_lane_inds)\n            non_zero_found_right = np.sum(right_lane_inds)\n            non_zero_found_pct = (non_zero_found_left + non_zero_found_right) / total_non_zeros\n            \n            print(\"[Previous lane] Found pct={0}\".format(non_zero_found_pct))\n            #print(left_lane_inds)\n        \n        if non_zero_found_pct < 0.85:\n            print(\"Non zeros found below thresholds, beginning sliding window - pct={0}\".format(non_zero_found_pct))\n            left_lane_inds = []\n            right_lane_inds = []
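\n\n            # Note on geometry: with the 720x1280 warped frames and the\n            # (20, 100, 50) constructor arguments used elsewhere in this\n            # script, each search window is 36 rows tall and 200 px wide\n            # (twice the margin); these figures are purely illustrative.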
\n\n            # Step through the windows one by one\n            for window in range(self.sliding_windows_per_line):\n                # Identify window boundaries in x and y (and right and left)\n                # We are moving our windows from the bottom to the top of the screen (highest to lowest y value)\n                win_y_low = warped_img.shape[0] - (window + 1) * window_height\n                win_y_high = warped_img.shape[0] - window * window_height\n\n                # Defining our window's coverage in the horizontal (i.e. x) direction\n                # Notice that the window's width is twice the margin\n                win_xleft_low = leftx_current - margin\n                win_xleft_high = leftx_current + margin\n                win_xright_low = rightx_current - margin\n                win_xright_high = rightx_current + margin\n\n                left_line.windows.append([(win_xleft_low,win_y_low),(win_xleft_high,win_y_high)])\n                right_line.windows.append([(win_xright_low,win_y_low),(win_xright_high,win_y_high)])\n\n                # Somewhat cryptic, so spelled out: nonzerox and nonzeroy have the same size and any nonzero pixel is identified by\n                # (nonzeroy[i],nonzerox[i]), therefore we just return the i indices within the window that are nonzero\n                # and can then index into nonzeroy and nonzerox to find the ACTUAL pixel coordinates that are not zero\n                good_left_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) &\n                                  (nonzerox >= win_xleft_low) & (nonzerox < win_xleft_high)).nonzero()[0]\n                good_right_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) &\n                                   (nonzerox >= win_xright_low) & (nonzerox < win_xright_high)).nonzero()[0]\n                \n                # Append these indices to the lists\n                left_lane_inds.append(good_left_inds)\n                right_lane_inds.append(good_right_inds)\n\n                # If you found > minpix pixels, recenter next window on their mean position\n                if len(good_left_inds) > minpix:\n                    leftx_current = int(np.mean(nonzerox[good_left_inds]))\n                if len(good_right_inds) > minpix:\n                    rightx_current = int(np.mean(nonzerox[good_right_inds]))
\n\n            # Concatenate the arrays of indices since we now have a list of multiple arrays (e.g. ([1,3,6],[8,5,2]))\n            # We want to create a single array with elements from all those lists (e.g. [1,3,6,8,5,2])\n            # These are the indices that are non zero in our sliding windows\n            left_lane_inds = np.concatenate(left_lane_inds)\n            right_lane_inds = np.concatenate(right_lane_inds)\n            \n            # These are index arrays, so count them with len rather than summing them\n            non_zero_found_left = len(left_lane_inds)\n            non_zero_found_right = len(right_lane_inds)\n            non_zero_found_pct = (non_zero_found_left + non_zero_found_right) / total_non_zeros\n            \n            print(\"[Sliding windows] Found pct={0}\".format(non_zero_found_pct))\n        \n        # Extract left and right line pixel positions\n        leftx = nonzerox[left_lane_inds]\n        lefty = nonzeroy[left_lane_inds]\n        rightx = nonzerox[right_lane_inds]\n        righty = nonzeroy[right_lane_inds]\n        \n        #print(\"[LEFT] Number of hot pixels={0}\".format(len(leftx)))\n        #print(\"[RIGHT] Number of hot pixels={0}\".format(len(rightx)))\n        # Fit a second order polynomial to each\n        left_fit = np.polyfit(lefty, leftx, 2)\n        right_fit = np.polyfit(righty, rightx, 2)\n        #print(\"Poly left {0}\".format(left_fit))\n        #print(\"Poly right {0}\".format(right_fit))\n        left_line.polynomial_coeff = left_fit\n        right_line.polynomial_coeff = right_fit\n        \n        if not self.previous_left_lane_lines.append(left_line):\n            left_fit = self.previous_left_lane_lines.get_smoothed_polynomial()\n            left_line.polynomial_coeff = left_fit\n            self.previous_left_lane_lines.append(left_line, force=True)\n            print(\"**** REVISED Poly left {0}\".format(left_fit))\n        #else:\n            #left_fit = self.previous_left_lane_lines.get_smoothed_polynomial()\n            #left_line.polynomial_coeff = left_fit\n\n        if not self.previous_right_lane_lines.append(right_line):\n            right_fit = self.previous_right_lane_lines.get_smoothed_polynomial()\n            right_line.polynomial_coeff = right_fit\n            self.previous_right_lane_lines.append(right_line, force=True)\n            print(\"**** REVISED Poly right {0}\".format(right_fit))\n        #else:\n            #right_fit = self.previous_right_lane_lines.get_smoothed_polynomial()\n            #right_line.polynomial_coeff = right_fit\n\n        # Generate x and y values for plotting\n        ploty = np.linspace(0, warped_img.shape[0] - 1, warped_img.shape[0])\n        left_fitx = left_fit[0] * ploty**2 + left_fit[1] * ploty + left_fit[2]\n        right_fitx = right_fit[0] * ploty**2 + right_fit[1] * ploty + right_fit[2]\n        \n        left_line.polynomial_coeff = left_fit\n        left_line.line_fit_x = left_fitx\n        left_line.non_zero_x = leftx\n        left_line.non_zero_y = lefty\n\n        right_line.polynomial_coeff = right_fit\n        right_line.line_fit_x = right_fitx\n        right_line.non_zero_x = rightx\n        right_line.non_zero_y = righty\n\n        return (left_line, right_line)\n    \n\n# construct the argument parse and parse the arguments\n\n(bottom_px, right_px) = (719, 1279)\n# pts = np.array([[0,bottom_px],[0,bottom_px/3],[right_px,bottom_px/3], [right_px, bottom_px]], np.int32) LVT\n# pts = np.array([[170,bottom_px],[550,530],[740,530], [870, bottom_px]], np.int32) NVT5\n#pts = np.array([[170,bottom_px],[550,530],[740,530], [870, bottom_px]], np.int32) NVT1 without resize\npts = np.array([[100,bottom_px],[225,180],[310,180], [360, bottom_px]], np.int32)\n\nsrc_pts = pts.astype(np.float32)\n\ndst_pts = np.array([[200, bottom_px], [200, 0], [1000, 0], [1000, bottom_px]], np.float32)\n'''\nld = AdvancedLaneDetectorWithMemory(opts, ipts, src_pts, dst_pts, 20, 100, 50)\n\nfrom tkinter.ttk import *\nfrom tkinter import *\nfrom tkinter import filedialog\nfrom PIL import Image,ImageTk \nimport os\n\npath1 = \"extract\"\npath2 = \"extractout\"\ntest_imgs_paths = glob.glob(path1 + \"/*.jpg\") \nfor image in test_imgs_paths:\n    img = load_image(image)\n    print(\"Processing \"+image)\n    img= 
ld.process_image(img)\n c = 0\n plt.imsave(\"extractout/\"+str(c)+\".jpg\",img) \n c = c+1\n'''\nfrom tkinter.ttk import *\nfrom tkinter import *\nfrom tkinter import filedialog\nfrom PIL import Image,ImageTk \nfrom imageio import *\nfrom tkinter.filedialog import askopenfilename\nfrom imutils.video import FPS\nfrom imutils.video import FileVideoStream\nimport time\nimport imutils\n\n\n\n\n\n\ndef inputImg():\n global panelA\n \n file = filedialog.askopenfilename()\n \n image = load_image(file)\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n ld = AdvancedLaneDetectorWithMemory(opts, ipts, src_pts, dst_pts, 20, 100, 50)\n proc_img = ld.process_image(image)\n image = Image.fromarray(image)\n image = ImageTk.PhotoImage(image)\n if panelA is None :\n panelA = Label(image=image)\n panelA.image = image\n panelA.place(x=500,y=500,anchor=W,height=540,width=780)\n else:\n panelA.configure(image=image)\n panelA.image = image\n \n v.set(\"Image Selected\")\ndef undistort():\n \n global panelA\n\n \n image = load_image(\"out/undist_img.jpg\")\n image = Image.fromarray(image)\n image = ImageTk.PhotoImage(image)\n if panelA is None :\n panelA = Label(image=image)\n panelA.image = image\n panelA.place(x=500,y=500,anchor=W,height=540,width=780)\n else:\n panelA.configure(image=image)\n panelA.image = image\n v.set(\"Undistorted Output ===>\") \ndef threshold():\n \n global panelA\n\n \n image = load_image(\"out/thres_img.jpg\")\n image = Image.fromarray(image)\n image = ImageTk.PhotoImage(image)\n if panelA is None :\n panelA = Label(image=image)\n panelA.image = image\n panelA.place(x=500,y=500,anchor=W,height=540,width=780)\n else:\n panelA.configure(image=image)\n panelA.image = image\n v.set(\"Thresholded Output ===>\") \ndef transform():\n \n global panelA\n\n \n image = load_image(\"out/thres_img_psp.jpg\")\n image = Image.fromarray(image)\n image = ImageTk.PhotoImage(image)\n if panelA is None :\n panelA = Label(image=image)\n panelA.image = image\n panelA.place(x=500,y=500,anchor=W,height=540,width=780)\n else:\n panelA.configure(image=image)\n panelA.image = image\n v.set(\"Transformed Output ===>\") \ndef linefit():\n \n global panelA\n\n \n image = load_image(\"out/drawn_lines.jpg\")\n image = Image.fromarray(image)\n image = ImageTk.PhotoImage(image)\n if panelA is None :\n panelA = Label(image=image)\n panelA.image = image\n panelA.place(x=500,y=500,anchor=W,height=540,width=780)\n else:\n panelA.configure(image=image)\n panelA.image = image\n v.set(\"Line Fitted Output ===>\") \n \ndef regionmark():\n \n global panelA\n\n \n image = load_image(\"out/drawn_lines_regions.jpg\")\n image = Image.fromarray(image)\n image = ImageTk.PhotoImage(image)\n if panelA is None :\n panelA = Label(image=image)\n panelA.image = image\n panelA.place(x=500,y=500,anchor=W,height=540,width=780)\n else:\n panelA.configure(image=image)\n panelA.image = image\n v.set(\"Region Marked Output ===>\") \ndef regionimg():\n \n global panelA\n\n \n image = load_image(\"out/warpedregion.jpg\")\n image = Image.fromarray(image)\n image = ImageTk.PhotoImage(image)\n if panelA is None :\n panelA = Label(image=image)\n panelA.image = image\n panelA.place(x=500,y=500,anchor=W,height=540,width=780)\n else:\n panelA.configure(image=image)\n panelA.image = image\n \n v.set(\"Lane line Region Drawn ===>\") \n\ndef finaloutput():\n \n global panelA\n\n \n image = load_image(\"out/drawn_lane_area.jpg\")\n image = Image.fromarray(image)\n image = ImageTk.PhotoImage(image)\n if panelA is None :\n panelA = Label(image=image)\n 
panelA.image = image\n panelA.place(x=500,y=500,anchor=W,height=540,width=780)\n else:\n panelA.configure(image=image)\n panelA.image = image\n \n v.set(\"Final Output ===>\") \n \ndef linedrawn():\n \n global panelA\n\n \n image = load_image(\"out/warpedlines.jpg\")\n image = Image.fromarray(image)\n image = ImageTk.PhotoImage(image)\n if panelA is None :\n panelA = Label(image=image)\n panelA.image = image\n panelA.place(x=500,y=500,anchor=W,height=540,width=780)\n else:\n panelA.configure(image=image)\n panelA.image = image\n \n v.set(\"Lane Lines Drawn ===>\") \n \ndef combinedoutput():\n \n global panelA\n\n \n image = load_image(\"out/final_img.jpg\")\n scale_percent = 70 # percent of original size\n width = int(image.shape[1] * scale_percent / 100)\n height = int(image.shape[0] * scale_percent / 100)\n dim = (width, height)\n # resize image\n image = cv2.resize(image, dim, interpolation = cv2.INTER_AREA)\n image = Image.fromarray(image)\n image = ImageTk.PhotoImage(image)\n if panelA is None :\n panelA = Label(image=image)\n panelA.image = image\n panelA.place(x=500,y=450,anchor=W,height=510,width=720)\n else:\n panelA.configure(image=image)\n panelA.image = image\n v.set(\" \")\ndef vplay():\n stream = askopenfilename()\n ld = AdvancedLaneDetectorWithMemory(opts, ipts, src_pts, dst_pts, 20, 100, 50)\n # import the necessary packages\n\n print(\"[INFO] starting video file thread...\")\n fvs = FileVideoStream(stream).start()\n time.sleep(1.0)\n\n # start the FPS timer\n fps = FPS().start()\n\n # loop over frames from the video file stream\n while fvs.more():\n # grab the frame from the threaded video file stream, resize\n # it, and convert it to grayscale (while still retaining 3\n # channels)\n frame = fvs.read()\n print(frame.shape)\n \n \n \n frame = ld.process_image(frame)\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n #frame = np.dstack([frame])\n frame = np.dstack([frame, frame, frame])\n # display the size of the queue on the frame\n \n # show the frame and update the FPS counter\n print(frame.shape)\n cv2.imshow(\"Frame\", frame)\n cv2.waitKey(1)\n fps.update()\n\n # stop the timer and display FPS information\n fps.stop()\n print(\"[INFO] elasped time: {:.2f}\".format(fps.elapsed()))\n print(\"[INFO] approx. 
FPS: {:.2f}\".format(fps.fps()))\n\n # do a bit of cleanup\n cv2.destroyAllWindows()\n fvs.stop()\n \n\nwindow = Tk()\nwindow.geometry('1050x1200')\npanelA=None\n\nwindow.configure(background='black')\nwindow.title(\"Road-lane Detection Software\")\nv = StringVar()\nlbl = Label(window, text=\"Road-lane Detection Software \",font=(\"Times New Roman\",25,\"bold\"),fg=\"white\",bg=\"black\")\nlbl.place(x=700,y=30,anchor=N)\nlbl1 = Label(window, text=\" \",font=(\"Times New Roman\",18,\"italic\"),fg=\"red\",bg=\"black\",textvariable=v)\nlbl1.place(x=380,y=130,anchor=N)\n\nb1 = Button(window, text=\"Select Image\", bg=\"grey\", fg=\"cyan\",activebackground=\"red\",font=(\"Times New Roman\",15),command=inputImg)\nb1.place(y=60,x=20,anchor=W,height=60,width=170)\nb2 = Button(window, text=\"Undistortion\", bg=\"grey\", fg=\"cyan\",font=(\"Times New Roman\",15),command=undistort)\nb2.place(x=20,y=130,anchor=W,height=60,width=170)\n\nb3 = Button(window, text=\"Thresholding\", bg=\"grey\",activebackground=\"red\", fg=\"cyan\",font=(\"Times New Roman\",15),command=threshold)\nb3.place(y=200,x=20,anchor=W,height=60,width=170)\nb4 = Button(window, text=\"Perspective \\n Transformation\", bg=\"grey\", fg=\"cyan\",font=(\"Times New Roman\",15),command=transform)\nb4.place(x=20,y=270,anchor=W,height=60,width=170)\n\nb5 = Button(window, text=\"Line Fitting\", bg=\"grey\", fg=\"cyan\",font=(\"Times New Roman\",15),command=linefit)\nb5.place(y=340,x=20,anchor=W,height=60,width=170)\nb8 = Button(window, text=\"Line Drawn \\n Result\", bg=\"grey\", fg=\"cyan\",font=(\"Times New Roman\",15),command=linedrawn)\nb8.place(x=20,y=410,anchor=W,height=60,width=170)\nb6 = Button(window, text=\"Region Marking\", bg=\"grey\", fg=\"cyan\",font=(\"Times New Roman\",15),command=regionmark)\nb6.place(x=20,y=480,anchor=W,height=60,width=170)\nb9 = Button(window, text=\"Region Marked \\n Result\", bg=\"grey\", fg=\"cyan\",font=(\"Times New Roman\",15),command=regionimg)\nb9.place(x=20,y=550,anchor=W,height=60,width=170)\nb7 = Button(window, text=\"Final Output\", bg=\"grey\", fg=\"cyan\",font=(\"Times New Roman\",15),command=finaloutput)\nb7.place(x=20,y=620,anchor=W,height=60,width=170)\n\nb10 = Button(window, text=\"Combined \\n Output\", bg=\"grey\", fg=\"cyan\",font=(\"Times New Roman\",15),command=combinedoutput)\nb10.place(x=20,y=690,anchor=W,height=60,width=170)\nb11 = Button(window, text=\"Video \\n Outputs\", bg=\"grey\", fg=\"cyan\",font=(\"Times New Roman\",15),command=vplay)\nb11.place(x=210,y=690,anchor=W,height=60,width=170)\nwindow.mainloop()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n'''\n\ndef inputImg():\n\tglobal panelA\n \n\ttest_imgs_paths = glob.glob(test_imgs_dir + \"/*.jpg\")\n\tfor img in test_imgs_paths :\n\t\timage = load_image(img)\n\t\timage = Image.fromarray(image)\n\t\timage = ImageTk.PhotoImage(image)\n\t\tpanelA = Label(image=image)\n\t\tpanelA.image = image\n\t\tpanelA.place(x=500,y=500,anchor=W,height=540,width=780)\n\t\t\n \n\nwindow = Tk()\nwindow.geometry('1050x1200')\npanelA=None\n\nwindow.configure(background='black')\nwindow.title(\"Road-lane Detection Software\")\nv = StringVar()\nlbl = Label(window, text=\"Road-lane Detection Software \",font=(\"Times New Roman\",25,\"bold\"),fg=\"white\",bg=\"black\")\nlbl.place(x=700,y=30,anchor=N)\n\nb1 = Button(window, text=\"Do\", bg=\"grey\", fg=\"cyan\",activebackground=\"red\",font=(\"Times New 
Roman\",15),command=inputImg)\nb1.place(y=60,x=20,anchor=W,height=60,width=170)\nwindow.mainloop()'''\n","sub_path":"videorun.py","file_name":"videorun.py","file_ext":"py","file_size_in_byte":43696,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"51538864","text":"import logging\nimport colorama\n\ncolorama.init()\n\nlogger = logging.getLogger(__name__)\nch = logging.StreamHandler()\nch.setLevel(logging.DEBUG)\nformatter = logging.Formatter(\"%(asctime)s - %(name)s - %(levelname)s - %(message)s\")\nch.setFormatter(formatter)\nlogger.addHandler(ch)\n\n\nfrom describe_dsl.dsl import *\n\n__author__ = \"Hiroshi Ioka\"\n__copyright__ = \"Copyright 2012, Hiroshi Ioka\"\n__credits__ = []\n__license__ = \"MIT\"\n__version__ = \"0.1.0\"\n__maintainer__ = \"Hiroshi Ioka\"\n__email__ = \"hirochachacha@gmail.com\"\n__status__ = \"Production\"\n","sub_path":"describe_dsl/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"589489959","text":"import gi\ngi.require_version('Gtk', '3.0')\nfrom gi.repository import Gtk, Gdk, GObject\n\nimport os, sys\nimport time, datetime\n\nclass Window(Gtk.Window):\n def __init__(self):\n Gtk.Window.__init__(self, title=\"GTK3.0 Example\")\n self.set_default_size(400,360)\n\n st = os.statvfs(\"/\")\n self.total = (st.f_blocks * st.f_frsize)/1024/1024/1024\n self.used = ((st.f_blocks - st.f_bfree) * st.f_frsize)/1024/1024/1024\n self.free = (st.f_bavail * st.f_frsize)/1024/1024/1024\n self.usage = self.used/self.total\n\n vbox = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, spacing=8)\n vbox.set_valign(Gtk.Align.START)\n\n # total (GB)\n label = Gtk.Label(\"Disk Total\")\n vbox.pack_start(label, True, True, 8)\n entry = Gtk.Entry()\n entry.set_text(str(self.total)[0:5] + \" GB\")\n vbox.pack_start(entry, True, True, 0)\n\n # used (GB)\n label = Gtk.Label(\"Used Space\")\n vbox.pack_start(label, True, True, 8)\n entry = Gtk.Entry()\n entry.set_text(str(self.used)[0:5] + \" GB\")\n vbox.pack_start(entry, True, True, 0)\n\n # free (GB)\n label = Gtk.Label(\"Free Space\")\n vbox.pack_start(label, True, True, 8)\n entry = Gtk.Entry()\n entry.set_text(str(self.free)[0:5] + \" GB\")\n vbox.pack_start(entry, True, True, 0)\n\n # usage (%)\n label = Gtk.Label(\"Disk Usage (\" + str(self.usage*100)[0:4] + \" %) Used\")\n vbox.pack_start(label, True, True, 4)\n pbar = Gtk.ProgressBar()\n pbar.set_fraction(self.usage)\n vbox.pack_start(pbar, True, True, 16)\n\n frm = Gtk.Frame()\n frm.set_border_width(16)\n frm.add(vbox)\n frm.set_label(\"SYSTEM STATUS\")\n self.add(frm)\n\n self.connect(\"delete-event\", Gtk.main_quit)\n # self.fullscreen()\n self.show_all()\n Gtk.main()\n\nif __name__ == '__main__':\n win = Window()","sub_path":"0004-gtk3.0-simple-system-status/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1944,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"447440444","text":"# Copyright © 2017-2018 Cedric Legrand\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do 
so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice (including the next\n# paragraph) shall be included in all copies or substantial portions of the\n# Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\nclass Failure(Exception):\n def __init__(self, severity, pos, msg):\n self.severity = severity\n self.pos = pos\n self.msg = msg\n\n def __str__(self): return str(self.linecol[0]) + ':' + str(self.linecol[1]) + ': ' + self.msg\n\nclass ParseError(Exception):\n class Severity:\n Warning = 1\n Error = 2\n Fatal = 3\n\n def __init__(self, failures, parser = None):\n self.failures = failures\n self.max_level = -1\n self.errors = 0\n self.warnings = 0\n if parser is not None:\n for f in failures:\n if f.severity > self.max_level: self.max_level = f.severity\n if f.severity is ParseError.Severity.Warning: self.warnings += 1\n else: self.errors += 1\n f.linecol = parser.pos_to_linecol(f.pos)\n\n @property\n def summary(self):\n if self.warnings is 0: return 'build finished with %d errors' % self.errors\n elif self.errors is 0: return 'build finished with %d warnings' % self.warnings\n else: return 'build finished with %d errors and %d warnings' % (self.errors, self.warnings)\n\n def __str__(self):\n res = []\n for f in self.failures:\n res.append(str(f))\n return '\\n'.join(res) + '\\n'\n","sub_path":"reflex/parser/error.py","file_name":"error.py","file_ext":"py","file_size_in_byte":2286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"347836369","text":"# Import required libraries\nimport pandas as pd\nimport dash\nfrom dash import html\nfrom dash import dcc\nfrom dash.dependencies import Input, Output\nimport plotly.express as px\n\n# Read the airline data into pandas dataframe\nspacex_df = pd.read_csv(\"spacex_launch_dash.csv\")\nspacex_df.drop(columns=['Unnamed: 0', 'Mission Outcome'], inplace=True)\nmax_payload = spacex_df['Payload Mass (kg)'].max()\nmin_payload = spacex_df['Payload Mass (kg)'].min()\n\n# Get unique launch sites for drop down menu \nunique_launch_sites = spacex_df['Launch Site'].unique().tolist()\nlaunch_sites = []\nlaunch_sites.append({'label': 'All Sites', 'value': 'All Sites'})\nfor launch_site in unique_launch_sites:\n launch_sites.append({'label': launch_site, 'value': launch_site})\n\n# Create a dash application\napp = dash.Dash(__name__)\n\n# Create an app layout\napp.layout = html.Div(children=[# Header \n html.H1('SpaceX Launch Records Dashboard',\n style={'textAlign': 'center', 'color': '#503D36','font-size': 40}),\n html.Br(),\n\n # TASK 1: Add a dropdown list to enable Launch Site selection\n # ->Set default select value as 'ALL sites'\n html.Div(dcc.Dropdown(\n id='site-dropdown',\n options=launch_sites,\n value='All Sites',\n placeholder='Select a launch site here',\n searchable=True,\n clearable=True)\n ),\n html.Br(),\n\n # TASK 2: Add a pie chart to show the total successful launches count for all sites\n html.Div(dcc.Graph(id='success-pie-chart')),\n html.Br(),\n\n # TASK 3: Add a slider to select payload range\n # 
-> Set default value to be max & min payload \n html.P(\"Payload range (Kg):\"),\n html.Div(dcc.RangeSlider(id='payload-slider', \n min=0, max=10000, step=1000,\n value=[min_payload,max_payload],\n marks={\n 0:{'label':'0 (min)', 'style':{'font-size':15, 'font-weight':'bold'}},\n 2500:'2500',\n 5000:'5000',\n 7500:'7500',\n 9600: {'label':'9600 (max)', 'style':{'font-size':15, 'font-weight':'bold'}},\n 10000:'1000'\n })\n ),\n html.Div(id='retun-payload-range'),\n html.Br(),\n\n # TASK 4: Add a scatter chart to show the correlation between payload and launch success\n html.Div(dcc.Graph(id='success-payload-scatter-chart')),\n ])\n\n# TASK 2:\n# Add a callback function to output a pie chart in response to drop down selection\n@app.callback(\n Output(component_id='success-pie-chart', component_property='figure'),\n Input(component_id='site-dropdown', component_property='value')\n )\ndef output_pie(site): #input value \n if (site =='All Sites'):\n all_sites = spacex_df[spacex_df['class'] == 1].reset_index(drop=True) # All Success only for all sites.\n all_sites.rename(columns={'class': 'count'}, inplace=True)\n fig = px.pie(\n all_sites, \n values='count', \n names='Launch Site', \n title='Total Success Launches by Site',\n color_discrete_sequence=px.colors.sequential.RdBu\n )\n else:\n selected_site = spacex_df[spacex_df['Launch Site']==site].reset_index(drop=True)\n site_sucessRate = selected_site.groupby(['Launch Site', 'class']).size().reset_index()\n site_sucessRate.rename(columns={0:'count'}, inplace=True)\n site_sucessRate.replace([0,1],['Fail', 'Successs'], inplace=True)\n fig = px.pie(\n site_sucessRate, \n values='count', \n names='class', \n title='Total Success Launches for site '+site,\n )\n return fig \n\n# TASK 3:\n# Add a callback function that returns the selected pay load range\n@app.callback(\n Output('retun-payload-range', 'children'),\n Input('payload-slider', 'value'))\ndef output_payload_range(payload_range):\n return 'You have selected range {}'.format(payload_range)\n\n# TASK 4:\n# Add a callback function to output a scatter plot in response payload range\n@app.callback(\n Output('success-payload-scatter-chart', 'figure'),\n [Input('site-dropdown', 'value'), Input('payload-slider', 'value')]) #multiple inputs\ndef output_scatter(site, payload_range):\n low,high = payload_range\n df = spacex_df\n filtered_df = df[df['Payload Mass (kg)'].between(low,high)]\n\n if site =='All Sites':\n fig = px.scatter(filtered_df, \n x='Payload Mass (kg)', \n y='class', \n size='Payload Mass (kg)',\n color='Booster Version Category', \n title='Success Rate for All Sites by Payload Range')\n else:\n filtered_df = filtered_df[filtered_df['Launch Site']==site]\n fig = px.scatter(filtered_df, \n x='Payload Mass (kg)', \n y='class', \n size='Payload Mass (kg)',\n color='Booster Version Category', \n title='Success Rate for Site {} by Payload Range'.format(site))\n return fig\n\n# Run the app\nif __name__ == '__main__':\n app.run_server(debug=True)","sub_path":"Applied capstone/spacex_dash_app.py","file_name":"spacex_dash_app.py","file_ext":"py","file_size_in_byte":6430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"527980629","text":"#!/usr/bin/env python\n\nimport numpy as np\nimport rospy\nimport math\n\nfrom visualization_msgs.msg import Marker\nfrom geometry_msgs.msg import Point\n\ndef create_marker_msg():\n m = Marker()\n m.header.frame_id = 'world'\n m.ns = 'trajectory'\n m.type = m.LINE_STRIP\n m.action = m.ADD\n m.id = 0\n\n 
# line width\n    m.scale.x = 0.02\n\n    # line color\n    m.color.a = 1.0\n    m.color.r = 1.0\n    m.color.g = 1.0\n    m.color.b = 0.0\n\n    # trajectory\n    z = 0.55\n    L = 1.0\n    mu = 0.1\n\n    N = 1000\n    t = np.linspace(0.0, 100.0, N)\n    #'8'\n    L=1\n    mu=0.1\n    for i in range(N):\n        ref_x = L*math.cos(mu*t[i])\n        ref_y = 2*L*math.sin(mu*t[i])*math.cos(mu*t[i])\n        m.points.append(Point(ref_x, ref_y, z))\n\n    return m\n\n    # #sinusoid\n    # for i in range(N):\n    #     ref_x = 0.05*t[i]\n    #     ref_y = 0.5*math.sin(5*ref_x)\n\n    #     m.points.append(Point(ref_x, ref_y, z))\n\n    # return m\n\n    #straight line\n    # L=1\n    # mu=0.1\n    # for i in range(N):\n    #     ref_x = 0.1*t[i]\n    #     ref_y = 0.2*t[i]\n    #     m.points.append(Point(ref_x, ref_y, z))\n\n    # return m\n\n\ndef main():\n    rospy.init_node('reference_traj_publiser', anonymous=True)\n    rate = rospy.Rate(10) # 10hz\n    pub_traj_viz = rospy.Publisher(\n        '/visualization/trajectory',\n        Marker, \n        queue_size=1\n    )\n\n    traj_marker = create_marker_msg()\n    \n    while not rospy.is_shutdown():\n        traj_marker.header.stamp = rospy.Time.now()\n        pub_traj_viz.publish(traj_marker)\n        rate.sleep()  # throttle the loop to the 10 Hz rate defined above\n\nif __name__ == '__main__':\n    main()\n","sub_path":"scripts/reference_trajectory_publisher.py","file_name":"reference_trajectory_publisher.py","file_ext":"py","file_size_in_byte":1572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"193355944","text":"import sys\nimport json\nfrom pyspark import SparkContext\nfrom pyspark.sql import SQLContext\nsc = SparkContext()\nsqlContext = SQLContext(sc)\n\n\norders = sqlContext.read.format(\"csv\")\\\n        .option(\"header\",\"true\")\\\n        .option(\"inferSchema\", \"true\")\\\n        .load(\"instacart_2017_05_01/orders.csv\")\n\nproducts = sqlContext.read.format(\"csv\")\\\n        .option(\"header\",\"true\")\\\n        .option(\"inferSchema\", \"true\")\\\n        .load(\"instacart_2017_05_01/products.csv\")\n    \naisles = sqlContext.read.format(\"csv\")\\\n        .option(\"header\",\"true\")\\\n        .option(\"inferSchema\", \"true\")\\\n        .load(\"instacart_2017_05_01/aisles.csv\")\n\ndepartments = sqlContext.read.format(\"csv\")\\\n        .option(\"header\",\"true\")\\\n        .option(\"inferSchema\", \"true\")\\\n        .load(\"instacart_2017_05_01/departments.csv\")\n    \norder_products__train = sqlContext.read.format(\"csv\")\\\n        .option(\"header\",\"true\")\\\n        .option(\"inferSchema\", \"true\")\\\n        .load(\"instacart_2017_05_01/order_products__train.csv\")\n\n\n\norder_products__prior = sqlContext.read.format(\"csv\")\\\n        .option(\"header\",\"true\")\\\n        .option(\"inferSchema\", \"true\")\\\n        .load(\"instacart_2017_05_01/order_products__prior.csv\")\n\n#order_products = order_products__prior.union(order_products__train)\n#\n#order_products = order_products.select(\"order_id\",\"product_id\")\n#\n#\n#\norder_products__prior1 = order_products__prior.selectExpr(\"product_id\",\"order_id as order_id_1\")\nproducts_FP = order_products__prior1.join(orders, order_products__prior1.order_id_1 == orders.order_id)\n\nproducts_FP.select(\"product_id\",\"order_id\",\"user_id\")\n#products_FP.show()\n\nimport pyspark.sql.functions as F\n\n\n\ntransactions = products_FP.groupby(\"order_id\").agg(F.collect_list('product_id'))\ntransactions = products_FP.selectExpr(\"collect_list(product_id) as items\").take(100)\n\n\n#trans = transactions.select(\"items\").take(20)\n#trans = trans.collect()\n#a = [(item) for sublist in trans for item in sublist]\n#a = sc.parallelize(a)\n#model = FPGrowth.train(a, minSupport=0.2, numPartitions=10)\n#result = model.freqItemsets().collect()\n\n#for fi in result:\n# print(fi)\n\nfrom 
pyspark.mllib.fpm import FPGrowth\n\nmodel = FPGrowth.train(transactions, minSupport=0.8, numPartitions=5)\nresult = model.freqItemsets().collect()\n\n#fpGrowth = FPGrowth.train(transactions, minSupport=0.1, minConfidence=0.6)\n#model = fpGrowth.fit(transactions)\n\n#model.associationRules.show(1)\n#model.freqItemsets.show(1)\n\n\n\n# transform examines the input items against all the association rules and summarize the\n# consequents as prediction\n#model.transform(transactions).show(10)\n\n\n\n\n\n\n#import pyspark.ml.stat\n#icecream = orders.join(order_products, orders.order_id == order_products.order_id)\n#icecream = icecream.select(\"order_hour_of_day\",\"product_id\")\n#icecream = icecream.join(products, icecream.product_id == products.product_id)\n#icecream = icecream.select(\"order_hour_of_day\",\"product_name\").show()\n##icecream = icecream.filter(icecream.product_name==\"Ice cream\").show()\n#from pyspark.sql.functions import col,desc\n#from pyspark.ml.stat import ChiSquareTest\n#alcohol = departments.join(products, departments.department_id == products.department_id)\n#alcohol = alcohol.select(\"product_id\",\"department\")\n#alcohol = alcohol.join(order_products, alcohol.product_id == order_products.product_id)\n#alcohol = alcohol.select(\"department\", \"order_id\")\n#alcohol = alcohol.join(orders, alcohol.order_id == orders.order_id)\n#alcohol = alcohol.select(\"order_dow\",\"order_hour_of_day\",\"department\")\n#alcohol = alcohol.filter(alcohol.department==\"alcohol\")\n#alcDail = alcohol.groupby(\"order_dow\").count().sort(desc(\"order_dow\"))\n#alcWeek = alcohol.groupby(\"order_hour_of_day\").count().sort(desc(\"order_hour_of_day\"))\n##r = ChiSquareTest.test(alcohol, \"order_dow\", \"count\")\n##print(r.pValues)\n##print(r.degreesOfFreedom)\n##print(r.statistics)\n#\n#alcohol = alcohol.filter(alcohol.department==\"alcohol\")\n#morningAlc = alcohol.select(\"order_hour_of_day\").filter(alcohol.order_hour_of_day>8).filter(alcohol.order_hour_of_day<17)\\\n#.where(alcohol.order_hour_of_day.isNotNull())\n#eveningAlc = alcohol.select(\"order_hour_of_day\").filter(alcohol.order_hour_of_day>17).filter(alcohol.order_hour_of_day<=23)\\\n#.where(alcohol.order_hour_of_day.isNotNull())\n#print( \"morning alcohol is :\", 100*morningAlc.count()/alcohol.select(\"order_hour_of_day\").count(),\"/n\",\\\n# \"evening alcohol is :\", 100*eveningAlc.count()/alcohol.select(\"order_hour_of_day\").count())\n","sub_path":"fp.py","file_name":"fp.py","file_ext":"py","file_size_in_byte":4354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"558134157","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport pywikibot, re, sys, argparse, json, unicodedata\n\nimport blib\nfrom blib import getparam, rmparam, tname, pname, msg, errandmsg, site\n\noutput_pages_to_delete = []\n\ndef remove_anagram_from_page(index, page, pagetitle_to_remove):\n pagetitle = str(page.title())\n def pagemsg(txt):\n msg(\"Page %s %s: %s\" % (index, pagetitle, txt))\n def errandpagemsg(txt):\n errandmsg(\"Page %s %s: %s\" % (index, pagetitle, txt))\n\n if not blib.safe_page_exists(page, errandpagemsg):\n pagemsg(\"WARNING: Trying to remove anagram '%s' but page itself doesn't exist\" % pagetitle_to_remove)\n return\n\n notes = []\n\n text = blib.safe_page_text(page, errandpagemsg)\n if not text:\n return\n\n retval = blib.find_modifiable_lang_section(text, \"Italian\", pagemsg, force_final_nls=True)\n if retval is None:\n return\n sections, j, secbody, sectail, 
has_non_lang = retval\n\n subsections = re.split(\"(^==+[^=\\n]+==+\\n)\", secbody, 0, re.M)\n for k in range(2, len(subsections), 2):\n if \"===Anagrams===\" in subsections[k - 1]:\n parsed = blib.parse_text(subsections[k])\n for t in parsed.filter_templates():\n tn = tname(t)\n def getp(param):\n return getparam(t, param)\n if tn == \"anagrams\":\n if getp(\"1\") != \"it\":\n pagemsg(\"WARNING: Wrong language in {{anagrams}}: %s\" % str(t))\n return\n anagrams = blib.fetch_param_chain(t, \"2\")\n anagrams = [x for x in anagrams if x != pagetitle_to_remove]\n if anagrams:\n blib.set_param_chain(t, anagrams, \"2\")\n notes.append(\"remove anagram '%s', page deleted or renamed%s\" % (pagetitle_to_remove, annotation))\n subsections[k] = str(parsed)\n else:\n subsections[k - 1] = \"\"\n subsections[k] = \"\"\n notes.append(\"remove Anagrams section; only had '%s', which has been deleted or renamed%s\"\n % (pagetitle_to_remove, annotation))\n\n secbody = \"\".join(subsections)\n # Strip extra newlines added to secbody\n sections[j] = secbody.rstrip(\"\\n\") + sectail\n text = \"\".join(sections)\n\n return text, notes\n\ndef process_page_for_anagrams(index, page, modify_this_page):\n pagetitle = str(page.title())\n def pagemsg(txt):\n msg(\"Page %s %s: %s\" % (index, pagetitle, txt))\n def errandpagemsg(txt):\n errandmsg(\"Page %s %s: %s\" % (index, pagetitle, txt))\n\n notes = []\n\n text = blib.safe_page_text(page, errandpagemsg)\n if not text:\n return\n\n retval = blib.find_modifiable_lang_section(text, \"Italian\", pagemsg, force_final_nls=True)\n if retval is None:\n return\n\n sections, j, secbody, sectail, has_non_lang = retval\n\n anagrams = []\n\n subsections = re.split(\"(^==+[^=\\n]+==+\\n)\", secbody, 0, re.M)\n for k in range(2, len(subsections), 2):\n if \"===Anagrams===\" in subsections[k - 1]:\n parsed = blib.parse_text(subsections[k])\n for t in parsed.filter_templates():\n tn = tname(t)\n def getp(param):\n return getparam(t, param)\n if tn == \"anagrams\":\n if getp(\"1\") != \"it\":\n pagemsg(\"WARNING: Wrong language in {{anagrams}}: %s\" % str(t))\n return\n for anagram in blib.fetch_param_chain(t, \"2\"):\n if anagram not in anagrams:\n anagrams.append(anagram)\n elif tn == \"l\":\n if getp(\"1\") != \"it\":\n pagemsg(\"WARNING: Wrong language in {{l}}: %s\" % str(t))\n return\n anagram = getp(\"2\")\n if anagram not in anagrams:\n anagrams.append(anagram)\n if modify_this_page:\n subsections[k - 1] = \"\"\n subsections[k] = \"\"\n notes.append(\"remove Anagrams section prior to renaming page%s\" % annotation)\n secbody = \"\".join(subsections)\n\n # Strip extra newlines added to secbody\n sections[j] = secbody.rstrip(\"\\n\") + sectail\n text = \"\".join(sections)\n\n for anagram in anagrams:\n def do_process_page(page, index, parsed):\n return remove_anagram_from_page(index, page, pagetitle)\n blib.do_edit(pywikibot.Page(site, anagram), index, do_process_page,\n save=args.save, verbose=args.verbose, diff=args.diff)\n\n return text, notes\n\ndef process_page_for_deletion(index, page):\n pagetitle = str(page.title())\n def pagemsg(txt):\n msg(\"Page %s %s: %s\" % (index, pagetitle, txt))\n def errandpagemsg(txt):\n errandmsg(\"Page %s %s: %s\" % (index, pagetitle, txt))\n if not blib.safe_page_exists(page, errandpagemsg):\n pagemsg(\"Skipping because page doesn't exist\")\n return\n\n notes = []\n\n text = blib.safe_page_text(page, errandpagemsg)\n if not text:\n return\n\n retval = blib.find_modifiable_lang_section(text, \"Italian\", pagemsg)\n if retval is None:\n 
return\n\n  sections, j, secbody, sectail, has_non_lang = retval\n  if not has_non_lang:\n    # Can delete the whole page, but check for non-blank section 0\n    cleaned_sec0 = re.sub(\"^\\{\\{also\\|.*?\\}\\}\\n\", \"\", sections[0])\n    if cleaned_sec0.strip():\n      pagemsg(\"WARNING: Whole page deletable except that there's text above all sections: <%s>\" % cleaned_sec0.strip())\n      return\n    pagemsg(\"Page should be deleted\")\n    output_pages_to_delete.append(pagetitle)\n    return\n\n  del sections[j]\n  del sections[j-1]\n  notes.append(\"remove Italian section for bad (nonexistent or misspelled) form%s\" % annotation)\n  if j > len(sections):\n    # We deleted the last section, remove the separator at the end of the\n    # previous section.\n    sections[-1] = re.sub(r\"\\n+--+\\n*\\Z\", \"\", sections[-1])\n  text = \"\".join(sections)\n\n  return text, notes\n\n\nparser = blib.create_argparser(\"Delete/rename Italian forms, fixing up anagrams\")\nparser.add_argument(\"--direcfile\", help=\"File listing forms to delete/rename.\", required=True)\nparser.add_argument(\"--comment\", help=\"Optional additional comment to use.\")\nparser.add_argument(\"--output-pages-to-delete\", help=\"Output file containing forms to delete.\")\nargs = parser.parse_args()\nstart, end = blib.parse_start_end(args.start, args.end)\nannotation = \" (%s)\" % args.comment if args.comment else \"\"\n\ninput_pages_to_delete = []\noutput_pages_to_delete = []\npages_to_rename = []\n\n# Separate pages to delete and rename. Do pages to delete first so we can run this in sysop mode\n# (python login.py --sysop), and it will first delete the necessary pages, then ask for the non-sysop password and\n# rename the remaining pages.\nfor index, line in blib.iter_items_from_file(args.direcfile, start, end):\n  m = re.search(\"^(.*) -> (.*)$\", line)\n  if m:\n    frompagetitle, topagetitle = m.groups()\n    pages_to_rename.append((index, frompagetitle, topagetitle))\n  else:\n    m = re.search(\"^(.*): delete$\", line)\n    if m:\n      badpagetitle = m.group(1)\n      input_pages_to_delete.append((index, badpagetitle))\n    else:\n      errandmsg(\"Line %s: Unrecognized line: %s\" % (index, line))\n\nfor index, badpagetitle in input_pages_to_delete:\n  badpage = pywikibot.Page(site, badpagetitle)\n  def pagemsg(txt):\n    msg(\"Page %s %s: %s\" % (index, badpagetitle, txt))\n  def errandpagemsg(txt):\n    errandmsg(\"Page %s %s: %s\" % (index, badpagetitle, txt))\n  if not blib.safe_page_exists(badpage, errandpagemsg):\n    pagemsg(\"Skipping because page doesn't exist\")\n    continue\n  process_page_for_anagrams(index, badpage, modify_this_page=False)\n  def do_process_page(page, index, parsed):\n    return process_page_for_deletion(index, page)\n  blib.do_edit(badpage, index, do_process_page, save=args.save, verbose=args.verbose, diff=args.diff)\n  #this_comment = 'delete bad Italian non-lemma form'\n  #if args.save:\n  #  existing_text = blib.safe_page_text(badpage, errandpagemsg, bad_value_ret=None)\n  #  if existing_text is not None:\n  #    badpage.delete('%s (content was \"%s\")' % (this_comment, existing_text))\n  #    errandpagemsg(\"Deleted (comment=%s)\" % this_comment)\n  #else:\n  #  pagemsg(\"Would delete (comment=%s)\" % this_comment)\n\nfor index, frompagetitle, topagetitle in pages_to_rename:\n  frompage = pywikibot.Page(site, frompagetitle)\n  def pagemsg(txt):\n    msg(\"Page %s %s: %s\" % (index, frompagetitle, txt))\n  def errandpagemsg(txt):\n    errandmsg(\"Page %s %s: %s\" % (index, frompagetitle, txt))\n  if not blib.safe_page_exists(frompage, errandpagemsg):\n    pagemsg(\"Skipping because page doesn't exist\")\n    
continue\n def do_process_page(page, index, parsed):\n return process_page_for_anagrams(index, page, modify_this_page=True)\n blib.do_edit(frompage, index, do_process_page,\n save=args.save, verbose=args.verbose, diff=args.diff)\n topage = pywikibot.Page(site, topagetitle)\n if blib.safe_page_exists(topage, errandpagemsg):\n errandpagemsg(\"Destination page %s already exists, not moving\" %\n topagetitle)\n continue\n this_comment = 'rename bad Italian non-lemma form'\n if args.save:\n try:\n frompage.move(topagetitle, reason=this_comment, movetalk=True, noredirect=True)\n errandpagemsg(\"Renamed to %s\" % topagetitle)\n except pywikibot.PageRelatedError as error:\n errandpagemsg(\"Error moving to %s: %s\" % (topagetitle, error))\n else:\n pagemsg(\"Would rename to %s (comment=%s)\" % (topagetitle, this_comment))\n\nmsg(\"The following pages need to be deleted:\")\nfor page in output_pages_to_delete:\n msg(page)\nif args.output_pages_to_delete:\n with open(args.output_pages_to_delete, \"w\", encoding=\"utf-8\") as fp:\n for page in output_pages_to_delete:\n print(page, file=fp)\n","sub_path":"delete_rename_it_forms.py","file_name":"delete_rename_it_forms.py","file_ext":"py","file_size_in_byte":9364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"507559964","text":"import sys\nsys.stdin = open(\"5204.txt\")\n\ndef div(LIST):\n global ans\n length = len(LIST)\n idx = length // 2\n l1 = LIST[:idx]\n l2 = LIST[idx:]\n\n if len(l1) > 1:\n l1 = div(l1)\n if len(l2) > 1:\n l2 = div(l2)\n\n if l1[-1] > l2[-1]:\n ans += 1\n return l2 + l1\n else:\n return l1 + l2\n\nfor t in range(int(input())):\n N = int(input())\n num_list = list(map(int, input().split()))\n ans = 0\n tmp = div(num_list)\n\n num_list.sort()\n n = num_list[N//2]\n\n print(f\"#{t + 1} {ans} {n}\")","sub_path":"SWEA/2019/190328/5204.py","file_name":"5204.py","file_ext":"py","file_size_in_byte":549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"651915576","text":"from sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy import Column, Integer, String\nfrom sqlalchemy.orm import sessionmaker\nfrom sqlalchemy import create_engine\nimport simplejson as json\nimport codecs\n\nengine = create_engine(\"mysql+pymysql://allan:zPSsZYtmmjAJAcjR@localhost:3306/house\", max_overflow=5)\nBase = declarative_base()\ntb_name = \"20190525_21\"\n\n\nclass House(Base):\n __tablename__ = tb_name\n id = Column(Integer, primary_key=True)\n title = Column(String(64))\n link = Column(String(64))\n area = Column(String(64))\n location = Column(String(64))\n totalPrice = Column(String(64))\n unitPrice = Column(String(64))\n flood = Column(String(64))\n follow = Column(String(64))\n tag1 = Column(String(64))\n tag2 = Column(String(64))\n\n\ndef init_db():\n Base.metadata.create_all(engine)\n\n\ndef drop_db():\n Base.metadata.drop_all(engine)\n\n\ndef insert_data_many(insert_data):\n Session = sessionmaker(bind=engine)\n session = Session()\n session.add_all(insert_data)\n session.commit()\n\n\ndef insert_data_one_by_one(insert_data):\n Session = sessionmaker(bind=engine)\n session = Session()\n session.add(insert_data)\n session.commit()\n\n\ndef txt_to_json(name):\n # txt_name = name + \".txt\"\n # with open(txt_name, 'r', encoding='utf-8') as file:\n # data = file.read()\n #\n # data = \"[\" + data\n # data = data + \"]\"\n # data = data.replace(\",]\", \"]\")\n json_save_name = \"./data/\" + name + \"_hours.json\"\n # file = 
codecs.open(json_save_name, 'wb', 'utf-8')\n # file.write(data)\n\n insert_data = []\n with open(json_save_name, 'r', encoding='utf-8') as f:\n data_json = json.load(f)\n f.close()\n\n for house in data_json:\n insert_h = House()\n insert_h.title = house['title']\n insert_h.link = house['link']\n insert_h.area = house['area']\n insert_h.location = house['location']\n insert_h.totalPrice = house['totalPrice']\n insert_h.unitPrice = house['unitPrice']\n # insert_h.flood = house['flood']\n insert_h.follow = house['follow']\n # insert_h.tag1 = house['tag1']\n # insert_h.tag2 = house['tag2']\n insert_data.append(insert_h)\n\n insert_data_many(insert_data)\n\n\nif __name__ == '__main__':\n # def create_db():\n # hous_s = House()\n # hous_s.title = \"title\"\n # hous_s.area = \"area\"\n # insert_data_one_by_one(hous_s)\n init_db()\n # txt_to_json(tb_name)\n# data = read_json(\"./data/20190520_hours.json\")\n# insert_data(data)\n","sub_path":"house/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2583,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"180115718","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Aug 22 16:26:21 2018\nThe idea is to use level order traversal. We traverse\n both trees simultaneously and compare the data whenever we dequeue and item from queue.\n@author: anu\n\"\"\"\n\nclass TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\nclass Solution:\n def identical(self,A,B):\n q1=[]\n q2=[]\n if A==None and B==None: #if both trees are none\n return 1\n if (not A) or (not B): #if one of the trees is none\n return 0\n \n q1.append(A)\n q2.append(B)\n while len(q1)!=0 and len(q2)!=0:\n ptr1=q1.pop()\n ptr2=q2.pop()\n \n if ptr1.val != ptr2.val: #compare everytime the popped value\n return 0\n \n if ptr1.left!=None and ptr2.left!=None: \n q1.insert(0,ptr1.left)\n q2.insert(0,ptr2.left)\n \n elif ptr1.left or ptr2.left: #if one of the left child is empty(**necessary step)\n return 0\n \n if ptr1.right!=None and ptr2.right!=None:\n q1.insert(0,ptr1.right)\n q2.insert(0,ptr2.right) \n \n elif ptr1.right or ptr2.right: #if one of the right child is empty(**necessary check)\n return 0\n return 1\n \ns=Solution()\nroot1 = TreeNode(1)\nroot1.left = TreeNode(2)\nroot1.right = TreeNode(3)\nroot1.right.right=TreeNode(6)\nroot1.left.left = TreeNode(4)\nroot1.left.right = TreeNode(5)\nroot1.left.left.left=TreeNode(7) \n\nroot2 = TreeNode(1)\nroot2.left = TreeNode(2)\nroot2.right = TreeNode(3)\nroot2.right.right=TreeNode(6)\nroot2.left.left = TreeNode(4)\nroot2.left.right = TreeNode(5)\nroot2.left.left.left=TreeNode(7) \nprint(s.identical(root1,root2))\n \n ","sub_path":"Tree Datastructure/check Identical Binary tree.py","file_name":"check Identical Binary tree.py","file_ext":"py","file_size_in_byte":1947,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"101446681","text":"'''\ndef sum(n):\n if n==0:\n return 0\n else:\n return n+sum(n-1)\nprint(sum(3))\n'''\n\n'''\ndef f1(n):\n if n > 0:\n print(n)\n f1(n - 1)\n\n\nf1(5)\n'''\ndef f1(n):\n if n>0:\n return n+f1(n-1)\n else:\n return 0\nprint(f1(5))","sub_path":"python/func-recursion/study.py","file_name":"study.py","file_ext":"py","file_size_in_byte":264,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"161391493","text":"#!/usr/bin/env python3\n\nimport tkinter as tk\nimport random\nimport os\n\n#variables\n\ni = 
1\n\nanswers = list(range(11))\nanswers[0]=\"I have never seen a thin person drinking Diet Coke.\";\nanswers[1]=\"If Obama resigns from office NOW, thereby doing a great \\nservice to the country—I will give him free lifetime golf \\nat any one of my courses!\";\nanswers[2]=\"Robert I'm getting a lot of heat for saying you should dump \\nKristen- but I'm right. If you saw the Miss Universe girls \\nyou would reconsider.\";\nanswers[3]=\"Windmills are the greatest threat in the US to both bald and \\ngolden eagles. Media claims fictional 'global warming' is \\nworse\";\nanswers[4]=\"The concept of global warming was created by and for the Chinese \\nin order to make U.S. manufacturing non-competitive.\";\nanswers[5]=\"I will build a great wall – and nobody builds walls better than \\nme, believe me – and I’ll build them very inexpensively. I \\nwill build a great, great wall on our southern border, \\nand I will make Mexico pay for that wall. Mark my words.\";\nanswers[6]=\"I’ve said if Ivanka weren’t my daughter, perhaps I’d be dating her.\";\nanswers[7]=\"My fingers are long and beautiful, as, it has \\nbeen well documented, are various other parts of my body.\";\nanswers[8]=\"Look at those hands, are they small hands? \\nAnd, [Republican rival Marco Rubio] referred to my hands: \\n‘If they’re small, something else must be small.’ \\nI guarantee you there’s no problem. I guarantee.\";\nanswers[9]=\"I was down there, and I watched our police \\nand our firemen, down on 7-Eleven, down at the World Trade Center, \\nright after it came down\";\nanswers[10]=\"Grab them by the pussy. You can do anything.\";\n\n#functions\n\ndef meme():\n\tparent = tk.Toplevel()\n\timageFile01 = tk.PhotoImage(file=\"bin/donald/micha_meme\")\n\tchild = tk.Label(parent, image=imageFile01)\n\tchild.image = imageFile01\n\tchild.grid(row=0, column=0)\n\ndef callbackDonald(event=None):\n\n\tglobal i\n\n\tplaintextfile = open(\"log.dat\", \"a\")\n\tentryText = donaldEntry.get()\n\tdonaldEntry.delete(0, \"end\")\n\t#answers-start\n\n\trandomNumber = random.randint(0, 11)\n\n\tif randomNumber == 11:\n\t\tmeme()\n\t\twriteContentDonald = \"Donald Trump: Whoever this Micha guy is, he seems pretty cool!\"\n\t\ti=i+1\n\telif \"tschuess\" in entryText:\n\t\twriteContentDonald = \"Donald Trump: Time to drain the swamp!\"\n\t\ti=i+1\n\telse:\n\t\tanswer = \"Donald Trump: \"+answers[randomNumber]\n\t\twriteContentDonald = answer\n\t\tif randomNumber in [1,2,3,9]:\n\t\t\ti=i+3\n\t\telif randomNumber in [5,8]:\n\t\t\ti=i+4\n\t\telif randomNumber in [4,7]:\n\t\t\ti=i+2\n\t\telse:\n\t\t\ti=i+1\n\n\t#answers-end\n\twriteContentYou = \"\\nDu: \"+entryText\n\tplaintextfile.write(writeContentYou)\n\ti=i+1\n\twriteContentMe = \"\\n\"+writeContentDonald\n\tplaintextfile.write(writeContentMe)\n\tplaintextfile.close()\n\tplaintextfile = open(\"log.dat\", \"r\")\n\tplaintext = plaintextfile.read()\n\tplaintextfile.close()\n\ttext.set(plaintext)\n\n\twhile i > 40:\n\t\ttry:\n\t\t\tplaintextfile = open(\"log.dat\", \"w\")\n\t\t\tplaintextfile.write(writeContentMe)\n\t\t\tplaintextfile.close()\n\t\t\ti=1\n\t\t\tbreak\n\t\texcept OSError:\n\t\t\ti=41\n\n#init gui\n\ndonald = tk.Tk()\ndonald.title(\"Can't Stump The Trump\")\n\n#gui design\n\ntext = tk.StringVar()\nplaintextfile = open(\"log.dat\", \"w\")\nplaintextfile.write(\"Donald Trump: Make America Great Again\")\nplaintextfile.close()\nplaintextfile = open(\"log.dat\", \"r\")\nplaintext = plaintextfile.read()\ntext.set(plaintext)\nplaintextfile.close()\navdonald = 
tk.PhotoImage(file=\"avatars/randoms/donald\")\navatar = tk.Label(donald, image=avdonald)\navatar.image = avdonald\navatar.grid(row=0, column=0, rowspan=5)\ndonaldChat = tk.Label(donald, textvariable=text, anchor=\"c\", relief=\"sunken\", width=80, height=40)\ndonaldChat.grid(row=1, column=1, rowspan=10, columnspan=4)\ndonaldEntry = tk.Entry(donald, width=70)\ndonaldEntry.grid(row=11, column=1, rowspan=2, columnspan=3)\ndonaldSend = tk.Button(donald, text=\"Send!\", command=callbackDonald, width=10)\ndonaldSend.grid(row=11, column=4, rowspan=2, columnspan=1)\n\ndonald.bind(\"\", callbackDonald)\ndonald.mainloop()\n","sub_path":"pyBots/randoms/donald.py","file_name":"donald.py","file_ext":"py","file_size_in_byte":3991,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"14225422","text":"def apply_iscsi_settings(self):\n 'Update the iSCSI target alias and CHAP settings'\n update = False\n target = self.target\n body = dict()\n if ((self.name is not None) and (self.name != target['alias'])):\n update = True\n body['alias'] = self.name\n if (self.chap_secret is not None):\n update = True\n body.update(dict(enableChapAuthentication=True, chapSecret=self.chap_secret))\n elif target['chap']:\n update = True\n body.update(dict(enableChapAuthentication=False))\n if (update and (not self.check_mode)):\n try:\n request((self.url + ('storage-systems/%s/iscsi/target-settings' % self.ssid)), method='POST', data=json.dumps(body), headers=HEADERS, **self.creds)\n except Exception as err:\n self.module.fail_json(msg=('Failed to update the iSCSI target settings. Array Id [%s]. Error [%s].' % (self.ssid, to_native(err))))\n return update","sub_path":"Data Set/bug-fixing-5/eee51486fd3327c1cfa21dc7566afa7debab5ebf--fix.py","file_name":"eee51486fd3327c1cfa21dc7566afa7debab5ebf--fix.py","file_ext":"py","file_size_in_byte":935,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"573396038","text":"from os import walk\nfrom os import path\nfrom os import makedirs\nimport numpy as np\nimport librosa\n\ndef main(dataset_main_path, name_class_combo, fs, snippet_length, snippet_hop, block_size, hop_size, mel_length):\n print('%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%') \n j = 0\n for a_combo in name_class_combo:\n a_class = a_combo[1]\n temp_folder = a_class + \"_\" + str(snippet_length) + \"ms_\" + str(snippet_hop) + \"ms\"\n snippet_path = dataset_main_path + \"/\" + temp_folder + \"/\" + a_combo[0]\n \n save_path = snippet_path + \"_MelSpectrogram_block\" + str(block_size) + \"_hop\" + str(hop_size) + \"_mel\" + str(mel_length) \n dir = path.dirname(save_path + \"/dummy.aaa\")\n if not path.exists(dir):\n makedirs(dir)\n \n snippet_names = []\n for (dirpath, dirnames, filenames) in walk(snippet_path):\n snippet_names.extend(filenames)\n break\n \n for a_snippet_name in snippet_names:\n a_snippet_path = snippet_path + \"/\" + a_snippet_name\n x, _ = librosa.load(a_snippet_path, sr = fs)\n S = librosa.feature.melspectrogram(y = x, sr = fs, n_fft = block_size, hop_length = hop_size, n_mels = mel_length)\n #S = S / S.max()\n np.savetxt(save_path +\"/\" + a_snippet_name[0:-4] + '.txt', S, fmt='%10.5f')\n \n j = j + 1\n print(a_combo[0], \"'s spectrogram is done\", j)\n\n\n\n","sub_path":"Create Input/getMelSpectrogram.py","file_name":"getMelSpectrogram.py","file_ext":"py","file_size_in_byte":1477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} 
+{"seq_id":"363780678","text":"#!/usr/bin/env python\n# coding=utf-8\n'''\n\tconfig \n'''\nfrom __future__ import absolute_import, unicode_literals\nimport os\nimport yaml\nimport logging\nPROJECT_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir))\n# get config info\nwith open(os.path.join(PROJECT_DIR, 'config', 'config.yaml')) as f:\n config = yaml.load(f)\n# log\ndef logger_init(name, filepath, level=logging.DEBUG):\n logger = logging.getLogger(name)\n logger.setLevel(level)\n ch = logging.FileHandler(filepath)\n formatter = logging.Formatter('[%(asctime)s] [%(levelname)s] %(message)s', datefmt='%Y-%m-%d %H:%M:%S')\n ch.setFormatter(formatter)\n logger.addHandler(ch)\n return logger\nlogger = logger_init('access', os.path.join(PROJECT_DIR, 'log', 'access.log'),config['logger']['level'])\n# websocket server connect\nconnection = config['connection']\n# currency\ncurrency = config['currency']\n\n\n\n\n","sub_path":"config/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"413101189","text":"# coding=utf-8\n# Copyright 2015 Pants project contributors (see CONTRIBUTORS.md).\n# Licensed under the Apache License, Version 2.0 (see LICENSE).\n\nfrom __future__ import (absolute_import, division, generators, nested_scopes, print_function,\n unicode_literals, with_statement)\n\nimport os\nimport tarfile\nimport unittest\nfrom contextlib import contextmanager\n\nfrom pants.base.project_tree import Dir, Link\nfrom pants.engine.fs import FilesContent, PathGlobs, Snapshot, create_fs_rules\nfrom pants.util.meta import AbstractClass\nfrom pants_test.engine.scheduler_test_base import SchedulerTestBase\n\n\nclass DirectoryListing(object):\n \"\"\"TODO: See #4027.\"\"\"\n\n\nclass ReadLink(object):\n \"\"\"TODO: See #4027.\"\"\"\n\n\nclass FSTest(unittest.TestCase, SchedulerTestBase, AbstractClass):\n\n _original_src = os.path.join(os.path.dirname(__file__), 'examples/fs_test/fs_test.tar')\n\n @contextmanager\n def mk_project_tree(self, ignore_patterns=None):\n \"\"\"Construct a ProjectTree for the given src path.\"\"\"\n project_tree = self.mk_fs_tree(ignore_patterns=ignore_patterns)\n with tarfile.open(self._original_src) as tar:\n tar.extractall(project_tree.build_root)\n yield project_tree\n\n @staticmethod\n def specs(relative_to, *filespecs):\n return PathGlobs.create(relative_to, include=filespecs)\n\n def assert_walk_dirs(self, filespecs, paths, ignore_patterns=None):\n self.assert_walk_snapshot('dirs', filespecs, paths, ignore_patterns=ignore_patterns)\n\n def assert_walk_files(self, filespecs, paths, ignore_patterns=None):\n self.assert_walk_snapshot('files', filespecs, paths, ignore_patterns=ignore_patterns)\n\n def assert_walk_snapshot(self, field, filespecs, paths, ignore_patterns=None):\n with self.mk_project_tree(ignore_patterns=ignore_patterns) as project_tree:\n scheduler = self.mk_scheduler(rules=create_fs_rules(), project_tree=project_tree)\n result = self.execute(scheduler, Snapshot, self.specs('', *filespecs))[0]\n self.assertEquals(sorted([p.path for p in getattr(result, field)]), sorted(paths))\n\n def assert_content(self, filespecs, expected_content):\n with self.mk_project_tree() as project_tree:\n scheduler = self.mk_scheduler(rules=create_fs_rules(), project_tree=project_tree)\n result = self.execute(scheduler, FilesContent, self.specs('', *filespecs))[0]\n actual_content = {f.path: f.content for f in result.dependencies}\n 
self.assertEquals(expected_content, actual_content)\n\n def assert_digest(self, filespecs, expected_files):\n with self.mk_project_tree() as project_tree:\n scheduler = self.mk_scheduler(rules=create_fs_rules(), project_tree=project_tree)\n result = self.execute(scheduler, Snapshot, self.specs('', *filespecs))[0]\n # Confirm all expected files were digested.\n self.assertEquals(set(expected_files), set(f.path for f in result.files))\n self.assertTrue(result.fingerprint is not None)\n\n def assert_fsnodes(self, filespecs, subject_product_pairs):\n with self.mk_project_tree() as project_tree:\n scheduler = self.mk_scheduler(rules=create_fs_rules(), project_tree=project_tree)\n request = self.execute_request(scheduler, Snapshot, self.specs('', *filespecs))\n\n # Validate that FilesystemNodes for exactly the given subjects are reachable under this\n # request.\n fs_nodes = [n for n, _ in scheduler.product_graph.walk(roots=request.roots)\n if type(n) is \"TODO: need a new way to filter for FS intrinsics\"]\n self.assertEquals(set((n.subject, n.product) for n in fs_nodes), set(subject_product_pairs))\n\n def test_walk_literal(self):\n self.assert_walk_files(['4.txt'], ['4.txt'])\n self.assert_walk_files(['a/b/1.txt', 'a/b/2'], ['a/b/1.txt', 'a/b/2'])\n self.assert_walk_files(['c.ln/2'], ['c.ln/2'])\n self.assert_walk_files(['d.ln/b/1.txt'], ['d.ln/b/1.txt'])\n self.assert_walk_files(['a/3.txt'], ['a/3.txt'])\n self.assert_walk_files(['z.txt'], [])\n\n def test_walk_literal_directory(self):\n self.assert_walk_dirs(['c.ln'], ['c.ln'])\n self.assert_walk_dirs(['a'], ['a'])\n self.assert_walk_dirs(['a/b'], ['a/b'])\n self.assert_walk_dirs(['z'], [])\n self.assert_walk_dirs(['4.txt', 'a/3.txt'], [])\n\n def test_walk_siblings(self):\n self.assert_walk_files(['*.txt'], ['4.txt'])\n self.assert_walk_files(['a/b/*.txt'], ['a/b/1.txt'])\n self.assert_walk_files(['c.ln/*.txt'], ['c.ln/1.txt'])\n self.assert_walk_files(['a/b/*'], ['a/b/1.txt', 'a/b/2'])\n self.assert_walk_files(['*/0.txt'], [])\n\n def test_walk_recursive(self):\n self.assert_walk_files(['**/*.txt.ln'], ['a/4.txt.ln', 'd.ln/4.txt.ln'])\n self.assert_walk_files(['**/*.txt'], ['4.txt',\n 'a/3.txt',\n 'a/b/1.txt',\n 'c.ln/1.txt',\n 'd.ln/3.txt',\n 'd.ln/b/1.txt'])\n self.assert_walk_files(['**/*.txt'], ['a/3.txt',\n 'a/b/1.txt',\n 'c.ln/1.txt',\n 'd.ln/3.txt',\n 'd.ln/b/1.txt',\n '4.txt'])\n self.assert_walk_files(['**/3.t*t'], ['a/3.txt', 'd.ln/3.txt'])\n self.assert_walk_files(['**/*.zzz'], [])\n\n def test_walk_single_star(self):\n self.assert_walk_files(['*'], ['4.txt'])\n\n def test_walk_parent_link(self):\n self.assert_walk_files(['c.ln/../3.txt'], ['c.ln/../3.txt'])\n\n def test_walk_recursive_all(self):\n self.assert_walk_files(['**'], ['4.txt',\n 'a/3.txt',\n 'a/4.txt.ln',\n 'a/b/1.txt',\n 'a/b/2',\n 'c.ln/1.txt',\n 'c.ln/2',\n 'd.ln/3.txt',\n 'd.ln/4.txt.ln',\n 'd.ln/b/1.txt',\n 'd.ln/b/2'])\n\n def test_walk_ignore(self):\n # Ignore '*.ln' suffixed items at the root.\n self.assert_walk_files(['**'],\n ['4.txt',\n 'a/3.txt',\n 'a/4.txt.ln',\n 'a/b/1.txt',\n 'a/b/2',],\n ignore_patterns=['/*.ln'])\n # Whitelist one entry.\n self.assert_walk_files(['**'],\n ['4.txt',\n 'a/3.txt',\n 'a/4.txt.ln',\n 'a/b/1.txt',\n 'a/b/2',\n 'c.ln/1.txt',\n 'c.ln/2',],\n ignore_patterns=['/*.ln', '!c.ln'])\n\n def test_walk_recursive_trailing_doublestar(self):\n self.assert_walk_files(['a/**'], ['a/3.txt',\n 'a/4.txt.ln',\n 'a/b/1.txt',\n 'a/b/2'])\n self.assert_walk_files(['d.ln/**'], ['d.ln/3.txt',\n 'd.ln/4.txt.ln',\n 'd.ln/b/1.txt',\n 'd.ln/b/2'])\n 
self.assert_walk_dirs(['a/**'], ['a/b'])\n\n def test_walk_recursive_slash_doublestar_slash(self):\n self.assert_walk_files(['a/**/3.txt'], ['a/3.txt'])\n self.assert_walk_files(['a/**/b/1.txt'], ['a/b/1.txt'])\n self.assert_walk_files(['a/**/2'], ['a/b/2'])\n\n def test_walk_recursive_directory(self):\n self.assert_walk_dirs(['*'], ['a', 'c.ln', 'd.ln'])\n self.assert_walk_dirs(['*/*'], ['a/b', 'd.ln/b'])\n self.assert_walk_dirs(['**/*'], ['a', 'c.ln', 'd.ln', 'a/b', 'd.ln/b'])\n self.assert_walk_dirs(['*/*/*'], [])\n\n def test_remove_duplicates(self):\n self.assert_walk_files(['*', '**'], ['4.txt',\n 'a/3.txt',\n 'a/4.txt.ln',\n 'a/b/1.txt',\n 'a/b/2',\n 'c.ln/1.txt',\n 'c.ln/2',\n 'd.ln/3.txt',\n 'd.ln/4.txt.ln',\n 'd.ln/b/1.txt',\n 'd.ln/b/2'])\n self.assert_walk_files(['**/*.txt', 'a/b/1.txt', '4.txt'], ['4.txt',\n 'a/3.txt',\n 'c.ln/1.txt',\n 'd.ln/3.txt',\n 'a/b/1.txt',\n 'd.ln/b/1.txt'])\n self.assert_walk_dirs(['*', '**'], ['a', 'c.ln', 'd.ln', 'a/b', 'd.ln/b'])\n\n def test_files_content_literal(self):\n self.assert_content(['4.txt', 'a/4.txt.ln'], {'4.txt': 'four\\n', 'a/4.txt.ln': 'four\\n'})\n\n def test_files_content_directory(self):\n with self.assertRaises(Exception):\n self.assert_content(['a/b/'], {'a/b/': 'nope\\n'})\n with self.assertRaises(Exception):\n self.assert_content(['a/b'], {'a/b': 'nope\\n'})\n\n def test_files_content_symlink(self):\n self.assert_content(['c.ln/../3.txt'], {'c.ln/../3.txt': 'three\\n'})\n\n def test_files_digest_literal(self):\n self.assert_digest(['a/3.txt', '4.txt'], ['a/3.txt', '4.txt'])\n\n @unittest.skip('Skipped to expedite landing #3821; see: #4027.')\n def test_nodes_file(self):\n self.assert_fsnodes(['4.txt'], [\n (Dir(''), DirectoryListing),\n ])\n\n @unittest.skip('Skipped to expedite landing #3821; see: #4027.')\n def test_nodes_symlink_file(self):\n self.assert_fsnodes(['c.ln/2'], [\n (Dir(''), DirectoryListing),\n (Link('c.ln'), ReadLink),\n (Dir('a'), DirectoryListing),\n (Dir('a/b'), DirectoryListing),\n ])\n self.assert_fsnodes(['d.ln/b/1.txt'], [\n (Dir(''), DirectoryListing),\n (Link('d.ln'), ReadLink),\n (Dir('a'), DirectoryListing),\n (Dir('a/b'), DirectoryListing),\n ])\n\n @unittest.skip('Skipped to expedite landing #3821; see: #4027.')\n def test_nodes_symlink_globbed_dir(self):\n self.assert_fsnodes(['*/2'], [\n # Scandir for the root.\n (Dir(''), DirectoryListing),\n # Read links to determine whether they're actually directories.\n (Link('c.ln'), ReadLink),\n (Link('d.ln'), ReadLink),\n # Scan second level destinations: `a/b` is matched via `c.ln`.\n (Dir('a'), DirectoryListing),\n (Dir('a/b'), DirectoryListing),\n ])\n\n @unittest.skip('Skipped to expedite landing #3821; see: #4027.')\n def test_nodes_symlink_globbed_file(self):\n self.assert_fsnodes(['d.ln/b/*.txt'], [\n # NB: Needs to scandir every Dir on the way down to track whether\n # it is traversing a symlink.\n (Dir(''), DirectoryListing),\n # Traverse one symlink.\n (Link('d.ln'), ReadLink),\n (Dir('a'), DirectoryListing),\n (Dir('a/b'), DirectoryListing),\n ])\n","sub_path":"tests/python/pants_test/engine/test_fs.py","file_name":"test_fs.py","file_ext":"py","file_size_in_byte":11393,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"22170452","text":"\"\"\" Calculate information contents and parameters of a BSC.\n\nThis program is intended for used in course, Principle of Information and Coding Theory.\nUsage details can be displayed by passing command line argument `--help`.\n\nNote: All information 
contents calculated are bit-wise, i.e. in (information-)bit per (binary-)bit.\n\nrevise:\nBy modifying the instructor-provided code, suppress all other output\nand keep only these three outputs:\n    entropy of the input message sequence (information bits / binary message)\n    entropy of the output message sequence (information bits / binary message)\n    average mutual information (information bits / binary message)\n\n\"\"\"\n\n# Standard library\nimport sys\nimport argparse\nimport time\nimport csv\nfrom pathlib import Path\n\n# Non-standard library\nimport numpy as np\n\n__author__ = \"Guo, Jiangling\"\n__email__ = \"tguojiangling@jnu.edu.cn\"\n__version__ = \"2020101.1549\"\n\n\ndef main():\n    \"\"\"Entry point of this program.\"\"\"\n    args = parse_sys_args()\n    workflow(args.X, args.Y, args.OUTPUT, verbose=args.verbose)\n\n###\n# The main work flow\n###\n\n\ndef workflow(x_file_name, y_file_name, out_file_name, verbose=False):\n    \"\"\"The main workflow.\"\"\"\n\n    # Number of binary bits in one symbol.\n    N = 8\n\n    x = read_file_as_bytes(x_file_name)\n    y = read_file_as_bytes(y_file_name)\n\n    ## --- Core computation: begin\n    start_time = time.time()\n\n    joint_p_xy = calc_joint_p_xy(x, y)\n    H_x = calc_H_p(calc_p_x(joint_p_xy)) / N\n    H_y = calc_H_p(calc_p_y(joint_p_xy)) / N\n    joint_H_xy = calc_joint_H_xy(joint_p_xy) / N\n    cond_H_xy = calc_cond_H_xy(joint_p_xy) / N\n    cond_H_yx = calc_cond_H_yx(joint_p_xy) / N\n    I_xy = H_x - cond_H_xy\n\n    # Calculate error probability of the BSC.\n    err = np.bitwise_xor(x, y)\n    p_BSC = count_binary_1(err) / (x.size * N)\n\n    elapsed_time = time.time() - start_time\n    ## --- Core computation: end\n\n    if verbose:\n        p_x0 = 1 - (count_binary_1(x) / (x.size * N))\n        p_y0 = 1 - (count_binary_1(y) / (y.size * N))\n        print('Computation Time: %.5f sec' % (elapsed_time))\n        print(' BSC input (X): %d bytes, \"%s\"' % (x.size, x_file_name))\n        print(' BSC output (Y): %d bytes, \"%s\"' % (y.size, y_file_name))\n        print(' H(X) =', H_x, 'bit/bit, p(x=0) =', p_x0)\n        print(' H(Y) =', H_y, 'bit/bit, p(y=0) =', p_y0)\n        print(' H(XY) =', joint_H_xy, 'bit/2-bit')\n        print('H(X|Y) =', cond_H_xy, 'bit/bit')\n        print('H(Y|X) =', cond_H_yx, 'bit/bit')\n        print('I(X;Y) =', I_xy, 'bit/bit')\n        print('(BSC)p =', p_BSC)\n\n    write_results(out_file_name, [\n        x_file_name, y_file_name, H_x, H_y, I_xy])\n\n    return H_x\n\n###\n# Computation functions\n###\n\n\ndef calc_joint_p_xy(x, y):\n    (joint_p_xy, xedges, yedges) = np.histogram2d(\n        x, y, bins=range(257), density=True)\n    return joint_p_xy\n\n\ndef calc_p_y(joint_p_xy):\n    \"\"\"Calculate p(y).\"\"\"\n    return np.sum(joint_p_xy, axis=0)\n\n\ndef calc_p_x(joint_p_xy):\n    \"\"\"Calculate p(x).\"\"\"\n    return np.sum(joint_p_xy, axis=1)\n\n\ndef calc_I_p(P):\n    \"\"\"Calculate self-information from given probability distribution.\"\"\"\n    P = replace_0_with_eps(P)\n    return -np.log2(P)\n\n\ndef calc_H_p(P):\n    \"\"\"Compute entropy from given probability distribution.\"\"\"\n    return np.sum(P*calc_I_p(P))\n\n\ndef calc_joint_H_xy(joint_p_xy):\n    \"\"\"Calculate joint entropy H(XY).\"\"\"\n    return np.sum(joint_p_xy * calc_I_p(joint_p_xy))\n\n\ndef calc_cond_H_xy(joint_p_xy):\n    \"\"\"Calculate conditional entropy H(X|Y).\"\"\"\n    p_y = calc_p_y(joint_p_xy)\n    p_y = replace_0_with_eps(p_y)\n\n    # Extend p_y vertically to 255 rows:\n    # p_y_matrix =\n    # [ [ p(y_0), p(y_1), ..., p(y_255) ],\n    # [ p(y_0), p(y_1), ..., p(y_255) ],\n    # ...\n    # [ p(y_0), p(y_1), ..., p(y_255) ] ]\n    p_y_matrix = np.repeat([p_y], joint_p_xy.shape[0], axis=0)\n\n    return np.sum(joint_p_xy * calc_I_p(joint_p_xy / p_y_matrix))\n\n\ndef calc_cond_H_yx(joint_p_xy):\n    \"\"\"Calculate conditional entropy H(Y|X).\"\"\"\n    p_x = calc_p_x(joint_p_xy)\n    p_x = replace_0_with_eps(p_x)\n\n    # Transpose p_x and extend it horizontally to 
255 columns:\n # p_x_matrix =\n # [ [ p(x_0), p(x_0), ..., p(x_0) ],\n # [ p(x_1), p(x_1), ..., p(x_1) ],\n # ...\n # [ p(x_255), p(x_255), ..., p(x_255) ] ]\n p_x_matrix = np.repeat(np.array([p_x]).T, joint_p_xy.shape[1], axis=1)\n\n return np.sum(joint_p_xy * calc_I_p(joint_p_xy / p_x_matrix))\n\n\ndef count_binary_1(x):\n # Create a Look-Up Table for number of binary '1' in each byte.\n LUT_num_of_1 = np.array([bin(byte).count(\"1\") for byte in range(256)])\n num_of_1 = np.sum(LUT_num_of_1[x])\n return num_of_1\n\n\ndef replace_0_with_eps(P):\n \"\"\"Replace zeros with the smallest numbers.\"\"\"\n # For probabilities, it makes virtually no difference, but for computation it can prevent some undesired results such as 0*log2(0)=nan.\n return np.where(P == 0, np.spacing(1), P)\n\n###\n# I/O\n###\n\n\ndef read_file_as_bytes(in_file_name):\n \"\"\"Read a file as bytes and return a uint8 array.\"\"\"\n return np.fromfile(in_file_name, dtype='uint8')\n\n\ndef write_results(out_file_name, data):\n \"\"\"Write a row of data into a CSV file.\"\"\"\n\n # Write the header for all columns, if the output file does not exist.\n if not Path(out_file_name).is_file():\n with open(out_file_name, 'w', newline='') as out_file:\n csvwriter = csv.writer(out_file, quoting=csv.QUOTE_ALL)\n csvwriter.writerow(\n ['X', 'Y', 'H(X)', 'H(Y)', 'I(X;Y)'])\n\n with open(out_file_name, 'a', newline='') as out_file:\n csvwriter = csv.writer(out_file, quoting=csv.QUOTE_ALL)\n csvwriter.writerow(data)\n\n###\n# Parse command line arguments.\n###\n\n\ndef parse_sys_args():\n \"\"\"Parse command line arguments.\"\"\"\n\n # Define syntax for command line arguments.\n parser = argparse.ArgumentParser(\n description='Calculate information for BSC.')\n parser.add_argument('X', help='path to the channel input file')\n parser.add_argument('Y', help='path to the channel output file')\n parser.add_argument(\n 'OUTPUT', help='path to the output file to append results')\n parser.add_argument('-v', '--verbose', action='store_true',\n help='display detailed messages')\n\n if len(sys.argv) == 1:\n # No arguments specified.\n parser.print_help()\n parser.exit()\n else:\n args = parser.parse_args()\n\n return args\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"ICT00-课程设计-文档/src/byteChannel_calc.py","file_name":"byteChannel_calc.py","file_ext":"py","file_size_in_byte":6418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"634352101","text":"def updateInventory(arr1, arr2):\n # All inventory must be accounted for or you're fired!\n arr = arr1 + arr2\n names = []\n toReturn = []\n\n # convert to dict\n items = {}\n for count, name in arr:\n if name in items:\n items[name] += count\n else: \n items[name] = count\n names.append(name)\n \n names.sort()\n\n # populate list to return\n for name in names:\n toReturn.append([items[name],name])\n \n # print toReturn\n for i in toReturn:\n print(i)\n\n\n\n\n\n\n# Example inventory lists\ncurInv = [\n [21, \"Bowling Ball\"],\n [2, \"Dirty Sock\"],\n [1, \"Hair Pin\"],\n [5, \"Microphone\"]\n];\n\nnewInv = [\n [2, \"Hair Pin\"],\n [3, \"Half-Eaten Apple\"],\n [67, \"Bowling Ball\"],\n [7, \"Toothpaste\"]\n];\n\nupdateInventory(curInv, newInv);\n","sub_path":"Inventory Update/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"607108475","text":"import sys\n\nfrom time import sleep\nfrom random import randint, 
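For a sanity check on the figures printed by byteChannel_calc.py above: a binary symmetric channel has the closed form I(X;Y) = H(Y) - H(p), where H(.) is the binary entropy function. A minimal sketch of that check (binary_entropy and the sample values p and p_x0 are illustrative, not part of the original script):

```python
import numpy as np

def binary_entropy(p):
    # H(p) in information bits per binary symbol; clipping avoids 0*log2(0).
    p = np.clip(p, np.spacing(1), 1 - np.spacing(1))
    return -(p * np.log2(p) + (1 - p) * np.log2(1 - p))

p = 0.1      # hypothetical BSC crossover probability
p_x0 = 0.5   # hypothetical input distribution p(x=0)
p_y0 = p_x0 * (1 - p) + (1 - p_x0) * p
I_xy = binary_entropy(p_y0) - binary_entropy(p)
print(I_xy)  # ~0.531 bit/bit for an equiprobable input
```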
choice\nfrom InstagramAPI import InstagramAPI\nfrom json_loader import get_account\nfrom json_loader import get_settings\nfrom db_manager import db_manager\n\nfrom instapy import InstaPy\n\nclass img_bot:\n def __init__(self):\n self.api = None\n self.pic_api = None\n self.db = None\n self.photo = None\n self.conn = None\n self.tags = None\n self.similar_acc = None\n\n def db_connect(self, subreddit):\n path, img_path = get_settings(subreddit)\n\n self.db = db_manager(path)\n self.db.table = subreddit\n self.conn = self.db.create_connect()\n self.db.count_row(self.conn)\n return img_path\n\n def start_session(self):\n \"\"\"\n Startes a session for each account in the json file. By creating a random schedule\n of functions to go through before moving to the next account.\"\"\"\n accounts = get_account()\n for account in accounts:\n delay = randint(7,18)\n print(\"\\nSessions started for user: \", account[\"username\"])\n subreddit = account[\"subreddit\"]\n # These are list which get replaced with every loop\n self.similar_acc = account[\"similar_accounts\"]\n self.tags = account[\"tags\"]\n self.pic_api = InstagramAPI(account[\"username\"], account[\"password\"])\n self.api = InstaPy(username=account[\"username\"], password=account[\"password\"], multi_logs=True)\n self.api.login()\n sleep(delay)\n img_path = self.db_connect(subreddit)\n self.schedule()\n sleep(delay)\n self.pic_api.login()\n self.post_image(img_path)\n self.end_session()\n print(\"Session done for user: \", account[\"username\"])\n\n def schedule(self):\n \"\"\"\n Chooses what functions to run.\n Sleeps for a minimum of 10 minutes to avoid bans.\n \"\"\"\n runs = randint(1,8)\n functions = [self.follow_people, self.interact_feed, self.unfollow_inactive,\n self.interact_tags]\n while runs > 0:\n try:\n choice(functions)()\n except:\n continue\n cooldown = randint(601, 1800)\n sleep(cooldown)\n runs -= 1\n return\n\n def follow_people(self):\n \"\"\"Follow people from a list of similar acccounts\"\"\"\n follow = randint(20, 150)\n pictures = randint(1,5)\n interact = randint(20, 45)\n print(\"Started following users. Total number to follow: \", follow)\n self.api.set_user_interact(amount=pictures, randomize=True, percentage=interact, media='Photo')\n self.api.follow_user_followers(self.similar_acc, amount=follow, randomize=True, sleep_delay=600)\n return\n\n\n def unfollow_inactive(self):\n \"\"\"Unfollows inactive users \"\"\"\n unfollow = randint(20, 150)\n print(\"Started unfollwing people. Total number to unfollow: \", unfollow)\n self.api.set_dont_unfollow_active_users(enabled=True, posts=3)\n self.api.unfollow_users(amount=unfollow, onlyNotFollowMe=True, sleep_delay=600)\n return\n\n def interact_feed(self):\n \"\"\"Like pictures in the feed and go into profiles and like pictures.\"\"\"\n likes = randint(2,35)\n interact = randint(20, 45)\n pictures = randint(1, 5)\n print(\"Started interacting with feed. Total likes to give: \", likes)\n self.api.set_user_interact(amount=pictures, randomize=True, percentage=interact, media='Photo')\n self.api.like_by_feed(amount=likes, randomize=True, interact=True)\n return\n\n def interact_tags(self):\n \"\"\"Interact with users based off tags.\"\"\"\n interact = randint(20, 45)\n tag_likes = randint(5, 30)\n pictures = randint(1, 5)\n print(\"Started looking at tags. 
Total likes to give: \", tag_likes)\n self.api.set_user_interact(amount=pictures, randomize=True, percentage=interact, media='Photo')\n self.api.like_by_tags(self.tags, amount=tag_likes, interact=True)\n return\n\n def end_session(self):\n self.db.close_connection(self.conn)\n self.pic_api.logout()\n self.api.end()\n\n def post_image(self, path):\n \"\"\"Post a random picture from database and delete row from the database \"\"\"\n\n row = self.db.get_random_row(self.conn, 1)\n\n id = row[0]\n img_path = path + row[1] + '0.jpg'\n description = row[2]\n # Try uploading the picture and if that's not working then just delete the image and db entry\n while True:\n try:\n self.pic_api.uploadPhoto(img_path, caption=description)\n print(\"Image uploaded: \" + row)\n print(\"Removing from database\" + str(id))\n self.db.delete_row(self.conn, row)\n break\n except:\n print(\"Problem detected. Deleted from database: \" + str(id), sys.exc_info()[0])\n self.db.delete_row(self.conn, row)\n return\n","sub_path":"img_bot.py","file_name":"img_bot.py","file_ext":"py","file_size_in_byte":5101,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"614267716","text":"import pkg_resources\n\n__name__ = 'Another World'\n__description__ = 'This is another example plugin for Joseph'\n__version__ = 'V0.1'\n__license__ = 'LICENSE'\n__readme__ = 'README.md'\n__author__ = 'Niek Keijzer'\n__website__ = 'www.example.com'\n\n\ndef run():\n database = {}\n\n for ep in pkg_resources.iter_entry_points(group='database'):\n database.update({ep.name: ep.load()})\n\n try:\n db = database['database']()\n except ImportError:\n # Do something without permissions\n pass\n else:\n main()\n\n\ndef main():\n print('hello from another plugin')\n","sub_path":"joseph/plugins/another_world/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"332075512","text":"# -*- coding: utf-8 -*-\r\n\r\nfrom odoo import api, models, osv,fields\r\n# from openerp import osv,fields,models, api\r\nfrom odoo.tools.translate import _\r\nfrom datetime import datetime\r\n\r\n\r\nclass model_contract(models.Model):\r\n\r\n _name= \"hr.model.contract\"\r\n _description=u\"Modèle de contrat\"\r\n\r\n\r\n @api.onchange('hr_convention_id')\r\n def on_change_convention_id(self):\r\n if self.hr_convention_id :\r\n return {'domain':{'hr_secteur_id':[('hr_convention_id','=',self.hr_convention_id.id)]}}\r\n else :\r\n return {'domain':{'hr_secteur_id':[('hr_convention_id','=',False)]}}\r\n\r\n\r\n @api.onchange('hr_secteur_id')\r\n def on_change_secteur_id(self):\r\n if self.hr_secteur_id :\r\n return {'domain':{'categorie_salariale':[('hr_secteur_activite_id','=', self.hr_secteur_id.id)]}}\r\n else :\r\n return {'domain':{'categorie_salariale':[('hr_secteur_activite_id','=',False)]}}\r\n \r\n\r\n\r\n @api.onchange(\"categorie_salariale\")\r\n def change_categorie(self):\r\n res = {'value':{\r\n 'salaire_base':0,\r\n }\r\n }\r\n if self.categorie_salariale and self.categorie_salariale.salaire_base:\r\n self.salaire_base= self.categorie_salariale.salaire_base\r\n else :\r\n self.salaire_base= 0\r\n\r\n \r\n\r\n name= fields.Char(\"Designation\",size=128,required=True)\r\n salaire_base= fields.Integer(\"Salaire de base\",required=True)\r\n prime_ids= fields.One2many(\"hr.payroll.prime.montant\",\"model_contract_id\",\"Primes\")\r\n categorie_salariale= 
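The another_world plugin above looks up a database backend through pkg_resources entry points in the group 'database'. For context, a provider package could register such an entry point in its setup.py roughly as follows (package and module names here are invented for illustration):

```python
from setuptools import setup

setup(
    name='example-database-backend',   # hypothetical provider package
    version='0.1',
    py_modules=['example_db'],
    entry_points={
        'database': [
            # ep.name -> 'database'; ep.load() returns the Database class
            'database = example_db:Database',
        ],
    },
)
```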
fields.Many2one(\"hr.categorie.salariale\",\"Categorie salariale\",required=True,\r\n domain=\"[('hr_secteur_activite_id', '=', secteur_activite_id)]\")\r\n titre_poste= fields.Many2one(\"hr.job\",\"Titre du Poste\",required=True)\r\n type_contract= fields.Many2one(\"hr.contract.type\",\"Type de conntract\",required=True)\r\n structure_salariale= fields.Many2one('hr.payroll.structure',\"Structure salariale\",required=True)\r\n convention_id= fields.Many2one(\"hr.convention\",\"Convention\",required=True)\r\n secteur_activite_id= fields.Many2one(\"hr.secteur.activite\",\"Secteur d'activité\",required=True)\r\n\r\n\r\nclass contract_generate(models.Model):\r\n\r\n api.multi\r\n def generate_contract(self):\r\n contract_obj = self.env[\"hr.contract\"]\r\n prime_obj= self.env['hr.payroll.prime.montant']\r\n for employee in self.employee_ids:\r\n vals={\r\n 'name': \"Contract %s\"%employee.name,\r\n \"wage\": self.model_contract_id.salaire_base,\r\n \"employee_id\":employee.id,\r\n \"sursalaire\": 0,\r\n \"categorie_salariale_id\": self.model_contract_id.categorie_salariale.id,\r\n 'job_id': self.model_contract_id.titre_poste.id,\r\n 'struct_id': self.model_contract_id.structure_salariale.id,\r\n 'hr_convention_id': self.model_contract_id.convention_id.id,\r\n 'hr_secteur_id': self.model_contract_id.secteur_activite_id.id,\r\n 'type_id': self.model_contract_id.type_contract.id,\r\n }\r\n\r\n contract = contract_obj.create(vals)\r\n for prime in self.model_contract_id.prime_ids :\r\n prime_values={\r\n 'prime_id':prime.prime_id.id,\r\n 'montant_prime':prime.montant_prime,\r\n 'contract_id':contract.id,\r\n }\r\n prime_montant_id = prime_obj.create(prime_values)\r\n \r\n _name=\"hr.contract.generate\"\r\n\r\n name= fields.Char(\"Name\",size=128,required=True)\r\n model_contract_id= fields.Many2one(\"hr.model.contract\",'Model',required=True)\r\n date_generate= fields.Datetime(\"Date de génération\")\r\n employee_ids= fields.Many2many(\"hr.employee\",\"hr_model_contract_rel\",\"hr_model_contract_id\",\"employee_id\",\"Employees\")\r\n\r\n\r\nclass hr_payroll_prime_montant(models.Model):\r\n _inherit = 'hr.payroll.prime.montant' \r\n _name=\"hr.payroll.prime.montant\"\r\n\r\n\r\n model_contract_id= fields.Many2one(\"hr.model.contract\",\"Modèle de contrat\")\r\n","sub_path":"hr_contract_model/models/hr_contract_model.py","file_name":"hr_contract_model.py","file_ext":"py","file_size_in_byte":4124,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"407014289","text":"import vigenereCipher, vigenereHacker, random\n# this function takes a given string and strips it of all non letter chars\n# also coverts all lower case letters to upper case\ndef alpha_only(plaintext):\n final = ''\n for char in plaintext:\n if char.isalpha() == True:\n final += char.upper()\n return final\n\n#this function insert was retrived from:\n#https://stackoverflow.com/questions/4022827/insert-some-string-into-given-string-at-given-index-in-python\n#by Sim Mak\n# this function inserts a sting in the middle of another given sting at a\n# certian position\ndef insert (source_str, insert_str, pos):\n return source_str[:pos]+insert_str+source_str[pos:]\n\ndef antiKasiski(key, plaintext):\n alphabet = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'\n plaintext = alpha_only(plaintext)\n encrypted = vigenereCipher.encryptMessage(key, plaintext)\n\n # get the repeating sequnces\n a = vigenereHacker.findRepeatSequencesSpacings(encrypted)\n\n #saving this string for later to be used in insert function\n 
final = encrypted\n\n#this loop goes through the repeated sequences and for every time in the string it\n# appears that isn't the first time it will insert a random letter between it\n    for key in a:\n        length = len(encrypted) - len(key)\n        x = len(key)\n        count =0\n        for i in range(0, length):\n            cluster = encrypted[i:i+x]\n            if cluster == key:\n                if count == 0:\n                    count +=1\n                else:\n                    # puts rand letter into the string (rand_idx avoids shadowing the builtin int)\n                    rand_idx = random.randint(0, 25)\n                    letter = alphabet[rand_idx]\n\n                    final = insert(final, letter, i)\n    return final\n\n#print(antiKasiski('WICK', 'THOSEPOLICEOFFICERSOFFEREDHERARIDEHOMETHEYTELLTHEMAJOKETHOSEBARBERSLENTHERALOTOFMONEY'))\n","sub_path":"02_Encryption/07_Vigenere_Hacker/preproc.py","file_name":"preproc.py","file_ext":"py","file_size_in_byte":1822,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"170995135","text":"import sys\n\nimport subprocess\n\nimport os\nimport requests\nimport json\n\nconf = None\n\n\ndef load_conf(file_path):\n    global conf\n    try:\n        with open(file_path) as f:\n            conf = json.loads(f.read())\n    except Exception as e:\n        print(str(e))\n        print('Not found %s, or configuration file not in JSON format' % file_path)\n        sys.exit(1)\n\n\ndef generate_command(trail):\n    o_trail = json.loads(trail)\n    cmd = []\n    for param in o_trail:\n        cmd.append('--%s=%s' % (param['name'], param['value']))\n    return cmd\n\n\ndef parse_metric(metric):\n    return dict({\n        \"value\": metric\n    })\n\n\ndef run():\n    argv = sys.argv\n    conf_path = argv[1]\n    trail_id = argv[2]\n    trail = argv[3]\n\n    load_conf(conf_path)\n    print(conf)\n\n    # call the objective function and get the output\n    cmd = generate_command(trail)\n\n    parameter_list = [conf['model'][\"python-version\"], conf['model'][\"entry\"]] + cmd\n    # parameter_list = parameter_list + values\n    os.chdir(conf['model']['project-root'])\n    proc = subprocess.Popen(parameter_list, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n    output = str(proc.communicate()[0])\n\n    # format the output\n    start = 2\n    end = len(output)-3\n    metric = float(output[start:end])\n    metric = parse_metric(metric)\n\n    # print(metric)\n\n    response_url = conf['response_url']\n    r = requests.post(\n        url=response_url,\n        json={\n            \"trail_id\": trail_id,\n            \"metric\": metric\n        }\n    )\n    # print(r.text)\n\nif __name__ == \"__main__\":\n    run()\n","sub_path":"advisorclient/run_trail.py","file_name":"run_trail.py","file_ext":"py","file_size_in_byte":1560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"118391834","text":"name = input('Please input your name:')\nscore = input('Please input python score:')\n\nprint(type(name))\nprint(type(score))\n\nnumstr = input(\"Please input your formula:\")\nnumber = eval(numstr)\nprint(\"result: %6.3f\" % number)\n\nn1, n2, n3 = eval(input(\"Please enter 3 numbers (separated by commas):\"))\n\naverage = (n1 + n2 + n3)/3\nprint(\"Average of the 3 numbers: %6.2f\" % average)\n","sub_path":"20191120/s3-1.py","file_name":"s3-1.py","file_ext":"py","file_size_in_byte":362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"549101791","text":"import tweepy\nimport pandas as pd\n\napi_key = pd.read_pickle('auth/twitter_auth_key.pickle')\n\nauth = tweepy.OAuthHandler(api_key['consumer_key'], api_key['consumer_secret'])\nauth.set_access_token(api_key['access_token'], api_key['access_secret'])\n\napi = tweepy.API(auth)\n\ntweets = tweepy.Cursor(api.search,q=\"Amazon\",count=100,result_type=\"popular\",include_entities=True,lang=\"en\").items()\nnew_ids = 
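The random insertions performed by antiKasiski() above target the Kasiski examination, which guesses the Vigenère key length from the distances between repeated ciphertext sequences: those distances tend to be multiples of the key length, so their common factors expose it. A minimal sketch of that idea (the spacing values are made up):

```python
from functools import reduce
from math import gcd

spacings = [28, 42, 70]        # hypothetical distances between repeats
print(reduce(gcd, spacings))   # 14 -> the key length likely divides 14
```

Inserting random letters shifts every later repeat, breaking these regular spacings.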
set([tweet.id for tweet in tweets])\n# print(len(new_ids))\n# for id in new_ids:\n# print(id,type(id))\n\nold_ids = set(pd.Series.from_csv('hist_tweets.csv',sep=';').tolist())\n\nall_ids = old_ids.union(new_ids)\n\nprint(len(all_ids))\n\nall_ids = pd.Series(list(all_ids))\nall_ids.to_csv('hist_tweets.csv',sep=';')\n","sub_path":"twitter_filter/tweet_tk/twitter_hist.py","file_name":"twitter_hist.py","file_ext":"py","file_size_in_byte":707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"571423963","text":"#!/usr/bin/env python3\n\"\"\"\nbuilds a modified version of the LeNet-5 architecture using tensorflow.\n\"\"\"\n\nimport tensorflow as tf\n\n\n# https://www.tensorflow.org/api_docs/python/tf/keras/layers/Conv2D\n# tf.keras.layers.Conv2D(\n# filters,\n# kernel_size,\n# strides=(1, 1),\n# padding='valid',\n# data_format=None,\n# dilation_rate=(1, 1),\n# activation=None,\n# use_bias=True,\n# kernel_initializer='glorot_uniform',\n# bias_initializer='zeros',\n# kernel_regularizer=None,\n# bias_regularizer=None,\n# activity_regularizer=None,\n# kernel_constraint=None,bias_constraint=None,\n# **kwargs )\n\n# https://www.tensorflow.org/api_docs/python/tf/keras/layers/MaxPool2D\n# tf.keras.layers.MaxPool2D(\n# pool_size=(2, 2),\n# strides=None,\n# padding='valid',\n# data_format=None,\n# **kwargs )\n\n# tf.keras.layers.Dense(\n# units,\n# activation=None,\n# use_bias=True,\n# kernel_initializer='glorot_uniform',\n# bias_initializer='zeros',\n# kernel_regularizer=None,\n# bias_regularizer=None,\n# activity_regularizer=None,\n# kernel_constraint=None,\n# bias_constraint=None,\n# **kwargs )\n\n# tf.keras.layers.Softmax(\n# axis=-1,\n# **kwargs )\n\ndef lenet5(x, y):\n \"\"\"\n builds a modified version of the LeNet-5 architecture using tensorflow\n :param x: x is a tf.placeholder of shape (m, 28, 28, 1) containing the\n input images for the network\n m is the number of images\n :param y: y is a tf.placeholder of shape (m, 10) containing the one-hot\n labels for the network\n :return:\n \"\"\"\n # el initializer 3000\n init = tf.contrib.layers.variance_scaling_initializer()\n\n # la mierda que recibe inputs\n primer_layer = tf.layers.Conv2D(filters=6, kernel_size=[5, 5],\n padding='same', kernel_initializer=init,\n activation='relu')\n\n x = primer_layer(x)\n\n # ahora vamos con un max poolincibiribiri\n segundo_layer = tf.layers.MaxPooling2D(pool_size=(2, 2),\n strides=(2, 2))\n\n x = segundo_layer(x)\n\n # ahora vamos con otra puta convolutional\n tercer_layer = tf.layers.Conv2D(filters=16, kernel_size=[5, 5],\n padding='valid',\n kernel_initializer=init,\n activation='relu')\n\n x = tercer_layer(x)\n\n # ahora vamos con otra piscina la mas chimba\n cuarto_layer = tf.layers.MaxPooling2D(pool_size=(2, 2),\n strides=(2, 2))\n\n x = cuarto_layer(x)\n\n # esta mierda es para pasar las imagenes a 1D porque la vaina no\n # funciona en otras dimensions\n quinto_layer = tf.layers.Flatten()\n\n x = quinto_layer(x)\n\n # ahora vamos con la vieja confiable de un fully connected - orgia total.\n sexto_layer = tf.layers.Dense(units=120, activation='relu',\n kernel_initializer=init)\n\n x = sexto_layer(x)\n\n # otra vieja confiable\n septimo_layer = tf.layers.Dense(units=84, activation='relu',\n kernel_initializer=init)\n\n x = septimo_layer(x)\n\n # una ultima capa sin activacion no se porque putas\n # https://stackoverflow.com/questions/44540769/tensorflow-cnn-dense-\n # layer-as-softmax-layer-input\n # aqui recomiendan hacerlo aparte como si eso fuera 
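One portability note on twitter_hist.py above: pd.Series.from_csv was deprecated and then removed in pandas 1.0, so on a current install the read-back step needs an equivalent call, e.g. (a sketch against the same hist_tweets.csv the script writes, which has no header row and the index in column 0):

```python
import pandas as pd

old_ids = set(pd.read_csv('hist_tweets.csv', sep=';', header=None,
                          index_col=0).iloc[:, 0].tolist())
```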
chistoso, pero es\n # porque la vieja quiere mostrar el resultado sin aplicarle el softmax\n # para ver los numeritos feitos\n ultimo_layer_sin_activacion = tf.layers.Dense(units=10,\n kernel_initializer=init)\n\n x = ultimo_layer_sin_activacion(x)\n\n ultimo_layer_con_softmax = tf.nn.softmax(x)\n\n # toca definir el loss para retornarlo\n loss = tf.losses.softmax_cross_entropy(y, x)\n\n # como se optimiza la enseñanza del bb\n optimizer = tf.train.AdamOptimizer().minimize(loss)\n\n # el accuracy\n pred = tf.argmax(x, 1)\n eq = tf.equal(pred, tf.argmax(y, 1))\n accuracy = tf.reduce_mean(tf.cast(eq, tf.float32))\n\n return ultimo_layer_con_softmax, optimizer, loss, accuracy\n","sub_path":"supervised_learning/0x07-cnn/4-lenet5.py","file_name":"4-lenet5.py","file_ext":"py","file_size_in_byte":4066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"369741222","text":"# Sample code from the TorchVision 0.3 Object Detection Finetuning Tutorial\n# http://pytorch.org/tutorials/intermediate/torchvision_tutorial.html\n\nimport os\nimport numpy as np\nimport torch\nfrom PIL import Image\n\nimport torchvision\nfrom torchvision.models.detection.faster_rcnn import FastRCNNPredictor\nfrom torchvision.models.detection.mask_rcnn import MaskRCNNPredictor\n\nimport sys\nsys.path.append(\"./detection\")\nfrom engine import train_one_epoch, evaluate\nimport utils\nimport transforms as T\nimport cv2\nimport cv2_util\n\nimport random\n\n\nclass PennFudanDataset(object):\n def __init__(self, root, transforms):\n self.root = root\n self.transforms = transforms\n # load all image files, sorting them to\n # ensure that they are aligned\n self.imgs = list(sorted(os.listdir(os.path.join(root, \"PNGImages\"))))\n self.masks = list(sorted(os.listdir(os.path.join(root, \"PedMasks\"))))\n\n def __getitem__(self, idx):\n # load images ad masks\n img_path = os.path.join(self.root, \"PNGImages\", self.imgs[idx])\n mask_path = os.path.join(self.root, \"PedMasks\", self.masks[idx])\n img = Image.open(img_path).convert(\"RGB\")\n # note that we haven't converted the mask to RGB,\n # because each color corresponds to a different instance\n # with 0 being background\n mask = Image.open(mask_path)\n\n mask = np.array(mask)\n # instances are encoded as different colors\n obj_ids = np.unique(mask)\n # first id is the background, so remove it\n obj_ids = obj_ids[1:]\n\n # split the color-encoded mask into a set\n # of binary masks\n masks = mask == obj_ids[:, None, None]\n\n # get bounding box coordinates for each mask\n num_objs = len(obj_ids)\n boxes = []\n for i in range(num_objs):\n pos = np.where(masks[i])\n xmin = np.min(pos[1])\n xmax = np.max(pos[1])\n ymin = np.min(pos[0])\n ymax = np.max(pos[0])\n boxes.append([xmin, ymin, xmax, ymax])\n\n boxes = torch.as_tensor(boxes, dtype=torch.float32)\n # there is only one class\n labels = torch.ones((num_objs,), dtype=torch.int64)\n masks = torch.as_tensor(masks, dtype=torch.uint8)\n\n image_id = torch.tensor([idx])\n area = (boxes[:, 3] - boxes[:, 1]) * (boxes[:, 2] - boxes[:, 0])\n # suppose all instances are not crowd\n iscrowd = torch.zeros((num_objs,), dtype=torch.int64)\n\n target = {}\n target[\"boxes\"] = boxes\n target[\"labels\"] = labels\n target[\"masks\"] = masks\n target[\"image_id\"] = image_id\n target[\"area\"] = area\n target[\"iscrowd\"] = iscrowd\n\n if self.transforms is not None:\n img, target = self.transforms(img, target)\n\n return img, target\n\n def __len__(self):\n return len(self.imgs)\n\n\ndef 
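The tf.layers/tf.contrib APIs used in 4-lenet5.py above were removed in TensorFlow 2.x; the same layer stack can be written compactly with tf.keras. A sketch (the (28, 28, 1) input shape follows the docstring above, and he_normal matches the variance-scaling initializer):

```python
import tensorflow as tf

def lenet5_keras():
    init = tf.keras.initializers.he_normal()
    return tf.keras.Sequential([
        tf.keras.layers.Conv2D(6, 5, padding='same', activation='relu',
                               kernel_initializer=init, input_shape=(28, 28, 1)),
        tf.keras.layers.MaxPool2D(pool_size=2, strides=2),
        tf.keras.layers.Conv2D(16, 5, padding='valid', activation='relu',
                               kernel_initializer=init),
        tf.keras.layers.MaxPool2D(pool_size=2, strides=2),
        tf.keras.layers.Flatten(),
        tf.keras.layers.Dense(120, activation='relu', kernel_initializer=init),
        tf.keras.layers.Dense(84, activation='relu', kernel_initializer=init),
        tf.keras.layers.Dense(10),  # logits; softmax applied separately, as above
    ])
```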
get_model_instance_segmentation(num_classes):\n # load an instance segmentation model pre-trained pre-trained on COCO\n model = torchvision.models.detection.maskrcnn_resnet50_fpn(pretrained=True)\n\n # get number of input features for the classifier\n in_features = model.roi_heads.box_predictor.cls_score.in_features\n # replace the pre-trained head with a new one\n model.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes)\n\n # now get the number of input features for the mask classifier\n in_features_mask = model.roi_heads.mask_predictor.conv5_mask.in_channels\n hidden_layer = 256\n # and replace the mask predictor with a new one\n model.roi_heads.mask_predictor = MaskRCNNPredictor(in_features_mask,\n hidden_layer,\n num_classes)\n\n return model\n\n\ndef get_transform(train):\n transforms = []\n transforms.append(T.ToTensor())\n if train:\n transforms.append(T.RandomHorizontalFlip(0.5))\n return T.Compose(transforms)\n\n\ndef random_color():\n b = random.randint(0,255)\n g = random.randint(0,255)\n r = random.randint(0,255)\n \n return (b,g,r)\n\n\ndef toTensor(img):\n assert type(img) == np.ndarray,'the img type is {}, but ndarry expected'.format(type(img))\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n img = torch.from_numpy(img.transpose((2, 0, 1)))\n return img.float().div(255) # 255也可以改为256\n\n\ndef PredictImg( image, model,device):\n # img, _ = dataset_test[0]\n img = cv2.imread(image)\n result = img.copy()\n dst=img.copy()\n img=toTensor(img)\n\n names = {'0': 'background', '1': 'train'}\n # put the model in evaluati\n # on mode\n model.eval()\n with torch.no_grad():\n prediction = model([img.to(device)])\n\n boxes = prediction[0]['boxes']\n labels = prediction[0]['labels']\n scores = prediction[0]['scores']\n masks=prediction[0]['masks']\n \n m_bOK=False\n for idx in range(boxes.shape[0]):\n if scores[idx] >= 0.8:\n m_bOK=True\n color=random_color()\n mask=masks[idx, 0].mul(255).byte().cpu().numpy()\n thresh = mask\n contours, hierarchy = cv2_util.findContours(\n thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE\n )\n cv2.drawContours(dst, contours, -1, color, -1)\n\n x1, y1, x2, y2 = int(boxes[idx][0]), int(boxes[idx][1]), int(boxes[idx][2]), int(boxes[idx][3])\n name = names.get(str(labels[idx].item()))\n cv2.rectangle(result,(x1,y1),(x2,y2),color,thickness=2)\n cv2.putText(result, text=name, org=(x1, y1+10), fontFace=cv2.FONT_HERSHEY_SIMPLEX, \n fontScale=0.5, thickness=1, lineType=cv2.LINE_AA, color=color)\n\n dst1 = cv2.addWeighted(result, 0.7, dst, 0.3,0)\n\n if m_bOK:\n cv2.imwrite(\"result.png\", dst1)\n cv2.imshow('result', dst1)\n cv2.waitKey()\n cv2.destroyAllWindows()\n \n\ndef main():\n # train on the GPU or on the CPU, if a GPU is not available\n device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')\n\n # our dataset has two classes only - background and person\n num_classes = 2 # 需要修改种类\n train_num = 1200 # 训练集数目\n # use our dataset and defined transformations\n dataset = PennFudanDataset('PennFudanPed2', get_transform(train=True))\n dataset_test = PennFudanDataset('PennFudanPed2', get_transform(train=False))\n\n # split the dataset in train and test set\n indices = torch.randperm(len(dataset)).tolist()\n dataset = torch.utils.data.Subset(dataset, indices[:train_num]) # 训练集张数\n dataset_test = torch.utils.data.Subset(dataset_test, indices[train_num:]) # 测试集张数\n\n # define training and validation data loaders\n data_loader = torch.utils.data.DataLoader(\n dataset, batch_size=2, shuffle=True, num_workers=4,\n 
collate_fn=utils.collate_fn) # batch_size\n\n data_loader_test = torch.utils.data.DataLoader(\n dataset_test, batch_size=1, shuffle=False, num_workers=4,\n collate_fn=utils.collate_fn)\n\n # get the model using our helper function\n model = get_model_instance_segmentation(num_classes)\n\n # move model to the right device\n model.to(device)\n\n # construct an optimizer\n params = [p for p in model.parameters() if p.requires_grad]\n optimizer = torch.optim.SGD(params, lr=0.002,\n momentum=0.9, weight_decay=0.0005) # 学习率\n # and a learning rate scheduler\n lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer,\n step_size=3,\n gamma=0.1)\n\n model_without_ddp = model \n # let's train it for 10 epochs\n num_epochs = 5 # 训练次数\n \n for epoch in range(num_epochs):\n # train for one epoch, printing every 10 iterations\n train_one_epoch(model, optimizer, data_loader, device, epoch, print_freq=10)\n # update the learning rate\n lr_scheduler.step()\n # evaluate on the test dataset\n evaluate(model, data_loader_test, device=device)\n\n # utils.save_on_master({\n # 'model': model_without_ddp.state_dict(),\n # 'optimizer': optimizer.state_dict(),\n # 'lr_scheduler': lr_scheduler.state_dict()},\n # os.path.join('./', 'model_{}.pth'.format(epoch)))\n\n utils.save_on_master({\n 'model': model_without_ddp.state_dict()},\n os.path.join('./model/', 'train_seg_model_epoch_5.pth'))\n\n print(\"That's it!\")\n PredictImg(\"3.jpg\", model, device)\n\n \nif __name__ == \"__main__\":\n main()\n","sub_path":"Maskrcnn/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":8654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"554891649","text":"import urllib.request\nimport json\nimport shutil\nimport tempfile\nimport subprocess\n\nfrom pathlib import Path\nfrom base import BaseTool\n\n\nclass FlexInstaller(BaseTool):\n def download(self, local: Path):\n with urllib.request.urlopen('https://api.github.com/repos/JohnCoates/flexdecrypt/releases/latest') as response:\n info = json.loads(response.read())\n\n url = next(asset['browser_download_url']\n for asset in info['assets'] if asset['name'] == 'flexdecrypt.deb')\n\n with urllib.request.urlopen(url) as response, tmp.open('wb') as fp:\n shutil.copyfileobj(response, fp)\n\n def deploy(self, local: Path):\n remote = '/tmp/flexdecrypt.deb'\n subprocess.call(self.scp(str(local), remote, direction='up'))\n subprocess.call(self.ssh('dpkg', '-i', remote))\n subprocess.call(self.ssh('rm', remote))\n subprocess.call(self.ssh('apt-get', 'install', '-y', 'zip'))\n\n\nif __name__ == \"__main__\":\n import argparse\n import sys\n parser = argparse.ArgumentParser()\n parser.add_argument('port', type=int)\n parser.add_argument('-o', '--output', dest='output', action='store')\n parser.add_argument('-H', '--host', dest='host', action='store')\n parser.add_argument('-u', '--user', dest='user', action='store')\n\n opt = parser.parse_args()\n\n if opt.host and opt.user:\n i = FlexInstaller(opt.port, host=opt.host, user=opt.user)\n else:\n i = FlexInstaller(opt.port)\n\n cwd = Path(tempfile.gettempdir())\n tmp = cwd / 'flexdecrypt.deb'\n sys.stderr.write('downloading latest FlexDecrypt from GitHub\\n')\n i.download(tmp)\n sys.stderr.write('downloaded\\n')\n sys.stderr.write('deploying to iOS\\n')\n i.deploy(tmp)\n 
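To reuse the checkpoint that train.py above writes, the model has to be rebuilt with the same replaced heads before loading the state dict. A sketch (path and class count mirror the training script; nothing else is assumed):

```python
import torch
import torchvision
from torchvision.models.detection.faster_rcnn import FastRCNNPredictor
from torchvision.models.detection.mask_rcnn import MaskRCNNPredictor

model = torchvision.models.detection.maskrcnn_resnet50_fpn(pretrained=False)
in_features = model.roi_heads.box_predictor.cls_score.in_features
model.roi_heads.box_predictor = FastRCNNPredictor(in_features, 2)
in_channels = model.roi_heads.mask_predictor.conv5_mask.in_channels
model.roi_heads.mask_predictor = MaskRCNNPredictor(in_channels, 256, 2)

state = torch.load('./model/train_seg_model_epoch_5.pth', map_location='cpu')
model.load_state_dict(state['model'])
model.eval()
```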
sys.stderr.write('done\\n')\n\n\n","sub_path":"backend/fruit/get-flex.py","file_name":"get-flex.py","file_ext":"py","file_size_in_byte":1771,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"602932032","text":"import logging\n\nimport numpy as np\nfrom ..geometry import order_channels_by_distance\n\nlogger = logging.getLogger(__name__)\n\n\ndef get_waveforms(rec, neighbors, index, get_score, proj, spike_size,\n n_features, geom, nnt, th):\n\n \"\"\"Extract waveforms from detected spikes\n\n Parameters\n ----------\n recordings: matrix (observations, number of channels)\n Multi-channel recordings\n neighbors: matrix (number of channels, number of channel)\n Neighbors matrix\n index: matrix (number of spikes, 3)\n Spike index matrix, as returned from any of the detectors\n get_score: bool\n Whether or not to calculate scores, if False returns an array with 0s\n proj:\n ?\n spike_size:\n ?\n n_features:\n ?\n n_features:\n ?\n\n Returns\n -------\n score: list\n List with n_channels elements, each element contains the data whose\n main channel is the i channel where i is the index in the list\n clr_index: list\n List with n_channels elements, each element contains the indexes\n for the spikes indicating whether it was a clear spike or not\n spike_times: list\n List with n_channels elements, each element contains the spike time\n\n Notes\n -----\n Le'ts consider a single channel recording V, where V is a vector of\n length 1 x T. When a spike is detected at time t, then (V_(t-R),\n V_(t-R+1), ..., V_t, V_(t+1),...V_(t+R)) is going to be a waveform.\n (a small snippet from the recording around the spike time)\n \"\"\"\n # column ids for index matrix\n SPIKE_TIME, MAIN_CHANNEL = 0, 1\n\n R = spike_size\n _, n_channels = rec.shape\n score = list()\n clr_idx = list()\n spike_time = list()\n\n # loop over every channel\n for c in range(n_channels):\n\n # get spikes whose main channel is the current channel\n idx = index[:, MAIN_CHANNEL] == c\n\n # check if there is at least one spike for the current channel\n nc = np.sum(idx)\n\n # get the indices for channel c neighbors\n (ch_idx, ) = np.where(neighbors[c])\n\n # if there are spikes for channel c, process them...\n if nc > 0:\n\n # get spike times\n spike_time_c = index[idx, SPIKE_TIME]\n\n # append to spike_times\n spike_time.append(spike_time_c)\n\n if get_score == 1:\n # get waveforms\n wf = np.zeros((nc, 2*R+1, ch_idx.shape[0]))\n\n for j in range(nc):\n wf[j] = rec[spike_time_c[j]+np.arange(-R, R+1)][:, ch_idx]\n\n temp, c_order = order_channels_by_distance(c, ch_idx, geom)\n clr_idx_c = nnt.nn_triage(wf[:,:,c_order], th)\n nc_clear = np.sum(clr_idx_c)\n\n else:\n nc_clear = 0\n\n if nc_clear > 0:\n clr_idx.append(np.where(clr_idx_c)[0])\n\n # get score\n wf = wf[clr_idx_c]\n score.append(np.swapaxes(np.matmul(np.reshape(np.swapaxes(wf, 1, 2), (-1, 2*R+1)), proj)\n .reshape((wf.shape[0], wf.shape[2], -1)), 1, 2))\n else:\n logger.debug('Spikes detected with main channel {c}, '\n 'but get_score is False, returning zeros in '\n 'score and clr_idx...'.format(c=c))\n clr_idx.append(np.zeros(0, 'int32'))\n score.append(np.zeros((0, n_features, np.sum(ch_idx))))\n\n # otherwise return zeros...\n else:\n logger.debug('No spikes detected with main channel {c}, '\n 'returning zeros...'.format(c=c))\n spike_time.append(np.zeros(0, 'int32'))\n clr_idx.append(np.zeros(0, 'int32'))\n score.append(np.zeros((0, n_features, np.sum(ch_idx))))\n\n return score, clr_idx, 
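The per-spike loop inside get_waveforms() above can be vectorized with fancy indexing, which builds every waveform window in one shot. A sketch (assumes each spike time is at least R samples away from both ends of the recording):

```python
import numpy as np

def extract_windows(rec, spike_times, R):
    # rec: (T, n_channels); spike_times: (n_spikes,) integer sample indices
    idx = spike_times[:, None] + np.arange(-R, R + 1)[None, :]  # (n_spikes, 2R+1)
    return rec[idx]  # (n_spikes, 2R+1, n_channels)
```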
spike_time\n","sub_path":"src/yass/preprocess/waveform.py","file_name":"waveform.py","file_ext":"py","file_size_in_byte":3900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"596189821","text":"import sqlalchemy as sa\nfrom .base import metadata\nimport datetime\n\nchats = sa.Table(\n \"chats\", \n metadata,\n\n sa.Column(\"db_id\", sa.Integer, primary_key=True, unique=True),\n sa.Column(\"id\", sa.Integer, index=True, unique=True, nullable=False),\n sa.Column(\"username\", sa.String, default=None),\n sa.Column(\"title\", sa.String, nullable=False),\n sa.Column(\"invite_link\", sa.String),\n\n sa.Column(\"is_banned\", sa.Boolean, default=False),\n\n # Data\n sa.Column(\"is_parse_smoothie\", sa.Boolean, default=False),\n\n sa.Column(\"created_at\", sa.DateTime, default=datetime.datetime.utcnow),\n sa.Column(\"updated_at\", sa.DateTime, default=datetime.datetime.utcnow)\n)","sub_path":"src/support/db/chats.py","file_name":"chats.py","file_ext":"py","file_size_in_byte":684,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"411017976","text":"\n# This function s called within the function latLonToExif. It converts a float representing a GPS latitude or longitude value\n# in the degrees decimal minutes format to degrees minutes seconds format and returns the three\n# values in a list.\n# e.g 5258.674 is 52 degrees 58.674 minutes. This converts to 52 degrees 58 minutes 40.44 seconds.\n# The returned list is [52,58,40.44]. Note seconds will be rounded to 2 decimal points. This may\n# introduce error into the final result. This may have to be corrected in after testing \ndef GGA_latLonToDegMinSec(latLon):\n \n decimalMinutes = round(latLon%100,3)\n #print(\"decimalMinutes: \")\n #print(decimalMinutes)\n degrees = int((latLon-decimalMinutes)/100)\n #print(\"dergees: \")\n #print(degrees)\n degDecMin= [degrees,decimalMinutes]\n #print(\"degDecMin: \")\n #print(degDecMin) \n return degDecMin\n\n# This function takes an NMEA GGA lat/lon string DDMM.m (degrees and decimal minutes) \n# and converts it to a string that is in Exif format 'Degrees/1,Minutes/1,Seconds/100'\n# e.g 5852.674 will be returned as '58/1,52/1,4044/100'\n#\ndef latLonToExif(latLonString):\n latLon = float(latLonString)\n if(latLon < 0):\n latLon*=-1\n degDecMin = GGA_latLonToDegMinSec(latLon)\n degrees = str(degDecMin[0])\n dec_min = str(degDecMin[1]) \n exifLatLon = degrees+\",\"+dec_min\n #exifLatLon = degrees+' deg '+minutes+'\\' '+seconds+'\\'\\' '\n return exifLatLon\n\n\n\n\n#This function takes a datetime.time(h,m,s) object and converts it into a unix timestamp string\n# of the form \"hh:mm:ss\". 
i.e datetime.time(11,34,28) -> '11:34:28' \ndef gpsTimeToExif(datetime):\n \n utc_string = str(datetime.hour)+':'+str(datetime.minute)+':'+str(datetime.second) \n return utc_string \n\n","sub_path":"gpsToExif.py","file_name":"gpsToExif.py","file_ext":"py","file_size_in_byte":1765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"378457007","text":"#import numpy as np\n#import cv2\n#\n# # Read the main image\n#\n#inputImage = cv2.imread(\"messi.png\")\n#\n# # Convert it to grayscale\n#\n#inputImageGray = cv2.cvtColor(inputImage, cv2.COLOR_BGR2GRAY)\n#\n# # Line Detection\n#\n#edges = cv2.Canny(inputImageGray,100,200,apertureSize = 3)\n#\n#minLineLength = 500 \n#maxLineGap = 5 \n#\n#lines = cv2.HoughLinesP(edges,1,np.pi/180,90,minLineLength,maxLineGap)\n#\n#for x in range(0, len(lines)):\n# for x1,y1,x2,y2 in lines[x]:\n# cv2.line(inputImage,(x1,y1),(x2,y2),(0,128,0),2)\n#\n##cv2.putText(inputImage,\"Tracks Detected\", (500,250), font, 0.5, 255)\n#\n# # Show result\n# \n#cv2.imwrite('messi_result.png',inputImage)\n##cv2.imshow(\"Trolley_Problem_Result\", inputImage)\n#\n#\n\n\n\n\n\n\n\t\nimport numpy as np \nimport cv2\n\ninputImage = cv2.imread(\"messi.png\")\ninputImageGray = cv2.cvtColor(inputImage, cv2.COLOR_BGR2GRAY)\nedges = cv2.Canny(inputImageGray,150,200,apertureSize = 3)\nminLineLength = 30\nmaxLineGap = 5\nlines = cv2.HoughLinesP(edges,cv2.HOUGH_PROBABILISTIC, np.pi/180, 30, minLineLength,maxLineGap)\nfor x in range(0, len(lines)):\n for x1,y1,x2,y2 in lines[x]:\n #cv2.line(inputImage,(x1,y1),(x2,y2),(0,128,0),2, cv2.LINE_AA)\n pts = np.array([[x1, y1 ], [x2 , y2]], np.int32)\n cv2.polylines(inputImage, [pts], True, (0,255,0))\n\nfont = cv2.FONT_HERSHEY_SIMPLEX\ncv2.putText(inputImage,\"Tracks Detected\", (500, 250), font, 0.5, 255)\ncv2.imwrite('final.png',inputImage)\ncv2.imshow(\"Trolley_Problem_Result\", inputImage)\n#cv2.imshow('edge', edges)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n\n","sub_path":"python code/Hough.py","file_name":"Hough.py","file_ext":"py","file_size_in_byte":1553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"497917833","text":"# Filename: Program5-20.py\r\n# Author: N. Anim\r\n# Date: Mar. 7, 2016\r\n# Purpose: To demonstarte the use and functioning of a counter\r\n# (or for) loop.\r\n# The algorithm is in Figure 5-21.\r\n# This script demonstrates the use of nested loops.\r\n# This particular script simulates a clock.\r\n\r\n\r\n\r\n# The hours loop\r\nhours = 0\r\nwhile (hours < 24):\r\n minutes = 0\r\n while (minutes < 60):\r\n seconds = 0\r\n while (seconds < 60):\r\n print(hours,':',minutes,':',seconds)\r\n seconds += 1\r\n minutes += 1\r\n hours += 1\r\n\r\n","sub_path":"Programs/Program5-20.py","file_name":"Program5-20.py","file_ext":"py","file_size_in_byte":577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"373625548","text":"\n\n# Make and run OASN files for varying discrete source locations. 
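A worked example of the NMEA degrees/decimal-minutes split that gpsToExif.py above performs, using the value from its own comments:

```python
# 5852.674 -> 58 degrees, 52.674 decimal minutes -> 52' 40.44"
lat = 5852.674
decimal_minutes = round(lat % 100, 3)            # 52.674
degrees = int((lat - decimal_minutes) / 100)     # 58
seconds = round((decimal_minutes % 1) * 60, 2)   # 40.44
print(degrees, int(decimal_minutes), seconds)    # 58 52 40.44
```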
\n# To Do: define bottom paramters and intervals\n# define outFileName\n# define water sound speed layers\n# input settings for each block\n\n\nimport numpy as np\nimport types\nimport os\nimport subprocess\nimport time\nimport random\n\n\n# define options\noptions = 'N J 0'\n\n# define frequencies\nfreqs = np.array([109,109,1,0]) # [min freq, max freq, # of freqs, integration contour offset]\n#9m dpeth: 109 163 232 385\n#54m depth:112 148 201 283\n# define sound speed layers\nsspfile=\"swellex_ssp.csv\"\n\nlayers = np.loadtxt(sspfile, delimiter=\",\") # import ssp for envrironment from file\n\n#roughness_layer = 3\n#roughness_param = [-0.2, 19.1, 2.5]\n\n# define array\narrayfile=\"vla_array.csv\"\n\narray = np.loadtxt(arrayfile, delimiter=\",\") # import ssp for envrironment from file\n\narray_temp1 = [int(inc) for inc in array[:,3]]\narray_temp2 = [int(inc) for inc in array[:,4]]\n\narr = np.ndarray(array.shape,dtype = object)\narr[:,0:3] = array[:,0:3]\narr[:,3] = array_temp1\narr[:,4] = array_temp2\n\n\n# define sources\nsrc = types.SimpleNamespace()\n\nsrc.src = np.array([0, 0, 0, 1]) # [surf. noise src strength, white noise level, deep src level, # of discr. srcs]\n\nif src.src[0] != 0: #if there is sea surface noise\n src.seacs = np.array([1400,1e8]) # [cmins, cmaxs]\n src.seawav = np.array([400,400,100]) # [samples in cont., discr, evanes.]\n\nif src.src[2] != 0: #if there is deep noise source\n src.dscs = np.array([1400,1e8]) # [cmins, cmaxs]\n src.dswav = np.array([400,400,100]) # [samples in cont., discr, evanes.]\n \nif src.src[3] != 0: #if there is discrete source\n # [depth(m), x-coord(km), y-coord(km), source level]\n src_ycoor = 0\n src_xcoor = np.linspace(0,10,1001)\n src_zcoor = 9 #54\n src_level = 155\n \n src.ndcs = np.array([1300,1600]) # [cmins, cmaxs]\n src.ndwav = np.array([-1,0,0]) # [# of sampling points,first sampling point, last sampling point]; [-1,0,0] for auto\n \n #repeat for each discrete source\n \n# define signal replicas \n\nif 'R' in options:\n reps = types.SimpleNamespace()\n\n reps.zs = np.array([50,50,1]) # depths of sampling of replicas\n reps.xs = np.array([1,1,1]) # x-coord of sampling \n reps.ys = np.array([0,0,1]) # y-coord of sampling\n\n reps.cvals = np.array([1400,1e8]) # [cmins, cmaxs]\n reps.wavs = np.array([-1,0,0]) # [# of sampling points,first sampling point, last sampling point]; [-1,0,0] for auto\n\nii = 0\n\n# define function to write a line\ndef line(var):\n s = ''\n for ii in range(len(var)):\n sii = '{'+str(ii)+'} '\n s = ''.join([s,sii])\n return s[0:-1].format(*var)\n\n\n### start writing input files\nallDirs = []\n\nfor ii in range(src_xcoor.shape[0]): # for each source location\n for jj in range(1): # create 30 small random perturbations to the location\n \n outFileName = str(round(src_xcoor[ii],2)) + 'km' + '.dat'\n\n if not os.path.isdir(outFileName[0:-4]):\n os.mkdir(outFileName[0:-4])\n\n allDirs.append(outFileName[0:-4])\n\n outFile = open(outFileName[0:-4] + '/' + outFileName, 'w')\n\n # Begin writing file:\n\n # Block 1: Title\n outFile.write('ICEX_32array ' + outFileName[0:-4] + '\\n')\n\n # Block 2: Options\n outFile.write(options + '\\n')\n\n # Block 3: Frequencies\n outFile.write(line(freqs) + '\\n')\n\n outFile.write('\\n')\n\n # Block 4: Environment\n\n #number of layers\n outFile.write(str(layers.shape[0]) + '\\n')\n\n #environment layers\n layers[np.isnan(layers)] = 0.\n\n for kk in range(layers.shape[0]):\n outFile.write(line(layers[kk,:]) + '\\n')\n \n\n outFile.write('\\n')\n\n # Block 5: Array\n 
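The line() helper defined above builds a '{0} {1} ...' template and formats it; a plain join is effectively equivalent and simpler:

```python
def line(var):
    # Space-separated string representation of a sequence of values.
    return ' '.join(str(v) for v in var)

print(line([109, 109, 1, 0]))  # "109 109 1 0"
```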
outFile.write(str(arr.shape[0]) + '\\n')\n for irec in range(arr.shape[0]):\n outFile.write(line(arr[irec,:]) + '\\n') \n\n outFile.write('\\n')\n\n # Block 6: Sources\n outFile.write(line(src.src) + '\\n')\n \n outFile.write('\\n')\n\n # Block 7: Sea Surface\n if src.src[0] != 0: #if there is sea surface noise\n outFile.write(line(src.seacs) + '\\n')\n outFile.write(line(src.seawav) + '\\n')\n \n outFile.write('\\n')\n\n # Block 8: Deep Noise\n if src.src[2] != 0: #if there is deep noise source\n outFile.write(line(src.dscs) + '\\n')\n outFile.write(line(src.dswav) + '\\n')\n \n outFile.write('\\n') \n \n # Block 9: Discrete Sources\n if src.src[3] != 0: #if there is discrete source \n disc_src = np.array([src_zcoor,src_xcoor[ii],src_ycoor,src_level])\n outFile.write(line(disc_src) + '\\n')\n outFile.write(line(src.ndcs) + '\\n')\n outFile.write(line(src.ndwav) + '\\n')\n\n outFile.write('\\n')\n\n # Block 10: Signal replicas\n if 'R' in options:\n outFile.write(line(reps.zs[0:-1]) + ' ' + str(int(reps.zs[-1])) + '\\n')\n outFile.write(line(reps.xs[0:-1]) + ' ' + str(int(reps.xs[-1])) + '\\n')\n outFile.write(line(reps.ys[0:-1]) + ' ' + str(int(reps.ys[-1])) + '\\n')\n\n outFile.write('\\n')\n\n outFile.write(line(reps.cvals) + '\\n')\n outFile.write(line(reps.wavs) + '\\n')\n\n\n# Run OASN\n\nbaseDir = os.getcwd()\n\nos.chdir(baseDir)\nif not os.path.isdir('chk_files'):\n os.mkdir('chk_files')\n\nproc_list = []\nlogs = []\n\n# print('Checking for OASN')\noasn_check = subprocess.Popen('which oasn'.split())\noasn_check.wait()\n\n# print(str(oasn_check.poll()==0))\n# print(str(oasn_check.poll()))\n\nif oasn_check.returncode == 0:\n print('OASN found!')\n for ii in range(len(allDirs)):\n os.chdir(baseDir + '/' + allDirs[ii])\n \n logs = open('oasn_log_'+str(ii+1)+'.txt','w')\n print(('oasn {'+str(ii)+'}').format(*allDirs))\n proc = subprocess.Popen(('oasn {'+str(ii)+'}').format(*allDirs).split(),stdout=logs,stderr=logs)\n proc.wait()\n print(' Return code: ' + str(proc.returncode))\n\n if not proc.returncode == 0:\n print('Trying once more: ')\n\n print(('oasn {'+str(ii)+'}').format(*allDirs))\n proc = subprocess.Popen(('oasn {'+str(ii)+'}').format(*allDirs).split(),stdout=logs,stderr=logs)\n proc.wait()\n print(' Return code: ' + str(proc.returncode))\n\t\n if proc.returncode == 0:\n os.rename(baseDir + '/' + allDirs[ii] + '/' + allDirs[ii] + '.chk', baseDir + '/chk_files/' + allDirs[ii] + '.chk')\n\n logs.close()\n time.sleep(0.05)\n\n \nos.chdir(baseDir)\nprint('Done!')\n\n\n","sub_path":"swellex/varysrc_gen_files.py","file_name":"varysrc_gen_files.py","file_ext":"py","file_size_in_byte":6669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"494139689","text":"import os, random, time, copy\nimport numpy\nimport os.path as path\nimport torch\nfrom torch.utils.data import Dataset, DataLoader\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.optim import lr_scheduler \nimport torchvision\nfrom torchvision import datasets, models, transforms\nimport pickle\nimport pathlib\nfrom datetime import datetime\nimport matplotlib.pyplot as plt\n\nfrom zplib.curve import interpolate\nfrom zplib.image import pyramid\nfrom keypoint_annotation import keypoint_annotation_model\nimport elegant\nfrom elegant import worm_spline\n#since all the worms will be in the same orientation/worm pixels, hardcode in a worm_frame_mask\ndef to_tck(widths):\n x = numpy.linspace(0, 1, len(widths))\n smoothing = 0.0625 * len(widths)\n return 
interpolate.fit_nonparametric_spline(x, widths, smoothing=smoothing)\n\ndef get_avg_widths():\n elegant_path = pathlib.Path(elegant.__file__)\n width_trends_path = elegant_path.parent /'width_data/width_trends.pickle'\n WIDTH_TRENDS = pickle.load(open(width_trends_path,'rb'))\n AVG_WIDTHS = numpy.array([numpy.interp(5, WIDTH_TRENDS['ages'], wt) for wt in WIDTH_TRENDS['width_trends']])\n AVG_WIDTHS_TCK = to_tck(AVG_WIDTHS)\n return AVG_WIDTHS_TCK\n\n\"\"\"WIDTH_TRENDS = pickle.load(open('/home/nicolette/.conda/envs/nicolette/lib/python3.7/site-packages/elegant/width_data/width_trends.pickle', 'rb'))\nAVG_WIDTHS = numpy.array([numpy.interp(5, WIDTH_TRENDS['ages'], wt) for wt in WIDTH_TRENDS['width_trends']])\nAVG_WIDTHS_TCK = to_tck(AVG_WIDTHS)\"\"\"\n\nAVG_WIDTHS_TCK = get_avg_widths()\n\n\nclass LossofRegmentation(nn.Module):\n def __init__(self, downscale=2, scale=(0,1,2,3), image_shape=(960,512), mask_error=True):\n super(LossofRegmentation, self).__init__()\n self.scale = scale\n self.reglLoss = nn.L1Loss(reduction='sum')\n #self.segLoss = nn.BCELoss(reduction='sum')\n self.downscale = downscale\n image_size = (int(image_shape[0]/downscale), int(image_shape[1]/downscale))\n widths_tck = (AVG_WIDTHS_TCK[0], AVG_WIDTHS_TCK[1]/downscale, AVG_WIDTHS_TCK[2])\n mask = worm_spline.worm_frame_mask(widths_tck, image_size) #make worm mask for training\n self.mask = mask\n self.mask_error = mask_error\n\n def forward(self, Keypoint0, Output):\n \n K0loss = 0 \n ##image1 mask image2 output\n for i in self.scale: \n s = 2**i \n N,C,H,W = Keypoint0[i].size()\n scaled_mask = pyramid.pyr_down(self.mask, downscale=s)\n m = numpy.array([[scaled_mask]*C]*N) #get mask into the same dimension as keypoint should be (N, 1, H, W)\n tensor_mask = torch.tensor(m) #make the mask into a tensor\n if self.mask_error:\n l = self.reglLoss(Output[('Keypoint0',i)][tensor_mask>0], Keypoint0[i][tensor_mask>0])/(N*C*H*W)\n else:\n l = self.reglLoss(Output[('Keypoint0',i)], Keypoint0[i])/(N*C*H*W)\n print('Loss: {}, scale: {}'.format(l, i))\n K0loss += l\n\n return K0loss\n\n\ndef training_wrapper(dataloaders, dataset_sizes, loss_1_to_2, base_lr = 0.0001 ,scale=[0,1,2,3], \n start_epo = 0, total_epoch_nums=25, work_dir='./', device='cpu'):\n\n log_filename = os.path.join(work_dir,'train.log') \n for i, keypoint in enumerate(['ant_pharynx', 'post_pharynx', 'vulva_kp', 'tail']):\n #for i, keypoint in enumerate(['post_pharynx', 'vulva_kp']):\n since = time.time()\n curr_time = datetime.now()\n print('------------------------{} Training {} ------------------------'.format(curr_time, keypoint))\n print('base_lr: {}\\t scale: {}\\t start_epo: {}\\t total_epoch_nums: {}\\t device: {}\\t work_dir: {}\\t'.format(\n base_lr, scale, start_epo, total_epoch_nums, device, work_dir))\n print('dataloader sizes: {}:{}\\t {}:{}\\t'.format('train', dataset_sizes['train'], 'val', dataset_sizes['val']))\n fn = open(log_filename, 'a')\n fn.write('------------------------{} Training {} ------------------------\\n'.format(curr_time, keypoint))\n #fn.write('base_lr: {}\\t scale: {}\\t start_epo: {}\\t total_epoch_nums: {}\\t device: {}\\t work_dir: {}\\n'.format(\n # base_lr, scale, start_epo, total_epoch_nums, device, work_dir))\n fn.write('dataloader sizes: {}:{}\\t {}:{}\\n'.format('train', dataset_sizes['train'], 'val', dataset_sizes['val']))\n fn.close()\n #initialize model\n initModel = keypoint_annotation_model.WormRegModel(34, scale, pretrained=True)\n initModel.to(device)\n #define loss function\n loss_1_to_2 = loss_1_to_2\n optimizer = 
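The LossofRegmentation class above restricts the L1 loss to pixels inside the worm mask. A toy demonstration of that masking idea:

```python
import torch

pred   = torch.tensor([[1.0, 2.0], [3.0, 4.0]])
target = torch.tensor([[1.5, 2.0], [0.0, 4.0]])
mask   = torch.tensor([[True, True], [False, True]])

# Only in-mask pixels contribute; the 3.0-vs-0.0 error is ignored.
loss = torch.nn.functional.l1_loss(pred[mask], target[mask], reduction='sum')
print(loss)  # tensor(0.5000)
```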
torch.optim.Adam([{'params': initModel.parameters()}], lr=base_lr)\n exp_lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=int(total_epoch_nums/10), gamma=0.5)\n\n #train the model\n model_ft = train_reg(initModel, dataloaders, dataset_sizes, loss_1_to_2, optimizer, exp_lr_scheduler, i, keypoint, \n start_epo=0, num_epochs=total_epoch_nums, work_dir=work_dir, device=device)\n\n print('----------------------------------------------------------------------------')\n time_elapsed = time.time() - since\n print('Training complete in {:.0f}m {:.0f}s'.format(time_elapsed // 60, time_elapsed % 60))\n \n fn = open(log_filename,'a')\n fn.write('Training complete in {:.0f}m {:.0f}s\\n'.format(time_elapsed // 60, time_elapsed % 60))\n fn.close()\n\ndef train_reg(model, dataloaders, dataset_sizes, loss_1_to_2, optimizer, scheduler, \n keypoint_idx, keypoint_name, start_epo = 0, num_epochs=25, work_dir='./', device='cpu'):\n \n save_dir = os.path.join(work_dir, keypoint_name)\n if not os.path.exists(save_dir): os.makedirs(save_dir)\n\n log_filename = os.path.join(save_dir,'train.log') \n since = time.time()\n\n best_model_wts = copy.deepcopy(model.state_dict())\n best_loss = float('inf')\n\n for epoch in range(start_epo ,num_epochs): \n print('\\nEpoch {}/{}'.format(epoch+1, num_epochs))\n print('-' * 10)\n fn = open(log_filename,'a')\n fn.write('\\nEpoch {}/{}\\n'.format(epoch+1, num_epochs))\n fn.write('--'*5+'\\n')\n fn.close()\n\n \n # Each epoch has a training and validation phase\n for phase in ['train', 'val']:\n print(phase)\n fn = open(log_filename,'a') \n fn.write(phase+'\\n')\n fn.close()\n \n if phase == 'train':\n model.train() # Set model to training mode\n else: model.eval() # Set model to evaluate mode\n\n #running_loss, running_lossk0, running_lossk1, running_lossk2, running_lossk3 = 0.0, 0.0, 0.0, 0.0, 0.0\n running_loss, running_lossk0, running_acc = 0.0, 0.0, 0.0\n\n # Iterate over data.\n img, keypoint_maps, out = None, None, None\n iterCount,sampleCount = 0, 0\n for sample in dataloaders[phase]:\n img, keypoint_maps = sample \n keypoint0 = keypoint_maps[keypoint_idx] \n img = img.to(device)\n for i in range(len(keypoint0)):\n keypoint0[i] = keypoint0[i].to(device)\n #keypoint1[i] = keypoint1[i].to(device)\n #keypoint2[i] = keypoint2[i].to(device)\n #keypoint3[i] = keypoint3[i].to(device)\n \n \n # zero the parameter gradients\n optimizer.zero_grad()\n\n # forward\n # track history if only in train \n with torch.set_grad_enabled(phase=='train'):\n if phase=='train': # backward + optimize only if in training phase\n model.train() \n output = model(img) \n k0loss = loss_1_to_2(keypoint0, output)\n loss = k0loss\n loss.backward()\n optimizer.step()\n acc = accuracy([keypoint0[0]], output)\n \n else: \n model.eval() \n output = model(img) \n k0loss = loss_1_to_2(keypoint0, output)\n loss = k0loss\n acc = accuracy([keypoint0[0]], output)\n\n # statistics \n iterCount += 1\n sampleCount += img.size(0) \n running_loss += loss.item() * img.size(0) \n running_lossk0 += k0loss.item() * img.size(0)\n running_acc += acc\n #running_lossk1 += k1loss.item() * img.size(0)\n #running_lossk2 += k2loss.item() * img.size(0) \n #running_lossk3 += k3loss.item() * img.size(0)\n accprint2screen_avgLoss = running_acc/sampleCount\n k0print2screen_avgLoss = running_lossk0/sampleCount\n #k1print2screen_avgLoss = running_lossk1/sampleCount\n #k2print2screen_avgLoss = running_lossk2/sampleCount\n #k3print2screen_avgLoss = running_lossk3/sampleCount\n print2screen_avgLoss = 
running_loss/sampleCount\n \n if iterCount%50==0:\n print('\\t{}/{} loss: {:.4f} \\t k0loss: {:.4f}\\t acc: {:.4f}'.format(iterCount, len(dataloaders[phase]), print2screen_avgLoss, k0print2screen_avgLoss, accprint2screen_avgLoss))\n fn = open(log_filename,'a') \n fn.write('\\t{}/{} loss: {:.4f} \\t k0loss: {:.4f}\\t acc: {:.4f}\\n'.format(iterCount, len(dataloaders[phase]), print2screen_avgLoss, k0print2screen_avgLoss, accprint2screen_avgLoss))\n fn.close()\n \n epoch_loss = running_loss / dataset_sizes[phase]\n k0epoch_loss = running_lossk0 / dataset_sizes[phase]\n accepoch_loss = running_acc / dataset_sizes[phase]\n #k1epoch_loss = running_lossk1 / dataset_sizes[phase]\n #k2epoch_loss = running_lossk2 / dataset_sizes[phase]\n #k3epoch_loss = running_lossk3 / dataset_sizes[phase]\n \n print('\\tloss: {:.6f} \\tk0loss: {:.6f}\\t acc: {:.6f}'.format(epoch_loss, k0epoch_loss, accepoch_loss))\n fn = open(log_filename,'a')\n fn.write('\\tloss: {:.6f} \\tk0loss: {:.6f} acc: {:.6f}\\n'.format(epoch_loss, k0epoch_loss, accepoch_loss))\n fn.close()\n \n keypoint_maps = [keypoint0[0]]\n\n plot_output(img, keypoint_maps, output, epoch, phase, save_dir)\n \n # deep copy the model\n cur_model_wts = copy.deepcopy(model.state_dict())\n path_to_save_paramOnly = os.path.join(save_dir, 'epoch-{}.paramOnly'.format(epoch+1))\n torch.save(cur_model_wts, path_to_save_paramOnly)\n \n if phase=='val' and epoch_loss0\n print(mask.shape)\n #mask = numpy.array([[mask]*C]*N) #get mask into the same dimension as keypoint should be (N, 1, H, W)\n\n for sampleIndex in range(len(keypoint_maps[0])):\n kp_map = keypoint_maps[0][sampleIndex].cpu().numpy()\n gt = kp_map[0]\n gt[~mask] = -1 #since we don't care about things outside of the worm pixels, set everything outside to -1\n gt_kp = numpy.unravel_index(numpy.argmax(gt), gt.shape)\n #gt_kp = numpy.where(gt == numpy.max(gt[mask]))\n\n out_kp_map = out[('Keypoint0',0)][sampleIndex].cpu().detach().numpy()\n pred = out_kp_map[0]\n pred[~mask] = -1 #since we don't care about things outside of the worm pixels, set everything outside to -1\n #out_kp = numpy.where(pred == numpy.max(pred[mask]))\n out_kp = numpy.unravel_index(numpy.argmax(pred), pred.shape)\n\n #dist = numpy.sqrt((gt_kp[0][0]-out_kp[0][0])**2 + (gt_kp[1][0]-out_kp[1][0])**2)\n dist = numpy.sqrt((gt_kp[0]-out_kp[0])**2 + (gt_kp[1]-out_kp[1])**2)\n print(\"GT: {}, Out: {}, dist: {:.0f} \".format(gt_kp, out_kp, dist))\n acc += dist\n print(\"avg acc: \", acc/N)\n return acc\n\n\ndef plot_output(imgList, keypoint_maps, out, epoch, phase, save_dir='./'):\n figWinNumHeight, figWinNumWidth, subwinCount = 4, 4, 1\n plt.figure(figsize=(22,20), dpi=88, facecolor='w', edgecolor='k') # figsize -- inch-by-inch\n plt.clf()\n print(imgList.min())\n print(imgList.max())\n acc = 0\n N,C,H,W = keypoint_maps[0].size()\n print(N,C,H,W)\n s = int(960/H)#get the mask\n widths_tck = (AVG_WIDTHS_TCK[0], AVG_WIDTHS_TCK[1]/s, AVG_WIDTHS_TCK[2])\n mask = worm_spline.worm_frame_mask(widths_tck, (H, W)) #make worm mask\n mask = mask>0\n\n\n for sampleIndex in range(min(4, len(imgList))):\n # visualize image\n plt.subplot(figWinNumHeight,figWinNumWidth,subwinCount)\n subwinCount += 1\n image = imgList[sampleIndex].cpu().numpy()#.squeeze().transpose((1,2,0)) \n plt.imshow(image[0], cmap='gray') \n plt.axis('off')\n plt.title('Image of worm')\n \n #keypoint 0\n plt.subplot(figWinNumHeight,figWinNumWidth,subwinCount)\n subwinCount += 1\n kp_map = keypoint_maps[0][sampleIndex].cpu().numpy()#.squeeze().transpose((1,2,0))\n plt.imshow(kp_map[0], 
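The accuracy() routine above recovers each keypoint as the argmax of the masked response map; numpy.unravel_index turns the flat argmax back into 2-D coordinates:

```python
import numpy as np

heatmap = np.zeros((4, 5))
heatmap[2, 3] = 1.0
y, x = np.unravel_index(np.argmax(heatmap), heatmap.shape)
print(y, x)  # 2 3
```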
cmap='jet')\n plt.axis('on')\n plt.colorbar()\n plt.title('Keypoint '+str(0)+\" GT\")\n \n plt.subplot(figWinNumHeight,figWinNumWidth,subwinCount)\n subwinCount += 1\n kp_map = out[('Keypoint0',0)][sampleIndex].cpu().detach().numpy()#.squeeze().transpose((1,2,0))\n plt.imshow(kp_map[0], cmap='jet')\n plt.axis('on')\n plt.colorbar()\n plt.title('Keypoint '+str(0))\n \n plt.subplot(figWinNumHeight,figWinNumWidth,subwinCount)\n subwinCount += 1\n kp_map = out[('Keypoint0',0)][sampleIndex].cpu().detach().numpy()\n per50 = numpy.percentile(kp_map[0], 50)\n kp_map[0][~mask] = 0\n \n plt.imshow((kp_map[0]>per50).astype(numpy.float32)*1, cmap='jet')\n plt.axis('on')\n plt.colorbar()\n plt.title('Keypoint '+str(0))\n\n \"\"\"#Keypoint 1\n \n subwinCount+=1\n plt.subplot(figWinNumHeight,figWinNumWidth,subwinCount)\n subwinCount += 1\n kp_map = keypoint_maps[1][sampleIndex].cpu().numpy()#.squeeze().transpose((1,2,0))\n plt.imshow(kp_map[0], cmap='jet')\n plt.axis('on')\n plt.colorbar()\n plt.title('Keypoint '+str(1)+\" GT\")\n \n plt.subplot(figWinNumHeight,figWinNumWidth,subwinCount)\n subwinCount += 1\n kp_map = out[('Keypoint1',0)][sampleIndex].cpu().detach().numpy()#.squeeze().transpose((1,2,0))\n plt.imshow(kp_map[0], cmap='jet')\n plt.axis('on')\n plt.colorbar()\n plt.title('Keypoint '+str(1))\n \n plt.subplot(figWinNumHeight,figWinNumWidth,subwinCount)\n subwinCount += 1\n kp_map = out[('Keypoint1',0)][sampleIndex].cpu().detach().numpy()\n per90 = numpy.percentile(kp_map[0], 95)\n \n plt.imshow((kp_map[0]>per90).astype(numpy.float32)*1, cmap='jet')\n plt.axis('on')\n plt.colorbar()\n plt.title('Keypoint '+str(1))\n \n #Keypoint 2 \n subwinCount+=1\n plt.subplot(figWinNumHeight,figWinNumWidth,subwinCount)\n subwinCount += 1\n kp_map = keypoint_maps[2][sampleIndex].cpu().numpy()#.squeeze().transpose((1,2,0))\n plt.imshow(kp_map[0], cmap='jet')\n plt.axis('on')\n plt.colorbar()\n plt.title('Keypoint '+str(2)+\" GT\")\n \n plt.subplot(figWinNumHeight,figWinNumWidth,subwinCount)\n subwinCount += 1\n kp_map = out[('Keypoint2',0)][sampleIndex].cpu().detach().numpy()#.squeeze().transpose((1,2,0))\n plt.imshow(kp_map[0], cmap='jet')\n plt.axis('on')\n plt.colorbar()\n plt.title('Keypoint '+str(2))\n \n plt.subplot(figWinNumHeight,figWinNumWidth,subwinCount)\n subwinCount += 1\n kp_map = out[('Keypoint2',0)][sampleIndex].cpu().detach().numpy()\n per90 = numpy.percentile(kp_map[0], 95)\n \n plt.imshow((kp_map[0]>per90).astype(numpy.float32)*1, cmap='jet')\n plt.axis('on')\n plt.colorbar()\n plt.title('Keypoint '+str(2))\n \n #keypoint 3\n subwinCount+=1\n plt.subplot(figWinNumHeight,figWinNumWidth,subwinCount)\n subwinCount += 1\n kp_map = keypoint_maps[3][sampleIndex].cpu().numpy()#.squeeze().transpose((1,2,0))\n plt.imshow(kp_map[0], cmap='jet')\n plt.axis('on')\n plt.colorbar()\n plt.title('Keypoint '+str(3)+\" GT\")\n \n plt.subplot(figWinNumHeight,figWinNumWidth,subwinCount)\n subwinCount += 1\n kp_map = out[('Keypoint3',0)][sampleIndex].cpu().detach().numpy()#.squeeze().transpose((1,2,0))\n plt.imshow(kp_map[0], cmap='jet')\n plt.axis('on')\n plt.colorbar()\n plt.title('Keypoint '+str(3))\n \n plt.subplot(figWinNumHeight,figWinNumWidth,subwinCount)\n subwinCount += 1\n kp_map = out[('Keypoint3',0)][sampleIndex].cpu().detach().numpy()\n per90 = numpy.percentile(kp_map[0], 95)\n \n plt.imshow((kp_map[0]>per90).astype(numpy.float32)*1, cmap='jet')\n plt.axis('on')\n plt.colorbar()\n plt.title('Keypoint '+str(3))\"\"\"\n \n\n\n save_path = os.path.join(save_dir, ('epoch '+str(epoch)+' output 
'+phase+'.png'))\n plt.savefig(save_path)\n plt.close()","sub_path":"keypoint_annotation/keypoint_training.py","file_name":"keypoint_training.py","file_ext":"py","file_size_in_byte":21237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"587531924","text":"from models.resnet import resnet18\nfrom data_loader.data_loader import LMDBDataSet\nimport torch.nn as nn\nimport torch\nimport torch.optim as optim\nfrom torch.utils.data import DataLoader\nfrom models.gan_resnet import ResNetEncoder64x64, ResNetGenerator64x64, ResNetPixelDiscriminator64x64, ResNetDiscriminator64x64\nfrom models.loss import LossManager, ganBCELoss, reconstructionL1Loss\nfrom collections import OrderedDict\nimport numpy as np\nfrom tqdm import tqdm\nclass CustomResnetModel(nn.Module):\n def __init__(self, n_channels):\n super(CustomResnetModel, self).__init__()\n self.resnet = resnet18(pretrained=False, progress=True, num_classes=2, in_channels=n_channels)\n \n def forward(self, images):\n return self.resnet(images)\nclass CustomModel(nn.Module):\n def __init__(self, n_channels):\n super(CustomModel, self).__init__()\n self.net = nn.Sequential(\n nn.Conv2d(n_channels, 32, 7, padding=3),\n nn.ReLU(),\n nn.BatchNorm2d(32),\n nn.Dropout(0.3),\n nn.Conv2d(32, 64, 5,padding=2),\n nn.ReLU(),\n nn.BatchNorm2d(64),\n\n nn.Conv2d(64, 64, 5,padding=2),\n nn.ReLU(),\n nn.BatchNorm2d(64),\n nn.MaxPool2d(3),\n\n nn.Conv2d(64, 128, 5,padding=2),\n nn.ReLU(),\n nn.BatchNorm2d(128),\n nn.Dropout(0.3),\n nn.Conv2d(128, 128, 5,padding=2),\n nn.ReLU(),\n nn.BatchNorm2d(128),\n nn.MaxPool2d(3),\n\n nn.Conv2d(128, 256, 3,padding=1),\n nn.ReLU(),\n nn.BatchNorm2d(256),\n nn.Dropout(0.3),\n nn.Conv2d(256, 256, 3,padding=1),\n nn.ReLU(),\n nn.BatchNorm2d(256),\n nn.MaxPool2d(3),\n\n nn.Conv2d(256, 256, 2),\n nn.ReLU(),\n nn.BatchNorm2d(256),\n nn.Conv2d(256, 2, 1)\n\n\n )\n def forward(self, images):\n result = self.net(images)\n return result.view(images.shape[0], -1)\n\nclass ComplexModel(nn.Module):\n def __init__(self, n_channels, state_dim):\n super(ComplexModel, self).__init__()\n self.state_dim = state_dim\n self.encoder = ResNetEncoder64x64(n_channels, state_dim)\n self.generator = ResNetGenerator64x64(n_channels, state_dim)\n self.discriminator = ResNetDiscriminator64x64(n_channels, state_dim)\n self.pixel_discriminator = ResNetPixelDiscriminator64x64(n_channels)\n self.predictor = nn.Sequential(\n nn.Linear(state_dim, 20),\n nn.LeakyReLU(),\n nn.Linear(20, 20),\n nn.LeakyReLU(),\n nn.Linear(20, 2)\n )\n def encode(self, images):\n return self.encoder(images)\n def generate(self, state):\n return self.generator(state)\n def discriminate(self, images, state):\n return self.discriminator(images, state)\n def pixelDiscriminate(self, real_images, fake_images):\n return self.pixel_discriminator(real_images, fake_images)\n def predict(self, state):\n return self.predictor(state)\n def getParams(self):\n discriminator_params = []\n other_params = []\n for net in [self.pixel_discriminator, self.discriminator]:\n if net is not None:\n discriminator_params += list(net.parameters())\n for net in [self.predictor, self.encoder, self.generator]:\n if net is not None:\n other_params += list(net.parameters())\n return discriminator_params, other_params\n\n def generateRandomState(self, batch_size, device, z_mean=0.0, z_var=0.3):\n '''\n Generating gaussian random states\n :param batch_size: (int)\n :param z_mean: (float) default 0.0\n :param z_var: (float) default 0.3\n '''\n random_z = 
torch.rand(batch_size, self.state_dim).requires_grad_(False)\n random_z = random_z * 2 - 1.0\n return random_z.to(device)\n\ndef computeLoss(inputs, labels, net, criterion):\n predicts = net(inputs)\n loss = criterion(predicts, labels.view(-1))\n _, indices = torch.max(predicts, dim=1)\n accurancy = (indices == labels).sum().float() / labels.shape[0]\n\n return loss, accurancy\ndef trainGan():\n device = \"cuda:0\"\n batch_size = 64\n test_batch_size = 128\n num_epoch = 100\n model = ComplexModel(11, 200).to(device)\n \n # create loss manager\n discrim_loss_history = OrderedDict()\n other_loss_history = OrderedDict()\n discriminator_loss_manager = LossManager(model, loss_history=discrim_loss_history)\n other_loss_manager = LossManager(model, loss_history=other_loss_history)\n best_error = np.inf\n log_folder = \".\"\n best_model_path = \"{}/model.pth\".format(log_folder)\n discriminator_params, other_params = model.getParams()\n \n other_optimizer = torch.optim.Adam(other_params, lr=0.0001, weight_decay=1e-4)\n discriminator_optimizer = torch.optim.Adam(discriminator_params, lr=0.0001, weight_decay=1e-4)\n\n criterion = nn.CrossEntropyLoss().to(device)\n train_dataset = LMDBDataSet(\"dataset/train\", 0.0, 1.0)\n test_dataset = LMDBDataSet(\"dataset/test\", 0.0, 1.0)\n\n train_loader = DataLoader(train_dataset, batch_size=batch_size, num_workers=4)\n test_loader = DataLoader(test_dataset, batch_size=test_batch_size, num_workers=4)\n\n for epoch in range(num_epoch):\n pbar = tqdm(total=len(train_loader))\n # re-initialize loss manager\n discriminator_loss_manager.newEpoch()\n other_loss_manager.newEpoch()\n for minibatch_num, (inputs, labels) in enumerate(train_loader):\n inputs = inputs.to(device)\n labels = labels.to(device)\n other_optimizer.zero_grad()\n other_loss_manager.resetLosses()\n discriminator_optimizer.zero_grad()\n discriminator_loss_manager.resetLosses()\n\n # generate true, false labels with the correct dimension\n false_label = torch.ones(inputs.size(0), 1, requires_grad=False, device=device)\n true_label = torch.zeros(inputs.size(0), 1, requires_grad=False, device=device)\n # generate the represented state z randomly\n random_state = model.generateRandomState(inputs.size(0), device)\n\n state = model.encode(inputs)\n random_images = model.generate(random_state)\n images_recon = model.generate(state)\n\n pred_labels = model.predict(state)\n\n #### train the discriminator\n # train BiGAN discriminator\n encoder_result = model.discriminate(inputs.detach(), state.detach())\n generator_result = model.discriminator((random_images).detach(), random_state.detach())\n ganBCELoss(encoder_result, generator_result, false_label, true_label, 1.0,\n discriminator_loss_manager, True, name='train_D_loss')\n\n # train comparing discriminator\n real_pixel_result = model.pixelDiscriminate(inputs.detach(),inputs.detach())\n fake_pixel_result = model.pixelDiscriminate(inputs.detach(),images_recon.detach())\n \n ganBCELoss(fake_pixel_result, real_pixel_result, false_label, true_label, 1.0,\n discriminator_loss_manager, True, name='train_pixel_D_recon_loss')\n # backward\n loss_discriminator = discriminator_loss_manager.computeTotalLoss()\n loss_discriminator.backward()\n discriminator_optimizer.step()\n\n #### train the others\n # train G,E by BiGAN\n encoder_result = model.discriminator(inputs.detach(), state)\n generator_result = model.discriminator(random_images, random_state.detach())\n ganBCELoss(encoder_result, generator_result, false_label, true_label, 1.0,\n other_loss_manager, False, 
name='train_G_E_loss')\n \n fake_pixel_result = model.pixelDiscriminate(inputs.detach(), images_recon)\n \n ganBCELoss(fake_pixel_result,\n None, false_label,\n true_label, 2.0, other_loss_manager, False, name='train_pixel_G_E_recon_loss')\n\n # predict loss\n pred_loss = criterion(pred_labels, labels.view(-1))\n other_loss_manager.addToLosses(\"train_predict_loss\", 10.0, pred_loss)\n # backward\n other_loss = other_loss_manager.computeTotalLoss()\n other_loss.backward(retain_graph=True)\n other_optimizer.step()\n discriminator_loss_manager.updateLossHistory()\n other_loss_manager.updateLossHistory()\n pbar.update(1)\n pbar.close()\n\n # valuation\n with torch.no_grad(): # ensure no grad is computed\n accuracy_total = 0.0\n total_loss = 0.0\n for minibatch_num, (inputs, labels) in enumerate(test_loader):\n inputs = inputs.to(device)\n labels = labels.to(device)\n\n state = model.encode(inputs)\n images_recon = model.generate(state)\n pred_labels = model.predict(state)\n val_loss = reconstructionL1Loss(images_recon, inputs, is_clip=False,\n clip_min_value=0) # + reconstructionL1Loss(next_obs, next_obs_recon)\n other_loss_manager.addToLosses('val_recon_loss', 1.0, val_loss)\n pred_loss = criterion(pred_labels, labels.view(-1))\n other_loss_manager.addToLosses('val_predict_loss', 1.0, pred_loss)\n total_loss = val_loss.data + pred_loss.data\n _, indices = torch.max(pred_labels, dim=1)\n accuracy_total += (indices == labels).sum().float() / labels.shape[0]\n other_loss_manager.updateLossHistory()\n # Save best model\n if total_loss < best_error:\n best_error = total_loss\n torch.save(model.state_dict(), best_model_path)\n # Then we print the results for this epoch:\n print(\"epoch: {}/{}\".format(epoch, num_epoch))\n discriminator_loss_manager.printLoss()\n print()\n other_loss_manager.printLoss()\n print(\"test accuracy: {}\".format(accuracy_total/len(test_loader)))\ndef train():\n device = \"cuda:0\"\n batch_size = 128\n test_batch_size = 256\n num_epoch = 50\n net = CustomModel(11).to(device)\n criterion = nn.CrossEntropyLoss().to(device)\n optimizer = optim.Adam(net.parameters(), lr=0.001, weight_decay=1e-4)\n train_dataset = LMDBDataSet(\"dataset/train\", 0.0, 1.0)\n test_dataset = LMDBDataSet(\"dataset/test\", 0.0, 1.0)\n train_loader = DataLoader(train_dataset, batch_size=batch_size, num_workers=4)\n test_loader = DataLoader(test_dataset, batch_size=test_batch_size, num_workers=4)\n\n\n for epoch in range(num_epoch): # loop over the dataset multiple times\n train_loss = 0.0\n train_accuracy = 0.0\n train_count = 0\n test_loss = 0.0\n test_accuracy = 0.0\n test_count = 0\n # train\n for i, (inputs, labels) in enumerate(train_loader):\n inputs = inputs.to(device)\n labels = labels.to(device)\n # zero the parameter gradients\n optimizer.zero_grad()\n\n loss, accurancy = computeLoss(inputs, labels, net, criterion)\n train_accuracy += accurancy\n train_loss += loss.data\n train_count += 1\n loss.backward()\n optimizer.step()\n # test\n with torch.no_grad():\n for i, (inputs, labels) in enumerate(test_loader):\n inputs = inputs.to(device)\n labels = labels.to(device)\n loss, accurancy = computeLoss(inputs, labels, net, criterion)\n test_accuracy += accurancy\n test_loss += loss.data\n test_count += 1\n print(\"epoch: {}, train loss : {:8f}, train accuracy: {:8f}, test loss : {:8f}, test accuracy: {:8f}\".format(epoch, train_loss/train_count, train_accuracy/train_count, test_loss/test_count, test_accuracy/test_count))\n\n\n\n print('Finished Training')\n\n\nif __name__ == \"__main__\":\n 
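# train the adversarial (BiGAN-style) model by default; call train() instead for the plain CNN classifier\n 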
trainGan()","sub_path":"medical_ml/train/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":12323,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
{"seq_id":"301331558","text":"first_die = 6\nsecond_die = 4\n\nhighest_sum = first_die + second_die + 1 #+1 because range() counts up to, but does not include, the last number\nlowest_sum = 2 #1+1\n\nmost_frequent_sum = 0\nlhs = 1\nrhs = 1\n\nmax_count = 0\nmax_counter = []\n\nfor current_sum in range(lowest_sum, highest_sum):\n\tcurrent_sum_counter = 0\n\tfor first_die_iterator in range(1, first_die+1): #check every face of the first die\n\t\tfor second_die_iterator in range(1, second_die+1): #check every face of the second die\n\t\t\tif((first_die_iterator+second_die_iterator) == current_sum):\n\t\t\t\tcurrent_sum_counter += 1\n\t#only compare once every combination for this sum has been counted\n\tif(current_sum_counter > max_count):\n\t\tmax_count = current_sum_counter\n\t\tmax_counter = [current_sum]\n\telif(current_sum_counter == max_count):\n\t\tmax_counter.append(current_sum)\n\n\nprint(max_counter)\n\n\n\"\"\"\ndebug:\n\nprint(first_die_iterator, \"+\", second_die_iterator, \" are equal to: \", current_sum , \"current sum counter: \", current_sum_counter, \"max counter: \", max_counter)\n\"\"\"","sub_path":"dice_cup.py","file_name":"dice_cup.py","file_ext":"py","file_size_in_byte":972,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
{"seq_id":"607009826","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"The setup script.\"\"\"\n\nimport setuptools\n\nwith open(\"README.rst\", \"r\") as fh:\n long_description = fh.read()\n\nrequirements = [\n 'firecloud', 'pandas'\n]\n\nsetup_requirements = [\n # put setup requirements (distutils extensions, etc.) 
here\n]\n\ntest_requirements = [\n 'unittest'\n]\n\nsetuptools.setup(\n name='kco',\n version='0.1.0',\n description=\"KCO FireCloud Tools\",\n author=\"KCO Team\",\n author_email='kco-help@broadinstitute.org',\n url='https://github.com/broadinstitute/kco',\n long_description=long_description,\n long_description_content_type=\"text/markdown\",\n packages=setuptools.find_packages(include=['kco']),\n include_package_data=True,\n install_requires=requirements,\n license=\"BSD license\",\n zip_safe=False,\n keywords='FireCloud',\n classifiers=[\n 'License :: OSI Approved :: BSD License',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Science/Research',\n 'Natural Language :: English',\n 'Operating System :: MacOS :: MacOS X',\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: POSIX :: Linux',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Topic :: Scientific/Engineering :: Bio-Informatics'\n ],\n test_suite='tests',\n tests_require=test_requirements,\n setup_requires=setup_requirements,\n python_requires='>= 3',\n entry_points={\n 'console_scripts': [\n 'kco=kco.__main__:main'\n ]\n }\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"600671240","text":"from rest_framework import serializers\nfrom drf_queryfields import QueryFieldsMixin\nfrom django.contrib.auth.models import User\nfrom v1.models import *\n\nclass UserSerializer(QueryFieldsMixin, serializers.HyperlinkedModelSerializer):\n class Meta:\n model = User\n fields = ('url', 'id', 'username', 'first_name', 'last_name')\n\nclass TagSerializer(QueryFieldsMixin, serializers.ModelSerializer):\n class Meta:\n model = Tag\n fields = ('url', 'id', 'name')\n\nclass CategorySerializer(QueryFieldsMixin, serializers.ModelSerializer):\n class Meta:\n model = Category\n fields = ('url', 'id', 'name')\n\nclass PostSerializer(QueryFieldsMixin, serializers.ModelSerializer):\n category = CategorySerializer()\n tags = TagSerializer(many=True)\n \n class Meta:\n model = Post\n fields = (\n 'url', \n 'id', \n 'author', \n 'category', \n 'title',\n 'description',\n 'tags',\n 'byline',\n 'slug',\n 'background_image',\n 'content',\n 'updated_on',\n 'created_on',\n 'publish_on'\n )","sub_path":"v1/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":1179,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"362073222","text":"from boto.mturk.connection import MTurkConnection\nfrom boto.mturk.question import QuestionContent,Question,QuestionForm,Overview,AnswerSpecification,SelectionAnswer,FormattedContent,FreeTextAnswer\n \n\"\"\"\nThe purpose of this class is to generate a HIT that provides the turker with a set of one or more sentences that form an incomplete story\nIt also provides them with a list of continueing sentences in which they are required to vote on the best fitting\n\"\"\"\nclass VotingSentenceHit(object):\n\n def __init__(self, _access_id, _secret_key, _host, _story_sentences, _vote_sentences):\n \"\"\"\n Purpose: Initialize the HIT\n Parameters: _access_id, _secret_key and _host to connect to MTurk, _story_sentences is a list of sentences that make up the story \n _vote_sentences is a list of sentences on which the turker should vote\n \"\"\"\n self.access_id = _access_id\n self.secret_key = _secret_key\n 
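# the host argument selects the MTurk endpoint (sandbox or production)\n 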
self.host = _host\n self.story_sentences = _story_sentences\n self.vote_sentences = _vote_sentences\n self.title = 'Vote on the best sentence to end the given story.'\n self.description = ('An incomplete story is provided, vote on a set of given sentences to best continue the story.')\n self.keywords = 'story, sentence, writing, creative'\n\n \"\"\"\n This function connects to Mturk and generates the hit corresponding to the given story and sentence choices\n \"\"\"\n def generate_hit(self, num_assignments, hit_duration, hit_reward):\n \"\"\"\n Purpose: Generate and publish the HIT\n Parameters: num_assignments is the number of avaliable assignments for hit, \n hit_duration is the duration of the hit in seconds (60*5 for 5 minutes),\n hit_reward is the reward given per hit in dollars (0.05 is 5 cents)\n \"\"\"\n # CONNECT TO MTURK\n\n mtc = MTurkConnection(aws_access_key_id = self.access_id,\n aws_secret_access_key = self.secret_key,\n host = self.host)\n\n # BUILD OVERVIEW \n \n overview = Overview()\n\n overview.append_field('Title', 'The following one or more sentences constitute an incomplete story.')\n story = \"\"\n for sentence in self.story_sentences:\n story += sentence + \" \"\n overview.append(FormattedContent(story))\n \n # BUILD QUESTION 1: Copy the first sentence of the story \n \n qc1 = QuestionContent()\n qc1.append_field('Title','Copy verbatim the first sentence of the provided incomplete story. Please keep all capitalization and punctuation as given. Your sumbission will automatically be rejected if any character is incorrect.')\n fta1 = FreeTextAnswer()\n q1 = Question(identifier='verify_sentence', content = qc1, answer_spec = AnswerSpecification(fta1), is_required = True)\n\n # BUILD QUESTION 2: Vote on the best sentence to continue the story\n \n sentence_options = []\n for i, sentence in enumerate (self.vote_sentences):\n selection = (sentence, str(i))\n sentence_options.append(selection)\n qc2 = QuestionContent()\n qc2.append_field('Title','Choose the best sentence to end the story.')\n fta2 = SelectionAnswer(min=1, max=1,style='radiobutton',\n selections=sentence_options,\n type='text',\n other=False)\n q2 = Question(identifier='vote_sentence', content = qc2, answer_spec = AnswerSpecification(fta2), is_required = True)\n\n # BUILD THE QUESTION FORM \n \n question_form = QuestionForm()\n question_form.append(overview)\n question_form.append(q1)\n question_form.append(q2)\n \n # CREATE THE HIT \n \n mtc.create_hit(questions = question_form,\n max_assignments = num_assignments,\n title = self.title,\n description = self.description,\n keywords = self.keywords,\n duration = hit_duration,\n reward = hit_reward)","sub_path":"mturk_vote_end_sentence.py","file_name":"mturk_vote_end_sentence.py","file_ext":"py","file_size_in_byte":3934,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"371684290","text":"from flask import render_template\n\nfrom jwt import encode\nfrom uuid import uuid4\nfrom flask import Flask\nfrom flask import request\nfrom flask import make_response\nfrom flask import send_from_directory\nfrom flask_swagger_ui import get_swaggerui_blueprint\n\nimport redis\nimport datetime\nimport requests\n\napp = Flask(__name__)\n\n\ndb = redis.Redis(host='client_redis', port=6381, decode_responses=True)\n\nJWT_SECREATE_DATABASE=\"SECRET\"\nCDN_HOST = \"http://localhost:3000\"\nJWT_SECRET=\"HELLO\"\nJWT_SESSION_TIME=30\nSESSION_TIME = 180\nWEB_HOST = \"http://localhost:3001\"\nINVALIDATE = -1\nSESSION_ID = 
\"session-id\"\nUSER_COUNTER = \"user_counter\"\nUSERLOGIN=\"userlogin\"\nUSERPASSWORD=\"userpassword\"\nUSER='user'\nFILENAMES=\"filenames\"\n\ndb.set(\"users:\"+\"admin\",\"admin\")\n\n\nSWAGGER_URL = '/swagger'\nAPI_URL = '/static/swagger.json'\nSWAGGERUI_BLUEPRINT = get_swaggerui_blueprint(\n SWAGGER_URL,\n API_URL,\n config={\n 'app_name': \"Seans-Python-Flask-REST-Boilerplate\"\n }\n)\n\napp.register_blueprint(SWAGGERUI_BLUEPRINT, url_prefix=SWAGGER_URL)\n\n@app.route('/swagger')\ndef swagger():\n return \"/static/swagger.json\"\n\n@app.route('/')\ndef index():\n return render_template('login.html',WEB_HOST=WEB_HOST)\n\n@app.route('/auth', methods=['POST'])\ndef auth():\n response = make_response('', 303)\n login = request.form.get('login')\n password = request.form.get('password')\n if db.get(\"users:\"+login)==password:\n session_id = str(uuid4())\n #db.hset(user,SESSION_ID,session_id)\n #db.hset(session_id,FILENAMES,\"\")\n db.hset(\"session:\"+session_id, \"username\", login)\n print(\"SESSION ID\",session_id)\n response.set_cookie(SESSION_ID, session_id, max_age=SESSION_TIME)\n response.headers[\"Location\"] = \"/file_manage\"\n else:\n response.set_cookie(SESSION_ID, \"INVALIDATE\", max_age=INVALIDATE)\n response.headers[\"Location\"] = \"/\"\n return response\n\ndef findCorrectUserByLogin(login):\n for user in users:\n logindb=db.hget(user,USERLOGIN)\n if logindb==login:\n return user\n return None\n\ndef findCorrectUserByID(id):\n for user in users:\n iddb=db.hget(user,SESSION_ID)\n if iddb==id:\n return user\n return None\n\n@app.route('/format_error',methods=[\"GET\"])\ndef format_error():\n return render_template('format_error.html',WEB_HOST=WEB_HOST)\n\n@app.route('/file_manage',methods=['GET'])\ndef upload():\n session_id = request.cookies.get(SESSION_ID)\n if session_id:\n #if session_id in session:\n # fid, content_type = session[session_id]\n #else:\n # fid, content_type = '', 'text/plain'\n\n content_type=\"application/pdf\"\n #fileNames=getFileNames()\n #print(fileNames)\n login = db.hget(\"session:\" + session_id, \"username\")\n allfids= db.hvals(\"files:\"+login)\n print(allfids)\n download_tokens=[]\n filenames=[]\n for fidx in allfids:\n download_tokens.append(create_download_token(fidx).decode())\n filenames.append(db.hget(\"filename:\"+login,fidx))\n #download_token = create_download_token(fid).decode('ascii')\n upload_token = create_upload_token().decode('ascii')\n return render_template(\"file_manage.html\",allfids=allfids,content_type=content_type,CDN_HOST=CDN_HOST,upload_token=upload_token,download_tokens=download_tokens,WEB_HOST=WEB_HOST,filenames=filenames)\n return redirect(\"/\")\n\ndef getFileNames():\n filesName=requests.get(CDN_HOST+\"/files\")\n return filesName.json()\n\ndef create_download_token(fid):\n exp = datetime.datetime.utcnow() + datetime.timedelta(seconds=JWT_SESSION_TIME)\n return encode({\"iss\":\"CLIENT\", \"exp\":exp}, JWT_SECRET, \"HS256\")\n\ndef create_upload_token():\n exp = datetime.datetime.utcnow() + datetime.timedelta(seconds=JWT_SESSION_TIME)\n return encode({\"iss\":\"CLIENT\", \"exp\":exp}, JWT_SECRET, \"HS256\")\n\n@app.route('/rejestracja',methods=['GET'])\ndef rejestracja():\n return render_template('rejestracja.html',WEB_HOST=WEB_HOST)\n\n\n@app.route('/userRegistration',methods=['POST'])\ndef userRegistration():\n #user_prefix = str(db.incr(USER_COUNTER))\n #new_user = user_prefix + USER\n #users.append(new_user)\n login=request.form['login'].rstrip()\n password=request.form['password'].rstrip()\n 
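# note: the password is stored in Redis as plain text; hashing it before storage would be safer\n 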
db.set(\"users:\"+login,password)\n #db.hset(new_user,USERPASSWORD,password)\n print(\"HELLO\")\n return redirect(\"/\")\n\n@app.route('/error',methods=['GET'])\ndef wrong():\n return render_template('error.html')\n\n@app.route('/logout')\ndef logout():\n response = make_response('', 303)\n response.set_cookie(SESSION_ID, \"INVALIDATE\", max_age=INVALIDATE)\n response.headers[\"Location\"] = \"/\"\n return response\n\n\n@app.route('/callback')\ndef uploaded():\n session_id = request.cookies.get(SESSION_ID)\n print(\"SESSION ID\", session_id)\n fid = request.args.get('fid')\n err = request.args.get('error')\n\n filename=request.args.get('namefile')\n print(filename)\n if not session_id:\n return redirect(\"/\")\n if err:\n if err==\"Invalid format file\":\n return redirect(\"/format_error\")\n return f\"Upload failed: {err}\", 400\n if not fid:\n return f\"
Upload successfull, but no fid returned\", 500\n #content_type = request.args.get('content_type','text/plain')\n #session[session_id] = (fid, content_type)\n new_fied_prefix = str(db.incr(JWT_SECREATE_DATABASE))\n new_fied= new_fied_prefix + fid\n login = db.hget(\"session:\"+session_id,\"username\")\n db.hset(\"files:\"+login,new_fied, fid)\n db.hset(\"filename:\"+login,fid,filename)\n #filenames=db.hget(session_id,FILENAMES)\n #filenames.append()\n #print(\"FILENAMES\")\n #print(filenames)\n #db.hset(session_id,FILENAMES,filenames)\n return redirect(\"/file_manage\")\n\ndef redirect(location):\n response = make_response('', 303)\n response.headers[\"Location\"] = location\n return response","sub_path":"Projekt 1/client/app/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5934,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"205421614","text":"import json\nimport os\nimport sys\nimport boto3\nfrom boto3.dynamodb.conditions import Key, Attr\nfrom datetime import datetime\n\nhere = os.path.dirname(os.path.realpath(__file__))\nsys.path.append(os.path.join(here, \"vendored\"))\n\nimport requests\n\nTOKEN = os.environ['TELEGRAM_TOKEN']\nBASE_URL = \"https://api.telegram.org/bot{}\".format(TOKEN)\nSEND_MESSAGE_URL = \"/sendMessage\"\n\nSTART_COMMAND = \"/start\"\nSTATUS_COMMAND = \"/status\"\nPHOTO_COMMAND = \"/photo\"\nWATER_COMMAND = \"/water\"\n\nDEVICE_ID = \"\" # Enter your plantId here e.g. \"yoshi\"\nWHITELIST = [] # Enter the list of Telegram IDs to whitelist here as comma separated strings e.g. [\"123\", \"456\"]\n\n\ndef handle_message(event, _):\n try:\n data = json.loads(event[\"body\"])\n chat_id = data[\"message\"][\"chat\"][\"id\"]\n user_id = str(data[\"message\"][\"from\"][\"id\"])\n\n message = str(data[\"message\"][\"text\"]).split(\" \")\n command = message[0].split(\"@\")\n\n is_relevant_msg = False\n if len(command) > 1:\n target = command[1][:-10]\n if target == DEVICE_ID:\n is_relevant_msg = True\n else:\n is_relevant_msg = True\n command = command[0]\n\n if is_relevant_msg:\n if command == START_COMMAND:\n if is_user_verified(user_id):\n handle_start(chat_id)\n else:\n handle_invalid_user(chat_id, START_COMMAND)\n\n if command == STATUS_COMMAND:\n if is_user_verified(user_id):\n handle_status(chat_id)\n else:\n handle_invalid_user(chat_id, STATUS_COMMAND)\n\n if command == PHOTO_COMMAND:\n if is_user_verified(user_id):\n handle_photo(chat_id)\n else:\n handle_invalid_user(chat_id, PHOTO_COMMAND)\n\n if command == WATER_COMMAND:\n if is_user_verified(user_id):\n handle_water(chat_id, message)\n else:\n handle_invalid_user(chat_id, WATER_COMMAND)\n\n except Exception as e:\n print(e)\n\n return {\"statusCode\": 200}\n\n\ndef handle_start(chat_id):\n response = \"Hello\"\n data = {\"text\": response.encode(\"utf8\"), \"chat_id\": chat_id}\n url = BASE_URL + SEND_MESSAGE_URL\n requests.post(url, data)\n\n\ndef handle_status(chat_id):\n dynamodb = boto3.resource('dynamodb')\n table = dynamodb.Table('iotea_last_vitals')\n\n res = table.query(\n Limit=1,\n ScanIndexForward=False,\n KeyConditionExpression=Key('plantID').eq(DEVICE_ID)\n )\n\n item = res['Items'][0]\n humidity = item['humidity']\n temp = item['temperature']\n moisture = item['moisturePer']\n time = datetime.utcfromtimestamp(timestamp_to_seconds(item['ts']))\n\n response = \"Hello!\\n\" \\\n \"My last checkup was at {time}\\n\" \\\n \"Moisture Level: {moisture}%\\n\" \\\n \"Temperature: {temp}\\N{DEGREE SIGN}C\\n\" \\\n \"Humidity: 
{humidity}%\".format(\n time=time,\n moisture=moisture,\n temp=temp,\n humidity=humidity\n )\n\n data = {\"text\": response.encode(\"utf8\"), \"chat_id\": chat_id}\n url = BASE_URL + SEND_MESSAGE_URL\n requests.post(url, data)\n\n\ndef handle_photo(chat_id):\n response = \"Sending photo of plant...\"\n data = {\"text\": response.encode(\"utf8\"), \"chat_id\": chat_id}\n url = BASE_URL + SEND_MESSAGE_URL\n requests.post(url, data)\n\n\ndef handle_water(chat_id, message):\n response = \"\"\n invalid_thresh = False\n\n dynamodb = boto3.resource('dynamodb')\n table = dynamodb.Table('iotea_thresholds')\n\n res = table.query(\n Limit=1,\n ScanIndexForward=False,\n KeyConditionExpression=Key('plant_id').eq(DEVICE_ID)\n )\n current_thresh = res['Items'][0]['threshold']\n\n if len(message) == 1:\n response = \"The threshold for {plant} is currently {thresh}%\".format(\n plant=DEVICE_ID,\n thresh=current_thresh\n )\n elif len(message) == 2:\n try:\n new_thresh = int(message[1])\n if 0 <= new_thresh <= 100:\n table.update_item(\n Key={'plant_id': DEVICE_ID},\n UpdateExpression=\"set threshold = :r\",\n ExpressionAttributeValues={':r': new_thresh}\n )\n\n response = \"The threshold for {plant} has been changed from {current_thresh}% to {new_thresh}%\".format(\n plant=DEVICE_ID,\n current_thresh=current_thresh,\n new_thresh=new_thresh\n )\n else:\n invalid_thresh = True\n except ValueError:\n invalid_thresh = True\n else:\n invalid_thresh = True\n\n if invalid_thresh:\n response = \"Invalid threshold received.\"\n\n data = {\"text\": response.encode(\"utf8\"), \"chat_id\": chat_id}\n url = BASE_URL + SEND_MESSAGE_URL\n requests.post(url, data)\n\n\ndef handle_invalid_user(chat_id, command):\n response = \"Sorry, you are not authorised to use {}\".format(command)\n data = {\"text\": response.encode(\"utf8\"), \"chat_id\": chat_id}\n url = BASE_URL + SEND_MESSAGE_URL\n requests.post(url, data)\n\n\ndef is_user_verified(user_id):\n return user_id in WHITELIST\n\n\ndef timestamp_to_seconds(ts):\n return ts / 1000\n","sub_path":"bot/src/handler.py","file_name":"handler.py","file_ext":"py","file_size_in_byte":5439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"97299237","text":"import os\nimport sys\nsys.path.insert(1, os.path.join(sys.path[0], '../utils'))\nimport numpy as np\nimport argparse\nimport h5py\nimport math\nimport time\nimport logging\nimport matplotlib.pyplot as plt\nfrom sklearn import metrics\nimport _pickle as cPickle\nimport librosa\n\nimport torch\ntorch.backends.cudnn.benchmark=True\ntorch.manual_seed(0)\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport torch.utils.data\n \nfrom utilities import get_filename\nfrom models import *\nfrom pytorch_utils import (move_data_to_device, count_parameters, count_flops)\nimport config\n\n\"\"\"\nMODEL_TYPE=\"Cnn14\"\nCHECKPOINT_PATH=\"/vol/vssp/msos/qk/workspaces/pub_audioset_tagging_cnn_transfer/checkpoints_for_paper/Cnn14_mAP=0.431.pth\"\nCUDA_VISIBLE_DEVICES=1 python3 pytorch/inference_template.py inference --window_size=1024 --hop_size=320 --mel_bins=64 --fmin=50 --fmax=14000 --model_type=$MODEL_TYPE --checkpoint_path=$CHECKPOINT_PATH --cuda\n\"\"\"\n\ndef inference(args):\n\n # Arugments & parameters\n window_size = args.window_size\n hop_size = args.hop_size\n mel_bins = args.mel_bins\n fmin = args.fmin\n fmax = args.fmax\n model_type = args.model_type\n checkpoint_path = args.checkpoint_path\n device = torch.device('cuda') if args.cuda and 
torch.cuda.is_available() else torch.device('cpu')\n filename = args.filename\n\n sample_rate = config.sample_rate\n classes_num = config.classes_num\n\n # Model\n Model = eval(model_type)\n model = Model(sample_rate=sample_rate, window_size=window_size, \n hop_size=hop_size, mel_bins=mel_bins, fmin=fmin, fmax=fmax, \n classes_num=classes_num)\n \n checkpoint = torch.load(checkpoint_path, map_location=device)\n model.load_state_dict(checkpoint['model'])\n\n # Parallel\n print('GPU number: {}'.format(torch.cuda.device_count()))\n model = torch.nn.DataParallel(model)\n\n if 'cuda' in str(device):\n model.to(device)\n \n if True:\n waveform = np.zeros(sample_rate * 10)\n else:\n audio_path = \"/vol/vssp/msos/qk/test9/YwfSPbhnpOlQ.wav\"\n (waveform, _) = librosa.core.load(audio_path, sr=sample_rate, mono=True)\n\n waveform = waveform[None, :]\n waveform = move_data_to_device(waveform, device)\n\n # Forward\n model.eval()\n batch_output_dict = model(waveform, None)\n\n clipwise_output = batch_output_dict['clipwise_output'].data.cpu().numpy()[0]\n sorted_indexes = np.argsort(clipwise_output)[::-1]\n\n embedding = batch_output_dict['embedding'].data.cpu().numpy()[0]\n print(embedding.shape)\n\n for k in range(10):\n print('{}, {}'.format(np.array(config.labels)[sorted_indexes[k]], \n clipwise_output[sorted_indexes[k]]))\n\n\nif __name__ == '__main__':\n\n parser = argparse.ArgumentParser(description='Example of parser. ')\n subparsers = parser.add_subparsers(dest='mode')\n\n parser_inference = subparsers.add_parser('inference') \n parser_inference.add_argument('--window_size', type=int, required=True)\n parser_inference.add_argument('--hop_size', type=int, required=True)\n parser_inference.add_argument('--mel_bins', type=int, required=True)\n parser_inference.add_argument('--fmin', type=int, required=True)\n parser_inference.add_argument('--fmax', type=int, required=True) \n parser_inference.add_argument('--model_type', type=str, required=True)\n parser_inference.add_argument('--checkpoint_path', type=str, required=True)\n parser_inference.add_argument('--cuda', action='store_true', default=False)\n \n args = parser.parse_args()\n args.filename = get_filename(__file__)\n\n if args.mode == 'inference':\n inference(args)\n\n else:\n raise Exception('Error argument!')\n","sub_path":"pytorch/inference_template.py","file_name":"inference_template.py","file_ext":"py","file_size_in_byte":3697,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"164547975","text":"\"\"\"\n Selection Sort\n Properties:\n Not stable\n O(1) extra space\n Θ(n2) comparisons\n Θ(n) swaps\n Not adaptive\n\"\"\"\n#to use comma seperated input uncomment below line\n#x = [int(i) for i in input().strip().split(\",\")]\n\nx=[10,9,8,7,6,5,4,3,2,1]\nfor i in range(len(x)-1):\n minn = i\n for j in range(i+1,len(x)):\n if(x[j] < x[minn]):\n minn=j\n if(minn!=i):\n x[i],x[minn] = x[minn],x[i]\nprint(x)\n","sub_path":"Selection_sort.py","file_name":"Selection_sort.py","file_ext":"py","file_size_in_byte":459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"19178858","text":"# -*- coding: utf-8 -*-\nfrom django.conf.urls import patterns, url\n\nurlpatterns = patterns('banners.views',\n url(regex=r'^placement/(?P\\d+)/(?P\\d+)/$', view='placement',\n name='placement'),\n url(regex=r'^view/(?P\\d+)/(?P\\d+)/(?P[\\w\\d]+)/$',\n view='shows', name='shows'),\n url(regex=r'^view/(?P\\d+)/(?P\\d+)/$',\n view='shows', name='show'),\n 
url(regex=r'^click/(?P\\d+)/(?P\\d+)/(?P[\\w\\d]+)/$',\n view='clicks', name='click'),\n url(regex=r'^click/(?P\\d+)/(?P\\d+)/$',\n view='clicks', name='clicks'),\n url(regex=r'^code/(?P\\d+)/(?Pip|name)/$', view='code', name='code'),\n url(regex=r'^zones/$', view='code', name='zones'),\n)","sub_path":"banners/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"126797399","text":"#!/usr/bin/python3\r\n\"\"\"\r\n===============================================================================\r\n\r\n████████╗███████╗███████╗████████╗ ██████╗ █████╗ ███████╗███████╗███████╗\r\n╚══██╔══╝██╔════╝██╔════╝╚══██╔══╝ ██╔════╝██╔══██╗██╔════╝██╔════╝██╔════╝\r\n ██║ █████╗ ███████╗ ██║ ██║ ███████║███████╗█████╗ ███████╗\r\n ██║ ██╔══╝ ╚════██║ ██║ ██║ ██╔══██║╚════██║██╔══╝ ╚════██║\r\n ██║ ███████╗███████║ ██║ ╚██████╗██║ ██║███████║███████╗███████║\r\n ╚═╝ ╚══════╝╚══════╝ ╚═╝ ╚═════╝╚═╝ ╚═╝╚══════╝╚══════╝╚══════╝\r\n\r\n===============================================================================\r\n\"\"\"\r\n\r\nfrom models.base_model import BaseModel\r\nfrom models.state import State\r\nimport unittest\r\nimport json\r\nimport pep8\r\nimport datetime\r\n\r\n\r\nclass TestState(unittest.TestCase):\r\n \"\"\" Test State class implementation. \"\"\"\r\n def test_doc_module(self):\r\n \"\"\"Module documentation\"\"\"\r\n doc = State.__doc__\r\n self.assertGreater(len(doc), 1)\r\n\r\n def test_pep8_conformance_state(self):\r\n \"\"\" Test that models/state.py conforms to PEP8. \"\"\"\r\n pep8style = pep8.StyleGuide(quiet=True)\r\n result = pep8style.check_files(['models/state.py'])\r\n self.assertEqual(result.total_errors, 0,\r\n \"Found code style errors (and warnings).\")\r\n\r\n def test_pep8_conformance_test_state(self):\r\n \"\"\"\r\n - Test that tests/test_models/test_state.py conforms to PEP8.\r\n \"\"\"\r\n pep8style = pep8.StyleGuide(quiet=True)\r\n res = pep8style.check_files(['tests/test_models/test_state.py'])\r\n self.assertEqual(res.total_errors, 0,\r\n \"Found code style errors (and warnings).\")\r\n\r\n def test_doc_constructor(self):\r\n \"\"\" Constructor documentation. \"\"\"\r\n doc = State.__init__.__doc__\r\n self.assertGreater(len(doc), 1)\r\n\r\n def test_class(self):\r\n \"\"\" Validate the types of the attributes an class. 
\"\"\"\r\n with self.subTest(msg='Inheritance'):\r\n self.assertTrue(issubclass(State, BaseModel))\r\n\r\n with self.subTest(msg='Attributes'):\r\n self.assertIsInstance(State.name, str)\r\n","sub_path":"tests/test_models/test_state.py","file_name":"test_state.py","file_ext":"py","file_size_in_byte":2960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
{"seq_id":"41763569","text":"import pandas as pd\nimport glob \nfrom subprocess import run\nimport os\nfrom collections import Counter\n\n\n \ndef main():\n # Import and clean up\n csv_path = 'data/dataset_FR_gen.csv'\n data = pd.read_csv(csv_path)\n data = data.drop([\"category\", \"topic\"], axis=1)\n data = data.dropna()\n\n # Keep only the reliable contributors\n data = data[data[\"file\"].apply(lambda x : \"CLEMENT\" in x or \"MONTA\" in x or \"GOULARD\" in x)]\n# data = data[data[\"file\"].apply(lambda x : \"MONTA\" in x)]\n\n\n # Drop the file names that contain a space\n data = data.drop(data[data['file'].str.contains(\"ARN\")].index)\n\n # Remove the angle brackets that appear in the transcriptions\n transcription = data[\"transcription\"].apply(lambda x : x.replace(\"<\", \"\").replace(\">\", \"\"))\n data[\"transcription\"] = transcription\n\n # Drop the files that are not present in the tracker\n delete_untracked(data)\n\n # Save\n data.to_csv(\"data/dataset_FR_filtered.csv\", index=False)\n \n gen_trans(data)\n gen_dict()\n \nmain()","sub_path":"fine_tuning/training/gen_tracker.py","file_name":"gen_tracker.py","file_ext":"py","file_size_in_byte":1035,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
{"seq_id":"468836022","text":"# -*- coding: utf-8 -*-\nfrom django.contrib.auth.models import AnonymousUser\nfrom django.template import RequestContext\nfrom django.test import RequestFactory\nfrom django.utils.text import smart_split\nfrom django.utils.encoding import force_unicode\n\nfrom .conf import settings\nfrom .utils import strip_tags\n\n\ndef get_plugin_index_data(base_plugin, request):\n text_bits = []\n instance, plugin_type = base_plugin.get_plugin_instance()\n\n if instance is None:\n # this is an empty plugin\n return text_bits\n\n if hasattr(instance, 'search_fulltext'):\n # check if the plugin instance has search enabled\n search_contents = instance.search_fulltext\n elif hasattr(base_plugin, 'search_fulltext'):\n # now check in the base plugin instance (CMSPlugin)\n search_contents = base_plugin.search_fulltext\n elif hasattr(plugin_type, 'search_fulltext'):\n # last check in the plugin class (CMSPluginBase)\n search_contents = plugin_type.search_fulltext\n else:\n # enable by default\n search_contents = True\n\n for field in getattr(instance, 'search_fields', []):\n field_content = strip_tags(getattr(instance, field, ''))\n\n if field_content:\n field_content = force_unicode(field_content)\n text_bits.extend(smart_split(field_content))\n\n if search_contents:\n plugin_contents = instance.render_plugin(context=RequestContext(request))\n\n if plugin_contents:\n plugin_contents = strip_tags(plugin_contents)\n text_bits.extend(smart_split(plugin_contents))\n\n return text_bits\n\n\ndef get_request(language=None):\n \"\"\"\n Returns a Request instance populated with cms specific attributes.\n \"\"\"\n request_factory = RequestFactory(HTTP_HOST=settings.ALLOWED_HOSTS[0])\n request = request_factory.get(\"/\")\n request.session = {}\n request.LANGUAGE_CODE = language or settings.LANGUAGE_CODE\n # Needed for plugin rendering.\n 
request.current_page = None\n request.user = AnonymousUser()\n return request\n","sub_path":"aldryn_search/helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":2056,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"285327831","text":"################################################################################\n# Utility functions.\n################################################################################\ndef get_tvtk_class_names():\n \"\"\"Returns 4 lists:\n 1. A list of all the TVTK class names that are not abstract.\n 2. A list of the TVTK sources (have only outputs and no inputs)\n 3. A list of the TVTK filters (both inputs and outputs)\n 4. A list of the TVTK sinks (only inputs and no outputs)\n \"\"\"\n # Shut of VTK warnings for the time being.\n o = vtk.vtkObject\n w = o.GetGlobalWarningDisplay()\n o.SetGlobalWarningDisplay(0) # Turn it off.\n all = []\n src = []\n filter = []\n sink = []\n for name in dir(vtk):\n if name.startswith('vtk') and not name.startswith('vtkQt'):\n klass = getattr(vtk, name)\n try:\n c = klass()\n except TypeError:\n continue\n tvtk_name = get_tvtk_name(name)\n all.append(tvtk_name)\n has_input = has_output = False\n if hasattr(klass, 'GetNumberOfInputPorts'):\n if c.GetNumberOfInputPorts() > 0:\n has_input = True\n if hasattr(klass, 'GetNumberOfOutputPorts'):\n if c.GetNumberOfOutputPorts() > 0:\n has_output = True\n if has_input:\n if has_output:\n filter.append(tvtk_name)\n else:\n sink.append(tvtk_name)\n elif has_output:\n src.append(tvtk_name)\n o.SetGlobalWarningDisplay(w)\n result = (all, src, filter, sink)\n for x in result:\n x.sort()\n return result\n","sub_path":"LIVE/dj_demo/mysite/test_segment_base/tvtk_doc_0.py","file_name":"tvtk_doc_0.py","file_ext":"py","file_size_in_byte":1714,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"155497919","text":"def representingL(A, B):\n # This is the representation of L from the homework\n # which will be later used to calculate Fibonacci sequence in O(log n) complexity\n a = (A[0][0] * B[0][0]) + (A[0][1] * B[1][0])\n b = (A[0][0] * B[0][1]) + (A[0][1] * B[1][1])\n c = (A[1][0] * B[0][0]) + (A[1][1] * B[1][0])\n d = (A[1][0] * B[0][1]) + (A[1][1] * B[1][1])\n A[0][0] = a\n A[0][1] = b\n A[1][0] = c\n A[1][1] = d\n\n\ndef Pow(A, n):\n if n == 0 or n == 1:\n return\n Pow(A, n // 2)\n representingL(A, A)\n B = [[1, 1], [1, 0]]\n if n % 2 != 0:\n representingL(A, B)\n\n\ndef fibPow(n):\n\n if n == 0:\n return 0\n if n == 1:\n return 1\n else:\n A = [[1, 1], [1, 0]]\n Pow(A, n - 1)\n return A[0][0]\n\n\ndef main():\n print(fibPow(28))\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"Assignment2/assign2.py","file_name":"assign2.py","file_ext":"py","file_size_in_byte":845,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"612365987","text":"\"\"\"\nAoC\n\"\"\"\nimport time\n\nstart_secs = time.time()\nprint('')\n\ndef all_zero(arr):\n tot = arr[0] + arr[1] + arr[2] + arr[3] + arr[4] + arr[5] + arr[6]\n return tot == 0\n\ndef get_future_pos(t,pos,positions):\n new_pos = t + pos\n if new_pos < positions:\n return new_pos\n else:\n new_pos = ( (t+pos) % positions )\n return new_pos\n\ndef get_positions(t):\n global pos_temp\n global discs\n for i in range(len(discs)):\n pos_temp[i] = get_future_pos(t+i+1,discs[i][0],discs[i][1])\n return pos_temp\n\n# read in input file\nl=[]\nmy_file = open(\"inp2.txt\", \"r\", 
encoding='utf-8')\nlines = my_file.readlines()\nfor line in lines:\n l.append(line.strip())\n\ndiscs = [ [0,0] for i in range(len(l)) ] # [ pos, positions ]\npos_temp = [ 0 for i in range(len(l)) ]\nfor s in l:\n arr = s.split(' ')\n disc_num = int(arr[1].replace('#','')) - 1\n positions = int(arr[3])\n pos = int(arr[11].replace('.',''))\n discs[disc_num][0] = pos\n discs[disc_num][1] = positions\n\nfor step in range(3000000):\n res = get_positions(step)\n if all_zero(res):\n print(step)\n break\n \n\nprint('')\nend_secs = time.time()\nprint('--- ' + str(end_secs-start_secs) + ' secs ---')\n","sub_path":"2016/day15/part2.py","file_name":"part2.py","file_ext":"py","file_size_in_byte":1152,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"74995355","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom fitter import Fitter\nimport os\n\n\n# In[4]:\n\n\ndata_path = os.getcwd()\ndata_path = data_path.replace(\"code\", \"data\")\ndata_path = data_path.replace(\"notebooks\", \"input\")\n\nos.chdir(data_path)\n\n\n# In[5]:\n\n\n\njob_data = pd.read_csv(\"nyc-jobs.csv\")\n\n\n# In[ ]:\n\n\n\n\n\n# In[32]:\n\n\njob_data = job_data[[\"Salary Range To\", \"Salary Frequency\"]]\njob_data = job_data[job_data[\"Salary Frequency\"] == \"Annual\"]\n\n\n# In[33]:\n\n\nplot_data = np.array(job_data[\"Salary Range To\"])\n\n\n# In[34]:\n\n\nplt.hist(plot_data, bins = 30)\nlen(plot_data)\n\n\n# In[78]:\n\n\nsampled_data = np.random.choice(plot_data, 500, replace = False)\n\n\n# In[80]:\n\n\nplt.hist(sampled_data, bins = 30)\n\n\n# In[83]:\n\n\nf = Fitter(plot_data)\nf.fit()\n\n\n# In[84]:\n\n\nf.summary()\n\n\n# In[93]:\n\n\nf_params = f.fitted_param[\"f\"]\nf_params\n\n\n# In[96]:\n\n\nf.fitted_pdf['gamma']\n\n","sub_path":"code/scripts/fitter-test-nyc-jobs.py","file_name":"fitter-test-nyc-jobs.py","file_ext":"py","file_size_in_byte":929,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"488619281","text":"import json\n\n\n# check if working as wished, find out if there's anything wrong, fix it\ndef write_file(data):\n with open('data.txt', 'w') as fp:\n json.dump(data, fp)\n #fp.close()\n\n\n# check if working as wished, find out if there's anything wrong, fix it\ndef read_file(f):\n with open(f, 'r') as fp:\n string = fp.read()\n if len(string) > 1:\n data = json.loads(string)\n return data\n else:\n data = {}\n return data\n","sub_path":"saves.py","file_name":"saves.py","file_ext":"py","file_size_in_byte":490,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"649523544","text":"import re\nimport six\nimport unicodedata\nfrom math import radians, sin, cos, acos\nfrom django import VERSION\nfrom django.contrib.gis.geos import Point\ntry:\n from django.utils.encoding import force_unicode as force_text\nexcept (NameError, ImportError):\n from django.utils.encoding import force_text\nfrom django.utils.safestring import mark_safe, SafeText\n\nearth_radius_km = 6371.009\n\n\ndef geo_distance(a, b):\n \"\"\"Distance between two geo points in km. 
(p.x = long, p.y = lat)\"\"\"\n a_y = radians(a.y)\n b_y = radians(b.y)\n delta_x = radians(a.x - b.x)\n cos_x = (sin(a_y) * sin(b_y) +\n cos(a_y) * cos(b_y) * cos(delta_x))\n return acos(cos_x) * earth_radius_km\n\n\nto_und_rgx = re.compile(r\"[']\")\nslugify_rgx = re.compile(r'[^-\\w._~]', re.UNICODE)\nmulti_dash_rgx = re.compile(r'-{2,}')\ndash_und_rgx = re.compile(r'[-_]_')\nund_dash_rgx = re.compile(r'[-_]-')\nstarting_chars_rgx = re.compile(r'^[-._]*')\nending_chars_rgx = re.compile(r'[-.]*$')\n\n\ndef default_slugify(obj, value):\n value = force_text(value)\n value = unicodedata.normalize('NFKC', value.strip().lower())\n value = re.sub(to_und_rgx, '_', value)\n value = re.sub(slugify_rgx, '-', value)\n value = re.sub(multi_dash_rgx, '-', value)\n value = re.sub(dash_und_rgx, '_', value)\n value = re.sub(und_dash_rgx, '_', value)\n value = re.sub(starting_chars_rgx, '', value)\n value = re.sub(ending_chars_rgx, '', value)\n return mark_safe(value)\n\nif VERSION < (1, 10):\n from django.utils.functional import allow_lazy\n default_slugify = allow_lazy(default_slugify, six.text_type, SafeText)\nelse:\n from django.utils.functional import keep_lazy\n default_slugify = keep_lazy(six.text_type, SafeText)(default_slugify)\n","sub_path":"cities/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":1729,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"627786155","text":"#Author: Xinran\r\nimport csv\r\n# Input data\r\ninputDataName = 'pos'\r\n# inputData = 'neg'\r\n# Output data\r\ndata_out = []\r\nvar_data_out=[]\r\n\r\n# store the label message\r\npos_label_list = [ ['' for col in range(5)] for row in range(27)]\r\nneg_label_list = [ ['' for col in range(5)] for row in range(27)]\r\n\r\ndef get_one_message_data(line, start_index, msg_num,label_list,bit = 27):\r\n for i in range(start_index,start_index+bit):\r\n if(line[i]!=''):\r\n #print(neg_label_list[i - start_index][msg_num - 1])\r\n if (label_list[i - start_index][msg_num - 1] == '0'):\r\n return str((-1) * int(line[i]))\r\n return line[i]\r\n return None\r\n\r\ndef decimal_to_ternary(dec, base = 3):\r\n tempStr = ''\r\n temp = dec\r\n while (temp > 0):\r\n ord = temp % base\r\n tempStr = str(ord) + tempStr\r\n temp = int(temp / base)\r\n #print(tempStr)\r\n if(len(tempStr)<3):\r\n for i in range(0,3-len(tempStr)):\r\n tempStr = str(0) + tempStr\r\n return tempStr\r\n\r\n# open the label file, read in the data\r\nwith open('messageLabel.csv') as messageLabel:\r\n label_csv = csv.reader(messageLabel)\r\n headers = next(label_csv)\r\n count = 0\r\n for line in label_csv:\r\n for i in range(1,6):\r\n pos_label_list[count][i - 1] = line[i]\r\n for i in range(6,11):\r\n neg_label_list[count][i - 6] = line[i]\r\n count += 1\r\n\r\n\r\n# select input data\r\nif inputDataName == 'pos':\r\n datafile = 'positiveMessageFirstBatchData.csv'\r\n label = pos_label_list\r\nelif inputDataName == 'neg':\r\n datafile = 'negativeMessageFirstBatchData.csv'\r\n label = neg_label_list\r\n\r\n# open the input file\r\nwith open(datafile) as inputData:\r\n input_csv = csv.reader(inputData)\r\n headers = next(input_csv)\r\n ac1_index = headers.index('AC1')\r\n ac2_index = headers.index('AC2')\r\n for line in input_csv:\r\n # Delete the invalid data\r\n # when the AC1 and AC2 are both incorrect\r\n if(line[ac1_index]!='2' or line[ac2_index]!='1'):\r\n continue\r\n\r\n # Get the data of every message\r\n # 5 message in total\r\n # print(line)\r\n msg_data_list = []\r\n for i in range(1,6):\r\n 
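#column header for message i at variation 000\r\n 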
start_index = headers.index('M'+str(i)+'_000_1')\r\n msg_data = get_one_message_data(line,start_index,i,label)\r\n msg_data_list.append(msg_data)\r\n data_out.append(msg_data_list)\r\n\r\n #Get the data of every variation\r\n var_data_list = []\r\n #print(line)\r\n\r\n for i in range(0,27):\r\n data_num =decimal_to_ternary(i)\r\n tmp = 0\r\n nullValue=True\r\n for j in range(1,6):\r\n data_index = headers.index('M' +str(j)+ '_'+data_num+'_1')\r\n if(line[data_index] != ''):\r\n nullValue=False\r\n if (label[i][j-1] == '0'):\r\n tmp += (-1) * int(line[data_index])\r\n else:\r\n tmp += int(line[data_index])\r\n #print(str(tmp)+' , '+str(j)+ '_'+str(data_num))\r\n # else:\r\n # tmp = None\r\n if not nullValue:\r\n var_data_list.append(tmp)\r\n else:\r\n var_data_list.append('')\r\n\r\n var_data_out.append(var_data_list)\r\n\r\nwith open(inputDataName+'_msg_output.csv','w') as msg_f:\r\n msg_csv = csv.writer(msg_f)\r\n header = ['M'+str(i) for i in range(1,6)]\r\n msg_csv.writerow(header)\r\n for line in data_out:\r\n msg_csv.writerow(line)\r\n\r\nwith open(inputDataName+'_var_output.csv','w') as var_f:\r\n var_csv = csv.writer(var_f)\r\n header = [decimal_to_ternary(i) for i in range(27)]\r\n var_csv.writerow(header)\r\n for line in var_data_out:\r\n var_csv.writerow(line)\r\n\r\nwith open(inputDataName+'_item_output.txt','w') as var_f:\r\n var_csv = csv.writer(var_f)\r\n header = ['gesture','dist','shading','y']\r\n var_csv.writerow(header)\r\n for line in var_data_out:\r\n for i in range(0,27):\r\n if line[i]!='':\r\n dataNum = decimal_to_ternary(i)\r\n dataTuple = [dataNum[0],dataNum[1],dataNum[2],line[i]]\r\n var_csv.writerow(dataTuple)\r\n","sub_path":"SU17Data/pre-process.py","file_name":"pre-process.py","file_ext":"py","file_size_in_byte":4244,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"393187252","text":"import seaborn as sns\n\n\ndef discrete_qualitative_colors(n_colors: int = 6, reverse: bool = False):\n \"\"\"\n A discrete qualitative color palette\n\n Parameters\n ----------\n n_colors : int\n number of colors\n reverse : boolean\n reverse colors or not\n\n Returns\n -------\n list of RGB tuples\n \"\"\"\n\n return _qualitative_colors(n_colors=n_colors, reverse=reverse)\n\n\ndef _qualitative_colors(n_colors=3, as_cmap=False, reverse=False):\n qualitative = [\n \"#004B87\",\n \"#0D0B0C\",\n \"#B04A5A\",\n \"#61829CFF\",\n \"#B2AAA2\",\n \"#532026FF\",\n ]\n assert n_colors <= len(\n qualitative\n ), f\"color palette only has {len(qualitative)}\"\n if reverse:\n qualitative = qualitative[::-1]\n return sns.color_palette(qualitative, as_cmap=as_cmap, n_colors=n_colors)\n","sub_path":"palettes/qualitative.py","file_name":"qualitative.py","file_ext":"py","file_size_in_byte":846,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"81872052","text":"# Nolan Harris\n# Nph2tx\n\ndef mymap(func, lst):\n '''variable new_list makes a copy of the list to manipulate it, then adds the function to it and returns it'''\n new_list = [lst[:]]\n x = func\n comb = new_list.x\n return comb\n\n\ndef myreduce(func, lst):\n '''turns lst into a list'''\n\n other_list = [lst[:]]\n y = func\n combination = other_list.y\n\n if func in lst > 1:\n return combination * 
func\n\n\n\n\n\n\n\n","sub_path":"CS1110/CS1110/map_reduce.py/map_reduce.py","file_name":"map_reduce.py","file_ext":"py","file_size_in_byte":433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"2067626","text":"# Functio to find the maximal sum\n# This can be solved efficiently using dynamic programming\n# From the bottom up, find the maximal element and find corresponding maximal element on the above level and so on\n# untill you reach the tip of the pyramiddatetime A combination of a date and a time. Attributes: ()\n# You can read about a similar C# implementation here https://www.mathblog.dk/project-euler-18/\ndef findMaxSum(arr):\n\tfor i in range(0,len(arr)):\n\t\tarr[i]=arr[i].split()\n\n\tfor i in range(0,len(arr)):\n\t\tfor j in range(0, len(arr[i])):\n\t\t\tarr[i][j]=int(arr[i][j])\n\twhile(len(arr)>1):\n\t\tv=arr[-2]\n\t\tw=arr[-1]\n\t\tfor i in range(0,len(v)):\n\t\t\tv[i]+=max(w[i],w[i+1])\n\t\tarr.pop()\n\treturn arr[0][0]\n\nif __name__ == \"__main__\":\n\tn=int(input())\n\ttri = []\n\tfor i in range(n):\n\t\ttri.append(input())\n\tprint(findMaxSum(tri))","sub_path":"Question 6/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":818,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"174118081","text":"from collections import OrderedDict\r\nimport random\r\nfrom django_lazifier.utils.builtin_types.obj import Obj\r\nfrom django_lazifier.utils.builtin_types.str import Str\r\nfrom django_lazifier.utils.utils import log_exception\r\n\r\n\r\nclass Lst:\r\n @classmethod\r\n def get_random(cls, the_list: list, pop=False, default_value=None):\r\n \"\"\"\r\n Get one item at random in the specified list.\r\n\r\n :param the_list:\r\n :param pop:\r\n :param default_value:\r\n :return:\r\n \"\"\"\r\n if not the_list:\r\n return default_value\r\n\r\n length = len(the_list)\r\n rand_index = random.randint(0, length-1)\r\n\r\n if pop:\r\n return the_list.pop(rand_index)\r\n\r\n return the_list[rand_index]\r\n\r\n @classmethod\r\n def casefold(cls, str_list):\r\n \"\"\"\r\n Pass each string element through str.casefold()\r\n :param str_list:\r\n :return:\r\n \"\"\"\r\n return [str(x).casefold() for x in str_list]\r\n\r\n @classmethod\r\n def convert_to_int(cls, str_list):\r\n \"\"\"\r\n Convert a list of string into a list of int\r\n :param str_list: [\"1\", \"2.99\", \"0.11\"] => [1, 3, 0]\r\n :return: []\r\n \"\"\"\r\n if not str_list:\r\n return []\r\n\r\n int_list = []\r\n for s in str_list:\r\n val = Str.int_val(s, None)\r\n if val is not None:\r\n int_list.append(val)\r\n\r\n return int_list\r\n\r\n @classmethod\r\n def convert_to_str(cls, the_list):\r\n \"\"\"\r\n Convert a list of object into a list of string\r\n :return: []\r\n \"\"\"\r\n if not the_list:\r\n return []\r\n\r\n result = []\r\n for s in the_list:\r\n if s is not None:\r\n result.append(s.__str__())\r\n\r\n return result\r\n\r\n @classmethod\r\n def strip_string(cls, the_list, chars=None):\r\n \"\"\"\r\n Trim the list of strings.\r\n :param the_list:\r\n :param chars:\r\n :return:\r\n \"\"\"\r\n the_list = Lst.convert_to_str(the_list)\r\n return [elm.strip(chars) for elm in the_list]\r\n\r\n @classmethod\r\n def group_by(cls, the_list, group, none_value_label='None', flat=False):\r\n \"\"\"\r\n Group the list by the group specified.\r\n eg. 
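The map_reduce.py record above does not run as written: new_list.x, other_list.y, and the membership test reference attributes and semantics that do not exist. A hypothetical repair of what the two helpers appear to intend (one plausible reading, not the author's verified code):

from functools import reduce as _reduce

def mymap(func, lst):
    # Apply func to each element, returning a new list.
    return [func(x) for x in lst]

def myreduce(func, lst):
    # Fold the list left-to-right with a two-argument function.
    result = lst[0]
    for x in lst[1:]:
        result = func(result, x)
    return result

assert mymap(lambda x: x * 2, [1, 2, 3]) == [2, 4, 6]
assert myreduce(lambda a, b: a + b, [1, 2, 3, 4]) == 10 == _reduce(lambda a, b: a + b, [1, 2, 3, 4])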
Lst.group_by(seats, 'zone.name', 'do not belong to a zone')\r\n => { 'zone1': [seat1, seat2]\r\n 'zone2': [seat7, seat8]\r\n 'do not belong to a zone': [seat3, seat4, seat5]\r\n }\r\n\r\n :param the_list:\r\n :param group: {str|def} name of the attribute, support dot notation group_by(persons, 'contact.phone')\r\n :param none_value_label: the value of the column specified is None then use this label as the key.\r\n :param flat: if true only take the last item for each group and put it in the result dict\r\n :rtype: dict\r\n \"\"\"\r\n result = OrderedDict()\r\n for row in the_list:\r\n if callable(group):\r\n col_value = group(row)\r\n else:\r\n col_value = Obj.getattr(row, group, None)\r\n\r\n if col_value is None:\r\n col_value = none_value_label\r\n\r\n if not flat and col_value not in result:\r\n result[col_value] = []\r\n\r\n if flat:\r\n result[col_value] = row\r\n else:\r\n result[col_value].append(row)\r\n return result\r\n\r\n @classmethod\r\n def multi_group_by(cls, the_list, none_value_label, group_names: list):\r\n \"\"\"\r\n Provide a drilled down version of the data.\r\n eg. Lst.multi_group_by(sensors, _('Unassigned'), ['facility__id', 'zone__id'])\r\n\r\n return { facility_1 : [ {zone_1 : [ {sensor_1},\r\n {sensor_2} ],\r\n {zone_2 : [ {sensor_3},\r\n {sensor_4} ]\r\n\r\n :type the_list: list|QuerySet|ValuesQuerySet\r\n :param the_list: list, QuerySet or ValuesQuerySet\r\n :type none_value_label: str|None|object\r\n :param none_value_label: the value to use if the column value is None\r\n :param group_names: the list of columns to group by\r\n :return: List\r\n \"\"\"\r\n if type(group_names) == str:\r\n group_names = [group_names]\r\n\r\n if not isinstance(group_names, list):\r\n raise ValueError('The argument group_names must be a list of all the columns you want to group.')\r\n\r\n group_names = group_names.copy()\r\n if group_names:\r\n col = group_names.pop(0)\r\n result = Lst.group_by(the_list, col, none_value_label)\r\n if group_names:\r\n for col, rows in result.items():\r\n result[col] = Lst.multi_group_by(rows, none_value_label, group_names)\r\n\r\n return result\r\n return OrderedDict()\r\n\r\n @classmethod\r\n def tuple_multi_group_by(cls, the_list, none_value_label, group_names: list):\r\n \"\"\"\r\n Similarly to multi_group_by but instead of use the value of the specified columns\r\n as a key it combine all the keys together in one tuple as key.\r\n\r\n eg. 
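The group_by docstring above is the heart of the Lst helpers. A flat-dict analogue that shows the grouping shape (the real method resolves dotted paths like 'zone.name' through Obj.getattr and also accepts a callable; plain key access stands in here):

from collections import OrderedDict

def group_by(rows, key, none_label='None'):
    grouped = OrderedDict()
    for row in rows:
        k = row.get(key)
        grouped.setdefault(none_label if k is None else k, []).append(row)
    return grouped

seats = [{'zone': 'A', 'id': 1}, {'zone': 'A', 'id': 2}, {'zone': None, 'id': 3}]
assert list(group_by(seats, 'zone', 'unzoned')) == ['A', 'unzoned']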
sensors = Sensor.objects.values(**columns)\r\n Lst.tuple_multi_group_by(sensors, 'None', ['facility__id', 'zone__id'])\r\n\r\n return { (facility_1, zone_1): [ sensor1, sensor2 ],\r\n (facility_1, zone_2): [ sensor3 ],\r\n (facility_2, zone_3): [ sensor4 ])\r\n\r\n :type the_list: list|QuerySet|ValuesQuerySet\r\n :param the_list: list, QuerySet or ValuesQuerySet\r\n :param none_value_label: the value to use if the column value is None\r\n :param group_names: the list of columns to group by\r\n :return: List\r\n \"\"\"\r\n if type(group_names) == str:\r\n group_names = [group_names]\r\n\r\n if not isinstance(group_names, list):\r\n raise ValueError('The argument group_names must be a list of all the fields you want to group.')\r\n\r\n group_names = group_names.copy()\r\n if group_names:\r\n result = OrderedDict()\r\n first_grp_val = group_names.pop(0) # pop at the start\r\n first_group_by = Lst.group_by(the_list, first_grp_val, none_value_label)\r\n\r\n if group_names:\r\n for col, rows in first_group_by.items():\r\n tuple_list = Lst.tuple_multi_group_by(rows, none_value_label, group_names)\r\n for k, t in tuple_list.items():\r\n result[(col,) + k] = t\r\n else:\r\n for k, v in first_group_by.items():\r\n result[(k,)] = v\r\n return result\r\n\r\n return OrderedDict()\r\n\r\n @classmethod\r\n def all(cls, the_list, func, **kwargs):\r\n \"\"\"\r\n Return True if all is True, else False.\r\n Similar to all() but its accept a lambda.\r\n\r\n :param the_list:\r\n :param func: lambda that return bool\r\n :param kwargs: any additional params for func\r\n :return:\r\n \"\"\"\r\n for i in the_list:\r\n if not func(i, **kwargs):\r\n return False\r\n\r\n return True\r\n\r\n @classmethod\r\n def any(cls, the_list, func, **kwargs):\r\n \"\"\"\r\n Return True if any is True, else False.\r\n Similar to any() but its accept a lambda.\r\n\r\n :param the_list:\r\n :param func: lambda that return bool\r\n :param kwargs: any additional params for func\r\n :return:\r\n \"\"\"\r\n for i in the_list:\r\n if func(i, **kwargs):\r\n return True\r\n\r\n return False\r\n\r\n @classmethod\r\n def prep_select_optgroups(cls, the_list, opt_groups: list, value_attr, display_attr, none_value_label, sort_result=False):\r\n \"\"\"\r\n Prep list to be use as a choice for the ChoiceField\r\n\r\n eg. 
sensor_choices = Lst.prep_select_optgroups(sensors, ['facility.name', 'zone.name'],\r\n 'id', 'sensor_name', _('Unassigned Sensors'))\r\n\r\n :param the_list: ValueQuerySet, QuerySet or list\r\n :param opt_groups: the group column/attr name or index\r\n :param value_attr: the option value\r\n :param display_attr: the option display text\r\n :return:\r\n \"\"\"\r\n groups = Lst.tuple_multi_group_by(the_list, none_value_label, opt_groups)\r\n\r\n if groups:\r\n result = []\r\n for tp, arr in groups.items():\r\n og_header = ' > '.join(tp)\r\n og_list = []\r\n for row in arr:\r\n og_list.append((Obj.getattr(row, value_attr), Obj.getattr(row, display_attr),))\r\n result.append((og_header, tuple(og_list),))\r\n return tuple(result)\r\n\r\n if sort_result:\r\n return sorted(groups)\r\n return groups\r\n\r\n @classmethod\r\n def get_unique(cls, the_list, default_value=None, unique_attr=None):\r\n \"\"\"\r\n Get a list of unique values in the list, default_value is [] if default_value is set to None.\r\n\r\n :param the_list:\r\n :param default_value: if none value is []\r\n :param unique_attr: select your own unique attribute (in case when the object is unhashable\r\n or you want your own attr)\r\n :rtype list\r\n \"\"\"\r\n if default_value is None:\r\n default_value = []\r\n\r\n if not the_list:\r\n return default_value\r\n\r\n try:\r\n # Src: http://stackoverflow.com/questions/480214\r\n # /how-do-you-remove-duplicates-from-a-list-in-python-whilst-preserving-order\r\n # Src: http://www.peterbe.com/plog/uniqifiers-benchmark\r\n if unique_attr is None:\r\n added_list = set()\r\n add_to_added_list = added_list.add # this static ref for performance reason\r\n return [x for x in the_list if not (x in added_list or add_to_added_list(x))]\r\n\r\n result = []\r\n existed_item = {} # dict is much faster than list when checking existence of a key\r\n for itm in the_list:\r\n key = Obj.getattr(itm, unique_attr)\r\n if key not in existed_item:\r\n result.append(itm)\r\n existed_item[key] = None\r\n return result\r\n except Exception as ex:\r\n log_exception(ex)\r\n return default_value\r\n\r\n @classmethod\r\n def reverse(cls, the_list: list):\r\n \"\"\"\r\n Reverse the order of the items in the list.\r\n :param the_list:\r\n :return:\r\n \"\"\"\r\n if not list:\r\n return []\r\n # return list(reversed(the_list))\r\n return the_list[::-1]\r\n\r\n @classmethod\r\n def contains_all(cls, the_list, *args):\r\n \"\"\"\r\n Check to see if the_list contains all of the args\r\n\r\n :param the_list: the haystack\r\n :param args: the needle\r\n :return:\r\n \"\"\"\r\n return Lst.all(args, lambda x: x in the_list)\r\n\r\n @classmethod\r\n def contains_any(cls, the_list, *args):\r\n \"\"\"\r\n Check to see if the_list contains any of the args\r\n\r\n :param the_list: the haystack\r\n :param args: the needle\r\n :return:\r\n \"\"\"\r\n return Lst.any(args, lambda x: x in the_list)\r\n\r\n @classmethod\r\n def unordered_list_equals(cls, lst_a, lst_b):\r\n if not isinstance(lst_a, list) or not isinstance(lst_b, list):\r\n return False\r\n\r\n if lst_a == lst_b:\r\n return True\r\n\r\n if len(lst_a) != len(lst_b):\r\n return False\r\n\r\n return set(lst_a) == set(lst_b)\r\n\r\n @classmethod\r\n def str_join(cls, lst, separator=', ', value_attr: str=None):\r\n if not lst:\r\n return ''\r\n\r\n str_list = []\r\n for itm in lst:\r\n if value_attr is not None:\r\n itm = Obj.getattr(itm, value_attr)\r\n itm = str(itm)\r\n str_list.append(itm)\r\n return separator.join(str_list)\r\n\r\n @classmethod\r\n def chunks(cls, lst, 
chunk_size, pad_with=None):\r\n \"\"\"\r\n Split the list into chunks.\r\n eg. [1, 2, 3, 4, 5] (chunk == 2) => result [ [1, 2], [3, 4], [5] ]\r\n \"\"\"\r\n result = []\r\n for i in range(0, len(lst), chunk_size):\r\n result.append(lst[i:i + chunk_size])\r\n\r\n if result and pad_with is not None and len(result[-1]) != chunk_size:\r\n result[-1] = result[-1] + ([pad_with] * (chunk_size - len(result[-1])))\r\n return result\r\n\r\n\r\n @classmethod\r\n def get_first(cls, the_list, default_value=None):\r\n \"\"\"\r\n Get the first item of the list.\r\n\r\n :param the_list:\r\n :param default_value:\r\n :return:\r\n \"\"\"\r\n if the_list:\r\n for itm in the_list:\r\n return itm\r\n return default_value\r\n\r\n @classmethod\r\n def map_to(cls, the_list, attribs: list, default_value=None, execute_callable=True):\r\n \"\"\"\r\n Go through the list and extract the specified attributes\r\n\r\n :param the_list:\r\n :param attribs:\r\n :type default_value: object|dict\r\n :param default_value: either a value for all fields default or pass a dict to supply specific default value.\r\n :return: List of value lists\r\n \"\"\"\r\n result = []\r\n if not the_list:\r\n return result\r\n\r\n for itm in the_list:\r\n row = []\r\n for att in attribs:\r\n specific_default = default_value\r\n if isinstance(default_value, dict) and att in default_value:\r\n specific_default = default_value.get(att, default_value)\r\n value = Obj.getattr(itm, att, specific_default, execute_callable=execute_callable)\r\n row.append(value)\r\n result.append(row)\r\n return result\r\n","sub_path":"django_lazifier/utils/builtin_types/list.py","file_name":"list.py","file_ext":"py","file_size_in_byte":13996,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"3855358","text":"\"\"\"\n-------------------------------------------------------------------\n-- Title: Analysis of Coronavirus related Tweets using TwitterAPI\n-- File: TwitterDataRetrieval.py\n-- Purpose: Script used to retrieve the tweets through TwitterAPI.\n-- Author: Georgios Spyrou\n-- Date: 01/03/2020\n-------------------------------------------------------------------\n\"\"\"\n\n# Data retrival from Twitter API\n\n# Import dependencies\nimport os\n\nimport json\nfrom datetime import datetime, timedelta\n\n# Twitter related\nfrom searchtweets import load_credentials\nfrom searchtweets import gen_rule_payload\nfrom searchtweets import ResultStream\n\n# Set up the project environment\n\n# Secure location of the required keys to connect to the API\n# This config also contains the search query\njson_loc = 'C:\\\\Users\\\\george\\\\Desktop\\\\Twitter_Project\\\\Twitter\\\\twitter_config.json'\n\nwith open(json_loc) as json_file:\n configFile = json.load(json_file)\n\n# Project folder location and keys\nos.chdir(configFile[\"project_directory\"])\n\n# Import the custom functions that we will use to retrieve and analyse\n# the data, and use the API to save the data to a .jsonl file.\n\nimport twitterCustomFunc as twf\n\ntwitter_keys_loc = configFile[\"keys\"]\n\n# Load the credentials to get access to the API\npremium_search_args = load_credentials(twitter_keys_loc,\n yaml_key=\"search_tweets_api\",\n env_overwrite=False)\nprint(premium_search_args)\n\n\n# Set tweet extraction period and create a list of days of interest\nfromDate = \"2020-03-12\"\ntoDate = \"2020-03-18\"\n\ndaysList = [fromDate]\n\nwhile fromDate != toDate:\n date = datetime.strptime(fromDate, \"%Y-%m-%d\")\n mod_date = date + timedelta(days=2)\n 
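Lst.chunks pads the final chunk only when pad_with is supplied. The same algorithm stated standalone, with quick checks of both behaviours:

def chunks(lst, chunk_size, pad_with=None):
    # Same logic as Lst.chunks above, restated for a quick check.
    out = [lst[i:i + chunk_size] for i in range(0, len(lst), chunk_size)]
    if out and pad_with is not None and len(out[-1]) != chunk_size:
        out[-1] = out[-1] + [pad_with] * (chunk_size - len(out[-1]))
    return out

assert chunks([1, 2, 3, 4, 5], 2) == [[1, 2], [3, 4], [5]]
assert chunks([1, 2, 3, 4, 5], 2, pad_with=0) == [[1, 2], [3, 4], [5, 0]]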
incrementedDay = datetime.strftime(mod_date, \"%Y-%m-%d\")\n daysList.append(incrementedDay)\n \n fromDate = incrementedDay\n\n# Retrieve the data for each day from the API\nfor day in daysList:\n \n dayNhourList = twf.createDateTimeFrame(day, hourSep=2)\n \n for hs in dayNhourList:\n fromDate = hs[0]\n toDate = hs[1]\n # Create the searching rule for the stream\n rule = gen_rule_payload(pt_rule=configFile['search_query'],\n from_date=fromDate,\n to_date=toDate ,\n results_per_call = 100)\n\n # Set up the stream\n rs = ResultStream(rule_payload=rule,\n max_results=100,\n **premium_search_args)\n\n # Create a .jsonl with the results of the Stream query\n #file_date = datetime.now().strftime('%Y_%m_%d_%H_%M')\n file_date = '_'.join(hs).replace(' ', '').replace(':','')\n filename = os.path.join(configFile[\"outputFiles\"],\n f'twitter_30day_results_{file_date}.jsonl')\n \n # Write the data received from the API to a file\n with open(filename, 'a', encoding='utf-8') as f:\n cntr = 0\n for tweet in rs.stream():\n cntr += 1\n if cntr % 100 == 0:\n n_str, cr_date = str(cntr), tweet['created_at']\n print(f'\\n {n_str}: {cr_date}')\n json.dump(tweet, f)\n f.write('\\n')\n print(f'Created file {f}:')\n","sub_path":"Code/TwitterDataRetrieval.py","file_name":"TwitterDataRetrieval.py","file_ext":"py","file_size_in_byte":3323,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"405848420","text":"from django.test import TestCase\nfrom django.http import HttpRequest\nfrom django.shortcuts import render\nfrom django.template.loader import render_to_string\n\n#Create a simple model\nfrom django.db import models\nclass TestModel(models.Model):\n title = models.CharField(max_length=250)\n\n#Create a simple form\nfrom django.forms import ModelForm\nclass TestForm(ModelForm):\n class Meta:\n model = TestModel\n exclude = []\n\ndef test_view(request):\n form = TestForm()\n return render(request,\n 'custom_tests/bootstrap_form.html',\n {'form': form})\n\n# GET /record\nclass BootstrapFormFilterTest(TestCase):\n\n def setUp(self):\n pass\n\n def tearDown(self):\n pass\n\n\n def test_test_view_returns_correct_html(self):\n request = HttpRequest()\n response = test_view(request)\n expected_html = render_to_string('custom_tests/bootstrap_form.html', {'form': TestForm()})\n self.assertEqual(response.content.decode(), expected_html)\n\n\n def test_add_class_inserts_correct_class(self):\n request = HttpRequest()\n response = test_view(request)\n expected_html = render_to_string('custom_tests/bootstrap_form.html', {'form': TestForm()})\n self.assertEqual(expected_html, response.content.decode())\n self.assertInHTML('',\n response.content.decode()\n )\n\n def test_add_attributes_inserts_correct_attr(self):\n request = HttpRequest()\n response = test_view(request)\n expected_html = render_to_string('custom_tests/bootstrap_form.html', {'form': TestForm()})\n self.assertEqual(expected_html, response.content.decode())\n self.assertInHTML('',\n response.content.decode()\n )\n\n","sub_path":"shellac/tests/custom/test_bootstrapForm_unit.py","file_name":"test_bootstrapForm_unit.py","file_ext":"py","file_size_in_byte":1935,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"36016375","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Mar 22 10:12:12 2019\n\n@author: Osman\n\n\ndataset : https://www.kaggle.com/tongpython/cat-and-dog\n\nas result you might find val_acc = 0.81 , loss = 0.44 or higher or a bit 
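The retrieval loop in TwitterDataRetrieval.py splits each day into two-hour (from, to) windows via the project's own twf.createDateTimeFrame helper. A sketch of the shape those pairs presumably take (the helper's exact timestamp format is an assumption):

from datetime import datetime, timedelta

def two_hour_windows(day, hour_sep=2):
    # Build consecutive (start, end) timestamp pairs covering one day.
    start = datetime.strptime(day, '%Y-%m-%d')
    windows = []
    for h in range(0, 24, hour_sep):
        a = start + timedelta(hours=h)
        b = a + timedelta(hours=hour_sep)
        windows.append((a.strftime('%Y-%m-%d %H:%M'), b.strftime('%Y-%m-%d %H:%M')))
    return windows

print(two_hour_windows('2020-03-12')[:2])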
less.\n\n\n\"\"\"\n\n\nimport keras\nimport pandas as pd\nimport numpy as np\n\n\n## you have to change these values\n# these are the source code of cats and dogs for train and test on your computer. these are for me, change them for you.\ndataset_source_on_your_computer_for_train = r'C:\\Users\\Osman\\Desktop\\ann_cats_dogs\\training_set'\ndataset_source_on_your_computer_for_test = r'C:\\Users\\Osman\\Desktop\\ann_cats_dogs\\test_set'\n#\n\n\n# initialising the cnn\n\ncnn_model = keras.Sequential()\n\n# step 1 - convolution\n\ncnn_model.add(keras.layers.Conv2D(32,(3,3),input_shape = (64,64,3),activation='relu'))\n\n# step 2 - pooling\n\ncnn_model.add(keras.layers.MaxPooling2D())\n\n# step n - add how much layers you want.\n\ncnn_model.add(keras.layers.Conv2D(32,(3,3),activation='relu'))\n\n# step n - pooling\n\ncnn_model.add(keras.layers.MaxPooling2D())\n\n\n\n# step 3 - flettining\n\ncnn_model.add(keras.layers.Flatten())\n\n# step 4 - build ann\n\ncnn_model.add(keras.layers.Dense(128 , activation='relu'))\ncnn_model.add(keras.layers.Dense(1 , activation='sigmoid'))\n\ncnn_model.compile('adam',loss = keras.losses.binary_crossentropy,metrics=['acc'])\n\n\n# fitting the cnn images.\nfrom keras.preprocessing.image import ImageDataGenerator\n\n\ntrain_datagen = ImageDataGenerator(\n rescale=1./255,\n shear_range=0.2,\n zoom_range=0.2,\n horizontal_flip=True)\n\ntest_datagen = ImageDataGenerator(rescale=1./255)\n\ntraining_set = train_datagen.flow_from_directory(\n dataset_source_on_your_computer_for_train,\n target_size=(64, 64),\n batch_size=32,\n class_mode='binary')\n\ntest_set = test_datagen.flow_from_directory(\n dataset_source_on_your_computer_for_test,\n target_size=(64, 64),\n batch_size=32,\n class_mode='binary')\n\ncnn_model.fit_generator(training_set,\n steps_per_epoch=8000,\n epochs=25,\n validation_data=test_set,\n validation_steps=2023)\n\n\n\n\n\n\n","sub_path":"7-Kedi Köpek Tanıma/code.py","file_name":"code.py","file_ext":"py","file_size_in_byte":2181,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"206147476","text":"import gurobipy as gp\nimport openpyxl\nfrom gurobipy import GRB\nimport pandas as pa\n\n\nworkbook2 = openpyxl.load_workbook(\"DataFile3.xlsx\")\nsheets=[\"Region1\",\"Region2\",\"Region3\"]\ndef getData():\n capacity=[]\n PatientData=[]\n resreq =[]\n for sheet in sheets:\n sheet_read= workbook2[sheet]\n capacity_range = sheet_read[\"B13:G13\"]\n for cell in capacity_range:\n capacity.append([i.value for i in cell])\n x_range = sheet_read[\"B3:G12\"]\n PatientData.append( [[cell.value if cell.value is not None else 0 for cell in row] for row in x_range])\n resreq.append([[cell.value if cell.value is not None else 0 for cell in row] for row in sheet_read[\"M3:N22\"]])\n # Ndays = len(capacity[0])\n #capacity = [capacity[i:i + Ndays] for i in range(0, len(capacity), Ndays)]\n print(\"X\")\n print(PatientData)\n print(\"R`equested Resources\")\n print(resreq)\n print(\"Capacity\")\n print(capacity)\n return capacity, PatientData,resreq\n\ndef periodic_problem(NPatients, Ndays, NResources, Nregions):\n #sets\n model = gp.Model(\"OptimsationModel\")\n patients = range(NPatients)\n days = range(Ndays)\n resources=range(NResources)\n regions=range (Nregions)\n #parameters\n capacity,x, resreq= getData()\n regionsSet= {\n 0: [1, 2],\n 1: [0,2],\n 2: [0,1]\n }\n\n # Decision Variables\n y=model.addVars(regions,patients,lb=0,vtype=GRB.BINARY,name= \"OwnRegionPatientTaken\" )\n\n 
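One caveat on the fit_generator call in the cats-and-dogs record above: with ImageDataGenerator batches of 32, steps_per_epoch counts batches, not images, so 8000 steps walks through 256,000 images per epoch. A common correction, assuming the Kaggle set's usual 8000 training / 2000 test images (counts are an assumption):

train_images, test_images, batch_size = 8000, 2000, 32
steps_per_epoch = train_images // batch_size   # 250 batches per epoch
validation_steps = test_images // batch_size   # 62 validation batches
print(steps_per_epoch, validation_steps)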
z=model.addVars(regions,regions,patients,lb=0,vtype=GRB.BINARY,name=\"OtherRegionPatientTaken\" )\n\n\n\n\n # Objective\n\n totalPatiens = gp.quicksum(y)+gp.quicksum(z)\n model.setObjective(totalPatiens, GRB.MAXIMIZE)\n\n # Contranints\n #for every region, patient taken and transffered patient taken should be less than its capacity\n for r in regionsSet:\n for i in days:\n\n model.addConstr(gp.quicksum(x[r][m][i]*y[r,m] for m in patients)+gp.quicksum(gp.quicksum((x[r2][m][i]*(1-y[r2,m]))*z[r,r2,m] for m in patients) for r2 in regionsSet[r])<=capacity[r][i])\n\n # transferred patient of region can only go to one region\n for r in regionsSet:\n for i in days:\n for m in patients:\n model.addConstr(gp.quicksum(z[r2,r,m] for r2 in regionsSet[r] )<=1)\n #ownpatients taken should not be less than the transferredpatient taken\n for r in regionsSet:\n\n model.addConstr(gp.quicksum(y[r,m] for m in patients)>=gp.quicksum( gp.quicksum(z[r2,r,m]for m in patients)for r2 in regionsSet[r] ))\n\n for r in regionsSet:\n for m in patients:\n model.addConstr(z[r,r,m]==0)\n\n for r in regionsSet:\n for i in days:\n for m in patients:\n model.addConstr(x[r][m][i] >= y[r,m])\n model.addConstr(x[r][m][i] >=gp.quicksum(z[r2,r,m] for r2 in regionsSet[r]))\n\n for r in regionsSet:\n for i in days:\n for m in patients:\n model.addConstr(y[r,m] + gp.quicksum(z[r2, r, m] for r2 in regionsSet[r])<=x[r][m][i])\n\n\n # print(remaingCap)\n\n model.update()\n model.optimize()\n\n print(\"\\nPatient Assignments:\")\n for var in y.values():\n print(f\"{var.VarName} = {var.x}\")\n for var in z.values():\n print(f\"{var.VarName} = {var.x}\")\n\n print(totalPatiens)\nperiodic_problem(10, 5, 2, 3)\n\n","sub_path":"Gurobi/ProposalModel5.py","file_name":"ProposalModel5.py","file_ext":"py","file_size_in_byte":3334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"463173189","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Mar 31 01:01:00 2021\r\n\r\n@author: Samsung\r\n\"\"\"\r\n\r\n#arboles.py\r\n\r\n\r\n#EJERCICIOS DE CLASE 04 AL FINAL DEL CODIGO.\r\n\r\n#Ejercicio 3.18: Lectura de los árboles de un parque\r\n\r\n\r\nimport csv\r\ndef leer_parque (nombre_archivo, parque):\r\n lista_arboles = []\r\n f = open(nombre_archivo,'rt', encoding=\"utf-8\")\r\n rows = csv.reader(f)\r\n headers = next(rows)\r\n \r\n for arbol in rows:\r\n dic_arboles = dict(zip(headers, arbol))\r\n \r\n if dic_arboles ['espacio_ve'] == parque:\r\n lista_arboles.append(dic_arboles)\r\n \r\n return lista_arboles\r\n\r\nlista_arboles = leer_parque('../data/arbolado-en-espacios-verdes.csv', 'GENERAL PAZ')\r\nprint(lista_arboles)\r\n\r\n#%%\r\n#Ejercicio 3.19: Determinar las especies en un parque\r\n\r\ndef especies (lista_arboles):\r\n especie = []\r\n for arbol in lista_arboles:\r\n especie.append(arbol ['nombre_gen']) #primero creo una lista con todos los nombres de especieas de cada arbol. 
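leer_parque in the arboles.py record builds header/value dicts by zipping csv.reader rows; csv.DictReader does the same in one step. An equivalent sketch (still dependent on the same CSV being present):

import csv

def leer_parque(nombre_archivo, parque):
    # DictReader yields one dict per row, keyed by the header line.
    with open(nombre_archivo, 'rt', encoding='utf-8') as f:
        return [row for row in csv.DictReader(f) if row['espacio_ve'] == parque]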
aparecen repetidos\r\n \r\n \r\n return set(especie) #antes de pedir q me devuelva la lista le digo q me elimine los duplicados\r\n\r\n\r\nespecie = especies(lista_arboles)\r\nprint(especie)\r\n\r\n\r\n#%%\r\n#Ejercicio 3.20: Contar ejemplares por especie\r\ndef contar_ejemplares (lista_arboles):\r\n from collections import Counter\r\n ejemplares = Counter()\r\n for arbol in lista_arboles:\r\n ejemplares[arbol['nombre_gen']] += 1\r\n \r\n return ejemplares\r\n\r\nejemplares = contar_ejemplares(lista_arboles)\r\nprint (ejemplares)\r\n \r\n\r\n#%%\r\n# GENERAL PAZ\r\n\r\nlista_arboles = leer_parque('../data/arbolado-en-espacios-verdes.csv', 'GENERAL PAZ')\r\n\r\nejemplares = contar_ejemplares(lista_arboles)\r\n\r\n\r\nranking_GP = ejemplares.most_common(5)\r\n\r\nprint('RANKING CANTIDAD: ', ranking_GP) \r\n\r\n\r\n\r\n\r\n# LOS ANDES\r\nlista_arboles = leer_parque('../data/arbolado-en-espacios-verdes.csv', 'ANDES, LOS')\r\n\r\nejemplares = contar_ejemplares(lista_arboles)\r\n\r\n\r\n\r\nranking = ejemplares.most_common(5)\r\n\r\nprint('RANKING CANTIDAD: ', ranking) \r\n\r\n\r\n\r\n# CENTENARIO\r\n\r\nlista_arboles = leer_parque('../data/arbolado-en-espacios-verdes.csv', 'CENTENARIO')\r\n\r\nejemplares = contar_ejemplares(lista_arboles)\r\n\r\n\r\nranking = ejemplares.most_common(5)\r\nprint('RANKING CANTIDAD : ', ranking)\r\n\r\n\r\n#%%\r\n\r\n#Ejercicio 3.21: Alturas de una especie en una lista\r\n# GENERAL PAZ\r\n\r\n\r\n\r\ndef obtener_alturas(lista_arboles, especie):\r\n alturas = []\r\n for arbol in lista_arboles:\r\n if arbol ['nombre_com'] == especie:\r\n alturas.append(float(arbol['altura_tot']))\r\n return alturas\r\n\r\n#GENERAL PAZ\r\nlista_arboles = leer_parque('../data/arbolado-en-espacios-verdes.csv', 'GENERAL PAZ')\r\nalturasJ = obtener_alturas(lista_arboles, 'Jacarandá')\r\n\r\nprint(f' GENERAL PAZ ---> MAXIMO: {max(alturasJ)} PROMEDIO: {sum(alturasJ) /len(alturasJ)} ')\r\n\r\n# LOS ANDES\r\nlista_arboles = leer_parque('../data/arbolado-en-espacios-verdes.csv', 'ANDES, LOS')\r\nalturasJ = obtener_alturas(lista_arboles, 'Jacarandá')\r\nprint(f' LOS ANDES ---> MAXIMO: {max(alturasJ)} PROMEDIO: {sum(alturasJ) /len(alturasJ)} ')\r\n\r\n\r\n# CENTENARIO\r\n\r\nlista_arboles = leer_parque('../data/arbolado-en-espacios-verdes.csv', 'CENTENARIO')\r\nalturasJ = obtener_alturas(lista_arboles, 'Jacarandá')\r\nprint(f' CENTENARIO ---> MAXIMO: {max(alturasJ)} PROMEDIO: {sum(alturasJ) /len(alturasJ)} ')\r\n\r\n\r\n\r\n#%%\r\n\r\n#Ejercicio 3.22: Inclinaciones por especie de una lista\r\n\r\ndef obtener_inclinaciones(lista_arboles, especie):\r\n inclinaciones = []\r\n for arbol in lista_arboles:\r\n if arbol ['nombre_com'] == especie:\r\n inclinaciones.append(float(arbol['inclinacio']))\r\n return inclinaciones\r\n\r\nlista_arboles = leer_parque('data/arbolado-en-espacios-verdes.csv', 'GENERAL PAZ')\r\ninclinacionesJ = obtener_inclinaciones(lista_arboles, 'Jacarandá')\r\nprint(inclinacionesJ)\r\n\r\n#%%\r\n#Ejercicio 4.15: Lectura de todos los árboles\r\n\r\nimport csv\r\ndef leer_arboles(nombre_archivo):\r\n f = open('../data/arbolado-en-espacios-verdes.csv','rt', encoding=\"utf-8\")\r\n rows = csv.reader(f)\r\n \r\n headers = next(rows)\r\n types = [float, float, int, int, int, int, int, str, str, str, str, str, str,str,str, float, float]\r\n \r\n \r\n row = next(rows)\r\n \r\n \r\n arboleda = [{name:func(val) for name, func, val in zip (headers, types, row)} for row in rows]\r\n return arboleda\r\n\r\narboleda = 
leer_arboles('../data/arbolado-en-espacios-verdes.csv')\r\nprint(arboleda)\r\n#%%\r\n#Ejercicio 4.16: Lista de altos de Jacarandá\r\naltura_jacaranda = [float(arbol['altura_tot']) for arbol in arboleda if arbol['nombre_com'] == 'Jacarandá']\r\n\r\n#%%\r\n#Ejercicio 4.17: Lista de altos y diámetros de Jacarandá\r\n\r\nalt_diam = [(float(arbol['altura_tot']), float(arbol['diametro'])) for arbol in arboleda if arbol['nombre_com'] == 'Jacarandá']\r\n\r\n\r\n#%%\r\n#Ejercicio 4.18: Diccionario con medidas\r\n\r\ndef medidas_de_especies(especies, arboleda):\r\n \r\n pop = [[(float(arbol['altura_tot']), float(arbol['diametro']))for arbol in arboleda if arbol['nombre_com'] == especie] for especie in especies]\r\n \r\n dic = {nombre:valor for nombre, valor in zip(especies,pop)} \r\n return dic\r\n\r\n \r\ndic_medidas = medidas_de_especies(['Eucalipto', 'Palo borracho rosado', 'Jacarandá'], arboleda)\r\nprint(dic_medidas)\r\n\r\n\r\n#%%\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\n\r\n#Ejercicio 5.24: Histograma de altos de Jacarandás #Funcion del ej 4.16\r\nplt.hist(altura_jacaranda, bins = 50)\r\n\r\n\r\n#%%\r\n#Ejercicio 5.25: Scatterplot (diámetro vs alto) de Jacarandás #Func del ej 4.17\r\n\r\nalt_diam = np.array(alt_diam)#Convierto en un ndarray\r\n\r\nplt.scatter(alt_diam[:,1], alt_diam[:,0], alpha = 0.3, c = 'g') # x = diam, y = altura\r\n\r\nplt.xlabel(\"diametro (cm)\")\r\nplt.ylabel(\"alto (m)\")\r\nplt.title(\"Relación diámetro-alto para Jacarandás\")\r\n\r\n\r\n\r\n\r\n\r\n#%%\r\n#Ejercicio 5.26: Scatterplot para diferentes especies\r\neucalipto = np.array(dic_medidas['Eucalipto'])\r\npalo = np.array(dic_medidas['Palo borracho rosado'])\r\njaca = np.array(dic_medidas['Jacarandá'])\r\n\r\ndef plot_eucalipto():\r\n plt.scatter(eucalipto[:,1], eucalipto[:,0], alpha = 0.3) # x = diam, y = altura\r\n plt.xlabel(\"diametro (cm)\")\r\n plt.ylabel(\"alto (m)\")\r\n plt.title(\"Relación diámetro-alto para Eucalipto\")\r\n \r\n#%%\r\ndef plot_palo():\r\n plt.scatter(palo[:,1],palo[:,0], alpha = 0.3) # x = diam, y = altura\r\n plt.xlabel(\"diametro (cm)\")\r\n plt.ylabel(\"alto (m)\")\r\n plt.title(\"Relación diámetro-alto para Palo borracho rosado\")\r\n \r\n#%%\r\ndef plot_jacaranda():\r\n plt.scatter(jaca[:,1], jaca[:,0], alpha = 0.3) # x = diam, y = altura\r\n plt.xlabel(\"diametro (cm)\")\r\n plt.ylabel(\"alto (m)\")\r\n plt.title(\"Relación diámetro-alto para Jacarandás\")\r\n ","sub_path":"05_Random_Plt_Dbg/arboles.py","file_name":"arboles.py","file_ext":"py","file_size_in_byte":6713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"226584349","text":"import numpy as np\nimport pandas as pd\nfrom scipy.spatial.distance import euclidean\nfrom fastdtw import fastdtw\nfrom dtw import dtw\nfrom statistics import median\n\n\n\n\ndef dtw_val_gen(sub_section1,sub_section2,dt):\n #print(\"dtw val gen start\")\n if (dt == 0): #Normal DTW\n x=np.array(sub_section1).reshape(-1, 1)\n y=np.array(sub_section2).reshape(-1, 1)\n euclidean_norm = lambda x, y: np.abs(x - y)\n dtw_value, cost_matrix, acc_cost_matrix, path = dtw(x, y, dist=euclidean_norm)\n\n else: #Fast DTW\n x = np.array(sub_section1)\n y = np.array(sub_section2)\n dtw_value, path = fastdtw(x, y, dist=euclidean)\n return dtw_value\n\n\n\n\ndef dtw_rank_gen(dtw_temp):\n \n #med=(dtw_temp['dtw_value'] ).tolist()\n #print(dtw_temp['dtw_value'])\n #if(len(dtw_temp)> 5) :\n #dtw_temp = dtw_temp[dtw_temp['dtw_value'] < median(med) ] #median(med)\n \n dtw_temp= 
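Note that leer_arboles above executes a stray row = next(rows) after reading the header, which silently discards the first data row before the comprehension runs. A tightened version with the same 17-entry type list:

import csv

def leer_arboles(nombre_archivo):
    with open(nombre_archivo, 'rt', encoding='utf-8') as f:
        rows = csv.reader(f)
        headers = next(rows)
        types = [float, float, int, int, int, int, int] + [str] * 8 + [float, float]
        # No extra next(rows) here, so the first data row is kept.
        return [{h: fn(v) for h, fn, v in zip(headers, types, row)} for row in rows]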
dtw_temp.sort_values(by=['dtw_value'])\n #print(dtw_temp['dtw_value'])\n rank_list=[]\n for m in range(1, len(dtw_temp)+1):\n rank_list.append(m)\n dtw_temp.insert(loc=5, column='ranks', value=rank_list)\n \n return dtw_temp\n\n\n\n\"\"\"------------- Y_Alphabetize ------------- \"\"\"\ndef alphabetize_ts(sub_section,y_alpha_size):\n y_alphabets = get_y_alphabets(y_alpha_size)\n mean_val = x_distrubted_values(sub_section)\n y_alpha_val = min(y_alphabets, key=lambda x:abs(x-mean_val))\n y_alpha_idx = y_alphabets.index(y_alpha_val)\n curr_word = index_to_letter(y_alpha_idx)\n\n return(curr_word)\n\n\"\"\"------------- index to letter ------------- \"\"\"\n\n\ndef index_to_letter(idx):\n \"\"\"Convert a numerical index to a char.\"\"\"\n if 0 <= idx < 20:\n return chr(97 + idx)\n else:\n raise ValueError('A wrong idx value supplied.')\n\n\n\"\"\"------------- X-axis Distribution ------------- \"\"\"\n\ndef x_distrubted_values(series):\n mean = np.mean(series)\n median = sorted(series)[len(series) // 2]\n return mean\n\n\n\"\"\"------------- Normalization ------------- \"\"\"\n\ndef normalize(x):\n epsilon = 1e-6\n X = np.asanyarray(x)\n if np.nanstd(X) < epsilon:\n res = []\n for entry in X:\n if not np.isnan(entry):\n res.append(0)\n else:\n res.append(np.nan)\n return res\n return (X - np.nanmean(X)) / np.nanstd(X)\n\ndef normal_distribution(x):\n x = (x-min(x))/(max(x)-min(x))\n return x\n\n\"\"\"------------- Y-axis Distribution ------------- \"\"\"\ndef break_points_gaussian(size):\n options = {\n 3: np.array([ -0.43, 0.43]),\n 4: np.array([ -0.67, 0, 0.67]),\n 5: np.array([ -0.84, -0.25, 0.25, 0.84]),\n 6: np.array([ -0.97, -0.43, 0, 0.43, 0.97]),\n 7: np.array([ -1.07, -0.57, -0.18, 0.18, 0.57, 1.07]),\n 8: np.array([ -1.15, -0.67, -0.32, 0, 0.32, 0.67, 1.15]),\n 9: np.array([ -1.22, -0.76, -0.43, -0.14, 0.14, 0.43, 0.76, 1.22]),\n 10: np.array([ -1.28, -0.84, -0.52, -0.25, 0, 0.25, 0.52, 0.84, 1.28]),\n 11: np.array([ -1.34, -0.91, -0.6, -0.35, -0.11, 0.11, 0.35, 0.6, 0.91, 1.34]),\n 12: np.array([ -1.38, -0.97, -0.67, -0.43, -0.21, 0, 0.21, 0.43, 0.67, 0.97, 1.38]),\n 13: np.array([ -1.43, -1.02, -0.74, -0.5, -0.29, -0.1, 0.1, 0.29, 0.5, 0.74, 1.02, 1.43]),\n 14: np.array([ -1.47, -1.07, -0.79, -0.57, -0.37, -0.18, 0, 0.18, 0.37, 0.57, 0.79, 1.07, 1.47]),\n 15: np.array([ -1.5, -1.11, -0.84, -0.62, -0.43, -0.25, -0.08, 0.08, 0.25, 0.43, 0.62, 0.84, 1.11, 1.5]),\n 16: np.array([ -1.53, -1.15, -0.89, -0.67, -0.49, -0.32, -0.16, 0, 0.16, 0.32, 0.49, 0.67, 0.89, 1.15, 1.53]),\n 17: np.array([ -1.56, -1.19, -0.93, -0.72, -0.54, -0.38, -0.22, -0.07, 0.07, 0.22, 0.38, 0.54, 0.72, 0.93, 1.19, 1.56]),\n 18: np.array([ -1.59, -1.22, -0.97, -0.76, -0.59, -0.43, -0.28, -0.14, 0, 0.14, 0.28, 0.43, 0.59, 0.76, 0.97, 1.22, 1.59]),\n 19: np.array([ -1.62, -1.25, -1, -0.8, -0.63, -0.48, -0.34, -0.2, -0.07, 0.07, 0.2, 0.34, 0.48, 0.63, 0.8, 1, 1.25, 1.62]),\n 20: np.array([ -1.64, -1.28, -1.04, -0.84, -0.67, -0.52, -0.39, -0.25, -0.13, 0, 0.13, 0.25, 0.39, 0.52, 0.67, 0.84, 1.04, 1.28, 1.64]),\n }\n\n return options[size]\n\n\n\"\"\"------------- Get y_alphabets ------------- \"\"\"\n\ndef get_y_alphabets(y_alpha_size):\n y_alpha_size\n #y_alphabets = break_points_quantiles(y_alphabet_size).tolist()\n y_alphabets = break_points_gaussian(y_alpha_size).tolist()\n return y_alphabets\n\n\n\n\"\"\"------------- Hamming Distance ------------- \"\"\"\ndef hamming_distance1(string1, string2):\n distance = 0\n L = len(string1)\n for i in range(L):\n if string1[i] != string2[i]:\n distance += 1\n return 
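alphabetize_ts above maps a window's mean onto the nearest Gaussian breakpoint and then to a letter. The lookup in miniature for the 3-symbol alphabet (mirroring the code as written, which picks the nearest breakpoint rather than an interval):

breakpoints = [-0.43, 0.43]            # break_points_gaussian(3) from the record above
window = [0.5, 0.6, 0.4]
mean_val = sum(window) / len(window)   # x_distrubted_values returns the mean
idx = min(range(len(breakpoints)), key=lambda i: abs(breakpoints[i] - mean_val))
print(chr(97 + idx))                   # index_to_letter(1) -> 'b'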
distance\n\n\ndef hamming_distance(s1, s2):\n if len(s1) != len(s2):\n raise ValueError(\"Undefined for sequences of unequal length\")\n return sum(el1 != el2 for el1, el2 in zip(s1, s2))\n","sub_path":"Sax/Final_code_test/helper_functions.py","file_name":"helper_functions.py","file_ext":"py","file_size_in_byte":5045,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"471607109","text":"\"\"\"\nCrossval tests multiple potential hyperparameter combinations for the system (a neural net)\n(TODO: test multiple nets per option set and take the best- maybe 10?)\n\"\"\"\nimport numpy as np\nimport copy\n\nimport system as sys\n\n#suppress nan warnings\nnp.seterr(divide = 'ignore', invalid = 'ignore', over = 'ignore')\n\n#crossval options:\niterations = 500 #number of training iterations for each net\n\n#dt.DataSource options:\ncv_preprocessing_options = [\n [['mean'],['stdev']]\n ] #Preprocessing: ['mean']; ['stdev']; ['pca',var]; ['whitening',var]\n #done in same order as the array\n #pca automatically standardizes based on mean and stdev, while whitening automatically does pca\n #thus the only time you should have 2 elements in the array is if you want to do both mean and stdev standardization\ntrain_test_CV_split = [0.6, 0.2, 0.2] #fraction of data split into the 3 groups, should add up to 1\nfile_name = \"iris.data\"\n\n#sys.ystem options:\ncv_options = {}\n\ncv_options['num_examples'] = [5, 20, 50]\n\n#Regularization: ['none']; ['l1', lambda]; ['l2', lambda]; ['elastic', lambda1, lambda2]; (dropout)\ncv_options['Reg'] = [\n ['l1', 0.01],\n ['l1', 0.1],\n\n ['l2', 0.01],\n ['l2', 0.1],\n ]\n\ncv_options['Loss'] = ['logistic'] #Loss function: logistic, (cross-entropy)\n\ncv_options['Act'] = ['tanh'] #Activation function: sigmoid, tanh, ReLU\n\n#Learning method: ['vanilla']; ['momentum', mu]; ['nesterov', mu]\ncv_options['Learn'] = [\n ['nesterov', 0.5],\n ['nesterov', 0.7],\n ['nesterov', 0.9],\n ['nesterov', 0.95]\n ]\n\n#Learning rate: ['constant', alpha]; ['step', alpha, fraction, num_epochs]; ['exp', alpha, k]; ['1/t', alpha, k]\ncv_options['Rate'] = [\n ['step', 0.1, 0.95, 20],\n ['step', 0.1, 0.9, 20],\n ['step', 0.1, 0.9, 35],\n ['step', 0.1, 0.9, 50],\n ['step', 0.1, 0.75, 20],\n ['step', 0.1, 0.5, 20],\n ]\n\n#total number of options to iterate over:\n#datasource: 2, system: 6\nsystems = []\ncosts = []\n\n#create all systems to be tested:\noptions = {}\nprint('Initializing option sets...')\nfor i_preprocessing_options in range(0, len(cv_preprocessing_options)):\n preprocessing_options = cv_preprocessing_options[i_preprocessing_options]\n\n for i_num_examples in range(0, len(cv_options['num_examples'])):\n options['num_examples'] = cv_options['num_examples'][i_num_examples]\n\n for i_Reg in range(0, len(cv_options['Reg'])):\n options['Reg'] = cv_options['Reg'][i_Reg]\n\n for i_Loss in range(0, len(cv_options['Loss'])):\n options['Loss'] = cv_options['Loss'][i_Loss]\n\n for i_Act in range(0, len(cv_options['Act'])):\n options['Act'] = cv_options['Act'][i_Act]\n\n for i_Learn in range(0, len(cv_options['Learn'])):\n options['Learn'] = cv_options['Learn'][i_Learn]\n\n for i_Rate in range(0, len(cv_options['Rate'])):\n options['Rate'] = cv_options['Rate'][i_Rate]\n\n systems.append(sys.System(options, preprocessing_options, file_name, train_test_CV_split))\n options = copy.deepcopy(options) #each system has its own options\n\n#test the systems (only keep final cv_cost):\nprint('Number of option sets:', len(systems))\nfor i in range(0, 
len(systems)):\n system = systems[i]\n ignore, ignore, cv_cost = system.doManyIterations(iterations)\n \n cv_cost = cv_cost[-1] #look at the final cost\n cv_cost = np.sum(cv_cost) / cv_cost.shape[0] #avg over all classes\n \n costs.append(cv_cost)\n\n print('.', end='')\nprint()\n\nminimums = np.where(costs == np.nanmin(costs))\nfor i in range(0, len(minimums)):\n i_min = minimums[i][0]\n system = systems[i_min]\n cost = costs[i_min]\n options = system.options\n preprocessing_options = system.dataSource.preprocessing_options\n\n print('Cost:', cost)\n print('Options:', options)\n print('Preprocessing:', preprocessing_options)\n","sub_path":"crossval.py","file_name":"crossval.py","file_ext":"py","file_size_in_byte":4399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"191551514","text":"import sys\r\nimport copy\r\nimport time\r\n\r\n# THIS VERSION IS BACKTRACKING SEARCH + AC3\r\n# Running script: given code can be run with the command:\r\n# python file.py, ./path/to/init_state.txt ./output/output.txt\r\n\r\n#This class is used to keep track of what values can be used in that position in the sudoku puzzle\r\nclass Position:\r\n def __init__(self, value):\r\n self.domain = set()\r\n self.value = value\r\n self.unassignedNeighbours = set()\r\n \r\n def __str__(self):\r\n return str(self.value)\r\n\r\n#This class represents the sudoku puzzle and is used to solve a sudoku puzzle\r\nclass Puzzle:\r\n def __init__(self, solution, unusedRowValues, unusedColumnValues, unusedBoxValues):\r\n self.nodeCount = 0\r\n self.solution = solution\r\n self.unusedRowValues = unusedRowValues\r\n self.unusedColumnValues = unusedColumnValues\r\n self.unusedBoxValues = unusedBoxValues\r\n #Seting the domains for all positions\r\n for r in range(9):\r\n for c in range(9):\r\n self.solution[r][c].domain = self.unusedRowValues[r].intersection(self.unusedColumnValues[c], self.unusedBoxValues[r//3][c//3])\r\n self.solution[r][c].unassignedNeighbours = self.getUnassignedNeighbours(r, c)\r\n \r\n #Gets all unassigned neighbours of all positions\r\n def getUnusedNeighbours(self):\r\n for r in range(9):\r\n for c in range(9):\r\n self.solution[r][c].unassignedNeighbours = self.getUnassignedNeighbours(r, c)\r\n \r\n #Gets unassigned neighbours of a particulat position\r\n def getUnassignedNeighbours(self, r, c):\r\n unassignedNeighbours = set()\r\n for i in range(9):\r\n if i != c and self.solution[r][i].value == 0:\r\n unassignedNeighbours.add((r, i))\r\n if i != r and self.solution[i][c].value == 0:\r\n unassignedNeighbours.add((i, c))\r\n boxRow = r // 3 * 3\r\n boxColumn = c // 3 * 3\r\n for i in range(boxRow, boxRow + 3):\r\n for j in range(boxColumn, boxColumn + 3):\r\n if i != r and j != c and self.solution[i][j].value == 0:\r\n unassignedNeighbours.add((i, j))\r\n return unassignedNeighbours\r\n \r\n #Used to choose the position that will be assigned. 
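The six nested loops that enumerate option sets in crossval.py compute a Cartesian product; itertools.product expresses the same enumeration flatly. A sketch with a trimmed stand-in dict (the real cv_options is defined in the record above):

import itertools

cv_options = {'Reg': [['l1', 0.01], ['l2', 0.1]], 'Rate': [['step', 0.1, 0.9, 20]]}  # trimmed stand-in
keys = list(cv_options)
option_sets = [dict(zip(keys, combo)) for combo in itertools.product(*(cv_options[k] for k in keys))]
print(len(option_sets))  # 2 * 1 = 2 option sets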
It currently just chooses the first unassigned position\r\n def choosePosition(self):\r\n row = -1\r\n column = -1\r\n min = 10000\r\n for r in range(9):\r\n for c in range(9):\r\n if self.solution[r][c].value == 0:\r\n if len(self.solution[r][c].domain) < min:\r\n min = len(self.solution[r][c].domain)\r\n row = r\r\n column = c\r\n return (row, column)\r\n \r\n #Used to removed a value from UnusedValues for a row/column\r\n def removeFromUnusedValues(self, r, c, value):\r\n self.unusedRowValues[r].remove(value)\r\n self.unusedColumnValues[c].remove(value)\r\n self.unusedBoxValues[r//3][c//3].remove(value)\r\n \r\n #Used to add a value to UnusedValues for a row/column\r\n def addToUnusedValues(self, r, c, value):\r\n self.unusedRowValues[r].add(value)\r\n self.unusedColumnValues[c].add(value)\r\n self.unusedBoxValues[r//3][c//3].add(value)\r\n \r\n #Used to update all domains whenever a change is made\r\n def updatePositionDomains(self):\r\n for r in range(9):\r\n for c in range(9):\r\n self.solution[r][c].domain = self.unusedRowValues[r].intersection(self.unusedColumnValues[c], self.unusedBoxValues[r//3][c//3])\r\n \r\n #Used to assign a value to a position\r\n def assignValue(self, row, column, value, changes):\r\n self.solution[row][column].value = value\r\n for (r, c) in self.solution[row][column].unassignedNeighbours:\r\n self.solution[r][c].unassignedNeighbours.remove((row, column))\r\n if self.solution[r][c].value == 0 and (value in self.solution[r][c].domain):\r\n self.solution[r][c].domain.remove(value)\r\n if changes.has_key((r, c)):\r\n changes.add(value)\r\n else:\r\n changes[(r, c)] = set([value])\r\n self.AC3(changes)\r\n \r\n #Used to undo the assignment to a position\r\n def removeAssignment(self, row, column, changes):\r\n value = self.solution[row][column].value\r\n self.solution[row][column].value = 0\r\n for (r, c) in self.solution[row][column].unassignedNeighbours:\r\n self.solution[r][c].unassignedNeighbours.add((row, column))\r\n self.undoAC3(changes)\r\n \r\n \r\n #Used to check if we have found a solution\r\n def isGoal(self):\r\n for r in range(9):\r\n for c in range(9):\r\n if self.solution[r][c].value == 0:\r\n return False\r\n return True\r\n \r\n #Used to check whether a solution is possible after the latest assignment\r\n def isValidAssignment(self):\r\n for r in range(9):\r\n for c in range(9):\r\n if len(self.solution[r][c].domain) == 0 and self.solution[r][c].value == 0:\r\n return False\r\n return True\r\n \r\n def createQueue(self):\r\n q = list()\r\n for row in range(9):\r\n for column in range(9):\r\n if self.solution[row][column].value != 0:\r\n continue\r\n for (r, c) in self.solution[row][column].unassignedNeighbours:\r\n if len(self.solution[r][c].domain) == 1 and self.solution[r][c].value == 0:\r\n q.append(((row, column), (r, c)))\r\n return q\r\n \r\n def updateQueue(self, q, row, column, r, c):\r\n for (i, j) in self.solution[row][column].unassignedNeighbours:\r\n if (i, j) != (row, column) and (i, j) != (r, c) and self.solution[i][j].value == 0:\r\n q.append(((i,j), (row, column)))\r\n \r\n def revise(self, row, column, r, c, changes):\r\n myDomain = self.solution[row][column].domain\r\n neighbourDomain = self.solution[r][c].domain\r\n revise = False\r\n if len(neighbourDomain) != 1:\r\n return False\r\n for n in neighbourDomain:\r\n if n in myDomain:\r\n myDomain.remove(n)\r\n revise = True\r\n if changes.has_key((row, column)):\r\n changes[(row, column)].add(n)\r\n else:\r\n changes[(row, column)] = set([n])\r\n return revise\r\n \r\n def 
AC3(self, changes):\r\n q = self.createQueue()\r\n while len(q) != 0:\r\n (row, column), (r,c) = q.pop(0)\r\n if self.revise(row, column, r, c, changes):\r\n if (len(self.solution[row][column].domain) == 0):\r\n return False\r\n if (len(self.solution[row][column].domain) == 1):\r\n self.updateQueue(q, row, column, r, c)\r\n return True\r\n \r\n def undoAC3(self, changes):\r\n for (r,c), change in changes.items():\r\n while len(change) != 0:\r\n self.solution[r][c].domain.add(change.pop())\r\n \r\n #The backtracking algorithm to find a solution\r\n def backtrack(self):\r\n self.nodeCount = self.nodeCount + 1\r\n if not self.isValidAssignment():\r\n return False\r\n if self.isGoal():\r\n return True\r\n (row, column) = self.choosePosition()\r\n domainCpy = self.solution[row][column].domain.copy()\r\n for n in domainCpy:\r\n changes = dict()\r\n self.assignValue(row, column, n, changes)\r\n isPossible = self.backtrack()\r\n if isPossible:\r\n return True\r\n else:\r\n self.removeAssignment(row, column, changes)\r\n \r\n def __str__(self):\r\n s = \"\"\r\n for r in range(9):\r\n for c in range(9):\r\n s = s + \" \" + str(self.solution[r][c])\r\n s = s + \"\\n\"\r\n return s\r\n \r\n def __hash__(self):\r\n return hash(str(self))\r\n \r\n\r\nclass Sudoku(object):\r\n def __init__(self, puzzle):\r\n #The sudoku puzzle we are given\r\n self.puzzle = puzzle\r\n \r\n #Initialize the 'answer' matrix (since we want each position in the puzzle to be a Position object)\r\n self.ans = [[Position(0) for r in range(9)] for c in range(9)]\r\n for r in range(9):\r\n for c in range(9):\r\n self.ans[r][c].value = self.puzzle[r][c]\r\n \r\n #Sets to keep track of unused values in rows and columns\r\n self.unusedRowValues = [set([1, 2, 3, 4, 5, 6, 7, 8, 9]) for i in range(9)]\r\n self.unusedColumnValues = [set([1, 2, 3, 4, 5, 6, 7, 8, 9]) for i in range(9)]\r\n self.unusedBoxValues = [[set([1, 2, 3, 4, 5, 6, 7, 8, 9]) for i in range(3)] for j in range(3)]\r\n for r in range(9):\r\n for c in range(9):\r\n n = self.ans[r][c].value\r\n if n != 0:\r\n self.unusedRowValues[r].remove(n)\r\n self.unusedColumnValues[c].remove(n)\r\n self.unusedBoxValues[r//3][c//3].remove(n)\r\n\r\n def solve(self):\r\n start = time.time()\r\n mySudoku = Puzzle(self.ans, self.unusedRowValues, self.unusedColumnValues, self.unusedBoxValues)\r\n mySudoku.backtrack()\r\n end = time.time()\r\n print(\"Time taken: \" + str(end - start))\r\n print(\"Nodes seen: \" + str(mySudoku.nodeCount))\r\n return mySudoku.solution\r\n\r\nif __name__ == \"__main__\":\r\n # STRICTLY do NOT modify the code in the main function here\r\n if len(sys.argv) != 3:\r\n print (\"\\nUsage: python CS3243_P2_Sudoku_XX.py input.txt output.txt\\n\")\r\n raise ValueError(\"Wrong number of arguments!\")\r\n\r\n try:\r\n f = open(sys.argv[1], 'r')\r\n except IOError:\r\n print (\"\\nUsage: python CS3243_P2_Sudoku_XX.py input.txt output.txt\\n\")\r\n raise IOError(\"Input file not found!\")\r\n\r\n puzzle = [[0 for i in range(9)] for j in range(9)]\r\n lines = f.readlines()\r\n\r\n i, j = 0, 0\r\n for line in lines:\r\n for number in line:\r\n if '0' <= number <= '9':\r\n puzzle[i][j] = int(number)\r\n j += 1\r\n if j == 9:\r\n i += 1\r\n j = 0\r\n\r\n sudoku = Sudoku(puzzle)\r\n ans = sudoku.solve()\r\n\r\n with open(sys.argv[2], 'a') as f:\r\n for i in range(9):\r\n for j in range(9):\r\n f.write(str(ans[i][j]) + \" \")\r\n 
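The revise step above only prunes when a neighbour's domain has collapsed to a single value, which is what keeps this AC-3 cheap inside backtracking. The rule in miniature on bare sets:

def revise(my_domain, neighbour_domain):
    # Prune only against singleton neighbour domains, as in Puzzle.revise above.
    if len(neighbour_domain) != 1:
        return False
    changed = my_domain & neighbour_domain
    my_domain -= neighbour_domain
    return bool(changed)

d = {1, 2, 3}
assert revise(d, {2}) and d == {1, 3}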
f.write(\"\\n\")\r\n","sub_path":"sudoku_version4_AC3_and_MostConstrainedVariable.py","file_name":"sudoku_version4_AC3_and_MostConstrainedVariable.py","file_ext":"py","file_size_in_byte":10640,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"192831967","text":"#!/usr/bin/env python\n\"\"\"\n--- Day 2: 1202 Program Alarm ---\nhttps://adventofcode.com/2019/day/2\n\nRank: 5348 / 4567\n\"\"\"\nfrom aoc2019lib import IntCodeVM\nfrom aocutils import read_input_int_split, timer\n\nINPUT = read_input_int_split('02')\n\n\ndef run_with_noun_and_verb(intcode_vm: IntCodeVM, noun: int, verb: int) -> int:\n \"\"\"Reset given intcode VM, set noun and verb, run it and get value at memory index 0.\"\"\"\n intcode_vm.reset()\n intcode_vm.mem_set(1, noun)\n intcode_vm.mem_set(2, verb)\n intcode_vm.run()\n return intcode_vm.mem_get(0)\n\n\n@timer\ndef part1():\n \"\"\"Solve challenge part 1.\"\"\"\n return run_with_noun_and_verb(IntCodeVM(INPUT), 12, 2)\n\n\n@timer\ndef part2():\n \"\"\"Solve challenge part 2.\"\"\"\n intcode_vm = IntCodeVM(INPUT)\n for noun in range(100):\n for verb in range(100):\n if run_with_noun_and_verb(intcode_vm, noun, verb) == 19690720:\n return 100 * noun + verb\n\n\nif __name__ == \"__main__\":\n print(part1())\n print(part2())\n","sub_path":"aoc02.py","file_name":"aoc02.py","file_ext":"py","file_size_in_byte":957,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"416656458","text":"\"\"\" This file contains the Hashcat functions I used in the AutoHash program\"\"\"\n\nimport tempfile\nfrom FuncCommon import get_cmd_lines\nfrom FuncCrackOrganize import get_list_cracked_to_hash, get_list_not_cracked\nfrom FuncConversation import done_basic_session, done_advanced_session\n\n\ndef hashcat_basic(globalVars):\n \"\"\"\n For every hash list creating a temporary file with all the hashes\n Running hashcat on the temporary file\n Adding to the hash list (.cracked_hashes) all the cracked hashes\n Setting the hashlist not cracked list (.hashes)\n Calling the done_basic_session function\n \"\"\"\n globalVars.numNotCracked = globalVars.totalHashes\n for hash_list in globalVars.HashLists:\n tmp_file = tempfile.NamedTemporaryFile()\n tmp_write = open(tmp_file.name, 'w')\n for hash in hash_list.hashes:\n tmp_write.write(hash.hash + '\\n')\n tmp_write.close()\n hashcat_log = get_cmd_lines(['hashcat', '-m', str(hash_list.hash_cat),\n '-a', '0', tmp_file.name, globalVars.wordListPath,\n '--force', '--quiet', '--potfile-disable'\n ])\n if(\"on line 1\" not in hashcat_log[0]):\n hash_list.cracked_hashes = get_list_cracked_to_hash(globalVars,hashcat_log)\n hash_list.hashes = get_list_not_cracked(hash_list.hashes,\n hash_list.cracked_hashes)\n tmp_file.close()\n done_basic_session(globalVars)\n\n\ndef hashcat_advanced(globalVars, rule_list_path, num_in_session, num_timeout):\n \"\"\"\n First checking if the cracking have a timeout timer\n => No:\n Running a loop on the hash lists, divided by hash type\n Creating a temp file that contains the hashes to crack\n Running hashcat with rules on a temp file that contains all the hashes\n => Yes:\n Running a loop on the hash lists, divided by hash type\n Creating a temp file\n Running the next loop for each hash =>\n Sets the temp file to contain only the current hash\n Running hashcat with rules and timeout on the temp file\n Check if the hash been cracked, if not marks him with custom value\n\n Overall wont crack more than specified to this sesssion\n 
Add the cracked hashes to the cracked list (hash_list.cracked_hashes)\n Reduce the var numNotCracked by the number of hashes been cracked\n In the end calling the done_basic_session function\n :var num_cracked_now: Counting the hashes added to the current session\n :var num_failed: Counting the hashes that their cracking been timed out\n :param rule_list_path: The path to the the rules file\n :param num_in_session: The number of hashes to crack on this session\n :param num_timeout: The number of minutes until the hash cracking timeout\n \"\"\"\n num_cracked_now = 0\n num_failed = 0\n if (num_timeout == 0):\n for hash_list in globalVars.HashLists:\n tmp_file = tempfile.NamedTemporaryFile()\n tmp_write = open(tmp_file.name, 'w')\n for hash in hash_list.hashes:\n tmp_write.write(hash.hash + '\\n')\n num_cracked_now += 1\n if (num_cracked_now == num_in_session):\n break\n tmp_write.close()\n hashcat_log = get_cmd_lines(['hashcat', '-m',\n str(hash_list.hashCat), '-a', '0',\n tmp_file.name, globalVars.wordListPath, '-r',\n rule_list_path, '--force', '--quiet',\n '--potfile-disable'\n ])\n if (\"on line 1\" not in hashcat_log[0]):\n hash_list.cracked_hashes += get_list_cracked_to_hash(globalVars, hashcat_log)\n hash_list.hashes = get_list_not_cracked(hash_list.hashes,\n hash_list.cracked_hashes)\n tmp_file.close()\n if (num_cracked_now == num_in_session):\n break\n else:\n for hash_list in globalVars.HashLists:\n tmp_file = tempfile.NamedTemporaryFile()\n for hash in hash_list.hashes:\n tmp_write = open(tmp_file.name, 'w')\n tmp_write.flush()\n tmp_write.write(hash.hash + '\\n')\n tmp_write.close()\n hash_after = get_cmd_lines(['hashcat', '-m',\n str(hash_list.hash_cat), '-a', '0',\n tmp_file.name, globalVars.wordListPath, '-r',\n rule_list_path, '--runtime',\n str(num_timeout * 60), '--force',\n '--quiet', '--potfile-disable'\n ])\n\n if (hash_after == [] or \"on line 1\" in hash_after):\n hash_after = [\n hash.hash + ':' + '___________'] # 11 times _\n num_failed += 1\n globalVars.numTimedOutCracked += 1\n hash_list.cracked_hashes += get_list_cracked_to_hash(\n globalVars, hash_after)\n num_cracked_now += 1\n if (num_cracked_now == num_in_session):\n break\n hash_list.hashes = get_list_not_cracked(hash_list.hashes,\n hash_list.cracked_hashes)\n tmp_file.close()\n if (num_cracked_now == num_in_session):\n break\n globalVars.numNotCracked += num_failed\n done_advanced_session(globalVars, rule_list_path)","sub_path":"src/FuncHashcat.py","file_name":"FuncHashcat.py","file_ext":"py","file_size_in_byte":5941,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"237766654","text":"import socket\n\nfrom utils.configs import AlgorithmConfigs\n\nclass Algorithm:\n def __init__(self):\n self.server_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.server_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n # self.server_sock.setblocking(False)\n self.server_ip = AlgorithmConfigs.SERVER_IP\n self.port = AlgorithmConfigs.SERVER_PORT\n self.server_sock.bind((self.server_ip, self.port))\n self.server_sock.listen(1)\n\n self.client_sock = None\n self.clientInfo = None\n self.is_connected = False\n print(\"Algorithm (INSTANTIATED)\")\n\n def isConnected(self):\n return self.is_connected\n\n def connect(self):\n try:\n print(f\"Algorithm (WAITING) at {self.server_ip}\")\n if self.client_sock is None:\n self.client_sock, self.client_address = self.server_sock.accept()\n self.is_connected = True\n print(f\"Algorithm (CONNECTED) to 
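FuncHashcat's core pattern is to dump candidate hashes into a NamedTemporaryFile and pass its .name to the external tool. The pattern in isolation, with cat standing in for hashcat so the sketch runs without hashcat installed (cat, like the original's reopen-by-name approach, assumes a POSIX system; on Windows an open NamedTemporaryFile cannot be reopened by name):

import os, subprocess, tempfile

with tempfile.NamedTemporaryFile('w', suffix='.hash', delete=False) as tmp:
    tmp.write('5f4dcc3b5aa765d61d8327deb882cf99\n')  # md5('password')
    path = tmp.name
try:
    # Stand-in for the hashcat invocation built with get_cmd_lines above.
    out = subprocess.run(['cat', path], capture_output=True, text=True).stdout
    print(out.strip())
finally:
    os.unlink(path)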
{self.client_address} {self.client_sock}\")\n except KeyboardInterrupt:\n print(f\"Android (KEYBOARD INTERRUPT)\")\n self.disconnect_server()\n except Exception as e:\n print(f\"Algorithm (ERROR) connect():{e}\")\n\n def disconnect_client(self):\n print(\"Algorithm (CLIENT DISCONNECTED) CALLED\")\n self.client_sock.close()\n self.client_sock = None\n self.is_connected = False\n print(\"Algorithm (CLIENT DISCONNECTED)\")\n\n def disconnect_server(self):\n self.server_sock.close()\n print(\"Algorithm (SERVER DISCONNECTED)\")\n \n def read(self):\n try:\n raw_message = self.client_sock.recv(AlgorithmConfigs.BUFFER_SIZE)\n message = raw_message.decode(\"utf-8\").strip().strip('\\x00')\n if len(message) > 0:\n print(f\"Algorithm (MESSAGE-FROM): {message}\")\n return message\n message = None\n except socket.error:\n print(\"Algorithm read disconnect client\")\n self.disconnect_client()\n except Exception as e:\n print(f\"Algorithm (ERROR) read():{e}\")\n return None\n\n def write(self, message):\n try:\n print(f\"Algorithm (MESSAGE-TO): {message}\")\n buffer = message +\"\\x00\"*max(AlgorithmConfigs.BUFFER_SIZE-len(message ),0)\n self.client_sock.send(buffer.encode('utf-8'))\n except socket.error:\n self.disconnect_client()\n except Exception as e:\n print(f\"Algorithm (ERROR) write():{e}\")\n","sub_path":"source/algorithm.py","file_name":"algorithm.py","file_ext":"py","file_size_in_byte":2579,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"85119428","text":"'''\nSolve the 2-D linear convection equation using the finite difference method.\n'''\n\nimport numpy as np # here we load numpy\nfrom matplotlib import pyplot as plt # here we load matplotlib\nfrom matplotlib import cm # colormap\nimport time, sys # here we load some utilities\nfrom matplotlib.ticker import (MultipleLocator, FormatStrFormatter, AutoMinorLocator)\nfrom mpl_toolkits.axes_grid1.inset_locator import inset_axes\nfrom mpl_toolkits.mplot3d import Axes3D # new library required for projected 3D plots\nplt.rcParams[\"font.family\"] = \"stix\" # set the font to Times globally\nplt.rcParams[\"mathtext.fontset\"] = \"stix\" # set the math font to Times\n\n## Variable declarations\nnx = 81 # grid points in x-direction\nny = 81 # grid points in y-direction\nnt = 100 # number of time steps\nc = 1 # wave speed\ndx = 2/(nx-1) # spatial resolution in x-direction\ndy = 2/(ny-1) # spatial resolution in y-direction\nsigma = 0.2 # for CFL condition\ndt = sigma*dx\n\nx = np.linspace(0,2,nx) # x-coordinates\ny = np.linspace(0,2,ny) # y-coordinates\n\n## Assign initial conditions\n# u = 2 when x and y are between 0.5 and 1 and u = 1 everywhere else\nu = np.ones((ny, nx)) # col (x) will always be the last dimension\nu[int(0.5/dy):int(1/dy+1), int(0.5/dx):int(1/dx+1)] = 2\n\n## Plot the initial condition\nfig = plt.figure()\nax = fig.gca(projection='3d')\nX, Y = np.meshgrid(x, y)\nsurf = ax.plot_surface(X, Y, u, cmap=cm.viridis, antialiased=False)\n\n# set the axis properties\nax.xaxis.set_major_formatter(FormatStrFormatter('%g'))\nax.yaxis.set_major_formatter(FormatStrFormatter('%g'))\nax.zaxis.set_major_formatter(FormatStrFormatter('%g'))\nax.tick_params(labelsize=8)\n\n# set the figure properties\nplt.xlabel('$x$ (m)', fontsize=10)\nplt.ylabel('$y$ (m)', fontsize=10)\nax.set_zlabel('$u$ (m/s)', fontsize=10)\nplt.xlim(0, 2)\nplt.ylim(0, 2)\nax.set_zlim(1, 2)\nplt.tight_layout(pad=0.1) # make the layout tight to minimize the white space\n\n# annotate the current time\nax.annotate('$t = 0.000$ 
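Algorithm.write above pads every message to a fixed BUFFER_SIZE with NUL bytes, and read strips them back off. The framing round-trip on its own (buffer size here is a stand-in for AlgorithmConfigs.BUFFER_SIZE):

BUFFER_SIZE = 32  # stand-in for AlgorithmConfigs.BUFFER_SIZE

def frame(message):
    # Pad to a fixed width so every send is exactly BUFFER_SIZE bytes.
    return (message + '\x00' * max(BUFFER_SIZE - len(message), 0)).encode('utf-8')

def unframe(raw):
    return raw.decode('utf-8').strip().strip('\x00')

assert unframe(frame('MOVE,F')) == 'MOVE,F'
assert len(frame('MOVE,F')) == BUFFER_SIZE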
s', xy=(0.75,0.9), xycoords='axes fraction', fontsize=10)\n\n# save and show the figure\nfolderName = '/home/ygc/Documents/Codes/cfd-python/2dLinearConvection/'\nfileName = 'u000.png'\nplt.savefig(folderName+fileName, dpi=300)\nplt.show(block=False)\nplt.pause(0.1) # show the image for 0.1 s\nplt.close()\n\n## Solve using finite difference and plot the results\nfor n in range(nt):\n un = u.copy()\n\n for i in range(1, nx):\n for j in range(1, ny):\n u[j,i] = un[j,i]-c*dt/dx*(un[j,i]-un[j,i-1])-c*dt/dy*(un[j,i]-un[j-1,i])\n\n # set the boundary values\n u[0,:] = 1\n u[-1,:] = 1\n u[:,0] = 1\n u[:,-1] = 1\n\n # Plot the results\n fig = plt.figure()\n ax = fig.gca(projection='3d')\n surf = ax.plot_surface(X, Y, u, cmap=cm.viridis, antialiased=False)\n\n # set the axis properties\n ax.xaxis.set_major_formatter(FormatStrFormatter('%g'))\n ax.yaxis.set_major_formatter(FormatStrFormatter('%g'))\n ax.zaxis.set_major_formatter(FormatStrFormatter('%g'))\n ax.tick_params(labelsize=8)\n\n # set the figure properties\n plt.xlabel('$x$ (m)', fontsize=10)\n plt.ylabel('$y$ (m)', fontsize=10)\n ax.set_zlabel('$u$ (m/s)', fontsize=10)\n plt.xlim(0, 2)\n plt.ylim(0, 2)\n ax.set_zlim(1, 2)\n plt.tight_layout(pad=0.1) # make the layout tight to minimize the white space\n\n # annotate the current time\n ax.annotate('$t = {0:.3f}$ s'.format((n+1)*dt), xy=(0.75,0.9), xycoords='axes fraction', fontsize=10)\n\n # save and show the figure\n fileName = 'u{:0>3d}.png'.format(n+1)\n plt.savefig(folderName+fileName, dpi=300)\n plt.show(block=False)\n plt.pause(0.1) # show the image for 0.1 s\n plt.close()","sub_path":"5_2dLinearConvection.py","file_name":"5_2dLinearConvection.py","file_ext":"py","file_size_in_byte":4189,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"75785237","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport os\nimport uuid\nimport subprocess\n\n\ndef main():\n domain = '.'.join([\"www\", uuid.uuid4().hex, \"com\"])\n cmd = [\"ping\", domain]\n if os.name == \"nt\":\n cmd.extend([\"-n\", \"1\"])\n elif os.name == \"posix\":\n cmd.extend([\"-c\", \"1\"])\n try:\n returncode = subprocess.check_call(cmd)\n except subprocess.CalledProcessError as ex:\n print(\"ERROR:\", ex.returncode, ex.returncode != 0)\n else:\n print(\"returncode:\", returncode, returncode == 0)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"standard/046.subprocess/subprocess_check_call.py","file_name":"subprocess_check_call.py","file_ext":"py","file_size_in_byte":572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"236421232","text":"import tensorflow as tf\nimport numpy as np\nfrom models import *\n\n# make new obj\nmnist = type('',(object,),{})()\nmnist.train = type('',(object,),{})()\nmnist.test = type('',(object,),{})()\n\nmnist.train.num_examples = 60000\nmnist.test.num_examples = 10000\n\nimport pickle\nwith open('../tmp/snu_sugang_images.pkl', 'rb') as f:\n tmp_images = pickle.load(f)\n mnist.train.images = tmp_images[:mnist.train.num_examples]\n mnist.test.images = tmp_images[mnist.train.num_examples:mnist.train.num_examples+mnist.test.num_examples]\n\nwith open('../tmp/snu_sugang_labels.pkl', 'rb') as f:\n tmp_labels = pickle.load(f)\n mnist.train.labels = tmp_labels[:mnist.train.num_examples]\n mnist.test.labels = tmp_labels[mnist.train.num_examples:mnist.train.num_examples+mnist.test.num_examples]\ntrX, trY, teX, teY = mnist.train.images, mnist.train.labels, mnist.test.images, 
mnist.test.labels\n\nX = tf.placeholder(tf.float32, [None, 26, 52, 3])\nY = tf.placeholder(tf.int32, [None, 2])\nY_onehot = tf.one_hot(Y, 10, 1.0, 0.0)\ntraining = tf.placeholder(bool, (), name='mode')\n\nwith tf.variable_scope('model') as scope:\n prob, hypothesis = vgg(X, True, training)\n\ncorrect_prediction = tf.equal(tf.argmax(hypothesis, 2), tf.argmax(Y_onehot, 2))\naccuracy = tf.reduce_mean(tf.cast(correct_prediction, \"float\"))\n\ncost = tf.reduce_mean(tf.reduce_sum(tf.nn.softmax_cross_entropy_with_logits(\n logits=hypothesis, labels=Y_onehot), axis=1))\n\na = tf.Variable(0.0003)\noptimizer = tf.train.AdamOptimizer(a)\ntrain = optimizer.minimize(cost)\n\ninit = tf.global_variables_initializer()\n\nsess = tf.Session()\nsess.run(init)\n\ntraining_epochs = 1\nbatch_size = 128\ndisplay_step = 1\nfor epoch in range(training_epochs):\n avg_cost = 0.0\n avg_accuracy = 0.0\n total_batch = int(mnist.train.num_examples / batch_size)\n train_indices = np.arange(len(trX))\n np.random.shuffle(train_indices)\n trX = trX[train_indices]\n trY = trY[train_indices]\n\n for i in range(total_batch):\n batch_xs, batch_ys = trX[i*batch_size:(i+1)*batch_size], trY[i*batch_size:(i+1)*batch_size]\n sess.run(train, feed_dict={X:batch_xs, Y:batch_ys, training:True})\n tmp_cost, tmp_accuracy = sess.run([cost, accuracy], feed_dict={X:batch_xs, Y:batch_ys, training:False})\n avg_accuracy += tmp_accuracy/total_batch\n avg_cost += tmp_cost/total_batch\n print(total_batch, i, end='\\r')\n print()\n\n if epoch % display_step == 0:\n print (\"Epoch:\", '%04d' %(epoch+1), \"cost:\", \"{:0.9f}\".format(avg_cost), \"accuracy: %0.3f\" % avg_accuracy)\n\n test_indices = np.arange(len(teX))\n np.random.shuffle(test_indices)\n test_batch_size = 250\n \n accr = 0\n test_batch_num = len(teX)//test_batch_size\n for j in range(test_batch_num):\n small_ids = test_indices[j*test_batch_size:(j+1)*test_batch_size]\n f_d = {X: teX[small_ids], Y: teY[small_ids], training:False}\n accr += sess.run(accuracy, feed_dict=f_d) / test_batch_num\n print (\"Accuracy:\", accr)","sub_path":"jay/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":3033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"249390730","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nimport csv\n\nfrom django.conf import settings\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.http import HttpResponse\nfrom django.shortcuts import get_object_or_404\nfrom django.views.generic.detail import DetailView\nfrom django.views.generic.list import ListView\n\nfrom .models import Ingredient, IngredientTag\nfrom .utils import get_nutrition_limits\nfrom targets.models import Target\n\n\nclass IngredientListView(LoginRequiredMixin, ListView):\n # - Table view - generic macros and cost for each ingredient\n model = Ingredient\n queryset = Ingredient.objects.filter(tags__isnull=True)\n #queryset = Ingredient.objects.all() # XXX NOTE: This now only shows untagged Ings\n\n def get_context_data(self, **kwargs):\n context = super(IngredientListView, self).get_context_data(**kwargs)\n context['alltags'] = IngredientTag.objects.values_list('name', flat=True)\n context['limits'] = get_nutrition_limits(self.queryset)\n context['listtype'] = 'untagged'\n return context\n\n\nclass IngredientListAllView(LoginRequiredMixin, ListView):\n # - Table view - generic macros and cost for each ingredient\n model = 
Ingredient\n queryset = Ingredient.objects.all()\n\n def get_context_data(self, **kwargs):\n context = super(IngredientListAllView, self).get_context_data(**kwargs)\n context['alltags'] = IngredientTag.objects.values_list('name', flat=True)\n context['limits'] = get_nutrition_limits(self.queryset) #TODO: Too intensive?\n context['listtype'] = 'all'\n return context\n\n\nclass IngredientListByTagView(LoginRequiredMixin, ListView):\n # - Table view filtered to a tag\n model = Ingredient\n\n def get_queryset(self):\n self.tag = get_object_or_404(IngredientTag, name=self.args[0])\n return Ingredient.objects.filter(tags=self.tag)\n\n def get_context_data(self, **kwargs):\n context = super(IngredientListByTagView, self).get_context_data(**kwargs)\n context['alltags'] = IngredientTag.objects.values_list('name', flat=True)\n context['limits'] = get_nutrition_limits(self.get_queryset())\n context['tag'] = self.tag\n context['listtype'] = 'tag'\n return context\n\n\nclass IngredientDetailView(LoginRequiredMixin, DetailView):\n model = Ingredient\n\n def get_context_data(self, **kwargs):\n context = super(IngredientDetailView, self).get_context_data(**kwargs)\n\n # User's current daily target for comparison\n # TODO: Should let user compare to different targets, and scale\n # to maximise something (etc)\n user = self.request.user\n daily_target = Target.get_primary_target(user)\n context.update({'daily_target': daily_target})\n\n return context\n\n@login_required\ndef IngredientCSVExportView(request):\n # Create the HttpResponse object with the appropriate CSV header.\n response = HttpResponse(content_type='text/csv')\n response['Content-Disposition'] = 'attachment; filename=\"pants-ingredients.csv\"'\n\n # Use dictionary writer to export nutrition data dicts.\n # Fields are all standard items plus 'name' and calories which should be 1st\n fields = [\n 'name',\n 'kilocalories',\n 'protein_per_j',\n 'fibre_per_j',\n 'protein_per_cost',\n 'fibre_per_cost',\n 'rank',\n 'rank_per_cost',\n 'pf_per_j',\n ] + list(settings.NUTRITION_DATA_ITEMS) + [\n 'tags',\n ]\n writer = csv.DictWriter(\n response,\n fieldnames=fields,\n extrasaction='ignore', # ignore extra data if present in dicts\n )\n\n writer.writeheader()\n for ing in Ingredient.objects.all().iterator():\n data = ing.nutrition_data\n data['name'] = ing.name\n data['tags'] = ing.tags.values_list('name', flat=True)\n writer.writerow(data)\n\n return response\n","sub_path":"pants/ingredients/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3885,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"17859434","text":"import numpy as np\nimport pandas as pd\nfrom scipy.optimize import minimize\nimport scipy.optimize\nimport matplotlib.pyplot as plt\n\ndef fit_ata(\n input_endog,\n forecast_length \n ):\n \"\"\"\n :param input_endog: numpy array of intermittent demand time series\n :param forecast_length: forecast horizon\n :return: dictionary of model parameters, in-sample forecast, and out-of-sample forecast\n \"\"\"\n input_series = np.asarray(input_endog)\n epsilon = 1e-7\n input_length = len(input_series)\n nzd = np.where(input_series != 0)[0]\n \n if list(nzd) != [0]:\n \n try:\n w_opt = _ata_opt(\n input_series = input_series,\n input_series_length = input_length,\n epsilon = epsilon, \n w = None,\n nop = 2\n )\n \n ata_training_result = _ata(\n input_series = input_series, \n input_series_length = input_length,\n w = w_opt[0], \n h = forecast_length,\n epsilon = epsilon,\n )\n 
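# Unpack the results: fitted model, in-sample/out-of-sample forecasts, the fitted\n            # demand series and the optimizer's in-sample MSE (w_opt[1]).\n            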
ata_model = ata_training_result['model']\n            ata_fittedvalues = ata_training_result['in_sample_forecast']\n            \n            ata_forecast = ata_training_result['out_of_sample_forecast']\n\n            ata_demand_series = ata_training_result['fit_output']\n\n            ata_mse = w_opt[1]\n\n        except Exception as e:\n            \n            ata_model = None\n            ata_fittedvalues = None\n            ata_forecast = None\n            ata_demand_series = None\n            ata_mse = None\n            print(str(e))\n        \n    else:\n        \n        ata_model = None\n        ata_fittedvalues = None\n        ata_forecast = None\n        ata_demand_series = None\n        ata_mse = None\n        \n        \n    return {\n        'ata_model': ata_model,\n        'ata_fittedvalues': ata_fittedvalues,\n        'ata_forecast': ata_forecast,\n        'ata_demand_series': ata_demand_series,\n        'ata_mse': ata_mse\n    }\n\ndef _ata(\n        input_series, \n        input_series_length,\n        w, \n        h, \n        epsilon\n        ):\n    \n    # ata decomposition\n    nzd = np.where(input_series != 0)[0] # find location of non-zero demand\n    \n    k = len(nzd)\n    z = input_series[nzd] # demand\n    \n    x = np.concatenate([[nzd[0]], np.diff(nzd)]) # intervals\n\n    # initialize\n    \n    init = [z[0], np.mean(x)]\n    \n    zfit = np.array([None] * k)\n    xfit = np.array([None] * k)\n\n    # assign initial values and parameters\n    \n    zfit[0] = init[0]\n    xfit[0] = init[1]\n\n    correction_factor = 1\n    \n    p = w[0]\n    q = w[1]\n    # fit model\n    for i in range(1,k):\n        a_demand = p / nzd[i]\n        a_interval = q / nzd[i]\n\n        # efficiency shortcut: up to period p (resp. q) use the raw observation directly\n        if nzd[i] <= p:\n            zfit[i] = z[i]\n        else:\n            zfit[i] = zfit[i-1] + a_demand * (z[i] - zfit[i-1]) # demand\n\n\n        if nzd[i] <= q:\n            xfit[i] = z[i] - z[i-1]\n        else:\n            xfit[i] = xfit[i-1] + a_interval * (x[i] - xfit[i-1]) # interval\n    \n    \n    cc = correction_factor * zfit / (xfit + epsilon)\n    \n    ata_model = {\n        'a_demand': p,\n        'a_interval': q,\n        'demand_series': pd.Series(zfit),\n        'interval_series': pd.Series(xfit),\n        'demand_process': pd.Series(cc),\n        'correction_factor': correction_factor\n    }\n    \n    # calculate in-sample demand rate\n    \n    frc_in = np.zeros(input_series_length)\n    tv = np.concatenate([nzd, [input_series_length]]) # Time vector used to create frc_in forecasts\n\n    zfit_output = np.zeros(input_series_length)\n    \n    for i in range(k):\n        frc_in[tv[i]:min(tv[i+1], input_series_length)] = cc[i]\n        zfit_output[tv[i]:min(tv[i+1], input_series_length)] = zfit[i]\n\n    # forecast out_of_sample demand rate\n    \n    # The ATA weights follow a hypergeometric-style distribution, so they do not decay\n    # faster towards the end of the series; the last ATA value therefore does not have\n    # to equal the last fitted value exactly. The forecast formula contains no step\n    # parameter h, so every out-of-sample step simply repeats the last value.\n    if h > 0:\n        frc_out = np.array([cc[k-1]] * h)\n    else:\n        frc_out = None\n\n    return_dictionary = {\n        'model': ata_model,\n        'in_sample_forecast': frc_in,\n        'out_of_sample_forecast': frc_out,\n        'fit_output': zfit_output\n    }\n    \n    return return_dictionary\n\ndef _ata_opt(\n        input_series, \n        input_series_length, \n        epsilon,\n        w = None,\n        nop = 2\n        ):\n\n    # p0 = np.array([3,1])\n    init_p = np.random.randint(1, input_series_length)\n    init_q = np.random.randint(0, init_p)\n    p0 = np.array([init_p,init_q])\n    # print(p0)\n    pbounds = ((1, input_series_length), (0, input_series_length))\n\n    # Obtain an optimised (p, q) pair via scipy's minimize; the choice of solver could\n    # be explored further, and passing an analytic gradient (or tuning the step size)\n    # would reduce the computational cost of the descent.\n    wopt = minimize(\n        fun = _ata_cost, \n        x0 = p0, \n        method='L-BFGS-B',\n        bounds=pbounds,\n        args=(input_series, input_series_length, epsilon)\n    )\n\n    constrained_wopt = wopt.x\n    fun = wopt.fun\n\n    # wopt = scipy.optimize.brute(_ata_cost,pbounds,\n    #                         args=(input_series, input_series_length, epsilon))\n    \n    # # constrained_wopt = np.minimum([1], np.maximum([0], wopt.x))\n    # constrained_wopt = wopt\n    # fun = 0\n    \n    return (constrained_wopt, fun)\n\n# Judging the fit only by the in-sample error is prone to overfitting; an extra\n# regularization term would be needed to mitigate that.\ndef _ata_cost(\n        p0,\n        
input_series,\n        input_series_length,\n        epsilon\n        ):\n    # guard against stepping into the negative parameter region\n    if p0[0] < 0 or p0[1] < 0:\n        return 3.402823466E+38\n    # Q: [0, P] P: [1,n]\n    if p0[1] > p0[0]:\n        return 3.402823466E+38\n    frc_in = _ata(\n        input_series = input_series,\n        input_series_length = input_series_length,\n        w=p0,\n        h=0,\n        epsilon = epsilon\n        )['in_sample_forecast']\n\n    # MSE-------------------------------------\n    E = input_series - frc_in\n\n    # count = min(input_series_length-1,(int)(p0[0]))\n    # indata = input_series[count:]\n    # outdata = frc_in[count:]\n    # E = indata - outdata\n    \n    E = E[E != np.array(None)]\n    # E = np.sqrt(np.mean(E ** 2))\n    E = np.mean(E ** 2)\n\n    # # MAPE--------------------------------\n    # E1 = (np.fabs(input_series - frc_in))\n    # E2 = (np.fabs(input_series) + np.fabs(frc_in)) / 2\n    # E = E1 / E2\n    # E = E.sum() / len(input_series)\n\n    # print((\"count: {0} p: {1} q: {2} E: {3}\").format(count, p0[0], p0[1], E))\n    print((\"p: {0} q: {1} E: {2}\").format(p0[0], p0[1], E))\n    # count = count + 1\n    return E\n\n# a = np.zeros(7)\n# val = [1.0,4.0,5.0,3.0]\n# idxs = [1,2-1,6-2,7-3]\n# ts = np.insert(a, idxs, val)\n\n\ninput_data = pd.read_csv(\"./data/M4DataSet/NewYearly.csv\")\ninput_data = input_data.fillna(0)\nts = input_data['Feature']\n# ts = input_data['Feature'][:1000]\n\n#-------------Cross validation------------------------------------\ncv_count = 5\nrepeatpoint = 10\nsplit_count = int(len(ts) / cv_count)\nsplit_range = split_count - repeatpoint\ndataend = len(ts)\nopt_para = []\nfor i in range(cv_count):\n    start = i * split_count - repeatpoint\n    end = (i+1) * split_count - repeatpoint\n    if start < 0:\n        start = 0\n    if end > dataend:\n        end = dataend\n    data = ts[start : end]\n    fit_pred = fit_ata(data, repeatpoint)\n    opt_model = fit_pred['ata_model']\n    para = (opt_model[\"a_demand\"], opt_model[\"a_interval\"])\n    Train_MSE = fit_pred['ata_mse']\n    #test------------------------------------------\n    test = ts[end : end+repeatpoint]\n    test_fitted = fit_pred[\"ata_forecast\"]\n\n    test_MSE = test - test_fitted\n    test_MSE = test_MSE[test_MSE != np.array(None)]\n    test_MSE = np.mean(test_MSE ** 2)\n\n    print(\"cv train: {0}\\topt P: {1}\\tQ: {2}\\tTrain_MSE: {3}\\tTest_MSE: {4}\".format(i, opt_model[\"a_demand\"], opt_model[\"a_interval\"], Train_MSE, test_MSE))\n    opt_para.append({\"para\":para, \"MSE\": test_MSE, 'fited': fit_pred})\n\n\n\n#--------------------------Output the best parameters----------------------------\nopt_para = sorted(opt_para, key=lambda item: item['MSE'])\nbest_para = opt_para[0]\ntest_data = np.asarray(ts)\nfit_pred = _ata(test_data,len(test_data),best_para['para'],4,1e-7)\n\ntest_fited = fit_pred['in_sample_forecast']\nE = test_data - test_fited\nE = E[E != np.array(None)]\nMSE = np.mean(E ** 2)\n\nyhat = np.concatenate([fit_pred['in_sample_forecast'], fit_pred['out_of_sample_forecast']])\n# yhat = fit_pred['ata_demand_series']\n\nopt_model = fit_pred['model']\nprint(\"output P: {0}\\tQ: {1}\\tmse: {2}\".format(opt_model[\"a_demand\"],opt_model[\"a_interval\"], MSE))\n# print(ts)\n# print(yhat)\n\nplt.plot(ts)\nplt.plot(yhat)\n\nplt.show()\nprint(\"\")\n\n# output P: 4557.0 Q: 0.0 mse: 7763694.190167521\n# round 1: output P: 969.6244064590888 Q: 217.45943750155297 mse: 7836237.429606486\n# p: 13782.98556931908 q: 2499.000000000012 E: 2429.2208993823615  grid search\n# p: 13887.31775824237 q: 2157.2851580033325 E: 5840921.397489025\n# p: 14293.167068707324 q: 1e-08 E: 2687.3898919142553\n\n# # -----------------------Invalid-------------------------------\n# # Test\n# init = 
fit_pred['ata_forecast'][-1]\n# # init = 479\n# W = [fit_pred['ata_model']['a_demand'], fit_pred['ata_model']['a_interval']]\n# # W = [13887.31775824237, 2157.2851580033325]\n# test_data = pd.read_csv(\"./data/M4DataSet/NewYearlyTest.csv\")\n# test_data = test_data.fillna(0)\n# ts_test = test_data['Feature']\n\n# test_out = ata_forecast(W, init, len(ts_test), 1e-7)\n\n# # E = test_out - ts_test\n# # E = E[E != np.array(None)]\n# # E = np.mean(E ** 2)\n# # print(('out: a_demand : {0} a_interval: {1} rmse: {2}').format(W[0], W[1], E))\n\n# # print(ts_test)\n# # print(test_out)\n\n# plt.plot(ts_test)\n# plt.plot(test_out)\n\n# plt.show()\n# # ----------------------------------------------------------\n","sub_path":"ATA/main_doublepara_cross_with_point_forecast.py","file_name":"main_doublepara_cross_with_point_forecast.py","file_ext":"py","file_size_in_byte":11494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"91471058","text":"'''\nCopyright 2019 Google LLC\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n    https://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n'''\n\n\nimport os\nimport platform\nimport shutil\nimport subprocess\nimport sys\n\nimport paramiko\nfrom paramiko.ssh_exception import SSHException, PasswordRequiredException\nfrom paramiko.rsakey import RSAKey\n\nfrom mdt import config\n\n\nSUPPORTED_SYSTEMS = [\n    'Linux',\n    'MacOS',\n    'BSD',\n]\n\nKEYSDIR = os.path.join(config.CONFIG_BASEDIR, \"keys\")\nKEYFILE_PATH = os.path.join(config.CONFIG_BASEDIR, \"keys\", \"mdt.key\")\n\n\ndef GenerateAuthorizedKeysLine(paramiko_key):\n    public_key = paramiko_key.get_base64()\n    authorized_keys_line = 'ssh-rsa {0} mdt\\r\\n'.format(public_key)\n    return authorized_keys_line\n\n\nclass Keystore:\n    def __init__(self):\n        if not os.path.exists(config.CONFIG_BASEDIR):\n            os.makedirs(config.CONFIG_BASEDIR, mode=0o700)\n        if not os.path.exists(KEYSDIR):\n            os.makedirs(KEYSDIR, mode=0o700)\n        if not os.path.exists(KEYFILE_PATH):\n            self.pkey = None\n        else:\n            try:\n                self.pkey = RSAKey.from_private_key_file(KEYFILE_PATH)\n            except IOError as e:\n                print(\"Unable to read private key from file: {0}\".format(e))\n                sys.exit(1)\n            except PasswordRequiredException as e:\n                print(\"Unable to load in private key: {0}\".format(e))\n                sys.exit(1)\n\n    def generateKey(self):\n        self.pkey = RSAKey.generate(bits=4096)\n\n        try:\n            self.pkey.write_private_key_file(KEYFILE_PATH)\n        
except IOError as e:\n            print(\"Unable to write private key to disk: {0}\".format(e))\n            return False\n        else:\n            return True\n\n    def importKey(self, keyfile):\n        try:\n            self.pkey = RSAKey.from_private_key_file(keyfile)\n        except IOError as e:\n            print(\"Unable to read private key from file: {0}\".format(e))\n            return False\n        except PasswordRequiredException as e:\n            print(\"Unable to load in private key: {0}\".format(e))\n            return False\n        except SSHException as e:\n            print(\"Unable to import private key: {0}\".format(e))\n            print(\"Note: Only OpenSSH keys generated using ssh-keygen in PEM format are supported.\")\n            return False\n\n        try:\n            self.pkey.write_private_key_file(KEYFILE_PATH)\n        except IOError as e:\n            print(\"Unable to write private key to disk: {0}\".format(e))\n            return False\n        else:\n            return True\n\n    def key(self):\n        return self.pkey\n\n\nclass GenKeyCommand:\n    '''Usage: mdt genkey\n\nGenerates an SSH key and stores it to disk.\n\nNote that this does not prompt if you want to replace an already existing\nkey and will happily overwrite without telling you! Also note, you should remove\nthe keys previously stored on the device in $HOME/.ssh/authorized_keys and\nrestart the mdt-keymaster service on the device to re-push any newly generated\nkeys.\n'''\n\n    def run(self, args):\n        if os.path.exists(KEYFILE_PATH):\n            print('WARNING!')\n            print()\n            print('MDT has detected a key already on disk. This command')\n            print('will overwrite that key! This will effectively lock you out from')\n            print('any boards that you may have previously used this key with!')\n            print()\n            print('If you are attempting to rotate your keys, you will need to run')\n            print(\"'mdt resetkeys' on each board you've previously used to remove\")\n            print('your old key first, otherwise you will be locked out from SSH')\n            print('access and will have to push your key manually.')\n            print()\n            print(\"If you know what you're doing, you can proceed by typing 'YES'\")\n            sys.stdout.write('here: ')\n            sys.stdout.flush()\n\n            response = sys.stdin.readline()\n            if not response.startswith('YES'):\n                print('Aborting.')\n                return 1\n\n            print('Proceeding.')\n            os.unlink(KEYFILE_PATH)\n\n        keystore = Keystore()\n        if not keystore.generateKey():\n            return 1\n\n        return 0\n\n\nclass SetKeyCommand:\n    '''Usage: mdt setkey <keyfile>\n\nCopies an SSH private key provided into the MDT key store for use with\nauthentication later.'''\n\n    def run(self, args):\n        if len(args) != 2:\n            print(\"Usage: mdt setkey <keyfile>\")\n            return 1\n\n        source_keyfile = args[1]\n        if not os.path.exists(source_keyfile):\n            print(\"Can't copy {0}: no such file or directory.\".format(source_keyfile))\n            return 1\n\n        keystore = Keystore()\n        if not keystore.importKey(source_keyfile):\n            return 1\n\n        print(\"Key {0} imported.\".format(source_keyfile))\n        return 0\n","sub_path":"venv/Lib/site-packages/mdt/keys.py","file_name":"keys.py","file_ext":"py","file_size_in_byte":5338,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"605367145","text":"#-*- coding:utf-8 -*-\n\"\"\"\nTest module for spawning and killing workers and sinks.\n\"\"\"\nimport unittest\nfrom servers.ventilator import Ventilator\nfrom workers.dummy_worker import DummyWorker\nfrom sinks.dummy_sink import DummySink\nfrom multiprocessing import Process\nfrom subprocess import Popen\nimport time\n\nclass testWorkerModules(unittest.TestCase):\n    def setUp(self):\n        #start a ventilator\n        self.V = Ventilator()\n        self.nw = 4\n        #spawn 4 workers\n        self.ws = [Popen(['python', 'workers/dummy_worker.py'], stdout=None) for i in range(self.nw)]\n        #~ self.ws = []\n        #~ for i in range(self.nw):\n            #~ w = DummyWorker2\n            #~ P = Process(target=w)\n            #~ P.start()\n            #~ self.ws.append(P)\n\n        #spawn a sink\n        self.sink = Popen(['python', 'sinks/dummy_sink.py'], stdout=None)\n\n\n        # wait for workers and sinks to connect\n        time.sleep(1)\n\n    def test_send_json(self):\n        '''\n        Pushing json with unicode through workers to sinks.\n        '''\n\n        self.V.push_load([{'text':u'são joão'} for i in xrange(80)])\n        time.sleep(2)\n        #[p.wait() for p in self.ws]#wait for the workers to terminate\n        wsr = [p.poll() for p in self.ws]\n        time.sleep(1)\n        self.sink.wait()\n        sr = self.sink.returncode\n        
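# A clean shutdown is expected: each worker should have exited with returncode 0\n        # after the pushed load was processed, and so should the sink.\n        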
self.assertEqual([0]*self.nw, wsr)\n        self.assertEqual(0, sr)\n\n    def tearDown(self):\n        pass\n#        try:\n#            self.sink.kill()\n#            #tries to kill worker processes if they are still active\n#            [p.kill() for p in self.ws]\n#        except OSError as e:\n#            print \"No processes left to kill\", e\n\nclass testWorkerAsSubprocesses(unittest.TestCase):\n    def setUp(self):\n        #start a ventilator\n        self.V = Ventilator(pushport=5561,pubport=5562,subport=5563)\n        self.nw = 4\n        #spawn 4 workers\n        self.ws = [Process(target=DummyWorker(pushport=5564,pullport=5561,subport=5563)) for i in range(self.nw)]\n        [p.start() for p in self.ws]\n\n\n        #spawn a sink\n        self.sink = Process(target=DummySink(pullport=5564,pubport=5563,subport=5562))\n        self.sink.start()\n\n\n        # wait for workers and sinks to connect\n        time.sleep(1)\n\n    def test_send_json(self):\n        '''\n        Pushing json with unicode through workers to sinks.\n        '''\n\n        self.V.push_load([{'text':u'são joão'} for i in xrange(80)])\n        time.sleep(1)\n\n\n\n    def tearDown(self):\n        pass\n#        try:\n#            self.sink.join()\n#            #tries to kill worker processes if they are still active\n#            [p.join() for p in self.ws]\n#        except OSError as e:\n#            print e\n\nif __name__ == '__main__':\n    unittest.main()\n","sub_path":"pypln/test_messaging.py","file_name":"test_messaging.py","file_ext":"py","file_size_in_byte":2775,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"325337625","text":"import requests,json\r\nclass api():\r\n    def locked(key, api_type):\r\n\r\n        # use an f-string so the api_type placeholder is actually interpolated\r\n        url = f\"http://api.neko-bot.net/api/locked/{api_type}\"\r\n        headers = {\"TagKey\": f\"{key}\"}\r\n        r = requests.get(url=url, headers=headers)\r\n        # status_code is an int, so compare numerically instead of substring-testing\r\n        if r.status_code != 403:\r\n            return r.text\r\n        if r.status_code == 403:\r\n            print(f\"Invalid token ({key}) was supplied.\")\r\n","sub_path":"versions/raw/nbapi-1.9.0.1.4/lib/nbapi/apil.py","file_name":"apil.py","file_ext":"py","file_size_in_byte":387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"144132155","text":"# Exceptions\ndef suma(num1, num2):\n    return num1+num2\n\ndef resta(num1, num2):\n    return num1-num2\n\ndef multiplicacion(num1, num2):\n    return num1*num2\n\ndef division(num1, num2):\n    try:\n        return num1/num2\n    except ZeroDivisionError:\n        print(\"Cannot divide by zero\")\n        return \"Invalid operation\"\n\nwhile True:\n    try:\n        op1 = int(input(\"Enter the first number: --->\"))\n        op2 = int(input(\"Enter the second number: --->\"))\n        break\n    except ValueError:\n        print(\"The values entered are not valid, try again\")\n\noperacion = input(\"Enter the operation to perform: (suma/resta/multiplicacion/division)--->\")\n\nif operacion == \"suma\":\n    print(suma(op1, op2))\n\nelif operacion == \"resta\":\n    print(resta(op1, op2))\n\nelif operacion == \"multiplicacion\":\n    print(multiplicacion(op1, op2))\n\nelif operacion == \"division\":\n    print(division(op1, op2))\n\nelse:\n    print(\"Operation not found\")\n\nprint(\"Continuing..\")\n","sub_path":"Plan de Estudios/Software Engineering Career/0. 
Python and Flask/Basico/5.Excepciones/19.py","file_name":"19.py","file_ext":"py","file_size_in_byte":1000,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"67851663","text":"#!/usr/bin/env python3.5\n\"\"\"Telegram bot to query KickassTorrents.\"\"\"\n\nimport gc\nfrom katcr import Katcr\nfrom docopt import docopt\nimport telepot\nfrom telepot.loop import MessageLoop\nfrom telepot.namedtuple import InlineKeyboardMarkup, InlineKeyboardButton\n\n\nclass KATBot(telepot.Bot):\n    \"\"\"KAT.cr search bot, searches only the first page of results.\"\"\"\n\n    def __init__(self, token):\n        \"\"\"Initialize the KATBot.\"\"\"\n        super().__init__(token)\n        self.katcr = Katcr()\n        self.torrent_by_name_dict = {}\n\n    # pylint: disable=too-few-public-methods\n    def on_chat_message(self, msg):\n        \"\"\"Answer only chat messages.\"\"\"\n        if msg['text'] == \"/start\":\n            return\n        _, _, chat_id = telepot.glance(msg)\n        self.sendMessage(chat_id, \"Results for: {}\".format(msg['text']))\n        keys = []\n        for key, value in self.katcr.search(msg['text'], 1):\n            self.torrent_by_name_dict[key[:63]] = value\n            keys.append([\n                InlineKeyboardButton(text=key, callback_data=key[:63])])\n        keyboard = InlineKeyboardMarkup(inline_keyboard=keys)\n        self.sendMessage(chat_id, \"Results for: {}\".format(msg['text']),\n                         reply_markup=keyboard, parse_mode=\"html\")\n        gc.collect()\n\n    def on_callback_query(self, msg):\n        \"\"\"Get the button data.\"\"\"\n        _, from_id, query_data = telepot.glance(msg, flavor='callback_query')\n        self.sendMessage(from_id, self.torrent_by_name_dict[query_data])\n        gc.collect()\n\n\ndef main():\n    \"\"\"Run telegram bot.\n\n    Usage: katcr_bot [options]\n\n    Options:\n        --token=<token>    Telegram bot token\n    \"\"\"\n    bot = KATBot(docopt(main.__doc__, version=\"0.0.1\")[\"--token\"])\n    MessageLoop(bot).run_forever()\n","sub_path":"katcr/bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":1754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"348044253","text":"from flask_restful import Resource, reqparse\n\nfrom com_stock_api.naver_news.dao import NewsDao\nfrom com_stock_api.naver_news.dto import NewsDto\n\nclass News(Resource):\n\n    def __init__(self):\n        # store the parser on the instance so post() and put() can reuse it\n        self.parser = reqparse.RequestParser()\n        self.parser.add_argument('id', type=int, required=False, help='This field cannot be left blank')\n        self.parser.add_argument('date', type=int, required=False, help='This field cannot be left blank')\n        self.parser.add_argument('symbol', type=str, required=False, help='This field cannot be left blank')  # added so put() below can read data['symbol']\n        self.parser.add_argument('headline', type=str, required=False, help='This field cannot be left blank')\n        self.parser.add_argument('neg', type=float, required=False, help='This field cannot be left blank')\n        self.parser.add_argument('pos', type=float, required=False, help='This field cannot be left blank')\n        self.parser.add_argument('neu', type=float, required=False, help='This field cannot be left blank')\n        self.parser.add_argument('keywords', type=float, required=False, help='This field cannot be left blank')\n        self.parser.add_argument('url', type=str, required=False, help='This field cannot be left blank')\n\n    def post(self):\n        data = self.parser.parse_args()\n        news = NewsDto(data['date'],data['headline'],data['neg'], data['pos'], data['neu'],data['keywords'],data['url'])\n        try:\n            news.save()\n        except:\n            return {'message':'An error occurred inserting the news'}, 500\n        return news.json(), 201\n\n    def get(self,id):\n        news = NewsDao.find_by_id(id)\n        if news:\n            return news.json()\n        return {'message': 'News not found'}, 404\n\n    def put(self, id):\n        data = self.parser.parse_args()\n        news = 
NewsDao.find_by_id(id)\n\n        news.date = data['date']\n        news.symbol = data['symbol']\n        news.headline = data['headline']\n        news.neg = data['neg']\n        news.pos = data['pos']\n        news.neu = data['neu']\n        news.keywords = data['keywords']\n        news.url = data['url']\n        news.save()\n        return news.json()\n\nclass News_(Resource):\n    def get(self):\n        return {'news': list(map(lambda news: news.json(), NewsDao.find_all()))}\n    \n","sub_path":"com_stock_api/naver_news/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":2119,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"567927062","text":"import glob\nimport pymongo\nimport logging, os, schedule, time\n\nfrom parsers.solar import fetch_solar\nfrom parsers.wind import fetch_wind\n\nINTERVAL_SECONDS = 60 * 5\n\n# Import all country parsers\ndef import_country(country_code):\n    return getattr(\n        __import__('parsers.%s' % country_code, globals(), locals(), ['fetch_%s' % country_code]),\n        'fetch_%s' % country_code)\ncountry_codes = map(lambda s: s[len('parsers/'):len('parsers/')+2], glob.glob('parsers/??.py'))\nparsers = map(import_country, country_codes)\n\n# Set up stats\nimport statsd\nstatsd.init_statsd({\n    'STATSD_HOST': os.environ.get('STATSD_HOST', 'localhost'),\n    'STATSD_BUCKET_PREFIX': 'electricymap_feeder'\n})\n\n# Set up logging\nENV = os.environ.get('ENV', 'development').lower()\nlogger = logging.getLogger(__name__)\nif not ENV == 'development':\n    from logging.handlers import SMTPHandler\n    mail_handler = SMTPHandler(\n        mailhost=('smtp.mailgun.org', 587),\n        fromaddr='Application Bug Reporter ',\n        toaddrs=['olivier.corradi@gmail.com'],\n        subject='Electricity Map Feeder Error',\n        credentials=(os.environ.get('MAILGUN_USER'), os.environ.get('MAILGUN_PASSWORD'))\n    )\n    mail_handler.setLevel(logging.ERROR)\n    logger.addHandler(mail_handler)\n    logging.getLogger('statsd').addHandler(logging.StreamHandler())\nelse: logger.addHandler(logging.StreamHandler())\n\n\nclient = pymongo.MongoClient(os.environ.get('MONGO_URL', 'mongodb://localhost:27017'))\ndb = client['electricity']\ncol = db['realtime']\n\ndef fetch_countries():\n    for parser in parsers: \n        try:\n            with statsd.StatsdTimer('fetch_one_country'):\n                obj = parser()\n                logging.info('INSERT %s' % obj)\n                col.insert_one(obj)\n        except: \n            statsd.increment('fetch_one_country_error')\n            logger.exception('fetch_one_country()')\n\ndef fetch_weather():\n    try:\n        with statsd.StatsdTimer('fetch_wind'): fetch_wind()\n    except: \n        statsd.increment('fetch_wind_error')\n        logger.exception('fetch_wind()')\n    try:\n        with statsd.StatsdTimer('fetch_solar'): fetch_solar()\n    except: \n        statsd.increment('fetch_solar_error')\n        logger.exception('fetch_solar()')\n\nschedule.every(INTERVAL_SECONDS).seconds.do(fetch_countries)\nschedule.every(15).minutes.do(fetch_weather)\n\nfetch_countries()\nfetch_weather()\n\nwhile True:\n    schedule.run_pending()\n    time.sleep(INTERVAL_SECONDS)\n","sub_path":"feeder/feeder.py","file_name":"feeder.py","file_ext":"py","file_size_in_byte":2474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"570040222","text":"import sys\nimport asyncio\nimport synapse.lib.cell as s_cell\nimport synapse.lib.stormsvc as s_stormsvc\n\n# each service needs a unique id\nsvciden = '457a11723f821dd8884cb5f9d80596ad'\nsvcconf = {'svciden': svciden}\n\nclass HelloWorldService(s_stormsvc.StormSvc, s_cell.CellApi):\n    '''\n    HelloWorldService implements an example Storm Service.\n    '''\n    _storm_svc_name = 'helloworld'\n    
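# version tuple advertised for this service (presumably major, minor, patch)\n    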
_storm_svc_vers = (1, 0, 0)\n _storm_svc_pkgs = (\n {\n 'name': 'hello',\n 'version': (1, 0, 0),\n 'svcconf': svcconf,\n 'modules': (\n {\n 'name': 'hello',\n 'modconf': svcconf,\n 'storm': '''\n function lookup(fqdn) {\n\n $retn = $lib.list()\n\n $hellosvc = $lib.service.get($modconf.svciden)\n\n $answ = $hellosvc.runDnsLook($fqdn)\n\n for $ipv4 in $answ.ipv4s {\n [ inet:dns:a=($fqdn, $ipv4)]\n $retn.append($node)\n }\n\n fini{ return($retn) }\n }\n '''\n },\n ),\n 'commands': (\n {\n 'name': 'hello.lookup',\n 'descr': 'Lookup an FQDN in the helloworld example service.',\n 'cmdargs': (\n # -h / --help plumbing happens automatically\n ('--yield', {'default': False, 'action': 'store_true',\n 'help': 'Yield created inet:dns:a nodes instead of the inbound inet:fqdn nodes.'}),\n ('--debug', {'default': False, 'action': 'store_true',\n 'help': 'Print detailed user feedback.'}),\n ),\n 'cmdconf': {'svciden': svciden},\n 'storm': '''\n // Filter out all node types other than inet:fqdn\n +inet:fqdn\n\n $fqdn = $node.repr()\n\n if $cmdopts.debug {\n $lib.print(\"hello.lookup resolving: {fqdn}\", fqdn=$fqdn)\n }\n\n // import our hello module\n $hello = $lib.import(hello)\n $nodes = $hello.lookup($fqdn)\n\n if $cmdopts.yield {\n -> { yield $nodes }\n }\n ''',\n },\n {\n 'name': 'hello.stream',\n 'descr': 'Yield a potentially large number of results from a service.',\n 'cmdargs': (),\n 'cmdconf': {'svciden': svciden},\n 'storm': '''\n // Filter out all node types other than inet:fqdn\n +inet:fqdn\n\n $hellosvc = $lib.service.get($cmdconf.svciden)\n\n $fqdn = $node.repr()\n -> {\n for $ipv4 in $hellosvc.runGenrLook($fqdn) {\n [ inet:dns:a = ($fqdn, $ipv4) ]\n }\n }\n ''',\n },\n ),\n },\n )\n\n async def runDnsLook(self, fqdn):\n # pretend to run a DNS lookup and return results\n # ( but in reality you could do anything and return it here )\n return {'ipv4s': [ '1.2.3.4', '5.6.7.8' ] }\n\n async def runGenrLook(self, fqdn):\n # storm services may also expose python generators\n # to allow streaming results of any size without memory pressure\n yield '1.1.1.1'\n yield '2.2.2.2'\n\nclass HelloWorldCell(s_cell.Cell):\n '''\n A Cell stores persistant information such as users and permissions.\n '''\n cellapi = HelloWorldService\n\nif __name__ == '__main__':\n asyncio.run(HelloWorldCell.execmain(sys.argv[1:]))\n","sub_path":"hellostormsvc.py","file_name":"hellostormsvc.py","file_ext":"py","file_size_in_byte":4101,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"91800038","text":"f = open(\"A-large (1).in\",\"r\")\r\no = open(\"A-large-answers.txt\",\"w\")\r\nT = int(f.readline())\r\n\r\nfor t in range(1,T+1):\r\n #print(\"Case \"+str(t))\r\n n = f.readline()\r\n #if n[-1] == \"\\n\":\r\n # n = n[:-1]\r\n n = int(n)\r\n #print(n)\r\n if n == 0:\r\n o.write(\"Case #\"+str(t)+\": INSOMNIA\\n\")\r\n continue\r\n m = n\r\n n = 0\r\n s = 1\r\n l = [1,2,3,4,5,6,7,8,9,0]\r\n while l:\r\n n += m\r\n n1 = n\r\n while n1:\r\n try:\r\n l.remove(n1%10)\r\n except ValueError:\r\n pass\r\n n1 = n1//10\r\n \r\n o.write(\"Case #\"+str(t)+\": \"+str(n)+\"\\n\")\r\no.close()\r\n\"\"\"\r\nf = open(\"A-test.txt\",\"w\")\r\nf.write(str(1001)+\"\\n\")\r\nfor i in range(99000,100001):\r\n f.write(str(i)+\"\\n\")\r\nf.close()\"\"\"\r\n\r\n","sub_path":"codes/CodeJamCrawler/CJ/16_0_1_AlonH_A.py","file_name":"16_0_1_AlonH_A.py","file_ext":"py","file_size_in_byte":785,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} 
+{"seq_id":"216828316","text":"import simplegui\nimport time\n\n## Set global variables\ninterval = 100\n# associate interval of 0.1 second \ntime = 0\nx = 0\ny = 0\nsuccess_rate = \"0\" + \"/\" + \"0\"\n\n\n## Event handlers\ndef tick():\n global time\n # time is the total number of 0.1 second passed \n time += 1\n\ndef format():\n global time\n # format() function format variable time into format like 10:59.9\n milesecond = time%10\n second = time/10\n if second>59:\n minute = second/60\n second = second - minute*60\n else:\n minute = 0\n if second < 10:\n s = \"0\"+str(second)\n else:\n s = str(second)\n return str(minute)+\":\"+s+\".\"+str(milesecond) \n\ndef draw(canvas):\n canvas.draw_text(format(),[115,150],30,\"Lime\")\n canvas.draw_text(success_rate,[0,30],30,\"Blue\")\n \n# 3 buttons\ndef start():\n timer.start()\n \ndef stop():\n global x\n global y\n global time\n global success_rate\n # make sure that we only count the times when the game is in progress, \n # rather than everytime we click \"stop\" button \n if timer.is_running():\n y += 1\n if time%10==0:\n x += 1\n success_rate = str(x) + \"/\" + str(y)\n timer.stop()\n\ndef reset():\n global time\n timer.stop()\n time = 0\n \n## Create frame and timer\nframe = simplegui.create_frame(\"Stopwatch\",300,300)\ntimer = simplegui.create_timer(interval,tick)\n\n## Draw on canvas / Add buttons\nframe.set_draw_handler(draw)\nframe.add_button(\"Start\",start)\nframe.add_button(\"Stop\",stop)\nframe.add_button(\"Reset\",reset)\n\n## Start frame \nframe.start()\n\n## Good code from my classmates\n# http://www.codeskulptor.org/#user39_vx55YqYCcD8ajjb_1.py\n# http://www.codeskulptor.org/#user39_Bf7FLIFgZm_2.py\n# http://www.codeskulptor.org/#user39_GeHBa0meqnbN9R0.py\n# http://www.codeskulptor.org/#user39_dH1dHkLRFJWfrHD.py\n# http://www.codeskulptor.org/#user39_V7Xy5oQjBf_0.py\n","sub_path":"IIPP/Stopwatch.py","file_name":"Stopwatch.py","file_ext":"py","file_size_in_byte":1879,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"344163156","text":"# import packages\nimport requests\nimport time \nimport csv\nimport os\nfrom functions import *\n\n#save as text or html\nsave_as_text=False\n#The folder to store the job details description\nfoldername='data'\n#The input file name which is geneated using main.py\nfilename='jobs_1.csv'\n\nimport os\nif not os.path.exists(foldername):\n os.makedirs(foldername)\n\nwith open(filename) as csv_file:\n csv_reader = csv.reader(csv_file, delimiter=',')\n next(csv_reader)\n for row in csv_reader:\n id=row[0]\n url=row[9]\n\n # get dom \n try:\n page = requests.get('https://ca.indeed.com' + url, allow_redirects=True)\n\n #ensuring at least 0.01 second between page grabs \n time.sleep(0.01) \n\n #fetch data\n soup = get_soup(page.text)\n #print(soup.prettify())\n desc = soup.find(id=\"jobDescriptionText\");\n \n # if results exist\n if(desc == None):\n break\n\n # get job description \n if save_as_text:\n filename=id+\".txt\"\n data=desc.get_text()\n else:\n filename=id+\".html\"\n data=str(desc)\n outfile = open(foldername+\"/\"+filename,'w')\n outfile.write(data) \n \n except:\n pass\n","sub_path":"getjobdesc.py","file_name":"getjobdesc.py","file_ext":"py","file_size_in_byte":1376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"493388113","text":"# -*- coding: utf-8 -*-\n\"\"\"\nDemonstrate how to synchronize 2 or more MFLI\ninstruments or 2 or more UHFLI instruments using the MDS capability of 
LabOne.\nIt also measures the temporal response of demodulator filters of both\nboth instruments using the Data Acquisition (DAQ) Module.\n\nCopyright 2008-2018 Zurich Instruments AG\n\"\"\"\n\nfrom __future__ import print_function\nimport time\nimport zhinst.utils\n\n\ndef run_example(device_ids, do_plot=False, synchronize=True):\n \"\"\"\n Run the example: Capture demodulator data from two devices using the Data Acquisition module.\n The devices are first synchronized using the MultiDeviceSync Module.\n\n Hardware configuration:\n The cabling of the instruments must follow the MDS cabling depicted in\n the MDS tab of LabOne.\n Additionally, Signal Out 1 of the master device is split into Signal In 1 of the master and slave.\n\n Arguments:\n\n device_ids (list): The IDs of the devices to run the example with. For\n example, [\"dev3352\",\"dev3562\"]. The first device is the master.\n NOTE The devices must be of the same type, either 2 or more UHF or 2 or more MF instruments.\n\n do_plot (bool, optional): Specify whether to plot the data acquisition. Default is no\n plot output.\n\n synchronize (bool, optional): Specify if multi-device synchronization will\n be started and stopped before and after the data acquisition\n\n Returns:\n\n data (dict): A dictionary with all the data as returend from the sweeper\n module. It contains all the demodulator data dictionaries and some\n metainformation about the data acquisition.\n\n\n See the \"LabOne Programing Manual\" for further help, available:\n - On Windows via the Start-Menu:\n Programs -> Zurich Instruments -> Documentation\n - On Linux in the LabOne .tar.gz archive in the \"Documentation\"\n sub-folder.\n \"\"\"\n\n # Check if the master device ID exists\n if len(device_ids) == 0:\n raise Exception(\"No value for master_id specified. The first argument to the \"\n \"example should contain at least 2 device IDs, \"\n \"e.g. ['dev2006', 'dev2007'] or ['uhf-dev2006', 'uhf-dev2007'].\")\n\n # Check if the slave device ID exists\n if len(device_ids) < 2:\n raise Exception(\"No value for slave_id specified. The first argument to the \"\n \"example should contain at least 2 device IDs, \"\n \"e.g. 
['dev2006', 'dev2007'] or ['uhf-dev2006', 'uhf-dev2007'].\")\n\n # Connection to the data server and devices\n\n # Connection to the local server 'localhost' ^= '127.0.0.1'\n apilevel = 6\n daq = zhinst.ziPython.ziDAQServer('localhost', 8004, apilevel)\n discovery = zhinst.ziPython.ziDiscovery()\n\n # Master and slave device ID\n props = []\n for devID in device_ids:\n deviceSerial = discovery.find(devID).lower()\n props.append(discovery.get(deviceSerial))\n devices = props[0]['deviceid']\n for prop in props[1:]:\n devices += \",\"+prop['deviceid']\n # Switching between MFLI and UHFLI\n deviceType = props[0]['devicetype']\n for prop in props[1:]:\n if prop['devicetype'] != deviceType:\n raise Exception(\"This example needs 2 or more MFLI instruments or 2 or more UHFLI instruments.\"\n \"Mixing device types is not possible\")\n\n for prop in props:\n if prop['devicetype'] == 'UHFLI':\n daq.connectDevice(prop['deviceid'], prop['interfaces'][0])\n else:\n daq.connectDevice(prop['deviceid'], '1GbE')\n\n # Disable all available outputs, demods, ...\n for prop in props:\n zhinst.utils.disable_everything(daq, prop['deviceid'])\n\n # Device synchronization\n if synchronize:\n print(\"Synchronizing devices %s ...\\n\" % devices)\n mds = daq.multiDeviceSyncModule()\n mds.set('multiDeviceSyncModule/start', 0)\n mds.set('multiDeviceSyncModule/group', 0)\n mds.execute()\n mds.set('multiDeviceSyncModule/devices', devices)\n mds.set('multiDeviceSyncModule/start', 1)\n\n timeout = 20\n start = time.time()\n status = 0\n while status != 2:\n time.sleep(0.2)\n status = mds.getInt('multiDeviceSyncModule/status')\n if status == -1:\n raise Exception('Error during device sync')\n if (time.time() - start) > timeout:\n raise Exception('Timeout during device sync')\n\n print(\"Devices successfully synchronized.\")\n\n # Device settings\n demod_c = 0 # demod channel, for paths on the device\n out_c = 0 # signal output channel\n # Get the value of the instrument's default Signal Output mixer channel.\n prop = discovery.get(props[0]['deviceid'])\n out_mixer_c = zhinst.utils.default_output_mixer_channel(prop, out_c)\n in_c = 0 # signal input channel\n osc_c = 0 # oscillator\n\n time_constant = 1.0e-3 # [s]\n demod_rate = 10e3 # [Sa/s]\n filter_order = 8\n osc_freq = 1e3 # [Hz]\n out_amp = 0.600 # [V]\n\n # Master device settings\n master = props[0]['deviceid'].lower()\n daq.setInt('/%s/sigouts/%d/on' % (master, out_c), 1)\n daq.setDouble('/%s/sigouts/%d/range' % (master, out_c), 1)\n daq.setDouble('/%s/sigouts/%d/amplitudes/%d' % (master, out_c, out_mixer_c), out_amp)\n daq.setDouble('/%s/demods/%d/phaseshift' % (master, demod_c), 0)\n daq.setInt('/%s/demods/%d/order' % (master, demod_c), filter_order)\n daq.setDouble('/%s/demods/%d/rate' % (master, demod_c), demod_rate)\n daq.setInt('/%s/demods/%d/harmonic' % (master, demod_c), 1)\n daq.setInt('/%s/demods/%d/enable' % (master, demod_c), 1)\n daq.setInt('/%s/demods/%d/oscselect' % (master, demod_c), osc_c)\n daq.setInt('/%s/demods/%d/adcselect' % (master, demod_c), in_c)\n daq.setDouble('/%s/demods/%d/timeconstant' % (master, demod_c), time_constant)\n daq.setDouble('/%s/oscs/%d/freq' % (master, osc_c), osc_freq)\n daq.setInt('/%s/sigins/%d/imp50' % (master, in_c), 1)\n daq.setInt('/%s/sigins/%d/ac' % (master, in_c), 0)\n daq.setDouble('/%s/sigins/%d/range' % (master, in_c), out_amp/2)\n daq.setDouble('/%s/sigouts/%d/enables/%d' % (master, out_c, out_mixer_c), 0)\n # Slave device settings\n for prop in props[1:]:\n slave = prop['deviceid'].lower()\n 
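# Mirror the master's demodulator, oscillator and signal-input settings on each slave.\n        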
daq.setDouble('/%s/demods/%d/phaseshift' % (slave, demod_c), 0)\n daq.setInt('/%s/demods/%d/order' % (slave, demod_c), filter_order)\n daq.setDouble('/%s/demods/%d/rate' % (slave, demod_c), demod_rate)\n daq.setInt('/%s/demods/%d/harmonic' % (slave, demod_c), 1)\n daq.setInt('/%s/demods/%d/enable' % (slave, demod_c), 1)\n daq.setInt('/%s/demods/%d/oscselect' % (slave, demod_c), osc_c)\n daq.setInt('/%s/demods/%d/adcselect' % (slave, demod_c), in_c)\n daq.setDouble('/%s/demods/%d/timeconstant' % (slave, demod_c), time_constant)\n daq.setDouble('/%s/oscs/%d/freq' % (slave, osc_c), osc_freq)\n daq.setInt('/%s/sigins/%d/imp50' % (slave, in_c), 1)\n daq.setInt('/%s/sigins/%d/ac' % (slave, in_c), 0)\n daq.setDouble('/%s/sigins/%d/range' % (slave, in_c), out_amp/2)\n # Synchronization\n daq.sync()\n time.sleep(1)\n\n # measuring the transient state of demodulator filters using DAQ module\n\n # DAQ module\n # Create a Data Acquisition Module instance, the return argument is a handle to the module\n daqMod = daq.dataAcquisitionModule()\n # Configure the Data Acquisition Module\n # Device on which trigger will be performed\n daqMod.set('dataAcquisitionModule/device', master)\n # The number of triggers to capture (if not running in endless mode).\n daqMod.set('dataAcquisitionModule/count', 1)\n daqMod.set('dataAcquisitionModule/endless', 0)\n # 'dataAcquisitionModule/grid/mode' - Specify the interpolation method of\n # the returned data samples.\n #\n # 1 = Nearest. If the interval between samples on the grid does not match\n # the interval between samples sent from the device exactly, the nearest\n # sample (in time) is taken.\n #\n # 2 = Linear interpolation. If the interval between samples on the grid does\n # not match the interval between samples sent from the device exactly,\n # linear interpolation is performed between the two neighbouring\n # samples.\n #\n # 4 = Exact. The subscribed signal with the highest sampling rate (as sent\n # from the device) defines the interval between samples on the DAQ\n # Module's grid. If multiple signals are subscribed, these are\n # interpolated onto the grid (defined by the signal with the highest\n # rate, \"highest_rate\"). 
In this mode, dataAcquisitionModule/duration is\n # read-only and is defined as num_cols/highest_rate.\n grid_mode = 2\n daqMod.set('dataAcquisitionModule/grid/mode', grid_mode)\n # type:\n # NO_TRIGGER = 0\n # EDGE_TRIGGER = 1\n # DIGITAL_TRIGGER = 2\n # PULSE_TRIGGER = 3\n # TRACKING_TRIGGER = 4\n # HW_TRIGGER = 6\n # TRACKING_PULSE_TRIGGER = 7\n # EVENT_COUNT_TRIGGER = 8\n daqMod.set('dataAcquisitionModule/type', 1)\n # triggernode, specify the triggernode to trigger on.\n # SAMPLE.X = Demodulator X value\n # SAMPLE.Y = Demodulator Y value\n # SAMPLE.R = Demodulator Magnitude\n # SAMPLE.THETA = Demodulator Phase\n # SAMPLE.AUXIN0 = Auxilliary input 1 value\n # SAMPLE.AUXIN1 = Auxilliary input 2 value\n # SAMPLE.DIO = Digital I/O value\n triggernode = '/%s/demods/%d/sample.r' % (master, demod_c)\n daqMod.set('dataAcquisitionModule/triggernode', triggernode)\n # edge:\n # POS_EDGE = 1\n # NEG_EDGE = 2\n # BOTH_EDGE = 3\n daqMod.set('dataAcquisitionModule/edge', 1)\n demod_rate = daq.getDouble('/%s/demods/%d/rate' % (master, demod_c))\n # Exact mode: To preserve our desired trigger duration, we have to set\n # the number of grid columns to exactly match.\n trigger_duration = time_constant*30\n sample_count = demod_rate*trigger_duration\n daqMod.set('dataAcquisitionModule/grid/cols', sample_count)\n # The length of each trigger to record (in seconds).\n daqMod.set('dataAcquisitionModule/duration', trigger_duration)\n daqMod.set('dataAcquisitionModule/delay', -trigger_duration/4)\n # Do not return overlapped trigger events.\n daqMod.set('dataAcquisitionModule/holdoff/time', 0)\n daqMod.set('dataAcquisitionModule/holdoff/count', 0)\n daqMod.set('dataAcquisitionModule/level', out_amp/6)\n # The hysterisis is effectively a second criteria (if non-zero) for triggering\n # and makes triggering more robust in noisy signals. 
When the trigger `level`\n # is violated, then the signal must return beneath (for positive trigger edge)\n # the hysteresis value in order to trigger.\n daqMod.set('dataAcquisitionModule/hysteresis', 0.01)\n # synchronizing the settings\n daq.sync()\n\n # Recording\n\n # Subscribe to the demodulators\n daqMod.unsubscribe('*')\n master_subscribe_node = '/%s/demods/%d/sample.r' % (master, demod_c)\n daqMod.subscribe(master_subscribe_node)\n for prop in props[1:]:\n slave_subscribe_node = '/%s/demods/%d/sample.r' % (prop['deviceid'], demod_c)\n daqMod.subscribe(slave_subscribe_node)\n\n # Execute the module\n daqMod.execute()\n # Send a trigger\n daq.setDouble('/%s/sigouts/%d/enables/%d' % (master, out_c, out_mixer_c), 1)\n\n # wait for the acquisition to be finished\n while not daqMod.finished():\n time.sleep(1)\n print(\"Progress {:.2%}\".format(daqMod.progress()[0]), end=\"\\r\")\n\n # Read the result\n result = daqMod.read(True)\n\n # Turn off the trigger\n daq.setDouble('/%s/sigouts/%d/enables/%d' % (master, out_c, out_mixer_c), 0)\n # Finish the DAQ module\n daqMod.finish()\n daqMod.clear()\n\n # Stopping the MDS module\n if synchronize:\n mds.clear()\n\n # Extracting and plotting the data\n\n if do_plot:\n\n # Master data\n mClockbase = daq.getDouble('/%s/clockbase' % master)\n masTimestamp = result[master_subscribe_node][0]['timestamp']\n masTime = (masTimestamp[0] - float(masTimestamp[0][0])) / mClockbase\n masDemodAmp = result[master_subscribe_node][0]['value'][0]\n # Plotting\n import matplotlib.pyplot as plt\n\n plt.figure(1)\n plt.clf()\n axes1 = plt.subplot(2, 1, 1)\n plt.plot(masTime*1E3, masDemodAmp*1E3, color='blue')\n axes1.set_ylabel('Amplitude [mV]', fontsize=12, color='k')\n axes1.legend(['Master'])\n axes1.set_title('Transient Measurement by DAQ Module')\n plt.grid(True)\n\n # Slave data\n for prop in props[1:]:\n slave = prop['deviceid'].lower()\n slave_subscribe_node = '/%s/demods/%d/sample.r' % (slave, demod_c)\n sClockbase = daq.getDouble('/%s/clockbase' % slave)\n slvTimestamp = result[slave_subscribe_node][0]['timestamp']\n slvTime = (slvTimestamp[0] - float(slvTimestamp[0][0])) / sClockbase\n slvDemodAmp = result[slave_subscribe_node][0]['value'][0]\n\n axes2 = plt.subplot(2, 1, 2)\n plt.plot(slvTime*1E3, slvDemodAmp*1E3, color='red')\n axes2.legend(['Slaves'])\n axes2.set_xlabel('Time [ms]', fontsize=12, color='k')\n axes2.set_ylabel('Amplitude [mV]', fontsize=12, color='k')\n plt.grid(True)\n\n plt.figure(2)\n plt.clf()\n axes1 = plt.subplot(2, 1, 1)\n plt.plot(masTime*1E3, masDemodAmp*1E3, color='blue')\n\n for prop in props[1:]:\n slave = prop['deviceid'].lower()\n slave_subscribe_node = '/%s/demods/%d/sample.r' % (slave, demod_c)\n sClockbase = daq.getDouble('/%s/clockbase' % slave)\n slvTimestamp = result[slave_subscribe_node][0]['timestamp']\n slvTime = (slvTimestamp[0] - float(slvTimestamp[0][0])) / sClockbase\n slvDemodAmp = result[slave_subscribe_node][0]['value'][0]\n plt.plot(slvTime*1E3, slvDemodAmp*1E3, color='red')\n axes1.set_ylabel('Amplitude [mV]', fontsize=12, color='k')\n axes1.legend(['Master', 'Slaves'])\n axes1.set_title('Transient Measurement by DAQ Module')\n plt.grid(True)\n\n axes2 = plt.subplot(2, 1, 2)\n for prop in props[1:]:\n slave = prop['deviceid'].lower()\n slave_subscribe_node = '/%s/demods/%d/sample.r' % (slave, demod_c)\n sClockbase = daq.getDouble('/%s/clockbase' % slave)\n slvTimestamp = result[slave_subscribe_node][0]['timestamp']\n slvTime = (slvTimestamp[0] - float(slvTimestamp[0][0])) / sClockbase\n 
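# With MDS running, this master-slave sample-time difference should sit close to zero.\n            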
plt.plot(slvTime*1E3, (masTime - slvTime)*1E3, color='green')\n axes2.set_title('Time Difference between Master and Slaves')\n axes2.set_xlabel('Time [ms]', fontsize=12, color='k')\n axes2.set_ylabel('Time difference [ms]', fontsize=12, color='k')\n plt.grid(True)\n\n plt.tight_layout()\n plt.draw()\n\n plt.show()\n\n return result\n","sub_path":"Drivers/python_libs/linux/zhinst/examples/common/example_multidevice_data_acquisition.py","file_name":"example_multidevice_data_acquisition.py","file_ext":"py","file_size_in_byte":15143,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"220136552","text":"several_things=[\"hello\",2,4,6.0,7.5,234352354,\"the end\",\"\",99]\n\n#program 1\nfor i in several_things:\n print(\"List:\",i)\n#program 2\nfor j in several_things:\n print(type(j))\n#program 3\nstr_list= [\"hello\",\"\",\"goodbye\",\"wonderful\",\"I love Python\"]\n\nfor k in str_list:\n print(len(k))\n\n#program 4\naddition_str=\"2+5+10+20\"\nlst=list()\nlst=addition_str.split(\"+\")\nprint(lst)\nsum=0\nfor i in lst:\n j=int(i)\n sum=sum+j\n\nprint(sum)\n#program 5\nweek_temps_f=\"75.1,77.7,83.2,82.5,81.0,79.5,85.7\"\nlst=list()\nlst=week_temps_f.split(\",\")\nprint(lst)\nsum=0\ncount=0\nfor i in lst:\n i=float(i)\n count=count+1\n sum=sum+i\n\navg=sum/count\nprint(\"Average of numbers in list:\\n\",avg)\n","sub_path":"list1.py","file_name":"list1.py","file_ext":"py","file_size_in_byte":676,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"292258508","text":"\"\"\"\r\nContains the PokerGame class\r\n\"\"\"\r\nfrom subprocess import check_call\r\nfrom time import sleep\r\n\r\nfrom deck import Deck\r\nfrom enums import GameState\r\nfrom hand import Hand\r\nfrom hand_evaluator import get_best_hand, get_players_with_hand\r\nfrom player import Player\r\n\r\nclass PokerGame(object):\r\n \"\"\"Play a game of poker\"\"\"\r\n def __init__(self, small_blind, big_blind, players):\r\n self.s_blind = small_blind\r\n self.b_blind = big_blind\r\n self.players = players\r\n self.active_players = []\r\n self.pot = 0\r\n self.call_amount = self.b_blind\r\n self.game_state = GameState.PRE_FLOP\r\n self.deck = Deck()\r\n self.deck.shuffle()\r\n self.first_round = True\r\n\r\n for player in players:\r\n self.active_players.append(player)\r\n\r\n def make_bet(self, player, amount):\r\n \"\"\"Make a bet: Withdraw from player and add to pot\"\"\"\r\n player.chips -= int(amount)\r\n self.pot += int(amount)\r\n player.current_bet += int(amount)\r\n\r\n def reset_round(self):\r\n \"\"\"Reset all game variables for next round\"\"\"\r\n self.pot = 0\r\n self.call_amount = self.b_blind\r\n self.game_state = GameState.PRE_FLOP\r\n self.deck = Deck()\r\n self.deck.shuffle()\r\n self.first_round = True\r\n\r\n self.active_players = []\r\n for player in self.players:\r\n player.reset()\r\n self.active_players.append(player)\r\n\r\n def deal_cards(self):\r\n \"\"\"Deal appropriate cards to each player depending on game state\"\"\"\r\n if self.game_state == GameState.PRE_FLOP:\r\n for i in range(0, 2):\r\n for player in self.players:\r\n card = self.deck.draw_card()\r\n player.hand.add_card(card)\r\n elif self.game_state == GameState.FLOP:\r\n for i in range(0, 3):\r\n card = self.deck.draw_card()\r\n for player in self.players:\r\n player.hand.add_card(card)\r\n elif self.game_state == GameState.TURN or self.game_state == GameState.RIVER:\r\n card = self.deck.draw_card()\r\n for player in self.players:\r\n 
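# the turn/river adds a single shared community card to every player's hand\r\n                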
player.hand.add_card(card)\r\n\r\n    def process_user_action(self, player, action_lst, call_amount):\r\n        \"\"\"Process a player's requested action\"\"\"\r\n        action = action_lst[0]\r\n        if len(action_lst) == 2:\r\n            amount = int(action_lst[1])\r\n            if action == \"RAISE\":\r\n                self.call_amount = amount + self.call_amount\r\n                self.make_bet(player, self.call_amount - player.current_bet)\r\n            else: # BET\r\n                self.call_amount = amount\r\n                self.make_bet(player, self.call_amount - player.current_bet)\r\n        elif action == \"FOLD\":\r\n            player.is_in = False\r\n            self.active_players.remove(player)\r\n        elif action == \"CALL\":\r\n            self.make_bet(player, call_amount - player.current_bet)\r\n        return action\r\n\r\n    def get_user_action(self, player):\r\n        \"\"\"Return a list representing the user's action\"\"\"\r\n        # get possible actions depending on game state\r\n        if self.game_state == GameState.PRE_FLOP:\r\n            possible_actions = [\"FOLD\", \"CALL\", \"BET\"]\r\n            prompt = player.name + \": [FOLD] [CALL] [BET]: \"\r\n            if player == self.players[1] and self.first_round and \\\r\n                self.call_amount == player.current_bet:\r\n                self.first_round = False\r\n                possible_actions = [\"FOLD\", \"CHECK\", \"BET\"]\r\n                prompt = player.name + \": [FOLD] [CHECK] [BET]: \"\r\n            elif player == self.players[1] and self.first_round and not \\\r\n                self.call_amount == player.current_bet:\r\n                self.first_round = False\r\n                possible_actions = [\"FOLD\", \"CALL\", \"BET\"]\r\n                prompt = player.name + \": [FOLD] [CALL] [BET]: \"\r\n            elif not self.first_round:\r\n                possible_actions = [\"FOLD\", \"CALL\"]\r\n                prompt = player.name + \": [FOLD] [CALL]: \"\r\n        else:\r\n            if self.call_amount == player.current_bet:\r\n                possible_actions = [\"FOLD\", \"CHECK\", \"RAISE\"]\r\n                prompt = player.name + \": [FOLD] [CHECK] [RAISE]: \"\r\n            elif self.call_amount > player.current_bet:\r\n                possible_actions = [\"FOLD\", \"CALL\", \"RAISE\"]\r\n                prompt = player.name + \": [FOLD] [CALL] [RAISE]: \"\r\n\r\n        # get user input\r\n        print(\"Current Hand: \" + str(player.hand))\r\n        while True:\r\n            user_input = input(prompt).upper().split()\r\n\r\n            if user_input[0] in possible_actions:\r\n                if len(user_input) > 2:\r\n                    print(\"ERROR: too many arguments\")\r\n                elif len(user_input) == 2:\r\n                    try:\r\n                        amount = int(user_input[1])\r\n                    except ValueError:\r\n                        print(\"ERROR: second argument must be an integer\")\r\n                        continue\r\n                    if player.chips < amount:\r\n                        print(\"ERROR: not enough chips to bet\")\r\n                        continue\r\n                return user_input\r\n            else:\r\n                print(\"ERROR: you can't do that\")\r\n\r\n    def print_done_betting(self):\r\n        \"\"\"Print message to indicate the betting phase has completed\"\"\"\r\n        print(\"\\n*&*&*&*&*&*&*&*&*&*&*&*&\")\r\n        print(\"DONE BETTING \" + self.game_state.value)\r\n        print(\"*&*&*&*&*&*&*&*&*&*&*&*&\")\r\n        sleep(1)\r\n\r\n    def print_dealing(self):\r\n        \"\"\"Print message to indicate cards are being dealt\"\"\"\r\n        for i in range(0, 5):\r\n            check_call(\"clear\")\r\n            print(\"Dealing cards\" + \".\" * i)\r\n            sleep(.25)\r\n\r\n    def print_welcome(self):\r\n        \"\"\"Print welcome message\"\"\"\r\n        print(\"----------------------------------------\")\r\n        print(\"WELCOME TO WILD WEST TEXAS HOLD EM POKER\")\r\n        print(\"----------------------------------------\\n\")\r\n\r\n    def print_game_board(self):\r\n        \"\"\"print game board\"\"\"\r\n        for player in self.players:\r\n            # determine blind text\r\n            if player == self.players[0]:\r\n                blind_text = \"(SB)\"\r\n            elif player == self.players[1]:\r\n                blind_text = \"(BB)\"\r\n            else:\r\n                blind_text = \"\"\r\n            if player in self.active_players:\r\n                
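The input loop in get_user_action above interleaves parsing and validation; the same checks can be factored into a small helper, sketched here. The helper name and its return convention are illustrative and not part of the original game:

def parse_action(raw, possible_actions, chips):
    # Returns (action, amount) when the input is valid, else None.
    parts = raw.upper().split()
    if not parts or parts[0] not in possible_actions or len(parts) > 2:
        return None
    amount = 0
    if len(parts) == 2:
        try:
            amount = int(parts[1])
        except ValueError:
            return None  # second argument must be an integer
        if amount > chips:
            return None  # not enough chips to bet
    return parts[0], amount

print(parse_action("raise 50", ["FOLD", "CALL", "RAISE"], 500))  # ('RAISE', 50)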
print(\"{:<10} -- Current Bet: {:>8} Call Amount: {:>8} Chips: {:>8} {}\"\r\n .format(player.name, player.current_bet, self.call_amount, player.chips, blind_text))\r\n else:\r\n print(\"{:<10} -- OUT {:>46} {:>8} {}\"\r\n .format(player.name, \"Chips:\", player.chips, blind_text))\r\n\r\n print(\"pot amount: {}\".format(self.pot))\r\n\r\n def print_round_summary(self):\r\n \"\"\"print the round summary\"\"\"\r\n print(\"\\n\\nEND OF ROUND SUMMARRY\")\r\n print(\"---------------------\")\r\n for player in self.players:\r\n print(\"{:<10} -- Chips: {:>8}\".format(player.name, player.chips))\r\n\r\n def initiate_betting(self, game_state):\r\n \"\"\"Initiate the betting sequence\"\"\"\r\n s_blind_placed = False\r\n b_blind_placed = False\r\n first_pass = True\r\n end_enable = False\r\n if game_state == GameState.PRE_FLOP:\r\n end_player = self.players[2]\r\n else:\r\n end_player = self.players[0]\r\n while True:\r\n # Check to see if betting round is done\r\n if not first_pass:\r\n betting_done = True\r\n for player in self.players:\r\n if not player.current_bet == self.call_amount and player.is_in:\r\n betting_done = False\r\n break\r\n if betting_done:\r\n break\r\n\r\n for player in self.players:\r\n check_call(\"clear\")\r\n if game_state == GameState.PRE_FLOP:\r\n # Check to see if blinds are required\r\n if not s_blind_placed and self.players.index(player) == 0:\r\n # place small blind\r\n self.make_bet(player, self.s_blind)\r\n s_blind_placed = True\r\n print(player.name + \" places small blind of \" + str(self.s_blind))\r\n continue\r\n elif not b_blind_placed and self.players.index(player) == 1:\r\n # place big blind\r\n self.make_bet(player, self.b_blind)\r\n b_blind_placed = True\r\n print(player.name + \" places big blind of \" + str(self.b_blind))\r\n continue\r\n\r\n # Check to see if the player is the only active one left\r\n # If True\r\n if len(self.active_players) == 1 and player in self.active_players:\r\n return True\r\n\r\n # check if player has folded\r\n if not player.is_in:\r\n continue\r\n\r\n # Check to see if the player is the end player\r\n if end_player == player:\r\n if end_enable:\r\n break\r\n else:\r\n end_enable = True\r\n\r\n # print game board\r\n self.print_game_board()\r\n\r\n # Check if player is human\r\n if player.is_human:\r\n user_action_lst = self.get_user_action(player)\r\n action = self.process_user_action(player, user_action_lst, self.call_amount)\r\n if action in [\"RAISE\", \"BET\"]:\r\n end_player = player\r\n else:\r\n # Have CPU make decision\r\n # info to pass: pot size, call_amount\r\n \"\"\"\r\n Possible actions\r\n ----------------\r\n [\"FOLD\"] <- exits round, sets player.is_in to false\r\n [\"CHECK\"] <- only possible if call_amount = player.bet_amount\r\n [\"CALL\"] <- bets call_amount\r\n [\"RAISE X\"] <- bets X amount\r\n \"\"\"\r\n\r\n first_pass = False\r\n return False\r\n\r\n def play(self):\r\n \"\"\"Launch a game of Poker\"\"\"\r\n while True: # play until player exits\r\n check_call(\"clear\")\r\n self.print_welcome()\r\n\r\n start_game = input(\"Would you like to start a round of poker? y/n: \")\r\n if start_game.upper() == \"Y\":\r\n for state in GameState:\r\n self.game_state = state\r\n self.print_dealing()\r\n self.deal_cards()\r\n is_done = self.initiate_betting(self.game_state)\r\n check_call(\"clear\")\r\n\r\n if is_done:\r\n print(\"All but one have folded, game is over! 
Determining winner...\")\r\n sleep(1)\r\n break # continue to end of game to determine winner\r\n else:\r\n self.print_done_betting()\r\n sleep(1)\r\n\r\n check_call(\"clear\")\r\n\r\n # Determine if there is more than one player active and showdown is necessary\r\n if len(self.active_players) == 1:\r\n winner = self.active_players[0]\r\n print(str(winner) + \" wins the pot amount of: \" + str(self.pot))\r\n winner.chips += self.pot\r\n\r\n else:\r\n # Determine and print the winner, update chips for winning player(s)\r\n print(\"End of betting and there are more than one player active. ENTERING SHOWDOWN!\")\r\n showdown_hands = []\r\n for player in self.active_players:\r\n showdown_hands.append(player.get_best_hand())\r\n\r\n winning_hand = get_best_hand(showdown_hands)\r\n winning_players = get_players_with_hand(winning_hand, self.active_players)\r\n winnings = self.pot / len(winning_players)\r\n\r\n print(\"Winning Hand = \" + str(winning_hand))\r\n\r\n for player in winning_players:\r\n player.chips += winnings\r\n print(str(player) + \" has the winning hand and wins the pot amount of: \" + \\\r\n str(winnings))\r\n\r\n self.print_round_summary()\r\n self.reset_round()\r\n self.players.append(self.players.pop(0))\r\n input(\"\\nGame over, press any key to continue: \")\r\n else:\r\n print(\"You have chosen to stop playng. Have a nice day!\")\r\n break\r\n\r\nif __name__ == \"__main__\":\r\n PLAYER1 = Player(\"Arnej\", 500, Hand(), True)\r\n PLAYER2 = Player(\"Drizzy Dre\", 500, Hand(), True)\r\n PLAYER3 = Player(\"Milo\", 500, Hand(), True)\r\n\r\n PLAYERS = [PLAYER1, PLAYER2, PLAYER3]\r\n\r\n GAME = PokerGame(1, 2, PLAYERS)\r\n GAME.play()\r\n","sub_path":"poker.py","file_name":"poker.py","file_ext":"py","file_size_in_byte":13263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"359144338","text":"from datetime import datetime\n\n\ndef hour_float_to_time(time_float, is_24hr_format=True):\n \"\"\" Converts hour floats like 9.5 into 9:30 or 09:30 if is_24_hr_format is True \"\"\"\n if type(time_float) is str:\n time_float = float(time_float)\n hour, minute = divmod(time_float * 60, 60)\n # print('hour n minute', hour, minute)\n hm_string = '{0:02.0f}:{1:02.0f}'.format(hour, minute)\n if is_24hr_format:\n return hm_string\n else:\n return datetime.strptime(hm_string, '%H:%M').strftime('%-I.%M %p')\n\n\ndef float_to_day_time(time_float):\n if type(time_float) is str:\n time_float = float(time_float)\n day = int(time_float)\n if time_float > 0:\n pass\n # print('time_float = ', time_float)\n day_left = (time_float % 1) * 24\n hour, minute = divmod(day_left * 60, 60)\n if minute >= 59.00:\n minute = 0\n hour += 1\n if hour >= 7.9:\n day += 1\n hour = 0\n hm_string = str(day) + ' - ' + '{0:02.0f}:{1:02.0f}'.format(hour, minute)\n return hm_string\n\n\ndef ordinal_num(number):\n return str(number) + (\"th\" if 4 <= number % 100 <= 20 else {1:\"st\",2:\"nd\",3:\"rd\"}.get(number % 10, \"th\"))","sub_path":"utility/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1177,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"528144435","text":"#!/usr/bin/env python\nfrom sqlalchemy import orm\n# cancerhermit\nfrom pg_catalog import pg_attrdef, pg_class\nfrom pgvcs import row, _regclass,column,index,foreign_table,sequence,table,trigger,rule,view\n\nclass regclass(_regclass):\n \"\"\"init_on_load class assigment only\"\"\"\n __table__ = _regclass.__table__\n\n 
@orm.reconstructor\n def init_on_load(self):\n super(type(self),self).init_on_load()\n self.columns = []\n self.inhparent = None\n self.inhseqno = None\n if self.relkind=='c': # table for composite type\n pass\n #self.__class__ = composite_table\n if self.relkind=='i': # index class\n self.__class__ = index\n if self.relkind=='t': # toast class\n return\n if self.relkind=='f':\n self.__class__ = foreign_table\n if self.relkind=='r':\n self.__class__ = table\n if self.relkind=='S':\n self.__class__ = sequence\n sql='select * from %s.%s' % (self.schema.name,self.name)\n self.params=self.db.session.execute(sql).fetchone()\n if self.relkind=='v':\n self.__class__ = view\n #raise ValueError(\"Invalid regclass relkind, %s\" % self.relkind)\n\n","sub_path":"pgvcs/ddl/regclass.py","file_name":"regclass.py","file_ext":"py","file_size_in_byte":1234,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"3793320","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy import integrate\n\nT0 = 1/5000\nt = np.linspace(0,20,10000)\nf_sampling = 1/(t[1]-t[0])\nlfft = len(t)\n\ndef v1(t):\n\tE0 = 8\n\tf0 = 100*1e3\n\tEm = 6\n\tfm = 5*1e3\n\tem = Em*np.cos(2*np.pi*fm*t)\n\te0 = E0*np.cos(2*np.pi*f0*t)\n\treturn 0.5*(em+e0)\n\nfreq = np.fft.fftfreq(lfft)\nfreq = np.fft.fftshift(freq)\n\n\ndef v2(t):\n\tb1,b2=0.054,0.004\n\treturn b1*v1(t) + b2*v1(t)**2\n'''\ndef find_period(array_y,array_x):\n\tts = array_x[1]-array_x[0]\n\tN_ts = np.where(array_y>max(array_y)*0.99)[0][-1]\n\tN_ts -= np.where(array_y>max(array_y)*0.99)[0][-2]\n\treturn N_ts*ts\n'''\n\n\ndef _cos(x,n_,w_):\n\treturn v2(x)*np.cos(x*n_*w_)\n\t\ndef data_cos(data,x,n_,w_):\n\treturn data*np.cos(x*n_*w_)\n\t\ndef an(T,n=10):\n\tan=np.zeros(n)\n\tW = 2*np.pi/T\n\n\tfor i in range(n):\n\t\tan[i]=(2/T)*(integrate.quad(_cos,0,T,args=(i,W))[0])\n\treturn an\n\ndef data_an(data_,x,T,TN,n=10):\n\tan=np.zeros(n)\n\tW = 2*np.pi/T\n\tfor i in range(n):\n\t\tarray = data_cos(data_[TN:2*TN],x[TN:2*TN],i,W)\n\t\tan[i]=(2/TN)*(np.trapz(array))\n\treturn an\n\n\n\n\n\t\ndef _sin(x,n_,w_):\n\treturn v2(x)*np.sin(x*n_*w_)\n\t\ndef data_sin(data,x,n_,w_):\n\treturn data*np.sin(x*n_*w_)\n\n\ndef bn(T,n=10):\n\tbn=np.zeros(n)\n\tW = 2*np.pi/T\n\n\tfor i in range(n):\n\t\tbn[i]=(2/T)*(integrate.quad(_sin,0,T,args=(i,W))[0])\n\treturn bn\t\n\n\ndef data_bn(data_,x,T,TN,n=10):\n\tbn=np.zeros(n)\n\tW = 2*np.pi/T\n\tfor i in range(n):\n\t\tarray = data_sin(data_[TN:2*TN],x[TN:2*TN],i,W)\n\t\tbn[i]=(2/TN)*(np.trapz(array))\n\treturn bn\n\n\n\n\n\n\ndef rebuilt(an_coefs,bn_coefs,T,x):\n\tw0 = 2*np.pi/T\n\tN = len(an_coefs)\n\tf_sum = 0\n\tfor n in range(N):\n\t\tf_sum += an_coefs[n]*np.cos(x*n*w0)\n\t\tf_sum += bn_coefs[n]*np.sin(x*n*w0)\n\treturn f_sum\n\t\n\n\n#filtragem do passa-faixas\nfrom scipy.signal import butter\nfrom scipy.signal import lfilter\nC = 130*1e-9\nL = 20*1e-6\nwc = 1/(np.sqrt(L*C))\nB = 2*np.pi*14*1e3 #bandwidth\nQ = wc/B #quality factor\nw_inferior = wc*np.sqrt(1+1/(4*Q**2))-wc/(2*Q)\nw_superior = wc*np.sqrt(1+1/(4*Q**2))+wc/(2*Q)\n\nfin=w_inferior/(2*np.pi)\nfsup=w_superior/(2*np.pi)\n\n\n\ndef butter_bandpass(lowcut, highcut, fs, order=5):\n nyq = 0.5 * fs\n low = lowcut / nyq\n high = highcut / nyq\n b, a = butter(order, [low, high], btype='band')\n return b, a\n\n\ndef butter_bandpass_filter(data, lowcut, highcut, fs, order=5):\n b, a = butter_bandpass(lowcut, highcut, fs, order=order)\n y = lfilter(b, a, data)\n return y\n\nv3 = 
butter_bandpass_filter(v2(t),fin/10000,fsup/10000,f_sampling,order=1)\nv3_S = np.fft.fft(v3,lfft)/lfft\nv3_S = np.fft.fftshift(v3_S)\n\n\nN = 25\nTN = 1000\na_n = data_an(v3,t,T0,TN,N)\nb_n = data_bn(v3,t,T0,TN,N)\n\nv3_rec = rebuilt(a_n,b_n,14,t)\nv3_rec_S = np.fft.fftshift(np.fft.fft(v3_rec))\nplt.subplot(211)\nplt.title(\"V3(t): sinal original\")\nplt.ylabel(\"Amplitude (V)\")\nplt.xlabel(\"tempo (s)\")\n\nplt.plot(t[TN:2*TN+200],v3[TN:2*TN+200])\n\nplt.subplot(212)\nplt.title(\"V3(f): sinal reconstruído pela Série de Fourier\")\nplt.ylabel(\"Amplitude (V)\")\nplt.xlabel(\"tempo (s)\")\nplt.plot(t,v3_rec,'r')\n\nplt.tight_layout()\nplt.show()\n\n\n\t\n\n","sub_path":"am_dsb_fseries_v3.py","file_name":"am_dsb_fseries_v3.py","file_ext":"py","file_size_in_byte":2987,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"516065695","text":"# coding: utf-8\n\nimport glob, sys\nfrom distutils.core import setup\ntry:\n from setuptools import setup\nexcept ImportError:\n pass\n\n\nversion = '0.2.1'\n\nsetup(\n name=\"taxon\",\n version=version,\n description=(\"Provides simple object taxonomy.\"),\n classifiers=[\"Development Status :: 4 - Beta\",\n \"Intended Audience :: Developers\",\n \"License :: Public Domain\",\n \"Programming Language :: Python\",\n \"Topic :: Software Development :: Libraries :: Python Modules\"],\n author=\"Yasushi Masuda\",\n author_email=\"whosaysni at gmail.com\",\n url=\"http://github.com/whosaysni/taxon/\",\n license=\"Public Domain\",\n zip_safe=True,\n packages=[\"taxon\"],\n test_suite = 'tests.suite',\n )\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"413183441","text":"from deepspeed.profiling.flops_profiler import get_model_profile\nfrom deepspeed.profiling.flops_profiler import FlopsProfiler\nimport torchvision.models as models\nimport torch\nimport torchvision\nimport random\nimport time\nimport argparse\nimport os\nimport sys\nimport math\nimport torch.nn as nn\nimport torch.multiprocessing as mp\nfrom utils.fp16util import network_to_half, get_param_copy\nfrom utils.shufflenet import shufflenet\nfrom utils.shufflenet_v2 import shufflenet as shufflenet_v2\ntry:\n import apex\n HAVE_APEX = True\nexcept:\n HAVE_APEX = False\n\ndef weight_init(m):\n if isinstance(m, nn.Conv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2. 
/ n))\n if m.bias is not None:\n m.bias.data.zero_()\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n\n# num_classes=1000\nmodels = {\n \"alexnet\" : torchvision.models.alexnet,\n \"densenet121\" : torchvision.models.densenet121,\n \"densenet161\" : torchvision.models.densenet161,\n \"densenet169\" : torchvision.models.densenet169,\n \"densenet201\" : torchvision.models.densenet201,\n \"googlenet\" : torchvision.models.googlenet,\n \"inception_v3\" : torchvision.models.inception_v3,\n \"mnasnet0_5\" : torchvision.models.mnasnet0_5,\n \"mnasnet0_75\" : torchvision.models.mnasnet0_75,\n \"mnasnet1_0\" : torchvision.models.mnasnet1_0,\n \"mnasnet1_3\" : torchvision.models.mnasnet1_3,\n \"mobilenet_v2\" : torchvision.models.mobilenet_v2,\n \"resnet18\" : torchvision.models.resnet18,\n \"resnet34\" : torchvision.models.resnet34,\n \"resnet50\" : torchvision.models.resnet50,\n \"resnet101\" : torchvision.models.resnet101,\n \"resnet152\" : torchvision.models.resnet152,\n \"resnext50\" : torchvision.models.resnext50_32x4d,\n \"resnext50_32x4d\" : torchvision.models.resnext50_32x4d,\n \"resnext101\" : torchvision.models.resnext101_32x8d,\n \"resnext101_32x8d\" : torchvision.models.resnext101_32x8d,\n \"shufflenet\" : shufflenet,\n \"shufflenet_v2\" : shufflenet_v2,\n \"shufflenet_v2_x05\" : torchvision.models.shufflenet_v2_x0_5,\n \"shufflenet_v2_x10\" : torchvision.models.shufflenet_v2_x1_0,\n \"shufflenet_v2_x15\" : torchvision.models.shufflenet_v2_x1_5,\n \"shufflenet_v2_x20\" : torchvision.models.shufflenet_v2_x2_0,\n \"shufflenet_v2_x0_5\" : torchvision.models.shufflenet_v2_x0_5,\n \"shufflenet_v2_x1_0\" : torchvision.models.shufflenet_v2_x1_0,\n \"shufflenet_v2_x1_5\" : torchvision.models.shufflenet_v2_x1_5,\n \"shufflenet_v2_x2_0\" : torchvision.models.shufflenet_v2_x2_0,\n \"SqueezeNet\" : torchvision.models.squeezenet1_0,\n \"squeezenet1_0\" : torchvision.models.squeezenet1_0,\n \"SqueezeNet1.1\" : torchvision.models.squeezenet1_1,\n \"squeezenet1_1\" : torchvision.models.squeezenet1_1,\n \"vgg11\" : torchvision.models.vgg11,\n \"vgg13\" : torchvision.models.vgg13,\n \"vgg16\" : torchvision.models.vgg16,\n \"vgg19\" : torchvision.models.vgg19,\n \"vgg11_bn\" : torchvision.models.vgg11_bn,\n \"vgg13_bn\" : torchvision.models.vgg13_bn,\n \"vgg16_bn\" : torchvision.models.vgg16_bn,\n \"vgg19_bn\" : torchvision.models.vgg19_bn,\n \"wide_resnet50_2\" : torchvision.models.wide_resnet50_2,\n \"wide_resnet101_2\" : torchvision.models.wide_resnet101_2,\n}\n\n# newer torchvision models, for backwards compat\ntry:\n models[\"mobilenet_v3_large\"] = torchvision.models.mobilenet_v3_large\n models[\"mobilenet_v3_small\"] = torchvision.models.mobilenet_v3_small\nexcept AttributeError:\n pass\n\n# segmentation models, num_classes=21\nsegmentation_models = {\n \"fcn_resnet50\" : torchvision.models.segmentation.fcn_resnet50,\n \"fcn_resnet101\" : torchvision.models.segmentation.fcn_resnet101,\n \"deeplabv3_resnet50\" : torchvision.models.segmentation.deeplabv3_resnet50,\n \"deeplabv3_resnet101\" : torchvision.models.segmentation.deeplabv3_resnet101,\n}\n\n# newer torchvision segmentation models, for backwards compat\ntry:\n segmentation_models[\"deeplabv3_mobilenet_v3_large\"] = torchvision.models.segmentation.deeplabv3_mobilenet_v3_large\n segmentation_models[\"lraspp_mobilenet_v3_large\"] = torchvision.models.segmentation.lraspp_mobilenet_v3_large,\nexcept AttributeError:\n pass\n\ndef get_network_names():\n return sorted(list(models.keys()) + 
list(segmentation_models.keys()))\n\ndef get_network(net):\n    # aux_logits=False only used by inception_v3\n    if \"inception_v3\" == net:\n        return models[net](aux_logits=False).to(device=\"cuda\")\n    elif net in models:\n        return models[net]().to(device=\"cuda\")\n    elif net in segmentation_models:\n        return segmentation_models[net]().to(device=\"cuda\")\n    else:\n        print (\"ERROR: not a supported model '%s'\" % net)\n        sys.exit(1)\n\ndef forwardbackward(inp, optimizer, network, target, amp_opt_level, prof_step=0):\n    # params: prof_step - non-zero step to profile\n\n    optimizer.zero_grad()\n\n    if prof_step != 0:\n        prof = FlopsProfiler(network)\n        prof.start_profile()\n\n    out = network(inp)\n    # WIP: googlenet, deeplabv3_*, fcn_* missing log_softmax for this to work\n    loss = torch.nn.functional.cross_entropy(out, target)\n\n    # End profiler here if profile fwd pass only\n\n    if prof_step != 0:\n        if amp_opt_level:\n            with apex.amp.scale_loss(loss, optimizer) as scaled_loss:\n                scaled_loss.backward()\n        else:\n            loss.backward()\n\n        # End profiler here to cover both fwd & bwd passes\n        flops = prof.get_total_flops(as_string=True)\n        params = prof.get_total_params(as_string=True)\n        prof.print_model_profile(profile_step=prof_step)\n        prof.end_profile()\n\n    else:\n        if amp_opt_level:\n            with apex.amp.scale_loss(loss, optimizer) as scaled_loss:\n                scaled_loss.backward()\n        else:\n            loss.backward()\n    optimizer.step()\n\ndef rendezvous(distributed_parameters):\n    print(\"Initializing process group...\")\n    torch.distributed.init_process_group(backend=distributed_parameters['dist_backend'], init_method=distributed_parameters['dist_url'], rank=distributed_parameters['rank'], world_size=distributed_parameters['world_size'])\n    print(\"Rendezvous complete. Created process group...\")\n\ndef run_benchmarking_wrapper(net, batch_size, iterations, prof_step, amp_opt_level, run_fp16, dataparallel, distributed_dataparallel, device_ids=None, distributed_parameters=None):\n    if (dataparallel or distributed_dataparallel):\n        ngpus = len(device_ids) if device_ids else torch.cuda.device_count()\n    else:\n        ngpus = 1\n\n    if (distributed_dataparallel):\n        # Assumption below that each process launched with --distributed_dataparallel has the same number of devices visible/specified\n        distributed_parameters['world_size'] = ngpus * distributed_parameters['world_size']\n        distributed_parameters['rank'] = ngpus * distributed_parameters['rank']\n        mp.spawn(run_benchmarking, nprocs=ngpus, args=(ngpus, net, batch_size, iterations, prof_step, amp_opt_level, run_fp16, dataparallel, distributed_dataparallel, device_ids, distributed_parameters))\n    else:\n        run_benchmarking(0, ngpus, net, batch_size, iterations, prof_step, amp_opt_level, run_fp16, dataparallel, distributed_dataparallel, device_ids=None, distributed_parameters=None)\n\ndef run_benchmarking(local_rank, ngpus, net, batch_size, iterations, prof_step, amp_opt_level, run_fp16, dataparallel, distributed_dataparallel, device_ids=None, distributed_parameters=None):\n    if device_ids:\n        assert ngpus == len(device_ids)\n        torch.cuda.set_device(\"cuda:%d\" % device_ids[local_rank])\n    else:\n        torch.cuda.set_device(\"cuda:0\")\n\n    network = get_network(net)\n    if \"shufflenet\" == net:\n        network.apply(weight_init)\n\n    if (run_fp16):\n        network = network_to_half(network)\n\n    if (dataparallel):\n        devices_to_run_on = device_ids if device_ids else list(range(ngpus))\n        print (\"INFO: Running dataparallel on devices: {}\".format(str(devices_to_run_on)))\n        network = torch.nn.DataParallel(network, device_ids=devices_to_run_on)\n    elif 
(distributed_dataparallel):\n distributed_parameters['rank'] += local_rank\n rendezvous(distributed_parameters)\n devices_to_run_on = [(device_ids[local_rank] if device_ids else local_rank)]\n print (\"INFO: Rank {} running distributed_dataparallel on devices: {}\".format(distributed_parameters['rank'], str(devices_to_run_on)))\n network = torch.nn.parallel.DistributedDataParallel(network, device_ids=devices_to_run_on)\n batch_size = int(batch_size / ngpus)\n\n if (net == \"inception_v3\"):\n inp = torch.randn(batch_size, 3, 299, 299, device=\"cuda\")\n else:\n inp = torch.randn(batch_size, 3, 224, 224, device=\"cuda\")\n if (run_fp16):\n inp = inp.half()\n if net in models:\n # number of classes is 1000 for imagenet\n target = torch.randint(0, 1000, (batch_size,), device=\"cuda\")\n elif net in segmentation_models:\n # number of classes is 21 for segmentation\n target = torch.randint(0, 21, (batch_size,), device=\"cuda\")\n param_copy = network.parameters()\n if (run_fp16):\n param_copy = get_param_copy(network)\n optimizer = torch.optim.SGD(param_copy, lr = 0.01, momentum = 0.9)\n\n if (amp_opt_level):\n network, optimizer = apex.amp.initialize(network, optimizer, opt_level=\"O%d\"%amp_opt_level)\n\n ## warmup.\n print (\"INFO: running forward and backward for warmup.\")\n forwardbackward(inp, optimizer, network, target, amp_opt_level)\n forwardbackward(inp, optimizer, network, target, amp_opt_level)\n\n time.sleep(1)\n torch.cuda.synchronize()\n\n ## benchmark.\n print (\"INFO: running the benchmark..\")\n tm = time.time()\n for i in range(iterations):\n if i == prof_step:\n forwardbackward(inp, optimizer, network, target, amp_opt_level, i)\n else:\n forwardbackward(inp, optimizer, network, target, amp_opt_level)\n torch.cuda.synchronize()\n\n tm2 = time.time()\n time_per_batch = (tm2 - tm) / iterations\n\n if run_fp16:\n dtype = 'FP16'\n elif amp_opt_level == 1:\n dtype = 'AMP-O1: Insert automatic FP16 casts around safe Pytorch functions and Tensor methods.'\n elif amp_opt_level == 2:\n dtype = 'AMP-O2: FP16 training with FP32 batchnorm and FP32 master weights.'\n elif amp_opt_level == 3:\n dtype = 'AMP-O3: Pure FP16 training.'\n elif amp_opt_level == 4:\n dtype = 'AMP-O4: Insert automatic BFLOAT16 casts around safe Pytorch functions and Tensor methods.'\n elif amp_opt_level == 5:\n dtype = 'AMP-O5: BFLOAT16 training with FP32 batchnorm and FP32 master weights.'\n else:\n dtype = 'FP32'\n\n print (\"OK: finished running benchmark..\")\n print (\"--------------------SUMMARY--------------------------\")\n print (\"Microbenchmark for network : {}\".format(net))\n if (distributed_dataparallel):\n print (\"--------This process: rank \" + str(distributed_parameters['rank']) + \"--------\");\n print (\"Num devices: 1\")\n else:\n print (\"Num devices: {}\".format(ngpus))\n print (\"Dtype: {}\".format(dtype))\n print (\"Mini batch size [img] : {}\".format(batch_size))\n print (\"Time per mini-batch : {}\".format(time_per_batch))\n print (\"Throughput [img/sec] : {}\".format(batch_size/time_per_batch))\n if (distributed_dataparallel):\n print (\"\")\n print (\"--------Overall (all ranks) (assuming same num/type devices for each rank)--------\")\n world_size = distributed_parameters['world_size']\n print (\"Num devices: {}\".format(world_size))\n print (\"Dtype: {}\".format(dtype))\n print (\"Mini batch size [img] : {}\".format(batch_size*world_size))\n print (\"Time per mini-batch : {}\".format(time_per_batch))\n print (\"Throughput [img/sec] : 
{}\".format(batch_size*world_size/time_per_batch))\n\ndef main():\n net = args.network\n batch_size = args.batch_size\n iterations = args.iterations\n prof_step = args.profile_step\n run_fp16 = args.fp16\n amp_opt_level = args.amp_opt_level\n dataparallel = args.dataparallel\n distributed_dataparallel = args.distributed_dataparallel\n device_ids_str = args.device_ids\n if (args.device_ids):\n device_ids = [int(x) for x in device_ids_str.split(\",\")]\n else:\n device_ids = None\n distributed_parameters = {}\n distributed_parameters['rank'] = args.rank\n distributed_parameters['world_size'] = args.world_size\n distributed_parameters['dist_backend'] = args.dist_backend\n distributed_parameters['dist_url'] = args.dist_url\n # Some arguments are required for distributed_dataparallel\n if distributed_dataparallel:\n assert args.rank is not None and \\\n args.world_size is not None and \\\n args.dist_backend is not None and \\\n args.dist_url is not None, \"rank, world-size, dist-backend and dist-url are required arguments for distributed_dataparallel\"\n\n run_benchmarking_wrapper(net, batch_size, iterations, prof_step, amp_opt_level, run_fp16, dataparallel, distributed_dataparallel, device_ids, distributed_parameters)\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--network\", type=str, choices=get_network_names(), required=True, help=\"Network to run.\")\n parser.add_argument(\"--batch-size\" , type=int, required=False, default=128, help=\"Batch size (will be split among devices used by this invocation)\")\n parser.add_argument(\"--iterations\", type=int, required=False, default=20, help=\"Iterations\")\n parser.add_argument(\"--profile-step\", type=int, required=False, default=0, help=\"The global profiling step\")\n parser.add_argument(\"--fp16\", type=int, required=False, default=0,help=\"FP16 mixed precision benchmarking\")\n parser.add_argument(\"--amp-opt-level\", type=int, required=False, default=0,help=\"apex.amp mixed precision benchmarking opt level\")\n parser.add_argument(\"--dataparallel\", action='store_true', required=False, help=\"Use torch.nn.DataParallel api to run single process on multiple devices. Use only one of --dataparallel or --distributed_dataparallel\")\n parser.add_argument(\"--distributed_dataparallel\", action='store_true', required=False, help=\"Use torch.nn.parallel.DistributedDataParallel api to run on multiple processes/nodes. The multiple processes need to be launched manually, this script will only launch ONE process per invocation. Use only one of --dataparallel or --distributed_dataparallel\")\n parser.add_argument(\"--device_ids\", type=str, required=False, default=None, help=\"Comma-separated list (no spaces) to specify which HIP devices (0-indexed) to run dataparallel or distributedDataParallel api on. Might need to use HIP_VISIBLE_DEVICES to limit visiblity of devices to different processes.\")\n parser.add_argument(\"--rank\", type=int, required=False, default=None, help=\"Rank of this process. Required for --distributed_dataparallel\")\n parser.add_argument(\"--world-size\", type=int, required=False, default=None, help=\"Total number of ranks/processes. Required for --distributed_dataparallel\")\n parser.add_argument(\"--dist-backend\", type=str, required=False, default=None, help=\"Backend used for distributed training. Can be one of 'nccl' or 'gloo'. 
Required for --distributed_dataparallel\")\n parser.add_argument(\"--dist-url\", type=str, required=False, default=None, help=\"url used for rendezvous of processes in distributed training. Needs to contain IP and open port of master rank0 eg. 'tcp://172.23.2.1:54321'. Required for --distributed_dataparallel\")\n\n args = parser.parse_args()\n\n if args.fp16 and args.amp_opt_level:\n print (\"ERROR: Cannot use both --fp16 and --amp-opt-level\")\n sys.exit(1)\n if args.amp_opt_level and not HAVE_APEX:\n print (\"ERROR: You must install apex to use --amp-opt-level\")\n sys.exit(1)\n\n main()\n","sub_path":"FlopsProfile/flops_train_vision.py","file_name":"flops_train_vision.py","file_ext":"py","file_size_in_byte":16411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"401320390","text":"\"\"\"\nMethods for testing Operation Types in a Docker container\n\"\"\"\n\nimport json as j\nimport os\nimport subprocess\nfrom pydent import AqSession\n\n\ndef get_records(session, protocol, records, record_names):\n \"\"\"\n Generates a dictionary of all new records by their tag\n\n :param session: session with container\n :type session: AqSession\n :param protocol: directory that manages the protocol\n :type protocol: ODir\n :return: dictionary of all records by tag\n :rtype: dict\n \"\"\"\n record_dict = {}\n\n for model_string in record_names:\n for record_info in records[model_string]:\n if 'data' in record_info:\n data = record_info['data']\n else:\n with protocol.open_file(record_info['source'], mode='r') as f:\n data = j.load(f)\n\n # Format data with tags\n new_data = format_data(data, record_dict)\n\n if model_string == \"operation_types\":\n for ft in new_data['field_types']:\n ft['allowable_field_types'] = [\n format_data(aft, record_dict)\n for aft in ft['allowable_field_types']\n ]\n\n # Make records with Trident\n new_record = make_record(\n session, protocol.name, model_string, new_data, record_dict)\n\n record_dict[record_info['tag']] = new_record\n\n return record_dict\n\n\ndef format_data(data, record_dict):\n \"\"\"\n Replaces tags in given data with corresponding record ids\n\n :param data: raw data for a given record\n :type data: dict\n :param record_dict: dictionary of all records by tag\n :type record_dict: dict\n :return: formatted data\n :rtype: dict\n \"\"\"\n new_data = {}\n for attr in data:\n tag = data[attr]\n if type(tag) is str and tag in record_dict:\n record_id = record_dict[tag].id\n new_data[attr[:-4] + '_id'] = record_id\n else:\n new_data[attr] = data[attr]\n\n return new_data\n\n\ndef make_record(session, name, model_string, data, record_dict):\n \"\"\"\n Makes a new record of the given model (or loads an existing one\n if applicable)\n\n :param session: session with container\n :type session: AqSession\n :param name: name of the :class:`OperationType`\n :type name: str\n :param model_string: name of the model\n :type model_string: str\n :param data: formatted data for given record\n :type data: dict\n :param record_dict: dictionary of all records by tag\n :type record_dict: dict\n :return: record made from data\n :rtype: ModelBase\n \"\"\"\n model_name = model_string[:-1]\n record = None\n\n if model_name == \"sample_type\":\n record = session.SampleType.load(data)\n record.save()\n elif model_name == \"object_type\":\n record = session.ObjectType.load(data)\n record.save()\n elif model_name == \"sample\":\n try:\n record = session.Sample.load(data)\n record.save()\n except Exception:\n record = 
session.Sample.find_by_name(data['name'])\n elif model_name == \"item\":\n record = session.Item.load(data)\n record.make()\n elif model_name == \"operation\":\n ot = record_dict['ot']\n record = ot.instance()\n record.x = record.y = record.parent_id = record.parent = 0\n\n for in_data in data['inputs']:\n name = in_data['name']\n sample = record_dict[in_data['sample_tag']]\n item = record_dict[in_data['item_tag']]\n record.set_input(name, sample=sample, item=item)\n\n for out_data in data['outputs']:\n name = out_data['name']\n sample = record_dict[out_data['sample_tag']]\n record.set_output(name, sample=sample)\n elif model_name == \"operation_type\":\n # data['name'] = 'hullabaloo25'\n record = session.OperationType.load(data)\n try:\n record.save()\n except Exception:\n # Trident errors even though Operation Type is successfully created\n # TODO Handle creating Operation Types (and Field Types and\n # Allowable Field Types) more cleanly (e.g., many problems\n # arise from the fact that we can't 'reload' the OT record\n # upon creating it on a server; 'id's are incorrect).\n record = session.OperationType.find_by_name(record.name)\n\n else:\n raise Exception(\n 'Malformed data: {} is not a valid model name'.format(model_name))\n\n return record\n\n\ndef test_protocol(protocol, record_dict):\n \"\"\"\n Submits a plan to container given test data\n\n :param session: session with container\n :type session: AqSession\n :param protocol: directory that manages the protocol\n :type protocol: ODir\n :param record_dict: dictionary of all records by tag\n :type record_dict: dict\n :return: plan information (success status and errors)\n :rtype: dict\n \"\"\"\n # GET THINE SESSION\n session = record_dict['ot'].session\n\n # READ THAT JSON\n test_data = None\n with protocol.open_file('testing/data.json', mode='r') as f:\n test_data = j.load(f)\n\n # SUBMIT THAT PLAN\n plan = session.Plan.load({\n 'name': '{} Test'.format(protocol.name),\n 'layout': {'wires': None}})\n for op_tag in test_data['plan']['operations']:\n op = record_dict[op_tag]\n plan.add_operation(op)\n\n plan.create()\n plan.estimate_cost()\n plan.validate()\n\n plan.submit(session.current_user, session.current_user.budgets[0])\n\n # DEBUG THAT PLAN\n session.utils.batch_operations(\n {'operation_ids': [op.id for op in plan.operations]})\n op = plan.operations[0]\n job = op.jobs[0]\n try:\n session.utils.job_debug(job.id)\n except Exception:\n pass\n\n # return result\n op.reload(session.Operation.find(op.id))\n\n return {\n 'success': op.status == 'done',\n 'plan_url': '{}launcher?plan_id={}'.format(session.url, plan.id)\n }\n\n\ndef load_data(protocol):\n \"\"\"\n Open a session with running container\n\n :param protocol: directory that manages the protocol\n :type protocol: ODir\n :return: dictionary of all records by tag\n :rtype: dict\n \"\"\"\n # OPEN A SESSION\n session = AqSession('neptune', 'aquarium', 'http://localhost:3001/')\n\n # READ THAT JSON\n # protocol = environment.get_protocol_dir(cat_name, prot_name)\n test_data = None\n with protocol.open_file('testing/data.json', mode='r') as f:\n test_data = j.load(f)\n\n # GET THOSE RECORDS\n record_names = [\n 'sample_types',\n 'object_types',\n 'operation_types',\n 'samples',\n 'items',\n 'operations'\n ]\n record_dict = get_records(\n session, protocol, test_data['records'], record_names)\n\n return record_dict\n\n\ndef start_container(reset, cid):\n \"\"\"\n Start a Docker container\n\n :param reset: kill the running container if one exists\n :type reset: bool\n :return: 
whether a new container was started\n :rtype: bool\n \"\"\"\n # CHECK IF DOCKER CONTAINER IS RUNNING ALREADY\n if cid == '':\n running = False\n else:\n command = 'sudo docker ps -aq'\n check_running = subprocess.Popen(\n command.split(), stdout=subprocess.PIPE)\n output, error = check_running.communicate()\n running_ids = [rid.decode('utf-8') for rid in output.split(b'\\n')]\n\n running = cid in running_ids\n if not running:\n cid = ''\n\n if reset or not running:\n # RUN THIS DOCKER CONTAINER\n print('Starting Aquarium container...')\n here = os.path.abspath(os.path.dirname(__file__))\n start = subprocess.Popen([\n 'bash',\n '{}/docker_container.sh'.format(here),\n 'run',\n str(cid)\n ], stdout=subprocess.PIPE)\n output, error = start.communicate()\n cid = output.split(b'\\n')[-2].decode('utf-8')\n\n return {\n 'success': True,\n 'id': cid\n }\n\n return {\n 'success': False,\n 'id': cid\n }\n\n\ndef stop_container(cid):\n \"\"\" Stop a Docker container \"\"\"\n # KILL THIS DOCKER CONTAINER\n here = os.path.abspath(os.path.dirname(__file__))\n subprocess.call([\n 'bash',\n '{}/docker_container.sh'.format(here),\n 'kill',\n cid,\n ])\n","sub_path":"parrotfish/utils/docker_testing.py","file_name":"docker_testing.py","file_ext":"py","file_size_in_byte":8423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"66375502","text":"# Copyright(c) 2020 Jake Fowler\n#\n# Permission is hereby granted, free of charge, to any person\n# obtaining a copy of this software and associated documentation\n# files (the \"Software\"), to deal in the Software without\n# restriction, including without limitation the rights to use, \n# copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the\n# Software is furnished to do so, subject to the following\n# conditions:\n#\n# The above copyright notice and this permission notice shall be\n# included in all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, \n# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES\n# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\n# NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\n# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, \n# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR\n# OTHER DEALINGS IN THE SOFTWARE.\n\nimport pandas as pd\nfrom datetime import datetime\nimport clr\nimport System as dotnet\nfrom pathlib import Path\nclr.AddReference(str(Path(\"cmdty_storage/lib/Cmdty.TimePeriodValueTypes\")))\nimport Cmdty.TimePeriodValueTypes as tp\nclr.AddReference(str(Path('cmdty_storage/lib/Cmdty.TimeSeries')))\nimport Cmdty.TimeSeries as ts\nfrom typing import Union\nfrom datetime import date\n\n\ndef from_datetime_like(datetime_like, time_period_type):\n \"\"\" Converts either a pandas Period, datetime or date to a .NET Time Period\"\"\"\n if (hasattr(datetime_like, 'hour')):\n time_args = (datetime_like.hour, datetime_like.minute, datetime_like.second)\n else:\n time_args = (0, 0, 0)\n\n date_time = dotnet.DateTime(datetime_like.year, datetime_like.month, datetime_like.day, *time_args)\n return tp.TimePeriodFactory.FromDateTime[time_period_type](date_time)\n\n\ndef net_datetime_to_py_datetime(net_datetime):\n return datetime(net_datetime.Year, net_datetime.Month, net_datetime.Day, net_datetime.Hour, net_datetime.Minute, net_datetime.Second, net_datetime.Millisecond * 1000)\n\n\ndef net_time_period_to_pandas_period(net_time_period, freq):\n start_datetime = net_datetime_to_py_datetime(net_time_period.Start)\n return pd.Period(start_datetime, freq=freq)\n\n\ndef series_to_double_time_series(series, time_period_type):\n \"\"\"Converts an instance of pandas Series to a Cmdty.TimeSeries.TimeSeries type with Double data type.\"\"\"\n return series_to_time_series(series, time_period_type, dotnet.Double, lambda x: x)\n\n\ndef series_to_time_series(series, time_period_type, net_data_type, data_selector):\n \"\"\"Converts an instance of pandas Series to a Cmdty.TimeSeries.TimeSeries.\"\"\"\n series_len = len(series)\n net_indices = dotnet.Array.CreateInstance(time_period_type, series_len)\n net_values = dotnet.Array.CreateInstance(net_data_type, series_len)\n\n for i in range(series_len):\n net_indices[i] = from_datetime_like(series.index[i], time_period_type)\n net_values[i] = data_selector(series.values[i])\n\n return ts.TimeSeries[time_period_type, net_data_type](net_indices, net_values)\n\n\ndef net_time_series_to_pandas_series(net_time_series, freq):\n \"\"\"Converts an instance of class Cmdty.TimeSeries.TimeSeries to a pandas Series\"\"\"\n curve_start = net_time_series.Indices[0].Start\n curve_start_datetime = net_datetime_to_py_datetime(curve_start)\n index = pd.period_range(start=curve_start_datetime, freq=freq, periods=net_time_series.Count)\n prices = [net_time_series.Data[idx] for idx in range(0, net_time_series.Count)]\n return pd.Series(prices, index)\n\n\ndef is_scalar(arg):\n return isinstance(arg, int) or isinstance(arg, float)\n\n\ndef raise_if_none(arg, error_message):\n if arg is None:\n raise ValueError(error_message)\n\n\ndef raise_if_not_none(arg, error_message):\n if arg is not None:\n raise ValueError(error_message)\n\n\nFREQ_TO_PERIOD_TYPE = {\n \"15min\" : tp.QuarterHour,\n \"30min\" : tp.HalfHour,\n \"H\" : tp.Hour,\n \"D\" : tp.Day,\n \"M\" : tp.Month,\n \"Q\" : tp.Quarter\n }\n\"\"\" dict of str: .NET time period type.\nEach item describes an allowable granularity of curves constructed, as specified by the \nfreq parameter in the curves public methods.\n\nThe keys represent the pandas Offset Alias which 
describe the granularity, and will generally be used\n    as the freq of the pandas Series objects returned by the curve construction methods.\nThe values are the associated .NET time period types used in behind-the-scenes calculations.\n\"\"\"\n\ndef wrap_settle_for_dotnet(py_settle_func, freq):\n\n    def wrapper_settle_function(py_function, net_time_period, freq):\n        pandas_period = net_time_period_to_pandas_period(net_time_period, freq)\n        py_function_result = py_function(pandas_period)\n        net_settle_day = from_datetime_like(py_function_result, tp.Day)\n        return net_settle_day\n\n    def wrapped_function(net_time_period):\n        return wrapper_settle_function(py_settle_func, net_time_period, freq)\n\n    time_period_type = FREQ_TO_PERIOD_TYPE[freq]\n    return dotnet.Func[time_period_type, tp.Day](wrapped_function)\n\n\nTimePeriodSpecType = Union[datetime, date, pd.Period]","sub_path":"src/Cmdty.Storage.Python/cmdty_storage/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":5300,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
{"seq_id":"649912177","text":"\"\"\"\nWrite a program that reads the ages of 7 people and shows how many are over 21 and how many are younger\n\"\"\"\n\npessoas = ('first','second','third','fourth', 'fifth','sixth', 'last')\n\nsoma = 0\nfor c in range(0,7):\n    i = int(input(\"Enter the age of the {} person: \".format(pessoas[c])))\n    if i >= 21:\n        soma +=1\n\nprint(\"The number of people aged 21 or over is {}, and the number of people under 21 is {}\".format(soma, 7 - soma))","sub_path":"Arquivos Exercicios/Exercicios/Ex054.py","file_name":"Ex054.py","file_ext":"py","file_size_in_byte":455,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
{"seq_id":"262590225","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Nov 19 13:58:55 2018\n\n@author: david.saltiel\n\"\"\"\n\n''' \n    All the imports for this file\n'''\nimport warnings\nwarnings.filterwarnings(\"ignore\")\nfrom sklearn.feature_selection import RFE\n#from sklearn.ensemble import RandomForestClassifier\nfrom functions import compute_accuracy_score\nimport pickle\nfrom xgboost import XGBClassifier\nfrom sklearn.model_selection import train_test_split\nimport numpy as np\n#%%\n'''\n    Generic RFEMethod for feature selection\n'''\nclass RFEMethod:\n    '''\n    df is an object that contains a dataframe in df.df\n    the method uses the RFE method from sklearn\n    to compute RFE for the df\n    '''\n    def __init__(self, df, verbose = False):\n        self.df = df\n        self.verbose = verbose\n\n    '''\n    get the score and features for a given number of features\n    using the RFE method from sklearn\n    '''\n    def get_score_and_features(self, n_features, split = 'temporal'):\n        features = list(self.df.columns.values)\n        features.remove('Label')\n        \n        df_X = self.df[features]\n        df_Y = self.df['Label']\n        estimator = XGBClassifier(random_state=0)\n        \n        if split == 'temporal' :\n            split_data = int(0.67*df_X.shape[0])\n            x_train, x_test, y_train, y_test = df_X.iloc[:split_data,:], df_X.iloc[split_data:,:],\\\n            df_Y.iloc[:split_data], df_Y.iloc[split_data:]\n        else :\n            x_train, x_test, y_train, y_test = train_test_split(df_X, df_Y, test_size=0.33,\n                                                random_state = 0)\n        \n        selector = RFE(estimator, n_features, step=1)\n        selector = selector.fit(x_train, y_train)\n        \n        features_bool = np.array(selector.support_)\n        features = np.array(df_X.columns)\n        list_feat = list(features[features_bool])\n#        list_to_keep = []\n#        for i in range(len(list(selector.support_))):\n# \tif 
list(selector.support_)[i] :\n# \t\tlist_to_keep.append(i)\n# list_feat = list(df_X.columns[list_to_keep])\n# list_delete = list(set(list(df_X.columns.values))-set(df_X.columns[list_to_keep]))\n# x_test = x_test.drop(list_delete ,axis=1)\n \n# clf_xgb = XGBClassifier(random_state=0)\n# clf_xgb = clf_xgb.fit(x_train , y_train)\n \n score = selector.score(x_test,y_test)\n# score = compute_accuracy_score(x_test, y_test)\n self.selected_features = list_feat\n self.list_score = score\n return self.selected_features, self.list_score\n \n \n '''\n loop over all features between nMin\n and nMax and generate the corresponding score\n if save_pickle == True, we also save as \n a pickle the result\n '''\n def save_all_score(self, nMin = 1, nMax = 20, save_pickle = False):\n dic_score_RFE ={}\n for n_features in range(nMin, nMax):\n list_feat, score = self.get_score_and_features(n_features)\n dic_score_RFE[n_features] = [list_feat, score]\n if self.verbose :\n print('n_features : {0} score : {1}'.format(n_features, score))\n if save_pickle:\n pickle.dump( dic_score_RFE, open( \"dic_score_RFE_v2.p\", \"wb\" ) )\n return dic_score_RFE\n\n ''' \n select features \n return the subset of initial features kept by the method\n \n '''\n def select_features(self, n_features= 6):\n return self.get_score_and_features(n_features)\n","sub_path":"code/RFE.py","file_name":"RFE.py","file_ext":"py","file_size_in_byte":3542,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"595871102","text":"import numpy as np\nimport cv2\n\n# Identify pixels above the threshold\n# Threshold of RGB > 160 does a nice job of identifying ground pixels only\ndef color_thresh(img, rgb_thresh=(160, 160, 160), rock_min = (0, 0, 145), rock_max = (255, 148, 180)):\n # Create an array of zeros same xy size as img, but single channel\n path_select = np.zeros_like(img[:,:,0])\n obs_select = np.zeros_like(img[:,:,0])\n rock_select = np.zeros_like(img[:,:,0])\n # Require that each pixel be above all three threshold values in RGB\n # above_thresh will now contain a boolean array with \"True\"\n # where threshold was met\n above_thresh = (img[:,:,0] > rgb_thresh[0]) \\\n & (img[:,:,1] > rgb_thresh[1]) \\\n & (img[:,:,2] > rgb_thresh[2])\n below_thresh = (img[:,:,0] <= rgb_thresh[0]) \\\n & (img[:,:,1] <= rgb_thresh[1]) \\\n & (img[:,:,2] <= rgb_thresh[2])\n inbetween_thresh = (img[:,:,0] >= rock_min[0]) \\\n & (img[:,:,1] >= rock_min[1]) \\\n & (img[:,:,2] >= rock_min[2]) \\\n & (img[:,:,0] < rock_max[0]) \\\n & (img[:,:,1] < rock_max[1]) \\\n & (img[:,:,2] < rock_max[2])\n # Index the array of zeros with the boolean array and set to 1\n path_select[above_thresh] = 1\n obs_select[below_thresh] = 1\n rock_select[inbetween_thresh] = 1\n # Return the binary image\n return path_select, obs_select, rock_select\n\n# Define a function to convert from image coords to rover coords\ndef rover_coords(binary_img):\n # Identify nonzero pixels\n ypos, xpos = binary_img.nonzero()\n # Calculate pixel positions with reference to the rover position being at the \n # center bottom of the image. 
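The save_all_score sweep above pairs each feature count with a held-out score; a self-contained sketch of the same RFE pattern on synthetic data (LogisticRegression stands in for the XGBClassifier used in the original, and the data shapes are arbitrary):

from sklearn.datasets import make_classification
from sklearn.feature_selection import RFE
from sklearn.linear_model import LogisticRegression

X, y = make_classification(n_samples=200, n_features=10, random_state=0)
for n in (2, 4, 6):
    # Recursively drop the weakest features until n remain, then score.
    selector = RFE(LogisticRegression(max_iter=1000), n_features_to_select=n, step=1)
    selector.fit(X, y)
    print(n, selector.score(X, y))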
\n x_pixel = -(ypos - binary_img.shape[0]).astype(np.float)\n y_pixel = -(xpos - binary_img.shape[1]/2 ).astype(np.float)\n return x_pixel, y_pixel\n\n\n# Define a function to convert to radial coords in rover space\ndef to_polar_coords(x_pixel, y_pixel):\n # Convert (x_pixel, y_pixel) to (distance, angle) \n # in polar coordinates in rover space\n # Calculate distance to each pixel\n dist = np.sqrt(x_pixel**2 + y_pixel**2)\n # Calculate angle away from vertical for each pixel\n angles = np.arctan2(y_pixel, x_pixel)\n return dist, angles\n\n# Define a function to map rover space pixels to world space\ndef rotate_pix(xpix, ypix, yaw):\n # Convert yaw to radians\n yaw_rad = yaw * np.pi / 180\n xpix_rotated = (xpix * np.cos(yaw_rad)) - (ypix * np.sin(yaw_rad))\n \n ypix_rotated = (xpix * np.sin(yaw_rad)) + (ypix * np.cos(yaw_rad))\n # Return the result \n return xpix_rotated, ypix_rotated\n\ndef translate_pix(xpix_rot, ypix_rot, xpos, ypos, scale): \n # Apply a scaling and a translation\n xpix_translated = (xpix_rot / scale) + xpos\n ypix_translated = (ypix_rot / scale) + ypos\n # Return the result \n return xpix_translated, ypix_translated\n\n\n# Define a function to apply rotation and translation (and clipping)\n# Once you define the two functions above this function should work\ndef pix_to_world(xpix, ypix, xpos, ypos, yaw, world_size, scale):\n # Apply rotation\n xpix_rot, ypix_rot = rotate_pix(xpix, ypix, yaw)\n # Apply translation\n xpix_tran, ypix_tran = translate_pix(xpix_rot, ypix_rot, xpos, ypos, scale)\n # Perform rotation, translation and clipping all at once\n x_pix_world = np.clip(np.int_(xpix_tran), 0, world_size - 1)\n y_pix_world = np.clip(np.int_(ypix_tran), 0, world_size - 1)\n # Return the result\n return x_pix_world, y_pix_world\n\n# Define a function to perform a perspective transform\ndef perspect_transform(img, src, dst):\n \n M = cv2.getPerspectiveTransform(src, dst)\n warped = cv2.warpPerspective(img, M, (img.shape[1], img.shape[0]))# keep same size as input image\n \n return warped\n\n\n# Apply the above functions in succession and update the Rover state accordingly\ndef perception_step(Rover):\n # Perform perception steps to update Rover()\n # TODO: \n # NOTE: camera image is coming to you in Rover.img\n # 1) Define source and destination points for perspective transform\n dst_size = 5\n bottom_offset = 6\n source = np.float32([[14, 140], [301 ,140],[200, 96], [118, 96]])\n destination = np.float32([[Rover.img.shape[1]/2 - dst_size, Rover.img.shape[0] - bottom_offset],\n [Rover.img.shape[1]/2 + dst_size, Rover.img.shape[0] - bottom_offset],\n [Rover.img.shape[1]/2 + dst_size, Rover.img.shape[0] - 2*dst_size - bottom_offset], \n [Rover.img.shape[1]/2 - dst_size, Rover.img.shape[0] - 2*dst_size - bottom_offset],\n ])\n\n # 2) Apply perspective transform\n warped = perspect_transform(Rover.img, source, destination)\n\n # 3) Apply color threshold to identify navigable terrain/obstacles/rock samples\n threshed = color_thresh(warped)\n\n # 4) Update Rover.vision_image (this will be displayed on left side of screen)\n # Example: Rover.vision_image[:,:,0] = obstacle color-thresholded binary image\n # Rover.vision_image[:,:,1] = rock_sample color-thresholded binary image\n # Rover.vision_image[:,:,2] = navigable terrain color-thresholded binary image\n Rover.vision_image[:,:,0] = threshed[1]\n Rover.vision_image[:,:,1] = threshed[2]\n Rover.vision_image[:,:,2] = threshed[0]\n\n # 5) Convert map image pixel values to rover-centric coords\n x_path_rover, y_path_rover = 
rover_coords(threshed[0])\n x_obs_rover, y_obs_rover = rover_coords(threshed[1])\n x_rock_rover, y_rock_rover = rover_coords(threshed[2])\n\n # 6) Convert rover-centric pixel values to world coordinates\n xpos, ypos = Rover.pos\n yaw = Rover.yaw\n world_size = Rover.worldmap.shape[0]\n scale = 2 * dst_size\n x_path_world, y_path_world = pix_to_world(x_path_rover, y_path_rover, xpos, ypos, yaw, world_size, scale)\n x_obs_world, y_obs_world = pix_to_world(x_obs_rover, y_obs_rover, xpos, ypos, yaw, world_size, scale)\n x_rock_world, y_rock_world = pix_to_world(x_rock_rover, y_rock_rover, xpos, ypos, yaw, world_size, scale)\n # 7) Update Rover worldmap (to be displayed on right side of screen)\n # Example: Rover.worldmap[obstacle_y_world, obstacle_x_world, 0] += 1\n # Rover.worldmap[rock_y_world, rock_x_world, 1] += 1\n # Rover.worldmap[navigable_y_world, navigable_x_world, 2] += 1\n roll_max = 359.5\n roll_min = 0.5\n pitch_max = 359.5\n pitch_min = 0.5\n if ((Rover.roll > roll_max or Rover.roll < roll_min) and (Rover.pitch > pitch_max \\\n or Rover.pitch < pitch_min)):\n Rover.worldmap[y_obs_world, x_obs_world, 0] += 1\n Rover.worldmap[y_rock_world, x_rock_world, 1] += 1\n Rover.worldmap[y_path_world, x_path_world, 2] += 1\n\n # 8) Convert rover-centric pixel positions to polar coordinates\n # Update Rover pixel distances and angles\n # Rover.nav_dists = rover_centric_pixel_distances\n # Rover.nav_angles = rover_centric_angles\n dist, angles = to_polar_coords(x_path_rover, y_path_rover)\n Rover.nav_dists = dist\n Rover.nav_angles = angles\n \n return Rover","sub_path":"code/perception.py","file_name":"perception.py","file_ext":"py","file_size_in_byte":7250,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"555516271","text":"#! 
/usr/local/bin/python\n\nfrom rtcmix import *\nimport random\nfrom DNAFuncs import *\nimport words\nimport sys\n\nrtsetparams(44100, 2)\nload(\"GRANULATE\")\n\ntransTab = [\n\t\t\t[0.00, 0.03, 0.07, 0.10],\n\t\t\t[0.00, 0.04, 0.07, 0.10],\n\t\t\t[0.00, 0.02, 0.07, 0.10],\n\t\t\t[0.00, 0.05, 0.10, 1.02]\n\t\t\t]\n\ndur = int(sys.argv[1])\n\ndef grainAmp():\n\tx = random.uniform(0.125, 0.5)\n\ty = random.uniform(0.25, 0.75)\n\tif x > y:\n\t\treturn y, x\n\telse:\n\t\treturn x, y\n\ninskip = 0\namp = 1\nenv = maketable(\"curve\", 1000, 0,0,2, 150,1,0, 850,1,2, 1000,0)\ncurrent = random.choice(words.words)\nfile = current[1]\nrtinput(file)\ninTab = maketable(\"soundfile\", \"nonorm\", 0, file)\ninChan = 0\nwinStart = 0.0\nwinEnd = DUR() - 0.0\ntravRate = random.uniform(0.001, 0.1)\ngrainTab = maketable(\"window\", 1000, \"hanning\")\nhopTime = random.uniform(0.001, 0.009)\ninJit = outJit = 0.0025\ngrainMin = hopTime * random.uniform(11, 33)\ngrainMax = grainMin * random.uniform(0.1, 0.5)\ngrainAmpMin, grainAmpMax = grainAmp()\ngrainTrans = random.uniform(-0.02, 0.02)\n#print file, grainTrans, hopTime, grainMin, grainMax, grainTrans\ntransColl = random.choice(transTab)\n\nGRANULATE(0, inskip, dur, amp * env, inTab, 1, inChan, winStart, winEnd, 1, travRate,\n\t\t grainTab, hopTime, inJit, outJit, grainMin, grainMax, grainAmpMax, grainAmpMin,\n\t\t grainTrans, transColl, 0.005, 1, 0, 1, 1)","sub_path":"acidPlay.py","file_name":"acidPlay.py","file_ext":"py","file_size_in_byte":1320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"186163729","text":"#!/usr/bin/env python\nfrom error.error import *\nfrom cmdPush.Interface import Interface\nfrom setIP.SetIP import setip\nfrom pushConf.PushConf import pushconf\nfrom pushConf.PushCF import pushconf_cf\nfrom check.check import CheckList\nfrom GetLoad.GetMinLoad import hostload\nfrom GetLoad.FromRedis import getloadpay\nfrom conf_analytic.lvs_auto import confanal\nfrom gevent.pywsgi import WSGIServer\nfrom Online.Online import onlinepay\nfrom Offline.Offline import offlinepay\nfrom append.FileExist import filecheck\n\nimport traceback\nimport logging\nimport json\n\nLOG = logging.getLogger(\"lvsauto\")\n\ndef application(env, start_response):\n\twhile True:\n\t\ttry:\n\t\t\treq_body_size = int(env['CONTENT_LENGTH'])\n\t\t\tgetinput = env['wsgi.input'].read(req_body_size)\n\t\t\tgetinput = json.loads(getinput)\n\t\t\tLOG.info(\"Get payload:%s, path info:%s\" % (getinput, env['PATH_INFO']))\n\t\t\tif 'setip' in env['PATH_INFO']:\n\t\t\t\tstatus, reason = setip(getinput)\n\t\t\telif 'pushconf' in env['PATH_INFO']:\n\t\t\t\tif getinput['option'] in ['vip', 'member', 'cluster']:\n\t\t\t\t\tstatus, reason = pushconf(getinput)\n\t\t\t\telif getinput['option'] in ['vip_cf', 'member_cf']:\n\t\t\t\t\tstatus, reason = pushconf_cf(getinput)\n\t\t\telif 'getload' in env['PATH_INFO']:\n\t\t\t\tstatus, reason = hostload(getinput)\n\t\t\telif 'confanal' in env['PATH_INFO']:\n\t\t\t\tstatus, reason = confanal(getinput)\n\t\t\telif 'cluster/online' in env['PATH_INFO']:\n\t\t\t\tstatus, reason = onlinepay(getinput)\n\t\t\telif '/cluster/offline' in env['PATH_INFO']:\n\t\t\t\tstatus, reason = offlinepay(getinput)\n\t\t\telif '/load/min' in env['PATH_INFO']:\n\t\t\t\tstatus, reason = getloadpay(getinput)\n\t\t\telif '/cluster/fileexist' in env['PATH_INFO']:\n\t\t\t\tstatus, reason = filecheck(getinput)\n\t\t\telse:\n\t\t\t\tstatus, reason = 500, \"Internal server error.\"\n\t\t\tLOG.info(\"%s\" % 
(reason))\n\t\texcept:\n\t\t\tstart_response(\"500\", [('Content-Type','text/json')])\n\t\t\tLOG.error(\"%s\" % (traceback.format_exc()))\n\t\t\treturn json.dumps({\"status\":\"failed\",\"reason\":\"Internal server error.\"})\n\n\t\tstart_response(str(status),[('Content-Type','text/json')])\n\t\tif type(reason) is dict:\n\t\t\treturn json.dumps(reason)\n\t\telif type(reason) is str or type(reason) is unicode:\n\t\t\tif status == 200:\n\t\t\t\treturn json.dumps({\"status\":\"succ\",\"reason\":reason})\n\t\t\telse:\n\t\t\t\treturn json.dumps({\"status\":\"failed\", \"reason\":reason})\n\nif __name__ == '__main__':\n\tWSGIServer(('', 33999), application).serve_forever()\n","sub_path":"lavos.py","file_name":"lavos.py","file_ext":"py","file_size_in_byte":2357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"649985918","text":"import re\nimport os\nimport shutil\nimport random\nimport time\nfrom captcha.image import ImageCaptcha\nfrom config import CHAR_SET, CAPTCHA_LEN, CAPTCHA_IMAGE_DIR, TEST_IMAGE_DIR,\\\n    TEST_IMAGE_NUMBER\n\n\n# Generate the captcha images (note: the four nested loops below assume\n# CAPTCHA_LEN == 4, even though total is computed from CAPTCHA_LEN)\ndef generate_captcha_image(\n        char_set=CHAR_SET, img_path=CAPTCHA_IMAGE_DIR):\n    k = 0\n    total = 1\n    for i in range(CAPTCHA_LEN):\n        total *= len(char_set)\n\n    for i in range(len(char_set)):\n        for j in range(len(char_set)):\n            for m in range(len(char_set)):\n                for n in range(len(char_set)):\n                    captcha_text = char_set[i] + char_set[j] + char_set[m] +\\\n                        char_set[n]\n                    image = ImageCaptcha()\n                    image.write(\n                        captcha_text, format_img_path(img_path, captcha_text))\n                    k += 1\n                    print(\"\\rCreating %d/%d\" % (k, total))\n\n\n# Move part of the captcha image set into a test set; these images are not\n# used for training, only for evaluating the model\ndef prepare_test_set():\n    file_name_list = []\n    for filePath in os.listdir(CAPTCHA_IMAGE_DIR):\n        captcha_name = filePath.split('/')[-1]\n        captcha_name = re.sub(r'\\.jpg$', '', captcha_name)\n        file_name_list.append(captcha_name)\n    random.seed(time.time())\n    random.shuffle(file_name_list)\n    for i in range(TEST_IMAGE_NUMBER):\n        name = file_name_list[i]\n        shutil.move(\n            format_img_path(CAPTCHA_IMAGE_DIR, name),\n            format_img_path(TEST_IMAGE_DIR, name))\n\n\ndef format_img_path(img_dir, img_name):\n    return '{}/{}.jpg'.format(img_dir, img_name)\n\n\nif __name__ == '__main__':\n    generate_captcha_image(CHAR_SET, CAPTCHA_IMAGE_DIR)\n    prepare_test_set()\n    print(\"\\nFinished\")\n","sub_path":"generate_sample.py","file_name":"generate_sample.py","file_ext":"py","file_size_in_byte":1774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"505697578","text":"#!/usr/bin/env python3\n\n\"\"\"\nThe objective of this project is to understand, implement, \nand empirically measure the performance of two space allocation methods\nused in file systems, namely contiguous and linked allocation, against various inputs.\n\"\"\"\n\n__author__ = \"Firat Tamur\"\n__email__ = \"ftamur16@ku.edu.tr\"\n\n\nclass FAT:\n\n    def __init__(self, block_size, block_count=32768, fat_entry_size=4):\n        \"\"\"\n        Initializes File Allocation Table.\n\n        :param block_count: int\n        :param block_size: int\n        :param fat_entry_size: int\n        \"\"\"\n        self.block_count = block_count\n        self.block_size = block_size\n        self.fat_entry_size = fat_entry_size\n\n        # create a list for files:\n        # 0: not allocated\n        # -1: end of file chain\n        self.blocks = [0] * block_count\n\n        # create a dict to keep each file_id and starting block\n        # allocated space for fat starts from block 0.\n        self.fat = {'fat': 0}\n\n        # allocate blocks for fat\n        self.fat_blocks = int(self.block_count 
/ ((self.block_size / 4) + 1)) + 1\n\n # From 0 to (fat_blocks - 1) allocated to fat.\n # We can start from index fat_blocks to allocate\n # new files.\n for i in range(self.fat_blocks - 1):\n self.blocks[i] = i + 1\n\n self.blocks[self.fat_blocks - 1] = -1\n\n # set capacity which is block_count - fat_blocks\n self.capacity = block_count - self.fat_blocks\n\n # set size\n self.size = 0\n\n def create_file(self, file_id, file_length):\n \"\"\"\n Allocates blocks in self.blocks to given file_id\n Also updates fat dict.\n\n :param file_id: int\n :param file_length: int -> bytes\n\n :return: False -> failure, True -> success\n \"\"\"\n\n if file_id in self.fat.keys():\n # print(\"File already created!\")\n return False\n\n if file_length < 0:\n # print(\"Length value must be positive integer!\")\n return False\n\n # bytes to blocks\n block_count = self._byte_to_block(file_length)\n\n if self.capacity < block_count:\n # print(\"Not Enough Space!\")\n return False\n\n for i in range(self.fat_blocks, self.block_count):\n if self.blocks[i] == 0:\n\n # set fat initial file to fat_blocks\n self.fat[file_id] = i\n\n # search block after start point and allocate them.\n self._allocate_fat(i, i + 1, block_count)\n\n break\n\n\n self.capacity -= block_count\n self.size += block_count\n\n return True\n\n def access(self, file_id, byte_offset):\n \"\"\"\n Accesses file with given byte_offset.\n\n :param file_id: int\n :param byte_offset: int -> bytes\n\n :return: int\n \"\"\"\n\n if file_id not in self.fat.keys():\n # print(\"File doesn't exist!\")\n return False\n\n if byte_offset < 0:\n # print(\"Offset value must be positive integer!\")\n return False\n\n # bytes to block\n block_offset = self._byte_to_block(byte_offset)\n\n # start index\n start = self.fat[file_id]\n\n for i in range(block_offset - 1):\n start = self.blocks[start]\n\n return start\n\n def extend(self, file_id, extension):\n \"\"\"\n Extends file with given file_id.\n\n :param file_id: int\n :param extension: int -> blocks\n\n :return: False -> failure, True -> success\n \"\"\"\n\n if file_id not in self.fat.keys():\n # print(\"File doesn't exist!\")\n return False\n\n if extension < 0:\n # print(\"Extension value must be positive integer!\")\n return False\n\n if self.capacity < extension:\n # print(\"Not Enough Space!\")\n return False\n\n end = self.fat[file_id]\n\n while True:\n if self.blocks[end] == -1:\n break\n\n end = self.blocks[end]\n\n self._allocate_fat(end, self.fat_blocks, extension + 1)\n\n self.capacity -= extension\n self.size += extension\n\n return True\n\n def shrink(self, file_id, shrinking):\n \"\"\"\n Shrinks file with given file_id.\n\n :param file_id: int\n :param shrinking: int -> blocks\n\n :return: False -> failure, True -> success\n \"\"\"\n\n if file_id not in self.fat.keys():\n #print(\"File doesn't exist!\")\n return False\n\n if shrinking < 0:\n #print(\"Shrink value must be positive integer!\")\n return False\n\n file_size = self._find_size(file_id)\n\n if file_size - shrinking < 1:\n #print(\"Too large shrink value!\")\n return False\n\n delete_starts = 0\n end = self.fat[file_id]\n\n while True:\n delete_starts += 1\n\n if delete_starts >= file_size - shrinking:\n delete = end\n end = self.blocks[end]\n\n if delete_starts == file_size - shrinking:\n self.blocks[delete] = -1\n else:\n self.blocks[delete] = 0\n\n if self.blocks[end] == -1:\n self.blocks[end] = 0\n break\n else:\n end = self.blocks[end]\n\n self.capacity += shrinking\n self.size -= shrinking\n\n return True\n\n \"\"\" Utils \"\"\"\n\n 
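# A minimal usage sketch (hypothetical values, assuming 1 KiB blocks):\n    #   fs = FAT(block_size=1024)\n    #   fs.create_file(7, 4000)   # 4000 bytes round up to 4 blocks\n    #   fs.extend(7, 2)           # the chain grows to 6 blocks\n    #   fs.shrink(7, 3)           # ... and shrinks back to 3 blocks\n    #   fs.access(7, 2500)        # index of the block holding byte 2500\n    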
def _find_size(self, file_id):\n \"\"\"\n Find file size of given file_id.\n\n :param file_id: int\n :return: int -> blocks\n \"\"\"\n\n file_size = 0\n\n end = self.fat[file_id]\n\n while end != -1:\n file_size += 1\n end = self.blocks[end]\n\n return file_size\n\n def _allocate_fat(self, end_index, search_starts, blocks):\n \"\"\"\n Allocates space for given file_index.\n Searches empty blocks starting from start index and blocks count.\n\n :param end_index: int\n :param search_starts: int\n :param blocks: int\n\n :return: None\n \"\"\"\n\n for i in range(search_starts, self.block_count):\n\n if self.blocks[i] == 0:\n\n if blocks == 1:\n self.blocks[end_index] = -1\n break\n\n self.blocks[end_index] = i\n end_index = i\n\n blocks -= 1\n\n if blocks == 1:\n self.blocks[end_index] = -1\n\n def _byte_to_block(self, bytes_count):\n \"\"\"\n Returns givens bytes count to blocks count.\n\n :param bytes_count: int\n :return: blocks: int\n \"\"\"\n\n if bytes_count % self.block_size == 0:\n blocks = bytes_count // self.block_size\n else:\n blocks = (bytes_count // self.block_size) + 1\n\n return blocks\n\n\n\n\n\n\n\n","sub_path":"fat.py","file_name":"fat.py","file_ext":"py","file_size_in_byte":6877,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"443242906","text":"import numpy as np\n\n\ndef insert_global_vars(vars):\n global img, img_sample\n global overlap, patch_sz\n global sample_height, sample_width\n img, img_sample = vars.get('img'), vars.get('img_sample')\n sample_height, sample_width = vars.get('sample_height'), vars.get('sample_width')\n overlap, patch_sz = vars.get('OverlapWidth'), vars.get('PatchSize')\n\n\n# ------------------------------------ #\n# Best Fit Patch and related functions #\n# ------------------------------------ #\ndef overlap_error_vertical(img_px, sample_px):\n iLeft, jLeft = img_px\n iRight, jRight = sample_px\n img_int = img.astype(np.int32)\n img_sample_int = img_sample.astype(np.int32)\n\n diff = (img_int[iLeft:iLeft + patch_sz, jLeft:jLeft + overlap]\n - img_sample_int[iRight:iRight + patch_sz, jRight:jRight + overlap])\n overlap_err = np.sum(np.sum(diff**2, axis=2)**0.5)\n\n return overlap_err\n\n\ndef overlap_error_horizntl(left_px, right_px):\n iLeft, jLeft = left_px\n iRight, jRight = right_px\n img_int = img.astype(np.int32)\n img_sample_int = img_sample.astype(np.int32)\n\n diff = (img_int[iLeft:iLeft + overlap, jLeft:jLeft + patch_sz]\n - img_sample_int[iRight:iRight + overlap, jRight:jRight + patch_sz])\n overlap_err = np.sum(np.sum(diff**2, axis=2)**0.5)\n\n return overlap_err\n\n\ndef get_best_tex_patches(px, overlap_err_threshold):\n pixels = []\n\n def ssd_error(img_pos, tex_pos):\n ix, iy = img_pos\n tx, ty = tex_pos\n src = img.astype(np.int32)\n tex = img_sample.astype(np.int32)\n diff = (src[ix:ix + patch_sz, iy:iy + patch_sz]\n - tex[tx:tx + patch_sz, ty:ty + patch_sz])\n return np.sum(diff ** 2) ** 0.5\n\n for i in range(patch_sz, sample_height - patch_sz):\n for j in range(patch_sz, sample_width - patch_sz):\n err = ssd_error((px[0], px[1]), (i, j))\n if err < overlap_err_threshold:\n pixels.append((i, j))\n elif err < overlap_err_threshold / 2:\n return [(i, j)]\n\n return pixels\n\n\ndef get_best_patches(px, overlap_err_threshold):\n pixels = []\n # check for top layer\n if px[0] == 0:\n for i in range(sample_height - patch_sz):\n for j in range(overlap, sample_width - patch_sz):\n error = overlap_error_vertical((px[0], px[1] - overlap),\n (i, j - overlap))\n if error < 
overlap_err_threshold:\n pixels.append((i, j))\n elif error < overlap_err_threshold / 2:\n return [(i, j)]\n # check for leftmost layer\n elif px[1] == 0:\n for i in range(overlap, sample_height - patch_sz):\n for j in range(sample_width - patch_sz):\n error = overlap_error_horizntl((px[0] - overlap, px[1]),\n (i - overlap, j))\n if error < overlap_err_threshold:\n pixels.append((i, j))\n elif error < overlap_err_threshold / 2:\n return [(i, j)]\n # for pixel placed inside\n else:\n for i in range(overlap, sample_height - patch_sz):\n for j in range(overlap, sample_width - patch_sz):\n error_vertical = overlap_error_vertical(\n (px[0], px[1] - overlap), (i, j - overlap))\n error_horizntl = overlap_error_horizntl(\n (px[0] - overlap, px[1]), (i - overlap, j))\n if (error_vertical < overlap_err_threshold and\n error_horizntl < overlap_err_threshold):\n pixels.append((i, j))\n elif (error_vertical < overlap_err_threshold / 2 and\n error_horizntl < overlap_err_threshold / 2):\n return [(i, j)]\n return pixels\n\n\n# ------------------------------ #\n# Quilting and related Functions #\n# ------------------------------ #\ndef calc_ssd_error(offset, img_px, sample_px):\n err_r = int(img[img_px[0] + offset[0], img_px[1] + offset[1]][0]) - int(\n img_sample[sample_px[0] + offset[0], sample_px[1] + offset[1]][0])\n err_g = int(img[img_px[0] + offset[0], img_px[1] + offset[1]][1]) - int(\n img_sample[sample_px[0] + offset[0], sample_px[1] + offset[1]][1])\n err_b = int(img[img_px[0] + offset[0], img_px[1] + offset[1]][2]) - int(\n img_sample[sample_px[0] + offset[0], sample_px[1] + offset[1]][2])\n return (err_r**2 + err_g**2 + err_b**2) / 3.0\n\n\n# ---------------- #\n# Calculating Cost #\n# ---------------- #\ndef get_cost_vertical(img_px, sample_px):\n cost = np.zeros((patch_sz, overlap))\n for j in range(overlap):\n for i in range(patch_sz):\n if i == patch_sz - 1:\n cost[i, j] = calc_ssd_error((i, j - overlap), img_px,\n sample_px)\n else:\n if j == 0:\n cost[i, j] = calc_ssd_error(\n (i, j - overlap), img_px, sample_px) + min(\n calc_ssd_error(\n (i + 1, j - overlap), img_px, sample_px),\n calc_ssd_error(\n (i + 1, j + 1 - overlap), img_px, sample_px))\n elif j == overlap - 1:\n cost[i, j] = calc_ssd_error(\n (i, j - overlap), img_px, sample_px) + min(\n calc_ssd_error(\n (i + 1, j - overlap), img_px, sample_px),\n calc_ssd_error(\n (i + 1, j - 1 - overlap), img_px, sample_px))\n else:\n cost[i, j] = calc_ssd_error(\n (i, j - overlap), img_px, sample_px) + min(\n calc_ssd_error(\n (i + 1, j - overlap), img_px, sample_px),\n calc_ssd_error(\n (i + 1, j + 1 - overlap), img_px, sample_px),\n calc_ssd_error(\n (i + 1, j - 1 - overlap), img_px, sample_px))\n return cost\n\n\ndef get_cost_horizntl(img_px, sample_px):\n cost = np.zeros((overlap, patch_sz))\n for i in range(overlap):\n for j in range(patch_sz):\n if j == patch_sz - 1:\n cost[i, j] = calc_ssd_error((i - overlap, j), img_px,\n sample_px)\n elif i == 0:\n cost[i, j] = calc_ssd_error(\n (i - overlap, j), img_px, sample_px) + min(\n calc_ssd_error(\n (i - overlap, j + 1), img_px, sample_px),\n calc_ssd_error(\n (i + 1 - overlap, j + 1), img_px, sample_px))\n elif i == overlap - 1:\n cost[i, j] = calc_ssd_error(\n (i - overlap, j), img_px, sample_px) + min(\n calc_ssd_error(\n (i - overlap, j + 1), img_px, sample_px),\n calc_ssd_error(\n (i - 1 - overlap, j + 1), img_px, sample_px))\n else:\n cost[i, j] = calc_ssd_error(\n (i - overlap, j), img_px, sample_px) + min(\n calc_ssd_error(\n (i - overlap, j + 1), img_px, sample_px),\n 
calc_ssd_error(\n (i + 1 - overlap, j + 1), img_px, sample_px),\n calc_ssd_error(\n (i - 1 - overlap, j + 1), img_px, sample_px))\n return cost\n\n\n# ------------------------- #\n# Finding Minimum Cost Path #\n# ------------------------- #\n\n\ndef find_mincost_path_vertical(cost):\n boundary = np.zeros((patch_sz), np.int)\n parent_matrix = np.zeros((patch_sz, overlap), np.int)\n for i in range(1, patch_sz):\n for j in range(overlap):\n if j == 0:\n parent_matrix[i, j] = j if cost[i - 1, j] < cost[i - 1, j +\n 1] else j + 1\n elif j == overlap - 1:\n parent_matrix[i, j] = j if cost[i - 1, j] < cost[i - 1, j -\n 1] else j - 1\n else:\n curr_min = j if cost[i - 1, j] < cost[i - 1, j - 1] else j - 1\n parent_matrix[i, j] = curr_min if cost[i - 1, curr_min] < cost[\n i - 1, j + 1] else j + 1\n cost[i, j] += cost[i - 1, parent_matrix[i, j]]\n min_idx = 0\n for j in range(1, overlap):\n min_idx = min_idx if cost[patch_sz - 1, min_idx] < cost[patch_sz - 1,\n j] else j\n boundary[patch_sz - 1] = min_idx\n for i in range(patch_sz - 1, 0, -1):\n boundary[i - 1] = parent_matrix[i, boundary[i]]\n return boundary\n\n\ndef find_mincost_path_horizntl(cost):\n boundary = np.zeros((patch_sz), np.int)\n parent_matrix = np.zeros((overlap, patch_sz), np.int)\n for j in range(1, patch_sz):\n for i in range(overlap):\n if i == 0:\n parent_matrix[i, j] = i if cost[i, j - 1] < cost[i + 1, j -\n 1] else i + 1\n elif i == overlap - 1:\n parent_matrix[i, j] = i if cost[i, j - 1] < cost[i - 1, j -\n 1] else i - 1\n else:\n curr_min = i if cost[i, j - 1] < cost[i - 1, j - 1] else i - 1\n parent_matrix[i, j] = curr_min if cost[curr_min, j - 1] < cost[\n i - 1, j - 1] else i + 1\n cost[i, j] += cost[parent_matrix[i, j], j - 1]\n min_idx = 0\n for i in range(1, overlap):\n min_idx = min_idx if cost[min_idx, patch_sz - 1] < cost[i, patch_sz -\n 1] else i\n boundary[patch_sz - 1] = min_idx\n for j in range(patch_sz - 1, 0, -1):\n boundary[j - 1] = parent_matrix[boundary[j], j]\n return boundary\n\n\n# -------- #\n# Quilting #\n# -------- #\n\n\ndef quilt_vertical(boundary, img_px, sample_px):\n for i in range(patch_sz):\n for j in range(boundary[i], 0, -1):\n img[img_px[0] + i, img_px[1] - j] = img_sample[sample_px[0] + i,\n sample_px[1] - j]\n\n\ndef quilt_horizntl(boundary, img_px, sample_px):\n for j in range(patch_sz):\n for i in range(boundary[j], 0, -1):\n img[img_px[0] - i, img_px[1] + j] = img_sample[sample_px[0] - i,\n sample_px[1] + j]\n\n\ndef quilt_patches(img_px, sample_px):\n # check for top layer\n if img_px[0] == 0:\n cost = get_cost_vertical(img_px, sample_px)\n # Getting boundary to stitch\n boundary = find_mincost_path_vertical(cost)\n # Quilting Patches\n quilt_vertical(boundary, img_px, sample_px)\n # check for leftmost layer\n elif img_px[1] == 0:\n cost = get_cost_horizntl(img_px, sample_px)\n # Boundary to stitch\n boundary = find_mincost_path_horizntl(cost)\n # Quilting Patches\n quilt_horizntl(boundary, img_px, sample_px)\n # for pixel placed inside\n else:\n cost_vertical = get_cost_vertical(img_px, sample_px)\n cost_horizntl = get_cost_horizntl(img_px, sample_px)\n boundary_vertical = find_mincost_path_vertical(cost_vertical)\n boundary_horizntl = find_mincost_path_horizntl(cost_horizntl)\n quilt_vertical(boundary_vertical, img_px, sample_px)\n quilt_horizntl(boundary_horizntl, img_px, sample_px)\n\n\n# ---------------------------- #\n# Growing Image Patch-by-patch #\n# ---------------------------- #\ndef fill_image(img_px, sample_px, output=None):\n x, y = img_px\n ref_x, ref_y = sample_px\n 
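# Copy the chosen sample patch into the target: a caller-supplied output\n    # buffer when one is given, otherwise the module-level image being grown.\n    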
out = output if output is not None else img\n out[x:x + patch_sz, y:y + patch_sz] = \\\n img_sample[ref_x:ref_x + patch_sz, ref_y:ref_y + patch_sz]\n","sub_path":"toolbox.py","file_name":"toolbox.py","file_ext":"py","file_size_in_byte":12190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"44614325","text":"# -*- coding: utf-8 -*-\n\nimport numpy as np\n\nfrom Functions.get_Cp_debye import getOptimizedDebyeParams, get_Cp_debye\n\n\ndef get_demons(experimental_temps, \n experimental_Cp, \n target_temps_range, \n *bounds):\n # \n k_b=1.38064852E-23\n Na=6.022E+23\n n_kinetic = 3\n #\n params, pcov = getOptimizedDebyeParams(experimental_temps,\n experimental_Cp, \n *bounds)\n \n Cp_debye = get_Cp_debye(target_temps_range, *params)\n demon_number_floor = [ (0, np.floor((cp/(k_b*Na) - n_kinetic)) )[(cp/(k_b*Na) - n_kinetic) > 0] \\\n for cp in np.array(Cp_debye) ]\n demon_number_ceil = [ (0, np.ceil((cp/(k_b*Na) - n_kinetic)) )[(cp/(k_b*Na) - n_kinetic) > 0] \\\n for cp in np.array(Cp_debye) ]\n ### calculate floor weight: the diference between cp/(k_b*Na) - n_kinetic and floor\n weight_floor = [1 -\\\n np.sqrt(np.abs(demon_number_floor[i]-\\\n Cp_debye[i]/(k_b*Na)+n_kinetic)**2) \\\n for i in range(len( demon_number_floor)) \\\n ]\n ### calculate ceil weight: the diference between cp/(k_b*Na) - n_kinetic and ceil\n weight_ceil = [1 -\\\n np.sqrt(np.abs(demon_number_ceil[i]-\\\n Cp_debye[i]/(k_b*Na)+n_kinetic)**2) \\\n for i in range(len( demon_number_floor)) \\\n ]\n #### average of demons: demon_number_avg\n demon_number_avg = [weight_floor[i]*demon_number_floor[i] +\\\n weight_ceil[i]*demon_number_ceil[i] for i in range(len( demon_number_floor))]\n ####\n demons=np.zeros([len(target_temps_range), 4])\n demons[:, 0]= target_temps_range\n demons[:, 1]= demon_number_ceil\n demons[:, 2]= demon_number_floor\n ### the average:\n demons[:, 3]= demon_number_avg\n return demons , weight_floor, weight_ceil\n","sub_path":"FunctionsLayer2/get_demons.py","file_name":"get_demons.py","file_ext":"py","file_size_in_byte":1973,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"354757716","text":"# Copyright 2018 The TensorFlow Hub Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for tensorflow_hub.compressed_module_resolver.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n# pylint:disable=g-import-not-at-top,g-statement-before-imports\ntry:\n import mock as mock\nexcept ImportError:\n import unittest.mock as mock\n# pylint:disable=g-import-not-at-top,g-statement-before-imports\n\nimport os\nimport re\nimport socket\nimport tarfile\nimport tempfile\nimport uuid\n\nimport tensorflow as tf\n\nfrom tensorflow_hub import compressed_module_resolver\nfrom tensorflow_hub import resolver\nfrom tensorflow_hub import test_utils\nfrom tensorflow_hub import tf_utils\n\nFLAGS = tf.flags.FLAGS\n\n\nclass HttpCompressedFileResolverTest(tf.test.TestCase):\n\n def setUp(self):\n # Set current directory to test temp directory where we can create\n # files and serve them through the HTTP server.\n os.chdir(self.get_temp_dir())\n\n # Create three temp files.\n self.files = [\"file1\", \"file2\", \"file3\"]\n for cur_file in self.files:\n with tf.gfile.GFile(cur_file, mode=\"w\") as f:\n f.write(cur_file)\n\n # Write a dummy file so download server doesn't return 404.\n with tf.gfile.GFile(\"mock_module\", mode=\"w\") as f:\n f.write(\"module\")\n\n # Create TAR files.\n tar = tarfile.open(\"mock_module.tar\", \"w\")\n for name in self.files:\n tar.add(name)\n tar.close()\n\n # Create TGZ file\n tar = tarfile.open(\"mock_module.tar.gz\", \"w:gz\")\n for name in self.files:\n tar.add(name)\n tar.close()\n\n self.server_port = test_utils.start_http_server()\n self.module_handle = (\n \"http://localhost:%d/mock_module.tar.gz\" % self.server_port)\n\n self.redirect_server_port = test_utils.start_http_server(\n redirect=\"http://localhost:%d\" % self.server_port)\n\n self.smart_server_port = test_utils.start_smart_module_server(\n self.module_handle)\n self.smart_handle = (\n \"http://localhost:%d/mock_module\" % self.smart_server_port)\n\n def testGetModulePathTar(self):\n FLAGS.tfhub_cache_dir = os.path.join(self.get_temp_dir(), \"cache_dir\")\n http_resolver = compressed_module_resolver.HttpCompressedFileResolver()\n path = http_resolver(\n \"http://localhost:%d/mock_module.tar\" % self.server_port)\n files = os.listdir(path)\n self.assertListEqual(sorted(files), [\"file1\", \"file2\", \"file3\"])\n\n def testGetModulePathTarGz(self):\n FLAGS.tfhub_cache_dir = os.path.join(self.get_temp_dir(), \"cache_dir\")\n http_resolver = compressed_module_resolver.HttpCompressedFileResolver()\n path = http_resolver(self.module_handle)\n files = os.listdir(path)\n self.assertListEqual(sorted(files), [\"file1\", \"file2\", \"file3\"])\n\n def testGetModuleFromSmartLocation(self):\n FLAGS.tfhub_cache_dir = os.path.join(self.get_temp_dir(), \"cache_dir\")\n http_resolver = compressed_module_resolver.HttpCompressedFileResolver()\n path = 
http_resolver(self.smart_handle)\n files = os.listdir(path)\n self.assertListEqual(sorted(files), [\"file1\", \"file2\", \"file3\"])\n\n def testModuleDescriptor(self):\n FLAGS.tfhub_cache_dir = os.path.join(self.get_temp_dir(), \"cache_dir\")\n http_resolver = compressed_module_resolver.HttpCompressedFileResolver()\n path = http_resolver(self.module_handle)\n desc = tf_utils.read_file_to_string(resolver._module_descriptor_file(path))\n self.assertRegexpMatches(desc, \"Module: %s\\n\"\n \"Download Time: .*\\n\"\n \"Downloader Hostname: %s .PID:%d.\" %\n (re.escape(self.module_handle),\n re.escape(socket.gethostname()), os.getpid()))\n\n def testNoCacheDirSet(self):\n FLAGS.tfhub_cache_dir = \"\"\n http_resolver = compressed_module_resolver.HttpCompressedFileResolver()\n handle = \"http://localhost:%d/mock_module.tar.gz\" % self.server_port\n path = http_resolver(handle)\n files = os.listdir(path)\n self.assertListEqual(sorted(files), [\"file1\", \"file2\", \"file3\"])\n self.assertStartsWith(path, tempfile.gettempdir())\n\n def testIsTarFile(self):\n self.assertTrue(compressed_module_resolver._is_tarfile(\"foo.tar\"))\n self.assertTrue(compressed_module_resolver._is_tarfile(\"foo.tar.gz\"))\n self.assertTrue(compressed_module_resolver._is_tarfile(\"foo.tgz\"))\n self.assertFalse(compressed_module_resolver._is_tarfile(\"foo\"))\n self.assertFalse(compressed_module_resolver._is_tarfile(\"footar\"))\n\n def testAppendFormatQuery(self):\n tests = [(\n \"https://example.com/module.tar.gz\",\n \"https://example.com/module.tar.gz?tf-hub-format=compressed\",\n ), (\n \"https://example.com/module\",\n \"https://example.com/module?tf-hub-format=compressed\",\n ), (\n \"https://example.com/module?extra=abc\",\n \"https://example.com/module?extra=abc&tf-hub-format=compressed\",\n ), (\n \"https://example.com/module?extra=abc\",\n \"https://example.com/module?extra=abc&tf-hub-format=compressed\",\n ), (\n \"https://example.com/module?extra=abc&tf-hub-format=test\",\n (\"https://example.com/module?extra=abc&\"\n \"tf-hub-format=test&tf-hub-format=compressed\"),\n )]\n for handle, expected in tests:\n self.assertTrue(\n compressed_module_resolver._append_compressed_format_query(handle),\n expected)\n\n def testAbandondedLockFile(self):\n # Tests that the caching procedure is resilient to an abandonded lock\n # file.\n FLAGS.tfhub_cache_dir = os.path.join(self.get_temp_dir(), \"cache_dir\")\n\n # Create an \"abandoned\" lock file, i.e. 
a lock file with no process actively\n # downloading anymore.\n module_dir = compressed_module_resolver._module_dir(self.module_handle)\n task_uid = uuid.uuid4().hex\n lock_filename = resolver._lock_filename(module_dir)\n tf_utils.atomic_write_string_to_file(lock_filename,\n resolver._lock_file_contents(task_uid),\n overwrite=False)\n with mock.patch.object(\n compressed_module_resolver.HttpCompressedFileResolver,\n \"_lock_file_timeout_sec\",\n return_value=10):\n http_resolver = compressed_module_resolver.HttpCompressedFileResolver()\n handle = \"http://localhost:%d/mock_module.tar.gz\" % self.server_port\n # After seeing the lock file is abandoned, this resolver will download the\n # module and return a path to the extracted contents.\n path = http_resolver(handle)\n files = os.listdir(path)\n self.assertListEqual(sorted(files), [\"file1\", \"file2\", \"file3\"])\n self.assertFalse(tf.gfile.Exists(lock_filename))\n\n def testModuleAlreadyDownloaded(self):\n FLAGS.tfhub_cache_dir = os.path.join(self.get_temp_dir(), \"cache_dir\")\n http_resolver = compressed_module_resolver.HttpCompressedFileResolver()\n path = http_resolver(self.module_handle)\n files = sorted(os.listdir(path))\n self.assertListEqual(files, [\"file1\", \"file2\", \"file3\"])\n creation_times = [\n tf.gfile.Stat(os.path.join(path, f)).mtime_nsec for f in files\n ]\n # Call resolver again and make sure that the module is not downloaded again\n # by checking the timestamps of the module files.\n path = http_resolver(self.module_handle)\n files = sorted(os.listdir(path))\n self.assertListEqual(files, [\"file1\", \"file2\", \"file3\"])\n self.assertListEqual(\n creation_times,\n [tf.gfile.Stat(os.path.join(path, f)).mtime_nsec for f in files])\n\n def testCorruptedArchive(self):\n with tf.gfile.GFile(\"bad_archive.tar.gz\", mode=\"w\") as f:\n f.write(\"bad_archive\")\n http_resolver = compressed_module_resolver.HttpCompressedFileResolver()\n try:\n http_resolver(\n \"http://localhost:%d/bad_archive.tar.gz\" % self.server_port)\n self.fail(\"Corrupted archive should have failed to resolve.\")\n except IOError as e:\n self.assertEqual(\n \"http://localhost:%d/bad_archive.tar.gz does not appear \"\n \"to be a valid module.\" %\n self.server_port, str(e))\n try:\n http_resolver(\n \"http://localhost:%d/bad_archive.tar.gz\" % self.redirect_server_port)\n self.fail(\"Corrupted archive should have failed to resolve.\")\n except IOError as e:\n # Check that the error message contain the ultimate (redirected to) URL.\n self.assertEqual(\n \"http://localhost:%d/bad_archive.tar.gz does not appear \"\n \"to be a valid module.\" %\n self.redirect_server_port, str(e))\n\n\nif __name__ == \"__main__\":\n tf.test.main()\n","sub_path":"tensorflow_hub/compressed_module_resolver_test.py","file_name":"compressed_module_resolver_test.py","file_ext":"py","file_size_in_byte":9184,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"211430101","text":"\n# AI events\nAI_LEFT = 1000\nAI_RIGHT = 1001\nAI_UP = 1002\nAI_DOWN = 1003\nAI_MOVE = 1004\nAI_SKIP = 1005\n\n# Turn events\nEND_TURN = 1006\nDEATH = 1007\n\nclass Event(object):\n\n _events = []\n\n @classmethod\n def get( cls ):\n events = cls._events\n cls._events = []\n\n return events\n\n def __init__( self, key, target=None, **kwargs ):\n self.key = key\n self.target = target\n\n for k, v in kwargs.items():\n setattr(self, k, v )\n\n Event._events.append( self 
)\n\n","sub_path":"event.py","file_name":"event.py","file_ext":"py","file_size_in_byte":527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"197113627","text":"import ctypes\n\n\nclass bytestream_t(ctypes.Structure):\n    _fields_ = [(\"buf\", ctypes.POINTER(ctypes.c_ubyte)),\n                (\"max_len\", ctypes.c_size_t),\n                (\"idx\", ctypes.c_size_t)]\n\n    @classmethod\n    def create(cls, length):\n        array_type = ctypes.c_ubyte * length\n        array = array_type()\n        return cls(array, length, 0)\n\n    @classmethod\n    def from_stream(cls, stream):\n        array_type = ctypes.c_ubyte * len(stream)\n        # a ctypes array cannot be constructed from a bytes-like object\n        # directly; copy the buffer contents instead\n        array = array_type.from_buffer_copy(stream)\n        return cls(array, len(stream), 0)\n\n    @property\n    def stream(self):\n        return bytearray([self.buf[i] for i in range(self.max_len)])\n","sub_path":"sermsg/codegen/implementations/static_c/test/bytestream.py","file_name":"bytestream.py","file_ext":"py","file_size_in_byte":642,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"389729163","text":"#this program will remove the tag that y2mate add\n# to the name when you download it\n# Linux version\n\nimport os\n\ndef main():\n    print(\"Removing tag..\")\n    try:\n        recorrer_directorio()\n        print(\"Finished successfully!\")\n    except Exception as error:\n        print(\"An error occurred :(\", error)\n\ndef recorrer_directorio():\n    listar_canciones = os.listdir()\n\n    for cancion in listar_canciones:\n        if (cancion[:10] == \"y2mate.com\"):\n            nombre_nuevo = cancion[12:]\n            os.rename(cancion,nombre_nuevo)\n\n\nmain()\n","sub_path":"Music_downloader/music_downloader_v2/remove_nametag_from_y2mate/remover.py","file_name":"remover.py","file_ext":"py","file_size_in_byte":533,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"195262741","text":"# -*- coding: utf-8 -*-\r\n#\r\n# Copyright © 2011-2013 Pierre Raybaut\r\n# Licensed under the terms of the MIT License\r\n# (see spyderlib/__init__.py for details)\r\n\r\n\"\"\"\r\nSpyder base configuration management\r\n\r\nAs opposed to spyderlib/config.py, this configuration script deals \r\nexclusively with non-GUI features configuration only\r\n(in other words, we won't import any PyQt object here, avoiding any \r\nsip API incompatibility issue in spyderlib's non-gui modules)\r\n\"\"\"\r\n\r\nfrom __future__ import print_function\r\n\r\nimport os.path as osp\r\nimport os\r\nimport sys\r\n\r\n# Local imports\r\nfrom spyderlib import __version__\r\nfrom spyderlib.utils import encoding\r\nfrom spyderlib.py3compat import (is_unicode, TEXT_TYPES, INT_TYPES, PY3,\r\n                                 to_text_string, is_text_string)\r\n\r\n\r\n#==============================================================================\r\n# Only for development\r\n#==============================================================================\r\n# To activate/deactivate certain things for development\r\n# SPYDER_DEV is (and *only* has to be) set in bootstrap.py\r\nDEV = os.environ.get('SPYDER_DEV')\r\n\r\n# For testing purposes\r\n# SPYDER_TEST can be set using the --test option of bootstrap.py\r\nTEST = os.environ.get('SPYDER_TEST')\r\n\r\n\r\n#==============================================================================\r\n# Debug helpers\r\n#==============================================================================\r\nSTDOUT = sys.stdout\r\nSTDERR = sys.stderr\r\ndef _get_debug_env():\r\n    debug_env = os.environ.get('SPYDER_DEBUG', '')\r\n    if not debug_env.isdigit():\r\n        debug_env = bool(debug_env)\r\n    return int(debug_env) \r\nDEBUG 
= _get_debug_env()\r\n\r\ndef debug_print(message):\r\n \"\"\"Output debug messages to stdout\"\"\"\r\n if DEBUG:\r\n ss = STDOUT\r\n print(message, file=ss)\r\n\r\n#==============================================================================\r\n# Configuration paths\r\n#==============================================================================\r\n# Spyder settings dir\r\nif TEST is None:\r\n SUBFOLDER = '.spyder%s' % __version__.split('.')[0]\r\nelse:\r\n SUBFOLDER = 'spyder_test'\r\n\r\n\r\n# We can't have PY2 and PY3 settings in the same dir because:\r\n# 1. This leads to ugly crashes and freezes (e.g. by trying to\r\n# embed a PY2 interpreter in PY3)\r\n# 2. We need to save the list of installed modules (for code\r\n# completion) separately for each version\r\nif PY3:\r\n SUBFOLDER = SUBFOLDER + '-py3'\r\n\r\n\r\ndef get_home_dir():\r\n \"\"\"\r\n Return user home directory\r\n \"\"\"\r\n try:\r\n # expanduser() returns a raw byte string which needs to be\r\n # decoded with the codec that the OS is using to represent file paths.\r\n path = encoding.to_unicode_from_fs(osp.expanduser('~'))\r\n except:\r\n path = ''\r\n for env_var in ('HOME', 'USERPROFILE', 'TMP'):\r\n if osp.isdir(path):\r\n break\r\n # os.environ.get() returns a raw byte string which needs to be\r\n # decoded with the codec that the OS is using to represent environment\r\n # variables.\r\n path = encoding.to_unicode_from_fs(os.environ.get(env_var, ''))\r\n if path:\r\n return path\r\n else:\r\n raise RuntimeError('Please define environment variable $HOME')\r\n\r\n\r\ndef get_conf_path(filename=None):\r\n \"\"\"Return absolute path for configuration file with specified filename\"\"\"\r\n if TEST is None:\r\n conf_dir = osp.join(get_home_dir(), SUBFOLDER)\r\n else:\r\n import tempfile\r\n conf_dir = osp.join(tempfile.gettempdir(), SUBFOLDER)\r\n if not osp.isdir(conf_dir):\r\n os.mkdir(conf_dir)\r\n if filename is None:\r\n return conf_dir\r\n else:\r\n return osp.join(conf_dir, filename)\r\n \r\n\r\ndef get_module_path(modname):\r\n \"\"\"Return module *modname* base path\"\"\"\r\n return osp.abspath(osp.dirname(sys.modules[modname].__file__))\r\n\r\n\r\ndef get_module_data_path(modname, relpath=None, attr_name='DATAPATH'):\r\n \"\"\"Return module *modname* data path\r\n Note: relpath is ignored if module has an attribute named *attr_name*\r\n \r\n Handles py2exe/cx_Freeze distributions\"\"\"\r\n datapath = getattr(sys.modules[modname], attr_name, '')\r\n if datapath:\r\n return datapath\r\n else:\r\n datapath = get_module_path(modname)\r\n parentdir = osp.join(datapath, osp.pardir)\r\n if osp.isfile(parentdir):\r\n # Parent directory is not a directory but the 'library.zip' file:\r\n # this is either a py2exe or a cx_Freeze distribution\r\n datapath = osp.abspath(osp.join(osp.join(parentdir, osp.pardir),\r\n modname))\r\n if relpath is not None:\r\n datapath = osp.abspath(osp.join(datapath, relpath))\r\n return datapath\r\n\r\n\r\ndef get_module_source_path(modname, basename=None):\r\n \"\"\"Return module *modname* source path\r\n If *basename* is specified, return *modname.basename* path where \r\n *modname* is a package containing the module *basename*\r\n \r\n *basename* is a filename (not a module name), so it must include the\r\n file extension: .py or .pyw\r\n \r\n Handles py2exe/cx_Freeze distributions\"\"\"\r\n srcpath = get_module_path(modname)\r\n parentdir = osp.join(srcpath, osp.pardir)\r\n if osp.isfile(parentdir):\r\n # Parent directory is not a directory but the 'library.zip' file:\r\n # this is 
either a py2exe or a cx_Freeze distribution\r\n srcpath = osp.abspath(osp.join(osp.join(parentdir, osp.pardir),\r\n modname))\r\n if basename is not None:\r\n srcpath = osp.abspath(osp.join(srcpath, basename))\r\n return srcpath\r\n\r\n\r\ndef is_py2exe_or_cx_Freeze():\r\n \"\"\"Return True if this is a py2exe/cx_Freeze distribution of Spyder\"\"\"\r\n return osp.isfile(osp.join(get_module_path('spyderlib'), osp.pardir))\r\n\r\n\r\nSCIENTIFIC_STARTUP = get_module_source_path('spyderlib',\r\n 'scientific_startup.py')\r\n\r\n\r\n#==============================================================================\r\n# Image path list\r\n#==============================================================================\r\n\r\nIMG_PATH = []\r\ndef add_image_path(path):\r\n if not osp.isdir(path):\r\n return\r\n global IMG_PATH\r\n IMG_PATH.append(path)\r\n for _root, dirs, _files in os.walk(path):\r\n for dir in dirs:\r\n IMG_PATH.append(osp.join(path, dir))\r\n\r\nadd_image_path(get_module_data_path('spyderlib', relpath='images'))\r\n\r\nfrom spyderlib.otherplugins import PLUGIN_PATH\r\nif PLUGIN_PATH is not None:\r\n add_image_path(osp.join(PLUGIN_PATH, 'images'))\r\n\r\ndef get_image_path(name, default=\"not_found.png\"):\r\n \"\"\"Return image absolute path\"\"\"\r\n for img_path in IMG_PATH:\r\n full_path = osp.join(img_path, name)\r\n if osp.isfile(full_path):\r\n return osp.abspath(full_path)\r\n if default is not None:\r\n return osp.abspath(osp.join(img_path, default))\r\n\r\n\r\n#==============================================================================\r\n# Translations\r\n#==============================================================================\r\ndef get_translation(modname, dirname=None):\r\n \"\"\"Return translation callback for module *modname*\"\"\"\r\n if dirname is None:\r\n dirname = modname\r\n locale_path = get_module_data_path(dirname, relpath=\"locale\",\r\n attr_name='LOCALEPATH')\r\n # fixup environment var LANG in case it's unknown\r\n if \"LANG\" not in os.environ:\r\n import locale\r\n lang = locale.getdefaultlocale()[0]\r\n if lang is not None:\r\n os.environ[\"LANG\"] = lang\r\n import gettext\r\n try:\r\n _trans = gettext.translation(modname, locale_path, codeset=\"utf-8\")\r\n lgettext = _trans.lgettext\r\n def translate_gettext(x):\r\n if not PY3 and is_unicode(x):\r\n x = x.encode(\"utf-8\")\r\n y = lgettext(x)\r\n if is_text_string(y) and PY3:\r\n return y\r\n else:\r\n return to_text_string(y, \"utf-8\")\r\n return translate_gettext\r\n except IOError as _e: # analysis:ignore\r\n #print \"Not using translations (%s)\" % _e\r\n def translate_dumb(x):\r\n if not is_unicode(x):\r\n return to_text_string(x, \"utf-8\")\r\n return x\r\n return translate_dumb\r\n\r\n# Translation callback\r\n_ = get_translation(\"spyderlib\")\r\n\r\n\r\n#==============================================================================\r\n# Namespace Browser (Variable Explorer) configuration management\r\n#==============================================================================\r\n\r\ndef get_supported_types():\r\n \"\"\"\r\n Return a dictionnary containing types lists supported by the \r\n namespace browser:\r\n dict(picklable=picklable_types, editable=editables_types)\r\n \r\n See:\r\n get_remote_data function in spyderlib/widgets/externalshell/monitor.py\r\n get_internal_shell_filter method in namespacebrowser.py\r\n \r\n Note:\r\n If you update this list, don't forget to update doc/variablexplorer.rst\r\n \"\"\"\r\n from datetime import date\r\n editable_types = [int, 
float, complex, list, dict, tuple, date\r\n ] + list(TEXT_TYPES) + list(INT_TYPES)\r\n try:\r\n from numpy import ndarray, matrix, generic\r\n editable_types += [ndarray, matrix, generic]\r\n except ImportError:\r\n pass\r\n try:\r\n from pandas import DataFrame, Series\r\n editable_types += [DataFrame, Series]\r\n except ImportError:\r\n pass\r\n picklable_types = editable_types[:]\r\n try:\r\n from spyderlib.pil_patch import Image\r\n editable_types.append(Image.Image)\r\n except ImportError:\r\n pass\r\n return dict(picklable=picklable_types, editable=editable_types)\r\n\r\n# Variable explorer display / check all elements data types for sequences:\r\n# (when saving the variable explorer contents, check_all is True,\r\n# see widgets/externalshell/namespacebrowser.py:NamespaceBrowser.save_data)\r\nCHECK_ALL = False #XXX: If True, this should take too much to compute...\r\n\r\nEXCLUDED_NAMES = ['nan', 'inf', 'infty', 'little_endian', 'colorbar_doc',\r\n 'typecodes', '__builtins__', '__main__', '__doc__', 'NaN',\r\n 'Inf', 'Infinity', 'sctypes', 'rcParams', 'rcParamsDefault',\r\n 'sctypeNA', 'typeNA', 'False_', 'True_',]\r\n\r\n#==============================================================================\r\n# Mac application utilities\r\n#==============================================================================\r\n\r\nif PY3:\r\n MAC_APP_NAME = 'Spyder.app'\r\nelse:\r\n MAC_APP_NAME = 'Spyder-Py2.app'\r\n\r\ndef running_in_mac_app():\r\n if sys.platform == \"darwin\" and MAC_APP_NAME in __file__:\r\n return True\r\n else:\r\n return False\r\n","sub_path":"lib/python2.7/site-packages/spyderlib/baseconfig.py","file_name":"baseconfig.py","file_ext":"py","file_size_in_byte":10872,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"374620529","text":"\n################################################################################\n# NAME : get_spire_beam.py\n# DATE STARTED : June 18, 2019\n# AUTHORS : Dale Mercado & Benjamin Vaugahn\n# PURPOSE : This function computes the spire beam given a band, pixel size and\n# output map kernel.\n# EXPLANATION :\n# CALLING SEQUENCE :\n# INPUTS :\n# band (string) = one of 'PSW', 'PMW', 'PLW' (def: PSW)\n# pixsize (float) = the pixel size in arcsec (def: 6/8.333/12)\n# npixx (int) = the number of pixels required in the x axis\n# This should be odd so the PSF is\n# centered. (def: 5 FWHM rounded to odd)\n# npixy (int) = the number of pixels required in the y axis.\n# This should be odd so the PSF is\n# centered. (def: npixx)\n# xcent (float) = x pixel corresponding to center of the beam\n# (can be fractional, note that pixel\n# numbering starts at 0) (def: npixx/2 using\n# integral division, so if npixx is 31,\n# this is 15). This is in the\n# non-oversampled beam.\n# ycent (float) = y pixel corresponding to center of the beam\n# (can be fractional). 
(def: npixy/2, see\n#                           note for xcent for further info)\n# Optional inputs:\n#   bolometer (string) = Optional argument specifying which bolometer\n#                           to return beam for (i.e., band='PSW',\n#                           bolometer='A11': psf for 'PSWA11').\n#                           Not currently supported, but here for\n#                           when we have bolometer specific psfs in\n#                           the future.\n#   fwhm (float) = The FWHM of the beam, in arcsec.\n#                           Normally this is determined by band.\n#   oversamp = Amount to oversample pixels by before\n#                           convolving with pixel function.\n#                           Should be an odd integer (Def: 7)\n#\n#\n# OUTPUTS :\n#          beamkern (float) = array size npixx x npixy containing\n# REVISION HISTORY :\n################################################################################\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom math import *\nfrom astropy.io import fits\nimport scipy.signal\nimport os\nfrom astropy.convolution import Gaussian2DKernel, Gaussian1DKernel\nfrom get_spire_beam_fwhm import *\n\n\n\ndef get_spire_beam(band=None, pixsize=0,npixx=0, npixy=0,\n                   xcent=0, ycent=0,bolometer=0, fwhm='',\n                   norm=0, oversamp=0, verbose=1,\n                   factor=0):\n    errmsg = False\n    if bolometer != 0:\n        if verbose:\n            print('PSFs for specific bolometer not yet supported -- ignoring')\n\n    # Check if we have been given a band\n    # If not make an assumption\n    if band == None:\n        if verbose:\n            print('Band parameter not supplied, assuming PSW')\n        band = 'PSW'\n    # Check if we have been given a pixel size\n    if pixsize == 0:\n        # band = upper(band)\n        # units arcsec/pixel\n        if band == 'PSW':\n            pixsize = 6\n        elif band == 'PMW':\n            pixsize = 8. + (1/3)\n        elif band == 'PLW':\n            pixsize = 12\n        else:\n            print('Unknown band ' + band)\n        if verbose:\n            print('pixsize parameter not supplied, assuming %s arcsec' % pixsize)\n\n\n    if fwhm == '':\n        beamFWHM = get_spire_beam_fwhm(band)\n    else:\n        beamFWHM = fwhm\n\n    if beamFWHM < 0:\n        print('Invalid Beam FWHM value: ' + str(beamFWHM))\n\n    # Check if we've been given the map size, if not assume something\n    # npixx/npixy will be the final number of pixels\n    if npixx == 0:\n        npixx = round(beamFWHM * 5 / (pixsize))\n        if npixx % 2 != 1:\n            npixx +=1\n        if verbose:\n            print('npixx not supplied, using %d' % npixx)\n    #If no y size then assume same as x\n    if npixy == 0:\n        npixy = npixx\n\n    #Make sure that these have been cast from a float properly or we get errors\n    npixx = int(ceil(npixx))\n    npixy = int(ceil(npixy))\n\n    if npixx % 2 != 1 and verbose:\n        print('WARNING: npixx not odd, so PSF will not be centered')\n    if npixy % 2 != 1 and verbose:\n        print('WARNING: npixy not odd, so psf will not be centered')\n\n    # Now deal with oversampling\n    if oversamp == 0:\n        ioversamp = 7\n    else:\n        ioversamp = round(oversamp) #in case user provides float\n        if ioversamp % 2 != 1:\n            print('Oversamp must be an odd integer!')\n\n    x_gen = npixx * ioversamp\n    y_gen = npixy * ioversamp\n    gen_pixsize = np.float64(pixsize) / ioversamp\n\n    # Check if we have been given the center, if not assume middle\n    if xcent == 0:\n        # if verbose:\n        #     print('xcent parameter not supplied, assuming array center')\n        ixcent = x_gen / 2\n    else:\n        # Adjust for oversampling\n        ixcent = xcent * ioversamp\n        if ioversamp > 1:\n            ixcent = ixcent + ioversamp / 2\n\n    if ycent == 0:\n        # if verbose:\n        #     print('ycent parameter not supplied, assuming array center')\n        iycent = y_gen / 2\n    else:\n        iycent = ycent * ioversamp\n        if ioversamp > 1:\n            iycent = iycent +ioversamp / 2\n\n    # Normalize FWHM to pixels\n    beamFWHM /= gen_pixsize\n    # Convert the FWHM to a standard deviation for the astropy kernels (from psf_gaussian).\n    
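# For example (illustrative numbers only): an 18 arcsec FWHM on 6 arcsec\n    # pixels spans 3 px, so stdev = 3 / sqrt(8 * log(2)) ~= 1.27 px.\n    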
stdev = beamFWHM / (sqrt(8 * log(2)))\n\n    # If we want this normalized then call with norm flag set\n    if factor:\n        # 1D beam\n        beamkernraw = Gaussian1DKernel(stdev, x_size=x_gen)\n        beamkern = np.array(beamkernraw)\n        if norm:\n            beamkern = beamkern / beamkern.max()\n        if ioversamp > 1:\n            beamkern = rebin(beamkern,(npixx,npixy))\n    else:\n        beamkernraw = Gaussian2DKernel(stdev,x_size = x_gen, y_size = y_gen)\n        beamkern = np.array(beamkernraw)\n        if norm:\n            beamkern = beamkern / beamkern.max()\n        if ioversamp > 1:\n            beamkern = rebin(beamkern,(npixx,npixy))\n\n\n\n    # # Use for debugging\n    # plt.plot for 1d\n    # plt.plot(beamkern, drawstyle='steps')\n    # plt.imshow for 2d & colorbar\n    # plt.imshow(beamkern, interpolation='none', origin='lower')\n    # plt.colorbar()\n    # plt.xlabel('x [pixels]')\n    # plt.ylabel('y [pixels]')\n    # plt.show()\n    # beamkern = 1\n\n    return beamkern\n\n\ndef rebin(a, new_shape):\n    shape = a.shape\n    M = int(shape[0])\n    N = int(shape[1])\n    m, n = new_shape\n    if m < M:\n        # downsample by averaging blocks (the usual case here: oversampled -> final grid)\n        return a.reshape((m, M // m, n, N // n)).mean(3).mean(1)\n    else:\n        # upsample by repeating pixels\n        return np.repeat(np.repeat(a, m // M, axis=0), n // N, axis=1)\n","sub_path":"get_spire_beam.py","file_name":"get_spire_beam.py","file_ext":"py","program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"text":"import cv2\nimport numpy as np\n\nimage = cv2.imread('input.jpg', 0)\n\ndft = cv2.dft(np.float32(image), flags=cv2.DFT_COMPLEX_OUTPUT)\n\nr, c = image.shape\nradiusOfZeros = 30\nmask = np.zeros((r, c, 2), np.float32)\n\n# keep only frequencies farther than radiusOfZeros from all four corners\n# (the low-frequency bins of an unshifted DFT live at the corners)\nfor i in range(r):\n    for j in range(c):\n        if ((i * i + j * j > radiusOfZeros * radiusOfZeros) and\n                ((r - i - 1) * (r - i - 1) + j * j > radiusOfZeros * radiusOfZeros) and\n                (i * i + (c - j - 1) * (c - j - 1) > radiusOfZeros * radiusOfZeros) and\n                ((r - i - 1) * (r - i - 1) + (c - j - 1) * (c - j - 1) > radiusOfZeros * radiusOfZeros)):\n            mask[i, j] = 1\n\ndft = dft * mask\n\nimage_back = cv2.idft(dft, flags=cv2.DFT_COMPLEX_OUTPUT)\nimage_back = cv2.magnitude(image_back[:, :, 0], image_back[:, :, 1])\ncv2.normalize(image_back, image_back, 0.0, 1.0, cv2.cv.CV_MINMAX)\ncv2.imshow(\"imageBackAfterDFT\", image_back)\n\nimage = cv2.Laplacian(image, 0)\ncv2.imshow(\"imageAfterLaplacian\", image)\ncv2.waitKey(0)","sub_path":"task4.py","file_name":"task4.py","file_ext":"py","file_size_in_byte":1075,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"642818884","text":"### simple if\ncars = ['audi', 'bmw', 'subaru', 'toyota']\n\nfor car in cars:\n    if car == 'bmw':\n        print(car.upper())\n    else:\n        print(car.title())\n\n# if-elif-else\nage = 12\nif age < 4:\n    price = 0\nelif age < 18:\n    price = 5\nelse:\n    price = 10\nprint('Your cost is: ' + str(price) + '.')\n\n#multiple conditions\ntoppings = ['cheese', 'mushroom', 'cucumber']\nif 'cheese' in toppings:\n    print('added cheese')\nif 'cucumber' in toppings:\n    print('added cucumber')\n\n#exercise\n#1\nalien_color = 'red'\nif alien_color == 'green':\n    print('Congratulations, you earned 5 points!')\nif alien_color == 'red':\n    print('Congratulations, you earned 5 points!')\n#2\nif alien_color == 'green':\n    print('Congratulations, you earned 5 points for shooting alien!')\nelse:\n    print('Congratulations, you earned 10 points')\n#3\nif alien_color == 'green':\n    print('Congratulations, you earned 5 points for shooting alien!')\nelif alien_color == 'yellow':\n    print('Congratulations, you earned 10 points for shooting alien')\nelif alien_color == 'red':\n    print('Congratulations, you earned 15 points for shooting alien')\n\n#Using if statement with list\nrequested_toppings = ['mushrooms', 'green peppers', 'extra cheese']\n\nfor topping in requested_toppings:\n    if topping == 'green peppers':\n        print(\"Sorry we don't have green peppers available now!\")\n    else:\n        print(topping.title() + ' added.')\n\n#List not empty\nrequested_toppings = []\n\nif requested_toppings:\n    for topping in requested_toppings:\n        if topping == 'green peppers':\n            print(\"Sorry we don't have green peppers available now!\")\n        else:\n            print(topping.title() + ' added.')\nelse:\n    print(\"Are you sure you want a plain 
pizza?\")\n\n#Using multiple lists\navailable_toppings = ['mushrooms', 'olives', 'green peppers', 'pepperoni', 'pineapple' , 'extra cheese']\nrequested_toppings = ['mushrooms', 'french fries', 'extra cheese']\n\nfor topping in requested_toppings:\n if topping not in available_toppings:\n print('Topping ' + topping.title() + ' is not available in our restaurant')\n else:\n print(topping.title() + ' added.')\n\n","sub_path":"chapter1/isstatement.py","file_name":"isstatement.py","file_ext":"py","file_size_in_byte":2150,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"169058159","text":"#Author: Fatima Abukar\r\n#Events class wrapper with many thanks to: http://www.pygame.org/wiki/InputWrapper?parent=CookBook\r\n\r\nimport pygame,sys\r\nfrom pygame.locals import *\r\nfrom pygame import *\r\n\r\n \r\n\r\nclass Event(object):\r\n pygame.init()\r\n #create global variables \r\n m_DOWN = \"isdown\"\r\n k_DOWN= \"isdown\"\r\n m_UP=\"isup\"\r\n k_UP=\"isup\"\r\n MOTION = \"motion\"\r\n Quit = True\r\n butt = {1: \"Button1\", 2: \"Button2\", 3: \"Button3\"}\r\n events = {}\r\n keydict={}\r\n mousedict={}\r\n quitdict={}\r\n button_state = ()\r\n mouse_pos = ()\r\n sizex, sizey=0,0\r\n newkeyname =\"\"\r\n \r\n #add a keyevent to dictionary \r\n @classmethod\r\n def __add_keyDict(cls,event):\r\n if event.type ==KEYDOWN:\r\n Event.keydict.update({event.key:[event, Event.k_DOWN]})\r\n elif event.type==KEYUP:\r\n Event.keydict.update({event.key:[event, Event.k_UP]})\r\n\r\n #add an mouseevent to dictionary \r\n @classmethod\r\n def __add_mouseDict(cls, event):\r\n if event.type == MOUSEBUTTONDOWN:\r\n Event.events.update({Event.butt[event.button]: [event, Event.m_DOWN]})\r\n elif event.type == MOUSEBUTTONUP:\r\n Event.events.update({Event.butt[event.button]: [event, Event.m_UP]})\r\n elif event.type == MOUSEMOTION:\r\n Event.events.update({MOUSEMOTION: [event, Event.MOTION]})\r\n \r\n #add a quitevent to dictionary \r\n @classmethod \r\n def __add_quitDict(cls,event):\r\n if event.type == pygame.QUIT:\r\n Event.quitdict.update({\"exit\": [event, Event.Quit]})\r\n \r\n #add a list of events to dictionaries\r\n @classmethod\r\n def __add_events(cls, events):\r\n for event in events:\r\n Event.__add_mouseDict(event)\r\n Event.__add_keyDict(event)\r\n Event.__add_quitDict(event)\r\n\r\n #set the keycodes \r\n @classmethod\r\n def __set_code(cls,keyname):\r\n if keyname ==\"left\":\r\n keyname = K_LEFT\r\n elif keyname ==\"right\":\r\n keyname =K_RIGHT\r\n elif keyname== \"up\":\r\n keyname = K_UP\r\n elif keyname == \"down\":\r\n keyname = K_DOWN\r\n Event.newkeyname = keyname\r\n\r\n #update the mouse buttons and positions \r\n @classmethod\r\n def __update_mouse(cls, buttons = (0,0,0), pos = (0,0)):\r\n Event.mouse_pos = pos\r\n Event.button_state = buttons\r\n\r\n #checks if the key is \"quit\" so you can exit the game \r\n @classmethod\r\n def contains(cls, name):\r\n for names in Event.quitdict:\r\n if names == name:\r\n return name\r\n \r\n #quits the game \r\n @classmethod\r\n def quit_game(cls):\r\n pygame.quit()\r\n sys.exit()\r\n \r\n @classmethod\r\n def exit_game(cls,game):\r\n for event in pygame.event.get():\r\n if event.type==pygame.QUIT:\r\n game=False\r\n pygame.quit()\r\n \r\n \r\n #checks if the key is at a down state and you have entered a keyname\r\n @classmethod\r\n def is_key(cls, keyname):\r\n Event.__set_code(keyname)\r\n event = Event.keydict.get(Event.newkeyname)\r\n novalue = None\r\n if event == novalue:\r\n key_code = 
novalue\r\n        else:\r\n            key_code = event[1]\r\n        if key_code == Event.k_DOWN:\r\n            return True\r\n        else:\r\n            return False\r\n    \r\n    #checks if the key has been released\r\n    @classmethod\r\n    def key_up(cls, keyname):\r\n        #get event \r\n        event = Event.keydict.get(keyname)\r\n        novalue = None\r\n        #if the event has no value \r\n        if event==novalue:\r\n            #key has no value \r\n            key_code = novalue\r\n        else:\r\n            #key has state\r\n            key_code = event[1]\r\n        #if the key equals the state \r\n        if key_code == Event.k_UP:\r\n            #then the key has been released \r\n            return True\r\n        #otherwise the key has not been released \r\n        return False\r\n    \r\n    #checks if the mouse has been pressed \r\n    @classmethod\r\n    def mouse_down(cls, button):\r\n        \r\n        event = Event.events.get(button)\r\n        novalue=None\r\n        if event ==novalue:\r\n            key_code = novalue\r\n        else:\r\n            key_code = event[1]\r\n        \r\n        if key_code == Event.m_DOWN: \r\n            return True\r\n        else:\r\n            return False\r\n\r\n    #sets the image size for the cursor \r\n    @classmethod\r\n    def set_CursorPosOnImage(cls,sizex,sizey):\r\n        Event.sizex=sizex\r\n        Event.sizey=sizey\r\n\r\n    #sets the cursor position on the image \r\n    @classmethod \r\n    def set_MouseCursorPos(cls,x,y):\r\n        ## the passed x and y is the position of the cursor. \r\n        x,y=Event.mouse_pos\r\n        ##subtract half the image size from cursor x and y \r\n        x=x-Event.sizex/2\r\n        y=y-Event.sizey/2\r\n        ## set the new mouse position of x and y. \r\n        Event.mouse_pos=x,y\r\n    \r\n    #updates all the events \r\n    @classmethod\r\n    def update(cls):\r\n        clock = pygame.time.Clock()\r\n        time_passed = clock.tick(60)\r\n        Event.__update_mouse(pygame.mouse.get_pressed(),pygame.mouse.get_pos())\r\n        Event.__add_events(pygame.event.get())\r\n        x,y = Event.mouse_pos\r\n        Event.set_MouseCursorPos(x,y)\r\n\r\n    \r\n    \r\n    \r\n","sub_path":"Game/PGS/Event.py","file_name":"Event.py","file_ext":"py","file_size_in_byte":5477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
+{"seq_id":"522983395","text":"import collections\n\nimport dataset\n\nFilterSizes = collections.namedtuple(\n    'FilterSizes', ['conv0', 'conv1', 'conv2', 'conv3'])\n\n\nclass Model:\n    def __init__(\n            self,\n            name,\n            datadir,\n            validation_size=500,\n            test_size=1,\n            batch_size=100,\n            learning_rate=0.01,\n            learning_rate_decay_factor=.1,\n            max_steps=1000,\n            rnn_cell_size=64,\n            num_rnn_layers=1,\n            grad_clip=10,\n            conv_filter_sizes=FilterSizes(16, 16, 16, 16),\n            embedding_dims=dataset.EmbeddingSize(\n                **{'chars': 5, 'fonts': 3, 'fontsizes': 2, 'tokens': 10}),\n            use_lstm=False,\n            use_rnn_layer_norm=False,\n            dropout_keep_prob=1.0\n    ):\n        self.name = name\n        self.data = dataset.read_datasets(datadir, validation_size, test_size)\n        self.batch_size = batch_size\n        self.learning_rate = learning_rate\n        self.learning_rate_decay_factor = learning_rate_decay_factor\n        self.grad_clip = grad_clip\n        self.max_steps = max_steps\n        self.rnn_cell_size = rnn_cell_size\n        self.num_rnn_layers = num_rnn_layers\n        self.feature_vocab_size = self.data.feature_vocab_size\n        self.token_vocab_size = self.data.token_vocab_size\n        self.filters = conv_filter_sizes\n        self.embedding_dims = embedding_dims\n        self.use_lstm = use_lstm\n        self.use_rnn_layer_norm = use_rnn_layer_norm\n        self.dropout_keep_prob = dropout_keep_prob\n        self.feature_dim = (embedding_dims.chars +\n                            embedding_dims.fonts +\n                            embedding_dims.fontsizes)\n\n    @classmethod\n    def small(cls, datadir, validation_size=500, test_size=1):\n        return cls('small', datadir, validation_size, test_size,\n                   20, 1e-4, 1, 6000, 64, 1)\n\n    @classmethod\n    
def medium(cls, datadir, validation_size=1000, test_size=1):\n return cls('medium', datadir, validation_size, test_size,\n 100, 0.0001, 1, 2000, 256, 3)\n\n @classmethod\n def large(cls, datadir, validation_size=5000, test_size=1):\n return cls('large', datadir, validation_size, test_size,\n 50, 0.0001, 1, 6000, 1536, 3,\n conv_filter_sizes=FilterSizes(10, 10, 10, 10),\n dropout_keep_prob=0.5)\n\n @classmethod\n def medium_reg(cls, datadir, validation_size=500, test_size=1):\n return cls('medium-reg', datadir, validation_size, test_size,\n 100, 0.01, 1, 3000, 200, 4,\n use_lstm=True, use_rnn_layer_norm=True)\n","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":2583,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
{"seq_id":"432320108","text":"#typically programmers use i, j, k, l for the temp variable\n#if the temp var is special or used a lot, it will be given a proper name\n\n#range is a special built-in function in python. when used with a \n#for loop it will generate the numbers from the start value up to stop-1\n#So in this loop, the range() function will generate a sequence of 0->9\n#This seems weird, but will make sense later in the data structure lesson\n\nfor i in range(0,10):\n\tprint(i)\n\nx = int(input(\"Enter number: \"))\nfor i in range(0, x):\n\tprint(\"Hello World\")","sub_path":"for_loops.py","file_name":"for_loops.py","file_ext":"py","file_size_in_byte":514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
{"seq_id":"295129443","text":"from flappening.entities import Text\n\nfrom flappening.utils import avgScore\n\n\nclass Statistics:\n\n #\n #\n # -------- Init -----------\n #\n def __init__(self):\n super().__init__()\n\n self.bestScore = Text('', position=[550, 25])\n self.avgScore = Text('', position=[550, 50])\n self.playersAlive = Text('', position=[550, 75])\n self.generation = Text('', position=[550, 100])\n\n #\n #\n # -------- update -----------\n #\n def update(self, players, playersGarbage, gameIteration) -> None:\n\n if (len(players) >= 1):\n\n self.playersAlive.setContent('birds alive: ' + str(len(players)))\n\n self.bestScore.setContent('best score: ' +\n str(players[-1].getScore()))\n\n self.avgScore.setContent(\n 'avg score: ' + str(avgScore([*players, *playersGarbage])))\n\n self.generation.setContent('generation: ' + str(gameIteration))\n\n # -------- draw -----------\n #\n def draw(self) -> None:\n self.playersAlive.draw()\n self.bestScore.draw()\n self.avgScore.draw()\n self.generation.draw()\n","sub_path":"flappening/game/stats.py","file_name":"stats.py","file_ext":"py","file_size_in_byte":1161,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
{"seq_id":"99783402","text":"'''\n$ python3 s52.py\n\n\n'''\n\nfrom s52_aux import *\nfrom Crypto.Cipher import AES\nfrom Crypto.Hash import MD5\nfrom Crypto.Util.Padding import pad\nfrom Crypto.Random import get_random_bytes as rand\n#from binascii import hexlify\n\ndef f(M, H = AES.new(b'YELLOW SUBMARINE', AES.MODE_ECB).encrypt(b'\\x34\\x12\\x80' + (b'\\x00' * 13))[:2] + (b'\\x80' + b'\\x00' * 13)):\n if len(H) < 16:\n H = pad(H, 16, 'iso7816')\n if len(M) % 16 != 0:\n M = pad(M, 16, 'iso7816')\n blocks = []\n for i in range(len(M) // 16):\n blocks.append(M[i*16:(i*16)+16])\n for block in blocks:\n aes = AES.new(H, AES.MODE_ECB)\n H = aes.encrypt(block)[:2] + b'\\x80' + (b'\\x00' * 13)\n return H[:2]\n\ndef g(M, H = AES.new(b'YELLOW SUBMARINE', 
AES.MODE_ECB).encrypt(b'\\x21\\x43\\x54\\x80' + (b'\\x00' * 12))[:3] + (b'\\x80' + b'\\x00' * 12)):\n if len(H) < 16:\n H = pad(H, 16, 'iso7816')\n if len(M) % 16 != 0:\n M = pad(M, 16, 'iso7816')\n blocks = []\n for i in range(len(M) // 16):\n blocks.append(M[i*16:(i*16)+16])\n for block in blocks:\n aes = AES.new(H, AES.MODE_ECB)\n H = aes.encrypt(block)[:3] + b'\\x80' + (b'\\x00' * 12)\n return H[:3]\n\ndef collide(M_chain = None):\n while True:\n sample0 = rand(16)\n sample1 = rand(16)\n if sample1 == sample0:\n continue\n if M_chain is not None:\n H_0 = f(sample0, M_chain)\n H_1 = f(sample1, M_chain)\n if H_0 == H_1:\n return [sample0, sample1, f(sample0, M_chain)]\n else:\n H_0 = f(sample0)\n H_1 = f(sample1)\n if H_0 == H_1:\n return [sample0, sample1, f(sample0)]\n\ndef main():\n collisions = []\n collisions.append(collide())\n\n print('pre')\n for b in range(12):\n print('b', b + 1)\n collisions.append(collide(collisions[-1][2]))\n\n g_calls = 0\n rev_lookup = {}\n for collision in coll4096(collisions):\n g_calls += 1\n lookup = g(collision)\n if lookup not in rev_lookup:\n rev_lookup[lookup] = collision\n else:\n print('found\\n', collision, '\\n', rev_lookup.get(lookup))\n print('4096-collision: g calls', g_calls, end='\\n\\n')\n exit()\n\n print('\\n\\n4096-collision: g calls', g_calls, end='\\n\\n')\n print('length', len(rev_lookup))\n\n\n g_calls = 0\n rev_lookup = {}\n for collision in coll8192(collisions):\n g_calls += 1\n lookup = g(collision)\n if lookup not in rev_lookup:\n rev_lookup[lookup] = collision\n else:\n print('found\\n', collision, '\\n', rev_lookup.get(lookup))\n print('8192-collision: g calls', g_calls)\n exit()\n \n print('\\n\\n8192-collision: g calls', g_calls, end='\\n\\n')\n print('length', len(rev_lookup))\n\n\n for collision in collisions:\n print(collision)\n print('\\n')\n\n print('failed to find g-collision.')\n\n# print('count', trampoline(collisions))\n\n\n\nif __name__ == '__main__':\n main()\n\n","sub_path":"response/s52.py","file_name":"s52.py","file_ext":"py","file_size_in_byte":2889,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
{"seq_id":"576642754","text":"from multiprocessing import Process\nimport pulse as pul\nfrom flask import Flask, render_template\nimport csv\n\napp = Flask(__name__)\n\n@app.route('/')\ndef index():\n with open('csv/stats.csv', 'r') as f:\n dataReader = csv.reader(f)\n for row in dataReader:\n print(row)\n return render_template('index.html', row=row, dataReader=dataReader)\n\nif __name__ == \"__main__\":\n # Create a subprocess and start the periodic-execution script\n p = Process(target=pul.start)\n p.start()\n \n # Start the Flask HTTP server\n app.run(host='0.0.0.0', port=8000)\n print(\"c_start finished\")","sub_path":"cuous_start.py","file_name":"cuous_start.py","file_ext":"py","file_size_in_byte":627,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
{"seq_id":"269930634","text":"from gameboard import *\ngrid = Grid(11, 11)\ngrid.set_grid()\ngrid.test()\n# print(grid.grid)\nfor y in range(len(grid.grid)):\n for x in range(len(grid.grid[0])):\n if grid.grid[x][y] == 10:\n print(\"* \", end=\"\")\n elif grid.grid[x][y] == 3:\n print(\"o \", end=\"\")\n elif x == 5 and y == 0:\n print(\"@ \", end=\"\")\n else:\n print(\". 
\", end=\"\")\n print()\ncell = grid.get_cell((5, 0))\ntail = grid.get_cell((4, 6))\nend = grid.get_cell((8, 8))\n\nprint(grid.count_reachable_area(cell))\n\nprint(grid.a_star(cell, [tail]))\n\n\nprint(\"check neighbor\")\nneighbors = grid.get_neighbors(cell)\ngrid.set_cell((1, 6), 0)\n# for y in range(len(grid.grid)):\n# for x in range(len(grid.grid[0])):\n# if grid.grid[x][y] == 10:\n# print(\"* \", end=\"\")\n# elif grid.grid[x][y] == 3:\n# print(\"o \", end=\"\")\n# elif x == 2 and y == 5:\n# print(\"@ \", end=\"\")\n# else:\n# print(\". \", end=\"\")\n# print()\nfor neighbor in neighbors:\n print(\"direction:\", cell.get_direction(neighbor))\n print(grid.count_reachable_area(neighbor))\n\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1142,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"216852423","text":"#!/usr/bin/python\n\nimport requests\nimport time\nimport sys\n\nvalues = {\n \"65-US Central\": \"US Central\",\n \"1-US East\": \"US East\",\n \"4-US West\": \"US West\",\n \"91-WINDFLIX US\": \"WINDFLIX US\",\n \"7-Canada East\": \"Canada East\",\n \"63-Canada West\": \"Canada West\",\n \"69-Austria\": \"Austria\",\n \"75-Belgium\": \"Belgium\",\n \"74-Bulgaria\": \"Bulgaria\",\n \"97-Croatia\": \"Croatia\",\n \"70-Czech Republic\": \"Czech Republic\",\n \"62-Denmark\": \"Denmark\",\n \"99-Estonia\": \"Estonia\",\n \"73-Finland\": \"Finland\",\n \"21-France\": \"France\",\n \"16-Germany\": \"Germany\",\n \"84-Greece\": \"Greece\",\n \"71-Hungary\": \"Hungary\",\n \"80-Iceland\": \"Iceland\",\n \"58-Ireland\": \"Ireland\",\n \"79-Israel\": \"Israel\",\n \"55-Italy\": \"Italy\",\n \"76-Latvia\": \"Latvia\",\n \"90-Lithuania\": \"Lithuania\",\n \"83-Moldova\": \"Moldova\",\n \"13-Netherlands\": \"Netherlands\",\n \"48-Norway\": \"Norway\",\n \"68-Poland\": \"Poland\",\n \"92-Portugal\": \"Portugal\",\n \"45-Romania\": \"Romania\",\n \"94-Slovakia\": \"Slovakia\",\n \"51-Spain\": \"Spain\",\n \"24-Sweden\": \"Sweden\",\n \"33-Switzerland\": \"Switzerland\",\n \"100-Tunisia\": \"Tunisia\",\n \"10-United Kingdom\": \"United Kingdom\",\n \"93-WINDFLIX UK\": \"WINDFLIX UK\",\n \"102-Albania\": \"Albania\",\n \"82-Azerbaijan\": \"Azerbaijan\",\n \"56-India\": \"India\",\n \"42-Russia\": \"Russia\",\n \"104-Serbia\": \"Serbia\",\n \"103-Slovenia\": \"Slovenia\",\n \"66-South Africa\": \"South Africa\",\n \"60-Turkey\": \"Turkey\",\n \"77-Ukraine\": \"Ukraine\",\n \"30-Australia\": \"Australia\",\n \"67-New Zealand\": \"New Zealand\",\n \"23-Hong Kong\": \"Hong Kong\",\n \"87-Indonesia\": \"Indonesia\",\n \"39-Japan\": \"Japan\",\n \"78-Malaysia\": \"Malaysia\",\n \"98-Philippines\": \"Philippines\",\n \"36-Singapore\": \"Singapore\",\n \"59-South Korea\": \"South Korea\",\n \"85-Thailand\": \"Thailand\",\n \"81-Vietnam\": \"Vietnam\",\n \"89-Argentina\": \"Argentina\",\n \"64-Brazil\": \"Brazil\",\n \"96-Colombia\": \"Colombia\",\n \"54-Mexico\": \"Mexico\"\n}\n\nheaders = {\n 'authority': 'nld.windscribe.com',\n 'cache-control': 'max-age=0',\n 'origin': 'https://nld.windscribe.com',\n 'upgrade-insecure-requests': '1',\n 'content-type': 'application/x-www-form-urlencoded',\n 'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Safari/537.36',\n 'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',\n 'referer': 'https://nld.windscribe.com/getconfig/openvpn',\n 'accept-encoding': 'gzip, deflate, br',\n 
'accept-language': 'en-US,en;q=0.9',\n 'cookie': sys.argv[1]\n}\n\nfor key, value in values.items():\n for proto in [\"tcp\", \"udp\"]:\n data = {\n 'location': key,\n 'protocol': proto,\n 'port': '1194',\n 'cipher': 'cbc'\n }\n\n filename = \"{name}-{protocol}.ovpn\".format(\n name=value.replace(' ', '-'),\n protocol=proto\n )\n time.sleep(5)\n response = requests.post('https://nld.windscribe.com/getconfig/openvpn', headers=headers, data=data)\n f = open(filename, \"w\")\n f.write(response.text)\n","sub_path":"scraper.py","file_name":"scraper.py","file_ext":"py","file_size_in_byte":3488,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
{"seq_id":"239272040","text":"import sys\n\nfrom PyQt5.QtWidgets import QApplication\n\nfrom playground.mainwindow import MainWindow\nfrom playground.projectmanagerdialog import ProjectManagerDialog\n\n\ndef main():\n app = QApplication(sys.argv)\n app.setOrganizationName('cyberegoorg')\n app.setApplicationName('playground')\n\n pm = ProjectManagerDialog()\n ret = pm.exec()\n\n if ret == pm.Accepted:\n name, dir = pm.open_project_name, pm.open_project_dir\n\n mw = MainWindow()\n mw.show()\n mw.focusWidget()\n mw.open_project(name, dir)\n\n sys.exit(app.exec_())\n\n else:\n sys.exit(0)","sub_path":"playground/src/playground/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":604,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
{"seq_id":"402356882","text":"import numpy as np\nfrom numba import jit\n\n\ndef gradd(init, b, Q, tol=1e-10, iters=1000):\n\n old = init\n new = init + 10 + tol\n i = 0\n # print(\"a\",np.linalg.norm(old - new) > tol,(iters > i))\n\n while (np.linalg.norm(old - new) > tol) and (iters > i):\n if i != 0:\n old = new\n new = old - np.linalg.inv(Q) @ (Q @ old + b)\n i += 1\n # print(new)\n # print(\"a\",np.linalg.norm(old - new))\n if i < iters:\n return new\n else:\n return \"not finished\"\n\n\nQ = np.array([[3, 12], [0, 4]])\nb = np.array([3, 7])\nxo = np.array([10, 0])\n\n\nprint(gradd(xo, b, Q))\nprint(-np.linalg.inv(Q) @ b)\n","sub_path":"ProbSets/Math/Week 7/six.py","file_name":"six.py","file_ext":"py","file_size_in_byte":650,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"}
{"seq_id":"532509297","text":"#!/usr/bin/python\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport pandas as pd\nfrom sys import argv\n\n\n### Run it like this\n# $ python plotter.py stat/YYMMDD_HHMM\n\nif len(argv) == 1:\n\tprint(\"Run ./plotter.py stat/YYMMDD_HHMM\")\n\texit(-1)\n\n# Directory containing the csv files\npath=argv[1]\n\n# Data tables\nmeasuredf = pd.read_csv(path+'/metrics_out.csv')\nperfdf = pd.read_csv(path+'/perf_out.csv')\n\n\n# Metrics / Flags plot\nplt.figure(figsize=(15,10))\nsns.violinplot(y='flags', x='Métricas',data=measuredf, linewidth=0.3)\nplt.xlim(0, None)\n# Save\nplt.savefig(path+'/violinplot_flags.jpg')\nplt.show()\n\n\n# More plots over here\n\"\"\" plt.figure(figsize=(8,8))\ndf = pd.read_csv(\"data_cambio_algoritmo.csv\")\nsns.set_style(\"whitegrid\")\nplot1 = sns.violinplot(x=df[\"Versión\"], y=df[\"Métrica\"])\nplot1.set(ylim=(0,None))\nplt.savefig(\"algorithm_comparison.jpg\")\n\nplt.figure(figsize=(15,15))\ndf = pd.read_csv(\"datos comp-flag.csv\")\nsns.set_style(\"whitegrid\")\nplot2 = sns.violinplot(y=df[\"COMP/OPT/FLAGS\"], x=df[\"METRICA\"], linewidth=0.2, 
scale=\"width\",palette=\"Set3\")\nplot2.set(xlim=(0,None))\nplt.savefig(\"comp-opt-flag_comparison.jpg\")\n\nplt.figure(figsize=(10,8))\ndf = pd.read_csv(\"comp_flags.csv\")\nsns.set_style(\"whitegrid\")\nplot3 = sns.boxplot(x=\"variable\",y=\"value\", data=pd.melt(df),palette=\"Set3\")\nplot3.set(ylim=(0,None))\nplot3.set(xlabel=\"Flags\")\nplot3.set(ylabel=\"Métrica\")\nplt.savefig(\"specific-flags_boxplot.jpg\")\n\nplt.figure(figsize=(15,8))\ndf = pd.read_csv(\"comp_flags_firaunroll.csv\")\nsns.set_style(\"whitegrid\")\nplot4 = sns.kdeplot(df[\"FIRA\"])\nplot4 = sns.kdeplot(df[\"FBLOCK\"])\nplot4 = sns.kdeplot(df[\"FUNROLL\"])\nplot4 = sns.kdeplot(df[\"FFASTMATH\"])\nplot4 = sns.kdeplot(df[\"FUNROLL_FIRA\"], shade=True)\nplot4.set(xlim=(0,None))\nplot4.set(xlabel=\"Métrica\")\nplot4.set(ylabel=\"Frecuencia\")\n\nplt.savefig(\"specific-flags_distribution.jpg\")\n\"\"\"\n\n","sub_path":"plotter.py","file_name":"plotter.py","file_ext":"py","file_size_in_byte":1857,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"483259890","text":"NAME = 'Extract UV Thumbnail'\nORDER = 1\nVALID = True\nTYPE = 'extractor'\nKEY = 'uv_thumbnail'\nOWNER = 'Subin Gopi'\nCOMMENTS = 'To create uv thumbnail file'\nVERSION = '0.0.0'\nMODIFIED = 'April 19, 2020'\n\n\ndef execute(output_path=None, **kwargs):\n import os\n from studio_usd_pipe.core import common\n from studio_usd_pipe.utils import maya_asset \n if not os.path.isfile(kwargs['thumbnail']):\n return False, [kwargs['thumbnail']], 'not found input thumbnail!...'\n ouput_image_path = os.path.join(\n output_path,\n '{}.png'.format(kwargs['caption'])\n )\n premission = common.data_exists(ouput_image_path, True)\n if not premission:\n return False, [ouput_image_path], 'not able to save thumbnail!...'\n thumbnail = maya_asset.create_thumbnail(kwargs['thumbnail'], ouput_image_path)\n return True, [thumbnail], 'success!...'\n","sub_path":"studio_usd_pipe/resource/push/maya/uv/extractor_thumbnail.py","file_name":"extractor_thumbnail.py","file_ext":"py","file_size_in_byte":876,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"50792899","text":"from django.shortcuts import render, HttpResponseRedirect, redirect\nfrom django.contrib.auth.decorators import login_required\nfrom .forms import SignUpForm, OrganizationForm\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth import login, authenticate\nfrom .models import OrganizationCreator, Organization\n\nimport phonenumbers\n\n\ndef home(request):\n return render(request, 'core/index.html')\n\n\ndef signup(request):\n template = 'registration/signup.html'\n\n def post():\n data = {\n 'username': None,\n 'password': None,\n 'confirm_password': None,\n 'email': None,\n 'phone_number': None,\n 'first_name': None,\n 'last_name': None\n }\n form = SignUpForm(request.POST)\n\n if not form.is_valid():\n return render(request, template, {'form': form})\n\n for val in data:\n data[val] = form.cleaned_data[val]\n\n if data['password'] != data['confirm_password']:\n return render(request, template, {'form': form, 'error': {\n 'password': 'Passwords do not match'\n }})\n\n try:\n parsed_phone = phonenumbers.parse(data['phone_number'], \"KE\")\n if not phonenumbers.is_valid_number(parsed_phone):\n raise phonenumbers.NumberParseException(msg='Invalid phone number', error_type='Invalid phone')\n\n except phonenumbers.NumberParseException:\n return render(request, template, {'form': form, 'error': {\n 
'phone_number': 'Enter a valid phone number'\n }})\n\n if not User.objects.filter(username=data['username']):\n user = User.objects.create(\n username=data['username'],\n first_name=data['first_name'],\n last_name=data['last_name'],\n email=data['email']\n )\n user.set_password(data['password'])\n user.save()\n OrganizationCreator.objects.create(user=user, phone=data['phone_number'])\n user = authenticate(username=data['username'], password=data['password'])\n login(request, user)\n return HttpResponseRedirect('/dashboard/')\n\n return render(request, template, {'form': form, 'error': {\n 'username': 'A user exists with that username'\n }})\n\n def get():\n form = SignUpForm()\n return render(request, template, {'form': form})\n\n if request.method == 'GET':\n return get()\n\n if request.method == 'POST':\n return post()\n\n\ndef student_signup(request):\n return render(request, 'registration/signup.html')\n\n\n@login_required()\ndef dashboard(request):\n context = {\n 'page_title': 'dashboard'\n }\n return render(request, 'core/dashboard/home.html', context)\n\n\n@login_required()\ndef add_organization(request):\n template = 'core/dashboard/add_organization.html'\n\n def get():\n form = OrganizationForm()\n context = {\n 'page_title': 'add organization',\n 'form': form\n }\n return render(request, template, context)\n\n def post():\n username = request.user.username\n\n data = {\n 'name': None,\n 'email': None,\n 'website': None,\n 'address': None,\n 'phone': None,\n }\n form = OrganizationForm(request.POST)\n\n if not form.is_valid():\n return render(request, template, {'form': form})\n\n for val in data:\n data[val] = form.cleaned_data[val]\n\n try:\n parsed_phone = phonenumbers.parse(data['phone'], \"KE\")\n if not phonenumbers.is_valid_number(parsed_phone):\n raise phonenumbers.NumberParseException(msg='Invalid phone number', error_type='Invalid phone')\n\n except phonenumbers.NumberParseException:\n return render(request, template, {'form': form, 'error': {\n 'phone': 'Enter a valid phone number'\n }})\n\n try:\n user = User.objects.get(username=username)\n creator = OrganizationCreator.objects.get(user=user)\n\n obj, created = Organization.objects.get_or_create(\n name=data['name'],\n email=data['email'],\n website=data['website'],\n address=data['address'],\n phone=data['phone'],\n created_by=creator,\n approved=True\n )\n if created is False:\n return render(request, template, {\n 'form': form,\n 'errors': 'An organization exists with the same entries'\n })\n\n return redirect('/organizations/view/')\n\n except User.DoesNotExist or OrganizationCreator.DoesNotExist:\n return render(request, template, {\n 'form': form,\n 'errors': 'User does not exist'\n })\n\n if request.method == 'GET':\n return get()\n\n if request.method == 'POST':\n return post()\n\n\n@login_required()\ndef get_organizations(request):\n user = User.objects.get(username=request.user.username)\n creator = OrganizationCreator.objects.get(user=user)\n organizations = Organization.objects.filter(created_by=creator)\n context = {\n 'page_title': 'View Organizations',\n 'organizations': organizations\n }\n return render(request, 'core/dashboard/view_organizations.html', context)\n\n\n@login_required()\ndef add_position(request):\n context = {\n 'page_title': 'add position'\n }\n return render(request, 'core/dashboard/add_position.html', context)\n\n\n@login_required()\ndef get_positions(request):\n context = {\n 'page_title': 'view positions'\n }\n return render(request, 'core/dashboard/view_positions.html', 
context)\n\n\n@login_required()\ndef applications(request):\n context = {\n 'page_title': 'applications'\n }\n return render(request, 'core/dashboard/applications.html', context)\n\n","sub_path":"core/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6038,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"605625606","text":"# -*- coding: utf-8 -*-\n\n# Copyright (c) 2017 SHIELD, UBIWHERE\n# ALL RIGHTS RESERVED.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# Neither the name of the SHIELD, UBIWHERE nor the names of its\n# contributors may be used to endorse or promote products derived from this\n# software without specific prior written permission.\n#\n# This work has been performed in the framework of the SHIELD project,\n# funded by the European Commission under Grant number 700199 through the\n# Horizon 2020 program. The authors would like to acknowledge the contributions\n# of their colleagues of the SHIELD partner consortium (www.shield-h2020.eu).\n\n\nimport logging.config\n\nimport os\nimport yaml\n\n\ndef setup_logging(config_file='logging.yaml', default_level=logging.INFO, env_key='LOG_CFG'):\n \"\"\"\n Setup logging configuration\n\n \"\"\"\n\n path = config_file\n cfg_from_env = os.getenv(env_key, None)\n\n if cfg_from_env:\n path = cfg_from_env\n\n if os.path.exists(path):\n with open(path, 'rt') as f:\n config = yaml.safe_load(f.read())\n logging.config.dictConfig(config)\n else:\n logging.basicConfig(level=default_level)\n","sub_path":"src/utils/storeutils/log.py","file_name":"log.py","file_ext":"py","file_size_in_byte":1657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"167411743","text":"\"\"\"\nHey Andrew! This is the assignment my friend Jake gave me. He already gave some sick feedback for it.\nSo whenever you finish, I'll send along my code and his notes. I'm not sure of the best way to organize\nour code on here yet, so I may end up switching some stuff around if we end up with a lot of files.\n\n\nASSIGNMENT 1 -- INTRO\n````````````````````````````````````````````````````````````````````````````````````````\n1. Print out a greeting to the user\n2. Make a function that adds two variables together and prints the output\n3. Break a given string into a character array, then print the letters in alphabetical order with no duplicates.\n In python, an 'array' is generally a list.\n\"\"\"\n\n####################################\n######### ANDREW'S CODE ############\n####################################\n\nname = input(\"Hello User, what is your name? 
\")\nprint(\"Welcome\", name)\n\ndef assignment():\n dna = \"tcgcgatcgc\"\n dna2 = \"tggggcatgc\"\n\n recombination = dna + dna2\n lst = list(recombination)\n print(lst)\n\nassignment()","sub_path":"andrew_assignment_1.py","file_name":"andrew_assignment_1.py","file_ext":"py","file_size_in_byte":1031,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"227082219","text":"from .include import *\n\nimport tensorflow as tf\ntf.get_logger().setLevel(\"ERROR\")\n\nclass Conv3d:\n \"\"\"\n 3d convolution layer.\n \"\"\"\n def __init__(self, filters=32, kernel_size=3, stride=1, \n padding=\"SAME\", name=\"Conv3d\",\n activation=tf.nn.relu, \n regularizer=tf.contrib.layers.l2_regularizer(scale=1.),\n use_batch_norm=True):\n self.name = name\n self.filters = filters\n self.kernel_size = kernel_size\n self.stride = stride\n self.padding = padding\n self.activation = activation\n self.regularizer = regularizer\n self.use_batch_norm = use_batch_norm\n\n def forward(self, input, training=False, **args):\n with tf.variable_scope(self.name):\n weights = tf.get_variable(name=\"w\", \n shape=(self.kernel_size, self.kernel_size, self.kernel_size, input.shape[-1], self.filters),\n dtype=tf.float32, initializer=tf.contrib.layers.xavier_initializer(), \n regularizer=self.regularizer)\n x = tf.nn.conv3d(input, weights, strides=[1, self.stride, self.stride, self.stride, 1],\n padding=self.padding, name=\"conv\")\n if self.use_batch_norm:\n x = tf.layers.batch_normalization(x, name=\"bn\", training=training)\n if self.activation != None:\n x = self.activation(x)\n return x\n\n def __call__(self, input, training=False, **args):\n return self.forward(input, training=training, **args)\n\nclass Pool3d:\n def __init__(self, pool_size=2, stride=2, padding=\"SAME\", name=\"Pool3d\", **args):\n self.name = name\n self.pool_size = pool_size\n self.stride = stride\n self.padding = padding\n\n def forward(self, input, **args):\n with tf.variable_scope(self.name):\n pool = tf.layers.max_pooling3d(input, \n pool_size=[self.pool_size, self.pool_size, self.pool_size],\n strides=[self.stride, self.stride, self.stride],\n padding=self.padding, name=\"pool\")\n return pool\n\n def __call__(self, input, **args):\n return self.forward(input, **args)\n\nclass ConvBlock:\n \"\"\"\n Convolution block layer: (Conv3d, Conv3d, Conv3dPool).\n \"\"\"\n def __init__(self, filters=32, name=\"ConvBlock\", **args):\n self.name = name\n self.filters = filters\n self.conv1 = Conv3d(filters=self.filters, name=\"conv1\", **args)\n self.conv2 = Conv3d(filters=self.filters, name=\"conv2\", **args)\n self.pool = Conv3d(filters=self.filters, stride=2, name=\"pool\", **args)\n\n def forward(self, input, **args):\n with tf.variable_scope(self.name):\n x = self.conv1(input, **args)\n x = self.conv2(x, **args)\n x = self.pool(x, **args)\n return x\n\n def __call__(self, input, **args):\n return self.forward(input, **args)\n\nclass PredictionActivation:\n \"\"\"\n Final activation layer: \n sigmoid function is applied to [:, :, :, :, 0] confidence of model output;\n prediction coordinates are renormalized from cell local coordinates to absolute:\n sigmoid function is applied and then added to cell indexes.\n \"\"\"\n def __init__(self, name=\"PredictionActivation\"):\n self.name = name\n\n def forward(self, input, name=\"prediction\", **args):\n with tf.variable_scope(self.name):\n confidences = input[:, :, :, :, 0]\n centers = input[:, :, :, :, 1:4]\n confidences = tf.reshape(confidences, \n [-1, 
input.shape[1], input.shape[2], input.shape[3], 1])\n confidences = tf.sigmoid(confidences, name=\"confidences\")\n indices = np.zeros((input.shape[1], input.shape[2], input.shape[3], 3))\n for i in range(input.shape[1]):\n for j in range(input.shape[2]):\n for k in range(input.shape[3]):\n indices[i, j, k, :] = [i, j, k]\n centers = tf.add(tf.nn.sigmoid(centers), indices, name=\"centers\")\n out = tf.concat([confidences, centers], axis=-1, name=name)\n return out\n\n def __call__(self, input, **args):\n return self.forward(input, **args)\n\nclass Loss:\n \"\"\"\n Cost function.\n \"\"\"\n def __init__(self, cost_lambda=5.):\n self.cost_lambda = cost_lambda\n\n def position_loss(self, target, prediction, sample_weight=1.):\n centers_target = target[:, :, :, :, 1:4]\n confidence_target = target[:, :, :, :, 0]\n centers_prediction = prediction[:, :, :, :, 1:4]\n\n pos_cost = tf.reduce_sum(tf.square(centers_target - centers_prediction), axis=-1)\n pos_cost = tf.multiply(pos_cost, confidence_target)\n pos_cost = tf.reduce_sum(pos_cost, axis=[1, 2, 3])\n\n pos_cost = tf.scalar_mul(self.cost_lambda, pos_cost)\n pos_cost = tf.multiply(pos_cost, sample_weight)\n\n return tf.reduce_mean(pos_cost, name=\"position_cost\")\n\n def confidence_loss(self, target, prediction, sample_weight=1.):\n confidence_target = target[:, :, :, :, 0]\n confidence_prediction = prediction[:, :, :, :, 0]\n confidence_cost = tf.square(confidence_target - confidence_prediction)\n confidence_cost = tf.reduce_sum(confidence_cost, axis=[1, 2, 3])\n\n confidence_cost = tf.multiply(confidence_cost, sample_weight)\n return tf.reduce_mean(confidence_cost, name=\"confidence_cost\")\n\n def __call__(self, target, prediction, sample_weight=1.):\n cost = tf.add(\n self.position_loss(target, prediction, sample_weight),\n self.confidence_loss(target, prediction, sample_weight),\n name=\"prediction_cost\")\n return cost\n\nclass Model:\n def __init__(self, \n input_shape=(None, default_cube_size, default_cube_size, default_cube_size, default_channel_num),\n cell_size=default_cell_size):\n \n self.input_shape = input_shape\n self.cell_size = cell_size\n self.output_shape = (None, \n input_shape[1] // cell_size, input_shape[2] // cell_size, input_shape[3] // cell_size, 4)\n\n # number of size downsampling depends on cube_size/cell_size ratio\n pooling_num = int(math.log2(cell_size))\n args = {}\n\n # if input grid size is:\n # 64x64x64x11\n self.layers = [\n Conv3d(32, name=\"Conv1\", **args),\n Conv3d(32, stride=2, name=\"Pool1\", **args)]\n # 64x64x64x32\n # 32x32x32x32\n\n if pooling_num - 1 == 1:\n self.layers += [ConvBlock(64, **args)]\n elif pooling_num - 1 == 2:\n self.layers += [\n ConvBlock(32, name=\"ConvBlock1\", **args), \n ConvBlock(64, name=\"ConvBlock2\", **args)]\n # 16x16x16x32\n # 8x8x8x64\n elif pooling_num - 1 == 3:\n self.layers += [\n ConvBlock(32, name=\"ConvBlock1\", **args),\n ConvBlock(64, name=\"ConvBlock2\", **args),\n ConvBlock(64, name=\"ConvBlock3\", **args)]\n \n self.layers += [\n Conv3d(128, name=\"ConvFinal\", **args),\n Conv3d(4, activation=None, use_batch_norm=False, **args),\n ]\n # 8x8x8x128\n # 8x8x8x4\n self.prediction_activation = PredictionActivation()\n\n def build(self, optimizer=tf.train.AdamOptimizer, \n cost_lambda=default_params[\"cost_lambda\"],\n cost_gamma=default_params[\"cost_gamma\"]):\n \"\"\"\n Builds computation graph.\n Args:\n optimizer: tf.train.Optimizer; default=tf.train.AdamOptimizer;\n cost_lambda: float; default=5.;\n parameter lambda for cost function;\n cost_gamma: float; 
default=1e-5;\n parameter gamma for regularization;\n \"\"\"\n\n # placeholder for input grids\n self.X = tf.placeholder(tf.float32, shape=self.input_shape, name=\"input_grid\")\n # placeholder for target labels\n self.Y = tf.placeholder(tf.float32, shape=self.output_shape, name=\"target\")\n # placeholder for boolean for batch norm \n self.training = tf.placeholder(tf.bool, (), name=\"training\")\n \n x = self.X\n # forward\n for l in self.layers:\n x = l(x, training=self.training)\n # output activation function\n self.output = self.prediction_activation(x, name=\"output\")\n\n # placeholder for learning_rate\n self.learning_rate = tf.placeholder(tf.float32, (), name=\"lrate_placeholder\")\n # not used\n self.sample_weight = tf.placeholder(tf.float32, shape=(None), name=\"sample_weight\")\n\n # loss function\n self.loss = Loss(cost_lambda=cost_lambda)\n # regularization terms\n reg_variables = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)\n reg_loss = cost_gamma * tf.reduce_sum(reg_variables, name=\"reg_cost\")\n # losses for prediction coordinates and confidence\n self.pos_loss = self.loss.position_loss(self.Y, self.output, self.sample_weight)\n self.conf_loss = self.loss.confidence_loss(self.Y, self.output, self.sample_weight)\n self.total_loss = tf.add_n([reg_loss, self.pos_loss, self.conf_loss], name=\"total_cost\")\n \n # optimizer\n self.optimizer = optimizer(learning_rate=self.learning_rate, name=\"optimizer\")\n # applying gradients\n update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n with tf.control_dependencies(update_ops):\n self.train_op = self.optimizer.minimize(self.total_loss)\n # weights saver\n self.saver = tf.train.Saver()\n self.saver_short = None\n self.saver_none = tf.train.Saver(max_to_keep=None)\n\n\n def save(self, path, full=True, step=None):\n \"\"\"\n Saves weights.\n Args:\n path: str; \n path to output file;\n full: bool; default=True;\n if True gradients are saved;\n step: int; default=None;\n global_step for saver;\n \"\"\"\n if full:\n if step == None:\n self.saver_none.save(self.sess, path)\n else:\n self.saver.save(self.sess, path, global_step=step)\n else:\n if self.saver_short == None:\n var_list = []\n for v in tf.global_variables():\n if \"Adam\" not in v.name:\n var_list.append(v)\n self.saver_short = tf.train.Saver(var_list)\n self.saver_short.save(self.sess, path, global_step=step)\n\n def load(self, path, full=False):\n \"\"\"\n Loads graph and weights from file.\n Args:\n path: str;\n filename prefix for .meta file;\n full: bool; default=False;\n if True, gradients and cost tensors are loaded as well.\n \"\"\"\n self.saver = tf.train.import_meta_graph(path + \".meta\")\n self.saver.restore(self.sess, path)\n self.saver_short = None\n self.saver_none = tf.train.Saver(max_to_keep=None)\n graph = tf.get_default_graph()\n self.X = graph.get_tensor_by_name(\"input_grid:0\")\n self.Y = graph.get_tensor_by_name(\"target:0\")\n self.training = graph.get_tensor_by_name(\"training:0\")\n self.output = graph.get_tensor_by_name(\"output:0\")\n if full:\n self.pos_loss = graph.get_tensor_by_name(\"position_cost:0\")\n self.conf_loss = graph.get_tensor_by_name(\"confidence_cost:0\")\n self.total_loss = graph.get_tensor_by_name(\"total_cost:0\")\n self.learning_rate = graph.get_tensor_by_name(\"lrate_placeholder:0\")\n self.sample_weight = graph.get_tensor_by_name(\"sample_weight:0\")\n update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n with tf.control_dependencies(update_ops):\n self.train_op = graph.get_operation_by_name(\"optimizer\")\n 
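# Note: the tensor/op names used above (\"position_cost:0\", \"confidence_cost:0\", \"total_cost:0\", \"optimizer\") are expected to match the names assigned in build(); a checkpoint whose graph was built with different names would fail to load here.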
\n\n def init_session(self, gpus=\"\", cpu_only=False):\n \"\"\"\n Initializes tensorflow session.\n Args:\n gpus: str; default=\"\";\n available gpus;\n cpu_only: bool; default=False;\n if True, session will be ran on cpu only.\n \"\"\"\n if cpu_only:\n config = tf.ConfigProto(device_count={'GPU': 0})\n else:\n gpu_options = tf.GPUOptions(visible_device_list=gpus)\n config = tf.ConfigProto(gpu_options=gpu_options)\n self.sess = tf.Session(config=config)\n self.sess.run(tf.global_variables_initializer())\n\n def train_step(self, grids, targets, sample_weight=[], \n minibatch_size=default_params[\"minibatch_size\"], learning_rate=1e-3):\n \"\"\"\n Single train step of model.\n\n Args:\n grids: np.array of shape (n_grids, cube_size, cube_size, cube_size, n_channels);\n input cubic grids for model forward pass;\n targets: np.array of shape (n_grids, N, N, N, 4) where N is the number of cells;\n true target labels for loss calculation;\n sample_weight: not used;\n minibatch_size: int; default=32;\n minibatch size, number of grids in single forward pass; \n n_grids will be splitted to these minibatches;\n learning_rate: float; default=1e-3;\n optimizer learning rate.\n\n Returns: (predictions, [pos_loss, conf_loss, total_loss]):\n predictions: np.array of shape (n_grids, N, N, N, 4);\n model output for each grid;\n pos_loss: float; \n loss value for predictions coordinates;\n conf_loss: float;\n loss value for predictions confidence values;\n total_loss: float;\n total loss value = pos_loss + conf_loss + reg_loss.\n \"\"\"\n\n if len(sample_weight) == 0:\n sample_weight = np.ones((len(targets)))\n prediction_list, pos_loss, conf_loss, total_loss = [], 0., 0., 0.\n for minibatch_index in range(math.ceil(len(grids) / minibatch_size)):\n i_start = minibatch_index * minibatch_size\n sw = sample_weight[i_start : i_start + minibatch_size]\n\n res = self.sess.run([self.train_op, self.output, self.pos_loss, self.conf_loss, self.total_loss],\n feed_dict={self.X : grids[i_start : i_start + minibatch_size],\n self.Y : targets[i_start : i_start + minibatch_size],\n self.sample_weight : sw, self.learning_rate : learning_rate,\n self.training : True})\n prediction_list.append(res[1])\n pos_loss += res[2] * np.sum(sw)\n conf_loss += res[3] * np.sum(sw)\n total_loss += res[4] * np.sum(sw)\n pos_loss /= np.sum(sample_weight)\n conf_loss /= np.sum(sample_weight)\n total_loss /= np.sum(sample_weight)\n return np.concatenate(prediction_list), [pos_loss, conf_loss, total_loss]\n\n def predict(self, grids, minibatch_size=default_params[\"minibatch_size\"]):\n \"\"\"\n Retrieving predictions for input grids.\n\n Args:\n grids: np.array of shape (n_grids, cube_size, cube_size, cube_size, n_channels);\n input cubic grids;\n minibatch_size: int; default=32;\n minibatch size, number of grids in single forward pass; \n n_grids will be splitted to these minibatches.\n \n Returns:\n predictions: np.array of shape (n_grids, N, N, N, 4);\n model output for each grid.\n \"\"\"\n prediction_list = []\n for minibatch_index in range(math.ceil(len(grids) / minibatch_size)):\n i_start = minibatch_index * minibatch_size\n\n predictions = self.sess.run(self.output, \n feed_dict={\n self.X : grids[i_start : i_start + minibatch_size],\n self.training : False})\n prediction_list.append(predictions)\n if len(prediction_list) > 0:\n return np.concatenate(prediction_list)\n else:\n return np.array([])\n\n def __call__(self, grids, **args):\n return self.predict(grids, 
**args)","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":16320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"431296892","text":"import sqlite3\r\nimport csv\r\n\r\nclass database:\r\n\r\n def __init__(self, path):\r\n self.db = sqlite3.connect(path, check_same_thread=False)\r\n\r\n\r\n def InsertRow(self, tablename, row):\r\n cursor2 = self.db.cursor()\r\n cursor2.execute(f'insert into {tablename} values (?,?,?,?,?,?)', (row[0], row[1], row[2], row[3], row[4], row[5]))\r\n self.db.commit()\r\n return\r\n\r\n def GetRows(self, tablename, query_param1, query_param2):\r\n cursor2 = self.db.cursor()\r\n cursor2.execute(f'SELECT * FROM {tablename} WHERE {query_param1} AND {query_param2}')\r\n rows = cursor2.fetchall()\r\n return rows\r\n\r\n\r\n def ExportCSV(self, tablename):\r\n csv_cursor = self.db.cursor()\r\n csv_cursor.execute(f'SELECT * FROM {tablename}')\r\n with open('export.csv', 'w', newline='') as out_csv_file:\r\n csv_out = csv.writer(out_csv_file)\r\n # write header\r\n csv_out.writerow([d[0] for d in csv_cursor.description])\r\n # write data\r\n for result in csv_cursor:\r\n csv_out.writerow(result)\r\n\r\n out_csv_file.close()\r\n return out_csv_file","sub_path":"database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":1163,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"285708740","text":"import logging\nimport os\nimport StringIO\nimport xml.sax\n\nimport requests\n\nlogging.basicConfig(format='[%(asctime)s] [%(process)d] [%(name)s] '\n '[%(levelname)s] %(message)s',\n datefmt='%Y-%m-%d %H:%M:%S +0000', level=logging.DEBUG)\nlogger = logging.getLogger(__name__)\n\n_REQUIRED_OPTS = ['WATCHES_URL', 'EVECENTRAL_URL', 'SYSTEM_ID', 'PRICES_URL']\n\n\ndef system_id():\n return os.environ.get('SYSTEM_ID')\n\n\nclass EveCentralMarketStatHandler(xml.sax.ContentHandler):\n def __init__(self):\n xml.sax.ContentHandler.__init__(self)\n # Initialize the flag to false\n self.mode = None\n self.capturing = None\n self.data = {'buy': {}, 'sell': {}, 'system_id': system_id()}\n\n def startElement(self, name, attrs):\n if name in ['buy', 'sell']:\n self.mode = name\n if self.mode and name in ['min', 'max', 'avg', 'median', 'stddev']:\n self.capturing = name\n self.data[self.mode][name] = ''\n\n def endElement(self, name):\n if name in ['buy', 'sell']:\n self.mode = None\n if self.mode and name in ['min', 'max', 'avg', 'median', 'stddev']:\n self.capturing = None\n\n def characters(self, content):\n if self.mode and self.capturing:\n partial = self.data[self.mode][self.capturing]\n self.data[self.mode][self.capturing] = partial + content\n\n def data(self):\n return self.data\n\n\ndef verify_parameters():\n missing = [n for n in _REQUIRED_OPTS if not os.environ.get(n, None)]\n if len(missing) > 0:\n logging.critical('Missing options in environment: %s' % missing)\n exit(1)\n\n\ndef watched_ids(http):\n try:\n wurl = os.environ.get('WATCHES_URL')\n w = http.get(url=wurl)\n except Exception as e:\n logger.exception(e)\n exit(1)\n return [watched['id'] for watched in w.json()]\n\n\ndef record_price(by_id, payload, http):\n try:\n purl = '/'.join([os.environ.get('PRICES_URL'), str(by_id)])\n logger.info('recording price')\n http.post(url=purl, json=payload)\n except Exception as e:\n logger.exception(e)\n exit(1)\n\n\ndef translate(content):\n try:\n parser = xml.sax.make_parser()\n handler = EveCentralMarketStatHandler()\n 
parser.setContentHandler(handler)\n source = StringIO.StringIO(content)\n logger.info('translating price')\n parser.parse(source)\n payload = handler.data\n except Exception as e:\n logger.exception(e)\n exit(1)\n return payload\n\n\ndef fetch_price(by_id, http):\n try:\n params = {'typeid': by_id, 'usesystem': system_id()}\n ecurl = os.environ.get('EVECENTRAL_URL')\n logger.info('fetching price')\n ec_body = http.get(url=ecurl, params=params).content\n except Exception as e:\n logger.exception(e)\n exit(1)\n return ec_body\n\n\ndef main():\n verify_parameters()\n headers = {'user-agent': 'github.com/eve-basil/checker[0.1.0-dev]'}\n\n session = requests.Session()\n session.headers.update(headers)\n\n for by_id in watched_ids(session):\n logger.info('checking price for type_id %s', by_id)\n ec_body = fetch_price(by_id, session)\n payload = translate(ec_body)\n record_price(by_id, payload, session)\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"checker.py","file_name":"checker.py","file_ext":"py","file_size_in_byte":3370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"215049109","text":"from batou.utils import cmd\nfrom batou import output\nimport argparse\nimport os.path\nimport pkg_resources\nimport shutil\n\n\ndef main(destination, **kw):\n develop = os.environ['BATOU_DEVELOP']\n if develop:\n output.annotate(\n 'Initializing with a development copy of batou will cause your '\n 'project to have a reference outside its repository. '\n 'Use at your own risk. ')\n develop = os.path.abspath(develop)\n print(('Bootstrapping new batou project in {}. This can take a while.'\n .format(os.path.abspath(destination))))\n if os.path.exists(destination):\n print(('{} exists already. Not copying template structure.'.format(\n destination)))\n os.chdir(destination)\n else:\n source = os.path.dirname(__file__) + '/init-template'\n shutil.copytree(source, destination)\n os.chdir(destination)\n cmd('hg -y init .')\n for key in list(os.environ):\n if key.startswith('BATOU_'):\n del os.environ[key]\n cmd('./batou --help')\n\n\ndef console_main():\n parser = argparse.ArgumentParser(\n description=\"\"\"\\\nInitialize batou project in the given directory. 
If the given directory does\nnot exist, it will be created.\n\nIf no directory is given, the current directory is used.\n\"\"\")\n parser.add_argument('destination')\n\n os.environ['BATOU_VERSION'] = pkg_resources.require('batou')[0].version\n os.environ['BATOU_DEVELOP'] = ''\n args = parser.parse_args()\n main(args.destination)\n","sub_path":"src/batou/init.py","file_name":"init.py","file_ext":"py","file_size_in_byte":1524,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"408148486","text":"import unittest\n\nfrom postpy.admin import (get_user_tables, get_primary_keys,\n get_column_metadata, install_extensions,\n reflect_table, reset)\nfrom postpy.base import Database, Column, PrimaryKey, Table\nfrom postpy.connections import connect\nfrom postpy.fixtures import PostgreSQLFixture\n\n\nclass TestTableStats(PostgreSQLFixture, unittest.TestCase):\n\n @classmethod\n def _prep(cls):\n cls.conn.autocommit = True\n cls.schema = 'stats_test'\n cls.table = 'admin_table_tests'\n create_table_statement = \"\"\"\\\n CREATE TABLE {schema}.{table} (\n mycol CHAR(2),\n mycol2 CHAR(3) NULL,\n PRIMARY KEY (mycol));\"\"\".format(schema=cls.schema,\n table=cls.table)\n\n with cls.conn.cursor() as cursor:\n cursor.execute('CREATE SCHEMA {};'.format(cls.schema))\n cursor.execute(create_table_statement)\n\n def test_get_user_tables(self):\n\n expected = (self.schema, self.table)\n result = get_user_tables(self.conn)\n\n self.assertIn(expected, result)\n\n def test_get_column_meta_data(self):\n expected = [\n {'name': 'mycol',\n 'data_type': 'character(2)',\n 'nullable': False},\n {'name': 'mycol2',\n 'data_type': 'character(3)',\n 'nullable': True}\n ]\n result = list(\n get_column_metadata(self.conn, self.table, schema=self.schema)\n )\n\n self.assertEqual(expected, result)\n\n def test_get_primary_keys(self):\n expected = ['mycol']\n result = list(get_primary_keys(self.conn, self.table, self.schema))\n\n self.assertEqual(expected, result)\n\n def test_reflect_table(self):\n columns = [Column('mycol', data_type='character(2)', nullable=False),\n Column('mycol2', data_type='character(3)', nullable=True)]\n primary_key = PrimaryKey(['mycol'])\n\n expected = Table(self.table, columns, primary_key, schema=self.schema)\n result = reflect_table(self.conn, self.table, self.schema)\n\n self.assertEqual(expected, result)\n\n @classmethod\n def _clean(cls):\n statement = 'DROP SCHEMA IF EXISTS {} CASCADE;'.format(cls.schema)\n\n with cls.conn.cursor() as cursor:\n cursor.execute(statement)\n\n\nclass TestDatabase(unittest.TestCase):\n\n def setUp(self):\n self.db = Database('reset_db_test')\n self.db_query = \"\"\"SELECT datname\n FROM pg_database\n WHERE datistemplate=false;\"\"\"\n self.conn = connect()\n self.conn.autocommit = True\n\n def test_reset(self):\n reset(self.db.name)\n\n with self.conn.cursor() as cursor:\n cursor.execute(self.db_query)\n result = [item[0] for item in cursor.fetchall()]\n\n self.assertIn(self.db.name, result)\n\n def tearDown(self):\n with self.conn.cursor() as cursor:\n cursor.execute(self.db.drop_statement())\n\n self.conn.close()\n\n\nclass TestExtensions(PostgreSQLFixture, unittest.TestCase):\n @classmethod\n def _prep(cls):\n cls.pg_extension = 'sslinfo'\n cls.conn.autocommit = True\n\n def test_install_extensions(self):\n\n install_extensions([self.pg_extension])\n\n @classmethod\n def _clean(cls):\n statement = 'DROP EXTENSION IF EXISTS {};'.format(cls.pg_extension)\n\n with cls.conn.cursor() as cursor:\n 
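# Teardown: drop the extension so repeated test runs start from a clean database.\n 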
cursor.execute(statement)\n","sub_path":"tests/test_admin.py","file_name":"test_admin.py","file_ext":"py","file_size_in_byte":3559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"69200702","text":"# -*- coding: utf-8 -*-\n# flake8: noqa\n# pylint: skip-file\n\"\"\"\nnose tests\n\"\"\"\n\nimport os\n\nfrom .. import _merge as merge\nfrom nose.tools import assert_equals\n\nWINDOWS = os.name == 'nt'\n\n\ndef test_vias_parallel():\n \"\"\"Test 'vias' parallel operation.\"\"\"\n if WINDOWS:\n # appveyor doesn't allow!\n return\n assert_equals(len(repr(merge.query('9783319020983', 'parallel'))) > 100,\n True)\n\ndef test_vias_multi():\n \"\"\"Test 'vias' multi operation.\"\"\"\n if WINDOWS:\n # appveyor doesn't allow!\n return\n assert_equals(len(repr(merge.query('9783319020983', 'multi'))) > 100, True)\n\ndef test_vias_serial():\n \"\"\"Test 'vias' serial operation.\"\"\"\n if WINDOWS:\n # appveyor doesn't allow!\n return\n assert_equals(len(repr(merge.query('9783319020983', 'serial'))) > 100,\n True)\n\ndef test_vias_cache_cleanning():\n \"\"\"Test 'vias' cache cleanning for serial.\"\"\"\n # test if the secondary cache (cache in vias) does clears... sequentially\n assert_equals(len(repr(merge.query('9781680450260', 'serial'))) < 20, True) # NO METADATA\n assert_equals(len(repr(merge.query('9780521581783', 'serial'))) > 100,\n True)\n assert_equals(len(repr(merge.query('9781680450260', 'serial'))) < 20, True) # NO METADATA\n","sub_path":"env/Lib/site-packages/isbnlib/test/test_vias.py","file_name":"test_vias.py","file_ext":"py","file_size_in_byte":1309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"603837514","text":"if __name__ == '__main__':\n L = int(input())\n R = int(input())\n left = min(L,R)\n right = max(L, R)\n max_xor = 0\n for i in range(left, right + 1):\n for j in range(i, right + 1):\n max_xor = max(max_xor, i ^ j)\n print(max_xor)\n","sub_path":"HackerRank/Algorithms/BitManipulation/maximizeXOR/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":263,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"380134066","text":"from reading import *\n\n# Below, write:\n# *The cartesian_product function\n# *All other functions and helper functions\n# *Main code that obtains queries from the keyboard,\n# processes them, and uses the below function to output csv results\n\n\n# helper function for outputting tables\n\ndef num_rows(table):\n '''(table) -> Integer\n Get the number of rows of a table.\n '''\n one_column = list(table.keys())[0]\n rows = table[one_column]\n return len(rows)\n\n\ndef print_csv(table):\n '''(table) -> NoneType\n Print a representation of table.\n '''\n columns = list(table.keys())\n print(','.join(columns))\n rows = num_rows(table)\n for i in range(rows):\n cur_column = []\n for column in columns:\n cur_column.append(table[column][i])\n print(','.join(cur_column))\n\n\ndef cartesian_product(tab_1, tab_2):\n '''(table, table) -> table\n Merge the two table into a table that cartesian product.\n '''\n table = {}\n column_1_list = list(tab_1.keys())\n column_2_list = list(tab_2.keys())\n for column in column_1_list + column_2_list:\n table[column] = []\n\n for row_1 in range(num_rows(tab_1)):\n for row_2 in range(num_rows(tab_2)):\n for column_1 in column_1_list:\n value = tab_1[column_1][row_1]\n table[column_1].append(value)\n for column_2 in column_2_list:\n value = 
tab_2[column_2][row_2]\n table[column_2].append(value)\n\n return table\n\n\ndef process_from(from_str, database):\n '''(string, database) -> table\n Process the \"from string\" to get a result table.\n '''\n from_table_list = from_str.split(\",\")\n query_table = None\n for from_table in from_table_list:\n temp_table = database[from_table]\n if query_table:\n query_table = cartesian_product(query_table, temp_table)\n else:\n query_table = temp_table\n\n return query_table\n\ndef process_where(where_str, query_table):\n '''(string, table) -> table\n Process the \"where string\" to get a result table.\n '''\n if where_str:\n where_list = where_str.split(\",\")\n for optional in where_list:\n\n temp_table = {}\n column_list = query_table.keys()\n for column in column_list:\n temp_table[column] = []\n\n if \"=\" in optional:\n col_1, col_2 = optional.split(\"=\")\n optional_flag = \"=\"\n elif \">\" in optional:\n col_1, col_2 = optional.split(\">\")\n optional_flag = \">\"\n elif \"<\" in optional:\n col_1, col_2 = optional.split(\"<\")\n optional_flag = \"<\"\n\n for i in range(num_rows(query_table)):\n value_1 = query_table[col_1][i]\n if \"'\" in col_2:\n value_2 = col_2[1:-1]\n else:\n value_2 = query_table[col_2][i]\n\n if optional_flag == \"=\" and value_1 == value_2:\n match_flag = True\n elif optional_flag == \">\" and value_1 > value_2:\n match_flag = True\n elif optional_flag == \"<\" and value_1 < value_2:\n match_flag = True\n else:\n match_flag = False\n\n if match_flag:\n for column in column_list:\n temp_table[column].append(query_table[column][i])\n\n query_table = temp_table\n\n return query_table\n\n\ndef process_select(select_str, query_table):\n '''(string, table) -> table\n Process the \"select string\" to get a result table.\n '''\n if select_str == \"*\":\n return query_table\n\n select_list = select_str.split(\",\")\n select_table = {}\n for select in select_list:\n select_table[select] = query_table[select]\n\n return select_table\n\n\ndef process_query(query, database):\n '''(string, database) -> table\n Process the \"query string\" to get the final result table.\n '''\n\n from_where_str = query.split(\"from \")[1]\n from_str = from_where_str.split(\" where \")[0]\n query_table = process_from(from_str, database)\n\n if \"where\" in query:\n where_str = from_where_str.split(\" where \")[1]\n else:\n where_str = \"\"\n query_table = process_where(where_str, query_table)\n\n select_str = query.split(\" from \")[0]\n select_str = select_str.replace(\"select \", \"\")\n query_table = process_select(select_str, query_table)\n\n return query_table\n\n\n\nif(__name__ == \"__main__\"):\n database = read_database()\n while True:\n query = input(\"Enter a SQuEaL query, or a blank line to exit:\")\n if query:\n print_csv(process_query(query, database))\n else:\n break","sub_path":"starter/4/squeal.py","file_name":"squeal.py","file_ext":"py","file_size_in_byte":4790,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"301125945","text":"import datetime\nimport os\nimport torch\nimport time\nimport timeit\nimport shutil\nimport torchvision.models as models\nimport numpy as np\nimport torchvision.transforms as standard_transforms\nimport torchvision.utils as vutils\nimport My_train.joint_transforms as joint_transforms\nimport My_train.transforms as extended_transforms\nimport My_train.color_transforms as Colorjitter\n# import My_train.size_transforms as Mytransforms\nfrom My_train import size_transforms as Mytransforms\nimport 
argparse\nimport torch.nn.functional\nimport torch\n\nfrom PIL import Image\nfrom cfgs import DenseASPP121\nfrom cfgs import DenseASPP161\nfrom tensorboardX import SummaryWriter\nfrom torch import optim\nfrom torch.autograd import Variable\nfrom torch.backends import cudnn\nfrom torch.utils.data import DataLoader\nfrom My_train import segmentation_dataloader\n\n# from models.DenseASPP_v3 import *\n# from models.DenseASPP_v2 import *\nfrom models.DenseASPP_boundary_depthwise import *\n\nfrom My_train.misc import check_mkdir, evaluate, AverageMeter, compute_mean_iou\nfrom collections import OrderedDict\n\nparser = argparse.ArgumentParser(description='DenseASPP training')\nparser.add_argument('--input_height', type=int, help='input height', default=512)\nparser.add_argument('--input_width', type=int, help='input width', default=512)\nparser.add_argument('--train_batch_size', type=int, help='train batch size', default=4)\nparser.add_argument('--val_batch_size', type=int, help='validation batch size', default=4)\nparser.add_argument('--num_threads', type=int, help='number of threads to use for data loading', default=12)\nparser.add_argument('--learning_rate', type=float, help='initial learning rate', default=3e-4)\nparser.add_argument('--num_epochs', type=int, help='number of epochs', default=80)\nparser.add_argument('--weight_decay', type=float, help='weight decay', default=1e-5)\nparser.add_argument('--print_frequency', type=int, help='print frequency', default=10)\nparser.add_argument('--val_save_to_img_file', type=bool, help='save validation image file', default=True)\nparser.add_argument('--val_img_sample_rate', type=float, help='randomly sample some validation results to display', default=0.05)\nparser.add_argument('--checkpoint_path', type=str, help='path ro a specific checkpoint to load',\n default='/home/mk/Semantic_Segmentation/DenseASPP-master/pretrained_model/densenet121.pth')\nparser.add_argument('--GPU', type=int, help='the number of GPU', default=1)\nparser.add_argument('--model_freq', type=int, help='save the model', default=100)\n\nargs = parser.parse_args()\n\ncudnn.benchmark = True\n\ndef poly_lr_scheduler(init_lr, epoch, maxEpoch=args.num_epochs, power=0.9):\n \"init_lr : base learning rate \\\n iter : current iteration \\\n lr_decay_iter : how frequently decay occurs, default is 1 \\\n power : polynomial power\"\n lr = init_lr * ((1 - epoch / maxEpoch) ** power)\n # for param_group in optimizer.param_groups:\n # param_group['lr'] = lr\n return lr\n\ndef main():\n net = DenseASPP_boundary(model_cfg=DenseASPP121.Model_CFG).cuda()\n # densenet121 = models.densenet121(pretrained=True)\n if len(args.checkpoint_path) == 0:\n curr_epoch = 1\n # Initializing 'best_record'\n args.best_record = {'epoch': 0, 'val_loss': 1e10, 'acc': 0, 'acc_cls': 0, 'mean_iu': 0, 'fwavacc': 0}\n else:\n # load the pretrained model\n print('training resumes from ' + args.checkpoint_path)\n # lambda ==> argument: manipulate(argument)\n pretrained_weight = torch.load(args.checkpoint_path, map_location=lambda storage, loc: storage)\n \"\"\" map_location = lambda storage, loc: storage--> Load all tensors onto the CPU, using a function\"\"\"\n new_state_dict = OrderedDict()\n model_dict = net.state_dict()\n for key, value in pretrained_weight.items():\n name = key\n new_state_dict[name] = value\n if name.find('norm') >= 9:\n print('norm contained from pretrained_weight : ', name)\n value.requires_grad = False\n # if name.find('conv0') >= 9:\n # print('norm contained from pretrained_weight : ', name)\n # 
value.requires_grad = False\n\n new_state_dict.pop('features.conv0.weight')\n new_state_dict.pop('features.norm5.weight')\n new_state_dict.pop('features.norm5.bias')\n new_state_dict.pop('features.norm5.running_mean')\n new_state_dict.pop('features.norm5.running_var')\n new_state_dict.pop('classifier.weight')\n new_state_dict.pop('classifier.bias')\n model_dict.update(new_state_dict)\n net.load_state_dict(model_dict)\n # pretrained_dict = {key: value for key, value in pretrained_dict.items() if key in model_dict}\n # model_dict.update(pretrained_dict)\n # pretrained_dict = {key: value for key, value in pretrained_dict.items() if key != 'classifier.weight' or 'classifier.bias'}\n\n # model.load_state_dict(model_dict, strict=False)\n # model.load_state_dict(new_pretrained_dict, strict=False)\n curr_epoch = 1\n args.best_record = {'epoch': 0, 'val_loss': 1e10, 'acc': 0, 'acc_cls': 0, 'mean_iu': 0, 'fwavacc': 0}\n\n # ---------------------------------- [[ data - augmentation ]] ---------------------------------------------------\n # ----------------------------------------------------------------------------------------------------------------\n # [[joint_transforms]]\n # both raw image and gt are transformed by data-augmentation\n train_joint_transform = joint_transforms.Compose([\n # joint_transforms.ImageScaling(size=[0.5, 2.0]),\n joint_transforms.RandomHorizontallyFlip(),\n joint_transforms.RandomSizedCrop(size=args.input_width),\n ])\n\n # transform : To preprocess images\n # Compose : if there are a lot of preprocessed images, compose plays a role as collector in a single space.\n input_transform = standard_transforms.Compose([\n # Colorjitter.ColorJitter(brightness=[-10, 10]),\n standard_transforms.ColorJitter(hue=0.1),\n standard_transforms.ToTensor(),\n # standard_transforms.Normalize(*my_mean_std)\n ])\n\n target_transform = extended_transforms.MaskToTensor()\n\n train_set = segmentation_dataloader.CityScapes('fine', 'train', joint_transform=train_joint_transform,\n transform=input_transform, target_transform=target_transform)\n train_loader = DataLoader(train_set, batch_size=args.train_batch_size, num_workers=args.num_threads, shuffle=True)\n\n # optimizer = optim.Adam(net.parameters(), lr=args.learning_rate, weight_decay=args.weight_decay)\n\n criterion = torch.nn.CrossEntropyLoss(ignore_index=segmentation_dataloader.ignore_label).cuda()\n\n num_training_samples = len(train_set)\n steps_per_epoch = np.ceil(num_training_samples / args.train_batch_size).astype(np.int32)\n num_total_steps = args.num_epochs * steps_per_epoch\n\n print(\"total number of samples: {}\".format(num_training_samples))\n print(\"total number of steps : {}\".format(num_total_steps))\n\n # COUNT_PARAMS\n total_num_paramters = 0\n for param in net.parameters():\n total_num_paramters += np.array(list(param.size())).prod()\n\n print(\"number of trainable parameters: {}\".format(total_num_paramters))\n\n for epoch in range(curr_epoch, args.num_epochs + 1):\n lr_ = poly_lr_scheduler(init_lr=args.learning_rate, epoch=epoch - 1)\n optimizer = optim.Adam(net.parameters(), lr=lr_, weight_decay=args.weight_decay)\n\n train(train_loader, net, criterion, optimizer, epoch, args)\n\n print('Training Done!!')\n\ndef train(train_loader, net, criterion, optimizer, epoch, train_args):\n train_loss = AverageMeter()\n\n # curr_iter : total dataset per epoch\n curr_iter = (epoch - 1) * len(train_loader)\n index = 0\n predictions_all = []\n visual = []\n\n start_time = time.time()\n net.train()\n for i, data in 
enumerate(train_loader):\n inputs, labels, boundarys = data\n bound_inputs = torch.cat((inputs, boundarys), dim=1)\n\n assert inputs.size()[2:] == labels.size()[1:]\n N = inputs.size(0)\n bound_inputs = Variable(bound_inputs).cuda()\n labels = Variable(labels).cuda()\n\n optimizer.zero_grad()\n\n outputs = net(bound_inputs)\n assert outputs.size()[2:] == labels.size()[1:]\n assert outputs.size()[1] == segmentation_dataloader.num_classes\n\n before_op_time = timeit.default_timer()\n loss = criterion(outputs, labels)\n duration = timeit.default_timer() - before_op_time\n\n loss.backward()\n optimizer.step()\n batch_time = time.time() - start_time\n\n train_loss.update(loss.data[0], N)\n curr_iter += 1\n\n writer.add_scalar('train_loss', train_loss.avg, curr_iter)\n\n if (i + 1) % train_args.print_frequency == 0:\n examples_time = args.train_batch_size / duration\n print('epoch: %d | iter: %d / %d | train loss: %.5f | examples/s: %4.2f | time_elapsed: %.5f''s' %\n (epoch, i + 1, len(train_loader), train_loss.avg, examples_time, batch_time))\n\n # SAVE THE IMAGES\n data_transform = standard_transforms.ToTensor()\n\n np_outputs = outputs.data.cpu().numpy()\n result = np_outputs.argmax(axis=1)\n predictions_all.append(result)\n\n predictions_all = np.concatenate(predictions_all)\n for idx, data in enumerate(predictions_all):\n predictions_pil = segmentation_dataloader.colorize_mask(data)\n predictions = data_transform(predictions_pil.convert('RGB'))\n visual.extend([predictions])\n\n visual = torch.stack(visual, 0)\n visual = vutils.make_grid(visual, nrow=1, padding=0)\n # result = np_outputs.argmax(axis=1)[0]\n # row, col = result.shape\n # dst = np.zeros((row, col, 3), dtype=np.uint8)\n #\n # for i in range(19):\n # dst[result == i] = COLOR_MAP[i]\n # dst = np.array(dst, dtype=np.uint8)\n # dst = cv2.cvtColor(dst, cv2.COLOR_RGB2BGR)\n # if not os.path.exists(os.path.join(ckpt_path, 'TensorboardX', ImageNet, exp_name_ImageNet, 'prediction')):\n # os.makedirs(os.path.join(ckpt_path, 'TensorboardX', ImageNet, exp_name_ImageNet, 'prediction'))\n #\n # cv2.imwrite(os.path.join(ckpt_path, 'TensorboardX', ImageNet, exp_name_ImageNet, 'prediction/%06d.png' %\n # epoch), dst)\n writer.add_image('Output_image_{}'.format(epoch), visual)\n\n # SAVE THE MODEL\n if (i + 1) % train_args.print_frequency == 0:\n torch.save(net.state_dict(), os.path.join(ckpt_path, 'Model', ImageNet, exp_name_ImageNet,\n 'model-{}'.format(idx + 1) + '.pkl'))\n\n with open(os.path.join(ckpt_path, 'TensorboardX', ImageNet, exp_name_ImageNet, 'LR_v0{}_{}.txt'.format(x,version)), 'a') as LRtxt:\n LRtxt.write(\"index : {}, epoch : {}, learning rate : {: f}\".format(index, epoch, optimizer.param_groups[0]['lr']) + '\\n')\n index += 1\n\nif __name__ == '__main__':\n x = 1\n version = '0'\n\n ckpt_path = '../../ckpt'\n ImageNet = 'ImageNet/DenseNet121_v3'\n exp_name_ImageNet = 'segImageNet_v0{}_{}'.format(x, version)\n\n # [[ SummaryWriter]]\n # Writes 'Summary' directly to event files.\n # writer = SummaryWriter(os.path.join(ckpt_path, 'exp', exp_name))\n writer = SummaryWriter(os.path.join(ckpt_path, 'TensorboardX', ImageNet, exp_name_ImageNet))\n\n check_mkdir(ckpt_path)\n check_mkdir(os.path.join(ckpt_path, 'Model', ImageNet, exp_name_ImageNet))\n open(os.path.join(ckpt_path, 'Model', ImageNet, exp_name_ImageNet, str(datetime.datetime.now()) + '.txt'),\n 'w').write(\n str(args) + '\\n\\n')\n\n src = \"/home/mk/Semantic_Segmentation/DenseASPP-master/My_train/segmentation_main_v3.py\"\n src_model = 
\"/home/mk/Semantic_Segmentation/DenseASPP-master/models/DenseASPP.py\"\n\n copy_path = os.path.join(ckpt_path, 'TensorboardX', ImageNet, exp_name_ImageNet,\n \"segmentation_main_v3_\" + \"v0{}_{}.py\".format(x, version))\n model_copy_path = os.path.join(ckpt_path, 'TensorboardX', ImageNet, exp_name_ImageNet,\n \"DenseASPP_\" + \"v0{}_{}.py\".format(x, version))\n\n shutil.copy(src, copy_path)\n shutil.copy(src_model, model_copy_path)\n\n GPU_ID = args.GPU\n\n main()\n","sub_path":"My_train/segmentation_main_v3_1.py","file_name":"segmentation_main_v3_1.py","file_ext":"py","file_size_in_byte":12771,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"540710902","text":"# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors\n# License: GNU General Public License v3. See license.txt\n\nfrom __future__ import unicode_literals\n\nimport frappe\nfrom frappe import _\nfrom frappe.model import no_value_fields\nfrom erpnext.stock.doctype.packing_slip.packing_slip import PackingSlip\nfrom frappe.utils import cint, flt\n\n\nclass CustomPackingSlip(PackingSlip):\n\n\tdef validate(self):\n\t\t\"\"\"\n\t\t\t* Validate existence of submitted Delivery Note\n\t\t\t* Case nos do not overlap\n\t\t\t* Check if packed qty doesn't exceed actual qty of delivery note\n\n\t\t\tIt is necessary to validate case nos before checking quantity\n\t\t\"\"\"\n\t\tself.validate_delivery_note()\n\t\tself.validate_items_mandatory()\n\t\tself.validate_case_nos()\n\t\tself.validate_qty()\n\t\tself.validate_item()\n\n\t\tfrom erpnext.utilities.transaction_base import validate_uom_is_integer\n\t\tvalidate_uom_is_integer(self, \"stock_uom\", \"qty\")\n\t\tvalidate_uom_is_integer(self, \"weight_uom\", \"net_weight\")\n\n\tdef validate_qty(self):\n\t\t\"\"\"Check packed qty across packing slips and delivery note\"\"\"\n\t\t# Get Delivery Note Items, Item Quantity Dict and No. of Cases for this Packing slip\n\t\tdn_details, ps_item_qty, ps_item, no_of_cases = self.get_details_for_packing()\n\n\t\tfor item in dn_details:\n\t\t\tnew_packed_qty = (flt(ps_item_qty[item['item_code']]) * no_of_cases) + \\\n\t\t\t\tflt(item['packed_qty'])\n\t\t\tif new_packed_qty > flt(item['qty']) and no_of_cases:\n\t\t\t\tself.recommend_new_qty(item, ps_item_qty, no_of_cases)\n\n\tdef validate_item(self):\n\t\t\"\"\"Check packed items across packing slips and delivery note\"\"\"\n\t\t# Get Delivery Note Items, Item Quantity Dict and No. of Cases for this Packing slip\n\t\tdn_details, ps_item_qty, ps_item, no_of_cases = self.get_details_for_packing()\n\n\t\tdelivery_note_items = [d['item_code'] for d in dn_details]\n\t\t\n\t\tfor item in ps_item:\n\t\t\tif item not in delivery_note_items:\n\t\t\t\tself.recommend_delete_item(item)\n\n\n\tdef get_details_for_packing(self):\n\t\t\"\"\"\n\t\t\tReturns\n\t\t\t* 'Delivery Note Items' query result as a list of dict\n\t\t\t* Item Quantity dict of current packing slip doc\n\t\t\t* No. 
of Cases of this packing slip\n\t\t\"\"\"\n\n\t\trows = [d.item_code for d in self.get(\"items\")]\n\n\t\t# also pick custom fields from delivery note\n\t\tcustom_fields = ', '.join(['dni.`{0}`'.format(d.fieldname)\n\t\t\tfor d in frappe.get_meta(\"Delivery Note Item\").get_custom_fields()\n\t\t\tif d.fieldtype not in no_value_fields])\n\n\t\tif custom_fields:\n\t\t\tcustom_fields = ', ' + custom_fields\n\n\t\tcondition = \"\"\n\t\tif rows:\n\t\t\tcondition = \" and item_code in (%s)\" % (\", \".join([\"%s\"]*len(rows)))\n\n\t\t# gets item code, qty per item code, latest packed qty per item code and stock uom\n\t\tres = frappe.db.sql(\"\"\"select item_code, sum(qty) as qty,\n\t\t\t(select sum(psi.qty * (abs(ps.to_case_no - ps.from_case_no) + 1))\n\t\t\t\tfrom `tabPacking Slip` ps, `tabPacking Slip Item` psi\n\t\t\t\twhere ps.name = psi.parent and (ps.docstatus = 1 or ps.docstatus = 0)\n\t\t\t\tand ps.delivery_note = dni.parent and psi.item_code=dni.item_code\n\t\t\t\tand from_case_no != {from_case_no}) as packed_qty,\n\t\t\tstock_uom, item_name, description, dni.batch_no {custom_fields}\n\t\t\tfrom `tabDelivery Note Item` dni\n\t\t\twhere parent=%s {condition}\n\t\t\tgroup by item_code\"\"\".format(condition=condition, custom_fields=custom_fields, from_case_no=self.from_case_no),\n\t\t\ttuple([self.delivery_note] + rows), as_dict=1)\n\n\t\tps_item_qty = dict([[d.item_code, d.qty] for d in self.get(\"items\")])\n\t\tps_item = [d.item_code for d in self.get(\"items\")]\n\t\tno_of_cases = cint(self.to_case_no) - cint(self.from_case_no) + 1\n\n\t\treturn res, ps_item_qty, ps_item, no_of_cases\n\n\n\tdef recommend_new_qty(self, item, ps_item_qty, no_of_cases):\n\t\t\"\"\"\n\t\t\tRecommend a new quantity and raise a validation exception\n\t\t\"\"\"\n\t\titem['recommended_qty'] = (flt(item['qty']) - flt(item['packed_qty'])) / no_of_cases + 1\n\t\titem['specified_qty'] = flt(ps_item_qty[item['item_code']])\n\t\tif not item['packed_qty']: item['packed_qty'] = 0\n\n\t\tfrappe.throw(_(\"Quantity for Item {0} must be less than {1}\").format(item.get(\"item_code\"), item.get(\"recommended_qty\")))\n\n\tdef recommend_delete_item(self, item):\n\t\t\"\"\"\n\t\t\tRecommend deleting the product and raise a validation exception\n\t\t\"\"\"\n\t\tfrappe.throw(_(\"Товар {0} отсутствует в накладной {1}. 
Удалите данный товар из накладной.\").format(item, self.delivery_note))\n\n","sub_path":"trava_erpnext/overrides/packing_slip.py","file_name":"packing_slip.py","file_ext":"py","file_size_in_byte":4324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"157460612","text":"#!/usr/bin/env python\n\n__author__ = \"Patrick Wieschollek\"\n__email__ = \"patrick@wieschollek.info\"\n\n\nimport tensorflow as tf\nimport stream, network\nimport tfblocks.diary, tfblocks.trainer, tfblocks.callbacks\nimport time, os.path\nimport numpy as np\n\nFLAGS = tf.app.flags.FLAGS\n\ntf.app.flags.DEFINE_string('train_dir', 'cifar10_train/', \n \"\"\"Directory where to write event logs \"\"\"\n \"\"\"and checkpoint.\"\"\")\ntf.app.flags.DEFINE_string('data_dir', 'data', \n \"\"\"Directory of the plain dataset \"\"\")\ntf.app.flags.DEFINE_integer(\"batch_size\", 64, \"number of images in each batch\")\ntf.app.flags.DEFINE_integer(\"epoch_iterations\", 500, \"iterations ine ach epoch\")\ntf.app.flags.DEFINE_float('learning_rate', 0.001, 'Initial learning rate.')\ntf.app.flags.DEFINE_integer('gpus', 1, 'Initial learning rate.')\n\ndef main(argv=None):\n\n # compute and indidual path from current run\n work_dir = os.path.abspath(os.path.join(FLAGS.train_dir, \"runs\", str(int(time.time()))))\n\n # where does the data comes from ...\n train_data = stream.Stream(FLAGS.data_dir, 'train.h5', FLAGS.batch_size)\n teacher = tfblocks.trainer.Trainer(optimizer=tf.train.AdamOptimizer(FLAGS.learning_rate))\n teacher.init(network.Network, train_data, num=FLAGS.gpus)\n\n # validation data generation\n tf.get_variable_scope().reuse_variables()\n valid_data = stream.Stream(FLAGS.data_dir, 'validation.h5', FLAGS.batch_size)\n valid_net = network.Network()\n valid_net.build(valid_data.next())\n\n # keep track of everything during training\n diary = tfblocks.diary.Diary()\n diary.epochs = 100\n diary.batch_size = FLAGS.batch_size\n diary.batches_per_epoch = FLAGS.epoch_iterations\n\n diary.callbacks.append(tfblocks.callbacks.AccuracyCallback(5000, FLAGS.batch_size, valid_net.accuracy_op))\n diary.callbacks.append(tfblocks.callbacks.EtaCallback())\n\n init_op = tf.initialize_all_variables()\n with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:\n\n # create all variables\n sess.run(init_op)\n\n # start the data creation mechanism\n train_data.start(sess);\n valid_data.start(sess);\n\n # init all class variables that depends on the current session\n diary.attach(sess, work_dir)\n\n # as long as the worker are creating data and there are iterations left\n # do an update step\n while train_data.exists():\n err = diary.train_step(sess, teacher, train_data, 10)\n\n train_data.stop()\n\n\nif __name__ == '__main__':\n tf.app.run()\n \n","sub_path":"mnist/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":2513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"117065384","text":"import copy\nimport itertools\ngrid = 2\ng = [[0 for x in range(grid+1)] for y in range(grid+1)]\ng[0][0] =1\n\nmove_right = (0,1)\nmove_down = (1,0)\npos = [move_right, move_down]\n\nstart = [0,0]\n\ndef move(g, dir):\n f = copy.deepcopy(g)\n for i in range(grid+1):\n find = False\n for j in range(grid+1):\n if g[i][j] == 1:\n pos = i,j\n g[i][j] = 0\n find = True\n break\n if find:\n break\n try:\n pos = (i+dir[0], j+dir[1])\n g[pos[0]][pos[1]] = 1\n moves = True\n return g, moves\n except IndexError as e:\n moves = False\n 
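[Note on the packing_slip.py record above] The quantity rule that `CustomPackingSlip.validate_qty` enforces can be isolated from Frappe entirely: across all slips for one delivery note, (qty per case × number of cases) plus what is already packed may not exceed the ordered quantity. The names and numbers below are illustrative assumptions, not values from the record:

```python
def exceeds_order(ordered_qty, already_packed, qty_per_case, from_case, to_case):
    # Mirrors validate_qty: cases are inclusive, so 3..5 is three cases.
    no_of_cases = to_case - from_case + 1
    new_packed = qty_per_case * no_of_cases + already_packed
    return new_packed > ordered_qty

# 10 ordered, 4 already packed, 2 per case into cases 3..5 -> 4 + 6 = 10, OK.
assert not exceeds_order(10, 4, 2, 3, 5)
# One more case (3..6) would pack 12 against 10 ordered.
assert exceeds_order(10, 4, 2, 3, 6)
```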
return f, moves\n\npossible = []\nfor a in pos:\n for b in pos:\n for c in pos:\n for d in pos:\n possible.append((a,b,c,d))\n\nsolutions = []\nfor a in possible:\n within = []\n g = [[0 for x in range(grid+1)] for y in range(grid+1)]\n g[0][0] =1\n for c,b in enumerate(a):\n g,is_possible = move(g,b)\n within.append(b)\n if is_possible == False:\n break\n if c == len(a)-1:\n solutions.append(within)\n\nprint(len(solutions))\n# 6\n","sub_path":"15.py","file_name":"15.py","file_ext":"py","file_size_in_byte":1167,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"200867433","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\nfrom apps.hello.models import Person, RequestsLog\nfrom django.test import TestCase\nfrom django.test.utils import override_settings\nfrom django.core.files.uploadedfile import SimpleUploadedFile\nimport os\nfrom django.conf import settings\nimport shutil\nimport datetime\nfrom freezegun import freeze_time\n\n\nREQUIRED_FIELDS = [\n 'bio',\n 'date_of_birth',\n 'email',\n 'id',\n 'jabber',\n 'last_name',\n 'name',\n 'other_contacts',\n 'skype',\n 'photo'\n]\n\nREQUEST_DATA = {\n 'method': 'GET',\n 'path': '/requests/',\n 'status_code': 200\n}\n\n\n@override_settings(MEDIA_ROOT=settings.MEDIA_TEST_ROOT)\nclass PersonModelTest(TestCase):\n\n def setUp(self):\n self.test_person = Person.objects.first()\n self.test_img_path = os.path.join(\n settings.BASE_DIR,\n 'assets/img/test_image.png'\n )\n with open(self.test_img_path, 'rb') as test_img:\n self.test_image_1 = SimpleUploadedFile(\n name='test_image_1.png',\n content=test_img.read(),\n content_type='image/png'\n )\n self.test_person.photo = self.test_image_1\n self.test_person.save()\n self.first_photo_file = self.test_person.photo.path\n\n def tearDown(self):\n test_dir = os.path.exists(settings.MEDIA_TEST_ROOT)\n if test_dir:\n shutil.rmtree(settings.MEDIA_TEST_ROOT)\n\n def test_unicode_method(self):\n \"\"\"Ensures, the __unicode__ method return proper string\"\"\"\n test_person = Person.objects.first()\n test_person.name = u'Александр'\n test_person.last_name = u'Юникод'\n test_person.save()\n self.assertEqual(test_person.__unicode__(), u'Александр Юникод')\n\n def test_proper_model_fields(self):\n \"\"\"Check, if person model consist of proper fields\"\"\"\n exist_fields = Person._meta.get_all_field_names()\n self.assertItemsEqual(exist_fields, REQUIRED_FIELDS)\n\n def test_save_method_store_photo_file_to_proper_path(self):\n \"\"\"Check, if save method, store photo_file to proper path\"\"\"\n self.assertTrue(os.path.exists(self.first_photo_file))\n self.assertEqual(\n self.first_photo_file,\n settings.MEDIA_TEST_ROOT + self.test_person.photo.name\n )\n\n def test_save_method_crop_photo_to_proper_size(self):\n \"\"\"Check, if save method, crop image to proper size\"\"\"\n with open(self.test_img_path, 'rb') as test_img:\n self.test_image_2 = SimpleUploadedFile(\n name='test_image_2.png',\n content=test_img.read(),\n content_type='image/png'\n )\n self.test_person.photo = self.test_image_2\n photo_width_before_save = self.test_person.photo.width\n photo_height_before_save = self.test_person.photo.height\n self.assertTrue(\n photo_width_before_save or photo_height_before_save > 200\n )\n self.test_person.save()\n self.assertTrue(\n self.test_person.photo.width <= 200 and\n self.test_person.photo.height <= 200\n )\n self.assertEqual(\n self.test_person.photo.width or self.test_person.photo.height,\n 200\n )\n\n def 
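[Note on the 15.py record above] The brute-force move enumeration counts monotone lattice paths on a 2×2 grid, which equals the central binomial coefficient C(2n, n). A compact equivalent using itertools.product — a sketch, not the original code:

```python
from itertools import product
from math import factorial

grid = 2
# A valid path is any ordering of `grid` down-moves and `grid` right-moves.
paths = sum(1 for moves in product([(0, 1), (1, 0)], repeat=2 * grid)
            if sum(m[0] for m in moves) == grid and sum(m[1] for m in moves) == grid)
closed_form = factorial(2 * grid) // (factorial(grid) ** 2)
print(paths, closed_form)  # both print 6 for grid=2, matching the record's output
```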
test_save_method_resize_photo_maintaining_aspect_ratio(self):\n \"\"\"Check, if save method resize photo maintaining_aspect_ratio\"\"\"\n with open(self.test_img_path, 'rb') as test_img:\n self.test_image_2 = SimpleUploadedFile(\n name='test_image_2.png',\n content=test_img.read(),\n content_type='image/png'\n )\n self.test_person.photo = self.test_image_2\n aspect_ratio_before_save = (\n self.test_person.photo.width /\n self.test_person.photo.height\n )\n aspect_ratio_after_save = (\n self.test_person.photo.width /\n self.test_person.photo.height\n )\n self.test_person.save()\n self.assertEqual(\n aspect_ratio_before_save,\n aspect_ratio_after_save\n )\n\n def test_save_method_remove_unused_img(self):\n \"\"\"Check, if model save method delete unused images\"\"\"\n with open(self.test_img_path, 'rb') as test_img:\n self.test_image_2 = SimpleUploadedFile(\n name='test_image_2.png',\n content=test_img.read(),\n content_type='image/png'\n )\n self.test_person.photo = self.test_image_2\n self.test_person.save()\n self.second_photo_file = self.test_person.photo.path\n self.assertTrue(os.path.exists(self.second_photo_file))\n self.assertFalse(os.path.exists(self.first_photo_file))\n\n def test_photo_update_status_return_true(self):\n \"\"\"Check, for True status, if new photo_file is given\"\"\"\n with open(self.test_img_path, 'rb') as test_img:\n self.test_image_2 = SimpleUploadedFile(\n name='test_image_2.png',\n content=test_img.read(),\n content_type='image/png'\n )\n self.test_person.photo = self.test_image_2\n exist_person = Person.objects.filter(id=self.test_person.id).first()\n test_update_status = self.test_person.photo_update_status(exist_person)\n self.test_person.save()\n self.assertTrue(test_update_status)\n\n def test_photo_update_status_return_true_if_photo_remove(self):\n \"\"\"Check, for True status, if photo is remove\"\"\"\n self.test_person.photo = None\n exist_person = Person.objects.filter(id=self.test_person.id).first()\n test_update_status = self.test_person.photo_update_status(exist_person)\n self.test_person.save()\n self.assertTrue(test_update_status)\n\n def test_photo_update_status_return_false(self):\n \"\"\"Check, for False status, if no new photo_file is given\"\"\"\n self.test_person.name = \"Alex\"\n exist_person = Person.objects.filter(id=self.test_person.id).first()\n test_update_status = self.test_person.photo_update_status(exist_person)\n self.test_person.save()\n self.assertFalse(test_update_status)\n\n\nclass RequestsLogModelTest(TestCase):\n\n def setUp(self):\n for i in range(10):\n with freeze_time(\"2012-01-14 12:00:01\") as frozen_time:\n frozen_time.tick(delta=datetime.timedelta(seconds=i))\n RequestsLog(**REQUEST_DATA).save()\n\n def test_get_db_update_status_method_if_None_given(self):\n \"\"\"Must return True, if None given as parameter\"\"\"\n test_db_update_status = RequestsLog.get_db_update_status(None)\n self.assertTrue(test_db_update_status)\n\n def test_get_db_update_status_method_if_DB_has_changed(self):\n \"\"\"Must return True, if DB updates exists\"\"\"\n test_time = \"2012-01-14 12:00:00\"\n test_db_update_status = RequestsLog.get_db_update_status(test_time)\n self.assertTrue(test_db_update_status)\n\n def test_get_db_update_status_method_if_no_DB_changes(self):\n \"\"\"Must return False, if no changes in DB\"\"\"\n test_time = \"2012-01-14 13:00:00\"\n test_db_update_status = RequestsLog.get_db_update_status(test_time)\n self.assertFalse(test_db_update_status)\n\n def test_get_no_viewed_count_method_if_None_given(self):\n \"\"\"Must 
return all DB entries count, if None given as parameter\"\"\"\n db_entries_count = RequestsLog.objects.count()\n test_count = RequestsLog.get_no_viewed_count(None)\n self.assertEqual(db_entries_count, test_count)\n\n def test_get_no_viewed_count_method_with_proper_parameters(self):\n \"\"\"Must return unviewed requests count, if parameter is given\"\"\"\n last_requests = RequestsLog.objects.all()\n fifth_last_request = last_requests[4].edit_time\n new_test_count = RequestsLog.get_no_viewed_count(\n fifth_last_request.isoformat()\n )\n self.assertEqual(new_test_count, 4)\n\n def test_get_last_edit_time_method(self):\n \"\"\"Must return last edit time\"\"\"\n last_edit_time = RequestsLog\\\n .objects\\\n .order_by('edit_time')\\\n .last()\\\n .edit_time\\\n .isoformat()\n test_edit_time = RequestsLog.get_last_edit_time()\n self.assertEqual(last_edit_time, test_edit_time)\n","sub_path":"apps/hello/tests/test_models.py","file_name":"test_models.py","file_ext":"py","file_size_in_byte":8410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"402360790","text":"import pandas as pd\nimport requests\nfrom requests.adapters import HTTPAdapter\nfrom requests.packages.urllib3.util.retry import Retry\nfrom bs4 import BeautifulSoup\nfrom datetime import datetime\nimport json\nimport re\n\ndef steamChartScrapper(steamId, release_date) :\n url = 'https://steamcharts.com/app/' + steamId\n\n session = requests.Session()\n retry = Retry(connect=3, backoff_factor=0.5)\n adapter = HTTPAdapter(max_retries=retry)\n session.mount('http://', adapter)\n session.mount('https://', adapter)\n\n response = session.get(url)\n\n content = BeautifulSoup(response.content, 'html.parser')\n\n gamePlayers = content.findAll('tr')\n\n if gamePlayers != [] :\n date_str = gamePlayers[-2].find('td', {'class': 'month-cell left'})\n if date_str is not None :\n date_str = date_str.text\n posToStart = 6\n posToEnd = -5\n date_str = date_str[posToStart:]\n date_str = date_str[:posToEnd]\n date_datetime = datetime.strptime(date_str, '%B %Y').date()\n if date_datetime > release_date :\n num_player_after_month = gamePlayers[-2].find('td', {'class': 'right num-f'})\n if num_player_after_month is not None:\n num_player_after_month = num_player_after_month.text\n else:\n num_player_after_month = None\n else :\n num_player_after_month = None\n else :\n num_player_after_month = None\n else:\n num_player_after_month = None\n\n return num_player_after_month\n\ndef getDescription(steamId) :\n url = \"https://store.steampowered.com/app/\" + steamId + \"/?cc=us\"\n \n session = requests.Session()\n retry = Retry(connect=3, backoff_factor=0.5)\n adapter = HTTPAdapter(max_retries=retry)\n session.mount('http://', adapter)\n session.mount('https://', adapter)\n\n response = session.get(url)\n \n content = BeautifulSoup(response.content, 'html.parser')\n \n gameDescription = content.find('div', attrs = {'id' : 'game_area_description'})\n \n if gameDescription is not None : \n return gameDescription.text\n else :\n return None\n #steamRequest = json.loads(response.text)\n \n #if steamRequest[steamId][\"success\"] == True and 'detailed_description' in steamRequest[steamId][\"data\"].keys() :\n #cleanr = re.compile('<.*?>|&([a-z0-9]+|#[0-9]{1,6}|#x[0-9a-f]{1,6});')\n #description = re.sub(cleanr, ' ', steamRequest[steamId]['data']['detailed_description'])\n ##description = BeautifulSoup(steamRequest[steamId]['data']['detailed_description'], 'html.parser').text\n #return description\n #else :\n #return 
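[Note on the test_models.py record above] `RequestsLogModelTest.setUp` leans on freezegun's `tick()` to give each of the ten `RequestsLog` rows a distinct, deterministic timestamp. A minimal standalone version of that pattern (three iterations instead of ten; requires the freezegun package):

```python
import datetime
from freezegun import freeze_time

stamps = []
for i in range(3):
    # Each iteration re-freezes the clock, then advances it by i seconds.
    with freeze_time("2012-01-14 12:00:01") as frozen_time:
        frozen_time.tick(delta=datetime.timedelta(seconds=i))
        stamps.append(datetime.datetime.now())

print(stamps)  # 12:00:01, 12:00:02, 12:00:03 -- one second apart
```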
None\n \ndef dataUpdatePlayers() :\n #df = pd.read_csv('dataForMachineLearning.csv')\n \n #for i in range(len(df.index)) :\n #release_date = datetime.strptime(df['release_date'][i], '%m-%d-%Y').date()\n #if release_date.year < 2012 :\n #df.at[i, \"num_players_after_month\"] = None\n #else :\n #df.at[i, \"num_players_after_month\"] = steamChartScrapper(str(df['steamId'][i]), release_date)\n \n #df.to_csv('dataForMachineLearning.csv', index=False)\n df = pd.read_csv('dataForMachineLearning.csv')\n \n df['description'] = ''\n for i in range(len(df.index)) :\n steamId = df['steamId'][i]\n description = getDescription(str(steamId))\n df.loc[i, 'description'] = description\n \n df = df[df.description.notnull()]\n df.to_csv('dataForRecommendation.csv', index=False)\n \ndef dataUpdateRemoveRowsPlayers() :\n df = pd.read_csv('dataForMachineLearning.csv')\n \n df = df[df.num_players_after_month.notnull()]\n \n df.to_csv('dataForMachineLearning.csv', index=False)\n \ndataUpdatePlayers()","sub_path":"dataUpdateProcess.py","file_name":"dataUpdateProcess.py","file_ext":"py","file_size_in_byte":3796,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"145129245","text":"import matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import axes3d\nimport pymysql\nfrom day07.mydao import DaoStock\nds = DaoStock()\n\nconn = pymysql.connect(user=\"root\", password=\"python\",\n host=\"localhost\", database=\"mypydb\", charset='utf8')\n\nfig = plt.figure()\nax = fig.add_subplot(111, projection='3d')\n\nX = list() # []\nY = list()\nZ = list()\n\nidx = 0\nfor c_name in ds.retrieveAll() :\n X.clear()\n Y.clear()\n Z.clear()\n \n for i in range(6) :\n X.append(idx)\n Y.append(i*2)\n Z = ds.get_prices(c_name)\n \n min_z = min(Z)\n max_z = max(Z)\n if (max_z - min_z) == 0 :\n Z[:] = [0 in Z]\n else :\n Z[:] = [(ele - min_z)/(max_z-min_z) for ele in Z]\n \n ax.plot(X, Y, Z)\n idx+=1\n \nprint(3)\nplt.show()\n","sub_path":"HELLOPYTHON/mystock/my3dgraph02_samsung_homework.py","file_name":"my3dgraph02_samsung_homework.py","file_ext":"py","file_size_in_byte":786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"589230006","text":"import random\nimport math\n\nN_BOXES = 162 #/* Number of disjoint boxes of state space. */\nALPHA = 1000 #/* Learning rate for action weights, w. */\nBETA = 0.5 #/* Learning rate for critic weights, v. */\nGAMMA = 0.95 #/* Discount factor for critic. */\nLAMBDAw = 0.9 #/* Decay rate for w eligibility trace. */\nLAMBDAv = 0.8 #/* Decay rate for v eligibility trace. */\n\nMAX_FAILURES = 100 #/* Termination criterion. 
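[Note on the dataUpdateProcess.py record above] `steamChartScrapper` and `getDescription` build the same retried `requests.Session` twice. A small helper — not part of the original file — removes the duplication while keeping the same retry policy:

```python
import requests
from requests.adapters import HTTPAdapter
from requests.packages.urllib3.util.retry import Retry

def make_session(retries=3, backoff=0.5):
    session = requests.Session()
    adapter = HTTPAdapter(max_retries=Retry(connect=retries, backoff_factor=backoff))
    session.mount('http://', adapter)
    session.mount('https://', adapter)
    return session

# Usage: session = make_session(); response = session.get(url)
```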
*/\nMAX_STEPS = 100000\n\n#typedef float vector[N_BOXES];\n\n\"\"\"----------------------------------------------------------------------\n cart_pole: Takes an action (0 or 1) and the current values of the\n four state variables and updates their values by estimating the state\n TAU seconds later.\n----------------------------------------------------------------------\n\"\"\"\n\n#/*** Parameters for simulation ***/\n\nGRAVITY = 9.8\nMASSCART = 1.0\nMASSPOLE = 0.1\nTOTAL_MASS = (MASSPOLE + MASSCART)\nLENGTH = 0.5 #/* actually half the pole's length */\nPOLEMASS_LENGTH = (MASSPOLE * LENGTH)\nFORCE_MAG = 10.0\nTAU = 0.02 #/* seconds between state updates */\nFOURTHIRDS = 1.3333333333333\n\ndef prob_push_right(s):\n return (1.0 / (1.0 + math.exp(-max(-50.0, min(s, 50.0)))))\n\ndef cart_pole(action, x, x_dot, theta, theta_dot):\n if action > 0:\n force = FORCE_MAG\n else:\n force = -FORCE_MAG\n\n costheta = math.cos(theta);\n sintheta = math.sin(theta);\n\n temp = (force + POLEMASS_LENGTH * theta_dot * theta_dot * sintheta)/ TOTAL_MASS;\n\n thetaacc = (GRAVITY * sintheta - costheta* temp)/ (LENGTH * (FOURTHIRDS - MASSPOLE * costheta * costheta/ TOTAL_MASS));\n\n xacc = temp - POLEMASS_LENGTH * thetaacc* costheta / TOTAL_MASS;\n\n #/*** Update the four state variables, using Euler's method. ***/\n\n x += TAU * x_dot;\n x_dot += TAU * xacc;\n theta += TAU * theta_dot;\n theta_dot += TAU * thetaacc;\n return [x, x_dot, theta, theta_dot]\n\n\"\"\"\n/*----------------------------------------------------------------------\n get_box: Given the current state, returns a number from 1 to 162\n designating the region of the state space encompassing the current state.\n Returns a value of -1 if a failure state is encountered.\n----------------------------------------------------------------------*/\n\"\"\"\none_degree = 0.0174532 #/* 2pi/360 */\nsix_degrees = 0.1047192\ntwelve_degrees = 0.2094384\nfifty_degrees = 0.87266\n\ndef get_box(x,x_dot,theta,theta_dot):\n\n box=0;\n\n if (x < -2.4 or x > 2.4 or theta < -twelve_degrees or theta > twelve_degrees):\n return -1 #/* to signal failure */\n\n if (x < -0.8):\n box = 0;\n elif (x < 0.8):\n box = 1;\n else:\n box = 2;\n\n if (x_dot < -0.5):\n pass\n elif (x_dot < 0.5):\n box += 3;\n else:\n box += 6;\n\n if (theta < -six_degrees):\n pass\n elif (theta < -one_degree):\n box += 9;\n elif (theta < 0):\n box += 18;\n elif (theta < one_degree):\n box += 27;\n elif (theta < six_degrees):\n box += 36;\n else:\n box += 45;\n\n if (theta_dot < -fifty_degrees):\n pass\n elif (theta_dot < fifty_degrees):\n box += 54;\n else:\n box += 108;\n\n return box\n\nif __name__ == \"__main__\":\n \"\"\"\n float x, /* cart position, meters */\n x_dot, /* cart velocity */\n theta, /* pole angle, radians */\n theta_dot; /* pole angular velocity */\n vector w, /* vector of action weights */\n v, /* vector of critic weights */\n e, /* vector of action weight eligibilities */\n xbar; /* vector of critic weight eligibilities */\n float p, oldp, rhat, r;\n int box, i, y, steps = 0, failures=0, failed;\n \"\"\"\n steps = 0\n failures = 0\n\n w = [0.0]*N_BOXES\n v = [0.0]*N_BOXES\n e = [0.0]*N_BOXES\n xbar = [0.0]*N_BOXES\n\n print(\"Seed? \")\n i = int(raw_input())\n random.seed(i);\n\n #--- Initialize action and heuristic critic weights and traces. 
---*/\n for i in range(N_BOXES):\n w[i] = v[i] = xbar[i] = e[i] = 0.0\n\n #/*--- Starting state is (0 0 0 0) ---*/\n x = x_dot = theta = theta_dot = 0.0\n\n #/*--- Find box in state space containing start state ---*/\n box = get_box(x, x_dot, theta, theta_dot)\n\n #/*--- Iterate through the action-learn loop. ---*/\n while steps < MAX_STEPS and failures < MAX_FAILURES:\n\n steps += 1\n #/*--- Choose action randomly, biased by current weight. ---*/\n randomF = (float(random.random()) / float(((1 << 31) - 1)))\n y = (randomF < prob_push_right(w[box]))\n\n #/*--- Update traces. ---*/\n e[box] += (1.0 - LAMBDAw) * (y - 0.5);\n xbar[box] += (1.0 - LAMBDAv);\n\n #/*--- Remember prediction of failure for current state ---*/\n oldp = v[box];\n\n #/*--- Apply action to the simulated cart-pole ---*/\n x, x_dot, theta, theta_dot = cart_pole(y, x, x_dot, theta, theta_dot);\n\n #/*--- Get box of state space containing the resulting state. ---*/\n box = get_box(x, x_dot, theta, theta_dot);\n\n if (box < 0):\n #/*--- Failure occurred. ---*/\n failed = 1;\n failures+=1;\n print(\"Trial {} was {}steps.\\n\".format(failures, steps));\n steps = 0;\n\n #/*--- Reset state to (0 0 0 0). Find the box. ---*/\n x = x_dot = theta = theta_dot = 0.0;\n box = get_box(x, x_dot, theta, theta_dot);\n\n #/*--- Reinforcement upon failure is -1. Prediction of failure is 0. ---*/\n r = -1.0;\n p = 0.;\n else:\n #/*--- Not a failure. ---*/\n failed = 0;\n\n #/*--- Reinforcement is 0. Prediction of failure given by v weight. ---*/\n r = 0;\n p= v[box];\n\n #/*--- Heuristic reinforcement is: current reinforcement + gamma * new failure prediction - previous failure prediction ---*/\n rhat = r + GAMMA * p - oldp;\n\n for i in range(0, N_BOXES):\n\n #/*--- Update all weights. ---*/\n w[i] += ALPHA * rhat * e[i];\n v[i] += BETA * rhat * xbar[i];\n if v[i] < -1.0:\n v[i] = v[i]\n\n if failed:\n #/*--- If failure, zero all traces. ---*/\n e[i] = 0.;\n xbar[i] = 0.;\n else:\n #/*--- Otherwise, update (decay) the traces. ---*/\n e[i] *= LAMBDAw;\n xbar[i] *= LAMBDAv;\n\n if failures == MAX_FAILURES:\n print(\"Pole not balanced. Stopping after {} failures.\".format(failures))\n else:\n print(\"Pole balanced successfully for at least {} steps\\n\".format(steps));\n\n\n","sub_path":"pole_balance.py","file_name":"pole_balance.py","file_ext":"py","file_size_in_byte":6544,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"442970628","text":"# -*- coding: utf-8 -*-\nimport random\n\nfrom django.db import models\nfrom app.investigacion.models import Investigacion\nfrom app.adjuntos.image_functions import ImgOpt\nfrom django.db.models.signals import post_save\nfrom django.conf import settings\nfrom django.core.exceptions import ValidationError\nimport os\n\n\nclass Adjuntos(models.Model):\n investigacion = models.ForeignKey(Investigacion, on_delete=models.CASCADE)\n adj2 = models.FileField(verbose_name='1. 
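[Note on the pole_balance.py record above] `prob_push_right` is a logistic clamped to ±50 before exponentiation, which keeps `math.exp` from overflowing. The record also carries Python 2 remnants (`raw_input`, and the division of `random.random()` by `(1 << 31) - 1`, which looks like a leftover from C's integer `random()` — Python's `random.random()` is already uniform on [0, 1), so as written `randomF` is effectively 0 and `y` is almost always 1). A quick standalone check with illustrative inputs:

```python
import math, random

def prob_push_right(s):
    return 1.0 / (1.0 + math.exp(-max(-50.0, min(s, 50.0))))

for s in (-100.0, 0.0, 100.0):
    print(s, prob_push_right(s))  # ~0.0, 0.5, ~1.0 -- saturates at the clamp

# Corrected action sampling: no (1 << 31) - 1 scaling is needed in Python.
w_box = 0.3  # illustrative action weight
y = random.random() < prob_push_right(w_box)
```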
Foto de perfil del candidato', upload_to='adj', blank=True, null=True)\n adj3 = models.FileField(verbose_name='2.a Interior derecho', upload_to='adj', blank=True, null=True)\n adj4 = models.FileField(verbose_name='2.b Interior izquierdo', upload_to='adj', blank=True, null=True)\n adj5 = models.FileField(verbose_name='2.c Exterior derecho', upload_to='adj', blank=True, null=True)\n adj6 = models.FileField(verbose_name='2.d Exterior izquierdo', upload_to='adj', blank=True, null=True)\n adj9 = models.FileField(verbose_name='2.e Frente', upload_to='adj', blank=True, null=True)\n\n adj10 = models.FileField(verbose_name='3. Gestor Entrevistador', upload_to='adj', blank=True, null=True)\n adj13 = models.FileField(verbose_name='4. Croquis', upload_to='adj', blank=True, null=True)\n adj11 = models.FileField(verbose_name='5. Aviso Privacidad', upload_to='adj', blank=True, null=True)\n adj12 = models.FileField(verbose_name='6. Constancia', upload_to='adj', blank=True, null=True)\n\n adj14 = models.FileField(verbose_name='7.a Identificación con fotografia', upload_to='adj', blank=True, null=True)\n adj22 = models.FileField(verbose_name='7.b Identificación con fotografia', upload_to='adj', blank=True, null=True)\n adj23 = models.FileField(verbose_name='7.c Identificación con fotografia', upload_to='adj', blank=True, null=True)\n adj24 = models.FileField(verbose_name='7.d Identificación con fotografia', upload_to='adj', blank=True, null=True)\n\n adj17 = models.FileField(verbose_name='8. Acta de nacimiento', upload_to='adj', blank=True, null=True)\n adj16 = models.FileField(verbose_name='9. Comprobante de domicilio', upload_to='adj', blank=True, null=True)\n\n adj8 = models.FileField(verbose_name='10.a Semanas Cotizadas', upload_to='adj', blank=True, null=True)\n adj25 = models.FileField(verbose_name='10.b Semanas Cotizadas', upload_to='adj', blank=True, null=True)\n adj26 = models.FileField(verbose_name='10.c Semanas Cotizadas', upload_to='adj', blank=True, null=True)\n adj27 = models.FileField(verbose_name='10.d Semanas Cotizadas', upload_to='adj', blank=True, null=True)\n adj28 = models.FileField(verbose_name='10.e Semanas Cotizadas', upload_to='adj', blank=True, null=True)\n\n adj7 = models.FileField(verbose_name='11.a Validación de Demandas Laborales', upload_to='adj', blank=True,null=True)\n adj36 = models.FileField(verbose_name='11.b Validacion web', upload_to='adj', blank=True, null=True)\n\n adj18 = models.FileField(verbose_name='Carta Laboral', upload_to='adj', blank=True, null=True)\n adj37 = models.FileField(verbose_name='Carta Laboral Extra', upload_to='adj', blank=True, null=True)\n\n adj19 = models.FileField(verbose_name='Adicionales A', upload_to='adj', blank=True, null=True)\n adj20 = models.FileField(verbose_name='Adicionales B', upload_to='adj', blank=True, null=True)\n adj21 = models.FileField(verbose_name='Adicionales C', upload_to='adj', blank=True, null=True)\n\n adj29 = models.FileField(verbose_name='Adicionales D', upload_to='adj', blank=True, null=True)\n adj30 = models.FileField(verbose_name='Adicionales E', upload_to='adj', blank=True, null=True)\n adj31 = models.FileField(verbose_name='Adicionales F', upload_to='adj', blank=True, null=True)\n adj32 = models.FileField(verbose_name='Adicionales G', upload_to='adj', blank=True, null=True)\n adj33 = models.FileField(verbose_name='Adicionales H', upload_to='adj', blank=True, null=True)\n adj34 = models.FileField(verbose_name='Adicionales I', upload_to='adj', blank=True, null=True)\n\n adj35 = models.FileField(verbose_name='Extra 
A', upload_to='adj', blank=True, null=True)\n\n def filename(self):\n return os.path.basename(self.file.name)\n\n def __str__(self):\n return u'%s' % self.investigacion\n\n\ndef resize_adjuntos(sender, **kwargs):\n if len(str(kwargs['instance'].adj2)):\n ImgOpt.resize(file_path=settings.MEDIA_ROOT + '/' + str(kwargs['instance'].adj2), size_x=1600)\n\n if len(str(kwargs['instance'].adj3)):\n ImgOpt.resize(file_path=settings.MEDIA_ROOT + '/' + str(kwargs['instance'].adj3), size_x=1600)\n\n if len(str(kwargs['instance'].adj4)):\n ImgOpt.resize(file_path=settings.MEDIA_ROOT + '/' + str(kwargs['instance'].adj4), size_x=1600)\n\n if len(str(kwargs['instance'].adj5)):\n ImgOpt.resize(file_path=settings.MEDIA_ROOT + '/' + str(kwargs['instance'].adj5), size_x=1600)\n\n if len(str(kwargs['instance'].adj6)):\n ImgOpt.resize(file_path=settings.MEDIA_ROOT + '/' + str(kwargs['instance'].adj6), size_x=1600)\n\n if len(str(kwargs['instance'].adj7)):\n ImgOpt.resize(file_path=settings.MEDIA_ROOT + '/' + str(kwargs['instance'].adj7), size_x=1600)\n\n if len(str(kwargs['instance'].adj36)):\n ImgOpt.resize(file_path=settings.MEDIA_ROOT + '/' + str(kwargs['instance'].adj36), size_x=1600)\n\n if len(str(kwargs['instance'].adj8)):\n ImgOpt.resize(file_path=settings.MEDIA_ROOT + '/' + str(kwargs['instance'].adj8), size_x=1600)\n\n if len(str(kwargs['instance'].adj9)):\n ImgOpt.resize(file_path=settings.MEDIA_ROOT + '/' + str(kwargs['instance'].adj9), size_x=1600)\n\n if len(str(kwargs['instance'].adj10)):\n ImgOpt.resize(file_path=settings.MEDIA_ROOT + '/' + str(kwargs['instance'].adj10), size_x=1600)\n\n if len(str(kwargs['instance'].adj11)):\n ImgOpt.resize(file_path=settings.MEDIA_ROOT + '/' + str(kwargs['instance'].adj11), size_x=1600)\n # adj12\n if len(str(kwargs['instance'].adj12)):\n ImgOpt.resize(file_path=settings.MEDIA_ROOT + '/' + str(kwargs['instance'].adj12), size_x=1600)\n\n if len(str(kwargs['instance'].adj13)):\n ImgOpt.resize(file_path=settings.MEDIA_ROOT + '/' + str(kwargs['instance'].adj13), size_x=1600)\n\n if len(str(kwargs['instance'].adj14)):\n ImgOpt.resize(file_path=settings.MEDIA_ROOT + '/' + str(kwargs['instance'].adj14), size_x=1600)\n\n if len(str(kwargs['instance'].adj16)):\n ImgOpt.resize(file_path=settings.MEDIA_ROOT + '/' + str(kwargs['instance'].adj16), size_x=1600)\n if len(str(kwargs['instance'].adj17)):\n ImgOpt.resize(file_path=settings.MEDIA_ROOT + '/' + str(kwargs['instance'].adj17), size_x=1600)\n\n if len(str(kwargs['instance'].adj18)):\n ImgOpt.resize(file_path=settings.MEDIA_ROOT + '/' + str(kwargs['instance'].adj18), size_x=1600)\n if len(str(kwargs['instance'].adj37)):\n ImgOpt.resize(file_path=settings.MEDIA_ROOT + '/' + str(kwargs['instance'].adj37), size_x=1600)\n if len(str(kwargs['instance'].adj19)):\n ImgOpt.resize(file_path=settings.MEDIA_ROOT + '/' + str(kwargs['instance'].adj19), size_x=1600)\n if len(str(kwargs['instance'].adj20)):\n ImgOpt.resize(file_path=settings.MEDIA_ROOT + '/' + str(kwargs['instance'].adj20), size_x=1600)\n if len(str(kwargs['instance'].adj21)):\n ImgOpt.resize(file_path=settings.MEDIA_ROOT + '/' + str(kwargs['instance'].adj21), size_x=1600)\n\n if len(str(kwargs['instance'].adj29)):\n ImgOpt.resize(file_path=settings.MEDIA_ROOT + '/' + str(kwargs['instance'].adj29), size_x=1600)\n if len(str(kwargs['instance'].adj30)):\n ImgOpt.resize(file_path=settings.MEDIA_ROOT + '/' + str(kwargs['instance'].adj30), size_x=1600)\n if len(str(kwargs['instance'].adj31)):\n ImgOpt.resize(file_path=settings.MEDIA_ROOT + '/' + 
str(kwargs['instance'].adj31), size_x=1600)\n if len(str(kwargs['instance'].adj32)):\n ImgOpt.resize(file_path=settings.MEDIA_ROOT + '/' + str(kwargs['instance'].adj32), size_x=1600)\n if len(str(kwargs['instance'].adj33)):\n ImgOpt.resize(file_path=settings.MEDIA_ROOT + '/' + str(kwargs['instance'].adj33), size_x=1600)\n if len(str(kwargs['instance'].adj34)):\n ImgOpt.resize(file_path=settings.MEDIA_ROOT + '/' + str(kwargs['instance'].adj34), size_x=1600)\n\n\npost_save.connect(resize_adjuntos, sender=Adjuntos)\n","sub_path":"project/app/adjuntos/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":8340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"593847063","text":"# Author: twmicro, chicherintim@gmail.com\n\n# imports\nimport vk_api\nimport time\nimport sys, traceback\nimport datetime\n\n# variables\n\nvalues = {'out':0, 'count':100, 'time_offset':60, 'group_id':160615635}\nOPERATORS = {'+': (1, lambda x, y: x + y), '-': (1, lambda x, y: x - y),\n '*': (2, lambda x, y: x * y), '/': (2, lambda x, y: x / y)}\nPI = 3.14\nCONFIG_VARIABLES = {'П':3.14}\n\n# authorize\n\nvk = vk_api.VkApi(token='cf60cf5f6f4e49')\nvk._auth_token()\n\n# functions\n\ndef write_msg(user_id, s):\n vk.method('messages.send', {'user_id': user_id, 'message': s})\n\ndef now_wd():\n return datetime.datetime.now().weekday() + 1\n\ndef wd_string():\n days = ['понедельник', 'вторник', 'среда', 'четверг', 'пятница', 'суббота', 'воскресенье']\n return days[now_wd() - 1]\n\ndef now_mth():\n months = ['Январь', 'Февраль', 'Март', 'Апрель', 'Май', 'Июнь', 'Июль', 'Август', 'Сентябрь', 'Октябрь',\n 'Ноябрь', 'Декабрь']\n return months[datetime.datetime.now().month - 1]\ndef eval_(formula):\n def parse(formula_string):\n number = ''\n index = 0\n for s in formula_string:\n #for i in range(0, len(CONFIG_VARIABLES) - 1):\n # if s == CONFIG_VARIABLES.keys()[i]:\n # yield CONFIG_VARIABLES.values()[i]\n if s in '1234567890.':\n number += s\n elif s == 'П':\n yield PI\n elif s == 'И':\n pass\n elif number:\n yield float(number)\n number = ''\n if s in OPERATORS or s in \"()\":\n yield s\n index+=1\n if number:\n yield float(number)\n\n def shunting_yard(parsed_formula):\n stack = []\n for token in parsed_formula:\n if token in OPERATORS:\n while stack and stack[-1] != \"(\" and OPERATORS[token][0] <= OPERATORS[stack[-1]][0]:\n yield stack.pop()\n stack.append(token)\n elif token == \")\":\n while stack:\n x = stack.pop()\n if x == \"(\":\n break\n yield x\n elif token == \"(\":\n stack.append(token)\n else:\n yield token\n while stack:\n yield stack.pop()\n\n def calc(polish):\n stack = []\n for token in polish:\n if token in OPERATORS:\n y, x = stack.pop(), stack.pop()-0\n stack.append(OPERATORS[token][1](x, y))\n else:\n stack.append(token)\n return stack[0]\n\n return calc(shunting_yard(parse(formula)))\n\n# loop\nwhile True:\n try:\n response = vk.method('messages.getConversations', values)\n #if response['items']:\n #values['last_message_id'] = response['items'][0]['id']\n for item in response['items']:\n\n if item['last_message']['text']==\"Привет!\":\n answer = 'И вам привет!'\n elif item['last_message']['text'] == \"Время\":\n time = datetime.datetime.time(datetime.datetime.now())\n answer = str(time.hour) + ':' + str(time.minute)\n elif item['last_message']['text'] == \"День\":\n answer = 'Cегодня ' + wd_string()\n elif item['last_message']['text'] == \"День по счету\":\n answer = 'Cегодня ' + str(now_wd()) + ' день недели'\n elif 
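[Note on the adjuntos models.py record above] `resize_adjuntos` repeats the same three lines once per field and, as written, skips adj22–adj28 and adj35. A loop over the model's fields is equivalent and shorter — this sketch assumes the same `ImgOpt.resize` and `settings.MEDIA_ROOT` already imported in the module, and that every `adj*` field is meant to be resized (a behavioral difference from the hand-written version, which misses some):

```python
def resize_adjuntos(sender, **kwargs):
    # Resize every populated FileField whose name starts with 'adj'.
    instance = kwargs['instance']
    for field in instance._meta.get_fields():
        if not field.name.startswith('adj'):
            continue
        value = str(getattr(instance, field.name))
        if len(value):
            ImgOpt.resize(file_path=settings.MEDIA_ROOT + '/' + value, size_x=1600)
```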
item['last_message']['text'] == \"Месяц\":\n answer = now_mth()\n elif item['last_message']['text'] == \"Пока\":\n answer = 'Пока'\n elif item['last_message']['text'] == \"Как дела?\":\n answer = 'Хорошо'\n elif item['last_message']['text'] == \"Хей\":\n answer = 'Привет братан'\n else:\n try:\n answer = eval_(item['last_message']['text'])\n except:\n answer = 'Не понял :)'\n write_msg(item['last_message']['from_id'], answer)\n #print(item['last_message']['from_id'])\n print(datetime.datetime.now(),'from id {} sent message = {} we answered = {}'.\n format(item['last_message']['from_id'],item['last_message']['text'],answer))\n time.sleep(0.5)\n except:\n pass\n","sub_path":"VKBot.py","file_name":"VKBot.py","file_ext":"py","file_size_in_byte":4550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"246324648","text":"from __future__ import print_function, division\nfrom builtins import range\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nclass Bandit:\n # a code that simulates casino machines (bandit)\n # goal: pick 3 bandits and find the best moment to play in each one\n\n def __init__(self, m):\n self.m = m\n self.mean = 0 # estimate of bandits mean\n self.N = 0 # number of plays\n\n def pull(self):\n # simulates pulling the bandits arm\n return np.random.randn() + self.m\n\n def update(self, x):\n # x is the latest sample received from the bandit\n self.N += 1\n self.mean = (1 - 1.0 / self.N) * self.mean + 1.0 / self.N * x\n\ndef run_experiment(m1, m2, m3, eps, N):\n # there's 3 different means because we compare 3 bandits in this example\n bandits = [Bandit(m1), Bandit(m2), Bandit(m3)]\n data = np.empty(N)\n \n for i in xrange(N):\n # epsilon greedy\n\n # generate a random number P between 0 and 1\n p = np.random.random()\n\n if p < eps:\n # choose a bandit at random\n j = np.random.choice(3)\n else:\n # choose the bandit with the best current sample mean\n j = np.argmax([b.mean for b in bandits])\n\n # pulling the bandit\n x = bandits[j].pull()\n\n # update the bandit with the reward x got\n bandits[j].update(x)\n\n # for the plot\n data[i] = x\n\n # calculate the cumulative average\n cumulative_average = np.cumsum(data) / (np.arange(N) + 1)\n\n # plot moving average ctr\n plt.plot(cumulative_average)\n plt.plot(np.ones(N) * m1)\n plt.plot(np.ones(N) * m2)\n plt.plot(np.ones(N) * m3)\n plt.xscale('log')\n plt.show()\n\n for b in bandits:\n print(b.mean)\n\n return cumulative_average\n\n\nif __name__ == '__main__':\n # do the same experiment 3 times, with different espilons\n\n # when epsilon is 10%\n c_1 = run_experiment(1.0, 2.0, 3.0, 0.1, 100000)\n\n # when epsilon is 5%\n c_05 = run_experiment(1.0, 2.0, 3.0, 0.05, 100000)\n\n # when epsilon is 1%\n c_01 = run_experiment(1.0, 2.0, 3.0, 0.01, 100000)\n\n # log scale plot\n plt.plot(c_1, label='eps = 0.1')\n plt.plot(c_05, label='eps = 0.05')\n plt.plot(c_01, label='eps = 0.01')\n plt.legend()\n plt.xscale('log')\n plt.show()\n\n # linear plot\n plt.plot(c_1, label='eps = 0.1')\n plt.plot(c_05, label='eps = 0.05')\n plt.plot(c_01, label='eps = 0.01')\n plt.legend()\n plt.show()\n\n","sub_path":"compairing_epsylons.py","file_name":"compairing_epsylons.py","file_ext":"py","file_size_in_byte":2303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"478858173","text":"\"\"\"\n@Project : decaNLP\n@Module : data_load.py\n@Author : Deco [deco@cubee.com]\n@Created : 8/2/18 2:49 PM\n@Desc : \n\"\"\"\nimport logging\nimport 
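[Note on the compairing_epsylons.py record above] The file imports `range` from builtins but then iterates with `xrange`, which raises NameError on Python 3. Aside from that, `Bandit.update` is the standard incremental mean, mean_N = (1 - 1/N)·mean_{N-1} + (1/N)·x_N, checked here against a direct average on illustrative data:

```python
import numpy as np

samples = [2.0, 4.0, 6.0, 8.0]
mean, n = 0.0, 0
for x in samples:
    n += 1
    mean = (1 - 1.0 / n) * mean + (1.0 / n) * x  # same rule as Bandit.update
assert abs(mean - np.mean(samples)) < 1e-12  # both give 5.0
```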
logging.handlers\nimport os\nfrom pprint import pformat\nimport torch\nfrom text import torchtext\nimport arguments\nfrom util import (get_splits, set_seed)\n\n\ndef initialize_logger(args, rank='data_load.py'):\n # set up file logger\n logger = logging.getLogger(f'process_{rank}')\n logger.setLevel(logging.DEBUG)\n handler = logging.handlers.RotatingFileHandler(\n os.path.join(args.log_dir, f'process_{rank}.log'),\n maxBytes=1024*1024*10, backupCount=1)\n handler.setLevel(logging.DEBUG)\n formatter = logging.Formatter('%(name)s - %(lineno)d - %(message)s')\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n handler = logging.StreamHandler()\n handler.setFormatter(formatter)\n handler.setLevel(logging.DEBUG)\n logger.addHandler(handler)\n logger.propagate = False\n return logger\n\n\ndef prepare_data(args, field, logger):\n\n if field is None:\n logger.info(f'Constructing field')\n FIELD = torchtext.data.ReversibleField(\n batch_first=True, init_token='', eos_token='',\n lower=args.lower, include_lengths=True)\n else:\n FIELD = field\n\n logger.debug(FIELD)\n\n train_sets, val_sets, vocab_sets = [], [], []\n # train sets, validation sets\n for task in args.train_tasks:\n logger.info(f'Loading {task}')\n # kwargs = {'test': None}\n # kwargs['subsample'] = args.subsample\n # kwargs['validation'] = None\n kwargs = {'test': None,\n 'subsample': args.subsample,\n # 'subsample': 20000000\n 'validation': None\n }\n logger.info(f'Adding {task} to training datasets')\n split = get_splits(args, task, FIELD, **kwargs)[0]\n # 取了tuple的第一个元素,只保留train_data,不要validation data\n # split = torchtext.datasets.generic.SQuAD.splits(fields=FIELD,\n # root=args.data, **kwargs)\n logger.info(f'{task} has {len(split)} training examples')\n logger.debug(type(split))\n train_sets.append(split)\n\n logger.debug(args.vocab_tasks)\n\n if args.vocab_tasks is not None and task in args.vocab_tasks:\n vocab_sets.extend(split)\n\n logger.debug(train_sets)\n\n # return FIELD, train_sets, val_sets\n\n\ndef main():\n args = arguments.parse()\n if args is None:\n return\n set_seed(args)\n # 给numpy and torch设定seed\n logger = initialize_logger(args)\n logger.info(f'Arguments:\\n{pformat(vars(args))}')\n # 调用vars(args)的format函数,得到字符串?\n # pformat是一种format函数,从pprint中引入的\n\n field, save_dict = None, None\n # tuple unpacking\n if args.load is not None:\n logger.info(f'Loading field from {os.path.join(args.save, args.load)}')\n save_dict = torch.load(os.path.join(args.save, args.load))\n field = save_dict['field']\n # field is the value in the 'field' key of the data\n\n logger.info(field)\n\n # field is None\n prepare_data(args, field, logger)\n # field, train_sets, val_sets = prepare_data(args, field, logger)\n\n\nif __name__ == '__main__':\n# python decaNLP/data_load.py --train_tasks squad --gpus 0 --train_batch_tokens 5000 --val_batch_size 128\n\n main()\n","sub_path":"data_load.py","file_name":"data_load.py","file_ext":"py","file_size_in_byte":3417,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"66185162","text":"#!/usr/bin/env python\n\nimport time\nfrom serialport import board\nimport sys\n\n# Callback function (temp)\nvalue = 0\n\n# delay()\ndef delay(num):\n num = float(num)\n time.sleep(num / 1000.0)\n\ndef print_analog(data):\n global value\n value = data [2]\n\n# digital_write()\ndef digital_write(digital_pin, val):\n board.set_pin_mode(digital_pin, board.OUTPUT, board.DIGITAL)\n board.digital_write(digital_pin, val)\n\n# analog_read()\ndef 
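[Note on the data_load.py record above] `initialize_logger` wires one logger to both a rotating file and the console and disables propagation so records do not reach the root logger twice. A minimal standalone version of that pattern (the file name is illustrative):

```python
import logging
import logging.handlers

logger = logging.getLogger('process_demo')
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(name)s - %(lineno)d - %(message)s')

file_handler = logging.handlers.RotatingFileHandler(
    'process_demo.log', maxBytes=1024 * 1024 * 10, backupCount=1)
file_handler.setFormatter(formatter)
logger.addHandler(file_handler)

stream_handler = logging.StreamHandler()
stream_handler.setFormatter(formatter)
logger.addHandler(stream_handler)

logger.propagate = False  # keep records out of the root logger
logger.debug('one line, written to both the file and the console')
```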
analog_init(analog_pin):\n board.set_pin_mode(analog_pin, board.INPUT, board.ANALOG)\n\ndef analog_read(analog_pin):\n print(\"Pino: \" + str(analog_pin))\n board.set_pin_mode(analog_pin, board.INPUT, board.ANALOG)\n value = board.analog_read(analog_pin)\n return value\n print(\"Valor: \" + str(value))\n\n# Servo\ndef servo(digital_pin):\n SERVO_BASE = digital_pin\n board.servo_config(SERVO_BASE)\n for pos in range(0, 175):\n board.digital_write(SERVO_BASE, pos)\n delay(0.010)\n for pos in range(175, 0, -1):\n board.digital_write(SERVO_BASE, pos)\n delay(0.010)\n\n# blink()\ndef blink(digital_pin, led_delay, led_range):\n for i in range(led_range):\n digital_write(digital_pin, 1)\n delay(led_delay)\n digital_write(digital_pin, 0)\n delay(led_delay)\n board.reset()\n\n# handler definition\ndef signal_handler (sig, frame):\n print ('You pressed Ctrl+C!!!!')\n if board is not None:\n board.reset ()\n sys.exit (0)\n","sub_path":"scripts/unoIDE/serial/boardsetup.py","file_name":"boardsetup.py","file_ext":"py","file_size_in_byte":1430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"36708671","text":"import sqlite3 as sql\nfrom dataclasses import dataclass\nfrom novelreader.helpers import plog, show\nfrom wescrape.helpers import identify_status\nfrom wescrape.models.novel import Novel, Chapter, Meta, Website, Status\n\n@dataclass\nclass Chapter(Chapter):\n has_read: bool = False\n\nclass Database:\n INSTANCE = None\n\n @classmethod\n def build(cls, db_file):\n \"\"\"Build An Instance Of Database\"\"\"\n if cls.INSTANCE is None:\n cls.INSTANCE = Database(db_file)\n plog([\"created\"], \"database instance\")\n return cls.INSTANCE\n else:\n plog([\"exists\"], \"database instance\")\n return cls.INSTANCE\n \n \n def __init__(self, db_file):\n self.__conn = sql.connect(db_file)\n self.__conn.row_factory = sql.Row\n\n def commit(self):\n self.__conn.commit()\n\n @classmethod\n def instance(cls):\n if cls.INSTANCE is not None:\n return cls.INSTANCE\n else:\n plog([\"missing\"], \"database instance\")\n \n @classmethod\n def close(cls):\n \"\"\"Close Database Connection, Clear Instance\"\"\"\n if cls.INSTANCE is not None:\n cls.INSTANCE.conn.close()\n cls.INSTANCE = None\n plog([\"removed\"], \"database instance\")\n else:\n plog([\"missing\"], \"database instance\")\n\n @property\n def conn(self):\n return self.__conn\n\n # @show\n def __conditions_builder(self, conditions: [(str, any)]): \n if type(conditions) == list:\n values = tuple([c[1] for c in conditions]) \n part = \" AND \".join([f\"{c[0].upper()} = ?\" for c in conditions])\n conditions = \" \".join([\"WHERE\", part]) if len(conditions) > 0 else \"\"\n else: \n values = (conditions[1],)\n conditions = f\"where {conditions[0].upper()} = ?\"\n return conditions, values\n\n # @show\n def __select(self, table: str, cols:[str] = [\"*\",], conditions: [(str, any)] = []):\n cols = \" \".join(cols) if type(cols) == list else cols\n conditions, values = self.__conditions_builder(conditions)\n statement = f\"\"\"SELECT {cols} FROM {table} {conditions}\"\"\".strip().upper()\n rows = self.__conn.execute(statement, values)\n return rows\n\n def __update(self, table: str, cols=[str], conditions=[(str, any)]):\n valid_tables = [\"novels\", \"chapters\", \"metas\"]\n if table.lower() not in valid_tables:\n return None\n cols = \", \".join(cols) if type(cols) == list else cols\n conditions, values = self.__conditions_builder(conditions)\n statement = f\"UPDATE {table} SET {cols} 
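[Note on the boardsetup.py record above] In `analog_read`, the `print("Valor: ...")` sits after `return value` and is therefore unreachable. A corrected sketch, assuming the same `board` object from the record's serialport module:

```python
def analog_read(analog_pin):
    print("Pino: " + str(analog_pin))
    board.set_pin_mode(analog_pin, board.INPUT, board.ANALOG)
    value = board.analog_read(analog_pin)
    print("Valor: " + str(value))  # now runs before the function returns
    return value
```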
{conditions}\".strip().upper()\n self.__conn.execute(statement, values)\n\n def __convert_row(self, row: dict, obj_type):\n converted_row = None\n if row:\n try:\n if obj_type == Novel:\n converted_row = Novel(\n url=row[\"url\"],\n title=row[\"title\"],\n thumbnail=row[\"thumbnail\"]\n )\n elif obj_type == Chapter:\n converted_row = Chapter(\n id=row[\"chapter_id\"],\n url=row[\"url\"],\n title=row[\"title\"],\n content=row[\"content\"],\n has_read=bool(row[\"has_read\"])\n )\n elif obj_type == Meta:\n converted_row = Meta(\n authors=row[\"authors\"].split(\", \"),\n genres=row[\"genres\"].split(\", \"),\n rating=row[\"rating\"],\n release_date=row[\"release_date\"],\n status=identify_status(row[\"status\"]),\n description=row[\"description\"],\n )\n except Exception as ex:\n raise ex\n return converted_row\n\n def __convert_rows(self, rows: [dict], convert_type):\n converted_rows = []\n if rows:\n for row in rows:\n converted_row = self.__convert_row(row, convert_type)\n if converted_row:\n converted_rows.append(converted_row)\n return converted_rows\n \n # create functions\n def create_novels_table(self):\n statement = \"\"\"CREATE TABLE IF NOT EXISTS NOVELS(\n URL TEXT PRIMARY KEY UNIQUE,\n TITLE TEXT NOT NULL,\n THUMBNAIL TEXT NOT NULL);\n \"\"\"\n self.__conn.execute(statement)\n\n def create_chapters_table(self):\n statement = \"\"\"CREATE TABLE IF NOT EXISTS CHAPTERS(\n URL TEXT PRIMARY KEY,\n CHAPTER_ID TEXT NOT NULL,\n TITLE TEXT NOT NULL,\n CONTENT TEXT,\n NOVEL_URL TEXT NOT NULL,\n HAS_READ INTEGER,\n FOREIGN KEY (NOVEL_URL)\n REFERENCES NOVELS (URL));\n \"\"\"\n self.__conn.execute(statement)\n\n def create_metas_table(self):\n statement = \"\"\"CREATE TABLE IF NOT EXISTS METAS(\n ID INTEGER PRIMARY KEY AUTOINCREMENT,\n AUTHORS TEXT,\n GENRES TEXT,\n RATING TEXT,\n RELEASE_DATE TEXT,\n STATUS TEXT,\n DESCRIPTION TEXT,\n NOVEL_URL TEXT UNIQUE NOT NULL,\n FOREIGN KEY (NOVEL_URL)\n REFERENCES NOVELS (URL));\n \"\"\"\n self.__conn.execute(statement)\n\n # insert functions\n def insert_novel(self, novel: Novel):\n statement = \"\"\"INSERT INTO NOVELS (URL, TITLE, THUMBNAIL) VALUES (?, ?, ?);\"\"\"\n self.__conn.execute(statement, (novel.url, novel.title, novel.thumbnail))\n\n def insert_meta(self, novel_url: str, meta: Meta):\n statement = \"\"\"INSERT INTO \n METAS (\n AUTHORS, GENRES, RATING, STATUS, RELEASE_DATE, DESCRIPTION, NOVEL_URL\n )\n VALUES (?, ?, ?, ?, ?, ?, ?);\"\"\"\n if type(meta) == Meta:\n meta = meta.__dict__\n \n values = (\n \", \".join(meta[\"authors\"]),\n \", \".join(meta[\"genres\"]),\n meta[\"rating\"],\n meta[\"status\"].name,\n meta[\"release_date\"],\n meta[\"description\"],\n novel_url\n )\n self.__conn.execute(statement, values)\n\n def insert_chapter(self, novel_url: str, chapter: Chapter):\n statement = \"\"\"INSERT INTO CHAPTERS (CHAPTER_ID, URL, TITLE, CONTENT, NOVEL_URL) VALUES(?, ?, ?, ?, ?);\"\"\"\n \n if type(chapter) == Chapter:\n chapter = chapter.__dict__\n\n values = (\n chapter[\"id\"],\n chapter[\"url\"],\n chapter[\"title\"],\n chapter[\"content\"],\n novel_url\n )\n self.__conn.execute(statement, values)\n\n def update_chapter(self, chapter: Chapter):\n \"\"\"Update cols of selected chapter whose col URL is `url`\"\"\"\n if type(chapter) == Chapter:\n chapter = chapter.__dict__\n self.update_chapter_v2(chapter)\n # statement = \"\"\"UPDATE CHAPTERS \n # SET CHAPTER_ID = ?,\n # TITLE = ?,\n # CONTENT = ?,\n # HAS_READ = ?\n # WHERE URL = ?;\"\"\"\n\n # values = (\n # chapter[\"id\"],\n # chapter[\"title\"],\n # chapter[\"content\"],\n # 
chapter[\"has_read\"],\n # chapter[\"url\"]\n # )\n\n # self.__conn.execute(statement, values)\n\n def update_chapter_v2(self, chapter: Chapter):\n self.__update(\n \"chapters\",\n [\"content\"],\n (\"url\", chapter[\"url\"])\n )\n \n def update_meta(self, meta: Meta):\n self.__update(\n \"metas\",\n [\"status\"],\n [(\"novel_url\", meta.novel_url)]\n )\n\n def select_novels(self) -> [Novel]:\n cur = self.__select(\"novels\")\n novel_rows = cur.fetchall()\n # novel_rows = self.__conn.execute(\"\"\"SELECT * FROM NOVELS\"\"\").fetchall()\n novels = self.__convert_rows(novel_rows, Novel)\n return novels\n\n def select_novel(self, novel_url) -> Novel:\n cur = self.__select(\"novels\", conditions=[(\"url\", novel_url)])\n novel_row = cur.fetchone()\n novel = self.__convert_row(novel_row, Novel)\n return novel\n\n def select_chapters(self, novel_url: str) -> [Chapter]:\n cur = self.__select(\"chapters\", conditions=(\"novel_url\", novel_url))\n chapter_rows = cur.fetchall()\n chapters = self.__convert_rows(chapter_rows, Chapter)\n return chapters\n\n def select_chapter(self, chapter_url: str) -> Chapter:\n cur = self.__select(\"chapters\", conditions=(\"url\", chapter_url))\n chapter_row = cur.fetchone()\n chapter = self.__convert_row(chapter_row, Chapter)\n return chapter\n\n def select_metas(self) -> [Meta]:\n cur = self.__select(\"metas\")\n meta_rows = cur.fetchall()\n metas = self.__convert_rows(meta_rows, Meta)\n return metas\n\n def select_meta(self, novel_url) -> Meta:\n cur = self.__select(\"metas\", conditions=(\"novel_url\", novel_url))\n meta_row = cur.fetchone()\n meta = self.__convert_row(meta_row, Meta)\n return meta\n","sub_path":"novelreader/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":9229,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"569810311","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Apr 22 14:50:38 2019\n\n@author: Shubham\n\"\"\"\n\nimport os, time, random, argparse\nimport os.path as osp\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport collections\nimport torch\nimport torchvision\nimport torch.nn as nn\nimport torchvision.transforms as transforms\nimport torchvision.datasets as datasets\nimport torch.backends.cudnn as cudnn\nimport torch.optim\nimport cv2\nfrom torch.utils import data\nfrom PIL import Image\nimport torch.nn.functional as F\nfrom torch.optim.lr_scheduler import MultiStepLR\nimport torch.optim as optim\nimport time\nimport pickle as pkl\nimport h5py\nimport copy\nimport glob\nimport torchvision.transforms as trans\n\nclass h5_loader(data.Dataset):\n def __init__(self, file_path):\n self.file_list = [f for f in glob.glob(os.path.join(file_path, '*.h5'))]\n self.len = len(self.file_list)\n\n def __getitem__(self, index):\n self.file = h5py.File(self.file_list[index], 'r')\n self.data = self.file.get('Feature')\n self.data = torch.tensor(np.array(self.data)) \n self.label = self.file.get('label')\n return np.array(self.data), np.array(self.label)\n\n def __len__(self):\n return self.len\n\nclass Index_pred(nn.Module):\n def __init__(self, in_channels=60, out_channels=60):\n super(Index_pred, self).__init__()\n self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=1, padding=1)\n \n def forward(self, x):\n out = self.conv1(x)\n max_val, _ = torch.max(out, dim = 1)\n max_val, _ = torch.max(max_val, dim = 1) \n out = F.sigmoid(out)\n return out\n \ndef train(trainloader, net, criterion, optimizer, device, scheduler, epochs=2):\n for 
epoch in range(epochs):\n scheduler.step() # note: recent PyTorch expects scheduler.step() after the epoch's optimizer steps\n start = time.time()\n running_loss = 0.0\n for i, (images, target) in enumerate(trainloader):\n images = images.to(device)\n target = target.to(device).float()\n target = target/4\n optimizer.zero_grad()\n output = net(images)\n loss = criterion(output, target)\n loss.backward()\n optimizer.step()\n running_loss += loss.item()\n if i % 100 == 99: \n end = time.time()\n print('[epoch %d, iter %5d] loss: %.3f elapsed time %.3f' %\n (epoch + 1, i + 1, running_loss / 100, end-start))\n start = time.time()\n running_loss = 0.0\n print('Finished Training')\n \ndef test(testloader, net, device):\n correct = 0\n total = 0\n with torch.no_grad():\n for data in testloader:\n pred, labels = data\n _, c, h, w = pred.shape\n pred = pred.to(device)\n labels = labels.to(device)\n labels = labels/4\n outputs = net(pred)\n # clone before bucketing: with a plain alias the later masks would be\n # evaluated on values already overwritten by the earlier assignments\n out_c = outputs.clone()\n outputs[(out_c >= 0.0) & (out_c < 0.25)] = 0\n outputs[(out_c >= 0.25) & (out_c < 0.5)] = 1\n outputs[(out_c >= 0.5) & (out_c < 0.75)] = 2\n outputs[(out_c >= 0.75) & (out_c <= 1.0)] = 3\n print(outputs)\n total = c * h * w\n labels = labels * 4\n correct = (outputs.long() == labels.long()).sum().item()\n print('Accuracy of the network on the 1 sample: %d %%' % (\n 100 * correct / total))\n \ndef main(trainloader, testloader):\n os.environ['CUDA_VISIBLE_DEVICES']='2'\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n print('Running on device : {}'.format(device))\n #device = torch.device('cpu')\n net = Index_pred().to(device)\n saved_state_dict = torch.load('index_pred.pth') # the .pth stores a full module (see the commented torch.save below), hence .state_dict()\n net.load_state_dict(saved_state_dict.state_dict())\n criterion = nn.MSELoss()\n # optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9, weight_decay=5e-4)\n optimizer = optim.Adam(net.parameters(), lr=0.01, eps=1e-08)\n scheduler = MultiStepLR(optimizer, milestones=[50, 100], gamma=0.1)\n # train(trainloader, net, criterion, optimizer, device, scheduler)\n # torch.save(net, 'index_pred1.pth') # save the model\n test(testloader, net, device) # testing\n \nif __name__ == \"__main__\":\n # obg = h5_loader('./SWWAE_dataset/')\n # print(obg[0])\n train_loader = torch.utils.data.DataLoader(h5_loader('./SPN_dataset/'), batch_size=2, shuffle=True, num_workers=1, pin_memory=True) # Data loader for train set\n main(train_loader, train_loader)","sub_path":"SPN_training/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":4555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"300344194","text":"import os\nimport requests\nfrom pprint import pprint\nimport pymysql\nimport json\n\ndef get_conn():\n return pymysql.connect(\n host=os.getenv('mysql_host'),\n user=os.getenv('mysql_user'),\n password=os.getenv('mysql_pw'),\n port=3306,\n db='projectdb',\n charset='utf8')\n\nsql_list = []\n\ndate = ''\nmain = ''\ndesc = ''\nmaxtemp = ''\nmintemp = ''\n\nwith open('weathercode.json') as json_file: \n codes = json.load(json_file)\n\napikey = os.getenv('WeatherAPI')\n\nfor citycode, cityname in codes.items():\n url = \"http://api.openweathermap.org/data/2.5/forecast?id={}&APPID={}\".format(citycode, apikey)\n\n res = requests.get(url).text\n\n weather = json.loads(res)\n\n\n for w in weather['list']:\n \n dt = w['dt_txt']\n\n if dt[11:19] not in ('15:00:00', '03:00:00'):\n continue\n else:\n desc = w['weather'][0]['description']\n main = w['weather'][0]['main']\n\n date = dt[0:10]\n\n tp = w['main']['temp']\n temp = round(tp - 273.15)\n\n 
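# (added note) forecast timestamps are UTC: the 03:00 slot seeds the day's\n # maxtemp and the 15:00 slot supplies mintemp before the row is emitted.\n # This mapping assumes a local timezone (e.g. UTC+9) where those slots fall\n # near midday and midnight; maxtemp falls back to the sentinel 100 when no\n # 03:00 reading preceded the 15:00 one.\n 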
if (dt[11:19] == '03:00:00'):\n maxtemp = temp\n\n continue\n\n else:\n if maxtemp == \"\":\n maxtemp = 100\n\n mintemp = temp\n\n tupledata = (citycode, cityname, date, main, desc, mintemp, maxtemp)\n\n print(tupledata)\n \n sql_list.append(tupledata)\n\n\nsql_insert = 'insert into Weather(citycode, cityname, dt, main, description, mintemp, maxtemp) values(%s, %s, %s, %s, %s, %s, %s)'\n\nconn = get_conn()\n\n\nwith conn:\n cur = conn.cursor()\n cur.executemany(sql_insert, sql_list)\n\n\nprint(\"The weather data have been successfully stored\")\n\n\n\n\n\n\n\n \n\n\n\n","sub_path":"weather/weathertomysql.py","file_name":"weathertomysql.py","file_ext":"py","file_size_in_byte":1743,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"118012435","text":"# GradCAM, Guided Backpropagation, and guided GradCAM utilities\n# Importations\nimport sys\n# sys.path.append('FireOccurrence/experiments')\n\nimport os\nimport numpy as np\nimport cv2\nfrom matplotlib import pyplot as plt\nfrom tensorflow.keras import backend as K\nfrom tensorflow.keras.preprocessing import image\nfrom tensorflow.keras.models import load_model\nfrom tensorflow.keras.applications.vgg16 import VGG16, preprocess_input, decode_predictions\nfrom tensorflow.keras.applications.resnet50 import ResNet50\nfrom tensorflow.keras.models import Model\nfrom tqdm import tqdm\nfrom compress_pickle import dump as cdump\n\nimport tensorflow as tf\nfrom tensorflow.python.framework import ops\ntf.compat.v1.disable_eager_execution()\n\n\n# Build the model\ndef build_model(model=None, \n modelPath=None,\n model_constructor=None,\n dims=(38,31,1),\n pretrained='VGG16'):\n\n # Multiple returns\n if model is None and modelPath is None:\n if pretrained == 'VGG16':\n return VGG16(include_top=True, weights='imagenet')\n elif pretrained == 'ResNet50':\n return ResNet50(include_top=True, weights='imagenet')\n if model is not None:\n # Model from scratch\n model = model\n return model\n \n if modelPath is not None:\n return load_model(modelPath)\n\n# Load and preprocess image\ndef load_image(path,\n targetSize=(32,38),\n preprocess=True):\n \n x = image.load_img(path, target_size=targetSize)\n if preprocess:\n x = image.img_to_array(x)\n x = np.expand_dims(x, axis=0)\n x = preprocess_input(x)\n return x\n\n# Deprocess image\ndef deprocess_image(x):\n \"\"\"Same normalization as in:\n https://github.com/fchollet/keras/blob/master/examples/conv_filter_visualization.py\n \"\"\"\n x = x.copy()\n if np.ndim(x) > 3:\n x = np.squeeze(x)\n # normalize tensor: center on 0., ensure std is 0.1\n x -= x.mean()\n x /= (x.std() + 1e-5)\n x *= 0.1\n\n # clip to [0, 1]\n x += 0.5\n x = np.clip(x, 0, 1)\n\n # convert to RGB array\n x *= 255\n if K.image_data_format() == 'th':\n x = x.transpose((1, 2, 0))\n x = np.clip(x, 0, 255).astype('uint8')\n return x\n\n# Utility function to normalize a tensor by its L2 norm\ndef normalize(x):\n return (x + 1e-10) / (K.sqrt(K.mean(K.square(x))) + 1e-10)\n\n# Guided Model: Changes gradient function for all ReLu activations according to Guided Backpropagation.\ndef build_guided_model(model):\n if \"GuidedBackProp\" not in ops._gradient_registry._registry:\n @ops.RegisterGradient(\"GuidedBackProp\")\n def _GuidedBackProp(op, grad):\n dtype = op.inputs[0].dtype\n return grad * tf.cast(grad > 0., dtype) * \\\n tf.cast(op.inputs[0] > 0., dtype)\n\n g = tf.compat.v1.get_default_graph()\n with g.gradient_override_map({'Relu': 'GuidedBackProp'}):\n new_model = build_model(model)\n return 
new_model\n\n# Guided Backpropagation method for visualizing input saliency\ndef guided_backprop(input_model,\n images,\n layer_name):\n \n input_imgs = input_model.input\n #print('input_imgs:', input_imgs)\n layer_output = input_model.get_layer(layer_name).output\n grads = K.gradients(layer_output, input_imgs)[0]\n backprop_fn = K.function([input_imgs, K.learning_phase()], [grads])\n grads_val = backprop_fn([images, 0])[0]\n return grads_val\n\n# GradCAM method for visualizing input saliency\ndef grad_cam(input_model,\n image,\n cls,\n layer_name,\n classes=[0],\n targetSize=(32,38)):\n\n #image = np.expand_dims(image, axis=0)\n loss = tf.gather_nd(input_model.output, np.dstack([range(1), classes])[0])\n layer_output = input_model.get_layer(layer_name).output\n grads = K.gradients(loss, layer_output)[0]\n gradient_fn = K.function([input_model.input, K.learning_phase()], [layer_output, grads])\n\n conv_output, grads_val = gradient_fn([image, 0]) \n weights = np.mean(grads_val, axis=(1, 2))\n cams = np.einsum('ijkl,il->ijk', conv_output, weights)\n \n # Process CAMs\n new_cams = np.empty((1, targetSize[0], targetSize[1]))\n #print(\"new_camsShape:\", new_cams.shape)\n for i in range(new_cams.shape[0]):\n cam_i = cams[i] - cams[i].mean()\n cam_i = (cam_i + 1e-10) / (np.linalg.norm(cam_i, 2) + 1e-10)\n new_cams[i] = cv2.resize(cam_i, (targetSize[1], targetSize[0]), cv2.INTER_LINEAR)\n new_cams[i] = np.maximum(new_cams[i], 0)\n new_cams[i] = new_cams[i] / new_cams[i].max()\n \n return np.squeeze(new_cams, axis=0)\n\n# GradCAM method for visualizing input saliency.\n# Same as grad_cam but processes multiple images in one run\ndef grad_cam_batch(input_model,\n images,\n classes,\n layer_name,\n targetSize=(32,38)):\n \n loss = tf.gather_nd(input_model.output, np.dstack([range(images.shape[0]), classes])[0])\n layer_output = input_model.get_layer(layer_name).output\n grads = K.gradients(loss, layer_output)[0]\n gradient_fn = K.function([input_model.input, K.learning_phase()], [layer_output, grads])\n\n conv_output, grads_val = gradient_fn([images, 0]) \n weights = np.mean(grads_val, axis=(1, 2))\n cams = np.einsum('ijkl,il->ijk', conv_output, weights)\n \n # Process CAMs\n new_cams = np.empty((images.shape[0], targetSize[1], targetSize[0]))\n for i in range(new_cams.shape[0]):\n cam_i = cams[i] - cams[i].mean()\n cam_i = (cam_i + 1e-10) / (np.linalg.norm(cam_i, 2) + 1e-10)\n new_cams[i] = cv2.resize(cam_i, targetSize, cv2.INTER_LINEAR)\n new_cams[i] = np.maximum(new_cams[i], 0)\n new_cams[i] = new_cams[i] / new_cams[i].max()\n \n return new_cams\n\n# Compute saliency using all three approaches.\n# -layer_name: layer to compute gradients;\n# -cls: class number to localize (-1 for most probable class).\ndef compute_saliency(model,\n guided_model,\n img_path,\n layer_name='block5_conv3',\n cls=-1,\n visualize=True,\n save=True,\n path=None,\n top_n=2,\n inputSize=(32,38),\n channels=3,\n size=(15, 10)):\n \n # Pre-Process image\n # Loop\n if channels == 1:\n image = cv2.imread(img_path, 0)\n else:\n image = cv2.imread(img_path)\n image = cv2.resize(image, (inputSize[1], inputSize[0]))\n \n if channels == 1:\n image = image.reshape((image.shape[0], image.shape[1], 1))\n \n if channels != 1:\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n \n # To array and process\n image = np.array(image)\n print(\"Shape image:\", image.shape)\n preprocessed_input = image / 255.\n preprocessed_input = np.expand_dims(preprocessed_input, 0)\n print(\"Preprocessed inputs:\", preprocessed_input.shape)\n # Gather 
predictions\n predictions = model.predict(preprocessed_input)\n \n # Get top n (2 by default)\n top = np.sort(predictions[0])[-top_n:][::-1] # largest n probabilities (the original sliced [:top_n], i.e. the smallest)\n classes = np.argsort(predictions[0])[-top_n:][::-1]\n print(\"top:\", top)\n print(\"predictions:\", predictions)\n print(\"classes:\", classes)\n \n # Predictions\n print('Model prediction:')\n for c, p in zip(classes, np.array(top).flatten()):\n #print(c,p)\n label = 'fire' if c == 1 else 'no_fire'\n print('\\t{:15s}\\t({})\\twith probability {:.3f}'.format(label, c, p))\n \n # If cls = -1, most likely classes\n if cls == -1:\n cls = np.argmax(predictions)\n class_name = \"fire\" if classes[0] == 1 else 'no_fire'\n print(\"Explanation for '{}'\".format(class_name))\n #print(\"cls:\", cls)\n \n # Calculate the 3 methods\n gradcam = generate_gradCAM(batch_size=1, \n layer=layer_name,\n model=model,\n processedimages=preprocessed_input, \n rawimages=preprocessed_input,\n save=False,\n showID=-1,\n title='Test',)\n \n \n #gradcam = grad_cam(model, preprocessed_input / 255., cls, layer_name, classes=classes[cls], targetSize=inputSize)\n print(\"gradcam:\", gradcam.shape)\n gb = guided_backprop(guided_model, preprocessed_input, layer_name)\n print(\"gb:\", gb.shape)\n guided_gradcam = gb * gradcam[..., np.newaxis]\n print(\"guided_backprop:\", guided_gradcam.shape)\n\n # Save outputs\n if save:\n jetcam = cv2.applyColorMap(np.uint8(255 * gradcam), cv2.COLORMAP_JET)\n jetcam = (np.float32(jetcam) + load_image(img_path, preprocess=False)) / 2\n if path is not None:\n gradcamPath = os.path.join(path, 'gradcam.png')\n guidedBackPath = os.path.join(path, 'guided_backprop.png')\n guidedcamPath = os.path.join(path, 'guided_gradcam.png')\n else:\n gradcamPath = 'gradcam.png'\n guidedBackPath = 'guided_backprop.png'\n guidedcamPath = 'guided_gradcam.png'\n \n cv2.imwrite(gradcamPath, np.uint8(jetcam))\n cv2.imwrite(guidedBackPath, deprocess_image(gb[0]))\n cv2.imwrite(guidedcamPath, deprocess_image(guided_gradcam[0]))\n \n # Visualize (show)\n if visualize:\n plt.figure(figsize=size)\n plt.subplot(131)\n plt.title('GradCAM')\n plt.axis('off')\n if len(image.shape) >= 3 and channels == 1:\n image = image.squeeze(-1)\n plt.imshow(image)\n plt.imshow(gradcam[0], cmap='jet', alpha=0.5)\n\n plt.subplot(132)\n plt.title('Guided Backprop')\n plt.axis('off')\n plt.imshow(np.flip(deprocess_image(gb[0]), -1))\n \n plt.subplot(133)\n plt.title('Guided GradCAM')\n plt.axis('off')\n plt.imshow(np.flip(deprocess_image(guided_gradcam[0]), -1))\n plt.show()\n \n return gradcam, gb, guided_gradcam\n\n# Generate gradcam from rawimages\ndef generate_gradCAM(model, \n rawimages,\n processedimages, \n layer, \n batch_size=32, \n save=False,\n savefile=None,\n title='',\n showID=0):\n # Let's set classes for explanations as most probable class for each image.\n top = np.argmax(model.predict(processedimages), 1)\n gradcam = np.empty((processedimages.shape[:-1]))\n \n # Number of pictures\n N = processedimages.shape[0]\n\n # Batch loop\n for i in tqdm(range((N + batch_size - 1) // batch_size)):\n start = i * batch_size\n end = min((i+1) * batch_size, N)\n gradcam[start:end] = grad_cam_batch(model, \n processedimages[start:end],\n top[start:end],\n layer,\n (processedimages.shape[2], processedimages.shape[1]))\n\n # Save file\n if save:\n outfile = 'gradcam.lzma' if savefile is None else savefile\n cdump(gradcam, outfile, compression='lzma')\n\n # Show\n if showID >= 0: \n i = showID\n plt.title(title)\n plt.imshow(rawimages[i])\n plt.imshow(gradcam[i], alpha=0.3, cmap='jet')\n plt.show()\n \n # Return gradcam array\n return gradcam\n
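\n# --- (added) usage sketch for generate_gradCAM, illustrative only -------------\n# Not part of the original module; assumes an ImageNet VGG16 and a dummy batch,\n# purely to show the expected call order and output shapes:\n#\n# vgg = build_model(pretrained='VGG16') # expects 224x224x3 inputs\n# imgs = np.random.rand(4, 224, 224, 3).astype('float32')\n# cams = generate_gradCAM(vgg, imgs, imgs, layer='block5_conv3',\n# batch_size=2, showID=-1)\n# # cams.shape == (4, 224, 224); each cams[i] is a [0, 1] saliency map\n# ------------------------------------------------------------------------------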
\n# Show attention map using the rawimage and gradcam output\ndef show_gradCAM(rawimage, \n gradcam, \n showID=0, \n title=''):\n i = showID\n plt.title(title)\n plt.imshow(rawimage[i])\n plt.imshow(gradcam[i], alpha=0.5, cmap='jet')\n plt.show()\n \n# Generate guided backprop from guided model\ndef generate_guidedbackprop(guided_model, \n processedimages, \n deprocess_object,\n layer, \n batch_size=32, \n save=False,\n savefile=None,\n title='',\n showID=0):\n # Container\n gbp = np.empty((processedimages.shape))\n N = processedimages.shape[0]\n\n # Batch loop\n for i in range((N + batch_size - 1) // batch_size):\n start = i * batch_size\n end = min((i+1) * batch_size, N)\n gbp[start:end] = guided_backprop(guided_model, \n processedimages[start:end], \n layer)\n\n # Save\n if save:\n outfile = 'guided_backprop.lzma' if savefile is None else savefile\n cdump(gbp, outfile, compression='lzma') # dump gbp (the original dumped 'gradcam', which is undefined in this scope)\n \n # Show\n if showID >= 0:\n i = showID\n plt.title(title)\n plt.imshow(np.flip(deprocess_object(gbp[i]), -1), cmap='jet')\n plt.show()\n \n # Return guided backprop\n return gbp\n\n# Show guidedbp\ndef show_guidedBP(gbp, \n deprocess_object,\n title='', \n showID=0):\n i = showID\n plt.title(title)\n plt.imshow(np.flip(deprocess_object(gbp[i]), -1), cmap='jet')\n plt.show()\n \n# Generate guided GradCAM\ndef generate_guidedgradCAM(gbp, \n gradcam,\n showID=0,\n save=False,\n savefile=None,\n title='',\n deprocess_object=None):\n # Guided gradCam\n guided_gradcam = gbp * gradcam[..., np.newaxis]\n\n # Save\n if save:\n outfile = 'guided_gradcam.lzma' if savefile is None else savefile\n cdump(guided_gradcam, outfile, compression='lzma') # honor 'savefile' (the original hard-coded the default name)\n \n # Show\n if showID >= 0:\n i = showID\n plt.title(title)\n plt.imshow(deprocess_object(guided_gradcam[i]), \n alpha=0.5, cmap='jet')\n plt.show()\n \n # Return guided gradcam\n return guided_gradcam\n\n# Show guided GradCAM\ndef show_guidedgradCAM(ggcam, \n title='', \n showID=0, \n deprocess_object=None):\n i = showID\n plt.title(title)\n plt.imshow(deprocess_object(ggcam[i]), \n alpha=0.5, cmap='jet')\n plt.show()\n \n# Overlay gradcam maps on the raw images and save them (returns array of images)\ndef gcam_processed(rawimages, \n gcam,\n outGCAM=os.path.join('..', 'exp_outputs', 'GCAM_output'),\n show=False, \n size=(5,5), \n outsize=(128,128)):\n from imutils import paths # assumed to be imutils.paths (list_images); the module itself never imports 'paths'\n # Process a batch of rawimages\n if not os.path.exists(outGCAM):\n os.makedirs(outGCAM)\n\n # Size\n if show:\n plt.rcParams['figure.figsize'] = size\n\n # Save processed pictures\n for idx, image in enumerate(rawimages):\n fileName = os.path.join(outGCAM, str(idx) + '.png')\n im = plt.imshow(rawimages[idx])\n im2 = plt.imshow(gcam[idx], alpha=0.3, cmap='jet')\n plt.axis('off')\n plt.tight_layout()\n plt.axis(\"tight\") # gets rid of white border\n plt.axis(\"image\") # square up the image instead of filling the \"figure\" space\n plt.savefig(fileName, bbox_inches='tight', pad_inches=0.0)\n\n # Read back the gcam processed\n GCAM_imagesPaths = sorted(list(paths.list_images(outGCAM)))\n GCAM_images = []\n \n # Process back\n for imagepath in GCAM_imagesPaths:\n image = cv2.imread(imagepath)\n image = cv2.resize(image, outsize)\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n GCAM_images.append(image) \n \n # Return\n return GCAM_images\n
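\n# --- (added) usage sketch for gcam_processed, illustrative only ---------------\n# 'raw' would be the (N, H, W, 3) batch fed to the model and 'cams' the output\n# of generate_gradCAM above:\n#\n# overlays = gcam_processed(raw, cams, outsize=(128, 128))\n# # 'overlays' is a list of N RGB arrays with the jet heatmap rendered on top\n# ------------------------------------------------------------------------------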
\n# Save guided-backprop maps to disk and read them back (returns array of images)\ndef gprop_processed(gprop,\n outGPROP=os.path.join('..', 'exp_outputs', 'GPROP_output'),\n deprocess_object=None,\n show=False, \n size=(5,5), \n outsize=(128,128)):\n from imutils import paths # assumed to be imutils.paths, as in gcam_processed\n # Process a batch of guided-backprop maps\n if not os.path.exists(outGPROP):\n os.makedirs(outGPROP)\n\n # Save processed pictures\n for idx, image in enumerate(gprop): # iterate over 'gprop' (the original referenced an undefined 'rawimages')\n plt.imsave(os.path.join(outGPROP, str(idx) + '.png'), \n np.flip(deprocess_object(gprop[idx]), -1), \n cmap='jet', format='png')\n \n # Show\n if show:\n plt.rcParams['figure.figsize'] = size\n plt.imshow(np.flip(deprocess_object(gprop[idx]), -1), \n cmap='jet',)\n\n # Read back the gprop processed\n GPROP_imagesPaths = sorted(list(paths.list_images(outGPROP)))\n GPROP_images = []\n \n # Process back\n for imagepath in GPROP_imagesPaths:\n image = cv2.imread(imagepath)\n image = cv2.resize(image, outsize)\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n GPROP_images.append(image) \n \n # Return\n return GPROP_images\n\n# Combine guided backprop with gradcam and save the overlays (returns array of images)\ndef guidedGCAM_processed(gprop,\n gradcam,\n outGGCAM=os.path.join('..', 'exp_outputs', 'GGCAM_output'),\n deprocess_object=None,\n show=False, \n size=(5,5), \n alpha=0.5,\n outsize=(128,128)):\n from imutils import paths # assumed to be imutils.paths, as above\n # Process a batch of guided gradcam maps\n if not os.path.exists(outGGCAM):\n os.makedirs(outGGCAM)\n \n # Guided gradCam\n guided_gradcam = gprop * gradcam[..., np.newaxis]\n\n # Save processed pictures\n for idx, image in enumerate(guided_gradcam):\n plt.imsave(os.path.join(outGGCAM, str(idx) + '.png'), \n np.flip(deprocess_object(guided_gradcam[idx]), -1), \n cmap='jet', format='png')\n \n # Show\n if show:\n plt.rcParams['figure.figsize'] = size\n plt.imshow(np.flip(deprocess_object(guided_gradcam[idx]), -1), \n cmap='jet', alpha=alpha)\n\n # Read back the ggcam processed\n GGCAM_imagesPaths = sorted(list(paths.list_images(outGGCAM)))\n GGCAM_images = []\n \n # Process back\n for imagepath in GGCAM_imagesPaths:\n image = cv2.imread(imagepath)\n image = cv2.resize(image, outsize)\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n GGCAM_images.append(image) \n \n # Return\n return GGCAM_images\n\n# Plot all maps in squares\ndef plot_filters(feature_maps, \n size=(20,20), \n cmap=None):\n square = np.sqrt(feature_maps.shape[-1]).astype(int) # plain int (np.int is deprecated)\n ix = 1\n for _ in range(square):\n for _ in range(square):\n # specify subplot and turn off axis\n ax = plt.subplot(square, square, ix)\n ax.set_xticks([])\n ax.set_yticks([])\n\n # plot filter channel in grayscale\n plt.imshow(feature_maps[0, :, :, ix-1], cmap=cmap)\n ix += 1\n\n # show the figure\n plt.rcParams['figure.figsize'] = size[0], size[1]\n plt.show() \n \n# Get conv layers for feature maps\ndef get_featuremaps_model(model, layers_idxs):\n # Check model\n if model == 'VGG16':\n model = VGG16()\n if model == 'ResNet50':\n model = ResNet50()\n \n # Get Outputs\n outputs = [model.layers[i+1].output for i in layers_idxs] # use the 'layers_idxs' argument (the original referenced an undefined 'ixs')\n \n # Generate new model\n model = Model(inputs=model.inputs, outputs=outputs)\n \n # New model with multiple outputs\n return model","sub_path":"src/utils/gradcam_utils_tf2.py","file_name":"gradcam_utils_tf2.py","file_ext":"py","file_size_in_byte":19678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"312009009","text":"import pylab as p\nimport numpy as np\n\n# Setup Parameters\n\n# Values of mu and sigma are given \nmu=0.1; \nsigma=0.26; \nS0=39; # Price at time zero\nn_path=1000; # Total number of simulations\nn= n_partitions = 1000; # Number of partitions in the interval \n\n# Create Brownian Paths \nt = p.linspace (0,3,n+1); \ndB = p.randn(n_path,n+1) * p.sqrt(3.0/n); # increments ~ N(0, dt) with dt = 3/n (the original /sqrt(n) implicitly assumed T = 1)\ndB[:,0] = 0; 
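\n# (added note) with n steps on [0,3], dt = 3/n, so each Brownian increment is\n# sqrt(3/n)*randn and the cumulative sum below yields Var[B(t)] = t.\n# Closed-form GBM moments for reference: E[S(t)] = S0*exp(mu*t), so here\n# E[S(3)] = 39*exp(0.3) ~ 52.6, and Var[S(t)] = S0^2*exp(2*mu*t)*(exp(sigma^2*t)-1).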
\nB=dB.cumsum(axis=1);\n\n# Calculate stock prices\nnu = mu - sigma*sigma/2;\nS =p.zeros_like(B);\nS[:,0] = S0\nS[:,1:] = S0*p.exp(nu*t[1:]+sigma*B[:,1:]);\n\n#Plot the 5 realizations with x label and y label \nS_plot= S[0:5]\np.plot(t,S_plot.transpose());\np.xlabel('Time, t');\np.ylabel('Stock prices, RM');\np.title('5 REALIZATIONS OF THE GEOMETRIC BROWNIAN MOTION')\np.show();\n\n# Calculate the expected value of S(3)\nlast_price_x = p.array(S[:,-1]);\nexpected_price_S3 = np.mean(last_price_x);\nprint('Expected value, E[S(3)] = ',expected_price_S3);\n\n# Calculate the variance of S(3)\nvariance_S3 = np.var(last_price_x);\nprint('Variance, Var[S(3)] = ',variance_S3);\n\n# Calculate P[S(3)>39]\ny = last_price_x > 39; #find out all the values that are larger than 39\ntotal = p.sum(y) #add together all the values that are larger than 39 \nprobability= total/n_path \nprint('P[S(3)>39] = ' ,probability);\n\n# Calculate E[S(3)|S(3)>39]\nz = p.sum(last_price_x * y) \nexpected_value = z/total\nprint('E[S(3)|S(3)>39] = ' ,expected_value);\n ","sub_path":"gbm.py","file_name":"gbm.py","file_ext":"py","file_size_in_byte":1367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"288651564","text":"\"\"\"\n CDNSim\n\n file: netStreamingPrimitives.py\n\n NEC Europe Ltd. PROPRIETARY INFORMATION\n\n This software is supplied under the terms of a license agreement\n or nondisclosure agreement with NEC Europe Ltd. and may not be\n copied or disclosed except in accordance with the terms of that\n agreement. The software and its source code contain valuable trade\n secrets and confidential information which have to be maintained in\n confidence.\n Any unauthorized publication, transfer to third parties or duplication\n of the object or source code - either totally or in part - is\n prohibited.\n\n Copyright (c) 2016 NEC Europe Ltd. All Rights Reserved.\n\n Author: Anton Ivanov \n\n NEC Europe Ltd. DISCLAIMS ALL WARRANTIES, EITHER EXPRESS OR IMPLIED,\n INCLUDING BUT NOT LIMITED TO IMPLIED WARRANTIES OF MERCHANTABILITY\n AND FITNESS FOR A PARTICULAR PURPOSE AND THE WARRANTY AGAINST LATENT\n DEFECTS, WITH RESPECT TO THE PROGRAM AND THE ACCOMPANYING\n DOCUMENTATION.\n\n No Liability For Consequential Damages IN NO EVENT SHALL NEC Europe\n Ltd., NEC Corporation OR ANY OF ITS SUBSIDIARIES BE LIABLE FOR ANY\n DAMAGES WHATSOEVER (INCLUDING, WITHOUT LIMITATION, DAMAGES FOR LOSS\n OF BUSINESS PROFITS, BUSINESS INTERRUPTION, LOSS OF INFORMATION, OR\n OTHER PECUNIARY LOSS AND INDIRECT, CONSEQUENTIAL, INCIDENTAL,\n ECONOMIC OR PUNITIVE DAMAGES) ARISING OUT OF THE USE OF OR INABILITY\n TO USE THIS PROGRAM, EVEN IF NEC Europe Ltd. HAS BEEN ADVISED OF THE\n POSSIBILITY OF SUCH DAMAGES.\n\n THIS HEADER MAY NOT BE EXTRACTED OR MODIFIED IN ANY WAY.\n\"\"\"\n\nfrom __future__ import print_function\nimport networkx as nx\nimport matplotlib.pyplot as plt\nimport numpy.random\nimport random\nimport Queue\nimport math\nimport csv\nimport sys\nimport os\nimport re\n\nnumpy.random.seed(42)\nrandom.seed(42)\n\nEVENT_RESERVED = 0\nEVENT_USER_REQUEST = 1\nEVENT_STREAM_START = 2\nEVENT_STREAM_COMPLETED = 3\nEVENT_CONSUME_BEGIN = 4\nEVENT_CONSUME_COMPLETE = 5\nEVENT_CONSUME_BUFFER_EMPTY = 6\n\nEVENT_STREAM_EXPAND = 7\nEVENT_NOISE_USER_REQUEST = 8\nEVENT_SWITCH_TO_LIVERATE = 9\nEVENT_CACHE_READY = 10\n\nEVENT_CHANGE_REQUEST_RATE = 11\nEVENT_SIM_FINALIZE = 12\nEVENT_PERIODIC_STATS = 13\n\n\nNAMES_EVENTS = ['---',\n 'Connect reqst',\n 'Dwnl. started',\n 'Dwnl. 
stopped',\n 'Playing start',\n 'Playing stop',\n 'Buffer empty',\n 'Stream expand',\n 'Noise request',\n 'to live-TRate']\n\nCOLORS_EVENTS = []\nm = plt.cm.get_cmap('Paired')\nfor i in range(1, len(NAMES_EVENTS) + 1):\n COLORS_EVENTS.append(m(float(i) / (len(NAMES_EVENTS) + 1)))\n\nPROPAGATION_DELAY = 0.01\n# Max rates for video streaming quality: 360p, 480p, 720p, 1080p, 2K, 4K\nSTREAM_RATES = [1000000, 2500000, 5000000, 8000000, 10000000, 20000000]\nFAST_BACKBONE_LINK_BANDWIDTH = 40000000000.0 # 40 Gbps\nBACKBONE_LINK_BANDWIDTH = 10000000000.0 # 10 Gbps\nBACKBONE_LINK_DELAY = 0.005 # 5ms in sec\nLAN_LINK_RATE = 25000000.0 # 25 Mbps\nNUMBER_CHANNELS = 200\nEXPAND_INTERVAL = 1 # seconds\n\nMIN_PBK_TIME = 60.0 # sec\nMOD_PBK_TIME = 1800.0 # sec\nMAX_PBK_TIME = 2700.0 # sec\nMEAN_PBK_TIME = (MIN_PBK_TIME + MOD_PBK_TIME + MAX_PBK_TIME) / 3 # sec\n# MEAN_PBK_TIME is valid for triangular distribution\n\nSTREAM_NORMAL = 0\nSTREAM_NOISE = 1\nSTREAM_CACHE = 2\n\nMODEL_USER_BEHAVIOR = True\nLOCAL_CACHE_ONLY = True\n\nglobalStreamID = 0\nglobalNoiseStreamID = 0\nglobalCacheStreamID = 0\nglobalEventID = 0\nglobalCacheID = 1000000\n\n\nclass event:\n __slots__ = ['time', 'objRef', 'type', 'id']\n\n def __init__(self, time, objRef, typ):\n self.time = time\n self.objRef = objRef\n self.type = typ\n global globalEventID\n self.id = globalEventID\n globalEventID += 1\n return\n\n def __lt__(self, other):\n return (self.time, self.id) < (other.time, other.id)\n\n def __ge__(self, other):\n return (self.time, self.id) > (other.time, other.id)\n\n\nclass netLink:\n\n def __init__(self, sim, ca, as_nodeA, as_nodeB):\n self.capacity = float(ca)\n self.netDataStreams = []\n self.as_nodeA = as_nodeA\n self.as_nodeB = as_nodeB\n self.simRef = sim\n return\n\n def __str__(self):\n s = 'netLink: ' + str(self.as_nodeA) + '-' + str(self.as_nodeB) +\\\n ', capacity=' + str(self.capacity) +\\\n ', capacityLeft=' + str(self.getCapacityLeft()) +\\\n ', occupied by ' + str(len(self.netDataStreams)) + ' streams'\n return s\n\n def getCapacityLeft(self):\n capacityLeft = self.capacity\n for s in self.netDataStreams:\n capacityLeft -= s.transmitRate\n return capacityLeft\n\n def getHopsTo(self, link):\n assert link != self\n path = nx.shortest_path(\n self.simRef.urRef.gnGraph.netGraph,\n self.as_nodeA,\n link.as_nodeA\n )\n path.remove(self.as_nodeA)\n path.remove(link.as_nodeA)\n if self.as_nodeB in path:\n path.remove(self.as_nodeB)\n if link.as_nodeB in path:\n path.remove(link.as_nodeB)\n return len(path) + 1\n\n def getFairThroughput(self, nNewStreams):\n res = self.capacity\n nStreams = len(self.netDataStreams) + nNewStreams\n if len(self.netDataStreams) > 0:\n share = self.capacity / nStreams\n nExcludeStreams = 0\n for s in self.netDataStreams:\n if s.bottleneckLink != self and s.transmitRate < share:\n nExcludeStreams += 1\n res -= s.transmitRate\n if nExcludeStreams != nStreams:\n res /= (nStreams - nExcludeStreams)\n return res\n\n def allocateBandwidthForNewStream(self, curTime, newTR):\n for s in self.netDataStreams:\n if newTR < s.transmitRate:\n s.setTransmitRate(newTR, curTime)\n return\n\n def process(self, ev):\n # nothing\n return\n\n\nclass cacheNode:\n\n def __init__(self, sim, gnGraph, ASNum):\n global globalCacheID\n self.id = globalCacheID\n globalCacheID += 1\n self.ASnum = ASNum\n self.simRef = sim\n self.gnGraph = gnGraph\n self.ready = False\n self.waitingStreams = []\n # listStreamsPerChannelPerRate[STREAM_RATE][CHANNEL_NUMBER][STREAM]\n self.cacheStrs = [None] * len(STREAM_RATES) * 
NUMBER_CHANNELS\n self.lstStrs_Cnl_Rate = [None] * len(STREAM_RATES)\n for j in range(len(STREAM_RATES)):\n self.lstStrs_Cnl_Rate[j] = dict()\n return\n\n def attachNetDataStream(self, stream, curTime):\n if self.ready:\n # attach a stream to the cache instance:\n # a cache stream is created, 'stream' is added as dependent stream\n sRateID = STREAM_RATES.index(stream.consumeRate)\n if stream.channel in self.lstStrs_Cnl_Rate[sRateID]:\n # we have channel with this rate in cache\n self.lstStrs_Cnl_Rate[sRateID][stream.channel].append(\n stream\n )\n else:\n self.lstStrs_Cnl_Rate[sRateID][stream.channel] = [stream]\n if self.cacheStrs[sRateID * NUMBER_CHANNELS + stream.channel] is \\\n None:\n # FIXME: use ip-address as the dest ip instead...\n cSt = netDataStream(\n self.simRef,\n stream.consumeRate,\n stream.srcIP,\n 'cache@'+str(self.ASnum),\n 0,\n stream.channel,\n STREAM_CACHE\n )\n cSt.downCacheRef = self\n path = nx.shortest_path(\n self.gnGraph.netGraph,\n self.ASnum,\n self.gnGraph.ip2as[cSt.srcIP]\n )\n if self.simRef.topArgs.hierarchical:\n # in case of hierarchical caches,\n # on-demand instantiations are not allowed -> 'first=False'\n self.simRef.urRef.routeStreamPath_inclCache(\n path,\n cSt,\n curTime,\n first=False\n )\n else:\n self.simRef.urRef.routeStreamPath(path, cSt, curTime)\n self.cacheStrs[sRateID * NUMBER_CHANNELS + stream.channel] = cSt\n else:\n cSt = self.cacheStrs[sRateID * NUMBER_CHANNELS + stream.channel]\n if cSt.beingConsumed:\n self.simRef.eventPush(\n event(\n curTime + PROPAGATION_DELAY,\n stream,\n EVENT_STREAM_START\n )\n )\n else:\n if self.simRef.topArgs.waitCacheBoot:\n self.waitingStreams.append(stream)\n else:\n return False\n if not stream.connectedToCache:\n stream.upCacheRef = self\n stream.connectedToCache = True\n return True\n\n def detachNetDataStream(self, stream, curTime):\n sRateID = STREAM_RATES.index(stream.consumeRate)\n self.lstStrs_Cnl_Rate[sRateID][stream.channel].remove(stream)\n stream.upCacheRef = None\n if len(self.lstStrs_Cnl_Rate[sRateID][stream.channel]) == 0:\n cSt = self.cacheStrs[sRateID * NUMBER_CHANNELS + stream.channel]\n self.cacheStrs[sRateID * NUMBER_CHANNELS + stream.channel] = None\n cEv = event(curTime, cSt, EVENT_STREAM_COMPLETED)\n cSt.process(cEv)\n if 'static_cache' not in self.gnGraph.netGraph.node[self.ASnum]:\n deleteCache = True\n for sr in range(len(STREAM_RATES)):\n if self.cacheStrs[sr * NUMBER_CHANNELS + stream.channel] \\\n is not None:\n deleteCache = False\n break\n if deleteCache:\n self.gnGraph.netGraph.remove_node(self.id)\n # delete old cache node not to crowd up the topology\n self.gnGraph.netGraph.\\\n node[self.ASnum]['caches'][stream.channel] = None\n self.gnGraph.netGraph.\\\n node[self.ASnum]['nCacheRequests'][stream.channel] = 0\n return\n\n def startDependentStraems(self, cacheStream, curTime):\n cacheStream.updateCounters(curTime)\n cacheStream.beingConsumed = True\n cacheStream.consumePoint = curTime\n channel = cacheStream.channel\n sRateID = STREAM_RATES.index(cacheStream.consumeRate)\n for stream in self.lstStrs_Cnl_Rate[sRateID][channel]:\n if not stream.beingTransmitted:\n self.simRef.eventPush(\n event(\n curTime + PROPAGATION_DELAY,\n stream,\n EVENT_STREAM_START\n )\n )\n return\n\n def getParentCacheStreamTransmitRate(self, stream):\n channel = stream.channel\n sRateID = STREAM_RATES.index(stream.consumeRate)\n cSt = self.cacheStrs[sRateID * NUMBER_CHANNELS + channel]\n return cSt.transmitRate\n\n def getParentCacheStreamBufferSize(self, stream, curTime):\n channel = 
stream.channel\n sRateID = STREAM_RATES.index(stream.consumeRate)\n cSt = self.cacheStrs[sRateID * NUMBER_CHANNELS + channel]\n cSt.updateCounters(curTime)\n inBuffer = float(cSt.downloadedBit - cSt.consumedBit)\n return inBuffer\n\n def updateDependentStreams(self, cacheStream, curTime):\n channel = cacheStream.channel\n sRateID = STREAM_RATES.index(cacheStream.consumeRate)\n for stream in self.lstStrs_Cnl_Rate[sRateID][channel]:\n if stream.transmitingLive:\n stream.tryUseMaxTRate(curTime)\n return\n\n def process(self, ev):\n if ev.type == EVENT_CACHE_READY:\n self.ready = True\n for s in self.waitingStreams:\n self.attachNetDataStream(s, ev.time)\n self.waitingStreams = []\n else:\n raise Exception(\"Unknown event type:\" + str(ev.type))\n return\n\n\nclass netDataStream:\n def __init__(self, sim, cr, sip, dip, s, cnl=None, strType=STREAM_NORMAL):\n self.downloadedBit = 0\n self.sizeBit = s\n self.transmitRate = 0\n self.transmitPoint = None\n self.consumeRate = float(cr)\n self.srcIP = sip\n self.dstIP = dip\n self.links = []\n self.bottleneckLink = None\n self.simRef = sim\n self.id = 0\n self.beingConsumed = False\n self.beingTransmitted = False\n self.consumePoint = 0\n self.consumedBit = 0\n self.bufferingBegin = 0.0\n self.eventRef_trComplete = None\n self.eventRef_consBegin = None\n self.eventRef_consComplete = None\n self.eventRef_bufferEmpty = None\n self.eventRef_expand = None\n self.eventRef_toLiveTRate = None\n self.stats_startTime = None\n self.stats_bufferingTime = 0.0\n self.stats_bufferingEvents = 0\n self.stats_bitRates = []\n self.collectBitrateStats = False\n self.stats_lastTransmitRate_time = 0\n self.stats_transmitRate_sumRates = 0\n self.stats_transmitRate_sumTime = 0\n self.interestingResult = False\n self.stats_events = []\n self.streamType = strType\n if strType == STREAM_NORMAL or strType == STREAM_NOISE:\n self.links.append(netLink(sim, LAN_LINK_RATE, None, None))\n self.channel = cnl\n self.connectedToCache = False # true when a stream is getting the data\n # from a cache node\n self.upCacheRef = None # link to the upped level cache, the one\n # from which the stream gets its data\n self.downCacheRef = None # link to the lower level cache, used to\n # enable cache hierarchy\n self.transmitingLive = False\n return\n\n def __del__(self):\n self.printStats()\n return\n\n def __str__(self):\n if self.streamType == STREAM_NORMAL:\n s = 'netDataStream-'\n elif self.streamType == STREAM_CACHE:\n s = 'netCacheStream-'\n elif self.streamType == STREAM_NOISE:\n s = 'netNoiseStream-'\n else:\n s = 'unknownStream-'\n s += str(self.id) + ' from: ' + self.srcIP +\\\n (\n '(c' + str(len(self.links)) + ')'\n if self.connectedToCache\n else '(d' + str(len(self.links)) + ')'\n ) +\\\n ', to: ' + self.dstIP + ', transmitRate: ' +\\\n str(self.transmitRate) + 'b/s'\n return s\n\n def printStats(self):\n if self.streamType == STREAM_NORMAL:\n self.simRef.simulationStatistics.append(\n (self.streamType,\n self.id,\n self.channel,\n self.stats_startTime,\n self.stats_bufferingTime,\n self.stats_bufferingEvents,\n self.sizeBit / self.consumeRate,\n self.getAvgTRate(),\n self.consumeRate,\n self.connectedToCache,\n self.srcIP,\n self.dstIP)\n )\n if self.streamType != STREAM_NORMAL:\n return\n s = 'stream-' + str(self.id) +\\\n ' from: ' + self.srcIP + ', to: ' + self.dstIP + '\\n' +\\\n 'start time: {:.2f}'.format(self.stats_startTime) +\\\n ', buffering time: {:.2f}'.format(self.stats_bufferingTime) +\\\n ', buffering events:' + str(self.stats_bufferingEvents) +\\\n ', playback 
time: {:.2f}'.format(self.sizeBit / self.consumeRate) +\\\n ', avg dwl-rate: {:.2f}'.format(self.getAvgTRate())\n # and draw a plot\n if (self.interestingResult and self.simRef.topArgs.figures)\\\n or self.simRef.topArgs.allfigures:\n self.drawStreamingPlot(s)\n return\n\n def getAvgTRate(self):\n r = float(self.stats_transmitRate_sumRates) / \\\n self.stats_transmitRate_sumTime\n return r\n\n def drawStreamingPlot(self, s):\n downStartX = downStopX = consStartX = consStopX = None\n buffStartX = buffStopX = None\n legendLines = set()\n for time, typ in self.stats_events:\n plt.plot(\n (time, time),\n (0, LAN_LINK_RATE),\n marker='.',\n mec='k',\n mew=0.25,\n ms=5,\n ls='-',\n lw=0.5,\n color=COLORS_EVENTS[typ],\n label=NAMES_EVENTS[typ] if typ not in legendLines else ''\n )\n if typ == EVENT_CONSUME_BEGIN and consStartX is None:\n consStartX = time\n elif typ == EVENT_CONSUME_BEGIN:\n buffStopX = time\n elif typ == EVENT_CONSUME_BUFFER_EMPTY:\n buffStartX = time\n elif typ == EVENT_CONSUME_COMPLETE:\n consStopX = time\n elif typ == EVENT_STREAM_START:\n downStartX = time\n elif typ == EVENT_STREAM_COMPLETED:\n downStopX = time\n if buffStartX is not None and buffStopX is not None:\n plt.plot(\n (buffStartX, buffStopX),\n (self.consumeRate, self.consumeRate),\n color='r',\n ls='-',\n lw=5,\n alpha=0.8,\n solid_capstyle='butt',\n label='Bufferring' if 'Bufferring' not in legendLines\n else ''\n )\n buffStartX = None\n buffStopX = None\n legendLines.add('Bufferring')\n legendLines.add(typ)\n avgTRate = self.getAvgTRate()\n plt.plot(\n (downStartX, downStopX),\n (avgTRate, avgTRate),\n color='c',\n ls=':',\n lw=2,\n alpha=0.7,\n solid_capstyle='butt',\n label='Avg. TRate'\n )\n plt.plot(\n (consStartX, consStopX),\n (self.consumeRate, self.consumeRate),\n color='c',\n ls='-',\n lw=2,\n alpha=0.7,\n solid_capstyle='butt',\n label='Cons. 
rate'\n )\n x, y = zip(*self.stats_bitRates)\n plt.plot(x, y, lw=1, color='b')\n plt.legend(\n fontsize=7,\n bbox_to_anchor=(1, 1),\n numpoints=1,\n framealpha=0.7\n )\n plt.suptitle(s, fontsize=7)\n plt.ylabel('Bandwidth (b/s)', fontsize=7)\n plt.xlabel('Time (s)', fontsize=7)\n plt.yticks(\n range(\n 0,\n int(LAN_LINK_RATE) + (int(LAN_LINK_RATE)/10),\n int(LAN_LINK_RATE) / 10)\n )\n plt.tick_params(axis='both', which='both', labelsize=5)\n plt.ticklabel_format(style='plain', useOffset=False)\n plt.minorticks_on()\n plt.grid(True)\n plt.savefig(self.simRef.simResDirName + '/fig_' + str(self.id) + '.pdf')\n plt.clf()\n return\n\n def updateCounters(self, curTime):\n if self.beingTransmitted:\n self.downloadedBit +=\\\n (curTime - self.transmitPoint) * self.transmitRate\n self.transmitPoint = curTime\n if self.beingConsumed:\n if self.streamType == STREAM_CACHE \\\n and self.transmitingLive \\\n and self.consumeRate > self.transmitRate:\n self.consumedBit +=\\\n (curTime - self.consumePoint) * self.transmitRate\n else:\n self.consumedBit +=\\\n (curTime - self.consumePoint) * self.consumeRate\n self.consumePoint = curTime\n return\n\n def updateEvent_trComplete(self, curTime):\n if self.beingTransmitted \\\n and self.streamType != STREAM_CACHE:\n expStreamingComplete = \\\n curTime +\\\n float(self.sizeBit - self.downloadedBit) / self.transmitRate\n if self.eventRef_trComplete is None:\n self.eventRef_trComplete = event(\n expStreamingComplete,\n self,\n EVENT_STREAM_COMPLETED\n )\n self.simRef.eventPush(self.eventRef_trComplete)\n else:\n self.simRef.eventUpdateTime(\n self.eventRef_trComplete,\n expStreamingComplete\n )\n else:\n if self.eventRef_trComplete is not None:\n self.simRef.deleteEvent(self.eventRef_trComplete)\n self.eventRef_trComplete = None\n return\n\n def updateEvent_bufferEmpty(self, curTime):\n inBuffer = float(self.downloadedBit - self.consumedBit)\n if -1 < inBuffer < 1:\n inBuffer = 0.0\n if self.transmitRate < self.consumeRate and self.beingConsumed:\n # buffer will become empty\n timeLeft = self.calcBefferEmptyTime(\n inBuffer,\n self.transmitRate,\n self.consumeRate\n )\n if self.eventRef_bufferEmpty is None:\n self.eventRef_bufferEmpty = event(\n curTime + timeLeft,\n self,\n EVENT_CONSUME_BUFFER_EMPTY\n )\n self.simRef.eventPush(self.eventRef_bufferEmpty)\n else:\n self.simRef.eventUpdateTime(\n self.eventRef_bufferEmpty,\n curTime +\n timeLeft\n )\n elif self.eventRef_bufferEmpty is not None:\n # buffer will not become empty\n self.simRef.deleteEvent(self.eventRef_bufferEmpty)\n self.eventRef_bufferEmpty = None\n return\n\n def updateEvent_toLiveTRate(self, curTime):\n if not self.transmitingLive and self.beingTransmitted:\n if self.connectedToCache and self.upCacheRef is not None:\n cacheStreamBufferSize = \\\n self.upCacheRef.getParentCacheStreamBufferSize(\n self, curTime\n )\n timeTillSwitch = \\\n cacheStreamBufferSize / self.transmitRate + curTime\n else:\n bufferSize = self.consumeRate * self.simRef.topArgs.cachesec\n inBuffer = float(self.downloadedBit - self.consumedBit)\n if -1 < inBuffer < 1:\n inBuffer = 0.0\n if self.streamType != STREAM_CACHE:\n if bufferSize > inBuffer + \\\n (self.sizeBit - self.downloadedBit):\n bufferSize = \\\n inBuffer + (self.sizeBit - self.downloadedBit)\n if bufferSize < inBuffer:\n bufferSize = inBuffer\n timeTillSwitch = \\\n (bufferSize - inBuffer) / self.transmitRate + curTime\n if self.eventRef_toLiveTRate is None:\n self.eventRef_toLiveTRate = event(\n timeTillSwitch,\n self,\n EVENT_SWITCH_TO_LIVERATE\n )\n 
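# (added note) timeTillSwitch marks when the client-side buffer reaches its\n # target; once EVENT_SWITCH_TO_LIVERATE fires, tryUseMaxTRate() caps the\n # stream at the live (consume) rate instead of greedily filling the link.\n 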
self.simRef.eventPush(self.eventRef_toLiveTRate)\n else:\n self.simRef.eventUpdateTime(\n self.eventRef_toLiveTRate,\n timeTillSwitch\n )\n return\n\n def updateEvent_consumeBegin(self, curTime):\n # when data in buffer must be >= befferSize\n if self.beingConsumed:\n return\n bufferSize = self.consumeRate * self.simRef.topArgs.cachesec\n inBuffer = float(self.downloadedBit - self.consumedBit)\n if -1 < inBuffer < 1:\n inBuffer = 0.0\n if self.streamType != STREAM_CACHE:\n if bufferSize > inBuffer + (self.sizeBit - self.downloadedBit):\n bufferSize = inBuffer + (self.sizeBit - self.downloadedBit)\n if bufferSize < inBuffer:\n bufferSize = inBuffer\n if self.beingTransmitted:\n readyToPlayTime = \\\n (bufferSize - inBuffer) / self.transmitRate + curTime\n if self.eventRef_consBegin is None: # new event\n self.eventRef_consBegin = event(\n readyToPlayTime,\n self,\n EVENT_CONSUME_BEGIN\n )\n self.simRef.eventPush(self.eventRef_consBegin)\n else: # update old\n self.simRef.eventUpdateTime(\n self.eventRef_consBegin,\n readyToPlayTime\n )\n elif bufferSize == inBuffer and inBuffer > 0:\n if self.eventRef_consBegin is not None:\n self.simRef.eventUpdateTime(self.eventRef_consBegin, curTime)\n else:\n if self.eventRef_consBegin is not None:\n self.simRef.deleteEvent(self.eventRef_consBegin)\n self.eventRef_consBegin = None\n return\n\n def updateEvent_consumeComplete(self, curTime):\n # when we finish consuming the file (if no buff.empty occurs)\n if self.streamType == STREAM_CACHE:\n return\n if self.beingConsumed: # need to update event consume complete\n duration = float(self.sizeBit - self.consumedBit) / self.consumeRate\n if self.eventRef_consComplete is None:\n self.eventRef_consComplete = event(\n curTime + duration,\n self,\n EVENT_CONSUME_COMPLETE\n )\n self.simRef.eventPush(self.eventRef_consComplete)\n else:\n self.simRef.eventUpdateTime(\n self.eventRef_consComplete,\n curTime + duration\n )\n else:\n if self.eventRef_consComplete is not None:\n self.simRef.deleteEvent(self.eventRef_consComplete)\n self.eventRef_consComplete = None\n return\n\n def updateEvents(self, curTime):\n self.updateEvent_trComplete(curTime)\n if self.simRef.topArgs.streaming == 'live':\n self.updateEvent_toLiveTRate(curTime)\n if self.streamType != STREAM_NOISE:\n self.updateEvent_bufferEmpty(curTime)\n self.updateEvent_consumeBegin(curTime)\n if not self.beingTransmitted:\n if self.eventRef_expand is not None:\n self.simRef.deleteEvent(self.eventRef_expand)\n self.eventRef_expand = None\n return\n\n def calcBefferEmptyTime(self, buffSize, Vi, Vo):\n # Vi -- download speed\n # Vo -- playback speed\n # Calculate the sum of the first N terms of a geometric series\n if Vi >= Vo:\n raise Exception(\"Series has no sum (diverges) -> \"\n \"Buffer will not become empty\")\n Vi = float(Vi)\n t0 = float(buffSize) / Vo\n if Vi > 0:\n accuracy = 0.001 # seconds\n b = Vi/Vo\n n = math.ceil(math.log(accuracy, b))\n sumN = t0 * (1.0 - math.pow((Vi/Vo), n)) / (1.0 - Vi/Vo)\n else:\n sumN = t0\n return sumN\n\n def startStreaming(self, curTime):\n if self.streamType == STREAM_NOISE:\n global globalNoiseStreamID\n self.id = globalNoiseStreamID\n globalNoiseStreamID += 1\n elif self.streamType == STREAM_CACHE:\n global globalCacheStreamID\n self.id = globalCacheStreamID\n globalCacheStreamID += 1\n elif self.streamType == STREAM_NORMAL:\n global globalStreamID\n self.id = globalStreamID\n globalStreamID += 1\n self.updateBottleneckLink(newStream=1)\n if self.simRef.simulatorReady:\n newTR = 
self.bottleneckLink.getFairThroughput(1)\n self.stats_lastTransmitRate_time = curTime\n self.setTransmitRate(newTR, curTime)\n for link in self.links:\n link.allocateBandwidthForNewStream(curTime, newTR)\n link.netDataStreams.append(self)\n else:\n # implementing simultaneous start of background noise streams\n # they are all placed onto the links, but have tRate = 0\n newTR = self.bottleneckLink.getFairThroughput(0)\n self.stats_lastTransmitRate_time = curTime\n self.setTransmitRate(newTR, curTime)\n self.eventRef_expand = event(\n curTime + EXPAND_INTERVAL,\n self,\n EVENT_STREAM_EXPAND\n )\n self.simRef.eventPush(self.eventRef_expand)\n return\n\n def setTransmitRate(self, newRate, curTime):\n if newRate != self.transmitRate:\n self.updateCounters(curTime)\n if self.collectBitrateStats:\n self.stats_bitRates.append((curTime, self.transmitRate))\n self.stats_bitRates.append((curTime, newRate))\n self.stats_transmitRate_sumRates += \\\n (curTime - self.stats_lastTransmitRate_time) * self.transmitRate\n self.stats_transmitRate_sumTime += \\\n (curTime - self.stats_lastTransmitRate_time)\n self.stats_lastTransmitRate_time = curTime\n self.transmitRate = newRate\n self.updateEvents(curTime)\n if self.streamType == STREAM_CACHE:\n self.downCacheRef.updateDependentStreams(self, curTime)\n return\n\n def tryUseMaxTRate(self, curTime):\n tr = self.updateBottleneckLink()\n if self.transmitingLive:\n if self.connectedToCache and self.upCacheRef is not None:\n cacheStreamTRate = \\\n self.upCacheRef.getParentCacheStreamTransmitRate(self)\n else:\n cacheStreamTRate = self.consumeRate\n if cacheStreamTRate < tr:\n tr = cacheStreamTRate\n if self.transmitRate != tr:\n self.setTransmitRate(tr, curTime)\n return\n\n def updateBottleneckLink(self, newStream=0):\n self.bottleneckLink = self.links[0]\n minThroughput = self.bottleneckLink.getFairThroughput(newStream)\n for l in self.links:\n tempRateVal = l.getFairThroughput(newStream)\n if tempRateVal < minThroughput:\n minThroughput = tempRateVal\n self.bottleneckLink = l\n return minThroughput\n\n def process(self, ev):\n if ev.type == EVENT_STREAM_START:\n self.beingTransmitted = True\n self.transmitPoint = ev.time\n self.startStreaming(ev.time)\n\n elif ev.type == EVENT_STREAM_COMPLETED:\n self.updateCounters(ev.time)\n self.beingTransmitted = False\n self.eventRef_trComplete = None\n self.setTransmitRate(0, ev.time)\n for link in self.links:\n link.netDataStreams.remove(self)\n if self.connectedToCache:\n self.upCacheRef.detachNetDataStream(self, ev.time)\n if self.streamType == STREAM_NOISE:\n self.simRef.urRef.activeNoiseStreams -= 1\n if self.simRef.simulatorReady \\\n and not self.simRef.simulationDone:\n newEv = self.simRef.urRef.getNoiseEvent(ev.time)\n self.simRef.eventPush(newEv)\n elif self.streamType == STREAM_NORMAL:\n self.simRef.urRef.activeStreams -= 1\n if not self.simRef.urRef.streamGenActive \\\n and (self.simRef.urRef.activeStreams == 0\n and self.simRef.simulatorReady):\n self.simRef.simulationDone = True\n\n elif ev.type == EVENT_STREAM_EXPAND:\n if self.beingTransmitted:\n self.tryUseMaxTRate(ev.time)\n # try to expand every second\n self.eventRef_expand = event(\n ev.time + EXPAND_INTERVAL,\n self,\n EVENT_STREAM_EXPAND\n )\n self.simRef.eventPush(self.eventRef_expand)\n else:\n self.eventRef_expand = None\n return\n # don't let Expand event register in the stream stats\n\n elif ev.type == EVENT_CONSUME_BEGIN:\n self.eventRef_consBegin = None\n self.updateCounters(ev.time)\n self.beingConsumed = True\n self.consumePoint = 
ev.time\n self.updateEvent_consumeComplete(ev.time)\n self.updateEvent_bufferEmpty(ev.time)\n if self.streamType == STREAM_CACHE:\n self.downCacheRef.startDependentStraems(self, ev.time)\n # statistics\n if self.stats_startTime is None:\n self.stats_startTime = ev.time - self.bufferingBegin\n if self.bufferingBegin != 0:\n self.stats_bufferingTime += ev.time - self.bufferingBegin\n self.bufferingBegin = 0\n\n elif ev.type == EVENT_SWITCH_TO_LIVERATE:\n self.transmitingLive = True\n self.eventRef_toLiveTRate = None\n self.tryUseMaxTRate(ev.time)\n return\n # don't let 'Switch to liveRate' event register in the stream stats\n\n elif ev.type == EVENT_CONSUME_COMPLETE:\n self.updateCounters(ev.time)\n self.beingConsumed = False\n self.eventRef_consComplete = None\n self.updateEvents(ev.time)\n\n elif ev.type == EVENT_CONSUME_BUFFER_EMPTY:\n self.eventRef_bufferEmpty = None\n self.updateCounters(ev.time)\n if self.beingConsumed:\n if self.streamType != STREAM_CACHE:\n # the cache stream continues sending\n self.beingConsumed = False\n self.bufferingBegin = ev.time\n self.updateEvent_consumeBegin(ev.time)\n if self.beingTransmitted:\n self.updateEvent_consumeComplete(ev.time)\n # statistics\n self.stats_bufferingEvents += 1\n if self.collectBitrateStats:\n self.interestingResult = True\n else:\n raise Exception(\"Unknown event type: \" + str(ev.type))\n if self.streamType != STREAM_NOISE:\n self.stats_events.append((ev.time, ev.type))\n return\n\n\nclass userRequests:\n def __init__(self, sim, fName, gnGraph, listHosts, maxHosts,\n maxActiveStreams):\n self.re = re.compile(\n '(\\d+\\.\\d+\\.\\d+\\.\\d+)\\s(\\S+)\\s(\\d+)'\n '\\s(\\d+\\.\\d+)\\s(\\d+\\.\\d+)\\s(\\d+\\.\\d+)\\s(\\d+)',\n re.UNICODE\n )\n self.requestQueue = Queue.Queue()\n self.noiseRequestQueue = Queue.Queue()\n self.gnGraph = gnGraph\n self.listOfHosts = listHosts\n self.traceHostMap = dict()\n self.maxHosts = maxHosts # total number of hosts\n self.simRef = sim\n self.activeStreams = 0\n self.totalStreams = 0\n self.activeStreamsMax = maxActiveStreams\n self.streamGenerationRate = self.calcStreamGenRate(sim.topArgs.reqRate)\n self.streamGenRate_next = 0\n self.streamGenActive = True\n self.activeNoiseStreams = 0\n self.totalNoiseStreams = 0\n self.activeNoiseStreamsMax = int(sim.topArgs.backnoise)\n self.startTime = None\n self.initStreamsList = []\n self.listOfChannels = None\n self.numRequestsPerTimePeriod = 0\n self.streamGenRateScenario = [] # (time, requests per min)\n if sim.topArgs.scenario != '':\n if os.path.isfile(sim.topArgs.scenario):\n print(\"\\tUsing a scenario file: \" + sim.topArgs.scenario)\n with open(sim.topArgs.scenario, 'rb') as csvfile:\n reader = csv.reader(csvfile)\n for row in reader:\n time, rate = row\n self.streamGenRateScenario.append(\n (float(time), float(rate))\n )\n else:\n print(\"\\tspecified scenario file not found: \" +\n sim.topArgs.scenario)\n exit(-3)\n if MODEL_USER_BEHAVIOR is True:\n self.startTime = 0.0\n self.traceFile = None\n for t, r in self.streamGenRateScenario:\n self.simRef.eventPush(\n event(t, self, EVENT_CHANGE_REQUEST_RATE)\n )\n self.simRef.eventPush(\n event(sim.topArgs.endtime, self, EVENT_SIM_FINALIZE)\n )\n else:\n self.traceFile = open(fName, 'r')\n self.simRef.eventPush(event(1, self, EVENT_PERIODIC_STATS))\n return\n\n def __del__(self):\n if self.traceFile is not None: # traceFile stays None when MODEL_USER_BEHAVIOR is enabled\n self.traceFile.close()\n\n def calcStreamGenRate(self, userRequest=0.0):\n autoCalcRate = float(self.activeStreamsMax) / MEAN_PBK_TIME\n if userRequest == 0.0:\n result = autoCalcRate\n else:\n result = float(userRequest) / 60\n 
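# (added note) autoCalcRate above is Little's law: to keep roughly\n # activeStreamsMax streams concurrently active with mean playback time\n # MEAN_PBK_TIME, requests must arrive at activeStreamsMax / MEAN_PBK_TIME\n # per second; the branches below just warn when the user rate deviates.\n 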
if result < autoCalcRate:\n print(\n \"\\n\\tinfo: given reqRate (\" + str(60 * result) +\n \") is too small to guarantee \" +\n str(self.simRef.topArgs.active) +\n \" active connections. Try reqRate = \" +\n str(60 * autoCalcRate)\n )\n elif result > autoCalcRate:\n print(\n \"\\n\\tinfo: given reqRate (\" + str(60 * result) +\n \") is too high. Number active connections (\" +\n str(self.simRef.topArgs.active) +\n \") will be exceeded. Try reqRate = \" +\n str(60 * autoCalcRate)\n )\n return result\n\n def genChannelNumber(self):\n channel = numpy.random.zipf(1.2) - 1\n while channel >= NUMBER_CHANNELS:\n channel = numpy.random.zipf(1.2) - 1\n return channel\n\n def getNextEvent(self, curTime):\n if MODEL_USER_BEHAVIOR:\n self.totalStreams += 1\n randHost = random.choice(self.listOfHosts).exploded\n randStartTime = curTime + numpy.random.\\\n standard_gamma(1.0/self.streamGenerationRate)\n randPlayTime = numpy.random.\\\n triangular(MIN_PBK_TIME, MOD_PBK_TIME, MAX_PBK_TIME)\n rateN = numpy.random.poisson(2)\n while rateN > len(STREAM_RATES) - 1:\n rateN = numpy.random.poisson(2)\n randStreamRate = STREAM_RATES[rateN]\n futureRequest = (\n randHost,\n randStreamRate,\n randStreamRate * randPlayTime\n )\n ev = event(randStartTime, self, EVENT_USER_REQUEST)\n else:\n # If we have a trace file with realistic user events...\n futureRequestLine = self.traceFile.readline()\n if futureRequestLine == '':\n return None\n match = self.re.match(futureRequestLine)\n if match is not None:\n if self.startTime is None:\n self.startTime = float(match.group(4))\n # if the trace file is using masked\n # ip-addresses, we have to re-map them\n if match.group(1) not in self.traceHostMap:\n randHost = random.choice(self.listOfHosts).exploded\n self.traceHostMap[match.group(1)] = randHost\n else:\n randHost = self.traceHostMap[match.group(1)]\n futureRequest = (\n randHost,\n STREAM_RATES[2],\n float(match.group(7))\n )\n ev = event(\n float(match.group(4)) - self.startTime,\n self,\n EVENT_USER_REQUEST\n )\n else:\n raise Exception(\n \"Unrecognized format of user behavior trace file,\"\n \" line:\\n\\t>> \" + futureRequestLine\n )\n self.requestQueue.put(futureRequest)\n return ev\n\n def getNoiseEvent(self, curTime):\n self.totalNoiseStreams += 1\n randHost = random.choice(self.listOfHosts).exploded\n randStartTime = curTime + numpy.random.\\\n standard_gamma(MEAN_PBK_TIME/self.activeNoiseStreamsMax)\n randPlayTime = numpy.random.triangular(600, 1800, 3600)\n randStreamRate = STREAM_RATES[int(\n numpy.random.triangular(\n -1, len(STREAM_RATES) / 2, len(STREAM_RATES)\n ))]\n futureNoiseRequest = \\\n (randHost, randStreamRate, randPlayTime * randStreamRate)\n self.noiseRequestQueue.put(futureNoiseRequest)\n ev = event(randStartTime, self, EVENT_NOISE_USER_REQUEST)\n return ev\n\n def routeStreamPath(self, path, s, curTime):\n nodeA = path[0]\n for nodeB in path[1:]:\n if 'p2p_link' not in self.gnGraph.netGraph[nodeA][nodeB]:\n if self.gnGraph.isAccessNode(\n self.gnGraph.netGraph.node[nodeA]['type']\n ) or self.gnGraph.isAccessNode(\n self.gnGraph.netGraph.node[nodeB]['type']\n ):\n self.gnGraph.netGraph[nodeA][nodeB]['p2p_link'] =\\\n netLink(\n self.simRef,\n BACKBONE_LINK_BANDWIDTH,\n nodeA,\n nodeB\n )\n else:\n self.gnGraph.netGraph[nodeA][nodeB]['p2p_link'] =\\\n netLink(\n self.simRef,\n FAST_BACKBONE_LINK_BANDWIDTH,\n nodeA,\n nodeB\n )\n s.links.append(self.gnGraph.netGraph[nodeA][nodeB]['p2p_link'])\n nodeA = nodeB\n if s.streamType == STREAM_NOISE and not self.simRef.simulatorReady:\n for l in 
s.links:\n l.netDataStreams.append(s)\n self.initStreamsList.append(s)\n else:\n self.simRef.eventPush(\n event(\n curTime + PROPAGATION_DELAY*len(path),\n s,\n EVENT_STREAM_START\n )\n )\n return\n\n def addCacheToAS(self, ASn, curTime, channelNum, static=False):\n if 'caches' not in self.gnGraph.netGraph.node[ASn]:\n self.gnGraph.netGraph.\\\n node[ASn]['caches'] = [None] * NUMBER_CHANNELS\n # 1 vm per channel (all str.Rates)\n if self.gnGraph.netGraph.node[ASn]['caches'][channelNum] is None:\n cache = cacheNode(self.simRef, self.gnGraph, ASn)\n assert cache.id not in self.gnGraph.netGraph\n self.gnGraph.netGraph.add_edge(ASn, cache.id)\n self.gnGraph.netGraph.node[ASn]['caches'][channelNum] = cache\n if static:\n cache.process(\n event(\n curTime,\n cache,\n EVENT_CACHE_READY\n )\n )\n else:\n self.simRef.eventPush(\n event(\n curTime + self.simRef.topArgs.cacheinit,\n cache,\n EVENT_CACHE_READY\n )\n )\n else:\n cache = self.gnGraph.netGraph.node[ASn]['caches'][channelNum]\n return cache\n\n def routeStreamPath_inclCache(self, path, s, curTime, first=True):\n cacheOnDemand = self.simRef.topArgs.ondemandCache\n nodeA = path[0]\n for nodeB in path[1:]:\n # Creating a link between node A and B, if it does not exist yet\n if 'p2p_link' not in self.gnGraph.netGraph[nodeA][nodeB]:\n # if one of the nodes is an 'access' AS node then the link\n # speed is set to BACKBONE_LINK_BANDWIDTH\n if self.gnGraph.isAccessNode(\n self.gnGraph.netGraph.node[nodeA]['type']\n ) or self.gnGraph.isAccessNode(\n self.gnGraph.netGraph.node[nodeB]['type']\n ):\n self.gnGraph.netGraph[nodeA][nodeB]['p2p_link'] = \\\n netLink(\n self.simRef,\n BACKBONE_LINK_BANDWIDTH,\n nodeA,\n nodeB\n )\n else:\n self.gnGraph.netGraph[nodeA][nodeB]['p2p_link'] =\\\n netLink(\n self.simRef,\n FAST_BACKBONE_LINK_BANDWIDTH,\n nodeA,\n nodeB\n )\n if nodeA == path[0] or not LOCAL_CACHE_ONLY:\n # increase the cache-init counter and check the threshold\n if 'nCacheRequests' not in self.gnGraph.netGraph.node[nodeA]:\n self.gnGraph.netGraph.\\\n node[nodeA]['nCacheRequests'] = [0] * NUMBER_CHANNELS\n self.gnGraph.netGraph.\\\n node[nodeA]['nCacheRequests'][s.channel] += 1\n if self.gnGraph.netGraph.\\\n node[nodeA]['nCacheRequests'][s.channel] >= \\\n self.simRef.topArgs.cachethreshold:\n # threshold passed, add a cache\n # (all checks are inside the 'addCacheToAS')\n cache = None\n if 'static_cache' in self.gnGraph.netGraph.node[nodeA]:\n cache = self.addCacheToAS(\n nodeA,\n curTime,\n s.channel,\n static=True\n )\n elif cacheOnDemand and first:\n cache = self.addCacheToAS(nodeA, curTime, s.channel)\n if cache is not None\\\n and cache != s.downCacheRef \\\n and cache.attachNetDataStream(s, curTime):\n # 'attachNetDataStream' returns False if cache is not\n # ready yet, if connected -> stop routing\n break\n # if the stream is not connected to a\n # cache @ node A (or there is no cache @ node A)\n if not s.connectedToCache:\n # add the link from node A to node B to the stream path\n # (! 
this does not mean adding stream to all links along\n # the path, this is done later)\n s.links.append(self.gnGraph.netGraph[nodeA][nodeB]['p2p_link'])\n nodeA = nodeB\n # background noise streams: adding stream to all links along\n # the path at init time\n if not self.simRef.simulatorReady and s.streamType == STREAM_NOISE:\n for l in s.links:\n l.netDataStreams.append(s)\n self.initStreamsList.append(s)\n else:\n # schedule 'start streaming' events\n if not s.connectedToCache:\n self.simRef.eventPush(\n event(\n curTime + PROPAGATION_DELAY*len(path),\n s,\n EVENT_STREAM_START\n )\n )\n return\n\n def process(self, ev):\n if ev.type == EVENT_USER_REQUEST:\n dest_ip, stream_rate, data_size = self.requestQueue.get()\n hostAs = self.gnGraph.ip2as[dest_ip]\n path = nx.shortest_path(\n self.gnGraph.netGraph,\n hostAs,\n self.gnGraph.contentProvider\n )\n serv_ip = self.gnGraph.netGraph.\\\n node[self.gnGraph.contentProvider]['ip'].exploded\n ds = netDataStream(\n self.simRef,\n stream_rate,\n serv_ip,\n dest_ip,\n data_size,\n self.genChannelNumber()\n )\n ds.bufferingBegin = ev.time\n if self.simRef.topArgs.streaming == 'live':\n self.routeStreamPath_inclCache(path, ds, ev.time)\n else:\n self.routeStreamPath(path, ds, ev.time)\n # statistics for user request\n ds.stats_events.append((ev.time, ev.type))\n self.activeStreams += 1\n self.numRequestsPerTimePeriod += 1\n if self.streamGenActive:\n self.simRef.eventPush(self.getNextEvent(ev.time))\n elif ev.type == EVENT_NOISE_USER_REQUEST:\n dest_ip, stream_rate, data_size = self.noiseRequestQueue.get()\n hostAs = self.gnGraph.ip2as[dest_ip]\n servAs = random.choice(self.gnGraph.contentNodes)\n serv_ip = self.gnGraph.as2ip[servAs][0][1].exploded\n path = nx.shortest_path(self.gnGraph.netGraph, hostAs, servAs)\n ds = netDataStream(\n self.simRef,\n stream_rate,\n serv_ip,\n dest_ip,\n data_size,\n strType=STREAM_NOISE\n )\n self.routeStreamPath(path, ds, ev.time)\n if self.simRef.simulatorReady:\n self.simRef.eventPush(\n event(\n ev.time + PROPAGATION_DELAY*len(path),\n ds,\n EVENT_STREAM_START\n )\n )\n else:\n self.initStreamsList.append(ds)\n self.activeNoiseStreams += 1\n if not self.simRef.simulationDone:\n if not self.simRef.simulatorReady:\n self.simRef.eventPush(self.getNoiseEvent(ev.time))\n if self.activeNoiseStreams >= self.activeNoiseStreamsMax:\n for tmpStream in self.initStreamsList:\n tmpStream.startStreaming(ev.time)\n tmpStream.bufferingBegin = ev.time\n self.initStreamsList = []\n self.simRef.simulatorReady = True\n self.streamGenActive = True\n # start normal stream\n self.simRef.eventPush(self.getNextEvent(ev.time))\n elif ev.type == EVENT_CHANGE_REQUEST_RATE:\n self.streamGenerationRate = self.calcStreamGenRate(\n self.streamGenRateScenario[self.streamGenRate_next][1]\n )\n self.streamGenRate_next += 1\n elif ev.type == EVENT_SIM_FINALIZE:\n print(\"\\n{:.2f}\".format(float(ev.time)) +\n \" sec. -- SIM_FINALIZE: no new streams\")\n self.streamGenActive = False\n elif ev.type == EVENT_PERIODIC_STATS:\n self.simRef.urStatistics_nActCons.append(\n (ev.time, self.activeStreams)\n )\n reqPerSec = float(self.numRequestsPerTimePeriod) / 10 * 60\n self.simRef.urStatistics_nReqPSec.append((ev.time, reqPerSec))\n self.numRequestsPerTimePeriod = 0\n if not self.simRef.simulationDone:\n self.simRef.eventPush(\n event(\n ev.time + 10,\n self,\n EVENT_PERIODIC_STATS\n )\n )\n print(\n '\\r{:.2f}'.format(float(ev.time)) +\n \" sec. simulated. 
Active Streams = \" +\n str(self.activeStreams), end=\"\"\n )\n sys.stdout.flush()\n else:\n raise Exception(\"Unknown event type:\" + str(ev.type))\n return\n","sub_path":"netStreamingPrimitives.py","file_name":"netStreamingPrimitives.py","file_ext":"py","file_size_in_byte":52354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"426274575","text":"from os.path import isfile\nfrom pagegen.utility import appropriate_markup, DIRDEFAULTFILE\n\n\ndef source_link(site, page):\n\t''' Return link to source file, if it exists '''\n\n\tif isfile(page.target_path + '.txt'):\n\n\t\turl = page.url_path\n\n\t\tif url.endswith('/'):\n\t\t\turl += DIRDEFAULTFILE\n\n\t\thtml = '
<a href=\"' + url + '.txt\">Source</a>'\n\telse:\n\t\thtml = ''\n\n\treturn html\n\n\ndef list_shortcodes(site, page):\n\t''' List built-in shortcodes '''\n\n\tsc_built_in_whitelist = [\n\t\t'figure',\n\t\t'image',\n\t\t'integrity_hash',\n\t\t'menu',\n\t\t'page_url',\n\t\t'youtube',\n\t]\n\n\tscs = site.shortcodes.__repr__()\n\n\thtml = '
<ul>'\n\n\tfor sc in scs.splitlines():\n\t\tfor bsc in sc_built_in_whitelist:\n\t\t\tif sc.startswith(bsc + '('):\n\t\t\t\thtml += '
<li>' + sc + '</li>'\n\t\t\t\tbreak\n\n\thtml += '</ul>
'\n\n\treturn appropriate_markup(page, html)\n","sub_path":"shortcodes.py","file_name":"shortcodes.py","file_ext":"py","file_size_in_byte":877,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"316339003","text":"import json\nimport os\nfrom flask import Flask, jsonify, request\nfrom flask_cors import CORS, cross_origin\n\napp = Flask(__name__)\napp.config['CORS_HEADERS'] = 'Content-Type'\n\n\ndef openfile(arq):\n json_file = open(file=f'{arq}')\n arquivoo = json.load(json_file)\n json_file.close()\n return arquivoo\n\n\ncors = CORS(app, resources={r\"/data/*\": {\"origins\": \"*\"}})\n\nstatus_all = openfile('status_count.json')\nAlbuquerque = openfile(\n './projetosStatus/status_count_[Albuquerque, Albuquerque and Carvalho Comércio] - Mandatory human-resource open architecture.json')\nBatista = openfile(\n './projetosStatus/status_count_[Batista, Moreira and Pereira LTDA] - Monitored multi-state installation.json')\nCarvalho = openfile(\n './projetosStatus/status_count_[Carvalho, Costa and Costa e Associados] - Ergonomic methodical methodology.json')\nCosta_Comércio = openfile(\n './projetosStatus/status_count_[Costa Comércio Comércio] - Sharable non-volatile internet solution.json')\nCosta_LTDA = openfile(\n './projetosStatus/status_count_[Costa LTDA S.A.] - Total asynchronous secured line.json')\nMelo = openfile(\n './projetosStatus/status_count_[Melo, Melo and Santos e Associados] - Organized impactful instruction set.json')\nPereira = openfile(\n './projetosStatus/status_count_[Pereira - Barros Comércio] - Mandatory fault-tolerant Graphical User Interface.json')\nSantos = openfile(\n './projetosStatus/status_count_[Santos - Batista Comércio] - Stand-alone well-modulated policy.json')\nSouza = openfile(\n './projetosStatus/status_count_[Souza Comércio e Associados] - Innovative background implementation.json')\nXavier = openfile(\n './projetosStatus/status_count_[Xavier EIRELI S.A.] 
- Vision-oriented holistic architecture.json')\n\n\n@app.route(\"/data/status/\", methods=[\"GET\"])\n@cross_origin(origin='*', headers=['Content-Type', 'Authorization'])\ndef trello():\n    if request.method == \"GET\":\n        return jsonify(status_all)\n\n\n@app.route(\"/data/albuquerque/\", methods=[\"GET\"])\n@cross_origin(origin='*', headers=['Content-Type', 'Authorization'])\ndef albuquerque():\n    if request.method == \"GET\":\n        return jsonify(Albuquerque)\n\n\n@app.route(\"/data/batista/\", methods=[\"GET\"])\n@cross_origin(origin='*', headers=['Content-Type', 'Authorization'])\ndef batista():\n    if request.method == \"GET\":\n        return jsonify(Batista)\n\n\n@app.route(\"/data/carvalho/\", methods=[\"GET\"])\n@cross_origin(origin='*', headers=['Content-Type', 'Authorization'])\ndef carvalho():\n    if request.method == \"GET\":\n        return jsonify(Carvalho)\n\n\n@app.route(\"/data/costacomercio/\", methods=[\"GET\"])\n@cross_origin(origin='*', headers=['Content-Type', 'Authorization'])\ndef costacomercio():\n    if request.method == \"GET\":\n        return jsonify(Costa_Comércio)\n\n\n@app.route(\"/data/costaltda/\", methods=[\"GET\"])\n@cross_origin(origin='*', headers=['Content-Type', 'Authorization'])\ndef costaltda():\n    if request.method == \"GET\":\n        return jsonify(Costa_LTDA)\n\n\n@app.route(\"/data/melo/\", methods=[\"GET\"])\n@cross_origin(origin='*', headers=['Content-Type', 'Authorization'])\ndef melo():\n    if request.method == \"GET\":\n        return jsonify(Melo)\n\n\n@app.route(\"/data/pereira/\", methods=[\"GET\"])\n@cross_origin(origin='*', headers=['Content-Type', 'Authorization'])\ndef pereira():\n    if request.method == \"GET\":\n        return jsonify(Pereira)\n\n\n@app.route(\"/data/santos/\", methods=[\"GET\"])\n@cross_origin(origin='*', headers=['Content-Type', 'Authorization'])\ndef santos():\n    if request.method == \"GET\":\n        return jsonify(Santos)\n\n\n@app.route(\"/data/souza/\", methods=[\"GET\"])\n@cross_origin(origin='*', headers=['Content-Type', 'Authorization'])\ndef souza():\n    if request.method == \"GET\":\n        return jsonify(Souza)\n\n\n@app.route(\"/data/xavier/\", methods=[\"GET\"])\n@cross_origin(origin='*', headers=['Content-Type', 'Authorization'])\ndef xavier():\n    if request.method == \"GET\":\n        return jsonify(Xavier)\n\n\nif __name__ == \"__main__\":\n    port = int(os.environ.get(\"PORT\", 5000))\n    app.run(host='0.0.0.0', port=port)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":4062,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"183915548","text":"num1=int(input('Enter the first number:'))\nnum2=int(input('Enter the second number:'))\nnum3=int(input('Enter the third number:'))\nif num1>num2 and num1>num3:\n    maior=num1\nif num2>num1 and num2>num3:\n    maior=num2\nif num3>num1 and num3>num2:\n    maior=num3\nif num1\nDate: \nDescription:\n\"\"\"\nimport os\nimport sys\nimport comtypes.client\n\nrutaEspecifica = False\nProgramPath = \"C:\\\\Program Files\\\\Computers and Structures\\\\ETABS 18\\\\ETABS.exe\"\ntry:\n    ETABSObject = comtypes.client.GetActiveObject(\"CSI.ETABS.API.ETABSObject\")\n    print(\"Connection successful!\\nAttaching to an existing instance.\")\nexcept (OSError, comtypes.COMError):\n    print(\"No running instance of the program (ETABS) was found.\")\n    print(\"Trying to launch ETABS!\")\n    helper = comtypes.client.CreateObject('ETABSv1.Helper')\n    helper = helper.QueryInterface(comtypes.gen.ETABSv1.cHelper)\n    if rutaEspecifica:\n        try:\n            ETABSObject = 
helper.CreateObject(ProgramPath)\n            print(\"Connection successful!\\nManual connection.\")\n        except (OSError, comtypes.COMError):\n            print(\"Cannot start a new instance of the program from \" + ProgramPath)\n            sys.exit(-1)\n    else:\n        try: \n            ETABSObject = helper.CreateObjectProgID(\"CSI.ETABS.API.ETABSObject\") \n            print(\"Connection successful!\")\n        except (OSError, comtypes.COMError):\n            print(\"Cannot start a new instance of the program.\")\n            sys.exit(-1)\n    print(\"Launching ETABS!\")\n    ETABSObject.ApplicationStart()\nSapModel = ETABSObject.SapModel\nSapModel.SetModelIsLocked(False)\nres = SapModel.InitializeNewModel()\n\n# === WAYS TO INITIALIZE A MODEL ===\n# res = SapModel.File.NewBlank()\n# res = SapModel.File.NewGridOnly(4,12,12,4,4,24,24)\nres = SapModel.File.NewSteelDeck(4,12.0,12.0,4,4,24.0,24.0)\n\n# Unit preferences\nN_mm_C = 6 #kN_m_c\nSapModel.SetPresentUnits(N_mm_C)\n\n\n# helper loop to list the available functions\n# for nombre in dir(SapModel.PropMaterial):\n#     if nombre.startswith('__') or nombre.startswith('_'):\n#         continue\n#     print(f\"{nombre}\")\n\n# 'add ASTM A706 rebar material property in United States region\n# ret = SapModel.PropMaterial.AddMaterial(\"CONC34\", 2, \"Spain\", \"HA-20\", \"Grade 60\")\n# print(ret)\n\n# Materials\nSapModel.PropMaterial.SetMaterial(\"CONC35\", 2, -1, \"Comment...\")\nprint(SapModel.PropMaterial.GetMaterial(\"CONC35\", 2, -1, \"Comment...\"))\n\n# 'change name of material property\nret = SapModel.PropMaterial.ChangeName(\"CONC35\", \"CONC36\")\n\nret = SapModel.PropMaterial.SetOConcrete_1(\"CONC35\", 35, False, 0, 1, 2, 0.0022, 0.0052, -0.1, 0, 0)\n\n# 'assign other properties\nret = SapModel.PropMaterial.SetOConcrete(\"CONC37\", 5, False, 0, 1, 2, 0.0022, 0.0052)\n\n\n# 'specify temps at which properties will be provided\nMyTemp = [0,50,100]\nret = SapModel.PropMaterial.SetTemp(\"Steel\", 3, MyTemp)\n\n\n\n\n\n# ETABSObject.ApplicationExit(True)\n# clean up variables and delete them\nETABSObject, SapModel, res = None, None, None\ndel ETABSObject, SapModel, res","sub_path":"src/EtabsAPI/EtabsAPI09i_NewGridOnly_material.py","file_name":"EtabsAPI09i_NewGridOnly_material.py","file_ext":"py","file_size_in_byte":2902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"163649732","text":"# https://www.kaggle.com/pmarcelino/comprehensive-data-exploration-with-python\n# %% Import modules\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nfrom scipy import stats\nfrom scipy.stats import norm, skew\nfrom sklearn.base import BaseEstimator, RegressorMixin, TransformerMixin, clone\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.kernel_ridge import KernelRidge\nfrom sklearn.linear_model import Lasso, LinearRegression\nfrom sklearn.metrics import mean_absolute_error\nfrom sklearn.model_selection import RandomizedSearchCV, train_test_split\nfrom sklearn.pipeline import Pipeline, make_pipeline\nfrom sklearn.preprocessing import LabelEncoder, MinMaxScaler, StandardScaler\nfrom sklearn.tree import DecisionTreeRegressor\n\nplt.style.use(style=\"ggplot\")\nsns.set(color_codes=True)\nprint(\"Required modules imported!\")\n\n# %% Visual inspection\n# Four variables are strongly related to the target value:\n# OverallQual\n# YearBuilt\n# TotalBsmtSF\n# GrLivArea\n\ntrain_data = pd.read_csv('train.csv')\nfigure = plt.figure()\nsns.pairplot(x_vars=['OverallQual', 'GrLivArea', 'YearBuilt', 'TotalBsmtSF'], y_vars=[\n    'SalePrice'], data=train_data, 
dropna=True)\nplt.show()\n\n\n# %% Examine variable correlations\n\ncorrmat = train_data.corr()\n# plt.subplots(figsize=(12, 9))\n# sns.heatmap(corrmat, vmax=0.9, square=True)\n# plt.show()\n\n# saleprice correlation matrix\nk = 10  # number of variables for heatmap\ncols = corrmat.nlargest(k, 'SalePrice')['SalePrice'].index\n\ncm = np.corrcoef(train_data[cols].values.T)\nsns.set(font_scale=1.25)\nhm = sns.heatmap(cm, cbar=True, annot=True, square=True, fmt='.2f', annot_kws={\n    'size': 10}, yticklabels=cols.values, xticklabels=cols.values)\nplt.show()\n\n# Analyze these 10 variables\n# GarageCars and GarageArea are similar; keep GarageCars\n# TotalBsmtSF and 1stFloor are correlated; keep TotalBsmtSF\n# TotRmsAbvGrd and GrLivArea are correlated; keep GrLivArea\n\n# scatterplot: pairwise correlation analysis; run with care (CPU intensive), results in '7-features-scatter.pdf'\n# sns.set()\n# cols = ['SalePrice', 'OverallQual', 'GrLivArea',\n#         'GarageCars', 'TotalBsmtSF', 'FullBath', 'YearBuilt']\n# sns.pairplot(train_data[cols], size=2.5)\n# plt.show()\n\n# %% Handle missing values\n# missing data\ntotal = train_data.isnull().sum().sort_values(ascending=False)\npercent = (train_data.isnull().sum()/train_data.isnull().count()\n           ).sort_values(ascending=False)\nmissing_data = pd.concat([total, percent], axis=1, keys=['Total', 'Percent'])\nmissing_data.head(20)\n\n# Variables missing more than 15% of their values should be dropped, so 'PoolQC', 'MiscFeature' and 'FireplaceQu' can be deleted.\n# The GarageX variables are missing the same records, and GarageCars already expresses what this set of variables means, so the other GarageX variables can be deleted.\n# The same applies to the BsmtX variables.\n# MasVnrArea and MasVnrType are not essential; their meaning is already captured by YearBuilt and OverallQual, so these two variables can also be deleted.\n# Electrical has only one NA value; just delete that record.\n# In summary, to handle missing data, we'll delete all the variables with missing data, except the variable 'Electrical'. In 'Electrical' we'll just delete the observation with missing data.\n# dealing with missing data\ntrain_data = train_data.drop(\n    (missing_data[missing_data['Total'] > 1]).index, 1)\ntrain_data = train_data.drop(\n    train_data.loc[train_data['Electrical'].isnull()].index)\n# just checking that there's no missing data missing...\ntrain_data.isnull().sum().max()\n\n# %% Handle outliers\n# Univariate analysis\n# The primary concern here is to establish a threshold that defines an observation as an outlier. To do so, we'll standardize the data. In this context, data standardization means converting data values to have mean of 0 and a standard deviation of 1.\n# standardizing data\nsaleprice_scaled = StandardScaler().fit_transform(\n    train_data['SalePrice'][:, np.newaxis])\nlow_range = saleprice_scaled[saleprice_scaled[:, 0].argsort()][:10]\nhigh_range = saleprice_scaled[saleprice_scaled[:, 0].argsort()][-10:]\nprint('outer range (low) of the distribution:')\nprint(low_range)\nprint('\\nouter range (high) of the distribution:')\nprint(high_range)\n\n# The last two outer-range values above 7 are judged to be outliers; drop them\ntrain_data.drop(train_data[(train_data['GrLivArea'] > 4000) & (\n    train_data['SalePrice'] < 200000)].index, inplace=True)\n\n# Bivariate analysis saleprice/grlivarea\nplt.scatter(x=train_data['TotalBsmtSF'], y=train_data['SalePrice'])\nplt.ylim(0, 800000)\n\n# Another way to plot: pandas' plot\nvar = 'TotalBsmtSF'\ndata = pd.concat([train_data['SalePrice'], train_data[var]], axis=1)\ndata.plot.scatter(x=var, y='SalePrice', ylim=(0, 800000))\n# %% Getting to know SalePrice. Who is 'SalePrice'?\n# Four assumptions should be tested:\n# 1. Normality - When we talk about normality, we mean that the data should look like a normal distribution. This is important because several statistical tests rely on it (e.g. t-statistics). In this exercise we'll just check univariate normality for 'SalePrice' (which is a limited approach). Remember that univariate normality doesn't ensure multivariate normality (which is what we would like to have), but it helps. Another detail to consider is that in big samples (> 200 observations) normality is not such an issue. However, solving normality avoids a lot of other problems (e.g. heteroscedasticity), which is the main reason we do this analysis.\n# 2. Homoscedasticity - I just hope I wrote it right. Homoscedasticity refers to the 'assumption that the dependent variable exhibits equal levels of variance across the range of the predictor variables'. Homoscedasticity is desirable because we want the error term to be the same across all values of the independent variables.\n# 3. 
Linearity - The most common way to assess linearity is to examine scatter plots and search for linear patterns. If the patterns are not linear, it would be worth exploring data transformations. However, since most of the scatter plots we've seen appear to have linear relationships, we won't get into this.\n# 4. Absence of correlated errors - As the definition suggests, correlated errors happen when one error is correlated with another. For instance, if one positive error systematically produces a negative error, it means there is a relationship between these variables. This often occurs in time series, where some patterns are time related. We won't get into this either. However, if you detect something, try adding a variable that can explain the effect you're getting. That's the most common solution for correlated errors.\n# The point here is to test 'SalePrice' in a very lean way. We'll pay attention to:\n# 1. Histogram - kurtosis and skewness.\n# 2. Normal probability plot - the data distribution should closely follow the diagonal that represents the normal distribution.\n\n# histogram and normal probability plot\nsns.distplot(train_data['SalePrice'], fit=norm)\nfig = plt.figure()\nres = stats.probplot(train_data['SalePrice'], plot=plt)\n# Conclusion:\n# 'SalePrice' is not normal. It shows 'peakedness', positive skewness and does not follow the diagonal line.\n# Positive skewness can be corrected by taking the log\ntrain_data['SalePrice'] = np.log(train_data['SalePrice'])\n# transformed histogram and normal probability plot\nsns.distplot(train_data['SalePrice'], fit=norm)\nfig = plt.figure()\nres = stats.probplot(train_data['SalePrice'], plot=plt)\n# Done!\n\n# %% Process GrLivArea\nsns.distplot(train_data['GrLivArea'], fit=norm)\nfig = plt.figure()\nres = stats.probplot(train_data['GrLivArea'], plot=plt)\n# log\ntrain_data['GrLivArea'] = np.log(train_data['GrLivArea'])\n# transformed histogram and normal probability plot\nsns.distplot(train_data['GrLivArea'], fit=norm)\nfig = plt.figure()\nres = stats.probplot(train_data['GrLivArea'], plot=plt)\n\n# %%timeit\n# Process TotalBsmtSF\nsns.distplot(train_data['TotalBsmtSF'], fit=norm)\nfig = plt.figure()\nres = stats.probplot(train_data['TotalBsmtSF'], plot=plt)\n# Can't take the log for records without a basement (TotalBsmtSF == 0)\ntrain_data['TotalBsmtSF'] = train_data['TotalBsmtSF'].apply(\n    lambda x: np.log(x) if x != 0 else x)\n\n\nsns.distplot(train_data[train_data['TotalBsmtSF'] > 0]\n             ['TotalBsmtSF'], fit=norm)\nfig = plt.figure()\nres = stats.probplot(\n    train_data[train_data['TotalBsmtSF'] > 0]['TotalBsmtSF'], plot=plt)\n","sub_path":"DataScience/Kaggle/HoursePrice/Kaggle-learning3.py","file_name":"Kaggle-learning3.py","file_ext":"py","file_size_in_byte":8187,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"620396985","text":"# Simple demo of printing the temperature from each found DS18x20 sensor every five seconds.\r\n# Author: Tony DiCola\r\nimport time\r\n\r\nimport board\r\n\r\nfrom adafruit_onewire.bus import OneWireBus\r\nfrom adafruit_ds18x20 import DS18X20\r\n\r\n\r\n# Initialize one-wire bus on board pin D5.\r\now_bus = OneWireBus(board.D5)\r\n\r\n# Scan for sensors and collect every one found.\r\nds18_bus=ow_bus.scan()\r\nprint(ds18_bus)\r\n\r\nds18=[]\r\nfor probe in ds18_bus:\r\n    print(probe)\r\n    ds18.append(DS18X20(ow_bus, probe))\r\n\r\n# Main loop to print the temperature every five seconds.\r\nwhile True:\r\n\r\n    for sensor in ds18:\r\n        print('{0:0.3f}C'.format(sensor.temperature))\r\n    time.sleep(5.0)\r\n","sub_path":"feather/onewire_test.py","file_name":"onewire_test.py","file_ext":"py","file_size_in_byte":670,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"546967230","text":"import classes.functions as f\nimport classes.globals as g\nimport datetime\nimport sys\n\nfrom classes.measure_component import measure_component\nfrom classes.measure_condition import measure_condition\nfrom classes.footnote_association_measure import footnote_association_measure\n\n\nclass measure(object):\n    def __init__(self, goods_nomenclature_item_id, quota_order_number_id, origin_identifier, duty_amount,\n                 monetary_unit_code, measurement_unit_code, measurement_unit_qualifier_code, measure_type_id,\n                 start_date_override=\"\", 
end_date_override=\"\", measure_sid=-1):\n # from parameters\n self.goods_nomenclature_item_id = goods_nomenclature_item_id\n self.quota_order_number_id = quota_order_number_id\n self.origin_identifier = origin_identifier\n self.duty_amount = duty_amount\n self.monetary_unit_code = monetary_unit_code\n self.measurement_unit_code = measurement_unit_code\n self.measurement_unit_qualifier_code = measurement_unit_qualifier_code\n self.measure_type_id = measure_type_id\n self.start_date_override = start_date_override\n self.end_date_override = end_date_override\n self.goods_nomenclature_sid = 0\n self.duty_list = []\n\n lx = len(self.goods_nomenclature_item_id)\n if (lx < 10):\n self.goods_nomenclature_item_id += (\"0\" * (10 - lx))\n\n # Get the goods nomenclature SID\n sql = \"\"\"SELECT goods_nomenclature_sid FROM goods_nomenclatures\n WHERE producline_suffix = '80' AND goods_nomenclature_item_id = %s\n AND (validity_end_date is null) ORDER BY validity_start_date DESC LIMIT 1\"\"\"\n params = [\n self.goods_nomenclature_item_id\n ]\n cur = g.app.conn.cursor()\n cur.execute(sql, params)\n rows = cur.fetchall()\n if len(rows) > 0:\n self.goods_nomenclature_sid = rows[0][0]\n else:\n # print(\"Error - incorrect goods nomenclature item ID -\", self.goods_nomenclature_item_id)\n self.goods_nomenclature_sid = -1\n\n # Initialised\n self.justification_regulation_id = \"\"\n self.justification_regulation_role = \"1\"\n self.measure_generating_regulation_role = 1\n self.stopped_flag = \"0\"\n self.additional_code_type_id = \"\"\n self.additional_code_id = \"\"\n self.additional_code_sid = \"\"\n self.reduction_indicator = \"\"\n self.export_refund_nomenclature_sid = \"\"\n\n self.measure_component_list = []\n self.measure_sid = measure_sid\n\n def duty_string(self):\n if self.monetary_unit_code == \"\":\n return str(self.duty_amount) + \"%\"\n else:\n out = \"€\"\n out += format(self.duty_amount, \"^0.3f\")\n if self.measurement_unit_code != \"\":\n out += \" per \" + self.fmt_mu()\n\n if self.measurement_unit_qualifier_code != \"\":\n out += \" (\" + self.fmt_muq() + \")\"\n\n return (out)\n\n def fmt_muq(self):\n if self.measurement_unit_qualifier_code == \"E\":\n return (\"net of drained weight\")\n else:\n return (\"blah blah blah\")\n\n def fmt_mu(self):\n if self.measurement_unit_code == \"TNE\":\n return (\"1000kg\")\n elif self.measurement_unit_code == \"DTN\":\n return (\"100kg\")\n elif self.measurement_unit_code == \"DAP\":\n return (\"Decatonne, corrected according to polarisation\")\n elif self.measurement_unit_code == \"HLT\":\n return (\"hl\")\n else:\n return self.measurement_unit_code\n\n def transfer_sid(self):\n pass\n\n def xml(self):\n if self.goods_nomenclature_sid == -1:\n return \"\"\n\n s = g.app.template_measure\n s = s.replace(\"[TRANSACTION_ID]\", str(g.app.transaction_id))\n s = s.replace(\"[MESSAGE_ID]\", str(g.app.message_id))\n s = s.replace(\"[RECORD_SEQUENCE_NUMBER]\", str(g.app.message_id))\n\n for obj in self.measure_component_list:\n obj.measure_sid = self.measure_sid\n obj.update_type = \"3\"\n\n if self.measure_excluded_geographical_area_list is not None:\n for obj in self.measure_excluded_geographical_area_list:\n obj.measure_sid = self.measure_sid\n obj.update_type = \"3\"\n\n s = s.replace(\"[UPDATE_TYPE]\", \"3\")\n s = s.replace(\"[MEASURE_SID]\", f.mstr(self.measure_sid))\n s = s.replace(\"[MEASURE_TYPE_ID]\", f.mstr(self.measure_type_id))\n s = s.replace(\"[GEOGRAPHICAL_AREA_ID]\", f.mstr(self.geographical_area_id))\n s = 
s.replace(\"[GOODS_NOMENCLATURE_ITEM_ID]\", f.mstr(self.goods_nomenclature_item_id))\n s = s.replace(\"[VALIDITY_START_DATE]\", self.validity_start_date)\n s = s.replace(\"[MEASURE_GENERATING_REGULATION_ROLE]\", f.mstr(self.measure_generating_regulation_role))\n s = s.replace(\"[MEASURE_GENERATING_REGULATION_ID]\", f.mstr(self.measure_generating_regulation_id))\n if self.validity_end_date is None:\n print(self.goods_nomenclature_item_id, self.quota_order_number_id)\n s = s.replace(\"[VALIDITY_END_DATE]\", f.mstr(self.validity_end_date))\n s = s.replace(\"[JUSTIFICATION_REGULATION_ROLE]\", f.mstr(self.justification_regulation_role))\n s = s.replace(\"[JUSTIFICATION_REGULATION_ID]\", f.mstr(self.justification_regulation_id))\n s = s.replace(\"[STOPPED_FLAG]\", self.stopped_flag)\n s = s.replace(\"[GEOGRAPHICAL_AREA_SID]\", f.mstr(self.geographical_area_sid))\n s = s.replace(\"[GOODS_NOMENCLATURE_SID]\", f.mstr(self.goods_nomenclature_sid))\n s = s.replace(\"[ORDERNUMBER]\", f.mstr(self.quota_order_number_id))\n s = s.replace(\"[ADDITIONAL_CODE_TYPE_ID]\", f.mstr(self.additional_code_type_id))\n s = s.replace(\"[ADDITIONAL_CODE_ID]\", f.mstr(self.additional_code_id))\n s = s.replace(\"[ADDITIONAL_CODE_SID]\", f.mstr(self.additional_code_sid))\n s = s.replace(\"[REDUCTION_INDICATOR]\", f.mstr(self.reduction_indicator))\n s = s.replace(\"[EXPORT_REFUND_NOMENCLATURE_SID]\", f.mstr(self.export_refund_nomenclature_sid))\n\n s = s.replace(\"\\t\\t\\t\\t\\t\\t\\n\", \"\")\n s = s.replace(\"\\t\\t\\t\\t\\t\\t\\n\", \"\")\n s = s.replace(\"\\t\\t\\t\\t\\t\\t\\n\", \"\")\n s = s.replace(\"\\t\\t\\t\\t\\t\\t\\n\", \"\")\n s = s.replace(\"\\t\\t\\t\\t\\t\\t\\n\", \"\")\n s = s.replace(\"\\t\\t\\t\\t\\t\\t\\n\", \"\")\n s = s.replace(\"\\t\\t\\t\\t\\t\\t\\n\", \"\")\n s = s.replace(\"\\t\\t\\t\\t\\t\\t\\n\", \"\")\n s = s.replace(\"\\t\\t\\t\\t\\t\\t\\n\", \"\")\n s = s.replace(\"\\t\\t\\t\\t\\t\\t\\n\", \"\")\n s = s.replace(\"\\t\\t\\t\\t\\t\\t\\n\", \"\")\n s = s.replace(\"\\t\\t\\t\\t\\t\\t\\n\", \"\")\n\n g.app.message_id += 1\n\n self.component_content = \"\"\n self.condition_content = \"\"\n self.condition_component_content = \"\"\n self.exclusion_content = \"\"\n self.footnote_content = \"\"\n self.pts_content = \"\"\n\n for obj in self.measure_component_list:\n self.component_content += obj.xml()\n\n for obj in self.measure_excluded_geographical_area_list:\n obj.measure_sid = self.measure_sid\n self.exclusion_content += obj.measure_xml()\n\n if self.quota_order_number_id[0:3] == \"094\":\n # Add the standard conditions\n self.conditions = []\n my_condition = measure_condition(self.measure_sid, \"C\", 1, \"27\", \"L\", \"001\")\n self.conditions.append(my_condition)\n\n my_condition = measure_condition(self.measure_sid, \"C\", 2, \"07\", None, None)\n self.conditions.append(my_condition)\n\n my_condition = measure_condition(self.measure_sid, \"Q\", 1, \"27\", \"Y\", \"100\")\n self.conditions.append(my_condition)\n\n my_condition = measure_condition(self.measure_sid, \"Q\", 2, \"07\", None, None)\n self.conditions.append(my_condition)\n\n for c in self.conditions:\n self.condition_content += c.xml()\n\n # Add the standard footnote\n self.footnotes = []\n fn = footnote_association_measure(self.measure_sid, \"CD\", \"356\")\n self.footnotes.append(fn)\n\n for fn in self.footnotes:\n self.footnote_content += fn.xml()\n\n s = s.replace(\"[COMPONENTS]\\n\", self.component_content)\n s = s.replace(\"[CONDITIONS]\\n\", self.condition_content)\n s = s.replace(\"[CONDITION_COMPONENTS]\\n\", 
self.condition_component_content)\n s = s.replace(\"[EXCLUDED]\\n\", self.exclusion_content)\n s = s.replace(\"[FOOTNOTES]\\n\", self.footnote_content)\n s = s.replace(\"[PTS]\\n\", self.pts_content)\n\n return (s)\n","sub_path":"create-data/quotas/classes/measure.py","file_name":"measure.py","file_ext":"py","file_size_in_byte":9278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"308508013","text":"# Definition for singly-linked list.\nclass ListNode(object):\n def __init__(self, x):\n self.val = x\n self.next = None\n\nclass Solution(object):\n def getIntersectionNode(self, headA, headB):\n \"\"\"\n :type head1, head1: ListNode\n :rtype: ListNode\n \"\"\"\n a=headA\n b=headB\n while not a==b:\n if a==None:\n a=headB\n else:\n a=a.next\n if b==None:\n b=headA\n else:\n b=b.next\n return a\n\ns=Solution()\nz=ListNode(0)\na=ListNode(1)\nb=ListNode(2)\nc=ListNode(3)\nz.next=a\na.next=c\n# b.next=c\nprint(s.getIntersectionNode(z,b).val)","sub_path":"getIntersectionNode.py","file_name":"getIntersectionNode.py","file_ext":"py","file_size_in_byte":685,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"437891694","text":"import boto.ec2\nimport time\nimport sys\nfrom boto.ec2.regioninfo import RegionInfo\n\n\nCOOKIE = \"COOKIE\"\nPASSWORD = \"PASSWORD\"\n\n\ndef main(argv):\n nInstances = int(argv[1])\n\n while nInstances:\n region = RegionInfo(name='REGION', endpoint='ENDPOINT')\n\n ec2_my_conn = boto.connect_ec2(\n aws_access_key_id='YOUR_ACCESS_KEY',\n aws_secret_access_key='YOUR_SECRET_ACCESS_KEY',\n is_secure=True,\n region=region,\n port=8773,\n path='/services/Cloud',\n validate_certs=False\n )\n\n reservation = ec2_my_conn.run_instances(\n 'ami-190a1773',\n key_name='KEY',\n instance_type='m2.tiny',\n security_groups=['CouchDB nodes', 'ssh'],\n placement='melbourne-qh2'\n )\n\n instance = reservation.instances[0]\n print('New instance with id: {}, was created.'.format(instance.id))\n\n #vol = ec2_my_conn.create_volume(50, 'melbourne-np')\n #print ('A volume with id: {}, has been created'.format(vol.id))\n\n status = instance.state\n while status != 'running':\n time.sleep(2)\n print ('The instance with id: {} is booting, please wait.'.format(instance.id))\n time.sleep(15)\n status = instance.update()\n\n print('The instance is ready')\n\n #ec2_my_conn.attach_volume(vol.id, instance.id, '/dev/vdc')\n #print (\"volume with id{}, was attached to instance with id {}\".format(vol.id, instance.id))\n\n #time.sleep(7)\n #snapshot = ec2_my_conn.create_snapshot(vol.id, 'instance_snapshot')\n #print ('snapshot {}, of volume {}, was created'.format(snapshot.id, vol.id))\n\n #new_vol = snapshot.create_volume('melbourne-np')\n #print ('creating volume from snapshot')\n\n #ec2_my_conn.delete_snapshot(snapshot.id)\n #print ('snapshot with id {}, was deleted'.format(snapshot.id))\n nInstances -= 1\n\n reservations = ec2_my_conn.get_all_reservations()\n\n INVENTORY_PATH = \"couchdb-inventory\"\n\n print('Index\\tID\\t\\tInstance')\n for idx, res in enumerate(reservations):\n print('{}\\t{}\\t{}'.format(idx, res.id, res.instances))\n\n toWrite = \"\"\n for i in range(len(reservations)):\n print('\\nID: {}\\tIP {}\\tPlacement {}'.format(\n reservations[i].id,\n reservations[i].instances[0].private_ip_address,\n reservations[i].instances[0].placement,\n ))\n if (i == 0):\n toWrite += '[nodes]\\n' + str(reservations[i].instances[0].private_ip_address) + '\\n'\n else:\n toWrite += 
str(reservations[i].instances[0].private_ip_address) + '\\n'\n\n inventoryfile = open(INVENTORY_PATH, 'w+')\n inventoryfile.write(toWrite)\n inventoryfile.close()\n\n DEFAULTS_PATH = \"roles/couchdb/defaults/main.yaml\"\n\n defaultspath = open(DEFAULTS_PATH, 'w+')\n defaultspath.write(\"---\\nCOOKIE: \\\"\"+ COOKIE + \"\\\"\\nN_NODES: \" + argv[1] + \"\\nPASSWORD: \\\"\"+PASSWORD+\"\\\"\")\n\n\nif __name__ == \"__main__\":\n main(sys.argv)","sub_path":"boto_script.py","file_name":"boto_script.py","file_ext":"py","file_size_in_byte":3061,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"578642491","text":"#!/usr/bin/env python3\r\n# -*- coding: utf-8 -*-\r\ndef bubble_sort(arr):\r\n for i in range(0, len(arr)):\r\n for j in range(i+1, len(arr)):\r\n if arr[j] < arr[i]:\r\n tmp = arr[i]\r\n arr[i] = arr[j]\r\n arr[j] = tmp\r\n\r\nimport csv\r\nimport os\r\nimport time\r\n\r\nif __name__ == \"__main__\":\r\n my_arr = [1,22,9, 15, 18, 3, 2, 6, 8]\r\n bubble_sort(my_arr)\r\n print(my_arr)\r\n\r\n script_dir = os.path.dirname(__file__)\r\n rel_path = \"raw_sort_data\"\r\n file_path = os.path.join(script_dir, rel_path)\r\n with open(file_path, \"r+t\") as f:\r\n reader = csv.reader(f)\r\n sortlist = list(reader)\r\n start_time = time.time()\r\n bubble_sort(sortlist)\r\n end_time = time.time()\r\n print (\"bubble sort 10000 item cost is \" , (end_time-start_time))\r\n","sub_path":"sort/bubble_sort.py","file_name":"bubble_sort.py","file_ext":"py","file_size_in_byte":827,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"651851448","text":"\"\"\"\n# this expects the json(b) decoder to be as such:\nawait conn.set_type_codec(\n 'json',\n encoder=json.dumps,\n decoder=json.loads,\n schema='pg_catalog'\n )\n\"\"\"\nfrom typez import DefineLang, FnArg, FnRecord, TableRecord\n\n\ndef _get_calling_sql(schema: str, fn: FnRecord) -> str:\n arg_sql = \",\\n \".join([\n f\"{fn.args[i].name} => ${i + 1}::{fn.args[i].type}\"\n for i in range(len(fn.args))\n ])\n bindargs = \", \".join([arg.name for arg in fn.args])\n return f\"\"\"return await db.fetch_val(\\\"\\\"\\\"\n SELECT {schema}.{fn.name}(\n {arg_sql}\n )\n \\\"\\\"\\\", ({bindargs}))\"\"\"\n\n\ndef _type_lookup(typename: str) -> str:\n typemap = {\n \"text\": \"str\",\n \"integer\": \"int\",\n \"uuid\": \"str\",\n \"json\": \"Dict\",\n \"jsonb\": \"Dict\",\n \"boolean\": \"bool\",\n \"bytea\": \"bytes\",\n }\n if len(typename) > 2 and typename[-2:] == \"[]\":\n item_type = typemap[typename[:-2]]\n return f\"List[{item_type}]\"\n return typemap[typename]\n\n\ndef _get_fn_args(fn: FnRecord) -> str:\n def fmt_arg(arg: FnArg) -> str:\n arg_type = _type_lookup(arg.type)\n if arg.default is None:\n return f\"{arg.name}: {arg_type}\"\n new_default = arg.default\n if arg.default == \"NULL\":\n new_default = \"None\"\n return f\"{arg.name}: {arg_type} = {new_default}\"\n return \", \".join([fmt_arg(arg) for arg in fn.args])\n\n\ndef get_impl_language_fn_def(schema: str, fn: FnRecord) -> str:\n calling_sql = _get_calling_sql(schema, fn)\n impl_fn_args = _get_fn_args(fn)\n ret_type = _type_lookup(fn.ret_type)\n impl = f\"\"\"async def {fn.name}({impl_fn_args}) -> {ret_type}:\n {calling_sql}\n\n\"\"\"\n return impl\n\n\ndef _snake_case_to_camel(word):\n return ''.join(x.capitalize() or '_' for x in word.split('_'))\n\n\ndef get_impl_language_model_def(schema: str, view: TableRecord):\n nt_name = _snake_case_to_camel(view.name)\n props = [f\"{col.name}: 
{_type_lookup(col.type)}\" for col in view.columns]\n props_content = \"\\n \".join(props)\n named_tuple = f\"\"\"class {nt_name}(NamedTuple):\n {props_content}\n\"\"\"\n column_content = \"\\n \".join([f\"'{col.name}': {view.name}_table.{col.name},\" for col in view.columns])\n columns = f\"\"\"{view.name}_table = Table(\"{schema}.{view.name}\")\n{view.name} = Box({{\n 'table_ref': {view.name}_table,\n 'get_query': lambda: PostgreSQLQuery.from_({view.name}_table),\n {column_content}\n}})\"\"\"\n return f\"\"\"{named_tuple}\n\n{columns}\"\"\"\n\n\ndef wrap_view_defs(contents: str) -> str:\n return f\"\"\"from typing import NamedTuple, List, Dict\n\nfrom box import Box # type: ignore\nfrom pypika import Table, PostgreSQLQuery, Schema # type: ignore\n\n\n# views = Schema(\"sys\")\n\n{contents}\n\"\"\"\n\n\ndef wrap_fn_defs(contents: str) -> str:\n return f\"\"\"from typing import Dict, List\n\nimport webskeleton.db as db\n\n\n{contents}\n\"\"\"\n","sub_path":"codegen/define/python_define.py","file_name":"python_define.py","file_ext":"py","file_size_in_byte":2923,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"83"} +{"seq_id":"4579400","text":"#!/usr/bin/env python3\n\"\"\"LeetCode CLI interface and helper functions.\n\nUsage: lc [-h] [-v] command ...\n\npositional arguments:\n command\n init Initilize at a directory and set up remote GitHub repo.\n new Create a new LeetCode solution from template.\n upload (u) Commit a LeetCode solution and push to GitHub.\n template (t)\n Set up a template file for writing solutions.\n category (c)\n Add/Remove categories which LeetCode problems belong to.\n\noptional arguments:\n -h, --help show this help message and exit\n -v, --version show program's version number and exit\n\"\"\"\n\nimport argparse\nfrom datetime import datetime\nimport os\nimport subprocess\nimport json\n\n__version__ = \"0.2.2\"\n__author__ = \"Weiran Fu\"\n__license__ = \"MIT\"\n\nthis_dir, this_filename = os.path.split(__file__)\nDATA_PATH = os.path.abspath(os.path.join(this_dir, \"data.json\"))\nDEFAULT_TEMPLATE_PATH = os.path.abspath(os.path.join(this_dir, \"template.md\"))\ndata = {}\nTEMPLATE_PATH = \"\"\nBASE_DIR = \"\"\ncategories = []\n# load data from data.json\nwith open(DATA_PATH, 'r') as f:\n data = json.load(f)\n TEMPLATE_PATH = data['template']\n BASE_DIR = data['base_dir']\n categories = data['categories']\nif not TEMPLATE_PATH:\n TEMPLATE_PATH = DEFAULT_TEMPLATE_PATH\n\n\ndef init(args):\n \"\"\"Initialize at current directory.\"\"\"\n data['base_dir'] = os.path.abspath(args.directory)\n data['template'] = \"\"\n with open(DATA_PATH, 'w') as f:\n json.dump(data, f)\n subprocess.call([\"git\", \"init\"], cwd=args.directory)\n subprocess.call([\"git\", \"remote\", \"rm\", \"origin\"],\n cwd=args.directory,\n stderr=subprocess.DEVNULL) # omit error in subprocess\n subprocess.call([\"git\", \"remote\", \"add\", \"origin\", args.remote_repo],\n cwd=args.directory)\n\n\ndef create_file(args):\n \"\"\"Create a new LeetCode solution from template.\"\"\"\n filename = args.filename + \".md\"\n target_path = \"{}/Problems/{}/{}\".format(\n BASE_DIR, args.category,\n filename) if args.category != \"Summary\" else \"{}/Summary/{}\".format(\n BASE_DIR, filename)\n # open the template and read lines\n lines = open(TEMPLATE_PATH, 'r').readlines()\n # update title && category && datetime in solution file\n title = \" \".join([s.capitalize() for s in filename[:-3].split(\"-\")])\n now = datetime.now()\n dt_string = now.strftime(\"%Y-%m-%d 
%H:%M:%S\")\n # create a new file and write lines\n os.makedirs(os.path.dirname(target_path), exist_ok=True)\n if TEMPLATE_PATH != DEFAULT_TEMPLATE_PATH:\n with open(target_path, 'w') as fp:\n for line in lines:\n fp.write(line)\n else:\n with open(target_path, 'w') as fp:\n fp.write(lines[0])\n for line in lines[1:]:\n if \"title:\" in line:\n fp.write(\"title: Easy Medium Hard | {}\\n\".format(title))\n elif \"Graph\" in line:\n fp.write(\" - {}\\n\".format(args.category))\n elif \"date:\" in line:\n fp.write(\"date: {}\\n\".format(dt_string))\n elif \"TITLE\" in line:\n fp.write(\"# {}\\n\".format(title))\n else:\n fp.write(line)\n subprocess.call([\"open\", target_path])\n\ndef open_file(args):\n \"\"\"Open a LeetCode solution contains this filename.\"\"\"\n searchname = args.filename\n names = []\n paths = []\n for root, dirs, files in os.walk(BASE_DIR):\n for name in files:\n if searchname.lower() in name.lower():\n names.append(name)\n paths.append(os.path.join(root, name))\n if len(names) == 0:\n print(\"Cannot search any solution contains {}.\".format(searchname))\n return\n for i in range(len(names)):\n print(\"{}. {}\".format(i, names[i]))\n num = int(input(\"Please choose one to open:\\n\"))\n subprocess.call([\"open\", paths[num]])\n\ndef upload_files(args):\n \"\"\"Commit a LeetCode solution and push to GitHub.\"\"\"\n subprocess.call([\"git\", \"add\", \".\"], cwd=BASE_DIR)\n subprocess.call([\"git\", \"commit\", \"-m\", args.m], cwd=BASE_DIR)\n subprocess.call([\"git\", \"push\", \"origin\", \"main\"], cwd=BASE_DIR)\n\n\ndef template(args):\n template_path = \"\"\n if args.set:\n if os.path.isabs(args.set):\n template_path = args.set\n else:\n template_path = os.path.abspath(args.set)\n else:\n template_path = DEFAULT_TEMPLATE_PATH\n data['template'] = template_path\n with open(DATA_PATH, 'w') as f:\n json.dump(data, f)\n\n\ndef category_add(args):\n if args.category in categories:\n return\n categories.append(args.category)\n data['categories'] = sorted(categories)\n with open(DATA_PATH, 'w') as f:\n json.dump(data, f)\n\n\ndef category_rm(args):\n if args.category not in categories:\n return\n categories.remove(args.category)\n data['categories'] = categories\n with open(DATA_PATH, 'w') as f:\n json.dump(data, f)\n\n\n# Construct the CLI\nparser = argparse.ArgumentParser(\n description=\"LeetCode CLI interface and helper functions.\",\n prog=\"lc\",\n epilog=\n \"Further documentation is available at .\"\n)\nparser.add_argument(\"-v\",\n \"--version\",\n action=\"version\",\n version='%(prog)s version ' + __version__)\n\n\ndef parser_help(args):\n parser.print_help()\n\n\nparser.set_defaults(func=parser_help)\nsubparsers = parser.add_subparsers(metavar=\"command\")\n\nparser_init = subparsers.add_parser(\n 'init', help=\"Initilize at a directory and set up remote GitHub repo.\")\nparser_init.add_argument(\"directory\",\n metavar=\"\",\n help=\"The path to the directory to initialize at.\")\nparser_init.add_argument(\n \"remote_repo\",\n metavar=\"\",\n help=\"The link of remote GitHub repo to connect with.\")\nparser_init.set_defaults(func=init)\n\nparser_new = subparsers.add_parser(\n \"new\", help=\"Create a new LeetCode solution from template.\")\nparser_new.add_argument(\"filename\",\n metavar=\"\",\n help=\"The filename of LeetCode solution.\")\nparser_new.add_argument(\n \"category\",\n metavar=\"\",\n choices=categories,\n help=\"The category which LeetCode solution belongs to.\")\nparser_new.set_defaults(func=create_file)\n\nparser_open = 
subparsers.add_parser(\"open\", help=\"Open an existing solution.\")\nparser_open.add_argument(\"filename\", metavar=\"\", help=\"The filename of the solution.\")\nparser_open.set_defaults(func=open_file)\n\nparser_upload = subparsers.add_parser(\n \"upload\",\n aliases=[\"u\"],\n help=\"Commit a LeetCode solution and push to GitHub.\")\nparser_upload.add_argument(\"-m\",\n metavar=\"\",\n default=\":pencil: LeetCode with Me!\",\n help=\"The Git commit message\")\nparser_upload.set_defaults(func=upload_files)\n\nparser_template = subparsers.add_parser(\n \"template\",\n aliases=['t'],\n help=\"Set up a template file for writing solutions.\")\ngroup = parser_template.add_mutually_exclusive_group(\n required=True) # One of -set and --use-default must be chosen\ngroup.add_argument(\"-set\",\n metavar=\"